repo_name (string, 5-114 chars) | repo_url (string, 24-133 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | directory_id (string, 40 chars) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k-683M, nullable) | star_events_count (int64, 0-22.6k) | fork_events_count (int64, 0-4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, 1-13.2k items) | num_files (int64, 1-13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
l-liava-l/real_estate | https://github.com/l-liava-l/real_estate | 70901b5252186a7e22cb79394e1a8581383a3d1b | 1893ec62e3faf9b46b144ad58cfbf308342458b9 | 1e6b2bdca5665a7321a00261373d3767e1b390aa | refs/heads/master | 2021-01-21T07:41:36.640898 | 2014-10-02T18:07:59 | 2014-10-02T18:07:59 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6104783415794373,
"alphanum_fraction": 0.626423716545105,
"avg_line_length": 30.214284896850586,
"blob_id": "ce16c42e0dc16db10ece4404357a9ada201a7e83",
"content_id": "bb130f761894481000944c3a1938af85a7b1c7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 14,
"path": "/nedviga_backend/adverts/management/commands/parce_cian.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom adverts.parcer import CianParcer\n\n\nclass Command(BaseCommand):\n\n def handle(self, adv_type='suburban', cian_id=2386544, *args, **options):\n print(adv_type)\n print(cian_id)\n p = CianParcer()\n if adv_type == 'flat':\n p.parce_rent_flat(cian_id=cian_id)\n elif adv_type == 'suburban':\n p.parce_rent_suburban(cian_id=cian_id)\n\n\n"
},
{
"alpha_fraction": 0.7197802066802979,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 20.41176414489746,
"blob_id": "2b3b23e170b92f2f5fe2e8408f6a6be819919a0d",
"content_id": "5fee02dd0918c21da1b56781f5e47c00a6ad68df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 17,
"path": "/nedviga_backend/uwsgi/uwsgi.ini",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "[uwsgi]\nvirtualenv = /home/nedviga/.virtualenvs/nedviga\nchdir = /var/www/nedviga/nedviga_backend/nedviga_backend\nsocket = /var/www/nedviga/nedviga_backend/uwsgi/uwsgi.sock\nchmod-socket = 777\n\npythonpath=..\nplugins = python\nmodule = wsgi\n\nmaster = true\nprocesses = 2\nmax-requests = 5000\nbuffer-size = 32768\npost-buffering-bufsize = 65536\n\ntouch-reload=/tmp/nedviga\n"
},
{
"alpha_fraction": 0.6616997718811035,
"alphanum_fraction": 0.6639072895050049,
"avg_line_length": 40.181819915771484,
"blob_id": "311c3a75890fe0812b535e67ff062c3a587283b6",
"content_id": "7e9453ca4e3f5915748a3acc17bc881b1638d2d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1930,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 44,
"path": "/nedviga_backend/adverts/models.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nfrom django.forms import model_to_dict\nfrom .amodels import AbstractAdvert\nfrom core.amodels import TimeStampModel\n\n\nclass Advert(AbstractAdvert, TimeStampModel):\n cian_id = models.BigIntegerField('id на cian.ru ', unique=True, db_index=True)\n description = models.TextField('Описание')\n price = models.PositiveIntegerField('Цена', default=0)\n\n PERIOD_DAY = 1\n PERIOD_MONTH = 30\n PERIOD_CHOICES = (\n (PERIOD_DAY, 'в день'),\n (PERIOD_MONTH, 'в месяц'),\n )\n price_period = models.PositiveSmallIntegerField('Период оплаты', choices=PERIOD_CHOICES, null=True, blank=True)\n\n storey = models.SmallIntegerField('Этаж', null=True, blank=True)\n number_of_storeys = models.SmallIntegerField('Этажность', null=True, blank=True)\n area_all = models.PositiveIntegerField('Общая площадь', null=True, blank=True)\n area_kitchen = models.PositiveIntegerField('Площадь кухни', null=True, blank=True)\n area_living = models.PositiveIntegerField('Жилая площадь', null=True, blank=True)\n area_rooms = models.PositiveIntegerField('Площадь комнат', null=True, blank=True)\n\n class Meta:\n verbose_name = 'Объяление'\n verbose_name_plural = 'Объявления'\n ordering = ('-id',)\n\n def __str__(self):\n return str(self.id)\n\n def serialize_to_dict(self):\n base_fields = ['id', 'is_rent', 'price_min', 'price_max']\n if self.section == self.SECTION_HOUSE:\n house_fields = ['furniture', 'tv', 'balcony', 'kitchen_furniture', 'fridge', 'animals',\n 'phone', 'washing_machine', 'children']\n base_fields += house_fields\n fields_dict = model_to_dict(self, fields=base_fields)\n fields_dict['section'] = self.get_section_display()\n return fields_dict\n"
},
{
"alpha_fraction": 0.6476552486419678,
"alphanum_fraction": 0.6489226818084717,
"avg_line_length": 30.559999465942383,
"blob_id": "aedcc635d71a137a4330c12791c071fc977c86fb",
"content_id": "661df6df38e352ff1e83403ed1161c96a85641e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 25,
"path": "/nedviga_backend/nedviga_backend/urls.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom core.views import BaseHTMLPage, RawHTMLPage\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(\n r'^templates/(?P<template_path>[\\w\\-\\.\\/]+)',\n RawHTMLPage.as_view(),\n name='raw_html'\n ),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^frontend_docs/', include('core.urls', namespace='frontend_docs')),\n url(r'^api/adverts/', include('adverts.urls', namespace='adverts')),\n url(r'^api/filters/', include('filters.urls', namespace='filters')),\n url(r'^api/authentication/', include('authentication.urls', namespace='authentication')),\n)\n\nurlpatterns += patterns(\n '',\n url(r'', BaseHTMLPage.as_view(), name='base_page'),\n)\n"
},
{
"alpha_fraction": 0.6149193644523621,
"alphanum_fraction": 0.6330645084381104,
"avg_line_length": 26.55555534362793,
"blob_id": "33832d2042aa0591101c3545c52f6e1d0c90fc4c",
"content_id": "ccbe26e0f1c7c281400c28c1f84e4e703eb4d609",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 18,
"path": "/nedviga_backend/core/models.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom core.amodels import TimeStampModel\n\n\nclass Settings(TimeStampModel):\n \"\"\"\n Настройки\n \"\"\"\n key = models.CharField(verbose_name='Ключ', max_length=255, unique=True)\n value = models.CharField(verbose_name='Значение', max_length=500)\n\n class Meta:\n verbose_name = 'Настройка'\n verbose_name_plural = 'Настройки'\n ordering = ('key',)\n\n def __str__(self):\n return '{0}. {1} - {2}'.format(self.pk, self.key, self.value)\n"
},
{
"alpha_fraction": 0.6153033375740051,
"alphanum_fraction": 0.616148829460144,
"avg_line_length": 32.06993103027344,
"blob_id": "1bc042374bb7a239253fdf0e6ac2be0488ac91c7",
"content_id": "9e51bed87abaf07b935148d5133d882c76825304",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5402,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 143,
"path": "/nedviga_backend/authentication/views.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.sessions.models import Session\nfrom django.http import HttpResponse\n\nfrom authentication.models import User, DEFAULT_PASSWORD\nfrom authentication.utils import send_sms, re_phone\n\nfrom core.views import BaseView\n\nfrom redis_connector import redis\n\n\nclass SessionidView(BaseView):\n \"\"\"\"\n View для тестирования авторизации через sessionid в url\n \"\"\"\n\n def is_authenticated(self, request):\n if request.user.is_authenticated():\n print('авторизирован id={}'.format(request.user.id))\n else:\n print('не авторизирован')\n\n def get(self, request):\n self.is_authenticated(request)\n return HttpResponse()\n\n def post(self, request):\n self.is_authenticated(request)\n return HttpResponse()\n\n def put(self, request):\n self.is_authenticated(request)\n return HttpResponse()\n\n def delete(self, request):\n self.is_authenticated(request)\n return HttpResponse()\n\n\nclass PhoneMixin(object):\n def get_phone(self, request):\n phone = request.POST.get('phone')\n if not phone:\n return self.render_internal_error('Empty phone')\n if not re_phone.match(phone):\n return self.render_internal_error('Invalid phone')\n self.phone = phone\n\n def get_user(self, phone, resend):\n if resend:\n self.sms_code = User().generate_sms_code()\n try:\n user = User.objects.get(phone=phone)\n self.user = user\n if not user.is_active:\n return self.render_internal_error('User is blocked')\n except User.DoesNotExist:\n pass\n if resend:\n redis.setex('nedviga_user_sms_{}'.format(phone), self.sms_code, 300)\n\n\nclass PreLogin(BaseView, PhoneMixin):\n def post(self, request):\n \"\"\"\n :param request:\n :return:\n По номеру телефона зарегистрированного пользователя отсылает sms с кодом авторизации на этот номер\n Варианты ошибок:\n 'Empty phone' - пустой телефон\n 'Invalid phone' - неверный формат номера\n 'User not registered' - пользователь с таким телефоном не зарегистрирован\n 'User is blocked' - пользователь заблокирован\n \"\"\"\n # TODO - сделать защиту от частой отправки смс\n # TODO - сделать на фронте возможность повторной отправки смс\n result = self.get_phone(request)\n if result:\n return result\n\n result = self.get_user(self.phone, True)\n if result:\n return result\n\n send_sms(self.phone, self.sms_code)\n return self.render_empty_success()\n\n\nclass Login(BaseView, PhoneMixin):\n\n def get_sms_code(self, request):\n sms_code = request.POST.get('key')\n user_sms_code = redis.get('nedviga_user_sms_{}'.format(self.phone))\n if user_sms_code:\n if int(user_sms_code) == int(sms_code):\n if not User.objects.filter(phone=self.phone):\n self.user = User()\n self.user.phone = self.phone\n self.user.set_password(DEFAULT_PASSWORD)\n self.user.save()\n else:\n return self.render_internal_error('Invalid sms-code')\n else:\n return self.render_internal_error('Sms-code expired')\n\n def post(self, request):\n \"\"\"\n :param request:\n :param phone: номер телефона\n :param sms_code: sms код проверки\n :return:\n Авторизует пользователя по номеру телефона и sms-коду\n Варианты ошибок:\n 'User not registered' - пользователь с таким телефоном не зарегистрирован\n 'User is blocked' - пользователь заблокирован\n 'Invalid sms-code' - неверный sms-код авторизации\n \"\"\"\n result = self.get_phone(request)\n if result:\n return result\n\n result = self.get_user(self.phone, False)\n if result:\n return result\n\n result = self.get_sms_code(request)\n if result:\n return result\n\n # еще раз вытаскиваем юзера, 
чтобы django его авторизировала, просто User передать нельзя\n self.user = authenticate(phone=self.phone, password=DEFAULT_PASSWORD)\n\n # удаляем все другие сессии этого пользователя, чтобы был залогинен всегда с одного устройства\n my_old_sessions = Session.objects.all()\n for row in my_old_sessions:\n if row.get_decoded().get(\"_auth_user_id\") == self.user.id:\n row.delete()\n # теперь спокойно логиним\n login(request, self.user)\n return self.render_json_response(data={'sessionid': request.session.session_key})\n\n\n"
},
{
"alpha_fraction": 0.6676136255264282,
"alphanum_fraction": 0.6704545617103577,
"avg_line_length": 31,
"blob_id": "e6df602f18f803988814d22e1b3282ac6feff83a",
"content_id": "2ea00f1b47e287d203dfe28f59763e54fc0bfe6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 11,
"path": "/nedviga_backend/authentication/urls.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom .views import PreLogin, Login, SessionidView\n\nurlpatterns = patterns(\n 'authentication.views',\n url(r'^sessionid/?$', SessionidView.as_view(), name='sessionid'),\n url(r'^prelogin/?$', PreLogin.as_view(), name='prelogin'),\n url(r'^login/?$', Login.as_view(), name='login'),\n)\n"
},
{
"alpha_fraction": 0.5319926738739014,
"alphanum_fraction": 0.5676416754722595,
"avg_line_length": 44.58333206176758,
"blob_id": "4d1ac7f6bcb535d8eb835680ab9e38d76f59fdaf",
"content_id": "4231574965bcd952289d18071b5fc171f174cf65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1366,
"license_type": "no_license",
"max_line_length": 378,
"num_lines": 24,
"path": "/nedviga_backend/adverts/views.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport random\n\nfrom core.views import BaseView\n\n\nclass ListAdverts(BaseView):\n def get(self, request):\n results = []\n count = 0\n for x in range(10):\n count += 1\n result = {\n \"count\": count,\n \"type\": \"Жилой гараж\",\n \"address\": \"Каштаянца 15, кв. 80\",\n \"cost\": \"{0} сутки\".format(random.randint(500, 15000)),\n \"description\": \"ОТ СОБСТВЕННИКА.Звонить можно круглосуточно.Сдается комната посуточно в квартире , со всеми удобствами, с евроремонтом и новой мебелью. Для вашего комфортного проживания ЖК телевизор, спутниковое ТВ. Все входит в стоимость проживания Заселение круглосуточно.Без комиссий, предоплат и залога.\",\n \"images\": ['http://lorempixel.com/512/512/' for x in range(random.randint(2, 6))],\n \"numbers\": ['8 (900) 000 00 00' for x in range(random.randint(0, 4))],\n }\n results.append(result)\n return self.render_json_response(results)\n"
},
{
"alpha_fraction": 0.6127783060073853,
"alphanum_fraction": 0.6176185607910156,
"avg_line_length": 32.32258224487305,
"blob_id": "d4869b4b21dbed26ef7735afb30019c20b9f426c",
"content_id": "a10bcb7e176e82169f417dbb7cec7b1dd677cd3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2126,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 62,
"path": "/nedviga_backend/core/views.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport json\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\n\nclass BaseView(View):\n\n content_type = 'application/json'\n\n # def dispatch(self, request, *args, **kwargs):\n # if request.user.is_authenticated():\n # return super(BaseView, self).dispatch(request, *args, **kwargs)\n # else:\n # return self.render_json_response(success=False, errors={'internal_error': 'auth_required'}, status=401)\n\n def render_json_response(self, data=None, errors=None, success=None, status=200):\n \"\"\"\n Стандартная view для проекта\n \"\"\"\n #если success не заполенен, то заполняем автоматически\n if not success:\n if data:\n success = True\n if errors:\n success = False\n if data and errors:\n raise Exception('Manually specify \"success\" arg, because both \"error\" and \"data\" are not empty')\n ctx = {\n 'success': success,\n 'errors': errors,\n 'data': data,\n }\n json_context = json.dumps(ctx).encode('utf-8')\n response = HttpResponse(json_context, self.content_type, status=status)\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Allow-Credentials'] = 'True'\n return response\n\n def render_internal_error(self, text):\n return self.render_json_response(errors={'internal_error': text})\n\n def render_empty_success(self):\n return self.render_json_response(success=True)\n\n\nclass BaseHTMLPage(View):\n @staticmethod\n def get(request):\n return render(request, 'base_page.html')\n\n\nclass RawHTMLPage(View):\n def get(self, request, template_path):\n base_dir = os.path.dirname(os.path.dirname(__file__))\n te = os.path.join(base_dir, '../_public/www/templates/%s' % template_path)\n data = open(te, mode=\"r\", encoding='utf-8', closefd=True).read().encode('utf-8')\n return HttpResponse(data)\n"
},
{
"alpha_fraction": 0.7053571343421936,
"alphanum_fraction": 0.7053571343421936,
"avg_line_length": 27.08333396911621,
"blob_id": "9264193a020ecce13ea272baecb5d2fd6ae584ab",
"content_id": "ed9239c07e25c4ce57418546b5e3b5ef983ce8c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 12,
"path": "/nedviga_backend/core/amodels.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass TimeStampModel(models.Model):\n \"\"\"\n Время создания и удаления\n \"\"\"\n when_created = models.DateTimeField(verbose_name='Когда создана', auto_now_add=True)\n when_modified = models.DateTimeField(verbose_name='Когда отредактирована', auto_now=True)\n\n class Meta:\n abstract = True"
},
{
"alpha_fraction": 0.47843530774116516,
"alphanum_fraction": 0.4964894652366638,
"avg_line_length": 26.69444465637207,
"blob_id": "6cd77cb882d7c6c38d9f25201a549defbc21bc8b",
"content_id": "7206b67952479634b9e9e5d1edc226cfe6f37818",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 36,
"path": "/parse.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\n\n\n\n\ndef parse():\n g = Grab()\n try:\n g.go('http://www.cian.ru/cat.php?deal_type=1&obl_id=1&city[0]=1&totime=300', log_file='./log')\n except (GrabNetworkError, GrabTimeoutError, GrabConnectionError, GrabAuthError) as details:\n print(details)\n return False\n\n reals = dict()\n for tr in g.doc.select('//*[@id=\"tbody\"]//fieldset//table[@class=\"cat\"]/tr'):\n cian_id = tr.node.get('id')\n real = dict()\n if cian_id:\n real['cian_id'] = cian_id\n for td in tr.select('//td'):\n td_id = td.node.get('id')\n if td_id:\n if 'metro' in td_id:\n real['city'] = td.node.findall('a')[0].text\n print(td.node.findall('a')[len(td.node.findall('a'))-1].text)\n if cian_id not in reals:\n reals[cian_id] = real\n\n\n\n\nif __name__ == '__main__':\n p = CianParcer()\n p.parce_adv(adv_id=11359831)\n # parse()\n"
},
{
"alpha_fraction": 0.7048192620277405,
"alphanum_fraction": 0.7063252925872803,
"avg_line_length": 23.592592239379883,
"blob_id": "486a912d877bc6ad1ec8a7660c983b51abaf35cd",
"content_id": "691c2b45db4375ff88f67de943b20e4d37c24bb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 27,
"path": "/nedviga_backend/filters/admin.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "#coding: utf-8\n\nfrom django.contrib import admin\n\nfrom bitfield import BitField\nfrom bitfield.forms import BitFieldCheckboxSelectMultiple\n\nfrom .models import Filter, UserAdvert\n\n\nclass AdminFilter(admin.ModelAdmin):\n formfield_overrides = {\n BitField: {'widget': BitFieldCheckboxSelectMultiple},\n }\n list_display = ('id', 'user', 'is_rent', 'price_min', 'price_max', 'section',)\n list_filter = ('is_rent', 'section',)\n\n\n\n\n\nclass AdminUserAdvert(admin.ModelAdmin):\n list_display = ('id', 'filter', 'advert', 'is_read',)\n list_filter = ('is_read',)\n\nadmin.site.register(Filter, AdminFilter)\nadmin.site.register(UserAdvert, AdminUserAdvert)\n"
},
{
"alpha_fraction": 0.6995305418968201,
"alphanum_fraction": 0.7042253613471985,
"avg_line_length": 22.66666603088379,
"blob_id": "867ea9418b20c157b116b3c5b688a2ad1048e87b",
"content_id": "14aa53b5fb91eb84ede08bfa12ba9227e279532e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 9,
"path": "/nedviga_backend/adverts/urls.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom .views import ListAdverts\n\nurlpatterns = patterns(\n 'adverts.views',\n url(r'random/?$', ListAdverts.as_view(), name='list_random_adverts'),\n)\n"
},
{
"alpha_fraction": 0.6603773832321167,
"alphanum_fraction": 0.6641509532928467,
"avg_line_length": 25.5,
"blob_id": "1b0460b1de565a7766a2837c6f8e066a32240435",
"content_id": "4af2f62bac2a5a826dddba2ce7ce2b1822f31240",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 10,
"path": "/nedviga_backend/filters/urls.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom .views import FilterView\n\nurlpatterns = patterns(\n 'filters.views',\n url(r'^get_list/?$', FilterView.as_view(), name='filter'),\n url(r'^save/?$', FilterView.as_view(), name='save_filter'),\n)\n"
},
{
"alpha_fraction": 0.7529691457748413,
"alphanum_fraction": 0.7529691457748413,
"avg_line_length": 18.136363983154297,
"blob_id": "8429939a4255f563917c34d2f6fb14786134931b",
"content_id": "e86771bfb778e733f95924aa165f1ccdb829bb06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 22,
"path": "/init.sh",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "echo \"Разворачиваем'с\"\n\necho \"Создаем новый проект cordova\"\ncordova create _public com.nedviga.eng nedviga \n\necho \"Добавляем android и ios\"\ncd ./_public \ncordova platform add android \ncordova platform add ios\n\necho \"подключаем зависимости\"\ncd ../mobile_app\nnpm install && bower install\nrm -R ../_public/www\nbrunch build\n\n\necho \"Создаем симлинк для jaded-brunch\" \ncd ../_public/www\nln -s scripts templates\n\necho \"Готово!\"\n"
},
{
"alpha_fraction": 0.6430976390838623,
"alphanum_fraction": 0.6599326729774475,
"avg_line_length": 21.846153259277344,
"blob_id": "4390ebc6daf670c98ee9c7050020ba20fcd66a88",
"content_id": "e9635455cf30defa316e5d2787b3c50056f9f7ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 13,
"path": "/nedviga_backend/authentication/utils.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport re\n\nre_phone = re.compile(\"^[0-9]+$\")\n\nfrom urllib.request import urlopen\n\n\ndef send_sms(phone, message):\n url = 'https://smsimple.ru/http_send.php?user=yorcc-lark&pass=aLt4U5C6&or_id=59989&phone={0}&message={1}'.\\\n format(phone, message)\n urlopen(url)\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 18.25,
"blob_id": "1c26b1decf26a698b527b4f2082d6e6a3887d434",
"content_id": "ed8b545a99bbae3d3803bc68b93b8d412ae6634e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 4,
"path": "/nedviga_backend/redis_connector.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom redis import Redis\nprint('111')\nredis = Redis()\n"
},
{
"alpha_fraction": 0.6483757495880127,
"alphanum_fraction": 0.6501317024230957,
"avg_line_length": 31.08450698852539,
"blob_id": "2670259a886cb8cd753a16050b421a3ca9e4f406",
"content_id": "29d834051dfcdf28c95c2608ba3fe5ac4384cf10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2337,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 71,
"path": "/nedviga_backend/authentication/models.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "#coding: utf-8\n\nimport pdb\nimport random\nimport string\n\nfrom django.core.validators import RegexValidator\n\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager\nfrom django.utils import timezone\nfrom authentication.utils import re_phone\n\nDEFAULT_PASSWORD = 'yorcc-lab'\n\n\nclass UserManager(BaseUserManager):\n\n def _create_user(self, phone, password, is_staff, is_superuser, **extra_fields):\n \"\"\"\n Creates and saves a User with the given username and password.\n \"\"\"\n now = timezone.now()\n if not phone:\n raise ValueError('The given username must be set')\n\n user = self.model(phone=phone,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(DEFAULT_PASSWORD)\n user.save(using=self._db)\n return user\n\n def create_user(self, phone, password=None, **extra_fields):\n return self._create_user(phone, DEFAULT_PASSWORD, False, False, **extra_fields)\n\n def create_superuser(self, phone, password, **extra_fields):\n return self._create_user(phone, DEFAULT_PASSWORD, True, True, **extra_fields)\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n phone = models.CharField('Номер телефона', unique=True, max_length=15, validators=[RegexValidator(re_phone)])\n is_staff = models.BooleanField('staff status', default=False)\n is_active = models.BooleanField('active', default=True)\n date_joined = models.DateTimeField('Когда зарегистрировался?', auto_now_add=True)\n\n objects = UserManager()\n\n USERNAME_FIELD = 'phone'\n REQUIRED_FIELDS = []\n\n class Meta:\n verbose_name = 'Пользователь'\n verbose_name_plural = 'Пользователи'\n ordering = ('id',)\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n return self.phone\n\n def get_short_name(self):\n \"\"\"\n Returns the short name for the user.\n \"\"\"\n return self.phone\n\n def generate_sms_code(self):\n return ''.join([random.choice(string.digits) for i in range(4)])\n"
},
{
"alpha_fraction": 0.7214611768722534,
"alphanum_fraction": 0.7214611768722534,
"avg_line_length": 26.375,
"blob_id": "5df797b907c16e77d2cb4fd1d8222162a2a499c7",
"content_id": "1e89f3e05342536859acfdb90b4693ae5c48923d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 16,
"path": "/nedviga_backend/adverts/admin.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom bitfield import BitField\nfrom bitfield.forms import BitFieldCheckboxSelectMultiple\n\nfrom .models import Advert\n\n\nclass AdminAdvert(admin.ModelAdmin):\n formfield_overrides = {\n BitField: {'widget': BitFieldCheckboxSelectMultiple},\n }\n list_display = ('id', 'is_rent', 'price', 'section', 'cian_id',)\n list_filter = ('is_rent', 'section',)\n\nadmin.site.register(Advert, AdminAdvert)\n"
},
{
"alpha_fraction": 0.6035113334655762,
"alphanum_fraction": 0.6035113334655762,
"avg_line_length": 25.288461685180664,
"blob_id": "51a9d8788b07e43ccc2b4869c41e05051d50f0c8",
"content_id": "4f5456c1661878946ae4b93b25a12fc5849c3e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 52,
"path": "/fabfile.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from fabric.api import run, settings, local, prompt\nfrom fabric.context_managers import lcd\n\ndef commit():\n \"\"\"\n Commit change\n \"\"\"\n local('git status')\n prompt('Press <Enter> to continue or <Ctrl+C> to cancel.')\n local('git add .')\n local('git commit')\n\n\ndef ch(branch):\n \"\"\"\n Move your branch to current HEAD and checkout\n \"\"\"\n local('git branch -f %s' % branch)\n local('git checkout %s' % branch)\n\n\ndef rebase(brunch='master'):\n \"\"\"\n Rebase current branch on other brunch\n \"\"\"\n current_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)\n local('git checkout %s' % brunch)\n local('git pull origin %s' % brunch)\n\n\ndef merge(brunch='master', push=True, with_commit=True):\n \"\"\" Merge with master\n \"\"\"\n current_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)\n try:\n if with_commit:\n commit()\n rebase(brunch)\n local('git checkout %s' % brunch)\n local('git merge --no-ff %s' % current_branch)\n if push:\n local('git push origin %s' % brunch)\n finally:\n local('git checkout %s' % current_branch)\n local('git rebase master')\n\n\ndef pull():\n current_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)\n rebase()\n local('git checkout %s' % current_branch)\n local('git rebase master')\n"
},
{
"alpha_fraction": 0.5187320113182068,
"alphanum_fraction": 0.559077799320221,
"avg_line_length": 19.41176414489746,
"blob_id": "cab350d30bf9de70aaec04bb086e8d48e4bd333b",
"content_id": "3dac4603450770a476cf34138de67ba1e44c9fa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/nedviga_backend/nedviga_backend/settings_prod.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from nedviga_backend.settings_base import *\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nAPPEND_SLASH = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'nedviga',\n 'HOST': '192.168.10.1',\n 'PORT': '5432',\n 'USER': 'postgres',\n 'PASSWORD': 'raw_type_999',\n }\n}\n"
},
{
"alpha_fraction": 0.621082603931427,
"alphanum_fraction": 0.621082603931427,
"avg_line_length": 23.928571701049805,
"blob_id": "5ef64318acb91a836be23a27057f4edd7a60d1be",
"content_id": "cad8607e7b3549d80e79a1f1c733669e174fbbc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 14,
"path": "/nedviga_backend/adverts/management/commands/get_parce_list.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom adverts.parcer import ParseSearchList, CianParcer\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n p = ParseSearchList()\n ids = p.get_flat_rent_list()\n\n pp = CianParcer()\n for x in ids:\n print(x)\n pp.parce_rent_flat(x)\n\n\n"
},
{
"alpha_fraction": 0.8156028389930725,
"alphanum_fraction": 0.8156028389930725,
"avg_line_length": 70,
"blob_id": "aa10b53666f32fc349ec53e10aeb57fcb4a11aa0",
"content_id": "74f5e9a0e9b613bb8fb6055599dccb8c3b8c4bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 2,
"path": "/nedviga_backend/nedviga_backend/readme.md",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "Чтобы использовать свои settings, создате файл setting_dev_your_name.py и сделайте на него симлинк\nln -s setting_dev_your_name.py settings.py"
},
{
"alpha_fraction": 0.6788124442100525,
"alphanum_fraction": 0.6909581422805786,
"avg_line_length": 29.875,
"blob_id": "3b8bf762c37fefacc6f20130d158510037b89b5e",
"content_id": "68abc47a44094335cc2e391e72ca848ccd276446",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 24,
"path": "/nedviga_backend/adverts/amodels.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom bitfield import BitField\nfrom djorm_pgarray.fields import IntegerArrayField\n\n\nclass AbstractAdvert(models.Model):\n is_rent = models.BooleanField('True = аренда, False = продажа', default=True)\n SECTION_APARTMENT = 1\n SECTION_HOUSE = 2\n SECTION_NOT_LIVE = 3\n SECTION_CHOICES = (\n (SECTION_APARTMENT, 'Квартира'),\n (SECTION_HOUSE, 'Дом, коттедж'),\n (SECTION_NOT_LIVE, 'Нежилое помещение'),\n )\n section = models.PositiveSmallIntegerField('Тип помещения', choices=SECTION_CHOICES)\n section_additional = BitField(flags=range(1, 60))\n\n additional_requirements = BitField(flags=range(1, 60))\n\n metro = IntegerArrayField()\n\n class Meta:\n abstract = True\n"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.6890624761581421,
"avg_line_length": 35.5428581237793,
"blob_id": "fecb94c0fb0b50372d1a35b6c37ac53b9a147f75",
"content_id": "0849e5a5acf6fbfdf67b0654fbaed757f1b864a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1479,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 35,
"path": "/nedviga_backend/filters/models.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nfrom adverts.amodels import AbstractAdvert\nfrom core.amodels import TimeStampModel\n\n\nclass Filter(AbstractAdvert, TimeStampModel):\n user = models.ForeignKey('authentication.User', verbose_name='Пользователь')\n name = models.CharField('Название', null=False, blank=False, max_length=50)\n\n price_min = models.PositiveIntegerField('Нижняя граница цены', blank=True, null=True)\n price_max = models.PositiveIntegerField('Верхняя граница цены', blank=True, null=True)\n\n class Meta:\n verbose_name = 'Пользовательский фильтр'\n verbose_name_plural = 'Пользовательские фильтры'\n ordering = ('-id',)\n\n def __str__(self):\n return str(self.id)\n\n\nclass UserAdvert(TimeStampModel):\n filter = models.ForeignKey('Filter', verbose_name='Фильтр')\n advert = models.ForeignKey('adverts.Advert', verbose_name='Объявление')\n is_read = models.BooleanField('Прочитано?', default=False)\n\n class Meta:\n verbose_name = 'Объявление, подходящее под фильтр'\n verbose_name_plural = 'Объявления, подходящие под фильтр'\n ordering = ('-id',)\n unique_together = ('filter', 'advert')\n\n def __str__(self):\n return '{} фильтр {} объявление {}'.format(self.id, self.filter_id, self.advert_id)\n\n"
},
{
"alpha_fraction": 0.5364131331443787,
"alphanum_fraction": 0.5427678227424622,
"avg_line_length": 38.494022369384766,
"blob_id": "04a1442762f67d52716f87ad25c596b24913db6c",
"content_id": "4c60888ec1c4d4b0b3bd86f726ef6e122c424ccd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10244,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 251,
"path": "/nedviga_backend/adverts/parcer.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "import string\n\nfrom grab import Grab\nfrom grab.error import GrabNetworkError, GrabTimeoutError, GrabConnectionError, GrabAuthError\n\nfrom .models import Advert\n\n\nclass CianParcer(object):\n\n def check_extra_options(self, class_name):\n el = self.g.doc.select('//*[@class=\"{}\"]'.format(class_name))\n if len(el) > 0:\n return Advert.EXTRA_YES\n else:\n return Advert.EXTRA_NO\n\n def normalize_integer(self, x):\n if not isinstance(x, int):\n return x\n if x > 2147483647:\n return 2147483647\n else:\n return x\n\n def extract_digits(self, str):\n return ''.join([x for x in str if x in string.digits])\n\n def parce_rent_suburban(self, cian_id, is_rent=True, section=Advert.SECTION_HOUSE):\n self.g = Grab()\n try:\n url = 'http://www.cian.ru/rent/suburban/{}/'.format(cian_id)\n print(url)\n self.g.go(url)\n except (GrabNetworkError, GrabTimeoutError, GrabConnectionError, GrabAuthError) as details:\n print(details)\n return False\n\n raw_advert = {}\n\n #дом, часть дома, таунхауз, участок\n el = self.g.doc.select('//*[@class=\"object_descr_title\"]')[0]\n raw_text = el.node.text.strip()\n if 'дом' in raw_text:\n set_room = 'Дом'\n elif 'часть' in raw_text:\n set_room = 'Часть дома'\n elif 'таунхаус' in raw_text:\n set_room = 'Таунхаус'\n elif 'участок' in raw_text:\n set_room = 'Участок'\n else:\n set_room = False\n\n\n el = self.g.doc.select('//*[@class=\"object_descr_addr\"]')[0]\n raw_advert['street'] = el.node.text.strip()\n\n el = self.g.doc.select('//*[@class=\"object_descr_price\"]')[0]\n raw_price = el.node.text.strip()\n price = [x for x in raw_price if x in string.digits]\n raw_advert['price'] = int(''.join(price))\n if 'в сутки' in raw_price:\n raw_advert['price_period'] = Advert.PERIOD_DAY\n elif 'в месяц' in raw_price:\n raw_advert['price_period'] = Advert.PERIOD_MONTH\n else:\n print('Не смог вычислить период оплаты')\n\n el = self.g.doc.select('//*[@class=\"object_descr_text\"]')[0]\n description = el.node.text.strip()\n if not description:\n #если так не нашли описания - поищем в потомках\n cc = el.node.iterchildren()\n description = ''\n for c in cc:\n raw_text = c.text or ' '\n raw_tail = c.tail or ' '\n if 'Телефоны' in raw_text:\n continue\n description += raw_text.strip()\n description += raw_tail.strip()\n raw_advert['description'] = description\n\n try:\n advert = Advert.objects.get(cian_id=cian_id)\n for key, value in raw_advert.items():\n setattr(advert, key, value)\n except Advert.DoesNotExist:\n advert = Advert(**raw_advert)\n advert.is_rent = is_rent\n advert.section = section\n advert.cian_id = cian_id\n advert.rooms_count = 0\n advert.house_type = 0\n if set_room:\n setattr(advert.house_type, set_room, set_room)\n advert.save()\n\n def parce_rent_flat(self, cian_id, is_rent=True, section=Advert.SECTION_APARTMENT):\n self.g = Grab()\n try:\n url = 'http://www.cian.ru/rent/flat/{}/'.format(cian_id)\n # url = 'http://www.cian.ru/rent/suburban/{}/'.format(cian_id)\n print(url)\n self.g.go(url)\n except (GrabNetworkError, GrabTimeoutError, GrabConnectionError, GrabAuthError) as details:\n print(details)\n return False\n\n raw_advert = {}\n\n #комната или количество комнат\n el = self.g.doc.select('//*[@class=\"object_descr_title\"]')[0]\n raw_text = el.node.text.strip()\n if 'комната' in raw_text:\n set_room = 'room'\n elif 'комн.' 
in raw_text:\n for x in range(1, 7):\n print(x)\n if str(x) in raw_text:\n set_room = str(x)\n else:\n set_room = False\n\n\n el = self.g.doc.select('//*[@class=\"object_descr_addr\"]')[0]\n raw_advert['street'] = el.node.text.strip()\n\n el = self.g.doc.select('//*[@class=\"object_descr_props\"]')[0]\n for tr in el.node.findall('tr'):\n th = tr.findall('th')\n td = tr.findall('td')\n th_text = th[0].text\n try:\n td_text = td[0].text\n except IndexError:\n td_text = ''\n if th_text == 'Этаж:':\n raw_storey, raw_number_of_storeys = td_text.split('/')\n raw_advert['storey'] = int(raw_storey.strip())\n raw_advert['number_of_storeys'] = int(raw_number_of_storeys.strip())\n elif th_text == 'Общая площадь:':\n raw_area_all = td[0].findall('i')[0].tail.strip()\n raw_area_all_clean = self.extract_digits(raw_area_all)\n if not raw_area_all_clean:\n raw_advert['area_all'] = None\n else:\n raw_advert['area_all'] = int(raw_area_all_clean)\n elif th_text == 'Площадь комнат:':\n raw_area = td[0].findall('i')[0].tail.strip()\n raw_area = raw_area.split('-')[-1]\n raw_area_clean = self.extract_digits(raw_area)\n if not raw_area_clean:\n raw_advert['area_rooms'] = None\n else:\n raw_advert['area_rooms'] = int(raw_area_clean)\n\n elif th_text == 'Жилая площадь:':\n raw_area_living = td[0].findall('i')[0].tail.strip()\n raw_area_living_clean = self.extract_digits(raw_area_living)\n if not raw_area_living_clean:\n raw_advert['area_living'] = None\n else:\n raw_advert['area_living'] = int(raw_area_living_clean)\n elif th_text == 'Площадь кухни:':\n raw_area = td[0].findall('i')[0].tail.strip()\n raw_area_clean = self.extract_digits(raw_area)\n if not raw_area_clean:\n raw_advert['area_kitchen'] = None\n else:\n raw_advert['area_kitchen'] = int(raw_area_clean)\n\n raw_advert['area_all'] = self.normalize_integer(raw_advert.get('area_all', 0))\n raw_advert['area_rooms'] = self.normalize_integer(raw_advert.get('area_rooms', 0))\n raw_advert['area_living'] = self.normalize_integer(raw_advert.get('area_living', 0))\n raw_advert['area_kitchen'] = self.normalize_integer(raw_advert.get('area_kitchen', 0))\n\n\n el = self.g.doc.select('//*[@class=\"metro_icon\"]')[0]\n raw_metro = el.node.tail.strip()\n raw_advert['metro'] = ''.join(x for x in raw_metro if x not in string.punctuation)\n\n el = self.g.doc.select('//*[@class=\"object_descr_price\"]')[0]\n raw_price = el.node.text.strip()\n price = [x for x in raw_price if x in string.digits]\n raw_advert['price'] = int(''.join(price))\n if 'в сутки' in raw_price:\n raw_advert['price_period'] = Advert.PERIOD_DAY\n elif 'в месяц' in raw_price:\n raw_advert['price_period'] = Advert.PERIOD_MONTH\n else:\n print('Не смог вычислить период оплаты')\n\n el = self.g.doc.select('//*[@class=\"object_descr_text\"]')[0]\n raw_advert['description'] = el.node.text.strip()\n\n raw_advert['furniture'] = self.check_extra_options('objects_item_details_i_living_furnished')\n raw_advert['tv'] = self.check_extra_options('objects_item_details_i_tv')\n raw_advert['balcony'] = self.check_extra_options('objects_item_details_i_balcony')\n raw_advert['kitchen_furniture'] = self.check_extra_options('objects_item_details_i_kitchen_furnished')\n raw_advert['fridge'] = self.check_extra_options('objects_item_details_i_fridge')\n raw_advert['animals'] = self.check_extra_options('objects_item_details_i_animals')\n raw_advert['phone'] = self.check_extra_options('objects_item_details_i_phone')\n raw_advert['washing_machine'] = self.check_extra_options('objects_item_details_i_washing_machine')\n 
raw_advert['children'] = self.check_extra_options('objects_item_details_i_children')\n\n try:\n advert = Advert.objects.get(cian_id=cian_id)\n for key, value in raw_advert.items():\n setattr(advert, key, value)\n except Advert.DoesNotExist:\n advert = Advert(**raw_advert)\n advert.is_rent = is_rent\n advert.section = section\n advert.cian_id = cian_id\n advert.rooms_count = 0\n if set_room:\n setattr(advert.rooms_count, set_room, set_room)\n advert.save()\n\n\nclass ParseSearchList(object):\n totime = 300\n\n def __init__(self, totime=totime):\n self.totime = totime\n\n def get_flat_rent_list(self):\n url = 'http://www.cian.ru/cat.php?deal_type=1&obl_id=1&city[0]=1'\n self.g = Grab()\n try:\n print(url)\n self.g.go(url)\n except (GrabNetworkError, GrabTimeoutError, GrabConnectionError, GrabAuthError) as details:\n print(details)\n return False\n\n a_tag_list = self.g.doc.select('//a[@target=\"_blank\"]')\n count = 0\n ids = []\n for a_tag in a_tag_list:\n #нужны не все ссылки, а только те, в которых есть 1 тег font\n font_tag_list = a_tag.node.findall('font')\n if len(font_tag_list) == 1:\n count += 1\n href = a_tag.node.attrib.get('href')\n ids.append(int(href.split('/')[-1]))\n print(count)\n print(ids)\n return ids\n\n"
},
{
"alpha_fraction": 0.7077922224998474,
"alphanum_fraction": 0.7110389471054077,
"avg_line_length": 29.799999237060547,
"blob_id": "7a1184ba544361831a5179c20bd00cf755bc6783",
"content_id": "9d3e780d9a146d4e0d943d57b561a918f9660ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 10,
"path": "/nedviga_backend/authentication/middleware.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom django.conf import settings\n\n\nclass SessionUrlHackMiddleware(object):\n\n def process_request(self, request):\n raw_session_key = request.GET.get(settings.SESSION_COOKIE_NAME)\n if raw_session_key:\n request.COOKIES[settings.SESSION_COOKIE_NAME] = raw_session_key\n"
},
{
"alpha_fraction": 0.48632386326789856,
"alphanum_fraction": 0.48741793632507324,
"avg_line_length": 25.83823585510254,
"blob_id": "1c261297a18d437cb5d922af79d7327ab44bec4d",
"content_id": "2abc37307c3fe17ea1032398a8814de589723c2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1861,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 68,
"path": "/mobile_app/app/scripts/pages/adverts/scripts/react-template.jsx",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "/** @jsx React.DOM */\n\nReact.initializeTouchEvents(false);\n\nwindow.AdvertList = React.createClass({\n displayName: \"AdvertList\",\n\n componentWillMount : function() {\n var scope = this.props.scope;\n scope.advertList.react = this;\n scope.$digest();\n scope.loadMore();\n },\n\n render: function() {\n\n var scope = this.props.scope;\n var items = scope.visible.map(function(advert, index, array) {\n\n advert.slider = 'slider' + index;\n\n var images = advert.images.map(function(img) {\n return (\n <div className=\"slide\">\n <img className=\"full-image\" src={img}></img>\n </div>)\n });\n\n var numbers = advert.numbers.map(function(num) {\n return (<a>{num} </a>)\n });\n\n return (\n <div className=\"list card\" >\n <div className=\"item row\">\n <div className=\"col margin\">\n <h2>{advert.type}</h2>\n <p>{advert.cost}</p>\n </div>\n <div className=\"col\">\n <p>{advert.address}</p>\n </div>\n </div>\n <div className=\"item item-body\">\n <div className=\"slidebox\">\n <div className=\"slides row\" id={advert.slider}>{images}</div>\n </div>\n <p>{advert.description}</p>\n <p>{numbers}</p>\n </div>\n <div className=\"item tabs tabs-secondary tabs-icon-left\" onClick={scope.addToBookmark(advert)}>\n <div className=\"tab-item\">\n <i className=\"ion-android-promotion\"></i>\n {(advert.bookmark === true) ? ' Убрать из закладок' : \" Добавить в закладки\"}\n </div>\n </div>\n\n </div>\n );\n });\n\n return (\n <div>\n {items}\n </div>\n );\n }\n});\n\n\n\n"
},
{
"alpha_fraction": 0.6584070920944214,
"alphanum_fraction": 0.6601769924163818,
"avg_line_length": 30.38888931274414,
"blob_id": "749ac2739b669cb4ec8891a289a98fb31b62f6f2",
"content_id": "d2b46a6f0d2d8363b4a2ad0309b7e007f4a70d14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 565,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 18,
"path": "/nedviga_backend/filters/views.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom core.views import BaseView\nfrom adverts.models import Advert\n\n\nclass FilterView(BaseView):\n def get(self, request):\n filter_id = int(request.GET.get('filter_id'))\n user = request.user\n adverts = Advert.objects.filter(useradvert__filter__id=filter_id, useradvert__filter__user__id=user.id)\n adverts_list = []\n for advert in adverts:\n adverts_list.append(advert.serialize_to_dict())\n return self.render_json_response(data=adverts_list)\n\n def post(self, request):\n print(request)\n"
},
{
"alpha_fraction": 0.7096070051193237,
"alphanum_fraction": 0.7117903828620911,
"avg_line_length": 40.6363639831543,
"blob_id": "bd0d16dde27991c321c07ed2821dbf9e7b9b556a",
"content_id": "799d83a9fa023638ea97459c74166a0790ac528e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 458,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 11,
"path": "/nedviga_backend/core/urls.py",
"repo_name": "l-liava-l/real_estate",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom django.views.generic import TemplateView\n\nurlpatterns = patterns(\n 'core.views',\n url(r'^$', TemplateView.as_view(template_name='frontend_docs/base.html'), name='base'),\n url(r'^authentication/?$', TemplateView.as_view(template_name='frontend_docs/auth.html'), name='auth'),\n url(r'^adverts/?$', TemplateView.as_view(template_name='frontend_docs/adverts.html'), name='adverts'),\n)\n"
}
] | 30 |
abcei2/mask_detector_api | https://github.com/abcei2/mask_detector_api | e4e6f775f7d98afd0f11426793599e02367e9f60 | d589bd6295fd757ef94567106df57af7ea71f2bf | 4eccf0c3281ca6388b4d9fb46f40d40d6f484518 | refs/heads/master | 2022-07-28T13:29:17.735144 | 2020-05-28T16:36:47 | 2020-05-28T16:36:47 | 266,783,321 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5960099697113037,
"alphanum_fraction": 0.618453860282898,
"avg_line_length": 28.703702926635742,
"blob_id": "5c65ecb4547c35f857e4ab0c30ae13c963866494",
"content_id": "9b0be67da0d7133205ed5702f2523f3f11b0d2fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1604,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 54,
"path": "/src/mask_detector/__init__.py",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "import os\nimport cv2\nimport insightface\n\nimport numpy as np\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\n\nmask_model_path = os.path.join(\n os.path.dirname(__file__), 'model', 'mask_detector.model'\n)\nmask_model = load_model(mask_model_path)\n\ninsight_model = insightface.model_zoo.get_model('retinaface_r50_v1')\ninsight_model.prepare(ctx_id=-1, nms=0.4)\n\n\ndef extract_face(img):\n bboxs, landmarks = insight_model.detect(img, threshold=0.5, scale=1.0)\n\n faces = [\n {\n \"upper_left\": [int(bbox[0]), int(bbox[1])],\n \"down_right\": [int(bbox[2]), int(bbox[3])],\n \"landmarks\": [\n [int(coord[0]), int(coord[1])] for coord in landmark\n ]\n } for bbox, landmark in zip(bboxs, landmarks) if bboxs is not None\n ]\n\n face = faces[0] if len(faces) > 0 else None\n\n return (img[\n face['upper_left'][1]:face['down_right'][1],\n face['upper_left'][0]:face['down_right'][0]\n ], face) if face else (None, None)\n\n\ndef detect(img):\n face, box = extract_face(img)\n\n if face is not None:\n face = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face)\n face = np.expand_dims(face, axis=0)\n\n mask, withoutMask = mask_model.predict(face)[0]\n return {'with_mask': bool(mask > withoutMask), 'box': box}\n else:\n return {'with_mask': None, 'box': None}\n"
},
{
"alpha_fraction": 0.517912745475769,
"alphanum_fraction": 0.5552959442138672,
"avg_line_length": 31.935897827148438,
"blob_id": "24b2458ab423e5fc832b4da2479b3403519fc66b",
"content_id": "6a2484ba4cd6ca6618cf2a89eb9fb5b74baccb59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2568,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 78,
"path": "/test_api.py",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "'''\nThis code is an example in the way we could use this api\ncommand:\n* register: to register a new face.\n* proccess: to detect and classify faces on an image.\nname:\n* name of register face, only work when command='register'.\n**************\nupdate_model: local variable True if you want to update the model, \nFalse if you want to register or detect faces\n'''\n\nimport requests\nimport os\nimport json\nimport cv2\n\n\nweb_camera=True\nip_of_api=\"192.168.1.2\"\nport_of_api=\"5000\"\npath_image=\"/home/santi/Downloads/90442488_4188275714531241_492913197330726912_n.jpg\"\n\nif not web_camera:\n\n frame=cv2.imread(path_image)\n frame=cv2.imencode(\".jpg\", frame)[1]\n\n ### UPLOAD IMAGE \n files = {'file': ('image.jpg', frame, 'multipart/form-data')}\n\n response = requests.post(f'http://{ip_of_api}:{port_of_api}/detect_faces',\n files=files)\nelse:\n cap=cv2.VideoCapture(0)\n while True:\n ret, frame= cap.read()\n fram_draw=frame.copy()\n frame_to_upload=cv2.imencode(\".jpg\", frame)[1]\n ### UPLOAD IMAGE \n files = {'file': ('image.jpg', frame_to_upload, 'multipart/form-data')}\n\n response = requests.post(f'https://ai.tucanoar.com/faces/detect_faces/',\n files=files)\n #print(response.json())\n detections=response.json()\n for faces in detections['message']['faces_detected']:\n print(faces)\n\n face_image=frame[faces['upper_left'][1]:faces['down_right'][1], faces['upper_left'][0]:faces['down_right'][0]]\n \n \n face_image_to_upload=cv2.imencode(\".jpg\", face_image)[1]\n ### UPLOAD IMAGE \n files = {'file': ('image.jpg', face_image_to_upload, 'multipart/form-data')}\n\n response = requests.post(f'http://localhost:5000/detect_mask/',\n files=files)\n print(response.json())\n \n if response.json()[\"message\"][\"with_mask\"]:\n color_mask=(0,200,0)\n else:\n color_mask=(0,0,200)\n\n cv2.rectangle(\n fram_draw,\n (faces['upper_left'][0],faces['upper_left'][1]),\n (faces['down_right'][0],faces['down_right'][1]),\n color_mask\n )\n for landmark in faces[\"landmarks\"]:\n print(landmark)\n cv2.circle(fram_draw, (landmark[0],landmark[1]), 2, color_mask, -1) \n \n \n cv2.imshow(\"frame with faces\",fram_draw)\n cv2.waitKey(50)"
},
{
"alpha_fraction": 0.5871080160140991,
"alphanum_fraction": 0.607665479183197,
"avg_line_length": 23.52991485595703,
"blob_id": "130a4b381233c5952a80e5b28b190a66b7174061",
"content_id": "545145ab2cf4c58788bbb1d9cbfe1c405ac879cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2870,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 117,
"path": "/src/api.py",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "import os\nimport cv2\nimport json\nimport time\nimport base64\n\nimport numpy as np\n\nfrom flask import Flask, request, jsonify\nfrom flask_socketio import SocketIO, send\nfrom werkzeug.utils import secure_filename\n\nfrom mask_detector import detect\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret_key_786acdaf'\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\n\nALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef is_busy():\n\n with open('src/database.json', 'r') as json_file:\n data = json.load(json_file)\n if(data['flag_occupied'] == \"BUSY\"):\n return True\n else:\n with open('src/database.json', 'w') as json_file:\n data['flag_occupied'] = \"BUSY\"\n json.dump(data, json_file)\n return False\n\n\ndef not_busy():\n with open('src/database.json', 'r') as json_file:\n data = json.load(json_file)\n\n with open('src/database.json', 'w') as json_file:\n data['flag_occupied'] = \"NOT BUSY\"\n json.dump(data, json_file)\n\n print(\"NOT BUSY\")\n\n\nglobal bussy\nbussy = False\n\n\[email protected]('/detect/', methods=['POST'])\ndef upload_file():\n global bussy\n if bussy:\n resp = jsonify({'message': 'Service is being used'})\n resp.status_code = 503\n return resp\n bussy = True\n\n if 'file' not in request.files:\n resp = jsonify({'message': 'No file part in the request'})\n resp.status_code = 400\n bussy = False\n return resp\n\n file_1 = request.files['file']\n\n if file_1.filename == '':\n resp = jsonify({'message': 'No file selected for uploading'})\n resp.status_code = 400\n bussy = False\n return resp\n\n if file_1 and allowed_file(file_1.filename):\n filename = secure_filename(file_1.filename)\n\n file_1.save(os.path.join(\"./\", filename))\n img = cv2.imread(f\"./{filename}\")\n\n detections = detect(img)\n print(detections)\n resp = jsonify({'message': detections})\n resp.status_code = 200\n bussy = False\n return resp\n\n else:\n resp = jsonify({\n 'message': f\"Allowed file types are {','.join(ALLOWED_EXTENSIONS)}\"\n })\n resp.status_code = 400\n bussy = False\n return resp\n\n\ndef frame_from_b64image(b64image):\n nparr = np.frombuffer(base64.b64decode(b64image), np.uint8)\n return cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n\[email protected]('message')\ndef handle_message(b64image):\n start = time.time()\n frame = frame_from_b64image(b64image)\n detections = detect(frame)\n send(detections)\n print(f\"Elapsed ({len(b64image)}): {time.time() - start}\")\n\n\nif __name__ == \"__main__\":\n print(\"Ready for action...\")\n # app.run(host='0.0.0.0')\n socketio.run(app, host='0.0.0.0')\n"
},
{
"alpha_fraction": 0.6420798301696777,
"alphanum_fraction": 0.6602176427841187,
"avg_line_length": 20.763158798217773,
"blob_id": "9f21e6028d7496e1cf23787ec57c36789fc5a703",
"content_id": "743aa3179861a94b66750da9868e9b87cc1dd00a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 38,
"path": "/test_socketio.py",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "import time\nimport base64\nimport socketio\n\nsio = socketio.Client()\nstart = None\n\n\[email protected]\ndef connect():\n print('connection established')\n\n\[email protected]\ndef message(msg):\n print(f\"Elapsed time: {time.time() - start} sec\")\n print('message received with ', msg)\n # sio.emit('my response', {'response': 'my response'})\n\n\[email protected]\ndef disconnect():\n print('disconnected from server')\n\n\n# sio.connect('ws://localhost:8000/', socketio_path='/masks/socket.io')\nsio.connect('wss://ai.tucanoar.com/', socketio_path='/masks/socket.io')\n\nwith open(\"./data/ym_poor.jpg\", \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read())\n start = time.time()\n sio.send(encoded_string)\n # for i in range(20):\n # sio.send(encoded_string)\n # time.sleep(0.3)\n\ntime.sleep(6)\nsio.disconnect()\n"
},
{
"alpha_fraction": 0.4885057508945465,
"alphanum_fraction": 0.6954023241996765,
"avg_line_length": 16.399999618530273,
"blob_id": "7d3458720a06801c1f4f7471587e2fcf01183b6b",
"content_id": "33b141af14cc5014c2cd822480801d1e3f78339c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/requirements.txt",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "Werkzeug==0.16.0\nFlask==1.1.1\nFlask-SocketIO==4.3.0\neventlet==0.25.2\nopencv_python==4.2.0.32\nrequests==2.23.0\nnumpy==1.18.4\ntensorflow==2.2.0\ninsightface==0.1.5\nmxnet==1.6.0\n"
},
{
"alpha_fraction": 0.5969498753547668,
"alphanum_fraction": 0.6143791079521179,
"avg_line_length": 21.950000762939453,
"blob_id": "060bbada9b277315f6aba47fca4cd22b1394829f",
"content_id": "91b208e903a20541b9693a2c5e169aefaf14ad4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 20,
"path": "/Dockerfile",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "###########################################\n# General Dockerfile for Tucano detectors\n# TucanoRobotics 2020\n###########################################\n\nFROM python:3.7\n# FROM fbcotter/docker-tensorflow-opencv:latest\n\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\nRUN apt-get update && apt-get -y upgrade\n\nWORKDIR /opt/detector\n\nCOPY . .\nRUN pip install --upgrade pip\nRUN pip install --no-cache-dir -r requirements.txt\n\nCMD [\"python\", \"./src/api.py\"]\n"
},
{
"alpha_fraction": 0.7041420340538025,
"alphanum_fraction": 0.7633135914802551,
"avg_line_length": 84,
"blob_id": "eef5a9be517778234a51d098dab38aced882e197",
"content_id": "659e7ea921bf197c90a0c3afad521c94d2761da8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 2,
"path": "/src/README.md",
"repo_name": "abcei2/mask_detector_api",
"src_encoding": "UTF-8",
"text": "# Mask Detector\n### Thanks to Adrian Rosebrock and his [blog](https://www.pyimagesearch.com/2020/05/04/covid-19-face-mask-detector-with-opencv-keras-tensorflow-and-deep-learning/)"
}
] | 7 |
chingsley/runestone | https://github.com/chingsley/runestone | 2e451b01acecd83b67981e78bda06d63717b8aad | f06931426e30d91f7f6f50a8e9cb7238214b3e11 | 30fe5662c87f32e72cd4f31103422973fc908b58 | refs/heads/master | 2020-12-04T15:52:51.953012 | 2020-10-15T16:35:01 | 2020-10-15T16:35:01 | 231,824,307 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5549525022506714,
"alphanum_fraction": 0.5739484429359436,
"avg_line_length": 20.676469802856445,
"blob_id": "702330b3afeed725579cb6ac08883c22d99cca68",
"content_id": "e6a62bf0c7c8a768bc2ded70f68ac03ab01d6efe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 737,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 34,
"path": "/infinite_monkey_theorem.py",
"repo_name": "chingsley/runestone",
"src_encoding": "UTF-8",
"text": "import random\n\n\ndef generate_str(strLen):\n char_pool = 'abcdefghijklmnopqrstuvwxyz '\n str = ''\n for i in range(strLen):\n str = str + char_pool[random.randrange(27)]\n\n return str\n\n\ndef test(test_str, target_str):\n count_same_str = 0\n for i in range(len(target_str)):\n if test_str[i] == target_str[i]:\n count_same_str += 1\n\n return count_same_str * 100 / len(target_str)\n\n\ndef main():\n target_str = \"methinks it is like a weasel\"\n score = 0\n best_score = 0\n while score < 100:\n test_str = generate_str(28)\n score = test(test_str, target_str)\n if score > best_score:\n best_score = score\n print(f\"{test_str}\\t=>\\tscore: {score}%\")\n\n\nmain()\n"
}
] | 1 |
kelvin-sudani/ros | https://github.com/kelvin-sudani/ros | 6e9cda4c652ad15f18e03d5287be2c82bea68ca1 | a5dd723ae5c7b7fc3a20ca3f84ed78c6c7a36bd5 | 9f364d3df1bb587de60fc1184bcc5a0965271ee6 | refs/heads/master | 2021-01-15T19:59:29.624196 | 2011-12-09T12:55:35 | 2011-12-09T12:55:35 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 18.5,
"blob_id": "974d37808a961adc17d12f64a033d69003d6cc9b",
"content_id": "0d4a6c54f35ca1ee3a33bc845791debb1af44451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 2,
"path": "/brown-ros-pkg/experimental/ar_recog/src/ar_recog/msg/__init__.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "from _Tag import *\nfrom _Tags import *\n"
},
{
"alpha_fraction": 0.5403822064399719,
"alphanum_fraction": 0.5512340068817139,
"avg_line_length": 33.826087951660156,
"blob_id": "d629891a59e0353641837835611796a250aac9bb",
"content_id": "a8b56f7af1ec795e37ca16418c01fc099e901f05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10413,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 299,
"path": "/nxt_driver/bin/nxt_driver.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport roslib; roslib.load_manifest('nxt_driver')\nimport rospy\n\nfrom ar_recog.msg import Tag, Tags\nfrom geometry_msgs.msg import TwistStamped\nfrom geometry_msgs.msg import Twist\n\nimport std_msgs.msg\nimport std_srvs.srv\nimport math, logging, datetime\nimport subprocess\n\nlogging.basicConfig()\nlog = logging.getLogger(\"NxtDriver\")\nhdlr = logging.FileHandler('nxt_driver%s.log' % datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\nlog.setLevel(logging.DEBUG) #set verbosity to show all messages of severity >= DEBUG\nformatter = logging.Formatter('%(asctime)s %(message)s')\nhdlr.setFormatter(formatter)\nlog.addHandler(hdlr)\nlog.info(\"Starting NxtDriver\")\n\nclass PID:\n \n def __init__(self, Key='none', Kp=0.1, Ki=0.0, Kd=0.05, PIDMax=1, ErrInit=0, ErrMin=10, DerInit=0, DerMin=1, IntInit=0, IntMax=25, IntMin=-25, Goal=0):\n self.Key = Key\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.Der = DerInit\n self.DerInit = DerInit\n self.DerMin = DerMin \n self.Int = IntInit\n self.IntInit = IntInit\n self.IntMax = IntMax\n self.IntMin = IntMin\n self.ErrMin = ErrMin\n self.Goal = Goal\n self.Err = ErrInit\n self.ErrInit = ErrInit\n self.PIDMax = PIDMax\n self.IsInit = True\n self.Time = datetime.datetime.now()\n \n def ReInit(self, Key='none', Kp=0.1, Ki=0.0, Kd=0.05, PIDMax=1, ErrInit=0, ErrMin=10, DerInit=0, DerMin=1, IntInit=0, IntMax=25, IntMin=-25, Goal=0):\n log.info(\"%s ReInit\" % (self.Key))\n self.Key = Key\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.Der = DerInit\n self.DerInit = DerInit\n self.DerMin = DerMin \n self.Int = IntInit\n self.IntInit = IntInit\n self.IntMax = IntMax\n self.IntMin = IntMin\n self.ErrMin = ErrMin\n self.Goal = Goal\n self.Err = ErrInit\n self.ErrInit = ErrInit\n self.PIDMax = PIDMax\n self.IsInit = True\n self.Time = datetime.datetime.now()\n \n def Reset(self):\n log.info(\"%s Reset\" % (self.Key))\n self.Der = self.DerInit \n self.Int = self.IntInit\n self.Err = self.ErrInit\n self.IsInit = True\n self.Time = datetime.datetime.now()\n\n def IncrementKp(self, Kp):\n self.Kp += Kp\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def IncrementKi(self, Ki):\n self.Ki += Ki\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def IncrementKd(self, Kd):\n self.Kd += Kd\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def ComputePID(self,CurVal):\n global log\n self.Err = self.Goal - CurVal\n # check for \"Dead Band\" region where no controls are sent \n if math.fabs(self.Err) < self.ErrMin and self.Der < self.DerMin:\n # Within the \"Dead Band\" region, so stop moving so not to induce error\n self.P = 0\n self.I = 0\n self.D = 0\n else:\n ###################################################################\n # Proportional\n ###################################################################\n self.P = self.Kp * self.Err\n from std_msgs.msg import Empty\n\n ###################################################################\n # Derivative\n ###################################################################\n # If IsInit that means this is first frame, so don't do anything with derivative\n TimeDelta = datetime.datetime.now() - self.Time\n if not self.IsInit:\n self.D = self.Kd * float( self.Err - self.Der ) / TimeDelta.microseconds\n else:\n self.D = 0\n self.Time = datetime.datetime.now()\n self.Der = self.Err\n \n 
###################################################################\n # Integral\n ###################################################################\n self.Int = self.Int + self.Err\n \n if self.Int > self.IntMax:\n self.Int = self.IntMax\n elif self.Int < self.IntMin:\n self.Int = self.IntMin\n \n self.I = self.Int * self.Ki\n ###################################################################\n # PID Total\n ###################################################################\n PID = self.P + self.I + self.D\n if PID >= 0:\n PID = min(PID, self.PIDMax)\n else:\n PID = max(PID, -self.PIDMax)\n self.IsInit = False\n log.info(\"%s, Err, %f, P, %f, I, %f, D, %f, PID %f\" % (self.Key, self.Err, self.P, self.I, self.D, PID))\n return PID\n\n###################################################################\n# Constants\n###################################################################\nIMAGE_WIDTH = 320\nIMAGE_HEIGHT = 240\nX_MAX_VELOCITY = 0.1\nZ_ANG_MAX_VELOCITY = 0.001\nX_VEL_SCALAR = 0.003\nZ_ANG_VEL_SCALAR = 0.1\nX_DERIVATIVE_SCALAR = 0 #7500\nZ_ANG_DERIVATIVE_SCALAR = 0 #1500\nX_INT_SCALAR = 0\nZ_ANG_INT_SCALAR = 0\nX_DEAD_BAND = 1\nZ_ANG_DEAD_BAND = 8\nTAG_DIAMETER = 58\n\nMAX_HISTORY=5\n\nDIRECTION_LETTERS = ['x','y','z']\n\n# Global Variables1\nMyTwist = TwistStamped()\nLastTwist = Twist()\nPrevDiameter = 0\nPrevVector = []\np_x=PID('vel_x', X_VEL_SCALAR, X_INT_SCALAR, X_DERIVATIVE_SCALAR, X_MAX_VELOCITY, 0, X_DEAD_BAND)\np_z_ang=PID('vel_y', Z_ANG_VEL_SCALAR, Z_ANG_INT_SCALAR, Z_ANG_DERIVATIVE_SCALAR, Z_ANG_MAX_VELOCITY, 0, Z_ANG_DEAD_BAND)\n\nTagFound = False\nprocessHandle = subprocess.Popen(\"rosrun ar_recog ar_recog image:=/ardrone/image_raw camera_info:=/ardrone/camera_info\", cwd=\"/home/base/ros/brown-ros-pkg/experimental/ar_recog/bin\", shell=True) \n \ndef CalcScaledVelocity( LineVector ):\n global p_x\n NewVel = Twist().linear\n Velocity = p_x.ComputePID(getattr(LineVector, DIRECTION_LETTERS[0]))\n setattr(NewVel, DIRECTION_LETTERS[0], Velocity) \n return NewVel\n \ndef CalcScaledAngle( AngleVector ):\n global p_z_ang\n NewTwist = Twist().angular\n Angle = p_z_ang.ComputePID(getattr(AngleVector, DIRECTION_LETTERS[2]))\n print Angle\n setattr(NewTwist, DIRECTION_LETTERS[2], Angle )\n return NewTwist\n \ndef ProcessImagePosition (data):\n global MyTwist\n global p_x\n global p_z_ang\n global LastTwist\n global processHandle\n \n pub = rospy.Publisher('cmd_vel', Twist)\n \n NewTwist = data\n \n # This input image twist give the delta position and pose for the tag relative \n # to the center of the of the image frame.\n # Therefore the direction of movement is known from the direct input values and\n # only the magnitude (speed) must be calculated. 
\n # The speed will be calculated based on the previous position and speed vectors\n # from the previous frames.\n # If the tag continues to be far from the center of the frame then the speed\n # will be increased.\n # If the tag is switching direction in the frame (oscillating) then reduce the\n # speed to hover over target\n MyTwist.twist.linear = CalcScaledVelocity( NewTwist.twist.linear )\n MyTwist.twist.angular = CalcScaledAngle( NewTwist.twist.angular )\n if ( math.isnan( MyTwist.twist.linear.x ) ):\n print 'NaN X',\n MyTwist.twist.linear.x = 0\n if ( math.isnan( MyTwist.twist.linear.y ) ):\n print 'NaN Y',\n MyTwist.twist.linear.y = 0\n if ( math.isnan( MyTwist.twist.linear.z ) ):\n print 'NaN Z',\n MyTwist.twist.linear.z = 0\n if ( math.isnan( MyTwist.twist.angular.z ) ):\n print 'NaN Ang Z'\n MyTwist.twist.angular.z = 0\n \n # Only publish the twist parameters to the drone\n LastTwist = MyTwist.twist\n pub.publish(MyTwist.twist)\n# if MyTwist.twist.linear.x != 0 or MyTwist.twist.linear.y != 0:\n# print MyTwist\n# rospy.loginfo(MyTwist)\n\n \ndef ProcessXlateImage( data ):\n global PrevDiameter\n global p_x\n global p_z_ang\n global TAG_DIAMETER\n global TagFound\n global LastTwist\n \n InputTags = data\n NewTwist = TwistStamped()\n \n if InputTags.tag_count > 0:\n TagFound = True\n # +Z_fwd_cam = +Z_base - points up\n # +Y_fwd_cam = +Y_base - points left\n # +X_fwd_cam = +X_base - points forward\n NewTwist.twist.linear.x = InputTags.tags[0].diameter - TAG_DIAMETER\n #NewTwist.twist.linear.y = InputTags.tags[0].x - ( FWD_IMAGE_WIDTH/2 ) #( IMAGE_WIDTH/2 ) - InputTags.tags[0].y\n #NewTwist.twist.linear.z = InputTags.tags[0].y - ( FWD_IMAGE_HEIGHT/2 ) #( IMAGE_HEIGHT/2 ) - InputTags.tags[0].x\n NewTwist.twist.angular.z = InputTags.tags[0].x - ( IMAGE_WIDTH/2 ) #( IMAGE_WIDTH/2 ) - InputTags.tags[0].y #\n #PrevDiameter = InputTags.tags[0].diameter\n \n #rospy.Publisher(\"image_pos\", NewTwist )\n ProcessImagePosition( NewTwist )\n\n # Keep some history for when the tag disappears\n while len(PrevVector) >= MAX_HISTORY:\n PrevVector.pop(0)\n PrevVector.append(NewTwist.twist)\n else:\n # Extrapolate history\n try:\n NewTwist.twist = PrevVector.pop(0)\n print \"Use History %d\" % len(PrevVector)\n NewTwist.twist = LastTwist\n # Save off some history\n except IndexError, e:\n \n # Ran out of history\n NewTwist.twist.linear.x = 0\n NewTwist.twist.linear.y = 0\n NewTwist.twist.linear.z = 0\n NewTwist.twist.angular.z = 0\n if TagFound:\n p_x.Reset()\n p_z_ang.Reset()\n TagFound = False \n pub = rospy.Publisher(\"cmd_vel\", Twist )\n pub.publish( NewTwist.twist )\n #ProcessImagePosition( NewTwist )\n \ndef NxtDriver():\n global processHandle\n \n rospy.init_node('nxt_driver')\n rospy.Subscriber(\"tags\", Tags, ProcessXlateImage ) \n rospy.spin()\n\nif __name__ == '__main__':\n try:\n NxtDriver()\n except Exception as e:\n print e\n print repr(e)\n \n finally:\n processHandle.kill()\n # TODO emergency stop on exit\n print \"Done\"\n"
},
{
"alpha_fraction": 0.6449826955795288,
"alphanum_fraction": 0.6532871723175049,
"avg_line_length": 54.5,
"blob_id": "aef8a8a7d09bb307e03f336125661e7d199bfe58",
"content_id": "e5aa11f1479891dd4d7b45d22e2728e5d69a2d74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1445,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 26,
"path": "/ardrone_follow.sh",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# stupid simple script to open each ros terminal in tabs for ardrone project\n# to use:\n# chmod +x <this script>\n# ./<this script>\n\n#use WEBCAM\n#gnome-terminal \\\n#--tab -t \"roscore\" -e \"bash -c 'roscore'\" \\\n#--tab -t \"gscam\" -e \"bash -c 'sleep 1; rosrun gscam gscam'\" \\\n#--tab -t \"ar_recog\" -e \"bash -c 'sleep 5; cd ~/ros/brown-ros-pkg/experimental/ar_recog/bin; pwd; rosrun ar_recog ar_recog image:=/gscam/image_raw camera_info:=/gscam/camera_info'\" \\\n#--tab -t \"image_view\" -e \"bash -c 'sleep 1; rosrun image_view image_view image:=/ar/image'\" \\\n#--tab -t \"cmd_vel\" -e \"bash -c 'sleep 1; rostopic echo cmd_vel'\" \\\n#--tab -t \"tags\" -e \"bash -c 'sleep 1; rostopic echo tags'\" \\\n#--tab -t \"drone_driver\" -e \"bash -c 'sleep 5; rosrun drone_driver drone_driver.py; read'\"\n \n#use ardrone\ngnome-terminal \\\n--tab -t \"roscore\" -e \"bash -c 'roscore'\" \\\n--tab -t \"ardrone_driver\" -e \"bash -c 'sleep 1; rosrun ardrone_brown ardrone_driver'\" \\\n--tab -t \"ar_recog\" -e \"bash -c 'sleep 5; cd ~/ros/brown-ros-pkg/experimental/ar_recog/bin; pwd; rosrun ar_recog ar_recog image:=/ardrone/image_raw camera_info:=/ardrone/camera_info'\" \\\n--tab -t \"image_view\" -e \"bash -c 'sleep 1; rosrun image_view image_view image:=/ar/image'\" \\\n--tab -t \"cmd_vel\" -e \"bash -c 'sleep 1; rostopic echo cmd_vel'\" \\\n--tab -t \"tags\" -e \"bash -c 'sleep 1; rostopic echo tags'\" \\\n--tab -t \"drone_driver\" -e \"bash -c 'sleep 5; rosrun drone_driver drone_driver.py; read'\"\n\n\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 22,
"blob_id": "ccbae359edbf3c6ff97c91ca02e9e9bcb481a8fd",
"content_id": "6ab91bb0cd8e55c39690be916392b04e2a49f4bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/brown-ros-pkg/experimental/ardrone_brown/src/ardrone_brown/msg/__init__.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "from _Navdata import *\n"
},
{
"alpha_fraction": 0.570235013961792,
"alphanum_fraction": 0.5821931958198547,
"avg_line_length": 37.76518249511719,
"blob_id": "3663d89e52fa5458bdd432358740986e1ffb7974",
"content_id": "46b22fec531cf277bf61276a17311fdb2ef7881b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19150,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 494,
"path": "/drone_driver/bin/drone_driver.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport roslib; roslib.load_manifest('drone_driver')\nimport rospy\n\nfrom ar_recog.msg import Tag, Tags\nfrom geometry_msgs.msg import TwistStamped\nfrom geometry_msgs.msg import Twist\nfrom ardrone_brown.msg import Navdata\nfrom std_msgs.msg import Empty\n\nimport std_msgs.msg\nimport std_srvs.srv\nimport math, logging, datetime\nimport subprocess\n\nlogging.basicConfig()\nlog = logging.getLogger(\"DroneDriver\")\nhdlr = logging.FileHandler('driver%s.log' % datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\nlog.setLevel(logging.DEBUG) #set verbosity to show all messages of severity >= DEBUG\nformatter = logging.Formatter('%(asctime)s %(message)s')\nhdlr.setFormatter(formatter)\nlog.addHandler(hdlr)\nlog.info(\"Starting DroneDriver\")\n\nclass PID:\n \n def __init__(self, Key='none', Kp=0.1, Ki=0.0, Kd=0.05, PIDMax=1, ErrInit=0, ErrMin=10, DerInit=0, DerMin=1, IntInit=0, IntMax=25, IntMin=-25, Goal=0):\n self.Key = Key\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.Der = DerInit\n self.DerInit = DerInit\n self.DerMin = DerMin \n self.Int = IntInit\n self.IntInit = IntInit\n self.IntMax = IntMax\n self.IntMin = IntMin\n self.ErrMin = ErrMin\n self.Goal = Goal\n self.Err = ErrInit\n self.ErrInit = ErrInit\n self.PIDMax = PIDMax\n self.IsInit = True\n self.Time = datetime.datetime.now()\n \n def ReInit(self, Key='none', Kp=0.1, Ki=0.0, Kd=0.05, PIDMax=1, ErrInit=0, ErrMin=10, DerInit=0, DerMin=1, IntInit=0, IntMax=25, IntMin=-25, Goal=0):\n log.info(\"%s ReInit\" % (self.Key))\n self.Key = Key\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.Der = DerInit\n self.DerInit = DerInit\n self.DerMin = DerMin \n self.Int = IntInit\n self.IntInit = IntInit\n self.IntMax = IntMax\n self.IntMin = IntMin\n self.ErrMin = ErrMin\n self.Goal = Goal\n self.Err = ErrInit\n self.ErrInit = ErrInit\n self.PIDMax = PIDMax\n self.IsInit = True\n self.Time = datetime.datetime.now()\n \n def Reset(self):\n log.info(\"%s Reset\" % (self.Key))\n self.Der = self.DerInit \n self.Int = self.IntInit\n self.Err = self.ErrInit\n self.IsInit = True\n self.Time = datetime.datetime.now()\n\n def IncrementKp(self, Kp):\n self.Kp += Kp\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def IncrementKi(self, Ki):\n self.Ki += Ki\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def IncrementKd(self, Kd):\n self.Kd += Kd\n log.info(\"%s, Kp, %f, Ki, %f, Kd, %f\" % (self.Key, self.Kp, self.Ki, self.Kd))\n self.Reset()\n \n def ComputePID(self,CurVal):\n global log\n self.Err = self.Goal - CurVal\n # check for \"Dead Band\" region where no controls are sent \n if math.fabs(self.Err) < self.ErrMin and self.Der < self.DerMin:\n # Within the \"Dead Band\" region, so stop moving so not to induce error\n self.P = 0\n self.I = 0\n self.D = 0\n else:\n ###################################################################\n # Proportional\n ###################################################################\n self.P = self.Kp * self.Err\n from std_msgs.msg import Empty\n\n ###################################################################\n # Derivative\n ###################################################################\n # If IsInit that means this is first frame, so don't do anything with derivative\n TimeDelta = datetime.datetime.now() - self.Time\n if not self.IsInit:\n self.D = self.Kd * float( self.Err - self.Der ) / TimeDelta.microseconds\n else:\n self.D = 0\n self.Time = 
datetime.datetime.now()\n self.Der = self.Err\n \n ###################################################################\n # Integral\n ###################################################################\n self.Int = self.Int + self.Err\n \n if self.Int > self.IntMax:\n self.Int = self.IntMax\n elif self.Int < self.IntMin:\n self.Int = self.IntMin\n \n self.I = self.Int * self.Ki\n ###################################################################\n # PID Total\n ###################################################################\n PID = self.P + self.I + self.D\n if PID >= 0:\n PID = min(PID, self.PIDMax)\n else:\n PID = max(PID, -self.PIDMax)\n self.IsInit = False\n log.info(\"%s, Err, %f, P, %f, I, %f, D, %f, PID %f\" % (self.Key, self.Err, self.P, self.I, self.D, PID))\n return PID\n\n###################################################################\n# Forward Cam Constants\n###################################################################\nFWD_IMAGE_WIDTH = 320\nFWD_IMAGE_HEIGHT = 240\nFWD_X_MAX_VELOCITY = 0.1 \nFWD_Y_MAX_VELOCITY = 0.1\nFWD_Z_MAX_VELOCITY = 0.5\nFWD_Z_ANG_MAX_VELOCITY = 0.0\nFWD_X_VEL_SCALAR = 0.02\nFWD_Y_VEL_SCALAR = 0.001\nFWD_Z_VEL_SCALAR = 0.002\nFWD_Z_ANG_VEL_SCALAR = 0.0\nFWD_X_DERIVATIVE_SCALAR = 7500\nFWD_Y_DERIVATIVE_SCALAR = 1500\nFWD_Z_DERIVATIVE_SCALAR = FWD_Y_DERIVATIVE_SCALAR*(float(FWD_IMAGE_WIDTH)/FWD_IMAGE_HEIGHT)\nFWD_Z_ANG_DERIVATIVE_SCALAR = 0\nFWD_X_INT_SCALAR = 0\nFWD_Y_INT_SCALAR = 0\nFWD_Z_INT_SCALAR = 0\nFWD_Z_ANG_INT_SCALAR = 0\nFWD_X_DEAD_BAND=1 ### FWD CAM ONLY....DWN CAM 8\nFWD_Y_DEAD_BAND=8\nFWD_Z_DEAD_BAND=FWD_Y_DEAD_BAND*float(float(FWD_IMAGE_WIDTH)/FWD_IMAGE_HEIGHT)\nFWD_Z_ANG_DEAD_BAND=0\nFWD_TAG_DIAMETER=20\n###################################################################\n# Downward Cam Constants \n###################################################################\nDWN_IMAGE_WIDTH = 176\nDWN_IMAGE_HEIGHT = 144\nDWN_X_MAX_VELOCITY = 0.1 \nDWN_Y_MAX_VELOCITY = 0.1\nDWN_Z_MAX_VELOCITY = 0.2\nDWN_Z_ANG_MAX_VELOCITY = 0.3\nDWN_X_VEL_SCALAR = 0.0020\nDWN_Y_VEL_SCALAR = 0.0015\nDWN_Z_VEL_SCALAR = 0.01\nDWN_Z_ANG_VEL_SCALAR = 0.3\nDWN_X_DERIVATIVE_SCALAR = 550\nDWN_Y_DERIVATIVE_SCALAR = 450\nDWN_Z_DERIVATIVE_SCALAR = 7500\nDWN_Z_ANG_DERIVATIVE_SCALAR = 100\nDWN_X_INT_SCALAR = 0\nDWN_Y_INT_SCALAR = 0\nDWN_Z_INT_SCALAR = 0\nDWN_Z_ANG_INT_SCALAR = 0\nDWN_X_DEAD_BAND=8 \nDWN_Y_DEAD_BAND=8\nDWN_Z_DEAD_BAND=0.1\nDWN_Z_ANG_DEAD_BAND=0\nDWN_TAG_DIAMETER=13\n###################################################################\n# No Tag Constants \n###########################################from std_msgs.msg import Empty\n########################\nALT_Z_MAX_VELOCITY = 0.5\nALT_Z_VEL_SCALAR = 0.0005\nALT_Z_DERIVATIVE_SCALAR = 100\nALT_Z_INT_SCALAR = 0\nALT_Z_DEAD_BAND=1\nALT_Z_GOAL=1150\n\nANG_VEL_TO_RADIANS_SCALAR = -0.01\nMAX_HISTORY=5\nDIRECTION_LETTERS = ['x','y','z']\n\n# Global Variables1\nMyTwist = TwistStamped()\nNavTwist = Twist()\nLastTwist = Twist()\nPrevDiameter = 0\nPrevVector = []\np_x=PID('vel_x', FWD_X_VEL_SCALAR, FWD_X_INT_SCALAR, FWD_X_DERIVATIVE_SCALAR, FWD_X_MAX_VELOCITY, 0, FWD_X_DEAD_BAND)\np_y=PID('vel_y', FWD_Y_VEL_SCALAR, FWD_Y_INT_SCALAR, FWD_Y_DERIVATIVE_SCALAR, FWD_Y_MAX_VELOCITY, 0, FWD_Y_DEAD_BAND)\np_z=PID('vel_z', FWD_Z_VEL_SCALAR, FWD_Z_INT_SCALAR, FWD_Z_DERIVATIVE_SCALAR, FWD_Z_MAX_VELOCITY, 0, FWD_Z_DEAD_BAND)\np_z_ang=PID('ang_z', FWD_Z_ANG_VEL_SCALAR, FWD_Z_ANG_INT_SCALAR, FWD_Z_ANG_DERIVATIVE_SCALAR, FWD_Z_ANG_MAX_VELOCITY, 0, FWD_Z_ANG_DEAD_BAND)\np_z_alt=PID('alt_z', ALT_Z_VEL_SCALAR, ALT_Z_INT_SCALAR, 
ALT_Z_DERIVATIVE_SCALAR, ALT_Z_MAX_VELOCITY, 0, ALT_Z_DEAD_BAND)\n\nFwdCam = True\nFlying = False\nPrevCam = False\nCurCam = False\nprev_key = 'foobar'\nTagFound = False\nPoseMatch = False\nAutoAltitude = False\nprocessHandle = subprocess.Popen(\"rosrun ar_recog ar_recog image:=/ardrone/image_raw camera_info:=/ardrone/camera_info\", cwd=\"/home/base/ros/brown-ros-pkg/experimental/ar_recog/bin\", shell=True) \n \ndef CalcScaledVelocity( LineVector ):\n global p_x\n global p_y\n global p_z\n NewVel = Twist().linear\n Velocity = p_x.ComputePID(getattr(LineVector, DIRECTION_LETTERS[0]))\n setattr(NewVel, DIRECTION_LETTERS[0], Velocity) \n Velocity = p_y.ComputePID(getattr(LineVector, DIRECTION_LETTERS[1]))\n setattr(NewVel, DIRECTION_LETTERS[1], Velocity )\n Velocity = p_z.ComputePID(getattr(LineVector, DIRECTION_LETTERS[2]))\n setattr(NewVel, DIRECTION_LETTERS[2], Velocity )\n return NewVel\n \ndef CalcScaledAngle( AngleVector ):\n global p_z_ang\n NewTwist = Twist().angular\n Angle = p_z_ang.ComputePID(getattr(AngleVector, DIRECTION_LETTERS[2]))\n print Angle\n setattr(NewTwist, DIRECTION_LETTERS[2], Angle )\n print AngleVector\n print NewTwist\n print\n return NewTwist\n\ndef ProcessChangePID( data ):\n global p_x\n global p_y\n global p_z\n global p_z_ang\n global FwdCam\n \n if data.twist.linear.x != 0:\n if data.header.frame_id == \"p\":\n p_x.IncrementKp( data.twist.linear.x )\n elif data.header.frame_id == \"i\":\n p_x.IncrementKi( data.twist.linear.x )\n elif data.header.frame_id == \"d\":\n p_x.IncrementKd( data.twist.linear.x )\n if data.twist.linear.y != 0:\n if data.header.frame_id == \"p\":\n p_y.IncrementKp( data.twist.linear.y )\n elif data.header.frame_id == \"i\":\n p_y.IncrementKi( data.twist.linear.y )\n elif data.header.frame_id == \"d\":\n p_y.IncrementKd( data.twist.linear.y )\n \ndef ProcessImagePosition (data):\n global MyTwist\n global FwdCam\n global prev_key\n global p_x\n global p_y\n global p_z\n global p_z_ang\n global ANG_VEL_TO_RADIANS_SCALAR\n global PoseMatch\n global LastTwist\n global processHandle\n \n pub = rospy.Publisher('cmd_vel', Twist)\n land_pub = rospy.Publisher('/ardrone/land', std_msgs.msg.Empty)\n reset_pub = rospy.Publisher('/ardrone/reset', std_msgs.msg.Empty)\n takeoff_pub = rospy.Publisher('/ardrone/takeoff', std_msgs.msg.Empty)\n toggleCam = rospy.ServiceProxy('/ardrone/togglecam', std_srvs.srv.Empty)\n \n NewTwist = data\n key = NewTwist.header.frame_id\n # takeoff and landing\n if key == 'land':\n rospy.loginfo(rospy.get_name()+\"I heard %s\",NewTwist.header.frame_id)\n land_pub.publish(Empty())\n if key == 'abort':\n rospy.loginfo(rospy.get_name()+\"I heard %s\",NewTwist.header.frame_id)\n reset_pub.publish(Empty())\n if key == 'takeoff':\n rospy.loginfo(rospy.get_name()+\"I heard %s\",NewTwist.header.frame_id)\n takeoff_pub.publish(Empty())\n if key == 'hover':\n # TODO Implement hover\n rospy.loginfo(rospy.get_name()+\"I heard %s\",NewTwist.header.frame_id)\n takeoff_pub.publish(Empty())\n if key == 'switch' and prev_key != key:\n try:\n toggleCam()\n FwdCam = not FwdCam\n processHandle.kill()\n processHandle = subprocess.Popen(\"rosrun ar_recog ar_recog image:=/ardrone/image_raw camera_info:=/ardrone/camera_info\", cwd=\"/home/base/ros/brown-ros-pkg/experimental/ar_recog/bin\", shell=True) \n if FwdCam:\n p_x.ReInit('vel_x', FWD_X_VEL_SCALAR, FWD_X_INT_SCALAR, FWD_X_DERIVATIVE_SCALAR, FWD_X_MAX_VELOCITY, 0, FWD_X_DEAD_BAND)\n p_y.ReInit('vel_y', FWD_Y_VEL_SCALAR, FWD_Y_INT_SCALAR, FWD_Y_DERIVATIVE_SCALAR, FWD_Y_MAX_VELOCITY, 0, 
FWD_Y_DEAD_BAND)\n p_z.ReInit('vel_z', FWD_Z_VEL_SCALAR, FWD_Z_INT_SCALAR, FWD_Z_DERIVATIVE_SCALAR, FWD_Z_MAX_VELOCITY, 0, FWD_Z_DEAD_BAND)\n p_z_ang.ReInit('ang_z', FWD_Z_ANG_VEL_SCALAR, FWD_Z_ANG_INT_SCALAR, FWD_Z_ANG_DERIVATIVE_SCALAR, FWD_Z_ANG_MAX_VELOCITY, 0, FWD_Z_ANG_DEAD_BAND)\n else:\n p_x.ReInit('vel_x', DWN_X_VEL_SCALAR, DWN_X_INT_SCALAR, DWN_X_DERIVATIVE_SCALAR, DWN_X_MAX_VELOCITY, 0, DWN_X_DEAD_BAND)\n p_y.ReInit('vel_y', DWN_Y_VEL_SCALAR, DWN_Y_INT_SCALAR, DWN_Y_DERIVATIVE_SCALAR, DWN_Y_MAX_VELOCITY, 0, DWN_Y_DEAD_BAND)\n p_z.ReInit('vel_z', DWN_Z_VEL_SCALAR, DWN_Z_INT_SCALAR, DWN_Z_DERIVATIVE_SCALAR, DWN_Z_MAX_VELOCITY, 0, DWN_Z_DEAD_BAND)\n p_z_ang.ReInit('ang_z', DWN_Z_ANG_VEL_SCALAR, DWN_Z_ANG_INT_SCALAR, DWN_Z_ANG_DERIVATIVE_SCALAR, DWN_Z_ANG_MAX_VELOCITY, 0, DWN_Z_ANG_DEAD_BAND)\n print \"ToggleCam %d\" % FwdCam \n log.warn(\"ToggleCam %d\" % FwdCam)\n except rospy.ServiceException, e:\n log.critical(\"Service call failed: %s\"%e)\n print \"Service call failed: %s\"%e\n\n prev_key = key\n \n # This input image twist give the delta position and pose for the tag relative \n # to the center of the of the image frame.\n # Therefore the direction of movement is known from the direct input values and\n # only the magnitude (speed) must be calculated. \n # The speed will be calculated based on the previous position and speed vectors\n # from the previous frames.\n # If the tag continues to be far from the center of the frame then the speed\n # will be increased.\n # If the tag is switching direction in the frame (oscillating) then reduce the\n # speed to hover over target\n MyTwist.twist.linear = CalcScaledVelocity( NewTwist.twist.linear )\n if ( math.isnan( MyTwist.twist.linear.x ) ):\n print 'NaN X',\n MyTwist.twist.linear.x = 0\n if ( math.isnan( MyTwist.twist.linear.y ) ):\n print 'NaN Y',\n MyTwist.twist.linear.y = 0\n if ( math.isnan( MyTwist.twist.linear.z ) ):\n print 'NaN Z',\n MyTwist.twist.linear.z = 0\n\n if PoseMatch and not FwdCam:\n MyTwist.twist.angular = CalcScaledAngle( NewTwist.twist.angular )\n if ( math.isnan( MyTwist.twist.angular.z ) ):\n print 'NaN Z Ang',\n MyTwist.twist.angular.z = 0\n PrevX = MyTwist.twist.linear.x\n PrevY = MyTwist.twist.linear.y\n MyTwist.twist.linear.x = PrevX*math.cos(MyTwist.twist.angular.z*ANG_VEL_TO_RADIANS_SCALAR) - PrevY*math.sin(MyTwist.twist.angular.z*ANG_VEL_TO_RADIANS_SCALAR)\n MyTwist.twist.linear.y = PrevX*math.sin(MyTwist.twist.angular.z*ANG_VEL_TO_RADIANS_SCALAR) + PrevY*math.cos(MyTwist.twist.angular.z*ANG_VEL_TO_RADIANS_SCALAR) \n else:\n MyTwist.twist.angular.z = 0\n \n # Only publish the twist parameters to the drone\n LastTwist = MyTwist.twist\n pub.publish(MyTwist.twist)\n# if MyTwist.twist.linear.x != 0 or MyTwist.twist.linear.y != 0:\n# print MyTwist\n# rospy.loginfo(MyTwist)\n\n \ndef ProcessXlateImage( data ):\n global PrevDiameter\n global p_x\n global p_y\n global p_z\n global p_z_ang\n global FwdCam\n global prev_key\n global FWD_TAG_DIAMETER\n global DWN_TAG_DIAMETER\n global TagFound\n global NavTwist\n global LastTwist\n \n InputTags = data\n NewTwist = TwistStamped()\n \n if InputTags.image_width != 320 and FwdCam and prev_key != 'switch':\n print \"Switch to Down Cam\"\n p_x.ReInit('vel_x', DWN_X_VEL_SCALAR, DWN_X_INT_SCALAR, DWN_X_DERIVATIVE_SCALAR, DWN_X_MAX_VELOCITY, 0, DWN_X_DEAD_BAND)\n p_y.ReInit('vel_y', DWN_Y_VEL_SCALAR, DWN_Y_INT_SCALAR, DWN_Y_DERIVATIVE_SCALAR, DWN_Y_MAX_VELOCITY, 0, DWN_Y_DEAD_BAND)\n p_z.ReInit('vel_z', DWN_Z_VEL_SCALAR, DWN_Z_INT_SCALAR, DWN_Z_DERIVATIVE_SCALAR, 
DWN_Z_MAX_VELOCITY, 0, DWN_Z_DEAD_BAND)\n p_z_ang.ReInit('ang_z', DWN_Z_ANG_VEL_SCALAR, DWN_Z_ANG_INT_SCALAR, DWN_Z_ANG_DERIVATIVE_SCALAR, DWN_Z_ANG_MAX_VELOCITY, 0, DWN_Z_ANG_DEAD_BAND)\n FwdCam = False\n\n if InputTags.tag_count > 0:\n TagFound = True\n if InputTags.tags[0].id == 0:\n NewTwist.header.frame_id = 'switch'\n else:\n NewTwist.header.frame_id = 'move'\n \n if FwdCam:\n # Forward Camera\n # +Z_fwd_cam = +Z_base - points up\n # +Y_fwd_cam = +Y_base - points left\n # +X_fwd_cam = +X_base - points forward\n NewTwist.twist.linear.x = InputTags.tags[0].diameter - FWD_TAG_DIAMETER\n NewTwist.twist.linear.y = InputTags.tags[0].x - ( FWD_IMAGE_WIDTH/2 ) #( IMAGE_WIDTH/2 ) - InputTags.tags[0].y\n NewTwist.twist.linear.z = InputTags.tags[0].y - ( FWD_IMAGE_HEIGHT/2 ) #( IMAGE_HEIGHT/2 ) - InputTags.tags[0].x\n NewTwist.twist.angular.z = 0 # No rotation on fwd cam\n #PrevDiameter = InputTags.tags[0].diameter\n else:\n # Downward Camera\n # NOTE: the downward camera \n # +Z_down_cam = -Z_base - points down, \n # +Y_down_cam = +X_base - points forward\n # +X_down_cam = +Y_base - points left\n NewTwist.twist.linear.x = InputTags.tags[0].y - ( DWN_IMAGE_HEIGHT/2 )\n NewTwist.twist.linear.y = InputTags.tags[0].x - ( DWN_IMAGE_WIDTH/2 )\n NewTwist.twist.linear.z = DWN_TAG_DIAMETER - InputTags.tags[0].diameter\n print InputTags.tags[0].zRot\n NewTwist.twist.angular.z = 2*math.pi - InputTags.tags[0].zRot\n \n #rospy.Publisher(\"image_pos\", NewTwist )\n ProcessImagePosition( NewTwist )\n\n # Keep some history for when the tag disappears\n while len(PrevVector) >= MAX_HISTORY:\n PrevVector.pop(0)\n PrevVector.append(NewTwist.twist)\n else:\n NewTwist.header.frame_id = 'lost'\n # Extrapolate history\n try:\n NewTwist.twist = PrevVector.pop(0)\n print \"Use History %d\" % len(PrevVector)\n NewTwist.twist = LastTwist\n # Save off some history\n except IndexError, e:\n \n # Ran out of history\n NewTwist.twist.linear.x = 0\n NewTwist.twist.linear.y = 0\n NewTwist.twist.linear.z = NavTwist.linear.z\n NewTwist.twist.angular.z = 0\n if TagFound:\n p_x.Reset()\n p_y.Reset()\n p_z.Reset()\n p_z_ang.Reset()\n prev_key = 'foobar'\n TagFound = False \n pub = rospy.Publisher(\"cmd_vel\", Twist )\n pub.publish( NewTwist.twist )\n #ProcessImagePosition( NewTwist )\n \ndef ProcessNavData(data):\n global p_z_alt\n global NavTwist\n global AutoAltitude\n if AutoAltitude:\n NavTwist.linear.z = p_z_alt.ComputePID(data.altd-ALT_Z_GOAL)\n else:\n NavTwist.linear.z = 0\n \ndef DroneDriver():\n global processHandle\n \n rospy.init_node('drone_driver')\n rospy.Subscriber(\"tags\", Tags, ProcessXlateImage ) \n rospy.Subscriber(\"image_pos\", TwistStamped, ProcessChangePID )\n rospy.Subscriber(\"/ardrone/navdata\", Navdata, ProcessNavData )\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n DroneDriver()\n except Exception as e:\n print e\n print repr(e)\n \n finally:\n processHandle.kill()\n land_pub = rospy.Publisher('/ardrone/land', std_msgs.msg.Empty)\n # emergency land on exit\n land_pub.publish(Empty())\n print \"Done\"\n"
},
{
"alpha_fraction": 0.4721941351890564,
"alphanum_fraction": 0.4922143518924713,
"avg_line_length": 32.181209564208984,
"blob_id": "499d3dd87d862887530eeecf12f6f4e12df8658b",
"content_id": "cf208202c4b4fa1609fcfd3efc9567bf00849d86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4945,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 149,
"path": "/fake_image/bin/fake_image.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport roslib; roslib.load_manifest('fake_image')\nimport rospy\n\nfrom geometry_msgs.msg import TwistStamped\n\nimport sys, select, termios, tty\nimport random\n\nSavedTwist = TwistStamped()\nSavedTwist.twist.linear.x = 160\nSavedTwist.twist.linear.y = 120\nPositiveValue = True\n\nmsg = \"\"\"\nCreating Fake Image position data and publishing to image_pos\n\nCTRL+c to quit\n\"\"\"\n\nmove_bindings = {\n 68:('linear', 'y', 1), #left\n 67:('linear', 'y', -1), #right\n 65:('linear', 'x', 1), #forward\n 66:('linear', 'x', -1), #back\n# 'w':('linear', 'z', 0.3),\n# 's':('linear', 'z', -0.3),\n# 'a':('angular', 'z', 1),\n# 'd':('angular', 'z', -1),\n 'w':('linear', 'x', 10), #forward\n '9':('linear', 'x', 9), #forward\n '8':('linear', 'x', 8), #forward\n '7':('linear', 'x', 7), #forward\n '6':('linear', 'x', 6), #forward\n '5':('linear', 'x', 5), #forward\n '4':('linear', 'x', 4), #forward\n '3':('linear', 'x', 3), #forward\n '2':('linear', 'x', 2), #forward\n '1':('linear', 'x', 1), #forward\n 's':('linear', 'x', -10),\n 'a':('linear', 'y', 10), #left\n 'd':('linear', 'y', -10),\n 'm':('linear', 'x', 0),\n 'n':('linear', 'y', 0),\n }\n\ndef getKey():\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\nif __name__==\"__main__\":\n global PositiveValue\n settings = termios.tcgetattr(sys.stdin)\n\n print msg\n \n pub = rospy.Publisher('image_pos', TwistStamped)\n# pub = rospy.Publisher('cmd_vel', Twist)\n rospy.init_node('fake_image')\n prevTwist = TwistStamped()\n\n try:\n while(True):\n key = getKey()\n\n twist = TwistStamped()\n if ord(key) in move_bindings.keys():\n key = ord(key)\n if key in move_bindings.keys():\n (lin_ang, xyz, speed) = move_bindings[key]\n setattr(getattr(twist.twist, lin_ang), xyz, speed)\n else:\n if (key == '-'):\n PositiveValue = not PositiveValue\n print \"Positive Sign = \",\n print PositiveValue\n if (key == '\\x03'):\n break\n twist.header.frame_id = \"%c\" % key\n twist.header.seq += 1\n if twist.twist.linear.x != 0:\n if PositiveValue:\n SavedTwist.twist.linear.x += twist.twist.linear.x\n else:\n SavedTwist.twist.linear.x -= twist.twist.linear.x\n elif twist.twist.linear.y != 0:\n if PositiveValue:\n SavedTwist.twist.linear.y += twist.twist.linear.y\n else:\n SavedTwist.twist.linear.y -= twist.twist.linear.y\n print SavedTwist\n pub.publish(SavedTwist)\n# pub.publish(twist.twist)\n\n \n\n\n\n# if (key == '\\x03'):\n# break\n#\n# for i in range(5):\n# prevTwist.twist.linear.x = 0.1\n# prevTwist.twist.linear.y = 0\n# prevTwist.twist.linear.z = 0\n# prevTwist.twist.angular.x = 0\n# prevTwist.twist.angular.y = 0\n# prevTwist.twist.angular.z = 0\n# pub.publish(prevTwist)\n# for i in range(5):\n# prevTwist.twist.linear.x = 0\n# prevTwist.twist.linear.y = 0.1\n# prevTwist.twist.linear.z = 0\n# prevTwist.twist.angular.x = 0\n# prevTwist.twist.angular.y = 0\n# prevTwist.twist.angular.z = 0\n# pub.publish(prevTwist)\n# for i in range(5):\n# prevTwist.twist.linear.x = -0.1\n# prevTwist.twist.linear.y = 0\n# prevTwist.twist.linear.z = 0\n# prevTwist.twist.angular.x = 0\n# prevTwist.twist.angular.y = 0\n# prevTwist.twist.angular.z = 0\n# pub.publish(prevTwist)\n# for i in range(5):\n# prevTwist.twist.linear.x = 0\n# prevTwist.twist.linear.y = -0.1\n# prevTwist.twist.linear.z = 0\n# prevTwist.twist.angular.x = 0\n# prevTwist.twist.angular.y = 0\n# prevTwist.twist.angular.z = 0\n# pub.publish(prevTwist)\n\n except Exception 
as e:\n print e\n print repr(e)\n\n finally:\n twist = TwistStamped()\n twist.header.frame_id = 'land'\n twist.twist.linear.x = 0; twist.twist.linear.y = 0; twist.twist.linear.z = 0\n twist.twist.angular.x = 0; twist.twist.angular.y = 0; twist.twist.angular.z = 0\n pub.publish(twist)\n\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n\n"
},
{
"alpha_fraction": 0.7649006843566895,
"alphanum_fraction": 0.7649006843566895,
"avg_line_length": 29.200000762939453,
"blob_id": "a8888c6eebd4725f4b012313d389c5c9ff955df4",
"content_id": "0ee758186d8377ceb633e674afdf4b026da72a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 10,
"path": "/ardrone_surf_tag/CMakeFiles/ardrone_surf_tag.dir/cmake_clean.cmake",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/ardrone_surf_tag.dir/src/ardrone_surf_tag.o\"\n \"bin/ardrone_surf_tag.pdb\"\n \"bin/ardrone_surf_tag\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/ardrone_surf_tag.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 28.200000762939453,
"blob_id": "a0f556ddb8cd6057d15d49a09138a44f1593ac67",
"content_id": "d257895ecad4866f062f6820edd171c7c612a17d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 292,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/draw_circle/build/CMakeFiles/draw_circle.dir/cmake_clean.cmake",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/draw_circle.dir/src/draw_circle.o\"\n \"../lib/libdraw_circle.pdb\"\n \"../lib/libdraw_circle.so\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/draw_circle.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7482269406318665,
"alphanum_fraction": 0.7482269406318665,
"avg_line_length": 27.200000762939453,
"blob_id": "d2adcce6084a65e0e67cec0ec7d3f88857beafbd",
"content_id": "08097650e34d240ea4c52ed7bad2ddcea6a63494",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/shirt/build/CMakeFiles/image_utils.dir/cmake_clean.cmake",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/image_utils.dir/src/ImageUtils.o\"\n \"../bin/image_utils.pdb\"\n \"../bin/image_utils\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/image_utils.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 25.727272033691406,
"blob_id": "d10e52054d75cbc2880cdbb6af8e0b30586580e6",
"content_id": "67f27364e4501ac01a5052722f8e189e02b057c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/shirt/build/CMakeFiles/shirt.dir/cmake_clean.cmake",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/shirt.dir/src/ImageUtils.o\"\n \"CMakeFiles/shirt.dir/src/Main.o\"\n \"../bin/shirt.pdb\"\n \"../bin/shirt\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/shirt.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 32,
"blob_id": "22430df87b1e229d7d2547daf4d79d8edfdc200d",
"content_id": "5e00d04971dfdbccc3627c0c24828576de8c5f79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 1,
"path": "/brown-ros-pkg/experimental/ar_recog/src/ar_recog/srv/__init__.py",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "from _CalibrateDistance import *\n"
},
{
"alpha_fraction": 0.6407692432403564,
"alphanum_fraction": 0.6623076796531677,
"avg_line_length": 32.32820510864258,
"blob_id": "0e06ef337ae50c604ad5248b009241b09a889907",
"content_id": "358a8baef3907692cce66cb95f3d97c71cdcf7a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6500,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 195,
"path": "/ardrone_surf_tag/src/ardrone_surf_tag.cpp",
"repo_name": "kelvin-sudani/ros",
"src_encoding": "UTF-8",
"text": "#include <ros/ros.h>\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <cv_bridge/CvBridge.h>\n#include <sensor_msgs/image_encodings.h>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/highgui/highgui.hpp>\n\nusing namespace std;\nusing namespace cv;\nnamespace enc = sensor_msgs::image_encodings;\n\nstatic const char WINDOW[] = \"Image window\";\n\nstatic const char* TagFile = \"images/clubs-3.jpg\";\n\nclass ImageConverter\n{\n ros::NodeHandle nh_;\n image_transport::ImageTransport it_;\n image_transport::Subscriber image_sub_;\n image_transport::Publisher image_pub_;\n IplImage* image;\n\npublic:\n ImageConverter()\n : it_(nh_)\n {\n image_pub_ = it_.advertise(\"out\", 1);\n image_sub_ = it_.subscribe(\"gscam/image_raw\", 1, &ImageConverter::imageCb, this);\n\n cv::namedWindow(WINDOW);\n }\n\n ~ImageConverter()\n {\n cv::destroyWindow(WINDOW);\n }\n\n void imageCb(const sensor_msgs::ImageConstPtr& msg)\n {\n cv_bridge::CvImagePtr cv_ptr;\n try\n {\n cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);\n }\n catch (cv_bridge::Exception& e)\n {\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n return;\n }\n IplImage* frame = &((IplImage)cv_ptr->image);\n Mat img_object = imread( TagFile, CV_LOAD_IMAGE_GRAYSCALE );\n Mat img_scene(frame); // = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );\n \n //if( !img_object.data || !img_scene.data )\n //{ std::cout<< \" --(!) Error reading images \" << std::endl; return -1; }\n\n //-- Step 1: Detect the keypoints using SURF Detector\n int minHessian = 300;\n\n SurfFeatureDetector detector( minHessian, 5, 5 );\n\n std::vector<KeyPoint> keypoints_object, keypoints_scene;\n\n detector.detect( img_object, keypoints_object );\n detector.detect( img_scene, keypoints_scene );\n\n //-- Step 2: Calculate descriptors (feature vectors)\n SurfDescriptorExtractor extractor;\n\n Mat descriptors_object, descriptors_scene;\n\n extractor.compute( img_object, keypoints_object, descriptors_object );\n extractor.compute( img_scene, keypoints_scene, descriptors_scene );\n\n //-- Step 3: Matching descriptor vectors using FLANN matcher\n BruteForceMatcher< L2<float> > matcher;\n vector< DMatch > matches;\n matcher.match( descriptors_object, descriptors_scene, matches );\n\nMat img_matches;\nvector< DMatch > good_matches;\n\n double max_dist = 0; double min_dist = 100;\n\n //-- Quick calculation of max and min distances between keypoints\n \n for( int i = 0; i < descriptors_object.rows; i++ )\n { double dist = matches[i].distance;\n if( dist < min_dist ) min_dist = dist;\n if( dist > max_dist ) max_dist = dist;\n }\n\n\n //printf(\"-- Max dist : %f \\n\", max_dist );\n //printf(\"-- Min dist : %f \\n\", min_dist );\n \n //-- Draw only \"good\" matches (i.e. 
whose distance is less than 3*min_dist )\n \n\n for( int i = 0; i < descriptors_object.rows; i++ )\n { if( matches[i].distance < 3*min_dist )\n { good_matches.push_back( matches[i]); }\n } \n \n //good_matches = matches;\n \n\n drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, \n good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), \n vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); \n\n//-- Localize the object from img_1 in img_2 \n std::vector<Point2f> obj;\n std::vector<Point2f> scene;\n\n for( int i = 0; i < good_matches.size(); i++ )\n {\n //-- Get the keypoints from the good matches\n obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );\n scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt ); \n }\n Mat objPtsMat(obj);\n Mat scenePtsMat(scene);\nif( good_matches.size() >= 4 )\n{\n Mat H = findHomography( objPtsMat, scenePtsMat, CV_RANSAC, 5 );\n\n //-- Get the corners from the image_1 ( the object to be \"detected\" )\n std::vector<Point2f> obj_corners(4);\n obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );\n obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );\n std::vector<Point2f> scene_corners(4);\n\n Mat objCornersMat(obj_corners);\n Mat sceneCornersMat(scene_corners);\n\n perspectiveTransform( objCornersMat, sceneCornersMat, H);\n \n //-- Draw lines between the corners (the mapped object in the scene - image_2 )\n line( img_scene, scene_corners[0], scene_corners[1] , Scalar(0, 255, 0), 4 );\n line( img_scene, scene_corners[1], scene_corners[2] , Scalar( 0, 255, 0), 4 );\n line( img_scene, scene_corners[2], scene_corners[3] , Scalar( 0, 255, 0), 4 );\n line( img_scene, scene_corners[3], scene_corners[0] , Scalar( 0, 255, 0), 4 );\n\n line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );\n line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );\n line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );\n line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );\n}\n/*\n// Read input images\n\n IplImage* frame = &((IplImage)cv_ptr->image);\n Mat object = imread( TagFile, CV_LOAD_IMAGE_GRAYSCALE );\n Mat scene(frame); //CvMat(frame); //cvCreateMat( frame->width, frame->height, CV_8UC1 );\n //cvCvtColor(frame, image, CV_BGR2GRAY);\n\n SurfFeatureDetector detector(1000,1);\n vector<KeyPoint> sceneKeypoints, objectKeypoints;\n detector.detect(scene, sceneKeypoints);\n detector.detect(object, objectKeypoints);\n\n SurfDescriptorExtractor extractor;\n Mat sceneDescriptors, objectDescriptors;\n extractor.compute( scene, sceneKeypoints, sceneDescriptors );\n extractor.compute( object, objectKeypoints, objectDescriptors );\n\n FlannBasedMatcher matcher;\n vector<DMatch> matches;\n matcher.match( sceneDescriptors, sceneDescriptors, matches);\n\n Mat imageMatches;\n drawMatches( object, objectKeypoints, scene, sceneKeypoints, \n matches, imageMatches, Scalar::all(-1), Scalar::all(-1), \n vector<char>(), DrawMatchesFlags::DEFAULT ); \n*/\n \n imshow(\"scene\", img_scene);\n imshow(\"matches\", img_matches);\n\n waitKey(3);\n\n //image_pub_.publish(cv_ptr->toImageMsg());\n }\n};\n\nint main(int argc, char** 
argv)\n{\n ros::init(argc, argv, \"image_converter\");\n ImageConverter ic;\n ros::spin();\n return 0;\n}\n\n"
}
] | 12 |
qiurongfeng/DataAnalysis | https://github.com/qiurongfeng/DataAnalysis | ed73a7673c20121f9b7fe20047c35c9cae0c56fe | fadd508e6bb17b39e80f81a37046669f8d873a25 | 44742d373e8bdcfdc7f74159849450a35dbc7a90 | refs/heads/master | 2020-03-14T10:30:18.757634 | 2018-05-04T01:17:39 | 2018-05-04T01:17:39 | 131,568,540 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4803459048271179,
"alphanum_fraction": 0.5031446814537048,
"avg_line_length": 27.266666412353516,
"blob_id": "bee3c25b7f3aca125a88fc783827e847202b10be",
"content_id": "a8a993b478f69d5d1cc14adcf21f6225f0120559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 45,
"path": "/during_timecount.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/24 16:36\nauthor: Rongfeng.Qiu\nfile:during_timecount.py\n\"\"\"\nimport os\nimport datetime\ndef duringtime(str1,str2,countlist,filepath,count_map):\n listTemp = []\n down = 1\n flag = 0\n with open(filepath, \"r\") as f:\n while down:\n fileLine = f.readline()\n if fileLine == '':\n down = 0\n # fileLine = fileLine.strip().split(\",\")\n for str in countlist:\n if (str in fileLine) and flag:\n count_map[str] += 1\n if(str1 in fileLine):\n flag = 1\n if(str2 in fileLine):\n if(flag == 0):\n continue\n else:\n flag = 0\n return count_map\n\ndef count_duringtime(str1,str2,start,end,count_list,filepath):\n count_map = {}\n for v in count_list:\n count_map[v] = 0\n for i in range(start,end + 1):\n path = filepath.replace('*',str(i))\n if(not os.path.exists(path)):\n continue\n print(\"计算文件\" + path)\n try:\n duringtime(str1,str2,count_list,path,count_map)\n except(OSError , EOFError) as res:\n print(\"计算文件出错\" +res)\n continue\n return count_map\n"
},
{
"alpha_fraction": 0.5203251838684082,
"alphanum_fraction": 0.5392953753471375,
"avg_line_length": 27.384614944458008,
"blob_id": "0274e2336097179a21354c58c3c3f38e8d8f6bd5",
"content_id": "8c47505a504b77a89289d43975c6d61568df6e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 26,
"path": "/output_pic.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/26 15:49\nauthor: Rongfeng.Qiu\nfile:output_pic.py\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef output(content):\n for title,value in content.items() :\n # 设置字体样式\n plt.figure()\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['font.sans-serif'] = [u'SimHei']\n plt.title(title)\n labels = []\n fracs = []\n for k,v in value.items():\n if title!=k:\n labels.append(k.replace(title,\"\"))\n fracs.append(v)\n plt.pie(x = fracs,labels=labels)\n save_path =( \"pic/\" + title.split(\":\")[0] +\".png\")\n print(save_path)\n plt.savefig(save_path)\n plt.close(0)\n"
},
{
"alpha_fraction": 0.472533643245697,
"alphanum_fraction": 0.48766815662384033,
"avg_line_length": 32.660377502441406,
"blob_id": "424f611b099b686c61cc899527790d64febfd67e",
"content_id": "28e51c7e0238e5d1f6a29022e2c0eaa472be7b39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1804,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 53,
"path": "/upEndTime.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/20 10:27\nauthor: Rongfeng.Qiu\nfile:upEndTime.py\n\"\"\"\nimport os\nimport datetime\ndef single_time(str1,str2,filepath,listTime):\n listTemp = []\n down = 1\n with open(filepath, \"r\") as f:\n while down:\n fileLine = f.readline()\n if fileLine == '':\n down = 0\n fileLine = fileLine.strip().split(\",\")\n if(str1 in fileLine):\n time = fileLine[1] + \" \" + fileLine[2]\n if listTemp:\n listTemp.pop()\n listTemp.append(time)\n else:\n listTemp.append(time)\n if(str2 in fileLine):\n if(listTemp == []):\n continue\n else:\n\n one_list = []\n endTime = fileLine[1] + \" \" + fileLine[2]\n startTime = listTemp.pop()\n un_endTime = datetime.datetime.strptime(endTime,'%Y-%m-%d %H:%M:%S.%f').timestamp()\n un_startTime = datetime.datetime.strptime(startTime,'%Y-%m-%d %H:%M:%S.%f').timestamp()\n one_list.append(startTime)\n one_list.append(endTime)\n one_list.append(float(un_endTime) - float(un_startTime))\n listTime.append(one_list)\n return listTime\n\ndef count_time(str1,str2,start,end,filepath):\n listTime = []\n for i in range(start,end + 1):\n path = filepath.replace('*',str(i))\n if(not os.path.exists(path)):\n continue\n print(\"计算文件\" + path)\n try:\n single_time(str1,str2,path,listTime)\n except(OSError , EOFError) as res:\n print(\"计算文件出错\" +res)\n continue\n return listTime\n"
},
{
"alpha_fraction": 0.5642701387405396,
"alphanum_fraction": 0.5806100368499756,
"avg_line_length": 23.83783721923828,
"blob_id": "fe7fbbe3712ad527c10326611fff86ee8e50adde",
"content_id": "688a567fa2b247d96fd25791cb307fd603d11945",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 37,
"path": "/wordcount.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/20 10:27\nauthor: Rongfeng.Qiu\nfile:wordcount.py\n\"\"\"\nimport os\n\n#单文件单关键词\ndef wordCount(str,filepath):\n # pathfile = r\".\\System.log.1\"\n with open(filepath,\"r\") as f:\n file = f.read()\n warm_num = file.count(str)\n return warm_num\n##单文件多关键词\ndef listCount(count_list,filepath,count_map):\n for line in count_list:\n num = wordCount(line,filepath)\n count_map[line] += num\n return count_map\n#多文件多关键词\ndef fileListCount(start,end,count_list,filepath):\n count_map = {}\n for v in count_list:\n count_map[v] = 0\n for i in range(start,end + 1):\n path = filepath.replace('*',str(i))\n if(not os.path.exists(path)):\n continue\n print(\"统计文件\" + path)\n try:\n listCount(count_list, path, count_map)\n except:\n print(\"统计文件出错\")\n continue\n return count_map"
},
{
"alpha_fraction": 0.5846096277236938,
"alphanum_fraction": 0.6062756776809692,
"avg_line_length": 23.08108139038086,
"blob_id": "14b987407c2588825580dd308d79fc5ae2d6e4c2",
"content_id": "47fad2e9a3714410cdcd2a21c063b92074b3c4ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3115,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 111,
"path": "/main_fuc.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/20 10:27\nauthor: Rongfeng.Qiu\nfile:main_fuc.py\n\"\"\"\nfrom DataAnalysis import key_valuecount,during_timecount,upEndTime,output_pic,output_excel,wordcount\nimport os\nimport xlwt\n\n#输入文件格式,文件放在当前目录下\nfilepath = r\"data\\System.log.*\"\n#文件初始编号\nstart = 0\n#文件结束编号\nend = 25\n#填入需要统计的出现词列表或者单个字符串\ncount_list1 = [\n \"WARNING\",\n \"saveImage\",\n \"Image transfer done. FrameNumber:0 ImageState:Blank\",\n \"Start shot\",\n \"End Shot\",\n \"ImagingEngineSvtImpl::getImage() fail\"\n]\ncount_list2 = [\n \"modeInt is:\",\n \"Mode is:\",\n \"pulseFlagInt is:\",\n \"ppsInt is:\",\n \"lowDoseFlagInt is:\",\n \"magModeInt is:\",\n \"kv is:\",\n \"ma is:\",\n \"maFloat to IE is\"\n]\ncount_list3 = [\n \"Image transfer done. FrameNumber:0 ImageState:Blank\"\n]\n#起始字符串\nstr1 = \"************************************************************\"\n#结束字符串\nstr2 = \"System exit in logger\"\n##\nstr3 = \"Start shot\"\n##\nstr4 = \"End Shot\"\n\n#############################################################\nworkbook = xlwt.Workbook(encoding='utf-8')\n#############################################################\n\n########统计时间差########\n#输入工作表名称\nsheetname1 = \"sheet 1\"\n#输入初始行\ninitList = [\"开机时间\",\"关机时间\",\"时间差(单位秒)\"]\nlistTime = upEndTime.count_time(str1,str2,start,end,filepath)\nworkbook = output_excel.output(listTime,workbook,sheetname1,initList)\n#########################\n\n\n########统计shot时间差########\n#输入工作表名称\nsheetname2 = \"sheet 2\"\n#输入初始行\ninitList = [\"start shot时间\",\"end shot时间\",\"时间差(单位秒)\"]\nshotList = upEndTime.count_time(str3,str4,start,end,filepath)\nworkbook = output_excel.output(shotList,workbook,sheetname2,initList)\n#########################\n\n\n########统计出现次数########\n#输入工作表名称\nsheetname3 = \"sheet 3\"\n#输入初始行\ninitList = [\"出现关键字\",\"出现次数\"]\ncount_map1 = wordcount.fileListCount(start,end,count_list1,filepath)\nworkbook = output_excel.output(count_map1,workbook,sheetname3,initList)\n#########################\n\n########统计出现次数########\n#输入工作表名称\nsheetname4 = \"sheet 4\"\n#输入初始行\ninitList = [\"出现关键字\",\"出现次数\"]\ncount_map2 = key_valuecount.fileKeyCount(start,end,count_list2,filepath)\noutput_pic.output(count_map2)\nworkbook = output_excel.output_keyvalue(count_map2,workbook,sheetname4,initList)\n\n#########################\n\n########统计出现次数########\n#输入工作表名称\nsheetname5 = \"sheet 5\"\n#输入初始行\ninitList = [\"出现关键字\",\"出现次数\"]\ncount_map3 = during_timecount.count_duringtime(str3,str4,start,end,count_list3,filepath)\nworkbook = output_excel.output(count_map3,workbook,sheetname5,initList)\n#########################\n\n\n\n# 输入Excel文件名\nexcelName = 'res_15.xls'\n\ntry:\n workbook.save(\"output/\" + excelName)\n print(\"操作成功,文件\" + excelName + \"已生成\")\nexcept OSError as reason:\n print(\"保存失败:\" + str(reason))\n\n\n\n\n"
},
{
"alpha_fraction": 0.5106382966041565,
"alphanum_fraction": 0.5259308218955994,
"avg_line_length": 27.94230842590332,
"blob_id": "ebde97730746e756f7530246bdf0c192a46d5b9d",
"content_id": "86804979a90938c5bbbff26fe9d6a04750f796b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1574,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 52,
"path": "/key_valuecount.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/24 13:40\nauthor: Rongfeng.Qiu\nfile:key_valuecount.py\n\"\"\"\nimport os\nimport operator\n\n#单文件单关键词\ndef keyCount(keyList,filepath,count_map):\n # pathfile = r\".\\System.log.1\"\n down = 1\n with open(filepath,\"r\") as f:\n while down:\n fileLine = f.readline()\n if fileLine == '':\n down = 0\n fileList = fileLine.split(\",\")\n for index,key in enumerate(keyList):\n for line in fileList:\n if key in line:\n count_map[key] += 1\n count_map[line] = count_map[line] + 1 if (line in count_map) else 1\n return count_map\n#多文件多关键词\ndef fileKeyCount(start,end,count_list,filepath):\n list_map = {}\n count_map = {}\n for v in count_list:\n count_map[v] = 0\n for i in range(start,end + 1):\n path = filepath.replace('*',str(i))\n if(not os.path.exists(path)):\n continue\n print(\"统计文件key-value\" + path)\n try:\n keyCount(count_list, path, count_map)\n except:\n print(\"统计文件key-value出错\")\n continue\n # sorted(count_map.keys())\n #将键值对进行归类格式为{ key1:{}, key2:{} }\n for key in count_list:\n tempDict = {}\n for k,v in count_map.items():\n if key in k:\n tempDict[k] = v\n sorted_x = sorted(tempDict.items(), key=(operator.itemgetter(0)))\n list_map[key] = dict(sorted_x)\n\n return list_map"
},
{
"alpha_fraction": 0.5564024448394775,
"alphanum_fraction": 0.5746951103210449,
"avg_line_length": 28.840909957885742,
"blob_id": "7f60750fa069445a53a233bee076f02a2e3884a4",
"content_id": "55ae41a8620a9dfcd059b0959aa6099b46df9287",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1330,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 44,
"path": "/output_excel.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/20 10:27\nauthor: Rongfeng.Qiu\nfile:output_excel.py\n\"\"\"\nimport xlwt\ndef output(content,workbook,sheetName,initList):\n if type(content) == list:\n booksheet = workbook.add_sheet(sheetName, cell_overwrite_ok=True)\n ##初始行\n for i in range(len(initList)):\n booksheet.write(0, i, initList[i ])\n\n for i, row in enumerate(content):\n for j, col in enumerate(row):\n booksheet.write(i + 1, j, col)\n return workbook\n elif type(content) == dict:\n booksheet = workbook.add_sheet(sheetName, cell_overwrite_ok=True)\n ##初始行\n for i in range( len(initList) ):\n booksheet.write(0, i, initList[i])\n\n i = 1\n for k,v in content.items():\n booksheet.write(i, 0, k)\n booksheet.write(i, 1, v)\n i += 1\n return workbook\n\ndef output_keyvalue(content,workbook,sheetName,initList):\n booksheet = workbook.add_sheet(sheetName, cell_overwrite_ok=True)\n ##初始行\n for i in range(len(initList)):\n booksheet.write(0, i, initList[i])\n\n i = 1\n for key,value in content.items():\n for k, v in value.items():\n booksheet.write(i, 0, k)\n booksheet.write(i, 1, v)\n i += 1\n return workbook"
},
{
"alpha_fraction": 0.48275861144065857,
"alphanum_fraction": 0.5862069129943848,
"avg_line_length": 15.714285850524902,
"blob_id": "5b90de90f66ab47808e0b274c1d89b471991e08c",
"content_id": "f580e3fc50c670def3fe09c6a399972b9cc461d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 7,
"path": "/__init__.py",
"repo_name": "qiurongfeng/DataAnalysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/4/20 10:27\nauthor: Rongfeng.Qiu\nfile:__init__.py\n\"\"\"\nprint(\"初始化启动中....\")"
}
] | 8 |
abedess/catapult | https://github.com/abedess/catapult | 70df4ae5f88721f49da4b646b763eaa6f94b863d | a120c4f6e011a9830eefdb783b818ab7113aacfd | 4eca0f9a56de979976f3bbebc58c89f2833cc73e | refs/heads/master | 2020-08-07T03:10:38.025844 | 2019-10-04T10:50:37 | 2019-10-04T12:45:17 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6770527362823486,
"alphanum_fraction": 0.6811679601669312,
"avg_line_length": 38.55813980102539,
"blob_id": "240fb8c4175cef0ffe0b7e34bd68ff99174cd57c",
"content_id": "e81e7c8661647fa61eff2661f81e6356343ad60c",
"detected_licenses": [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 5103,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 129,
"path": "/third_party/typ/typ/tests/artifacts_test.py",
"repo_name": "abedess/catapult",
"src_encoding": "UTF-8",
"text": "# Copyright 2019 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nfrom typ import artifacts\n\n\nclass ArtifactsArtifactCreationTests(unittest.TestCase):\n\n def _VerifyPathAndContents(\n self, output_dir, file_rel_path, contents, iteration=0, test_base_dir='',\n intial_results_base_dir=False):\n path = output_dir\n if test_base_dir:\n path = os.path.join(path, test_base_dir)\n if iteration:\n path = os.path.join(path, 'retry_%d' % iteration)\n elif intial_results_base_dir:\n path = os.path.join(path, 'initial')\n path = os.path.join(path, file_rel_path)\n self.assertTrue(os.path.exists(path))\n with open(path, 'r') as f:\n self.assertEqual(f.read(), contents)\n\n def test_create_artifact_writes_to_disk_iteration_0_no_test_dir(self):\n \"\"\"Tests CreateArtifact will write to disk at the correct location.\"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n ar = artifacts.Artifacts(tempdir)\n file_rel_path = os.path.join('stdout', 'text.txt')\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n self._VerifyPathAndContents(tempdir, file_rel_path, b'contents')\n finally:\n shutil.rmtree(tempdir)\n\n def test_create_artifact_writes_to_disk_iteration_1_no_test_dir(self):\n \"\"\"Tests CreateArtifact will write to disk at the correct location.\"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n ar = artifacts.Artifacts(tempdir, iteration=1)\n file_rel_path = os.path.join('stdout', 'text.txt')\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n self._VerifyPathAndContents(tempdir, file_rel_path, b'contents', iteration=1)\n finally:\n shutil.rmtree(tempdir)\n\n def test_create_artifact_writes_to_disk_iteration_1_test_dir(self):\n \"\"\"Tests CreateArtifact will write to disk at the correct location.\"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')\n file_rel_path = os.path.join('stdout', 'text.txt')\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n self._VerifyPathAndContents(\n tempdir, file_rel_path, b'contents', iteration=1, test_base_dir='a.b.c')\n finally:\n shutil.rmtree(tempdir)\n\n def test_create_artifact_overwriting_artifact_raises_value_error(self):\n \"\"\"Tests CreateArtifact will write to disk at the correct location.\"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')\n file_rel_path = os.path.join('stdout', 'text.txt')\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n ar = artifacts.Artifacts(tempdir, iteration=0, test_name='a.b.c')\n file_rel_path = os.path.join('retry_1', 'stdout', 'text.txt')\n with self.assertRaises(ValueError) as ve:\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n self.assertIn('already exists.', str(ve.exception))\n finally:\n shutil.rmtree(tempdir)\n\n def 
test_create_artifact_writes_to_disk_initial_results_dir(self):\n \"\"\"Tests CreateArtifact will write to disk at the correct location.\"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n ar = artifacts.Artifacts(\n tempdir, iteration=0, test_name='a.b.c', intial_results_base_dir=True)\n file_rel_path = os.path.join('stdout', 'text.txt')\n with ar.CreateArtifact('artifact_name', file_rel_path) as f:\n f.write(b'contents')\n self._VerifyPathAndContents(\n tempdir, file_rel_path, b'contents', iteration=0, test_base_dir='a.b.c',\n intial_results_base_dir=True)\n finally:\n shutil.rmtree(tempdir)\n\n\nclass ArtifactsLinkCreationTests(unittest.TestCase):\n def test_create_link(self):\n ar = artifacts.Artifacts(None)\n ar.CreateLink('link', 'https://testsite.com')\n self.assertEqual(ar.artifacts, {'link': ['https://testsite.com']})\n\n def test_create_link_invalid_url(self):\n ar = artifacts.Artifacts(None)\n with self.assertRaises(ValueError):\n ar.CreateLink('link', 'https:/malformedurl.com')\n\n def test_create_link_non_https(self):\n ar = artifacts.Artifacts(None)\n with self.assertRaises(ValueError):\n ar.CreateLink('link', 'http://testsite.com')\n\n def test_create_link_newlines(self):\n ar = artifacts.Artifacts(None)\n with self.assertRaises(ValueError):\n ar.CreateLink('link', 'https://some\\nbadurl.com')\n"
}
] | 1 |
sanj909/CollectiWise | https://github.com/sanj909/CollectiWise | 1e274986635ababa22bd8e2637aa81b6653f03de | 98f7722c78d3346e57029da97023aa7664f5f760 | 05fba7526bf3a1bac7725dbead874f19da6e1456 | refs/heads/main | 2023-03-31T09:55:16.355428 | 2021-03-26T15:23:46 | 2021-03-26T15:23:46 | 337,492,733 | 1 | 0 | null | 2021-02-09T18:05:49 | 2021-03-11T21:37:46 | 2021-03-11T21:39:29 | Python | [
{
"alpha_fraction": 0.725824773311615,
"alphanum_fraction": 0.743648111820221,
"avg_line_length": 38.969696044921875,
"blob_id": "f34791a705c596bddd668efc2a74f9112b8e968e",
"content_id": "538a7df4915a823c0b2071dce80782ee60092111",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2637,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 66,
"path": "/trainPlayground.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pywt\nimport time\nimport os\nimport tensorflow as tf\n\n\n#Importing stock data\nimport yfinance as yf\nfrom datetime import date,datetime,timedelta\nticker = '^GSPC'\nfirst_day = datetime(2000, 1, 3)\nlast_day = datetime(2019, 7, 1)\ndata = yf.Ticker(ticker).history(interval = '1d', start=first_day, end=last_day)\ndata.reset_index(inplace=True)\n\n\nfrom models import *\nfrom dataUtils import *\nfrom waveletDenoising import optDenoise, normalise\n\n\nclose_data = data.Close.to_numpy()\nclose_data = optDenoise(close_data) \nclose_data = normalise(close_data, 0, 1) #Normalise to N(0, 1)\nprint(close_data.shape)\n\nunroll_length = 50\nX_train, X_test, y_train, y_test = train_test_split_lstm(close_data, 5, int(close_data.shape[0] * 0.1))\nX_train = np.expand_dims(unroll(X_train, unroll_length), axis = 2)\ny_train = np.expand_dims(unroll(y_train, unroll_length), axis = 2)\nX_test = np.expand_dims(unroll(X_test, unroll_length), axis = 2)\ny_test = np.expand_dims(unroll(y_test, unroll_length), axis = 2)\nprint(int(close_data.shape[0] * 0.1))\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\n\n#model = build_basic_lstm_model(lstm_input_dim = X_train.shape[-1], lstm_output_dim = unroll_length, dense_output_dim = y_train.shape[-1], return_sequences=True)\nmodel = build_att_lstm_model(lstm_input_dim = X_train.shape[-1], lstm_output_dim = unroll_length, dense_output_dim = y_train.shape[-1], return_sequences=True)\n\n# Compile the model\nstart = time.time()\nopt = tf.keras.optimizers.Adam(learning_rate = 0.1)\nmodel.compile(loss='mean_squared_error', optimizer = opt, metrics=['accuracy']) #metrics argument is necessary for model.evaluate to return accuracy \nprint('compilation time : ', time.time() - start)\n\n\n# Create a callback that saves the model's weights\ncheckpoint_path = \"GHRepos/CollectiWise/model_checkpoints/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)\n\n#model.fit(X_train, y_train, epochs = 5, validation_split = 0.05, callbacks=[cp_callback])\n\n# Load saved weights\nmodel.load_weights(checkpoint_path) #All we have to do before this line is to create and compile the model\nresults = model.evaluate(X_test, y_test, verbose=1)\nprint(\"test loss, test acc:\", results)\npredictions = model.predict(X_test[(len(X_test)-1):]) #Predict using the last row of X_test (afaik, the last row is the 50 most recent prices)\nprint(\"predictions shape:\", predictions.shape) #Model prediction of the 50 next prices\nprint(predictions)"
},
{
"alpha_fraction": 0.720973789691925,
"alphanum_fraction": 0.7278401851654053,
"avg_line_length": 46.117645263671875,
"blob_id": "98b21f1db9f5e335383393cfb8b6c52dd254e9a7",
"content_id": "b6a3548db36600f3604377250dc0b6843cb72995",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1602,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 34,
"path": "/models.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport tensorflow.keras.layers as layers\nimport keras as og_keras\nfrom keras_self_attention import SeqSelfAttention\n# pip install keras_self_attention\n\ndef build_basic_lstm_model(lstm_input_dim, lstm_output_dim, dense_output_dim, return_sequences):\n model = tf.keras.models.Sequential()\n model.add(layers.LSTM(input_shape=(None, lstm_input_dim), units = lstm_output_dim, return_sequences = return_sequences))\n #model.add(layers.LSTM(100, return_sequences = False))\n model.add(layers.Dense(units = dense_output_dim))\n #model.add(Activation('softmax'))\n model.add(layers.Activation('linear'))\n return model\n\n\ndef build_att_lstm_model(lstm_input_dim, lstm_output_dim, dense_output_dim, return_sequences):\n model = tf.keras.models.Sequential()\n model.add(layers.LSTM(input_shape = (None, lstm_input_dim), units = lstm_output_dim, return_sequences = return_sequences))\n #model.add(layers.LSTM(100, return_sequences = False))\n #model.add(layers.Attention())\n model.add(SeqSelfAttention(attention_activation= 'tanh'))\n model.add(layers.Dense(units = dense_output_dim))\n #model.add(Activation('softmax'))\n model.add(layers.Activation('linear'))\n return model\n\n\ndef lstm(learning_rate, window_length, n_features):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(units = 25, activation='relu', input_shape=(window_length, n_features)))\n #model.add(layers.Dropout(0.2))\n model.add(layers.Dense(units = n_features, activation='linear')) #We want the model to output a single number, it's prediction of bid1\n return model\n"
},
{
"alpha_fraction": 0.7730769515037537,
"alphanum_fraction": 0.8038461804389954,
"avg_line_length": 51,
"blob_id": "f1867141a51cf10a8719104e255513ae372fa045",
"content_id": "18457d50e0b1e6592ccfd4fa51569b582261a445",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 5,
"path": "/README.md",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "# CollectiWise\n\nModels for predicting cryptocurrency asset prices\n\n[](https://ssh.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fsanj909%2FCollectiWise)\n"
},
{
"alpha_fraction": 0.6465151906013489,
"alphanum_fraction": 0.6569118499755859,
"avg_line_length": 37.19117736816406,
"blob_id": "62c34c4a28953a9e0fd56ec220e69e83614f37eb",
"content_id": "423cf19a4fd43877c3aa86b86ad9814f465afce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2597,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 68,
"path": "/dataUtils.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\ndef split_df_by_asset(df, drop_label_column = True, num_columns_per_asset = 3):\n if(drop_label_column):\n df = df.drop(columns = ['label'])\n asset_dfs = []\n for i in range(0, len(df.columns), num_columns_per_asset):\n asset_dfs.append(df[df.columns[i : i + num_columns_per_asset]])\n return asset_dfs\n\ndef train_test_split_lstm(stocks, prediction_time=1, test_data_size=450, unroll_length=50):\n \"\"\"\n Split the data set into training and testing feature for Long Short Term Memory Model\n :param stocks: whole data set containing ['Open','Close','Volume'] features\n :param prediction_time: no of days\n :param test_data_size: size of test data to be used\n :param unroll_length: how long a window should be used for train test split\n :return: X_train : training sets of feature\n :return: X_test : test sets of feature\n :return: y_train: training sets of label\n :return: y_test: test sets of label\n \"\"\"\n # training data\n test_data_cut = test_data_size + unroll_length + 1\n\n x_train = stocks[0:-prediction_time - test_data_cut]\n #y_train = stocks[prediction_time:-test_data_cut]['Close'].as_matrix()\n y_train = stocks[prediction_time:-test_data_cut]\n\n # test data\n x_test = stocks[0 - test_data_cut:-prediction_time]\n #y_test = stocks[prediction_time - test_data_cut:]['Close'].as_matrix()\n y_test = stocks[prediction_time - test_data_cut:]\n\n return x_train, x_test, y_train, y_test\n\n\ndef unroll(data, sequence_length=24):\n \"\"\"\n use different windows for testing and training to stop from leak of information in the data\n :param data: data set to be used for unrolling\n :param sequence_length: window length\n :return: data sets with different window.\n \"\"\"\n result = []\n for index in range(len(data) - sequence_length):\n result.append(data[index: index + sequence_length])\n return np.asarray(result)\n\n\ndef standardise_df(df):\n scalers = [] #save the scalers so we can inverse_transform later\n for column in df.columns:\n x = np.array(df[column])\n scaler = StandardScaler()\n scaler.fit(x.reshape(len(x), 1))\n df[column] = scaler.transform(x.reshape(len(x), 1))\n scalers.append(scaler)\n return df, scalers\n\n\ndef reroll(array3d, unroll_length):\n array2d = array3d[0][unroll_length-1]\n for i in range(1, len(array3d)):\n next_row = array3d[i][unroll_length-1]\n array2d = np.vstack((array2d, next_row))\n return array2d\n"
},
{
"alpha_fraction": 0.6503292918205261,
"alphanum_fraction": 0.6607443690299988,
"avg_line_length": 40.58598709106445,
"blob_id": "b53d9c690d69231f851552ed981fb4f393ee0237",
"content_id": "e6ba01ab3592f49ca83f68645c1016d46afeb59d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6871,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 157,
"path": "/lstm_train_single.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pywt\nimport time\nimport os\nimport tensorflow as tf\nimport argparse\n\nfrom dataUtils import *\nfrom waveletDenoising import *\nfrom models import *\n\n\"\"\"\nRESULTS WE NEED:\n1. single asset result: lstm vs wlstm vs wlstm+a\n2. one model trained on all asset results(to test transfer learning): lstm vs wlstm vs wlstm+a\n\nMetrics: mse mae, rmse, R^2\n\"\"\"\n\ndef create_cp_name(path, args):\n name = \"model_cp\"\n for key,value in vars(args).items():\n name += \"_\" + key + str(value)\n return os.path.join(path, name + \".ckpt\")\n\ndef main(args):\n # Set seed\n np.random.seed(args.seed)\n tf.random.set_seed(args.seed)\n\n DATA_PATH = \"formatted_features.csv\"\n MODEL_PATH = \"model_checkpoints\"\n if(not os.path.isdir(MODEL_PATH)):\n os.mkdir(MODEL_PATH)\n checkpoint_save_name = create_cp_name(MODEL_PATH, args)\n print(checkpoint_save_name)\n\n df = pd.read_csv(DATA_PATH)\n asset_dfs = split_df_by_asset(df)\n print(\"Number of assets: \", len(asset_dfs))\n\n #dataframe for a single asset. Here, XBTUSD\n test_df = asset_dfs[args.stock_idx]\n print(\"Shape of test_df: \", test_df.shape)\n\n #dataframe of standardised data\n cleaned_data, scalers = standardise_df(test_df)\n #dataframe of standardised and denoised data\n if(\"w\" in args.model_type):\n cleaned_data = denoise_df(cleaned_data)\n\n #–––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n\n #The model will use this many of the most recent rows to make a prediction\n unroll_length = args.unroll_length\n #The prediction will be this many timesteps in the future. If horizon=1, we're predicting data from the next timestep.\n horizon = 1\n\n #percentage of total data to be set aside for testing\n train_test_split = 0.1\n X_train, X_test, y_train, y_test = train_test_split_lstm(cleaned_data, horizon, int(cleaned_data.shape[0] * train_test_split))\n #If X is rows 0 to 1000 of cleaned_data, then y is rows horizon to 1000+horizon of cleaned_data.\n #We want to use unroll_length rows to predict the average price, volume and standard deviation in the next row (since horizon=1). 
So:\n #Shape of X data should be in the form (samples, unroll_length, features)\n #Shape of y data should be in the form (samples, features)?\n X_train = unroll(X_train, unroll_length)\n X_test = unroll(X_test, unroll_length)\n #y_train = y_train[unroll_length:]\n #y_test = y_test[unroll_length:]\n y_train = unroll(y_train, unroll_length)\n y_test = unroll(y_test, unroll_length)\n # Only keep price\n #y_train = y_train[:, :, 0]\n #y_test = y_test[:, :, 0]\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n #–––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n\n print(\"lstm_input_dim: \",X_train.shape[-1])\n print(\"lstm_output_dim: \", unroll_length)\n print(\"dense_output_dim :\", y_train.shape[-1])\n\n if(args.model_type == \"lstm\" or args.model_type == \"wlstm\"):\n model = build_basic_lstm_model(lstm_input_dim = X_train.shape[-1], lstm_output_dim = unroll_length, dense_output_dim = y_train.shape[-1], return_sequences=True)\n elif(args.model_type == \"wlstm_a\"):\n model = build_att_lstm_model(lstm_input_dim = X_train.shape[-1], lstm_output_dim = unroll_length, dense_output_dim = y_train.shape[-1], return_sequences=True)\n else:\n print(\"This should not happen\")\n exit()\n #model = lstm(0.01, X_train.shape[1], X_train.shape[2]) #learning rate, input dimension 1, input dimension 2\n\n # Compile the model\n start = time.time()\n opt = tf.keras.optimizers.Adam(learning_rate = args.lr)\n model.compile(loss='mean_squared_error', optimizer = opt, metrics=['mse', 'mae'])\n print('compilation time : ', time.time() - start)\n\n # Create a callback that saves the model's weights\n #checkpoint_path = \"/Users/Sanjit/Repos/CollectiWise/\" + checkpoint_save_name\n checkpoint_path = os.path.join(os.getcwd(), checkpoint_save_name)\n cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=2)\n model.fit(X_train, y_train, epochs = args.max_iter, validation_split = 0.05, callbacks=[cp_callback], verbose=2)\n #model.fit(X_train, y_train, epochs = args.max_iter, validation_split = 0.05, verbose=2)\n\n # Load saved weights\n model.load_weights(checkpoint_path) #All we have to do before this line is to create and compile the model\n results = model.evaluate(X_test, y_test, verbose=1)\n print(\"test loss, mse, mae:\", results)\n predictions = model.predict(X_test)\n print(\"predictions shape:\", predictions.shape)\n #–––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n\n #Convert predictions and target back to 2D arrays, i.e. 
undo the effect of unroll()\n predictions = reroll(predictions, unroll_length)\n target = reroll(y_test, unroll_length)\n\n #Compute metrics\n mse = np.power((predictions - target), 2).sum(axis=0) / len(predictions)\n mae = np.abs(predictions - target).sum(axis=0)/len(predictions)\n rmse = np.sqrt(mse)\n\n ybar = np.tile(target.sum(axis=0)/len(target), (len(target), 1))\n tss = np.power(target - ybar, 2).sum(axis=0)\n r2 = 1 - (len(predictions)*mse / tss)\n\n print(\"MSE: \", mse)\n print(\"MAE: \", mae)\n print(\"RMSE: \", rmse)\n print(\"R-squared: \", r2)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='LSTM Single')\n parser.add_argument('--seed', default=1, type=int, help='random seed')\n parser.add_argument('--max_iter', default=100, type=float, help='maximum training iteration')\n parser.add_argument('--unroll_length', default=50, type=int, help='unroll length')\n parser.add_argument('--lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--model_type', default=\"wlstm_a\", type=str, help='model type (lstm, wlstm, wlstm_a)')\n parser.add_argument('--stock_idx', default=0, type=int, help='stock index in dataframe')\n args = parser.parse_args()\n main(args)\n\n'''\n1.\nWhy is our calculated values of mse, mae different from what we get by running model.evaluate?\n\n2.\nFor each 2hr period, i.e. each row in the dataframe:\nHow many standard deviations above the average price is the high price?\nHow many standard deviations below the average price is the low price?\nEstimate this from OHLC data.\nThen we can give estimate high and low prices in the next 2hr period using predictions from the model.\n'''\n"
},
{
"alpha_fraction": 0.704861581325531,
"alphanum_fraction": 0.7395681738853455,
"avg_line_length": 45.024391174316406,
"blob_id": "113c98d17d3e675ce51f8a64da0f9f5070cd38e6",
"content_id": "86ace9ff70e2aefbb8cbf20fc5a1540016aa71ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7549,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 164,
"path": "/GAN/GANmodel.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport tensorflow.keras.layers as layers\nimport keras as og_keras\nfrom keras_self_attention import SeqSelfAttention\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#BTC.csv\ndata = pd.read_csv('/Users/Sanjit/Google Drive/CollectiWise/Data/BTC.csv').drop(columns=['open', 'high', 'low', 'volume'])\n\n#Data preprocessing\n#Download the files models.py, dataUtils.py and waveletDenoising.py from LSTM branch and run this script in the same folder as those files. \nfrom models import *\nfrom dataUtils import *\nfrom waveletDenoising import normalise\n\nclose_data = normalise(data.close.to_numpy(), 0, 1)\n\nunroll_length = 10\nX_train, X_test, y_train, y_test = train_test_split_lstm(close_data, 5, int(close_data.shape[0] * 0.1))\nX_train = np.expand_dims(unroll(X_train, unroll_length), axis = 2)\ny_train = np.expand_dims(unroll(y_train, unroll_length), axis = 2)\nX_test = np.expand_dims(unroll(X_test, unroll_length), axis = 2)\ny_test = np.expand_dims(unroll(y_test, unroll_length), axis = 2)\n\n'''\n https://machinelearningmastery.com/how-to-develop-a-generative-adversarial-network-for-a-1-dimensional-function-from-scratch-in-keras/\n In the example above, generator guesses pairs (x, y). Discriminator classifies them as real or fake. \n The generator learns to guess pairs (x, x^2). The discriminator learns to classify only pairs (x, x^2) as real. \n\n We want our generator to guess a time series X, and the disciminator to classify it as real or fake. \n Generator: MLP. Returns next unroll_length prices from a randomly generated vector of size latent_dim.\n Disciminator: MLP with final sigmoid layer\n\n 'Training the discriminator model is straightforward. The goal is to train a generator model, not a discriminator model, \n and that is where the complexity of GANs truly lies.'\n\n\t'When the discriminator is good at detecting fake samples, the generator is updated more, and when the discriminator model \n\tis relatively poor or confused when detecting fake samples, the generator model is updated less.'\n\n\t'This is because the latent space has no meaning until the generator model starts assigning meaning to points in the space as it learns. \n\tAfter training, points in the latent space will correspond to points in the output space, e.g. in the space of generated samples.'\n\n\tIn the example, their samples were (x, y) pairs. For us, the sample will be an unroll_length dimensional vector.\n'''\n\n#a simple discriminator model\ndef define_discriminator(n_inputs=unroll_length):\n\tmodel = tf.keras.models.Sequential()\n\tmodel.add(layers.Dense(25, activation='relu', kernel_initializer='he_uniform', input_dim=n_inputs))\n\tmodel.add(layers.Dense(1, activation='sigmoid'))\n\t#Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\ndef define_generator(latent_dim, n_outputs=unroll_length):\n\tmodel = tf.keras.models.Sequential()\n\tmodel.add(layers.Dense(15, activation='relu', kernel_initializer='he_uniform', input_dim=latent_dim))\n\tmodel.add(layers.Dense(n_outputs, activation='linear'))\n\treturn model\n\ndef define_gan(generator, discriminator):\n\t#Include the line below if the discriminator has been pre-trained. 
This kinda defeats the point of a GAN though\n\t#discriminator.trainable=False \n\tmodel = tf.keras.models.Sequential()\n\tmodel.add(generator)\n\tmodel.add(discriminator)\n\t#Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam')\n\treturn model\n\ndef generate_real_samples(n_epoch, n): #gets n rows from X_train, which has 834 rows total\n\t#Line below slices first n rows when n_epoch = 1, slices next n rows when n_epoch = 2, and so on\n\tX1 = X_train[((n_epoch-1)*n):n_epoch*n] \n\tX = X1.reshape(n, unroll_length)\n\ty = np.ones((n, 1))\n\treturn X, y #1 is the class label, 1 means real\n\ndef generate_test_samples(n): #gets n rows from X_test, which has 136 rows total\n\t#####\n\t#####\n\tX1 = X_test[:n] #This simply gets the first n rows of X_test. Needs fixing so that we use the whole test set. \n\t#####\n\t#####\n\tX = X1.reshape(n, unroll_length)\n\ty = np.ones((n, 1))\n\treturn X, y #1 is the class label, 1 means real\n\n#We must have some vector to input into the first layer of the generator. \n#This function creates that input.\ndef generate_latent_points(latent_dim, n):\n\tx_input = np.random.randn(latent_dim * n)\n\tx_input = x_input.reshape(n, latent_dim)\n\treturn x_input\n\ndef generate_fake_samples(generator, latent_dim, n): #gets generator prediction\n\tx_input = np.random.randn(latent_dim * n)\n\tx_input = x_input.reshape(n, latent_dim)\n\tX = generator.predict(x_input)\n\ty = np.zeros((n, 1))\n\treturn X, y #0 is the class label, 0 means fake\n\ndef summarise_performance(epoch, generator, disciminator, latent_dim, n=34): \n\t#n = size of test set / int(n_epochs/n_eval). Here, it's 136/(4) = 34\n\t#My idea is that since int(n_epochs/n_eval) = 4, summarise performance will be called 4 times in train(), so each time\n\t#it's called, we use a quarter of the rows of X_test. Maybe each time it's called, we can use the whole of X_test???\n\tx_real, y_real = generate_test_samples(n) #On each successive call of this function, we want to get the next 34 rows. Right now, we just get the first 34 rows\n\t_, acc_real = discriminator.evaluate(x_real, y_real, verbose=0)\n\tx_fake, y_fake = generate_fake_samples(generator, latent_dim, n)\n\t_, acc_fake = discriminator.evaluate(x_fake, y_fake, verbose=0)\n\tprint(epoch, acc_real, acc_fake)\n\ndef train(generator, disciminator, gan, latent_dim, n_epochs=417, n_batch=4, n_eval=100):\n\thalf_batch = int(n_batch/2)\n\t#834 rows in X_train for BTC.csv. \n\t#n_epochs must be less or equal to 834/half_batch, ideally as close as possible to this limit so that we use all the training data. \n\t#In each epoch, dicriminator is trained on half_batch real samples and half_batch fake samples. We want to train at least once on each real sample in X_train.\n\tfor i in range(1, n_epochs+1):\n\t\tx_real, y_real = generate_real_samples(i, half_batch)\n\t\tx_fake, y_fake = generate_fake_samples(generator, latent_dim, half_batch)\n\n\t\tdisciminator.train_on_batch(x_real, y_real)\n\t\tdisciminator.train_on_batch(x_fake, y_fake)\n\n\t\tx_gan = generate_latent_points(latent_dim, n_batch)\n\t\ty_gan = np.ones((n_batch, 1))\n\n\t\tgan.train_on_batch(x_gan, y_gan)\n\t\t#Line below updates discriminator weights, so both models are trained simulataneously.\n\t\t#This is a deviation from the example, where they train the discriminator first. 
\n\t\t#This throws a shit ton of error messages\n\t\t#gan = define_gan(generator, discriminator) \n\n\t\t#evaluate the model every n_eval epochs on the test set\n\t\tif (i+1)%n_eval == 0:\n\t\t\tsummarise_performance(i, generator, disciminator, latent_dim)\n\nlatent_dim = 20 \ndiscriminator = define_discriminator()\ngenerator = define_generator(latent_dim)\ngan = define_gan(generator, discriminator)\n#train(generator, discriminator, gan, latent_dim) \n\n'''\n\tWhen we include line 118, partial output is\n\t299 0.0 0.9411764740943909\n\t399 0.23529411852359772 0.970588207244873\n\n\tWhen we comment out line 62, the output is \n\t99 0.0 0.6470588445663452\n\t199 0.0 0.20588235557079315\n\t299 0.3235294222831726 0.0882352963089943\n\t399 0.5 0.0882352963089943\n\n\tIdeally, what we want to see as output is something like\n\tindex 0.5 0.5 \n\tindex 0.49 0.51\n\tindex 0.51 0.49\n\ti.e. the generator is so good at creating fakes that the discriminator must guess at random. \n\n\tNext step: Try this with A LOT more data. \n\tIf you want to save the model weights, see trainPlayground.py in LSTM branch, or just see the guide on tensorflow.org.\n'''\n\n"
},
{
"alpha_fraction": 0.5353441834449768,
"alphanum_fraction": 0.5506309866905212,
"avg_line_length": 44.546730041503906,
"blob_id": "93b43570d4835cb8c9fbaf633897fbd18eba601c",
"content_id": "8e5bec225bd24f5146e686e83942a5daa56317f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9747,
"license_type": "no_license",
"max_line_length": 254,
"num_lines": 214,
"path": "/querying_data.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport json\nfrom google.cloud import bigquery\n\n\nclass SQL:\n def __init__(self, project = 'development-302415', dataset = 'machine_learning'):\n self.project = project\n self.client = bigquery.Client(project = self.project)\n self.dataset = self.client.dataset(dataset)\n self.query = []\n self.data = pd.DataFrame()\n \n def aggregate_to_intervals(self, interval_length, where = \"WHERE RIGHT(asset,3) = 'USD'\"):\n query = \"\"\"\n #Length in seconds of each interval\n DECLARE\n interval_length INT64 DEFAULT \"\"\"+str(interval_length)+\"\"\";\n\n #Adds a 'intervals' column which acts like an index for the interval each row belongs to\n CREATE OR REPLACE TABLE `development-302415.machine_learning.sorted_by_interval` AS\n WITH\n transactional AS (\n SELECT\n *,\n CAST(TRUNC(TIMESTAMP_DIFF(time_stamp,'2000-01-01 00:00:00+00', second)/interval_length,0) AS INT64) AS intervals,\n FROM\n `development-302415.machine_learning.weekly_v1`\n \"\"\"+where+\"\"\"\n ORDER BY\n intervals,\n asset )\n\n #Reverts 'intervals' index back to a timestamp, aggregates volume, average prices and OHLC prices over each interval (and over each asset)\n SELECT\n TIMESTAMP_ADD(TIMESTAMP '2000-01-01 00:00:00+00', INTERVAL t.intervals*interval_length second) AS time_stamp,\n t.asset,\n SUM(t.volume) AS volume,\n AVG(t.price) AS avg_price,\n AVG(open) AS open,\n MAX(t.price) AS high,\n MIN(t.price) AS low,\n AVG(close) AS close,\n AVG(label) as label,\n CASE\n WHEN COUNT(t.price) >= 2 THEN STDDEV(t.price)\n ELSE\n 0\n END\n AS std_price\n FROM (\n SELECT\n *,\n FIRST_VALUE(price) OVER(PARTITION BY intervals, asset ORDER BY time_stamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS open,\n LAST_VALUE(price) OVER(PARTITION BY intervals, asset ORDER BY time_stamp ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS close,\n FROM\n transactional) AS t\n GROUP BY\n t.intervals,\n t.asset\n ORDER BY t.intervals, label;\n \"\"\"\n self.query.append(query)\n self.client.query(query)\n \n def convert_to_features(self, features = 'avg_price,volume,std_price,label'):\n query = \"\"\"\n #DECLARE target_asset STRING DEFAULT 'ETHUSD';\n\n CALL `development-302415.machine_learning.pivot` (\n 'development-302415.machine_learning.sorted_by_interval','development-302415.machine_learning.assets_to_features', ['time_stamp'], 'asset','[STRUCT(\"\"\"+features+\"\"\")]',1000,'ANY_VALUE','');\n \n \n #CREATE OR REPLACE TABLE `development-302415.machine_learning.assets_to_features` AS\n #SELECT features.*, labels.label FROM `development-302415.machine_learning.assets_to_features` AS features\n #INNER JOIN `development-302415.machine_learning.sorted_by_interval` AS labels\n #ON features.time_stamp = labels.time_stamp AND labels.asset = target_asset\n #ORDER BY time_stamp;\n \"\"\"\n \n self.query.append(query)\n self.client.query(query)\n \n def get_table(self,table_name, max_results = None, csv_name = None):\n if max_results != None:\n self.data = self.client.query('SELECT * FROM `development-302415.'+table_name+'` LIMIT '+str(max_results))\n else:\n self.data = self.client.query('SELECT * FROM `development-302415.'+table_name+'`')\n self.data = self.data.result()\n self.data = self.data.to_dataframe()\n #table = self.client.get_table('development-302415.'+table_name)\n #self.data = self.client.list_rows(table).to_dataframe()\n if csv_name != None:\n self.data.to_csv(csv_name, index = False)\n return self.data\n\n def load_csv(self, path):\n self.data = pd.read_csv(path, 
header = 0, index_col = 0)\n \n def save_csv(self, path):\n self.data.to_csv(path)\n \n \n def unnest(self, columns_prefix = 'e_', na_fillers = {'avg_price':'ffill','volume':0,'std_price':0,'label':0,'high':'$avg_price','low':'$avg_price','open':'$avg_price','close':'$avg_price'}, dropna = False, merge_labels = True, label_name = 'label'):\n self.data = self.data.applymap(lambda x: x if x != '[]' else '[{}]')\n \n \n for column in self.data.columns.values:\n\n if column[:len(columns_prefix)] == columns_prefix:\n\n serie = self.data[column].map(lambda x: list(json.loads(x.replace('\\'','\\\"')))[0])\n serie = pd.json_normalize(serie)\n serie.set_index(self.data.index,inplace = True)\n \n for feature in serie.columns.values:\n try:\n if type(na_fillers[feature]) == int:\n serie[feature] = serie[feature].fillna(value = na_fillers[feature])\n self.data[column+' '+feature] = serie[feature]\n elif type(na_fillers[feature]) == str:\n if na_fillers[feature][0] == '$':\n serie[feature] = serie[feature].fillna(serie[na_fillers[feature][1:]])\n self.data[column+' '+feature] = serie[feature]\n else:\n serie[feature] = serie[feature].fillna(method = na_fillers[feature]) \n self.data[column+' '+feature] = serie[feature]\n else:\n raise KeyError('Fill method isn\\'t int or string for '+feature)\n\n except KeyError:\n raise KeyError('No NaN fill method declared for '+feature)\n\n self.data.drop(columns = column, inplace = True)\n \n #Puts all the '<asset> label' columns to the right\n if merge_labels:\n #print(self.data[[ i for i in list(self.data.columns) if i[-len(label_name):] == label_name]])\n labels = self.data[[ i for i in list(self.data.columns) if i[-len(label_name):] == label_name]].values.tolist()\n \n #print(labels)\n self.data.drop(columns = [ i for i in list(self.data.columns) if i[-len(label_name):] == label_name], inplace = True)\n self.data[label_name] = labels\n else:\n self.data = self.data[[ i for i in list(self.data.columns) if i[-len(label_name):] != label_name]+[ i for i in list(self.data.columns) if i[-len(label_name):] == label_name]]\n\n\n if dropna:\n self.data.dropna(axis = 0, inplace = True)\n\n return self.data\n \n def create_targets(self, targets = ['high','low'], merge_labels = True):\n for target in targets:\n df = self.data[[ i for i in list(self.data.columns) if i[-len(target):] == target]]\n df = df.rolling()\n \n \n \n def summarize(self, na_threshold = None):\n print('------------------------------------------------')\n if na_threshold != None:\n df = pd.Series(dtype = object)\n total = len(self.data.index.values)\n for column in self.data.columns.values:\n df[column] = self.data[column].isna().sum()/total*100\n\n df = df.where(df >= na_threshold).dropna().sort_values(ascending = False)\n\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print('Features with more than ',na_threshold,'% NaN: \\n',df)\n print(len(df),' features with more than ',na_threshold,'% NaN values')\n \n print('Features: ',len(self.data.columns.values)-1,',\\n'\n 'Timestamps:',len(self.data.index.values))\n print('------------------------------------------------')\n\n def dropna(self, threshold = 100):\n\n df = pd.Series(dtype = object)\n total = len(self.data.index.values)\n for column in self.data.columns.values:\n df[column] = self.data[column].isna().sum()/total*100\n\n df = df.where(df >= threshold).dropna().sort_values(ascending = False)\n\n assets = [i[:-10] for i in df.index.values if i[-10:] == ' avg_price']\n self.data = self.data[[i for i in 
self.data.columns.values if list(filter(i.startswith, assets)) == []]]\n self.data.dropna(inplace = True, axis = 0) \n\ndir = 'cloudshell_open/CollectiWise/'\nsql = SQL()\n\n#sql.aggregate_to_intervals(7200)\nsql.convert_to_features(features = 'avg_price,volume,std_price,label,high,low')\nsql.get_table('machine_learning.assets_to_features', csv_name = dir+'assets_to_features.csv')\nsql.data.drop(columns = ['time_stamp'], inplace=True)\n\n\nsql.load_csv(dir+'assets_to_features.csv')\nsql.unnest(merge_labels=True)\nprint(sql.data.columns.values)\nsql.save_csv(dir+'features_df.csv')\n\nsql.load_csv(dir+'features_df.csv')\n#sql.summarize(na_threshold=50)\nsql.dropna(threshold=50)\nsql.summarize()\nsql.data.set_index('label', inplace = True)\nsql.save_csv(dir+'formatted_features.csv')\n\nsql.load_csv(dir+'formatted_features.csv')\nprint(sql.data)\n"
},
{
"alpha_fraction": 0.745258092880249,
"alphanum_fraction": 0.7543817758560181,
"avg_line_length": 43.79570007324219,
"blob_id": "0dafddc9254b47ca7a2a93ffc89e548b43e5dc77",
"content_id": "82da29025e189ba46646a63578277f3f7e2b8f4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4165,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 93,
"path": "/wdDemo.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pywt \n\n'''\n#Importing stock data \nimport yfinance as yf\nfrom datetime import date,datetime,timedelta\nticker = '^GSPC'\nfirst_day = datetime(2000, 1, 3)\nlast_day = datetime(2019, 7, 1)\ndata = yf.Ticker(ticker).history(interval = '1d', start=first_day, end=last_day)\ndata.reset_index(inplace=True)\n'''\n\n'''\n#Importing our crypto data\nticker = 'QTUMUSD' #Try QTUMUSD, XBTEUR, ETCUSD, ZECXBT, GNOXBT, XBTEUR, LTCEUR, XBTUSD, EOSXBT, EOSETH, GNOUSD\ndata = pd.read_csv('/Users/Sanjit/Google Drive/CollectiWise/Data/high_low.csv') #change this\ndata = data[data['asset'] == ticker]\ndata.reset_index(inplace=True, drop=True)\n'''\n\ndata = pd.read_csv('/Users/Sanjit/Repos/CollectiWise/formatted_features.csv')\ncolumn = 'e_XBTUSD avg_price'\ndata = data[column]\n\nfrom waveletDenoising import denoise, SNR, RMSE, optDenoise, standardise, gridSearch_v2, optDenoise_v2 #Store this file in the same folder as 'waveletDenoising.py'\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom skimage.restoration import denoise_wavelet\n\n#x = np.array(data.Close)\nx = np.array(data)\noriginal_mean = np.mean(x)\noriginal_std = np.std(x)\n\n#In the paper they used zero-mean normalization, which means the series is just shifted vertically downwards by its mean.\nx = x - np.mean(x) #equivalently, standardise(x, 0, np.std(x))\n\n#x = standardise(x, 0, 1) #N(0,1) standardisation\n\n#See https://www.youtube.com/watch?v=HSG-gVALa84 \n#y = denoise_wavelet(x, wavelet='coif3', mode='hard', wavelet_levels=3, method='BayesShrink', rescale_sigma=True)\n#method: 'BayesShrink' or 'VisuShrink'\n#Most of the time, the denoised series is basically identical to the original. Problem is worse when we standardise to N(0, 1)\n#VisuShrink doesn't capture price peaks, and these obviously can't be noise.\n\ny = optDenoise_v2(x) \n\n#x = x + original_mean\n#y = y + original_mean\n#x = standardise(x, original_mean, original_std)\n#y = standardise(x, original_mean, original_std)\n\nprint(\"SNR: \", SNR(x, y))\nprint(\"RMSE: \", RMSE(x, y))\n\nplt.plot(data.index, x, color='Green')\nplt.plot(data.index, y, color='Red')\n#plt.title(ticker)\nplt.title(column)\nplt.show()\n\n\n'''\nWe see strange behaviour when the prices are very large (XBTEUR, XBTUSD, in 1000s) and very small (GNOXBT, EOSXBT, in 0.001s)\nWhen prices are large, the denoised signal is almost identical to the raw signal\nWhen prices are small, the denoised signal is a constant zero signal, i.e. nothing like the raw signal\n\nIt seems that in the second case, everything is considered noise since all the movements are so small, and in the first case,\nnothing is considered noise since all the movements are so large. \n\nThere must be some way to 'normalise' the data, so that the absolute value of prices moves is irrelevant, and only the relative\nvalue of price moves matters. \n\nI've now implented this in the rescale function: it rescales the data to have any mean and std you specify. The issue with \nrescaling and then descaling is that RMSE increases by a lot (for GSPC, where new_mean = sqrt(old_mean) and similarly for std). \nDespite this, the plot looks alright. \n\nWhy do we descale? At some point we need to, either after feeding the data through the model or before.\nRescaling, to the squares of the orignial mean and standard deviation, works really nicely with QTUMUSD. 
\nWhen the numbers are too small (<1), there seems to be some kind of numerical overflow: the denoised signal is way off. So, the\nusual mean = 0 std = 1 transform is not really an option. \nMany cryptos were worth extremely small amounts when they started trading. In these cases, the denoised signal at the start of the\nperiod is way off. ZECXBT offers basically no information. \n\nIt seems that it's not easy to write one function which can properly denoise every series we give it in just one click. \nThere needs to be an element of inspection. Maybe we can try a grid search for each series, but I don't see anything better.\n\nI have now implemented a grid search! Don't see how we can do much better. It works for the most part, but for certain assets,\nthe denoised series is still not right. \n'''"
},
{
"alpha_fraction": 0.6534020900726318,
"alphanum_fraction": 0.6688712239265442,
"avg_line_length": 49.693180084228516,
"blob_id": "eb84a508e8a4570b93ef26a85fcaffdc6d937f15",
"content_id": "ab66c4c6a0d784f7bb934bbc7976626bd8f98770",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8921,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 176,
"path": "/waveletDenoising.py",
"repo_name": "sanj909/CollectiWise",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pywt #after running pip install PyWavelets (https://github.com/PyWavelets/pywt, https://pywavelets.readthedocs.io/en/latest/)\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.restoration import denoise_wavelet\n\n'''\nThe denoising steps are the following : https://www.kaggle.com/theoviel/denoising-with-direct-wavelet-transform\n(1)Apply the dwt to the signal\n Which signal extension mode is best for financial time series? \n See https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html#ref-modes\n Periodization is bad: visually inspect the start of the signal constructed with this mode\n Actually it makes barely any difference, after trying out a few. \n if level is not specified or level = None, it's calculated using the dwt_max_level function. For this data, it is 5.\n coeffs is an array of arrays. len(coeffs) = level + 1\n As level increases the denoised signal gets smoother and is more different from the original signal.\n\n(2)Compute the threshold corresponding to the chosen level\n Increasing threshold seems to smooth out the curve more \n I've no idea how thresholds are calculated. See https://uk.mathworks.com/help/wavelet/ref/thselect.html\n threshold = 1 gives great results in terms of a high SNR and low RMSE.\n But not denoising at all gives the best result with these metrics (infinite SNR, 0 RMSE)! These metrics seem useless.\n I've been assessing performance visually, balancing smoothness of the curve with the similarity to the original.\n So, these (wavelet, mode, level) are true hyperparameters, which require retraining and testing of ML model to optimise\n https://uk.mathworks.com/help/wavelet/ug/denoising-signals-and-images.html: \n 'Minimax and SURE threshold selection rules are more conservative and would be more convenient when small details of \n the signal lie near the noise range. The two other (sqtwolog, mixture of sqtwolog and SURE) rules remove the noise \n more efficiently'\n https://uk.mathworks.com/help/wavelet/ug/wavelet-denoising.html:\n We assume that the magnitude of noise is constant throughout the signal. MATLAB has a function which automatically \n relaxes this assumption, i.e. automatically adjusts the threshold depending on the level of noise at each segment\n of the series (no of segments are also calculated automatically) but I'm not sure how it works. If you have MATLAB,\n this is one way we can improve the model. \n Maybe we can just split every series into 5 parts, denoise each separately, and stitch them back together. This will \n only work with a different threshold function though: sqtwolog, which we're using now, depends only on the length of \n the series. \n\n(3)Only keep coefficients with a value higher than the threshold\n Which threshold mode is best for financial time series? \n hard thresholding is much better the soft, by visual inspection of plot. garrote is similar to hard.\n It seems that they used hard thresholding in the paper.\n\n(4)Apply the inverse dwt to retrieve the signal\n(5)Sometimes, signal length is 1 greater than raw data length. We'll just remove the last value for now.\n'''\n\n#Input must be a numpy array. 
Output is a numpy array.\ndef denoise(raw, wavelet, level, mode='symmetric'):\n coeffs = pywt.wavedec(raw, wavelet, mode=mode, level=level) #(1)\n threshold = np.sqrt(2*np.log(len(raw))) #(2)sqtwolog function in MATLAB\n coeffs = [pywt.threshold(i, value=threshold, mode='hard') for i in coeffs]#(3)\n signal = pywt.waverec(coeffs, wavelet, mode=mode)#(4)\n if len(signal) > len(raw):#(5)\n signal = np.delete(signal, -1)\n return signal\n\ndef SNR(raw, denoised): #returns signal-to-noise ratio; equation (9) in paper; xhat_j is presumably the denoised series\n num = np.sum(np.power(raw, 2))\n den = np.sum(np.power(raw - denoised, 2))\n return 10*np.log(num/den)\n\ndef RMSE(raw, denoised): #Google 'root mean square deviation' for formula; equation (12) in paper is incorrect\n ss = np.sum(np.power(raw - denoised, 2))\n return np.sqrt(ss)\n\n#https://stats.stackexchange.com/questions/46429/transform-data-to-desired-mean-and-standard-deviation\ndef standardise(x, new_mean, new_std):\n return new_mean + (x - np.mean(x))*(new_std/np.std(x))\n\n#Rescaling series to ensure consistent performance of denoising function\n#The new mean should roughly be between 10 and 100, for most assets, according to the block below.\n#denoise function doesn't work with mean 0 variance 1 data for some reason\ndef rescale(x, orgnl_mean, orgnl_std):\n if 1 < orgnl_mean <= 10:\n x = standardise(x, np.power(orgnl_mean, 2), np.power(orgnl_std, 2))\n elif 100 < orgnl_mean:\n x = standardise(x, np.sqrt(orgnl_mean), np.sqrt(orgnl_std))\n elif orgnl_mean < 1:\n x = standardise(x, np.power(100, orgnl_mean), np.power(100, orgnl_std))\n elif orgnl_mean < 0.1:\n x = standardise(x, np.power(10000, orgnl_mean), np.power(10000, orgnl_std))\n return x\n\ndef gridSearch(x, orgnl_mean, orgnl_std):\n result = [-100000, '', 0] #SNR - RMSE, wavelet, level\n for w in pywt.wavelist(kind='discrete'):\n for l in range(2, 5):\n #x = rescale(x, orgnl_mean, orgnl_std)\n x = standardise(x, 0, 1)\n y = denoise(x, w, l)\n\n #x = standardise(x, orgnl_mean, orgnl_std)\n #y = standardise(y, orgnl_mean, orgnl_std)\n\n if (SNR(x, y) - RMSE(x, y)) > result[0]:\n result[0] = (SNR(x, y) - RMSE(x, y)); result[1] = w; result[2] = l\n\n return result\n\n#Input must be a simple iterable e.g. np.array, pd.Series, array. Output is a numpy array.\ndef optDenoise(x):\n x = np.array(x)\n orgnl_mean = np.mean(x); orgnl_std = np.std(x)\n\n params = gridSearch(x, orgnl_mean, orgnl_std)\n #x = rescale(x, orgnl_mean, orgnl_std)\n y = denoise(x, params[1], params[2])\n\n #standardise back to original distribution\n #x = standardise(x, orgnl_mean, orgnl_std)\n #y = standardise(y, orgnl_mean, orgnl_std)\n\n return y\n\n#grid search best parameters for denoising function. 
\ndef gridSearch_v2(x, metric):\n #metric=1: maximise SNR - RMSE\n #metric=2: maximise SNR\n #metric=3: minimise RMSE\n result = ['', 0, '', '', 1000000, 0, -1000000] #wavelet, level, mode, method, RMSE, SNR, SNR-RMSE\n\n #Only consider haar, db, sym, coif wavelet basis functions, as these are relatively suitable for financial data\n for w in [wavelet for wavelet in pywt.wavelist(kind='discrete') if wavelet.startswith(('haar', 'db', 'sym', 'coif'))]:\n for l in range(1, 5):\n for m in ['hard', 'soft']:\n for method in ['BayesShrink', 'VisuShrink']:\n y = denoise_wavelet(x, wavelet=w, mode=m, wavelet_levels=l, method=method, rescale_sigma=True)\n\n snr = SNR(x, y)\n rmse = RMSE(x, y)\n\n if metric == 1:\n if (snr - rmse) > result[6]:\n result[6] = (snr - rmse); result[0] = w; result[1] = l; result[2] = m; result[3] = method\n elif metric == 2:\n if (snr) > result[5]:\n result[5] = (snr); result[0] = w; result[1] = l; result[2] = m; result[3] = method\n elif metric == 3:\n if (rmse) < result[4]:\n result[4] = (rmse); result[0] = w; result[1] = l; result[2] = m; result[3] = method\n\n return result\n\ndef optDenoise_v2(x):\n x = np.array(x)\n #original_mean = np.mean(x)\n\n #In the paper they used zero-mean normalization, which means the series is just shifted vertically downwards by its mean.\n #x = x - np.mean(x) #equivalently, standardise(x, 0, np.std(x))\n\n #grid search best parameters for denoising function. \n #maximise SNR-RMSE, as they recommended in the paper.\n params = gridSearch_v2(x, 1) \n\n #See https://www.youtube.com/watch?v=HSG-gVALa84 \n y = denoise_wavelet(x, wavelet=params[0], wavelet_levels=params[1], mode=params[2], method=params[3], rescale_sigma=True)\n #y = denoise_wavelet(x, wavelet='coif3', wavelet_levels=3, mode='hard', method='BayesShrink', rescale_sigma=True) #paramters used in paper\n\n '''\n method: 'BayesShrink' or 'VisuShrink'\n Most of the time, the denoised series is basically identical to the original\n # VisuShrink doesn't capture price peaks, and these obviously can't be noise.\n '''\n #y = y + original_mean\n \n return y\n\n#takes a numerical dataframe as input\n#treats each column as a series, denoises each of these series\n#stitches back together and returns a dataframe\ndef denoise_df(df):\n for column in df.columns:\n x = np.array(df[column])\n df[column] = optDenoise_v2(x)\n return df"
}
] | 9 |
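The notes in the file above document a wavedec -> threshold -> waverec pipeline with the sqtwolog (universal) threshold and hard thresholding. Below is a minimal, self-contained sketch of that roundtrip on synthetic data; the test signal, the db4 wavelet and level 3 are illustrative assumptions rather than values from the repo, and the signal is scaled well away from unit variance because the notes themselves observe that the raw threshold misbehaves on mean-0/variance-1 data.

```python
# Sketch of the dwt -> hard-threshold -> idwt roundtrip described above.
# Signal, wavelet ('db4') and level (3) are illustrative assumptions.
import numpy as np
import pywt

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1024)
clean = 10 * np.sin(2 * np.pi * 5 * t)      # amplitude >> 1, see lead-in
noisy = clean + rng.standard_normal(t.size)

coeffs = pywt.wavedec(noisy, 'db4', mode='symmetric', level=3)              # (1) decompose
threshold = np.sqrt(2 * np.log(len(noisy)))                                 # (2) sqtwolog
coeffs = [pywt.threshold(c, value=threshold, mode='hard') for c in coeffs]  # (3) keep large coeffs
denoised = pywt.waverec(coeffs, 'db4', mode='symmetric')[:len(noisy)]       # (4)+(5) reconstruct, trim

print('residual std:', np.std(noisy - denoised))
```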
gmflau/redis-enterprise-asm-ingress | https://github.com/gmflau/redis-enterprise-asm-ingress | ef30e712900fc69f1b5fb99d5ff9d150c37dda89 | 341f910733136a1a4651e30b51d75a5cade2c90b | 87312593545b2bd8b8cc27503d250c10e8c14ec8 | refs/heads/main | 2023-06-05T03:42:04.953399 | 2021-06-27T04:33:33 | 2021-06-27T04:33:33 | 379,738,091 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7038952112197876,
"alphanum_fraction": 0.7222796678543091,
"avg_line_length": 29.644365310668945,
"blob_id": "9ca42906a4dd5a63618a7c94a7adbd2b138e9b51",
"content_id": "46bf4ea668eb5c84b1d7db7a22c859d89d6ccad3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8703,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 284,
"path": "/README.md",
"repo_name": "gmflau/redis-enterprise-asm-ingress",
"src_encoding": "UTF-8",
"text": "# Accessing a Redis Enterprise database from outside a GKE cluster (Through Anthos Service Mesh Ingress )\n\n## High Level Workflow\nThe following is the high level workflow which you will follow:\n1. Clone this repo\n2. Create a GKE cluster\n3. Install Anthos Service Mesh (ASM)\n4. Create a namespace for this deployment and deploy the Redis Enterprise Operator bundle\n5. Deploy a Redis Enterprise Cluster (REC)\n6. Deploy Ingress Gateway and Create routes for Redis Enterprise Cluster's HTTPS web access\n7. Access Redis Enterprise Cluster's console\n8. Generate a SSL certificate for the Redis Enterprise database\n9. Create a Redis Enterprise database instance with SSL/TLS enabled\n10. Update Ingress Gateway to include Redis Enterprise Database instance\n11. Verify SSL/TLS connection using openssl\n12. Connect to the Redis Enterprise database over SSL/TLS via a Python program\n\n\n#### 1. Clone this repo\n```\ngit clone https://github.com/gmflau/redis-enterprise-asm-ingress\ncd redis-enterprise-asm-ingress\n```\n\n\n#### 2. Create a GKE cluster\n```\nexport PROJECT_ID=$(gcloud info --format='value(config.project)')\nexport CLUSTER_NAME=\"glau-asm-gke-cluster\"\nexport CLUSTER_LOCATION=us-west1-a\n\n./create_cluster.sh $CLUSTER_NAME $CLUSTER_LOCATION\n```\n\n\n#### 3. Install Anthos Service Mesh (ASM)\nDownload ASM installation script\n```\ncurl https://storage.googleapis.com/csm-artifacts/asm/install_asm_1.9 > install_asm\ncurl https://storage.googleapis.com/csm-artifacts/asm/install_asm_1.9.sha256 > install_asm.sha256\nsha256sum -c --ignore-missing install_asm.sha256\nchmod +x install_asm\n```\nInstall Anthos Service Mesh (ASM) \nPlease make sure you have all the required [GCP IAM permissions](https://cloud.google.com/service-mesh/docs/installation-permissions) before running the script below. \n**Note:** You will need to run the command above in a Linux-based machine \n```\n./install_asm \\\n --project_id $PROJECT_ID \\\n --cluster_name $CLUSTER_NAME \\\n --cluster_location $CLUSTER_LOCATION \\\n --mode install \\\n --output_dir ./asm-downloads \\\n --enable_all\n```\n**Note:** You will need to run the command above in a Linux-based machine \n\n\n#### 4. Create a namespace for this deployment and deploy the Redis Enterprise Operator bundle\n```\nkubectl create namespace redis\nkubectl config set-context --current --namespace=redis\n\nkubectl apply -f https://raw.githubusercontent.com/RedisLabs/redis-enterprise-k8s-docs/v6.0.20-4/bundle.yaml\n```\n\n\n#### 5. Deploy a Redis Enterprise Cluster (REC)\n```\nkubectl apply -f - <<EOF\napiVersion: app.redislabs.com/v1alpha1\nkind: RedisEnterpriseCluster\nnamespace: redis\nmetadata:\n name: rec\nspec:\n nodes: 3\n persistentSpec:\n enabled: true\n storageClassName: \"standard\"\n volumeSize: 20Gi\nEOF\n```\n\n\n#### 6. 
Deploy Ingress Gateway and Create routes for Redis Enterprise Cluster's HTTPS web access\nDefine gateway for HTTPS access:\n```\nexport INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway \\\n -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\nexport SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway \\\n -o jsonpath='{.spec.ports[?(@.name==\"https\")].port}')\n\nkubectl apply -f - <<EOF\napiVersion: networking.istio.io/v1alpha3\nkind: Gateway\nmetadata:\n name: redis-gateway\nspec:\n selector:\n istio: ingressgateway # use istio default ingress gateway\n servers:\n - port:\n number: ${SECURE_INGRESS_PORT}\n name: https\n protocol: HTTPS\n tls:\n mode: PASSTHROUGH\n hosts:\n - rec-ui.${INGRESS_HOST}.nip.io\nEOF\n```\nConfigure routes for traffic entering via the gateway:\n```\nkubectl apply -f - <<EOF\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n name: rec\nspec:\n hosts:\n - rec-ui.${INGRESS_HOST}.nip.io\n gateways:\n - redis-gateway\n tls:\n - match:\n - port: ${SECURE_INGRESS_PORT}\n sniHosts:\n - rec-ui.${INGRESS_HOST}.nip.io\n route:\n - destination:\n host: rec-ui\n port:\n number: 8443\nEOF\n```\n\n\n#### 7. Access Redis Enterprise Cluster's console\nGrab the password for [email protected] user for accessing REC's configuration manager (CM):\n```\nkubectl get secrets -n redis rec -o jsonpath=\"{.data.password}\" | base64 --decode\n```\nAccess the CM's login page using the following URL:\n```\nhttps://rec-ui.<$INGRESS_HOST>.nip.io:443\n\nFor example:\nhttps://rec-ui.34.83.116.191.nip.io:443\n```\nLog in using [email protected] and the password collected above to view the cluster information in CM.\n\n\n\n#### 8. Generate a SSL certificate for the Redis Enterprise database\n```\nopenssl genrsa -out client.key 2048\n```\nWhen running the following command, just hit ENTER for every question except to enter *.rec.<$INGRESS_HOST>.nip.io for Common Name`:\n```\nopenssl req -new -x509 -key client.key -out client.cert -days 1826\n```\nCopy the content of proxy_cert.pem from one of the REC pods to your machine running **openssl** command later:\n```\nkubectl exec -it rec-0 -c redis-enterprise-node -n redis -- /bin/bash\ncd /etc/opt/redislabs\nmore proxy_cert.pem\n```\n\n\n#### 9. Create a Redis Enterprise database instance with SSL/TLS enabled\nGenerate a K8 secret for the SSL/TLS certificate:\n```\ncp client.cert cert\nkubectl create secret generic client-auth-secret-redb --from-file=./cert -n redis\n```\nDeploy a Redis Enterprise database:\n```\nkubectl apply -f - <<EOF\napiVersion: app.redislabs.com/v1alpha1\nkind: RedisEnterpriseDatabase\nnamespace: redis\nmetadata:\n name: redis-enterprise-database\nspec:\n memorySize: 100MB\n tlsMode: enabled\n clientAuthenticationCertificates:\n - client-auth-secret-redb\nEOF\n```\n\n#### 10. 
Update Ingress Gateway to include Redis Enterprise Database instance\nDefine gateway for SSL access:\n```\nexport INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway \\\n -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\nexport SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway \\\n -o jsonpath='{.spec.ports[?(@.name==\"https\")].port}')\nexport DB_PORT=$(kubectl get secrets -n redis redb-redis-enterprise-database \\\n -o jsonpath=\"{.data.port}\" | base64 --decode)\n\nkubectl apply -f - <<EOF\napiVersion: networking.istio.io/v1alpha3\nkind: Gateway\nmetadata:\n name: redis-gateway\nspec:\n selector:\n istio: ingressgateway # use istio default ingress gateway\n servers:\n - port:\n number: ${SECURE_INGRESS_PORT}\n name: https\n protocol: HTTPS\n tls:\n mode: PASSTHROUGH\n hosts:\n - rec-ui.${INGRESS_HOST}.nip.io\n - redis-${DB_PORT}.demo.rec.${INGRESS_HOST}.nip.io\nEOF\n```\nConfigure routes for traffic entering via the gateway for the database:\n```\nkubectl apply -f - <<EOF\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n name: redis-${DB_PORT}\nspec:\n hosts:\n - redis-${DB_PORT}.demo.rec.${INGRESS_HOST}.nip.io\n gateways:\n - redis-gateway\n tls:\n - match:\n - port: ${SECURE_INGRESS_PORT}\n sniHosts:\n - redis-${DB_PORT}.demo.rec.${INGRESS_HOST}.nip.io\n route:\n - destination:\n host: redis-enterprise-database\n port:\n number: ${DB_PORT}\nEOF\n```\n\n\n#### 11. Verify SSL/TLS connection using openssl\nGrab the password of the Redis Enterprise database:\n```\nkubectl get secrets -n redis redb-redis-enterprise-database \\\n-o jsonpath=\"{.data.password}\" | base64 --decode\n```\nRun the following to open a SSL session:\n```\nopenssl s_client -connect redis-${DB_PORT}.demo.rec.${INGRESS_HOST}.nip.io:${SECURE_INGRESS_PORT} \\\n-key client.key -cert client.cert -CAfile ./proxy_cert.pem \\\n-servername redis-${DB_PORT}.demo.rec.${INGRESS_HOST}.nip.io\n\nFor example,\nopenssl s_client -connect redis-11338.demo.rec.34.127.23.12.nip.io:443 \\\n-key client.key -cert client.cert -CAfile ./proxy_cert.pem \\\n-servername redis-11338.demo.rec.34.127.23.12.nip.io\n``` \nYou should see a similar output as follows. Replace <redis-enterprise-database-password> with your Redis Enterprise database instance's password. Make sure there is a space after the password on MacOS. See below:\n\nSend a **PING** command by entering PING followed by a blank space before hitting the **RETURN** button: \n\n\n\n#### 12. Connect to the Redis Enterprise database over SSL/TLS via a Python program\nRun test.py to verify SSL/TLS connection:\n```\nexport DB_PASSWORD=$(kubectl get secrets -n redis redb-redis-enterprise-database \\\n -o jsonpath=\"{.data.password}\" | base64 --decode)\n\npython test.py ${INGRESS_HOST} ${DB_PORT} ${DB_PASSWORD}\n\nFor example,\npython test.py 34.83.49.103 16667 QUhZiDXB \n```\nIt should produce output about the Redis Enterprise database's information as follows:\n\n"
},
{
"alpha_fraction": 0.5103857517242432,
"alphanum_fraction": 0.5311572551727295,
"avg_line_length": 36.44444274902344,
"blob_id": "1a93a642bab26b8934994e0f2b93b291d92cf067",
"content_id": "e01e50c7573720228ab1dd6030a7afc48d882386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 337,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 9,
"path": "/test.py",
"repo_name": "gmflau/redis-enterprise-asm-ingress",
"src_encoding": "UTF-8",
"text": "import redis\nimport sys\n\nr = redis.StrictRedis(host='redis-' + sys.argv[2] + '.demo.rec.' + sys.argv[1] + '.nip.io',\n port=443, db=0, ssl=True, password=sys.argv[3],\n ssl_keyfile='./client.key',\n ssl_certfile='./client.cert',\n ssl_ca_certs='./proxy_cert.pem')\nprint(r.info())\n"
},
{
"alpha_fraction": 0.6455696225166321,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 20.454545974731445,
"blob_id": "d2b42ba7259cf405cf77b4d8bfd1725897cc22a2",
"content_id": "ef9714ddbaa95966fffcf36b35a9a37f2f32400e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 11,
"path": "/create_cluster.sh",
"repo_name": "gmflau/redis-enterprise-asm-ingress",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -ex\n\ngcloud beta container clusters create $1 \\\n --zone=$2 --num-nodes=3 \\\n --image-type=COS_CONTAINERD \\\n --machine-type=e2-standard-8 \\\n --network=default \\\n --enable-ip-alias \\\n --enable-stackdriver-kubernetes\n\n"
}
] | 3 |
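Step 11 of the README above verifies the TLS passthrough route with openssl s_client. The same check can be sketched in Python's standard ssl module; this snippet is not part of the repo, and the hostname below is just the README's example value.

```python
# Rough Python analogue of the step-11 openssl check: connect with SNI plus a
# client certificate, then print the negotiated TLS version and the peer cert.
import socket
import ssl

host = 'redis-11338.demo.rec.34.127.23.12.nip.io'  # example value from the README
port = 443

ctx = ssl.create_default_context(cafile='./proxy_cert.pem')
ctx.load_cert_chain(certfile='./client.cert', keyfile='./client.key')
ctx.check_hostname = False  # the proxy cert's CN may not match the nip.io name

with socket.create_connection((host, port)) as sock:
    with ctx.wrap_socket(sock, server_hostname=host) as tls:
        print(tls.version())
        print(tls.getpeercert())
```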
rarchk/beemodoro | https://github.com/rarchk/beemodoro | bad60888e2a34a5878084fbb536b392f919cf9ee | c9d3b9b93ec4af6d1876b1e37e0786328d9d294e | 1e6a068bdf93dd528f8a91e87a3d701881ed902a | refs/heads/master | 2020-03-27T01:18:04.385497 | 2018-08-25T19:55:05 | 2018-08-25T19:55:05 | 145,698,326 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6715328693389893,
"alphanum_fraction": 0.6804541945457458,
"avg_line_length": 26.399999618530273,
"blob_id": "ff833824f95c0920cd8997c147beb1993b2febc9",
"content_id": "a7346b37624e6ce4336c2d9be1a5a93c7a4aafe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1239,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 45,
"path": "/README.md",
"repo_name": "rarchk/beemodoro",
"src_encoding": "UTF-8",
"text": "Beeminder + Pomodoro = 🍅\n=========================\nUtilize the power of `Beeminder <http://beeminder.com/>`_ and `Pomodoro\n<http://pomodorotechnique.com>`_. This little script runs a Pomodoro timer in\nyour terminal and increments your Beeminder goal counter when it's done. This\nrequires your project to count 1 Pomodoro per step.\n\nIt uses linux **notify-send** to use notifications as well. \n\n## Setup \n```bash\ngit clone https://github.com/rarchk/beemodoro\n# It requires python > 3 \nvirtualenv bmdro -p python2.6\nsource bmdro/bin/activate\npip install -r requirements.txt\n```\n\n## Usage\n```bash\ncd beemodoro\npython __init.py__ \"My first beemodoro session\" [Custom time]\n```\n\n## BEEMINDER SETUP\n-----------\n1. I've Hardcoded the url in `__init__.py` \n\n - ``BEEMINDER_KEY``: your Beeminder API key\n - ``BEEMINDER_USER``: your Beeminder username\n - ``BEEMINDER_GOAL``: your Beeminder goal slug name (can be found in your\n goal settings)\n\n2. Run ``beemodoro \"Work on secret project\" [optional_custom_pomodoro_length]``\n3. Work for 25 minutes\n4. You just finished a Pomodoro! Yay! Take a break 🍅\n\nManual Goal Tracking\n--------------------\n``track_goal [comment]``\n\nRequirements\n---------------\n- Python 3\n- OS X say (for TTS output)\n"
},
{
"alpha_fraction": 0.5934990644454956,
"alphanum_fraction": 0.6045889258384705,
"avg_line_length": 23.439252853393555,
"blob_id": "082f64c0a016df9d0561965dbbe018c1be93adc9",
"content_id": "d923bc3c13e7a5c6737e252237ef26fa7115f128",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2618,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 107,
"path": "/beemodoro/__init__.py",
"repo_name": "rarchk/beemodoro",
"src_encoding": "UTF-8",
"text": "from multiprocessing import Process\nfrom os import (\n environ,\n system,\n)\nfrom sys import argv\nfrom requests import post\nfrom time import sleep\nimport platform\n\nPOMODORO_LENGTH = 25 * 60\nBREAK_LENGTH = 5 * 60\n\n\nBEEMINDER_KEY = environ['BEEMINDER_KEY']\nUSER = environ['BEEMINDER_USER']\nGOAL = environ['BEEMINDER_GOAL']\n\nBEEMINDER_API_ENDPOINT = \"https://www.beeminder.com/api/v1/{}\"\nBEEMINDER_POST_ENDPOINT = BEEMINDER_API_ENDPOINT.format(\n \"/users/{USER}/goals/{GOAL}/datapoints.json?\"\n \"auth_token={BEEMINDER_KEY}\".format(**locals()))\n\nmessages = {\n 'pomodoro_start': 'Pomodoro has started.',\n 'pomodoro_over': 'Pomodoro over.',\n 'break_start': 'Take a break, {}.'.format(USER),\n 'break_over': 'Break over.',\n 'transferring': 'Transferring data to Beeminder.',\n 'time_remaining': 'Time Remaining'}\n\n\ndef say_print(message_id):\n message = messages[message_id]\n print(message)\n p = Process(target=lambda: system(\"say {}\".format(message)))\n p.start()\n\n\ndef send_data(activity=\"\"):\n response = post(\n BEEMINDER_POST_ENDPOINT,\n data={'comment': activity, 'value': '1'})\n assert response.status_code < 400, response.text\n return response\n\n\ndef timer(length):\n for i in reversed(range(length)):\n minutes = i // 60\n seconds = i % 60\n print(\"\\r\", end='')\n print(\n \"{}: {:02}:{:02}\".format(\n messages['time_remaining'],\n minutes, seconds),\n end='',\n flush=True)\n sleep(1)\n print(\"\")\n\n\ndef pomodoro(activity, length=POMODORO_LENGTH):\n say_print('pomodoro_start')\n timer(length)\n say_print('pomodoro_over')\n sleep(5)\n send_notification(activity, 'face-cool')\n\n say_print('transferring')\n sleep(5)\n send_data(activity)\n\n say_print('break_start')\n timer(BREAK_LENGTH)\n say_print('break_over')\n send_notification('Pomodoro break over', 'face-cool')\n\n\ndef send_notification(activity, expression):\n os = check_operating_system()\n if os == 'Linux':\n cmd = \"notify-send '%s Pomodoro' '%s Done!' -i %s\"\\\n % (GOAL, activity, expression)\n elif os == 'Darwin':\n cmd = 'osascript -e \\'display notification \"%s 🍺\" with title \"%s\" sound name \"default\"\\''\\\n % (activity, GOAL)\n system(cmd)\n\n\ndef check_operating_system():\n return platform.system()\n\n\ndef main():\n assert len(argv) >= 2\n msg = argv[1]\n if len(argv) == 3:\n length = int(argv[2]) * 60\n else:\n length = POMODORO_LENGTH\n\n pomodoro(msg, length=length)\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
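send_data() in the script above posts a single datapoint to Beeminder. For testing that call in isolation, here is a standalone sketch that rebuilds the same endpoint from the environment variables the README lists; the comment string is arbitrary.

```python
# Standalone sketch of the datapoint POST performed by send_data(); it assumes
# BEEMINDER_USER, BEEMINDER_GOAL and BEEMINDER_KEY are set in the environment.
from os import environ
import requests

url = ('https://www.beeminder.com/api/v1/users/{user}/goals/{goal}/'
       'datapoints.json?auth_token={key}').format(
    user=environ['BEEMINDER_USER'],
    goal=environ['BEEMINDER_GOAL'],
    key=environ['BEEMINDER_KEY'])

resp = requests.post(url, data={'value': '1', 'comment': 'Worked on secret project'})
resp.raise_for_status()  # fail loudly, like the assert in send_data()
print(resp.json())
```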
PHugues/Poker | https://github.com/PHugues/Poker | 21d9fa2152359a9352f01c3a12f48c3162f49d88 | bdc0be86c401ae5a7e43da172cae6cb47ed0b484 | c0f89a49df18e06afe442180ff43ffc229a7bc33 | refs/heads/master | 2020-03-24T03:45:25.608432 | 2018-09-29T09:44:03 | 2018-09-29T09:44:03 | 142,431,886 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5760869383811951,
"alphanum_fraction": 0.584782600402832,
"avg_line_length": 19.909090042114258,
"blob_id": "10378ce8616c4581a2c5d34a6fcdd8966f372a17",
"content_id": "dde67fd209c782a678a1f389d4a3f65cdde5d58a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 22,
"path": "/winninghand.py",
"repo_name": "PHugues/Poker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport variables\n\n\"\"\"Determines which hand is winning.\"\"\"\n\n\ndef rank_color(hand):\n \"\"\"Return the rank and the color of the hand given.\"\"\"\n rank = list()\n color = list()\n for card in hand:\n card = card.split(\" \")\n rank.append(card[0])\n color.append(card[-1])\n return (rank, color)\n\n\ndef value(hand):\n \"\"\"Return the value of the hand.\"\"\"\n (rank, color) = rank_color(hand)\n"
},
{
"alpha_fraction": 0.49513211846351624,
"alphanum_fraction": 0.5034770369529724,
"avg_line_length": 22.96666717529297,
"blob_id": "f5d6d24b40cfd54b6dea096585c2fba09c6bcc75",
"content_id": "80f96a8c08e1e984db38360d2d38b5937f7ff8dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/poker.py",
"repo_name": "PHugues/Poker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport winninghand\nimport variables\nfrom random import choice\n\nhands = list()\n\n\ndef create_hands():\n \"Creates a hand of 5 cards.\"\n hand = list()\n for _ in range(0, 5):\n card = str()\n card = choice(variables.list_rank) + choice(variables.list_color)\n hand.append(card)\n return hand\n\n\n# Primary code\nfor _ in range(5):\n hands.append(create_hands())\nfor hand in hands:\n (rank, color) = winninghand.rank_color(hand)\n print(\"Hand : \", hand)\n print(\"=> Color : \", color)\n print(\"=> Rank : \", rank)\n print(\"----------------------------------------------------------------\" +\n \"--------------------------------------\")\n"
},
{
"alpha_fraction": 0.5054545402526855,
"alphanum_fraction": 0.5127272605895996,
"avg_line_length": 29.55555534362793,
"blob_id": "390f3483816bd4f1e0dcdc812303d21ecb472dcc",
"content_id": "cf619557eaa3e4a0914ec09373c95e336b941eec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/variables.py",
"repo_name": "PHugues/Poker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nlist_color = ['Clubs', 'Hearts', 'Diamonds', 'Spades']\nlist_rank = [\n 'Ace of ', 'Two of ', 'Three of ', 'Four of ', 'Five of ',\n 'Six of ', 'Seven of ', 'Eight of ', 'Nine of ',\n 'Jack of ', 'Queen of ', 'King of '\n]\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 39,
"blob_id": "b97d340d173eed7df80630f648366fbe8e7123ad",
"content_id": "969e4f961bc530db983ea4c96cc55755d0800cc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 2,
"path": "/README.md",
"repo_name": "PHugues/Poker",
"src_encoding": "UTF-8",
"text": "# Poker\nSoftware created in order to determine which player would win the hand.\n"
}
] | 4 |
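value() in winninghand.py is still a stub. As one hedged sketch of where it could go (not the repo's intended design), rank multiplicities alone already separate pairs, trips, full houses and quads:

```python
# Illustrative only: score a 5-card hand by rank multiplicity.
# Straights and flushes are deliberately omitted from this sketch.
from collections import Counter

def simple_value(ranks):
    counts = sorted(Counter(ranks).values(), reverse=True)
    if counts[0] == 4:
        return 7  # four of a kind
    if counts[0] == 3 and counts[1] == 2:
        return 6  # full house
    if counts[0] == 3:
        return 3  # three of a kind
    if counts[0] == 2 and counts[1] == 2:
        return 2  # two pair
    if counts[0] == 2:
        return 1  # one pair
    return 0      # high card

print(simple_value(['Ace', 'Ace', 'King', 'King', 'Two']))  # -> 2
```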
sluzhynskyi/json_manager | https://github.com/sluzhynskyi/json_manager | f5bfcd733062c45aeea30d96298221baa0b0642e | 36b8d9c43038ff5fc9761b496547d0e8f65fcd3f | 497f157be49b2b9173ccb51b33113ea04fba91e2 | refs/heads/master | 2020-04-23T05:04:31.380345 | 2019-02-15T21:31:42 | 2019-02-15T21:31:42 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7030516266822815,
"alphanum_fraction": 0.7147887349128723,
"avg_line_length": 17.88888931274414,
"blob_id": "d3d2ac765d3dfe94186e417aefca609cb4bb2493",
"content_id": "4d92454507d79f0014f7ec55b6d7269edf1a7ac6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 45,
"path": "/README.md",
"repo_name": "sluzhynskyi/json_manager",
"src_encoding": "UTF-8",
"text": "# Json manager\n\n> This python module helps you mange your Json file.\n\nYou write key(folder) and you get at response value, so than you can go to the next key(folder)...\n\n\n## Getting Started\n\nThis would be easy, just relax...\n\n### Prerequisites\n\nWithout Prerequisites\n\n### Installing\n\nA step by step series of examples that tell you how to get a development env running\n\nDownloading my repo\n\n```\nhttps://github.com/wat4era/json_manager.git\n```\n* Changing path in the json_manager.py (lines 3) \n\n\n## Running the tests\n* Run json_manager.py \n\n## Modules description:\n* fld_lst() - returns all possible folders where user can move\n* current_pos() - show your current position\n* usage() - prints usage\n* main() - runs program\n## Conclusion\n\nThis module is useful for json files reading. \n\n## Versioning\n02 15 2019 - demo version\n\n## Authors\n\n* **Danylo Sluzhynskyi** \n\n"
},
{
"alpha_fraction": 0.50917649269104,
"alphanum_fraction": 0.5115293860435486,
"avg_line_length": 23.70930290222168,
"blob_id": "84934515c9cfa5c06a60c34c9e27fcd9d647e4d1",
"content_id": "4f9ab70ae8e51b3aeebcc2386599e1b3009c630e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2125,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 86,
"path": "/json_manager.py",
"repo_name": "sluzhynskyi/json_manager",
"src_encoding": "UTF-8",
"text": "import json\n\npath = \"YOUR_JSONFILE_PATH\"\nfile = open(path, mode=\"r\", encoding='utf8')\nj_file = json.load(file)\n\n\ndef fld_lst(type):\n \"\"\"\n any -> lst\n Function returns all posible folders where user can move\n \"\"\"\n lst = []\n if isinstance(type, dict):\n return list(type.keys())\n elif isinstance(type, list):\n for i in range(1, len(type) + 1):\n lst.append(\"element \" + str(i))\n return lst\n\n\ndef current_pos():\n \"\"\"\n None -> None\n This function show your current position\n \"\"\"\n st = \"/ \"\n for i in parents:\n st += i + \" /\"\n return st\n\n\ndef usage():\n \"\"\"\n None -> None\n For user understanding\n \"\"\"\n print(\"IF you wont move to another directory write folder_name\")\n print(\"IF you wont move to the home directory just press Enter\\n\")\n\n\ndef main():\n \"\"\"\n None -> None\n Main function for starting all module\n \"\"\"\n global parents, current_lct\n parents = []\n current_lct = j_file\n\n # Printing usage\n usage()\n # Main program\n while True:\n st = \"\"\n try:\n # User position\n if parents:\n pos = current_pos()\n print(pos)\n # Folders that user can check out\n if isinstance(current_lct, dict) or isinstance(current_lct, list):\n for i in fld_lst(current_lct):\n st += i + \"|\"\n print(st)\n else:\n print(current_lct)\n # User moving to the another folder\n prompt = input(\"$: \")\n if prompt == \"\":\n current_lct = j_file\n parents = []\n elif isinstance(current_lct, list):\n current_lct = current_lct[int(prompt[8:]) - 1]\n parents.append(prompt)\n elif isinstance(current_lct, dict):\n current_lct = current_lct[prompt]\n parents.append(prompt)\n except KeyError:\n print(\"This file not exist\")\n except ValueError:\n print(\"This file not exist\")\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
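The module above walks nested JSON one key (or "element N") at a time in an interactive loop. The same navigation rule can be written as a plain function, sketched here against an in-memory structure so no file path is needed:

```python
# Sketch of the module's key / 'element N' navigation as a pure function.
import json

def walk(node, path):
    """Follow a list of dict keys and 'element N' steps through nested JSON."""
    for step in path:
        if isinstance(node, list):
            node = node[int(step.split()[-1]) - 1]  # 'element 2' -> index 1
        else:
            node = node[step]
    return node

data = json.loads('{"a": {"b": [10, 20, 30]}}')
print(walk(data, ['a', 'b', 'element 2']))  # -> 20
```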
LucasAlexander13/free-code-camp | https://github.com/LucasAlexander13/free-code-camp | fe13807eb25c7a1414e56eefa2db175876f016b6 | 223ecc299edd74ad69f0d8d3553e7d1957f26b0b | df936786182477b4dc5f19560e6591e538a04a40 | refs/heads/master | 2023-08-27T17:37:36.265297 | 2021-09-03T12:01:08 | 2021-09-03T12:01:08 | 417,912,404 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5052728056907654,
"alphanum_fraction": 0.5263640284538269,
"avg_line_length": 28.079999923706055,
"blob_id": "01ac1f971a2a5a12c0de2d12e5c26ae3adac13de",
"content_id": "6e3e5bd2ea597b223a039de0f63b1896c08fc42f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2181,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 75,
"path": "/scientific-computing-with-python/boilerplate-arithmetic-formatter/arithmetic_arranger.py",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "def first_line(problem):\n max_len = len(max(problem, key=len))\n spaces = \" \" * (max_len - len(problem[0]) + 2)\n beetween = \" \" * 4\n return f\"{spaces}{problem[0]}{beetween}\"\n\ndef secnd_line(problem):\n max_len = len(max(problem, key=len))\n spaces = \" \" * (max_len - len(problem[2]) + 1)\n beetween = \" \" * 4\n return f\"{problem[1]}{spaces}{problem[2]}{beetween}\"\n\ndef third_line(problem):\n max_len = len(max(problem, key=len))\n underlines = \"-\" * (max_len + 2)\n beetween = \" \" * 4\n return f\"{underlines}{beetween}\"\n\ndef solve_line(problem):\n if problem[1] == \"+\":\n result = int(problem[0]) + int(problem[2])\n elif problem[1] == \"-\":\n result = int(problem[0]) - int(problem[2])\n \n max_len = len(max(problem, key=len)) + 2\n result_len = len(str(result))\n spaces = \" \" * (max_len - result_len)\n beetween = \" \" * 4\n return f\"{spaces}{result}{beetween}\"\n\n##############################################################################\n\ndef arithmetic_arranger(problems, result=False):\n\n if len(problems) > 5:\n return \"Error: Too many problems.\"\n \n for problem in problems:\n if \"x\" in problem or \"/\" in problem:\n return \"Error: Operator must be '+' or '-'.\"\n \n terms = problem.split(\" \")\n \n for char in terms[0] + terms[2]:\n if char.isalpha():\n return \"Error: Numbers must only contain digits.\"\n if len(terms[0]) > 4 or len(terms[2]) > 4:\n return \"Error: Numbers cannot be more than four digits.\"\n\n line1 = \"\"\n line2 = \"\"\n line3 = \"\"\n if result == True:\n line4 = \"\"\n\n for problem in problems:\n terms = problem.split()\n\n line1 += first_line(terms)\n line2 += secnd_line(terms)\n line3 += third_line(terms)\n if result == True:\n line4 += solve_line(terms)\n \n line1 = line1.rstrip()\n line2 = line2.rstrip()\n line3 = line3.rstrip()\n if result == True:\n line4 = line4.rstrip()\n \n arranged_problems = f\"{line1}\\n{line2}\\n{line3}\"\n if result == True:\n arranged_problems += f\"\\n{line4}\"\n\n return arranged_problems\n"
},
{
"alpha_fraction": 0.4844290614128113,
"alphanum_fraction": 0.49596309661865234,
"avg_line_length": 27.58241844177246,
"blob_id": "fdb84c65e5017dfe395d754d7006a74fe9905351",
"content_id": "386f812cacb377c6834673bb4cc97e5133d9348c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2601,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 91,
"path": "/scientific-computing-with-python/boilerplate-budget-app/budget.py",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "class Category:\n def __init__(self, name):\n self.category = name\n self.ledger = []\n self.balance = 0\n self.total = 0\n self.spent = 0\n\n def deposit(self, amount, description=\"\"):\n self.ledger.append({\"amount\": amount, \"description\": description})\n self.balance += amount\n self.total += amount\n\n def withdraw(self, amount, description=\"\"):\n if amount < self.balance:\n self.ledger.append({\"amount\": -amount, \"description\": description})\n self.balance -= amount\n self.spent += amount\n return True\n else:\n return False\n \n def get_balance(self):\n return self.balance\n \n def transfer(self, amount, budget):\n if amount < self.balance:\n self.withdraw(amount, f\"Transfer to {budget.category}\")\n budget.deposit(amount, f\"Transfer from {self.category}\")\n return True\n else:\n return False\n \n def check_funds(self, amount):\n if amount <= self.balance:\n return True\n else:\n return False\n\n def __str__(self):\n string = self.category.center(30, \"*\") + \"\\n\"\n \n for item in self.ledger:\n description_line = \"{:<23}\".format(item[\"description\"])\n amount_line = \"{:>7.2f}\".format(item[\"amount\"])\n\n string += f\"{description_line[:23]}{amount_line[:7]}\\n\" \n string += f\"Total: {self.balance:.2f}\"\n\n return string\n\ndef create_spend_chart(categories):\n string = \"Percentage spent by category\\n\"\n \n spent = {}\n max_length = 0\n for item in categories:\n spent[item.category] = int(item.spent / item.total * 10)\n\n if len(item.category) > max_length:\n max_length = len(item.category)\n \n for i in range(11):\n table = 100 - i * 10\n if table == 100:\n string += f\"{table}| \"\n elif table == 0:\n string += f\" {table}| \"\n else:\n string += f\" {table}| \"\n\n for item in categories:\n if spent[item.category] * 10 >= table:\n string += \"o \"\n else:\n string += \" \"\n string += \"\\n\"\n\n string += \" -\" + \"---\" * len(categories) + \"\\n\"\n\n for i in range(max_length):\n string += \" \"\n for item in categories:\n if len(item.category) > i:\n name = item.category\n string += name[i] + \" \"\n else:\n string += \" \"\n string += \"\\n\"\n \n return string[:-2]\n"
},
{
"alpha_fraction": 0.8201219439506531,
"alphanum_fraction": 0.8277438879013062,
"avg_line_length": 64.5,
"blob_id": "3a51a1798d1c8bb85daf321d07ac9b858c2c75ff",
"content_id": "bfad02eee200461ac8cd4f1e30e6c5e48ac0f86a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 10,
"path": "/README.md",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "# freeCodeCamp Projects\n\nThis repository was created to store my projects for freeCodeCamp courses, that are linked below.\n\n## Scientific Computing With Python\n1. [Arithmetic Formatter](scientific-computing-with-python/boilerplate-arithmetic-formatter/README.md)\n2. [Time Calculator](scientific-computing-with-python/boilerplate-time-calculator/README.md)\n3. [Budget App](scientific-computing-with-python/boilerplate-budget-app/README.md)\n4. [Polygon Area Calculator](scientific-computing-with-python/boilerplate-polygon-area-calculator/README.md)\n5. [Probability Calculator](scientific-computing-with-python/boilerplate-probability-calculator/README.md)\n\n"
},
{
"alpha_fraction": 0.4653652310371399,
"alphanum_fraction": 0.491183876991272,
"avg_line_length": 25.032787322998047,
"blob_id": "9fcb6805942956d2c9820eeda7d4e4bb66cf7393",
"content_id": "a1d3843b13b028cb63de065a53f6ab5cd649b523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 61,
"path": "/scientific-computing-with-python/boilerplate-time-calculator/time_calculator.py",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "def next_day(day, days_gone):\n days = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n position = days.index(day)\n\n position += days_gone\n while position > 6:\n position -= 7\n \n return days[position]\n\ndef add_time(start, duration, actual_day=None):\n start_hrs = int(start[:-6])\n start_min = int(start[-5:-3])\n period = start[-2:]\n\n duration_hrs = int(duration[:-3])\n duration_min = int(duration[-2:])\n\n new_min = start_min + duration_min\n if new_min >= 60:\n new_min -= 60\n duration_hrs += 1\n \n new_hrs = start_hrs + duration_hrs\n days_gone = 0\n\n new_min = new_min if len(str(new_min)) == 2 else \"0\" + str(new_min) \n\n if period == \"AM\":\n while new_hrs >= 24:\n new_hrs -= 24\n days_gone += 1\n if new_hrs >= 12:\n period = \"PM\"\n if new_hrs >= 13:\n new_hrs -= 12\n \n elif period == \"PM\":\n while new_hrs >= 24:\n new_hrs -= 24\n days_gone += 1\n if new_hrs >= 12:\n period = \"AM\"\n days_gone += 1\n if new_hrs >= 13:\n new_hrs -= 12\n\n \n if actual_day != None:\n actual_day = actual_day.title()\n new_day = next_day(actual_day, days_gone)\n new_time = f\"{new_hrs}:{new_min} {period}, {new_day}\"\n else:\n new_time = f\"{new_hrs}:{new_min} {period}\"\n \n if days_gone == 1:\n new_time += \" (next day)\"\n elif days_gone > 1:\n new_time += f\" ({days_gone} days later)\"\n\n return new_time\n"
},
{
"alpha_fraction": 0.5500349998474121,
"alphanum_fraction": 0.5570328831672668,
"avg_line_length": 24.51785659790039,
"blob_id": "10fd3bcbee645347de0cc33cc961587f4b16c5fa",
"content_id": "51ba356e45ed9715c561b42172d34ba9874724c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1429,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 56,
"path": "/scientific-computing-with-python/boilerplate-polygon-area-calculator/shape_calculator.py",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "class Rectangle:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n def set_width(self, width):\n self.width = width\n\n def set_height(self, height):\n self.height = height\n\n def get_area(self):\n return self.width * self.height\n \n def get_perimeter(self):\n perimeter = (self.width * 2) + (self.height * 2)\n return perimeter\n \n def get_diagonal(self):\n diagonal = (self.width ** 2 + self.height ** 2) ** 0.5\n return diagonal\n \n def get_picture(self):\n if self.width > 50 or self.height > 50:\n return \"Too big for picture.\"\n else: picture = \"\"\n\n for line in range(self.height):\n picture += \"*\" * self.width + \"\\n\"\n return picture\n \n def get_amount_inside(self, other):\n amount_inside = self.get_area() / other.get_area()\n return int(amount_inside)\n \n def __str__(self):\n return f\"Rectangle(width={self.width}, height={self.height})\"\n\n\n\nclass Square(Rectangle):\n def __init__(self, length):\n super().__init__(length, length)\n\n def set_side(self, length):\n self.height = length\n self.width = length\n \n def set_height(self, length):\n self.set_side(length)\n \n def set_width(self, length):\n self.set_side(length)\n\n def __str__(self):\n return f\"Square(side={self.width})\"\n"
},
{
"alpha_fraction": 0.5411184430122375,
"alphanum_fraction": 0.5460526347160339,
"avg_line_length": 26.0222225189209,
"blob_id": "1a5227213e5e3a18ee2121d42b22ace3362f4db8",
"content_id": "84f4eb9f49a5c93a32517e40baad1bea3cf26c8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1216,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 45,
"path": "/scientific-computing-with-python/boilerplate-probability-calculator/prob_calculator.py",
"repo_name": "LucasAlexander13/free-code-camp",
"src_encoding": "UTF-8",
"text": "import copy\nimport random\n# Consider using the modules imported above.\n\nclass Hat():\n def __init__(self, **kwargs):\n self.contents = []\n for key, value in kwargs.items():\n for i in range(value):\n self.contents.append(key)\n\n def draw(self, number):\n if number > len(self.contents):\n return self.contents\n else:\n removed_list = []\n \n for i in range(number):\n removed = random.choice(self.contents)\n removed_list.append(removed)\n \n index = self.contents.index(removed)\n self.contents.pop(index)\n \n return removed_list\n\ndef experiment(hat, expected_balls, num_balls_drawn, num_experiments):\n sucesses = 0\n\n for experiment in range(num_experiments):\n experiment_draw = hat.draw(num_balls_drawn)\n\n good_draw = 0\n total_draw = 0\n for key, value in expected_balls.items():\n total_draw += 1\n draws = experiment_draw.count(key)\n if draws >= value:\n good_draw += 1\n \n if total_draw == good_draw:\n sucesses += 1\n \n\n return sucesses / num_experiments\n"
}
] | 6 |
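experiment() in prob_calculator.py draws from a deep copy of the hat, so repeated experiments do not drain it. A short usage sketch follows, assuming the file above is importable; the ball counts and draw numbers mirror the freeCodeCamp example, and the printed estimate varies from run to run since the generator is unseeded:

```python
# Assumes prob_calculator.py (above) is on the import path.
from prob_calculator import Hat, experiment

hat = Hat(blue=4, red=2, green=6)
probability = experiment(
    hat=hat,
    expected_balls={'blue': 2, 'green': 1},
    num_balls_drawn=4,
    num_experiments=1000)
print(probability)  # Monte Carlo estimate, roughly 0.27 for these inputs
```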
martinambition/tianchi-lung-2019 | https://github.com/martinambition/tianchi-lung-2019 | d378703c6bed544f0f4c4b735130ef689ca35a55 | de41a6157ed34ef2f84ffb03c69b9f5c99f7f68a | 6379c9fb1dfe7abbad2e26523a62743b0453e864 | refs/heads/master | 2020-06-28T08:06:13.732540 | 2019-08-02T07:18:03 | 2019-08-02T07:18:03 | 200,184,048 | 4 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6411665081977844,
"alphanum_fraction": 0.6466610431671143,
"avg_line_length": 42.01818084716797,
"blob_id": "56b33bd9b02b2d40c94794e0aaac6d095e150fd1",
"content_id": "d87c961284a885ec952d45f5993fb5aef92f2d8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2366,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 55,
"path": "/train_classification.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom config import *\nfrom resnet import Resnet\nfrom vgg import SimpleVgg\nfrom data_generator import DataGenerator\nimport time\nfrom glob import glob\nimport random\nimport os\nimport numpy as np\n\n# def flow(mode='train',name=\"lung\", batch_size=TRAIN_BATCH_SIZE):\n# PAHT= PREPROCESS_GENERATOR_CLASS_LUNG_PATH if name ==\"lung\" else PREPROCESS_GENERATOR_CLASS_MEIASTINAL_PATH\n# files = glob(PAHT+'/*_x_'+mode+'.npy')\n# #random.seed(9)\n# while True:\n# idx = random.randint(0, len(files) - 1)\n# file = files[idx]\n# name = os.path.splitext(os.path.basename(file))[0]\n# id = name.split('_')[0]\n\n# X = np.load(file)\n# y = np.load(PAHT+ '/'+id+'_y_'+mode+'.npy')\n# yield X, y\n\ndef classify_train(name,learning_rate,init_weight=None):\n print('start classify_train')\n net = Resnet()\n #net = SimpleVgg()\n model = net.get_model(learning_rate)\n if not init_weight == None:\n model.load_weights(init_weight)\n model.summary()\n generator = DataGenerator(name=name)\n run = '{}-{}-{}'.format(name, time.localtime().tm_hour, time.localtime().tm_min)\n log_dir = CLASSIFY_LOG_DIR.format(run)\n check_point = log_dir + '/'+name+'_checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'\n\n print(\"classify train round {}\".format(run))\n tensorboard = TensorBoard(log_dir=log_dir, write_graph=False)\n checkpoint = ModelCheckpoint(filepath=check_point, monitor='val_loss', verbose=1, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', patience=TRAIN_EARLY_STOPPING, verbose=1)\n\n model.fit_generator(generator.flow_classfication(mode='train'), steps_per_epoch=TRAIN_STEPS_PER_EPOCH,\n validation_data=generator.flow_classfication(mode='val'), validation_steps=TRAIN_VALID_STEPS,\n epochs=TRAIN_EPOCHS, verbose=1,\n callbacks=[tensorboard, checkpoint, early_stopping])\n# model.fit_generator(flow('train', name), steps_per_epoch=TRAIN_STEPS_PER_EPOCH,\n# validation_data=flow('val',name), validation_steps=TRAIN_VALID_STEPS,\n# epochs=TRAIN_EPOCHS, verbose=1,\n# callbacks=[tensorboard, checkpoint, early_stopping])\n \n\nif __name__ == '__main__':\n classify_train()\n"
},
{
"alpha_fraction": 0.4840257167816162,
"alphanum_fraction": 0.5026211738586426,
"avg_line_length": 45.482757568359375,
"blob_id": "48b062fec458f20d35026a321bf7a1814e8d06a3",
"content_id": "7135d6a2373e44c209efe60c87f809cd1fd73ead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20322,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 435,
"path": "/predict.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from config import *\nfrom unet import UNet\nfrom resnet import Resnet\nfrom skimage import morphology, measure, segmentation,filters\nimport scipy.ndimage\nimport glob\nimport os\nimport pickle\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom config import *\nfrom tqdm import tqdm\nimport SimpleITK as sitk\n\ndef find_all_sensitive_point():\n files = glob.glob(TEST_FOLDER+\"/*.mhd\")\n columns = ['seriesuid', 'coordX', 'coordY', 'coordZ']\n found_record = pd.DataFrame(columns=columns)\n for index,file in enumerate(tqdm(files)):\n seriesuid = os.path.splitext(os.path.basename(file))[0]\n itk_img = sitk.ReadImage(file)\n img_array = sitk.GetArrayFromImage(itk_img) # indexes are z,y,x (notice the ordering)\n img_array = np.transpose(img_array, (2, 1, 0)) # (x, y, z)\n origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)\n spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)\n centers = find_sensitive_point_from_one_lung(img_array)\n center_in_world = centers*spacing\n for cindex in range(center_in_world.shape[0]):\n c_in_w = center_in_world[cindex]\n new_row = pd.DataFrame([[ seriesuid,c_in_w[0],c_in_w[1],c_in_w[2] ]], columns=columns)\n found_record = found_record.append(new_row, ignore_index=True)\n found_record.to_csv('./output/sensitive_point.csv',index=False)\n \ndef find_sensitive_point_from_one_lung(ret_img):\n area_threshold = 10000\n threshold = 299\n temp_img = ret_img.copy()\n\n #Clear Bound\n mask = temp_img>threshold\n #mask = morphology.binary_erosion(mask, selem=np.ones((2, 1, 1)))#binary_opening dilation\n mask = morphology.binary_dilation(mask, selem=np.ones((2, 2, 2)))#binary_opening dilation\n #edges = filters.hessian(mask)\n mask = scipy.ndimage.binary_fill_holes(mask)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n for r in regions:\n if r.area>area_threshold:\n for c in r.coords:\n temp_img[c[0], c[1], c[2]] = 0\n #\n mask = temp_img==300\n mask = morphology.dilation(mask, np.ones([3, 3, 3]))\n mask = morphology.dilation(mask, np.ones([3, 3, 3]))\n mask = morphology.erosion(mask, np.ones([3, 3, 3]))\n centers = []\n for prop in regions:\n B = prop.bbox\n if B[3] - B[0] > 2 and B[4] - B[1] > 2 and B[5] - B[2] > 2: # ignore too small focus\n x = int((B[3] + B[0]) / 2.0)\n y = int((B[4] + B[1]) / 2.0)\n z = int((B[5] + B[2]) / 2.0)\n centers.append(np.array([x, y, z]))\n return np.array(centers)\n\ndef predict_test(name='lung',mode='test',seg_model_path=SEG_LUNG_TRAIN_WEIGHT,class_model_path=CLASS_LUNG_TRAIN_WEIGHT,\n seg_thresh_hold=0.8,limit = [0,0]):\n detect_net = UNet()\n class_net = Resnet()\n\n detect_model = detect_net.get_model(0.1)\n detect_model.load_weights(seg_model_path)\n class_model = class_net.get_model(0.1)\n class_model.load_weights(class_model_path)\n\n columns = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'class', 'probability']\n df = pd.DataFrame(columns=columns)\n for img, meta in get_files(name,mode):\n count = 0\n cubs = []\n cub_sizes = []\n for w in range(limit[0], img.shape[0]-limit[0], 32):\n for h in range(limit[1], img.shape[1]-limit[1], 32):\n for d in range(0, img.shape[2], 32):\n if d + INPUT_DEPTH > img.shape[2]:\n d = img.shape[2] - INPUT_DEPTH\n if h + INPUT_HEIGHT > img.shape[1]:\n h = img.shape[1] - INPUT_HEIGHT\n if w + INPUT_WIDTH > img.shape[0]:\n w = img.shape[0] - INPUT_WIDTH\n cub = img[w:w + INPUT_WIDTH, h:h + INPUT_HEIGHT, d:d + INPUT_DEPTH]\n \n if np.all(cub == ZERO_CENTER):\n continue\n \n #batch_cub = cub[np.newaxis, ..., 
np.newaxis]\n cubs.append(cub)\n cub_sizes.append([w, h, d])\n for k in range(0,len(cub_sizes),16):\n t = 16\n if k + 16>= len(cub_sizes):\n t = len(cub_sizes) - k \n \n batch_cub = np.array(cubs[k:t+k])\n batch_cub_sizes = cub_sizes[k:t+k]\n \n batch_cub = batch_cub[..., np.newaxis]\n pre_y_batch = detect_model.predict(batch_cub)\n for k in range(pre_y_batch.shape[0]):\n pre_y = pre_y_batch[k, :, :, :, 0] > seg_thresh_hold\n #print('predicted pix:'+ str(np.sum(pre_y)))\n if np.sum(pre_y) > 0:\n crops, crop_centers,diameter,bboxes = crop_for_class(img, pre_y, np.array(batch_cub_sizes[k]))\n #print('find:'+str(len(crop_centers)))\n for i, center in enumerate(crop_centers):\n crop = crops[i]\n crop_cub = crop[np.newaxis,...,np.newaxis]\n class_type = class_model.predict(crop_cub)\n class_type= class_type[0]\n index = np.argmax(class_type)\n if index >0 :\n #print('Add one')\n location = meta['origin']+center\n new_row = pd.DataFrame([[meta['seriesuid'],location[0],location[1],location[2],\n label_softmax_reverse[index],class_type[index]]], columns=columns)\n df = df.append(new_row, ignore_index=True)\n df.to_csv('./output/predict_'+name+'_'+mode+'.csv', index=False)\n print('finished')\n \n \ndef predict_box(start,class_model,columns,df):\n step_w = CLASSIFY_INPUT_WIDTH\n step_h = CLASSIFY_INPUT_HEIGHT\n step_d = CLASSIFY_INPUT_DEPTH\n test_files_path = TEST_FOLDER+ \"/*.mhd\"\n test_files = glob.glob(test_files_path)\n total_step = len(test_files)\n print(\"total:\"+str(total_step))\n pbar = tqdm(total=total_step)\n count =0\n for img, meta in get_test_file():\n pbar.update(1)\n for w in range(start[0], img.shape[0], step_w):\n for h in range(start[1], img.shape[1], step_h):\n for d in range(start[2], img.shape[2], step_d):\n if d + step_d > img.shape[2]:\n d = img.shape[2] - step_d - 1\n if h + step_h > img.shape[1]:\n h = img.shape[1] - step_h - 1\n if w + step_w > img.shape[0]:\n w = img.shape[0] - step_w - 1\n \n if count % 16 == 0:\n X = np.zeros((16, CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))\n seriesuids = []\n points = []\n location = meta['origin'] + np.array([w + step_w / 2, h + step_h / 2, d + step_d / 2])\n seriesuids.append(meta['seriesuid'])\n points.append(location)\n X[count%16, :, :, :, 0] = img[w:w + step_w, h:h + step_h, d:d + step_d]\n \n if (count % 16) == 15:\n class_type = class_model.predict(X)\n for k in range(class_type.shape[0]):\n cur_class = class_type[k]\n index = np.argmax(cur_class)\n if index>0 and cur_class[index] > 0.5:\n new_row = pd.DataFrame([[seriesuids[k], points[k][0], points[k][1], points[k][2],\n label_softmax_reverse[index], cur_class[index]]],\n columns=columns)\n df = df.append(new_row, ignore_index=True)\n \n count= count+1\n return df\n \n\ndef predict_test_only_classification():\n net = Resnet()\n name = 'Resnet'\n model = net.get_model()\n model.load_weights(CLASS_MODEL_PATH)\n\n columns = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'class', 'probability']\n df = pd.DataFrame(columns=columns)\n\n #Use two round to detect. 
same step,different start point\n print('Round 1')\n df = predict_box(np.array([16, 16, 16]), model, columns, df)\n print('Round 2')\n df = predict_box(np.array([0, 0, 0]), model, columns, df)\n\n df.to_csv('./output/result_only_class.csv', index=False)\n\ndef crop_roi_cub(cub,orign):\n# centers = []\n# size = 8\n# for w in range(0, 64, size):\n# for h in range(0, 64, size):\n# for d in range(0, 64, size):\n# small_cub = cub[w:w + size, h:h + size, d:d + size]\n# binary = small_cub > np.percentile(small_cub,80)\n# labels = measure.label(binary)\n# regions = measure.regionprops(labels)\n# labels = [(r.area, r.bbox) for r in regions]\n \n# if len(labels)>0:\n# labels.sort(reverse=True)\n# B = labels[0][1]\n# #if B[3] - B[0] > 2 and B[4] - B[1] > 2 and B[5] - B[2] > 2: # ignore too small focus\n# x = int((B[3] + B[0]) / 2.0)\n# y = int((B[4] + B[1]) / 2.0)\n# z = int((B[5] + B[2]) / 2.0)\n# centers.append(np.array([x+w, y+h, z+d])+orign)\n# return centers\n# xs,ys,zs =np.where(cub > np.mean())\n# centers=[]\n# for i in range(len(xs)):\n# x = xs[i]\n# y = ys[i]\n# z = zs[i]\n# centers.append(np.array([x, y, z])+orign)\n# return centers\n binary = cub > 0\n# binary = morphology.dilation(binary, np.ones([2, 2, 2]))\n# binary = morphology.dilation(binary, np.ones([3, 3, 3]))\n# binary = morphology.erosion(binary, np.ones([2, 2, 2]))\n labels = measure.label(binary)\n regions = measure.regionprops(labels)\n centers = []\n for prop in regions:\n if prop.area > 100:\n B = prop.bbox\n #if B[3] - B[0] > 2 and B[4] - B[1] > 2 and B[5] - B[2] > 2: # ignore too small focus\n x = int((B[3] + B[0]) / 2.0)\n y = int((B[4] + B[1]) / 2.0)\n z = int((B[5] + B[2]) / 2.0)\n centers.append(np.array([x, y, z])+orign)\n return centers\n\ndef crop_for_class(img_arr,pre_y,orign,mean_val=-0.25):\n class_boundary = np.array([CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH])\n# pre_y = morphology.dilation(pre_y, np.ones([3, 3, 3]))\n# pre_y = morphology.dilation(pre_y, np.ones([3, 3, 3]))\n# pre_y = morphology.erosion(pre_y, np.ones([3, 3, 3]))\n labels = measure.label(pre_y, connectivity=2)\n regions = measure.regionprops(labels)\n centers = []\n bboxes= []\n spans = []\n crops= []\n crop_centers = []\n for prop in regions:\n B = prop.bbox\n if B[3] - B[0] > 2 and B[4] - B[1] > 2 and B[5] - B[2] > 2: # ignore too small focus\n x = int((B[3] + B[0]) / 2.0)\n y = int((B[4] + B[1]) / 2.0)\n z = int((B[5] + B[2]) / 2.0)\n span = np.array([int(B[3] - B[0]), int(B[4] - B[1]), int(B[5] - B[2])])\n \n bcub = img_arr[B[0]+orign[0]:B[3]+orign[0],B[1]+orign[1]:B[4]+orign[1],B[2]+orign[2]:B[5]+orign[2]]\n# if np.mean(bcub) < mean_val:\n# continue\n \n spans.append(span)\n centers.append(np.array([x, y, z]))\n bboxes.append(B)\n for idx, bbox in enumerate(bboxes):\n crop = np.zeros(class_boundary, dtype=np.float32)\n crop_center = centers[idx]\n crop_center = crop_center + orign\n half = class_boundary / 2\n crop_center = check_center(class_boundary, crop_center, img_arr.shape)\n crop = img_arr[int(crop_center[0] - half[0]):int(crop_center[0] + half[0]), \\\n int(crop_center[1] - half[1]):int(crop_center[1] + half[1]), \\\n int(crop_center[2] - half[2]):int(crop_center[2] + half[2])]\n \n crops.append(crop)\n crop_centers.append(crop_center)\n return crops,crop_centers,spans,bboxes\n\ndef generate_detect_result(name='lung',mode='test',model_path=SEG_LUNG_TRAIN_WEIGHT,thresh_hold=0.8,limit = [0,0]):\n detect_net = UNet()\n detect_model = detect_net.get_model()\n detect_model.load_weights(model_path)\n columns = 
['seriesuid', 'coordX', 'coordY', 'coordZ' ,'diameterX','diameterY','diameterZ']\n df = pd.DataFrame(columns=columns)\n for img, meta in get_files(name,mode):\n for w in range(limit[0], img.shape[0]-limit[0], INPUT_WIDTH):\n for h in range(limit[1], img.shape[1]-limit[0], INPUT_HEIGHT):\n for d in range(0, img.shape[2], INPUT_DEPTH):\n if d + INPUT_DEPTH > img.shape[2]:\n d = img.shape[2] - INPUT_DEPTH\n if h + INPUT_HEIGHT > img.shape[1]:\n h = img.shape[1] - INPUT_HEIGHT\n if w + INPUT_WIDTH > img.shape[0]:\n w = img.shape[0] - INPUT_WIDTH\n \n cub = img[w:w + INPUT_WIDTH, h:h + INPUT_HEIGHT, d:d + INPUT_DEPTH]\n \n batch_cub = cub[np.newaxis, ..., np.newaxis]\n pre_y = detect_model.predict(batch_cub)\n pre_y = pre_y[0, :, :, :, 0] > thresh_hold\n #print('predicted pix:'+ str(np.sum(pre_y)))\n if np.sum(pre_y) > 0:\n crops, crop_centers,diameter,bboxes = crop_for_class(img, pre_y, np.array([w, h, d]),mean_val)\n print('find:'+str(len(crop_centers)))\n for i, center in enumerate(crop_centers):\n #location = meta['origin']+center\n location = center\n print(center)\n \n new_row = pd.DataFrame([[meta['seriesuid'], location[0], location[1], location[2],diameter[i][0],diameter[i][1],diameter[i][2]]],columns=columns)\n df = df.append(new_row, ignore_index=True)\n \n df.to_csv('./output/predict_'+name+'_'+mode+'.csv', index=False)\n print('finished')\n\ndef check_detect_result_accuracy(name='lung',model_path=SEG_LUNG_TRAIN_WEIGHT,thresh_hold=0.8,limit = [0,0]):\n mode='train' \n df = pd.read_csv(ANNOTATION_FILE)\n detect_net = UNet()\n detect_model = detect_net.get_model()\n detect_model.load_weights(model_path)\n count = 0\n postive_focus= []\n negative_focus = []\n total_focus = 0\n postive_focus_set =set()\n for img, meta in get_files(name,mode):\n if count == 10:\n break\n count+=1\n seriesuid = meta['seriesuid']\n origin = meta['origin']\n \n if name == 'lung':\n focus_records = df[(df['seriesuid'] == int(seriesuid)) & ((df['label'] == 1) | (df['label'] == 5))]\n else:\n focus_records = df[(df['seriesuid'] == int(seriesuid)) & (df['label'] > 5 )]\n \n total_focus += focus_records.shape[0]\n focus_records['coordX'] = focus_records['coordX'] - origin[0] \n focus_records['coordY'] = focus_records['coordY'] - origin[1] \n focus_records['coordZ'] = focus_records['coordZ'] - origin[2]\n focus_records['radiusZ'] = focus_records['diameterZ']//2 \n focus_records['radiusY'] = focus_records['diameterY']//2 \n focus_records['radiusX'] = focus_records['diameterX']//2 \n \n step = 32\n for w in range(limit[0], img.shape[0]-limit[0], step):\n for h in range(limit[1], img.shape[1]-limit[0], step):\n for d in range(0, img.shape[2], step):\n if d + INPUT_DEPTH > img.shape[2]:\n d = img.shape[2] - INPUT_DEPTH\n if h + INPUT_HEIGHT > img.shape[1]:\n h = img.shape[1] - INPUT_HEIGHT\n if w + INPUT_WIDTH > img.shape[0]:\n w = img.shape[0] - INPUT_WIDTH\n cub = img[w:w + INPUT_WIDTH, h:h + INPUT_HEIGHT, d:d + INPUT_DEPTH]\n mean_val = np.percentile(cub,80)\n batch_cub = cub[np.newaxis, ..., np.newaxis]\n pre_y = detect_model.predict(batch_cub)\n pre_y = pre_y[0, :, :, :, 0] > thresh_hold\n \n if np.sum(pre_y) > 0:\n crops, crop_centers,diameter,bboxes = crop_for_class(img, pre_y, np.array([w, h, d]))\n #crop_centers_roi = crop_roi_cub(cub,np.array([w, h, d]))\n #print(\"Found ROI\",len(crop_centers_roi))\n #crop_centers = crop_centers_roi + crop_centers \n for i, center in enumerate(crop_centers):\n found_focus = False\n distances = []\n for fi,focus in focus_records.iterrows():\n anno_focus_center = 
np.array([focus['coordX'],focus['coordY'] ,focus['coordZ'] ])\n #distances.append(np.linalg.norm(center-anno_focus_center))\n if center[2] >= (focus['coordZ'] - focus['radiusZ']) and center[2] <= (focus['coordZ'] + focus['radiusZ']):\n if center[0] >= (focus['coordX'] - focus['radiusX']) and center[0] <= (focus['coordX'] + focus['radiusX']):\n if center[1] >= (focus['coordY'] - focus['radiusY']) and center[1] <= (focus['coordY'] + focus['radiusY']):\n \n postive_focus_set.add(str(seriesuid)+'_'+str(fi)+'_'+str(focus['label']))\n found_focus = True\n if found_focus:\n postive_focus.append(center)\n else:\n #print(min(distances))\n negative_focus.append(center)\n \n print('Found Right Focus:'+str(len(postive_focus_set)))\n print('Found Wrong Focus:'+str(len(negative_focus)))\n print('Total Ground-truth Focus:'+str(total_focus))\n print('finished')\n return postive_focus_set\n \ndef check_center(size,crop_center,image_shape):\n '''\n @size:所切块的大小\n @crop_center:待检查的切块中心\n @image_shape:原图大小\n Return:检查修正后切块中心\n '''\n half=size/2\n margin_min=crop_center-half#检查下界\n margin_max=crop_center+half-image_shape#检查上界\n for i in range(3):#如有超出,对中心进行修正\n if margin_min[i]<0:\n crop_center[i]=crop_center[i]-margin_min[i]\n if margin_max[i]>0:\n crop_center[i]=crop_center[i]-margin_max[i]\n return crop_center\n\ndef get_files(focus_type,mode):\n orgin_folder = TRAIN_FOLDER if mode == 'train' else TEST2_FOLDER\n process_parent_folder = PREPROCESS_PATH if mode == 'train' else TEST2_PROCESS_PATH\n processed_folder = process_parent_folder+'/lung' if focus_type == 'lung' else process_parent_folder+'/mediastinal'\n \n test_files = orgin_folder + \"/*.mhd\"\n files = glob.glob(test_files)\n print('total:'+str(len(files)))\n for index,file in enumerate(files):\n seriesuid = os.path.splitext(os.path.basename(file))[0]\n print('process:'+str(index)+', seriesuid:'+seriesuid)\n h5_file = processed_folder+\"/\"+seriesuid+\".h5\"\n meta_file = process_parent_folder+'/meta'+\"/\"+seriesuid+\".meta\"\n \n with open(meta_file, 'rb') as f:\n meta = pickle.load(f)\n ret_img = None\n with h5py.File(h5_file, 'r') as hf:\n ret_img = hf['img'].value\n \n yield ret_img, meta\n\nif __name__ == '__main__':\n generate_false_positive()\n"
},
{
"alpha_fraction": 0.49932560324668884,
"alphanum_fraction": 0.520906388759613,
"avg_line_length": 37.216495513916016,
"blob_id": "94c777131b54f10aa7dba7f9652fa6c554a7634e",
"content_id": "2c613f9509edbc4ea8fd6c551a65546c41a8eb48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3707,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 97,
"path": "/visual_utils.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport numpy as np\n\nPLOT_NUM = 16\nclass VisualUtil():\n \n @staticmethod\n def plot_all_slices(img, title='', box=None):\n print(title)\n img.shape[2]\n fig, axs = plt.subplots(img.shape[2]//4,4, figsize=(16, img.shape[2]//4*4), sharex=True, sharey=True)\n for index,ax in enumerate(axs.flat):\n ax.imshow(np.transpose(img[:,:,index],(1,0)), cmap='gray')\n @staticmethod\n def plot_all_slices_with_mask(img,focus_records, title=''):\n print(title)\n fig, axs = plt.subplots(img.shape[2]//4,4, figsize=(16, img.shape[2]//4*4), sharex=True, sharey=True)\n for index,ax in enumerate(axs.flat):\n ax.imshow(np.transpose(img[:,:,index],(1,0)), cmap='gray')\n for fi,focus in focus_records.iterrows():\n radiusZ = focus['diameterZ']//2 \n radiusY = focus['diameterY']//2 \n radiusX = focus['diameterX']//2 \n #if index == focus['coordZ']:\n if index >= (focus['coordZ'] - radiusZ) and index <= (focus['coordZ'] + radiusZ):\n ax.add_patch(patches.Rectangle((focus['coordX']-radiusX, focus['coordY']-radiusY),\n focus['diameterX'],focus['diameterY'], \n linewidth=1,edgecolor='r',facecolor='none'))\n \n @staticmethod\n def plot_slices(img, title='', box=None):\n print(title)\n\n fig, axs = plt.subplots(4, 4, figsize=(16, 16), sharex=True, sharey=True)\n c, c_step = 0, img.shape[2] // PLOT_NUM\n #\n # c = img.shape[2] // 4\n # c_step = c_step // 2\n for ax in axs.flat:\n ax.imshow(img[:,:,c], cmap='gray')\n\n if box:\n ax.add_patch(patches.Rectangle((box['x'], box['y']),box['w'] * 4,box['h'] * 4, linewidth=1,edgecolor='r',facecolor='none'))\n c += c_step\n\n axs[0,0].set(title=title)\n plt.show()\n\n @staticmethod\n def plot_middle_slices_comparison(imgs):\n shape = None\n for img in imgs:\n if shape is None:\n shape = img.shape\n else:\n if shape != img.shape:\n print('plot_middle_slices_comparison with images have different size, former {}, now {}'.format(shape, img.shape))\n return\n\n l = len(imgs)\n row = 3\n fig, axs = plt.subplots(row, l, figsize=(10, 15), sharex=True, sharey=True)\n for r in range(row):\n for i in range(l):\n offset = (r - 1) * 3\n depth = int(imgs[i].shape[2] / 2 + offset)\n axs[r][i].imshow(imgs[i][:, :, depth], cmap='gray')\n\n plt.show()\n\n @staticmethod\n def plot_comparison(X, y, pred, title='', box=None):\n print(title)\n\n assert X.shape[2] == y.shape[2] == pred.shape[2]\n z = X.shape[2] // 2\n\n fig, axs = plt.subplots(1, 3, figsize=(20, 10), sharex=True, sharey=True)\n axs[0].imshow(X[:,:,z], cmap='gray')\n axs[1].imshow(y[:,:,z], cmap='gray')\n axs[2].imshow(pred[:,:,z], cmap='gray')\n\n if box:\n rec = patches.Rectangle((box['x'], box['y']), box['w'] * 4, box['h'] * 4, linewidth=1, edgecolor='r', facecolor='none')\n axs[0].add_patch(rec)\n axs[1].add_patch(rec)\n axs[2].add_patch(rec)\n\n axs[0].set(title='X')\n axs[1].set(title='y')\n axs[2].set(title='pred')\n plt.show()\n"
},
{
"alpha_fraction": 0.67668217420578,
"alphanum_fraction": 0.7418535947799683,
"avg_line_length": 28.92405128479004,
"blob_id": "f748729c5a982f1a05c5e5025a30e4c136833281",
"content_id": "43515569eff6fc90d3d28e6a601070a1b1f1b1d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2505,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 79,
"path": "/config.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "#肺窗\nLUNG_MIN_BOUND = -1000.0\nLUNG_MAX_BOUND = 400.0\n\n#纵膈窗\nCHEST_MIN_BOUND = 40-350/2\nCHEST_MAX_BOUND = 40+350/2\n\nBINARY_THRESHOLD = -550\n\nTRAIN_SEG_LEARNING_RATE = 1e-4\nINPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL, OUTPUT_CHANNEL = 64, 64, 64, 1, 1\n\n#4个疾病+1个unknow\nCLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL,CLASSIFY_OUTPUT_CHANNEL \\\n = 32, 32, 32, 1, 5\n\n#路径\nCT_PATH = '../dataset/*/*.mhd'\nTEST_FOLDER='../dataset/testset'\nTEST2_FOLDER='../dataset/testset2'\nTEST2_PROCESS_PATH = './temp/test'\n\nTRAIN_FOLDER='../dataset/trainset'\nANNOTATION_FILE = \"../dataset/chestCT_round1_annotation.csv\"\nLOG_BASE_PATH = './output/training_logs'\nSEG_LOG_DIR = LOG_BASE_PATH + '/seg-run-{}'\nCLASSIFY_LOG_DIR = LOG_BASE_PATH + '/classify-run-{}'\nPREPROCESS_PATH = './temp/preprocess'\nPREPROCESS_PATH_LUNG= './temp/preprocess/lung'\nPREPROCESS_PATH_MEIASTINAL= './temp/preprocess/mediastinal'\nPREPROCESS_PATH_META = './temp/preprocess/meta'\nPREPROCESS_GENERATOR_LUNG_PATH = './temp/generator/seg/lung'\nPREPROCESS_GENERATOR_MEIASTINAL_PATH = './temp/generator/seg/mediastinal'\n\nPREPROCESS_GENERATOR_CLASS_LUNG_PATH = './temp/generator/class/lung'\nPREPROCESS_GENERATOR_CLASS_MEIASTINAL_PATH = './temp/generator/class/mediastinal'\n\n\nlabel_dic = {1:u'结节', 5:u'索条',31:u'动脉硬化或钙化',32:u'淋巴结钙化'}\nlabel_softmax= {1:1,5:2,31:3,32:4}\nlabel_softmax_reverse = {0:0,1:1,2:5,3:31,4:32}\n\n#分割正负样本比列, 1:3\n#分类正负样本分割 1:1\nTRAIN_SEG_POSITIVE_SAMPLE_RATIO = 0.6\nTRAIN_CLASSIFY_POSITIVE_SAMPLE_RATIO = 0.5\n\n#分割随机漂移范围\nENABLE_RANDOM_OFFSET = True\nTRAIN_SEG_SAMPLE_RANDOM_OFFSET = 12\n#分类随机漂移范围。分类的格子要小一半。\nTRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET = 4\n\n#Evaluate Frequency\nTRAIN_SEG_EVALUATE_FREQ = 10\n\n#Train param\nTRAIN_EPOCHS = 100000000\nTRAIN_EARLY_STOPPING = 10\nTRAIN_BATCH_SIZE = 16\nTRAIN_VALID_STEPS = 160\nTRAIN_STEPS_PER_EPOCH = 1200\n\n\nDEBUG_PLOT_WHEN_EVALUATING_SEG = False\n\n# ResNet\nRESNET_BLOCKS = 16\nRESNET_SHRINKAGE_STEPS = 4\nRESNET_INITIAL_FILTERS = 16\nTRAIN_CLASSIFY_LEARNING_RATE = 1e-4\n\nZERO_CENTER = 0.25\n\n#Pretrain weight\nSEG_LUNG_TRAIN_WEIGHT= './output/training_logs/seg-run-lung-13-13/lung_checkpoint-04-0.6646.hdf5'\nSEG_MEDIASTINAL_TRAIN_WEIGHT='/output/training_logs/seg-run-mediastinal-16-16/mediastinal_checkpoint-07-0.5245.hdf5'\nCLASS_LUNG_TRAIN_WEIGHT='./output/training_logs/classify-run-lung-2-18/lung_checkpoint-01-2.3591.hdf5'"
},
{
"alpha_fraction": 0.5364203453063965,
"alphanum_fraction": 0.5968329906463623,
"avg_line_length": 52.30179214477539,
"blob_id": "ef802190f9f6225271b086d8207e1b0fe74ca246",
"content_id": "fc147c9b149be4b4cee6e7e346a58592f85593a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20840,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 391,
"path": "/unet.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from keras.models import Model\nfrom keras.layers import Input, Conv3D, MaxPooling3D, UpSampling3D, concatenate, Dropout,BatchNormalization\nfrom keras.callbacks import Callback\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom config import *\nfrom skimage import morphology, measure, segmentation\nfrom keras.utils import multi_gpu_model\n# from visual_utils import VisualUtil\nimport numpy as np\n\nSMOOTH = 1.0\n\nclass UNet():\n def __init__(self):\n pass\n\n @staticmethod\n def dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)\n\n @staticmethod\n def dice_coef_loss(y_true, y_pred):\n return 1 - UNet.dice_coef(y_true, y_pred)\n\n @staticmethod\n def metrics_true_sum(y_true, y_pred):\n return K.sum(y_true)\n\n @staticmethod\n def metrics_pred_sum(y_true, y_pred):\n return K.sum(y_pred)\n\n @staticmethod\n def metrics_pred_max(y_true, y_pred):\n return K.max(y_pred)\n\n @staticmethod\n def metrics_pred_min(y_true, y_pred):\n return K.min(y_pred)\n\n @staticmethod\n def metrics_pred_mean(y_true, y_pred):\n return K.mean(y_pred)\n\n# def get_model(self,learning_rate =TRAIN_SEG_LEARNING_RATE ,enable_drop_out=False):\n# inputs = Input((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL))\n\n# conv1 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\n# conv1 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\n# pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)\n# conv2 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\n# conv2 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\n# pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n# conv3 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\n# conv3 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\n# pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)\n# conv4 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\n# conv4 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\n# drop4 = Dropout(0.5)(conv4)\n# pool4 = MaxPooling3D(pool_size=(2, 2, 2))(drop4)\n\n# conv5 = Conv3D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\n# conv5 = Conv3D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\n# drop5 = Dropout(0.5)(conv5)\n\n# up6 = Conv3D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n# UpSampling3D(size=(2, 2, 2))(drop5))\n# merge6 = concatenate([drop4, up6], axis=3)\n# conv6 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\n# conv6 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\n\n# up7 = Conv3D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n# UpSampling3D(size=(2, 2, 2))(conv6))\n# merge7 = concatenate([conv3, up7], axis=3)\n# conv7 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\n# conv7 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\n\n# up8 = Conv3D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n# 
UpSampling3D(size=(2, 2, 2))(conv7))\n#         merge8 = concatenate([conv2, up8], axis=3)\n#         conv8 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\n#         conv8 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\n\n#         up9 = Conv3D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n#             UpSampling3D(size=(2, 2, 2))(conv8))\n#         merge9 = concatenate([conv1, up9], axis=3)\n#         conv9 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\n#         conv9 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n#         conv9 = Conv3D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n#         conv10 = Conv3D(1, 1, activation='sigmoid')(conv9)\n\n#         model = Model(inputs=inputs, outputs=conv10)\n#         model.compile(optimizer=Adam(lr=TRAIN_SEG_LEARNING_RATE), loss=UNet.dice_coef_loss,\n#                       metrics=[UNet.dice_coef, UNet.metrics_true_sum, UNet.metrics_pred_sum,\n#                                UNet.metrics_pred_max, UNet.metrics_pred_min,\n#                                UNet.metrics_pred_mean])\n#         return model\n    def get_complex_model(self,enable_drop_out=False):\n        inputs = Input((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL))\n\n        conv1 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\n        conv1 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\n        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)\n        conv2 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\n        conv2 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\n        pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n        conv3 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\n        conv3 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\n        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)\n\n        conv4 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\n        conv4 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\n        drop4 = Dropout(0.5)(conv4)\n        pool4 = MaxPooling3D(pool_size=(2, 2, 2))(drop4)\n\n        conv5 = Conv3D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\n        conv5 = Conv3D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\n        drop5 = Dropout(0.5)(conv5)\n\n        up6 = Conv3D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n            UpSampling3D(size=(2, 2, 2))(drop5))\n        merge6 = concatenate([drop4, up6], axis=-1)  # concatenate skip connections along the channel axis of the 5D tensors\n        conv6 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\n        conv6 = Conv3D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\n\n        up7 = Conv3D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n            UpSampling3D(size=(2, 2, 2))(conv6))\n        merge7 = concatenate([conv3, up7], axis=-1)\n        conv7 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\n        conv7 = Conv3D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\n\n        up8 = Conv3D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n            UpSampling3D(size=(2, 2, 2))(conv7))\n        merge8 = concatenate([conv2, up8], axis=-1)\n        conv8 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\n        conv8 = Conv3D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\n\n        up9 = Conv3D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n            UpSampling3D(size=(2, 2, 2))(conv8))\n        merge9 = concatenate([conv1, up9], axis=-1)\n        conv9 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\n        conv9 = Conv3D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n        conv9 = Conv3D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n        conv10 = Conv3D(1, 1, activation='sigmoid')(conv9)\n\n        model = Model(inputs=inputs, outputs=conv10)\n        model.compile(optimizer=Adam(lr=TRAIN_SEG_LEARNING_RATE), loss=UNet.dice_coef_loss,\n                      metrics=[UNet.dice_coef, UNet.metrics_true_sum, UNet.metrics_pred_sum,\n                               UNet.metrics_pred_max, UNet.metrics_pred_min,\n                               UNet.metrics_pred_mean])\n        return model\n    def get_1024_model(self,enable_drop_out=False):\n        inputs = Input((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL))\n\n        conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)\n        conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv1)\n        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)\n\n        conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)\n        conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv2)\n        pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n\n        conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool2)\n        conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv3)\n        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)\n\n        conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(pool3)\n        conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv4)\n        pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)\n\n        conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(pool4)\n        conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(conv5)\n        pool5 = MaxPooling3D(pool_size=(2, 2, 2))(conv5)\n\n        conv5_1 = Conv3D(1024, (3, 3, 3), activation='relu', padding='same')(pool5)\n        conv5_1 = Conv3D(1024, (3, 3, 3), activation='relu', padding='same')(conv5_1)\n\n        up6_1 = concatenate([UpSampling3D(size=(2, 2, 2))(conv5_1), conv5], axis=-1)\n        conv6_1 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(up6_1)\n        conv6_1 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(conv6_1)\n\n        up6 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6_1), conv4], axis=-1)\n        conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(up6)\n        conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv6)\n\n        up7 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6), conv3], axis=-1)\n        conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(up7)\n        conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv7)\n\n        up8 = concatenate([UpSampling3D(size=(2, 2, 2))(conv7), conv2], axis=-1)\n        conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up8)\n        conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv8)\n\n        up9 = concatenate([UpSampling3D(size=(2, 2, 2))(conv8), conv1], axis=-1)\n        conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up9)\n        conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv9)\n\n        conv10 = Conv3D(OUTPUT_CHANNEL, (1, 1, 1), activation='sigmoid')(conv9)\n\n        model = Model(inputs=inputs, outputs=conv10)\n        model.compile(optimizer=Adam(lr=TRAIN_SEG_LEARNING_RATE),
loss=UNet.dice_coef_loss,\n metrics=[UNet.dice_coef, UNet.metrics_true_sum, UNet.metrics_pred_sum,\n UNet.metrics_pred_max, UNet.metrics_pred_min,\n UNet.metrics_pred_mean])\n \n return model\n def get_model_with_bn(self,learning_rate =TRAIN_SEG_LEARNING_RATE ,enable_drop_out=False,enable_bn=True):\n inputs = Input((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL))\n\n conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)\n if enable_bn:\n conv1 = BatchNormalization()(conv1)\n conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv1)\n if enable_bn:\n conv1 = BatchNormalization()(conv1)\n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)\n\n conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)\n if enable_bn:\n conv2 = BatchNormalization()(conv2)\n conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv2)\n if enable_bn:\n conv2 = BatchNormalization()(conv2)\n pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n\n conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool2)\n if enable_bn:\n conv3 = BatchNormalization()(conv3)\n conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv3)\n if enable_bn:\n conv3 = BatchNormalization()(conv3)\n pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)\n\n conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(pool3)\n if enable_bn:\n conv4 = BatchNormalization()(conv4)\n conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv4)\n if enable_bn:\n conv4 = BatchNormalization()(conv4)\n pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)\n\n conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(pool4)\n if enable_bn:\n conv5 = BatchNormalization()(conv5)\n conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(conv5)\n if enable_bn:\n conv5 = BatchNormalization()(conv5)\n\n up6 = concatenate([UpSampling3D(size=(2, 2, 2))(conv5), conv4], axis=-1)\n conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(up6)\n if enable_bn:\n conv6 = BatchNormalization()(conv6)\n conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv6)\n if enable_bn:\n conv6 = BatchNormalization()(conv6)\n \n up7 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6), conv3], axis=-1)\n conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(up7)\n if enable_bn:\n conv7 = BatchNormalization()(conv7)\n conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv7)\n if enable_bn:\n conv7 = BatchNormalization()(conv7)\n\n up8 = concatenate([UpSampling3D(size=(2, 2, 2))(conv7), conv2], axis=-1)\n conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up8)\n if enable_bn:\n conv8 = BatchNormalization()(conv8)\n conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv8)\n if enable_bn:\n conv8 = BatchNormalization()(conv8)\n \n up9 = concatenate([UpSampling3D(size=(2, 2, 2))(conv8), conv1], axis=-1)\n conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up9)\n if enable_bn:\n conv9 = BatchNormalization()(conv9)\n conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv9)\n if enable_bn:\n conv9 = BatchNormalization()(conv9)\n conv10 = Conv3D(OUTPUT_CHANNEL, (1, 1, 1), activation='sigmoid')(conv9) \n model = Model(inputs=inputs, outputs=conv10)\n# model = multi_gpu_model(model, gpus=3)\n model.compile(optimizer=Adam(lr=learning_rate), loss=UNet.dice_coef_loss,\n metrics=[UNet.dice_coef, UNet.metrics_true_sum, UNet.metrics_pred_sum,\n UNet.metrics_pred_max, 
UNet.metrics_pred_min,\n UNet.metrics_pred_mean])\n\n return model\n def get_model(self,learning_rate =TRAIN_SEG_LEARNING_RATE ,enable_drop_out=False):\n inputs = Input((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH, INPUT_CHANNEL))\n\n conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv1)\n \n pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)\n\n conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)\n\n conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)\n\n conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling3D(pool_size=(2, 2, 2))(drop4)\n\n conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = concatenate([UpSampling3D(size=(2, 2, 2))(drop5), drop4], axis=-1)\n conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same')(conv6)\n conv6 = Dropout(0.5)(conv6)\n \n up7 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6), conv3], axis=-1)\n conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv7)\n conv7 = Dropout(0.5)(conv7)\n \n up8 = concatenate([UpSampling3D(size=(2, 2, 2))(conv7), conv2], axis=-1)\n conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([UpSampling3D(size=(2, 2, 2))(conv8), conv1], axis=-1)\n conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv3D(OUTPUT_CHANNEL, (1, 1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=inputs, outputs=conv10)\n# model = multi_gpu_model(model, gpus=3)\n model.compile(optimizer=Adam(lr=learning_rate), loss=UNet.dice_coef_loss,\n metrics=[UNet.dice_coef, UNet.metrics_true_sum, UNet.metrics_pred_sum,\n UNet.metrics_pred_max, UNet.metrics_pred_min,\n UNet.metrics_pred_mean])\n\n return model\n def get_evaluator(self,generator,name):\n return UNetEvaluator(generator,name)\n\nclass UNetEvaluator(Callback):\n def __init__(self,generator,name):\n self.counter = 0\n self.generator =generator\n self.name = name\n\n def on_epoch_end(self, epoch, logs=None):\n self.counter += 1\n #if self.counter % TRAIN_SEG_EVALUATE_FREQ == 0:\n self.do_evaluate(self.model)\n\n def do_evaluate(self,model):\n print('Model evaluating')\n if callable(self.generator):\n X, y_true = next(self.generator('val',self.name))\n else:\n X, y_true = next(self.generator.flow_segmentation('val'))\n y_true = y_true.astype(np.float64)\n y_pred = model.predict(X)\n #X, y_true, y_pred = X[:, :, :,:, 0], y_true[:, :, :,:, 0], y_pred[:, :, :, :,0]\n intersection = y_true * y_pred\n recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)\n precision = (np.sum(intersection) + SMOOTH) / (np.sum(y_pred) + SMOOTH)\n print('Average recall {:.4f}, 
precision {:.4f}'.format(recall, precision))\n\n for threshold in range(0, 10, 2):\n threshold = threshold / 10.0\n pred_mask = (y_pred > threshold).astype(np.uint8)\n intersection = y_true * pred_mask\n recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)\n precision = (np.sum(intersection) + SMOOTH) / (np.sum(pred_mask) + SMOOTH)\n print(\"Threshold {}: recall {:.4f}, precision {:.4f}\".format(threshold, recall, precision))\n print(str(np.sum(pred_mask))+'/'+str(np.sum(y_true))+'/'+\n str(y_pred.shape[0]*y_pred.shape[1]*y_pred.shape[2]*y_pred.shape[3]))\n\n #regions = measure.regionprops(measure.label(y_pred))\n #print('Num of pred regions {}'.format(len(regions)))\n\n # if DEBUG_PLOT_WHEN_EVALUATING_SEG:\n # VisualUtil.plot_comparison(X, y_true, y_pred)\n # VisualUtil.plot_slices(X)\n # VisualUtil.plot_slices(y_true)\n # VisualUtil.plot_slices(y_pred)"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.7112299203872681,
"avg_line_length": 15.909090995788574,
"blob_id": "051c331ceeb8cca652c9d5f168f1cf9a3999dfae",
"content_id": "2eeae2470d5bf7fd5cdf6b6a6289fd0fa166cae5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/README.md",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "# 全球数据智能大赛(2019)——“数字人体”赛场一:肺部CT多病种智能诊断\n### 最终成绩是1600队伍中的67名。\n\n### 训练设置。\n1. 下载天池数据\n2. 修改config.py里边的\n\t* TRAIN_FOLDER\n\t* ANNOTATION_FILE\n\t* CT_PATH\n3. 运行preprocess.ipynb\n4. 运行train.ipynb\n\n"
},
{
"alpha_fraction": 0.6577181220054626,
"alphanum_fraction": 0.6577181220054626,
"avg_line_length": 20.14285659790039,
"blob_id": "7343202376a0c8ba5fa06973cfe5c1fe41b3fee7",
"content_id": "2bd0606b38a7b268f65a06b99fe3c312d123395a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 7,
"path": "/test.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "\n\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom preprocess import Preprocess\n\nif __name__ ==\"__main__\" :\n p = Preprocess()\n p.han"
},
{
"alpha_fraction": 0.6182445287704468,
"alphanum_fraction": 0.6259102821350098,
"avg_line_length": 43.220340728759766,
"blob_id": "371ea14994fcc136c76b6cec90485dfe3742d402",
"content_id": "958705dbba755ff399b4c062c12526913eb54987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 59,
"path": "/train_segmentation.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom config import *\nfrom unet import UNet\nfrom data_generator import DataGenerator\nimport time\nfrom glob import glob\nimport random\nimport os\nimport numpy as np\n\n# def flow(mode='train',name=\"lung\", batch_size=TRAIN_BATCH_SIZE):\n# PAHT= PREPROCESS_GENERATOR_LUNG_PATH if name ==\"lung\" else PREPROCESS_GENERATOR_MEIASTINAL_PATH\n# files = glob(PAHT+'/*_x_'+mode+'.npy')\n# #random.seed(9)\n# while True:\n# idx = random.randint(0, len(files) - 1)\n# file = files[idx]\n# name = os.path.splitext(os.path.basename(file))[0]\n# id = name.split('_')[0]\n\n# X = np.load(file)\n# y = np.load(PAHT+ '/'+id+'_y_'+mode+'.npy')\n \n# #Ignore negative sample\n# if np.sum(y) == 0 and random.random() < 0.8:\n# continue\n# # s = random.randint(0, 8)\n# # yield X[s:s+8,...],y[s:s+8,...]\n# yield X, y\n \ndef seg_train(name,learning_rate,init_weight=None):\n print('start seg_train')\n net = UNet()\n model = net.get_model(learning_rate,enable_drop_out=False)\n if not init_weight == None:\n model.load_weights(init_weight)\n model.summary()\n generator = DataGenerator(name=name)\n\n run = '{}-{}-{}'.format(name, time.localtime().tm_hour, time.localtime().tm_min)\n log_dir = SEG_LOG_DIR.format(run)\n check_point = log_dir + '/' + name + '_checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'\n\n print(\"seg train round {}\".format(run))\n tensorboard = TensorBoard(log_dir=log_dir, write_graph=False)\n checkpoint = ModelCheckpoint(filepath=check_point, monitor='val_loss', verbose=1, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', patience=TRAIN_EARLY_STOPPING, verbose=1)\n evaluator = net.get_evaluator(generator,name)\n model.fit_generator(generator.flow_segmentation(mode='train'), steps_per_epoch=TRAIN_STEPS_PER_EPOCH,\n validation_data=generator.flow_segmentation(mode='val'), validation_steps=TRAIN_VALID_STEPS,\n epochs=TRAIN_EPOCHS, verbose=1,\n callbacks=[tensorboard, checkpoint ,early_stopping, evaluator]) #\n# model.fit_generator(flow('train', name), steps_per_epoch=TRAIN_STEPS_PER_EPOCH,\n# validation_data=flow('val',name), validation_steps=TRAIN_VALID_STEPS,\n# epochs=TRAIN_EPOCHS, verbose=1,\n# callbacks=[tensorboard, checkpoint, early_stopping, evaluator])\n\nif __name__ == '__main__':\n seg_train('lung',TRAIN_SEG_LEARNING_RATE)\n"
},
{
"alpha_fraction": 0.5035144090652466,
"alphanum_fraction": 0.5188952088356018,
"avg_line_length": 42.496402740478516,
"blob_id": "592c85d12ce4d8c6465e82c86670af60288eb89f",
"content_id": "b6a6043612782fdceba7766751fa8d0043a902fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12093,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 278,
"path": "/data_generator.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport os\nimport pickle\nimport numpy as np\nimport random\nimport h5py\nfrom glob import glob\nfrom config import *\nclass DataGenerator(object):\n def __init__(self,name=\"lung\"):\n self.name = name\n self.h5path = PREPROCESS_PATH_LUNG if name == \"lung\" else PREPROCESS_PATH_MEIASTINAL\n self.meta_dict = self.get_meta_dict()\n self.records = self.get_ct_records()\n self.train_set,self.val_set = self.split_train_val()\n\n\n def split_train_val(self,ratio=0.8):\n record_len =self.records.shape[0]\n train_record = self.records[:int(record_len * ratio)]\n val_record = self.records[int(record_len * ratio):]\n return train_record, val_record\n\n def get_meta_dict(self):\n cache_file = '{}/all_meta_cache.meta'.format(PREPROCESS_PATH)\n if os.path.exists(cache_file):\n print('get meta_dict from cache')\n with open(cache_file, 'rb') as f:\n return pickle.load(f)\n\n meta_dict = {}\n for f in glob('{}/*.meta'.format(PREPROCESS_PATH_META)):\n seriesuid = os.path.splitext(os.path.basename(f))[0]\n\n with open(f, 'rb') as f:\n meta = pickle.load(f)\n meta_dict[meta['seriesuid']] = meta\n # cache it\n with open(cache_file, 'wb') as f:\n pickle.dump(meta_dict, f)\n\n return meta_dict\n\n def get_ct_records(self):\n numpy_files = glob(self.h5path + \"/*.h5\")\n fields = ['img_numpy_file', 'origin', 'spacing', 'shape']\n\n def fill_info(seriesuid):\n seriesuid = str(seriesuid)\n data = [None] * len(fields)\n matching = [s for s in numpy_files if seriesuid in s]\n\n if len(matching)>0:\n data[0] = matching[0]\n\n if seriesuid in self.meta_dict:\n t = self.meta_dict[seriesuid]\n data[1:] = [t['origin'], t['spacing'], t['shape']]\n\n return pd.Series(data, index=fields)\n\n records = pd.read_csv(ANNOTATION_FILE)\n\n if self.name ==\"lung\":\n #records = records[(records['label']==1) | (records['label']==5)]\n records = records[(records['label']==5)]\n else:\n records = records[records['label'] > 5]\n\n records[fields] = records['seriesuid'].apply(fill_info)\n records.dropna(inplace=True)\n\n print('ct record size {}'.format(records.shape))\n return records\n\n def get_positive(self,record, shape=(INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH),random_offset=(0,0,0)):\n '''\n Get positive sample\n :param record: one focus record\n :param shape:\n :return: one positve sample,(block,mask)\n '''\n if not ENABLE_RANDOM_OFFSET:\n random_offset= (0,0,0)\n mask = np.zeros(shape)\n with h5py.File(record['img_numpy_file'], 'r') as hf:\n W, H, D = hf['img'].shape[0], hf['img'].shape[1], hf['img'].shape[2]\n\n #DiameterX\n diameter = np.array([record['diameterX'],record['diameterY'],record['diameterZ']])\n radius = np.ceil(diameter/record['spacing']/2).astype(int)\n upper_z = 2\n orgin_coord = np.array([record['coordX'], record['coordY'], record['coordZ']+upper_z])\n \n orgin_coord = np.abs((orgin_coord - record['origin']) / record['spacing'])\n coord = orgin_coord + random_offset\n\n x, y, z = int(coord[0] - shape[0] // 2), int(coord[1] - shape[1] // 2), int(coord[2] - shape[2] // 2)\n\n x, y, z = max(x, 0), max(y, 0), max(z, 0)\n x, y, z = min(x, W - shape[0] - 1), min(y, H - shape[1] - 1), min(z, D - shape[2] - 1)\n\n block = hf['img'][x:x + shape[0], y:y + shape[1], z:z + shape[2]]\n\n # cub_coord = np.array([INPUT_WIDTH // 2, INPUT_HEIGHT // 2, INPUT_DEPTH // 2])\n\n real_coord = (orgin_coord - np.array([x, y, z])).astype(int)\n\n\n min_cor = np.clip(real_coord - radius,0,None)\n max_cor = real_coord + radius + 1# Add one \n if max_cor[0]>INPUT_WIDTH:\n max_cor[0] = INPUT_WIDTH\n if 
max_cor[1]>INPUT_HEIGHT:\n max_cor[1] = INPUT_HEIGHT\n if max_cor[2]>INPUT_DEPTH:\n max_cor[2] = INPUT_DEPTH\n\n mask[min_cor[0]:max_cor[0],\n min_cor[1]:max_cor[1],\n min_cor[2]:max_cor[2]] = 1.0\n # print(f\"Found Positive:{(x,y,z)},{(x+shape[0],y+shape[1],z+shape[2])}\")\n return block,mask\n\n\n def get_negative(self,slice_records,shape=(INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH)):\n '''\n Get negative sample\n :param slice_records: one CT related records\n :param shape:\n :return: negative sample,(block,mask)\n '''\n first_record = slice_records.iloc[0]\n W, H, D = first_record['shape'][0],first_record['shape'][1],first_record['shape'][2]\n mask = np.zeros((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH))\n block = np.zeros((INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH))\n #All the coordZ seems too low.\n \n focus_coords = np.array([slice_records['coordX'].values, slice_records['coordY'].values, slice_records['coordZ'].values])\n focus_coords = focus_coords.transpose(1, 0)\n origin = first_record['origin']\n spacing = first_record['spacing']\n focus_coords = np.abs((focus_coords - origin) / spacing)\n focus_dim = np.array([slice_records['diameterX'].values, slice_records['diameterY'].values, slice_records['diameterZ'].values])\n focus_dim = focus_dim.transpose(1, 0)\n focus_size = focus_dim/spacing\n\n focus_start_coords = focus_coords - focus_size//2\n focus_end_coords = focus_coords + focus_size // 2\n\n #Get ramdom negative\n with h5py.File(first_record['img_numpy_file'], 'r') as hf:\n while True:\n x, y, z = random.randint(0, W - shape[0] - 1), random.randint(0, H - shape[1] - 1), random.randint(0, D - shape[\n 2] - 1)\n if not self.check_overlap((x,y,z),(x+shape[0],y+shape[1],z+shape[2]),\n focus_start_coords,focus_end_coords):\n block = hf['img'][x:x + shape[0], y:y + shape[1], z:z + shape[2]]\n #print(f\"Found Negative:{(x,y,z)},{(x+shape[0],y+shape[1],z+shape[2])}\")\n if np.sum(block!=-ZERO_CENTER) > 0:\n break\n return block, mask\n\n def check_overlap(self,start,end, focus_start_coords,focus_end_coords):\n for i in range(len(focus_start_coords)):\n cub_start = focus_start_coords[i]\n cub_end = focus_end_coords[i]\n if self.check_cub_overlap(start,end,cub_start,cub_end):\n #print(f'Found Collision,{start},{end},{cub_start},{cub_end}')\n return True\n return False\n\n def check_cub_overlap(self,cub_start,cub_end, focus_start,focus_end):\n x_min = cub_start[0]\n x_max = cub_end[0]\n y_min = cub_start[1]\n y_max = cub_end[1]\n z_min = cub_start[2]\n z_max = cub_end[2]\n\n x_min2 = focus_start[0]\n x_max2 = focus_end[0]\n y_min2 = focus_start[1]\n y_max2 = focus_end[1]\n z_min2 = focus_start[2]\n z_max2 = focus_end[2]\n #print('Box2 min %.2f, %.2f, %.2f' % (x_min2, y_min2, z_min2))\n #print('Box2 max %.2f, %.2f, %.2f' % (x_max2, y_max2, z_max2))\n isColliding = ((x_max >= x_min2 and x_max <= x_max2) \\\n or (x_min <= x_max2 and x_min >= x_min2) \\\n or (x_min <= x_min2 and x_max >= x_max2) \\\n or (x_min >= x_min2 and x_max <= x_max2) \\\n ) \\\n and ((y_max >= y_min2 and y_max <= y_max2) \\\n or (y_min <= y_max2 and y_min >= y_min2) \\\n or (y_min <= y_min2 and y_max >= y_max2) \\\n or (y_min >= y_min2 and y_max <= y_max2) \\\n ) \\\n and ((z_max >= z_min2 and z_max <= z_max2) \\\n or (z_min <= z_max2 and z_min >= z_min2) \\\n or (z_min <= z_min2 and z_max >= z_max2) \\\n or (z_min >= z_min2 and z_max <= z_max2) \\\n )\n return isColliding\n\n def flow_segmentation(self,mode = 'train',batch_size = TRAIN_BATCH_SIZE):\n idx = 0\n records = self.train_set if mode =='train' else self.val_set\n 
shape = (INPUT_WIDTH, INPUT_HEIGHT, INPUT_DEPTH)\n X = np.zeros((batch_size, *shape, INPUT_CHANNEL))\n y = np.zeros((batch_size, *shape, OUTPUT_CHANNEL))\n y_class = np.zeros((batch_size, CLASSIFY_OUTPUT_CHANNEL))\n\n \n\n while True:\n for b in range(batch_size):\n #Random select\n idx = random.randint(0, records.shape[0] - 1)\n record = records.iloc[idx]\n is_positive_sample = random.random() <= TRAIN_SEG_POSITIVE_SAMPLE_RATIO\n random_offset = np.array([\n random.randrange(-TRAIN_SEG_SAMPLE_RANDOM_OFFSET, TRAIN_SEG_SAMPLE_RANDOM_OFFSET),\n random.randrange(-TRAIN_SEG_SAMPLE_RANDOM_OFFSET, TRAIN_SEG_SAMPLE_RANDOM_OFFSET),\n random.randrange(-TRAIN_SEG_SAMPLE_RANDOM_OFFSET, TRAIN_SEG_SAMPLE_RANDOM_OFFSET)\n ])\n if is_positive_sample:\n X[b, :, :, :, 0],y[b, :, :, :, 0] = self.get_positive(record,shape,random_offset)\n y_class[b, label_softmax[record['label']]] = 1\n else:\n #Get all the focus records for one CT\n focus_records = records.loc[records['seriesuid'] == record['seriesuid']]\n if focus_records.empty:\n print(record['seriesuid'])\n X[b, :, :, :, 0], y[b, :, :, :, 0] = self.get_negative(focus_records,shape)\n y_class[b, 0] = 1\n\n # rotate\n # for b in range(batch_size):\n # _perm = np.random.permutation(3)\n # X[b, :, :, :, 0] = np.transpose(X[b, :, :, :, 0], _perm)\n # y[b, :, :, :, 0] = np.transpose(y[b, :, :, :, 0], _perm)\n\n yield X.astype(np.float16), y.astype(np.float16)#, y_class\n\n def flow_classfication(self, mode='train', batch_size=TRAIN_BATCH_SIZE):\n idx = 0\n records = self.train_set if mode == 'train' else self.val_set\n shape = (CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH)\n X = np.zeros(\n (batch_size, *shape, CLASSIFY_INPUT_CHANNEL))\n y = np.zeros((batch_size, CLASSIFY_OUTPUT_CHANNEL))\n\n\n\n while True:\n for b in range(batch_size):\n idx = random.randint(0, records.shape[0] - 1)\n record = records.iloc[idx]\n is_positive_sample = random.random() <= TRAIN_CLASSIFY_POSITIVE_SAMPLE_RATIO\n random_offset = np.array([\n random.randrange(-TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET, TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET),\n random.randrange(-TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET, TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET),\n random.randrange(-TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET, TRAIN_CLASSIFY_SAMPLE_RANDOM_OFFSET)\n ])\n if is_positive_sample:\n X[b, :, :, :, 0], _ = self.get_positive(record,shape,random_offset)\n y[b, label_softmax[record['label']]] = 1\n\n else:\n # Get all the focus records for one CT\n focus_records = records.loc[records['seriesuid'] == record['seriesuid']]\n if focus_records.empty:\n print(record['seriesuid'])\n X[b, :, :, :, 0], _ = self.get_negative(focus_records,shape)\n y[b, 0] = 1\n\n yield X, y\n\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.6228017210960388,
"avg_line_length": 35.66666793823242,
"blob_id": "4c93ffdd4f06c89768250ddf40cc9af093c2b9cb",
"content_id": "fd49b6f7db87e12e1d6b74cb1c4dd32f2caa7ea3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1649,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 45,
"path": "/vgg.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from keras.optimizers import Adam, SGD, RMSprop\nfrom keras.layers import Input, Conv3D, MaxPooling3D, Dense, GlobalMaxPooling3D, Dropout, BatchNormalization\nfrom keras.models import Model\nfrom keras.metrics import categorical_accuracy\nfrom config import *\nclass SimpleVgg():\n def __init__(self):\n self.use_batchnom = False\n\n def get_model(self,learning_rate):\n inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))\n x = inputs\n\n x = Conv3D(32, (3, 3, 3), padding='same', activation='relu')(x)\n x = MaxPooling3D(pool_size=(2, 2, 2))(x)\n\n x = BatchNormalization()(x)\n\n x = Conv3D(64, (3, 3, 3), padding='same', activation='relu')(x)\n x = MaxPooling3D(pool_size=(2, 2, 2))(x)\n\n x = BatchNormalization()(x)\n\n x = Conv3D(128, (3, 3, 3), padding='same', activation='relu')(x)\n x = MaxPooling3D(pool_size=(2, 2, 2))(x)\n\n x = BatchNormalization()(x)\n\n x = Conv3D(256, (3, 3, 3), padding='same', activation='relu')(x)\n x = MaxPooling3D(pool_size=(2, 2, 2))(x)\n\n x = BatchNormalization()(x)\n\n x = Conv3D(512, (3, 3, 3), padding='same', activation='relu')(x)\n x = GlobalMaxPooling3D()(x)\n\n x = Dense(32, activation='relu')(x)\n #x = Dropout(0.5)(x)\n x = Dense(CLASSIFY_OUTPUT_CHANNEL, activation='softmax')(x)\n\n model = Model(inputs=inputs, outputs=x)\n #optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE)\n model.compile(optimizer=RMSprop(lr=learning_rate), loss='categorical_crossentropy', metrics=[categorical_accuracy])\n\n return model"
},
{
"alpha_fraction": 0.5743135809898376,
"alphanum_fraction": 0.5840566754341125,
"avg_line_length": 36.144737243652344,
"blob_id": "0ad91a915cbd0e6ce8072e24cf217724a31c4897",
"content_id": "146d4ca0ad3f1eb95734a26e994af3b500511422",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5645,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 152,
"path": "/preprocess.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from config import *\nimport numpy as np\nimport SimpleITK as sitk\nfrom skimage import morphology, measure, segmentation\nfrom skimage.filters import roberts, sobel\nfrom scipy import ndimage as ndi\nfrom glob import glob\nimport h5py\nimport scipy\nimport os\nimport pickle\nimport pandas as pd\nfrom tqdm import tqdm\nclass Preprocess():\n def __init__(self):\n pass\n\n #CT_PATH, PREPROCESS_PATH_LUNG\n def handle(self,ct_path,out_path):\n self.anotations = pd.read_csv(ANNOTATION_FILE)\n print('start preprocess')\n self.ct_files = glob(ct_path)\n \n self.lung_path = os.path.join(out_path,'lung')\n self.mediastinal_path = os.path.join(out_path,'mediastinal')\n self.meta_path = os.path.join(out_path,'meta')\n if not os.path.exists(self.lung_path):\n os.makedirs(self.lung_path)\n if not os.path.exists(self.mediastinal_path):\n os.makedirs(self.mediastinal_path)\n if not os.path.exists(self.meta_path):\n os.makedirs(self.meta_path)\n \n handled_ids = set([f[-9:-3] for f in glob('{}/*.h5'.format(self.lung_path))])\n print('{} total, {} processed'.format(len(self.ct_files), len(handled_ids)))\n\n counter = 0\n for f in tqdm(self.ct_files):\n seriesuid = os.path.splitext(os.path.basename(f))[0]\n if seriesuid in handled_ids:\n print('{} handled'.format(seriesuid))\n continue\n # anno = self.anotations.loc[self.anotations['seriesuid'] == int(seriesuid)]\n # if anno.empty or anno[(anno['label']==1) | (anno['label']==5)].empty:\n # continue\n counter += 1\n print('{} process {}'.format(counter, f))\n\n itk_img = sitk.ReadImage(f)\n img = sitk.GetArrayFromImage(itk_img) # (depth, height, width)\n img = np.transpose(img, (2, 1, 0)) # (width, height, depth)\n\n origin = np.array(itk_img.GetOrigin())\n spacing = np.array(itk_img.GetSpacing())\n #Resample to 1:1:1\n img, new_spacing = self.resample(img, spacing)\n\n new_img_1 = img.copy()\n new_img_2 = img.copy()\n\n #Generate Lung Image\n lung_img = self.extract_lung_img_3d(new_img_1)\n lung_img = self.normalize(lung_img,LUNG_MIN_BOUND,LUNG_MAX_BOUND,zero_center=True)\n lung_img = lung_img.astype(np.float16)\n #Generate Mediastinal Image\n mediastinal_img =self.normalize(new_img_2,CHEST_MIN_BOUND,CHEST_MAX_BOUND,zero_center=True)\n mediastinal_img = mediastinal_img.astype(np.float16)\n\n meta = {\n 'seriesuid': seriesuid,\n 'shape': new_img_1.shape,\n 'origin': origin,\n 'spacing': new_spacing\n }\n self.save_to_numpy(seriesuid, lung_img,mediastinal_img, meta)\n\n print('all preprocess done')\n\n # Resample to 1mm, 1mm, 1mm\n def resample(self,image, spacing, new_spacing=[1, 1, 1]):\n resize_factor = spacing / new_spacing\n new_real_shape = image.shape * resize_factor\n new_shape = np.round(new_real_shape)\n real_resize_factor = new_shape / image.shape\n new_spacing = spacing / real_resize_factor\n image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\n return image, new_spacing\n\n def normalize(self,img,lower,upper,zero_center =False):\n img = np.clip(img, lower, upper)\n img = (img - lower) / (upper - lower)\n if zero_center:\n img = img - ZERO_CENTER\n return img\n\n def normalize_all(self,imgs):\n for i in range(imgs.shape[2]):\n imgs[:, :, i] = self.normalize(imgs[:, :, i])\n\n def extract_mediastinal_img(self,imgs):\n return np.clip(imgs,CHEST_MIN_BOUND,CHEST_MAX_BOUND)\n\n def extract_lung_img_3d(self,imgs):\n ret = np.zeros(imgs.shape)\n for i in range(imgs.shape[2]):\n ret[:,:,i] = self.extract_lung_img_2D(imgs[:,:,i])\n return ret\n\n def extract_lung_img_2D(self, im, plot=False):\n binary = im < -550\n 
cleared = segmentation.clear_border(binary)\n        label_image = measure.label(cleared)\n        areas = [r.area for r in measure.regionprops(label_image)]\n        areas.sort()\n        if len(areas) > 2:\n            for region in measure.regionprops(label_image):\n                if region.area < areas[-2]:\n                    for coordinates in region.coords:\n                        label_image[coordinates[0], coordinates[1]] = 0\n        binary = label_image > 0\n\n        selem = morphology.disk(2)\n        binary = morphology.binary_erosion(binary, selem)\n\n        selem = morphology.disk(10)\n        binary = morphology.binary_closing(binary, selem)\n\n        # #?\n        # selem = morphology.disk(10)\n        # binary = morphology.binary_dilation(binary, selem)\n\n        edges = roberts(binary)\n        binary = ndi.binary_fill_holes(edges)\n\n        get_high_vals = binary == 0\n        im[get_high_vals] = LUNG_MIN_BOUND\n        return im\n\n\n\n    def save_to_numpy(self,seriesuid, lung_img,mediastinal_img, meta):\n\n        with h5py.File(os.path.join(self.lung_path,seriesuid+'.h5'), 'w') as hf:\n            hf.create_dataset('img', data=lung_img)\n        with h5py.File(os.path.join(self.mediastinal_path,seriesuid+'.h5'), 'w') as hf:\n            hf.create_dataset('img', data=mediastinal_img)\n        with open(os.path.join(self.meta_path,seriesuid+'.meta'), 'wb') as f:\n            pickle.dump(meta, f)\n\nif __name__ ==\"__main__\" :\n    p = Preprocess()\n    # handle() needs the CT glob pattern and an output root (both defined in config.py)\n    p.handle(CT_PATH, PREPROCESS_PATH)"
},
{
"alpha_fraction": 0.6318974494934082,
"alphanum_fraction": 0.6468493938446045,
"avg_line_length": 39.72463607788086,
"blob_id": "99dd2e0c306a7346312e8c45c2d625cb084f96f4",
"content_id": "4ef49c28712b0e27251676609047658f1d2ad261",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2809,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 69,
"path": "/resnet.py",
"repo_name": "martinambition/tianchi-lung-2019",
"src_encoding": "UTF-8",
"text": "from resnet_helper import Resnet3DBuilder\nfrom keras.optimizers import Adam\nfrom keras.layers import Input, Conv3D, Dense, BatchNormalization, Add, Flatten, Concatenate, AveragePooling3D, GlobalMaxPooling3D, Activation\nfrom keras.models import Model\nfrom keras.metrics import categorical_accuracy\nfrom config import *\n\nclass Resnet:\n\n def __init__(self):\n pass\n\n# def get_model(self,learning_rate):\n# inputs = (CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL)\n# model = Resnet3DBuilder.build_resnet_50(inputs,CLASSIFY_OUTPUT_CHANNEL)\n# model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=[categorical_accuracy])\n# return model\n def conv_bn_relu(self,x, filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', apply_relu=True):\n x = Conv3D(filters, kernel_size=kernel_size, strides=strides, padding=padding)(x)\n x = BatchNormalization()(x)\n if apply_relu:\n x = Activation('relu')(x)\n return x\n\n def bottleneck(self,x, shrinkage=False):\n print('resnet block, shrinkage:{}'.format(shrinkage))\n print(x.get_shape())\n\n input_filters = x.get_shape()[4].value\n keep_filters = input_filters // 2 if shrinkage else input_filters // 4\n output_filters = input_filters * 2 if shrinkage else input_filters\n first_strides = (2, 2, 2) if shrinkage else (1, 1, 1)\n\n residual = self.conv_bn_relu(x, filters=keep_filters, kernel_size=(1, 1, 1), strides=first_strides)\n residual = self.conv_bn_relu(residual, filters=keep_filters, kernel_size=(3, 3, 3))\n residual = self.conv_bn_relu(residual, filters=output_filters, kernel_size=(1, 1, 1), apply_relu=False)\n\n if shrinkage:\n x = self.conv_bn_relu(x, filters=output_filters, kernel_size=(3, 3, 3), strides=(2, 2, 2), apply_relu=False)\n\n print(residual.get_shape())\n print(x.get_shape())\n x = Add()([residual, x])\n x = Activation('relu')(x)\n\n return x\n\n def get_model(self,learning_rate):\n inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))\n\n x = self.conv_bn_relu(inputs, RESNET_INITIAL_FILTERS)\n\n print('base')\n print(x.get_shape())\n\n for i in range(RESNET_BLOCKS):\n x = self.bottleneck(x, shrinkage=(i % RESNET_SHRINKAGE_STEPS == 0))\n\n print('top')\n x = GlobalMaxPooling3D()(x)\n print(x.get_shape())\n\n x = Dense(5, activation='softmax')(x)\n print(x.get_shape())\n\n model = Model(inputs=inputs, outputs=x)\n model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy', metrics=['accuracy'])\n\n return model"
}
] | 12 |
izaansohail/Django_Testing | https://github.com/izaansohail/Django_Testing | a2168d3000bf7c8350a4353361fb0311481d6989 | 3e8d81578e47c1631084943de5ccd2ed4c0916ee | 207b2bab675d5087f2ec700fada406f32605803e | refs/heads/main | 2023-06-11T06:56:55.451834 | 2021-07-09T05:28:53 | 2021-07-09T05:28:53 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7117853164672852,
"alphanum_fraction": 0.7467911243438721,
"avg_line_length": 52.625,
"blob_id": "99d70787fe83e97346aeccb9e18fdea201fd3e58",
"content_id": "f45333c80350e3d40581fd9599de1b51025726f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 857,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 16,
"path": "/home/models.py",
"repo_name": "izaansohail/Django_Testing",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass Contact(models.Model):\n name = models.CharField(max_length=200,default=None,null=True)\n cnic = models.CharField(max_length=200,default=None,null=True)\n address = models.CharField(max_length=200,default=None,null=True)\n organization = models.CharField(max_length=200,default=None,null=True)\n contact_number = models.CharField(max_length=200,default=None,null=True)\n checkin = models.CharField(max_length=200,default=None,null=True)\n checkout = models.CharField(max_length=200,default=None,null=True)\n contact_person = models.CharField(max_length=200,default=None,null=True)\n purpose = models.CharField(max_length=200,default=None,null=True)\n img_location = models.CharField(max_length=200,default=None,null=True)\n def __str__(self):\n return str(self.name)"
},
{
"alpha_fraction": 0.5073891878128052,
"alphanum_fraction": 0.5911329984664917,
"avg_line_length": 21.55555534362793,
"blob_id": "6a813f81eb8bfabc2e8b22be66a592763d2fba7a",
"content_id": "ab654fcf97ede3ace5f202efff2fc01300e8c3c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 18,
"path": "/home/migrations/0003_contact_cnic.py",
"repo_name": "izaansohail/Django_Testing",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.5 on 2021-07-09 04:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0002_auto_20210708_1316'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='contact',\n name='cnic',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5154877305030823,
"alphanum_fraction": 0.5367544889450073,
"avg_line_length": 29.899999618530273,
"blob_id": "d3b1d2bca87ffac06c6bd9427764718ebc687772",
"content_id": "59c787d0202b88b91cf05f33936be8a685527fea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2163,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 70,
"path": "/home/migrations/0002_auto_20210708_1316.py",
"repo_name": "izaansohail/Django_Testing",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.5 on 2021-07-08 08:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='contact',\n name='date',\n ),\n migrations.RemoveField(\n model_name='contact',\n name='email',\n ),\n migrations.RemoveField(\n model_name='contact',\n name='password',\n ),\n migrations.AddField(\n model_name='contact',\n name='address',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='checkin',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='checkout',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='contact_number',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='contact_person',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='img_location',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='name',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='organization',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='contact',\n name='purpose',\n field=models.CharField(default=None, max_length=200, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7387096881866455,
"alphanum_fraction": 0.7387096881866455,
"avg_line_length": 33.55555725097656,
"blob_id": "3e3c85c509f685af826cd3d14595946c2780c99e",
"content_id": "312f019383f054281e9ba3b73ee29c8d54a8c7f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 9,
"path": "/home/admin.py",
"repo_name": "izaansohail/Django_Testing",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom home.models import Contact\n\n# Register your models here.\nadmin.site.register(Contact)\nclass ContactView(admin.ModelAdmin):\n list_display = ['Name','cnic','address',\n 'organization','contact_number','checkin','checkout',\n 'contact_person','purpose','img_location']"
},
{
"alpha_fraction": 0.6935803890228271,
"alphanum_fraction": 0.6935803890228271,
"avg_line_length": 38.71052551269531,
"blob_id": "37068d889a00179827529bb06d6f017052791a87",
"content_id": "0450884cda870194fe03eed40d44477df4efa3dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1511,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 38,
"path": "/home/views.py",
"repo_name": "izaansohail/Django_Testing",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom home.models import Contact\nfrom django.contrib import messages\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'homescreen.html')\n # return HttpResponse(\"this is homepage\")\n\ndef about(request):\n return render(request, 'about.html')\n # return HttpResponse(\"this is about page\")\n\ndef services(request):\n return render(request, 'services.html')\n # return HttpResponse(\"this is services page\")\n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST.get('Name')\n cnic = request.POST.get('cnic')\n address = request.POST.get('Address')\n organization = request.POST.get('Organization')\n contact_number = request.POST.get('Contact_Number')\n checkin = request.POST.get('Check-In')\n checkout = request.POST.get('Check-Out')\n contact_person = request.POST.get('Contact_Person')\n purpose = request.POST.get('Purpose')\n img_location = request.POST.get('Img_lLocation')\n contact = Contact(name=name,cnic = cnic, address=address,\n organization=organization,contact_number=contact_number,checkin=checkin,checkout=checkout,\n contact_person=contact_person,purpose=purpose,img_location=img_location)\n contact.save()\n messages.success(request, 'Your Details have been saved in database.')\n return render(request, 'contact.html')\n # return HttpResponse(\"this is contact page\")\n\n\n"
}
] | 5 |
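The two migrations above fully pin down the shape of the `Contact` model that `views.py` saves; a minimal reconstruction of `home/models.py` (an inference from the migrations, not the repo's actual file) would be:

```python
# Hypothetical sketch of home/models.py, inferred from migrations 0002 and 0003 above.
from django.db import models

class Contact(models.Model):
    name = models.CharField(max_length=200, null=True, default=None)
    cnic = models.CharField(max_length=200, null=True, default=None)
    address = models.CharField(max_length=200, null=True, default=None)
    organization = models.CharField(max_length=200, null=True, default=None)
    contact_number = models.CharField(max_length=200, null=True, default=None)
    checkin = models.CharField(max_length=200, null=True, default=None)
    checkout = models.CharField(max_length=200, null=True, default=None)
    contact_person = models.CharField(max_length=200, null=True, default=None)
    purpose = models.CharField(max_length=200, null=True, default=None)
    img_location = models.CharField(max_length=200, null=True, default=None)
```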
tkdcjs6229/webhaking.kr | https://github.com/tkdcjs6229/webhaking.kr | fc39cc305a5ac77e21b75aa87c2f7b87fdff3859 | 8f5ed15b9f3f1de07a2800582f58d0795102f2de | f8d78bcbcd9ea1196ace16352fc63c780675c49e | refs/heads/master | 2017-12-18T20:00:53.449213 | 2016-12-21T04:42:20 | 2016-12-21T04:42:20 | 76,542,227 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6102941036224365,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 41.894737243652344,
"blob_id": "4c98eab22f08a2d1cc2be653781c9ff836ddf328",
"content_id": "b7e7a42a84162e202f035cd40ea275d51c3b4642",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 19,
"path": "/webhacking_32.py",
"repo_name": "tkdcjs6229/webhaking.kr",
"src_encoding": "UTF-8",
"text": "#-*- coding: euc-kr -*-\nimport urllib2\n\nfor i in range(1, 100):\n url = \"http://webhacking.kr/challenge/codeing/code5.html?hit=tkdcjs\" #요청할 url\n req = urllib2.Request(url) #요청\n\n #헤더 추가\n req.add_header('Host', 'webhacking.kr')\n req.add_header('Proxy-Connection', 'keep-alive')\n req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')\n req.add_header('Referer', 'http://webhacking.kr/challenge/codeing/code5.html')\n req.add_header('Accept-Encoding', 'sdch')\n req.add_header('Accept-Language', 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4')\n req.add_header('Cookie', 'PHPSESSID=683cf810170e76fec5aec2451e047cbc')\n req.add_header('Accept-Charset','windows-949,utf-8;q=0.7,*;q=0.3')\n\n read = urllib2.urlopen(req).read() #확인 겸 열어보기\n print read\n\n"
}
] | 1 |
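The script above is Python 2 (`urllib2` and the `print` statement); a rough Python 3 port, assuming the same challenge URL, session cookie, and headers, would be:

```python
# Hypothetical Python 3 port of webhacking_32.py above (urllib2 -> urllib.request).
import urllib.request

URL = "http://webhacking.kr/challenge/codeing/code5.html?hit=tkdcjs"
HEADERS = {
    'Referer': 'http://webhacking.kr/challenge/codeing/code5.html',
    'Cookie': 'PHPSESSID=683cf810170e76fec5aec2451e047cbc',
}

for _ in range(1, 100):
    req = urllib.request.Request(URL, headers=HEADERS)
    with urllib.request.urlopen(req) as resp:
        # decode with replacement so the euc-kr page cannot crash the loop
        print(resp.read().decode('utf-8', errors='replace'))
```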
nataliavmors/natalia_v_mors | https://github.com/nataliavmors/natalia_v_mors | d5f89acec4f849d1a43082c21bbabaf03f116bc8 | 85003501d0288445de00ec61a092e91934067b20 | 409568b68002437102eff19f6bb36a30d5575733 | refs/heads/master | 2023-02-09T20:52:24.053283 | 2021-01-09T14:23:43 | 2021-01-09T14:23:43 | 297,370,320 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5501869320869446,
"alphanum_fraction": 0.5639919638633728,
"avg_line_length": 23.492958068847656,
"blob_id": "f6c908ff2249f981f2d2aad53d17df2717e972dc",
"content_id": "bc216b50fb6914a427f8c69f67878426df4767c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3808,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 142,
"path": "/lab5_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "from random import randint, choice\n\n\nclass Attacker:\n _health = None\n _attack = None\n\n def attack(self, target):\n target._health -= self._attack\n\n def is_alive(self):\n return self._health > 0\n\n\nclass Hero(Attacker):\n def __init__(self, name):\n self.name = name\n self._health = 100\n self._attack = 50\n self._experience = 0\n\n\nclass Enemy(Attacker):\n pass\n\n\ndef generate_random_enemy():\n RandomEnemyType = choice(enemy_types)\n enemy = RandomEnemyType()\n return enemy\n\n\ndef generate_dragon_list(enemy_number):\n enemy_list = [generate_random_enemy() for i in range(enemy_number)]\n return enemy_list\n\n\nclass Dragon(Enemy):\n def set_answer(self, answer):\n self.__answer = answer\n\n def check_answer(self, answer):\n return answer == self.__answer\n\n\nclass GreenDragon(Dragon):\n def __init__(self):\n self._health = 200\n self._attack = 10\n self._color = 'зелёный'\n\n def question(self):\n x = randint(1,100)\n y = randint(1,100)\n self.__quest = str(x) + '+' + str(y)\n self.set_answer(x + y)\n return self.__quest\n\n\nclass RedDragon(Dragon):\n def __init__(self):\n self._health = 200\n self._attack = 10\n self._color = 'красный'\n\n def question(self):\n x = randint(1, 100)\n y = randint(1, 100)\n self.__quest = str(x) + '-' + str(y)\n self.set_answer(x - y)\n return self.__quest\n\n\nclass BlackDragon(Dragon):\n def __init__(self):\n self._health = 200\n self._attack = 10\n self._color = 'черный'\n\n def question(self):\n x = randint(1, 100)\n y = randint(1, 100)\n self.__quest = str(x) + '*' + str(y)\n self.set_answer(x * y)\n return self.__quest\n\n\nenemy_types = [GreenDragon, RedDragon, BlackDragon]\n\ndef annoying_input_int(message =''):\n answer = None\n while answer == None:\n try:\n answer = int(input(message))\n except ValueError:\n print('Вы ввели недопустимые символы')\n return answer\n\n\ndef game_tournament(hero, dragon_list):\n for dragon in dragon_list:\n print('Вышел', dragon._color, 'дракон!')\n while dragon.is_alive() and hero.is_alive():\n print('Вопрос:', dragon.question())\n answer = annoying_input_int('Ответ:')\n\n if dragon.check_answer(answer):\n hero.attack(dragon)\n print('Верно! \\n** дракон кричит от боли **')\n else:\n dragon.attack(hero)\n print('Ошибка! \\n** вам нанесён удар... **')\n if dragon.is_alive():\n break\n print('Дракон', dragon._color, 'повержен!\\n')\n\n if hero.is_alive():\n print('Поздравляем! Вы победили!')\n print('Ваш накопленный опыт:', hero._experience)\n else:\n print('К сожалению, Вы проиграли...')\n\ndef start_game():\n\n try:\n print('Добро пожаловать в арифметико-ролевую игру с драконами!')\n print('Представьтесь, пожалуйста: ', end = '')\n hero = Hero(input())\n\n dragon_number = 3\n dragon_list = generate_dragon_list(dragon_number)\n assert(len(dragon_list) == 3)\n print('У Вас на пути', dragon_number, 'драконов!')\n game_tournament(hero, dragon_list)\n\n except EOFError:\n print('Поток ввода закончился. Извините, принимать ответы более невозможно.')\n\nprint(__name__)\n\nif __name__ == '__main__':\n start_game()"
},
{
"alpha_fraction": 0.5315203666687012,
"alphanum_fraction": 0.53399258852005,
"avg_line_length": 22.735294342041016,
"blob_id": "06023213ce29ff267db3653333caf04623608603",
"content_id": "b1b114240b9f87260b3a348d2978e3a3bdb965c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 809,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 34,
"path": "/4_4_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "\n\ndef decorator(function):\n def wrapper(*args,**kwargs):\n\n import time\n start = time.time()\n kkk = function(*args,**kwargs)\n end = time.time()\n period = end - start\n inform = []\n inform.append(start)\n inform.append(*args)\n if function(*args,**kwargs) is not None:\n inform.append(function(*args,**kwargs))\n else:\n inform.append(\"-\")\n inform.append(end)\n inform.append(period)\n with open(way, \"w\") as file:\n file.write(str(inform))\n return wrapper\n\n\n@decorator\ndef even_num(nums):\n obj = []\n for i in nums:\n if i % 2 == 0:\n obj.append(i)\n a=len(obj)\n return a\n\nway=\"D:\\programming.txt\"\nnums = list(map(int, input().split()))\ndecorator(even_num(nums))\n"
},
{
"alpha_fraction": 0.5283505320549011,
"alphanum_fraction": 0.5335051417350769,
"avg_line_length": 19.473684310913086,
"blob_id": "d305f81dd36c5f07ef18c6abde2c574333e20d0c",
"content_id": "22fde92ce342e081bfed8fec1c0109f5d650275e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 19,
"path": "/lab8_1_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def get_combinations(s,n):\n from itertools import combinations\n res = combinations(s,n)\n res = list(res)\n res.sort()\n return res\n\ns = input()\nn = int(input())\nkkk = list()\nfor t in range(1,n+1):\n rrr = get_combinations(s,t)\n for i in rrr:\n elem = str()\n for j in range(t):\n j = (i[j])\n elem += j\n kkk.append(elem)\nprint(kkk)"
},
{
"alpha_fraction": 0.47877758741378784,
"alphanum_fraction": 0.5059422850608826,
"avg_line_length": 24.60869598388672,
"blob_id": "22a2fe84c1de38fee4eb7b2e00e653bb2f243708",
"content_id": "ebf3e38cd3ad20bd94744615c409da6cf288ce35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 23,
"path": "/lab6_2_2.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "\n# площадь параллелограма, построенного на 2-ух векторах\n\nclass Vector():\n def __init__(self, x = 0, y = 0, z = 0):\n self.x = x\n self.y = y\n self.z = z\n\n\n def __matmul__(self, other):\n x1 = self.y * other.z - self.z * other.y\n y1 = -self.x * other.z + self.z * other.x\n z1 = self.x * other.y - self.y * other.x\n return Vector(x1, y1, z1)\n\n\nax, ay, az = map(int, input().split())\nbx, by, bz = map(int, input().split())\nv1 = Vector(ax, ay, az)\nv2 = Vector(bx, by, bz)\nc = v1 @ v2\nS = (c.x * c.x + c.y * c.y + c.z * c.z)**(1/2)\nprint(S)"
},
{
"alpha_fraction": 0.4946996569633484,
"alphanum_fraction": 0.4970553517341614,
"avg_line_length": 25.5,
"blob_id": "7d1d66b9feb2f48b9fd2503efb1f5ed8b8c7c005",
"content_id": "cfadbbfab1a76d65dffac325972929040095b7ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 849,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 32,
"path": "/lab4_4.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "\ndef pre_decorator(way):\n def decorator(function):\n def wrapper(*args, **kwargs):\n import time\n #print(way)\n start = time.time()\n kkk = function(*args, **kwargs)\n end = time.time()\n period = end - start\n if function(*args, **kwargs) is not None:\n inform=function(*args, **kwargs)\n else:\n inform=\"-\"\n with open(way, \"w\") as file:\n file.write(str(start)+'\\n'+str(*args)+'\\n'+str(inform)+'\\n'+str(end)+'\\n'+str(period))\n\n return wrapper\n return decorator\n\nway=\"D:\\programming.txt\"\n@pre_decorator(way)\ndef even_num(numss):\n obj = []\n for i in numss:\n if i % 2 == 0:\n obj.append(i)\n a=len(obj)\n return a\n\n\nnums = list(map(int, input().split()))\neven_num(nums)\n"
},
{
"alpha_fraction": 0.5954093933105469,
"alphanum_fraction": 0.5988352298736572,
"avg_line_length": 29.736841201782227,
"blob_id": "1d07cdbb1de2583e02a0cf9b3b8b0ae2f6232586",
"content_id": "2c5fec5d03d2c3a43fbb73a61dedceecdc9299a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3228,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 95,
"path": "/cycle_subjects.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "# Предметы\n\nclass Subject:\n def __init__(self,subj_name,marks,lecturers):\n self.name = subj_name\n self.marks = marks\n self.lecturers = lecturers\n\n #def __init__(self):\n #self.name=0\n #self.marks=0\n #self.lecturers=0\n\n #def InputSubject():\n #s_name = input()\n #s_marks=input()\n #s_grade_sys = input.split()\n # s_lecturers=input()\n # return Subject(s_name,s_marks,s_lecturers)\n #self(s_name,s_marks,s_lecturers)\n # self=self.InputSubject()\n\n\nclass SubjectCycle(Subject):\n def __init__(self,cycle_name,marks,grad_sys,sub,lecturer_name=None):\n self.name=cycle_name\n self.marks=marks\n lecturers = []\n for subject in sub:\n for lector in subject.lecturers:\n lecturers.append(lector)\n if lecturer_name != None:\n lecturers.append(lecturer_name)\n self.lecturers=lecturers\n self.grade_sys= grad_sys\n self.subjects=sub\n\n def CountAvg(self):\n median=3\n average=1\n harmonic=2\n import statistics as s\n if self.grade_sys == average :\n return float(sum(self.marks)) / max(len(self.marks), 1)\n if self.grade_sys == harmonic:\n return s.harmonic_mean(self.marks)\n if self.grade_sys == median:\n return s.median(self.marks)\n\n\n\ndef InputSubjects():\n print('Введите названия предметов')\n s_name=input().split()\n #print('введите код метода расчета средних ')\n #s_grade_sys=input().split()\n s_marks=[]\n s_lecturers=[]\n print('Введите оценки')\n for i in range(len(s_name)):\n s_marks.append(input().split())\n print('Введите имена лекторов')\n for i in range(len(s_name)):\n s_lecturers.append(input().split())\n subjcts=[]\n for i in range(len(s_name)):\n subjcts.append([])\n for i in range(len(s_name)):\n subjcts[i]=Subject(s_name[i],s_marks[i],s_lecturers[i])\n return subjcts\n\ndef InputCycleSubjects():\n\n print('Введите названия циклов предметов')\n s_name=input().split()\n print('Введите код метода расчета средних (1-среднее арифметическое, 2-среднее гармоническое, 3-среднее медианное')\n s_grade_sys=input().split()\n s_marks=[]\n s_lecturers=[]\n print('Введите оценки предметов циклов')\n for i in range(len(s_name)):\n s_marks.append(input().split())\n print('Введите имена лекторов предметов циклов (НЕ ОБЯЗАТЕЛЬНО)')\n for i in range(len(s_name)):\n s_lecturers.append(input().split())\n cycle_subjcts=[]\n for i in range(len(s_name)):\n cycle_subjcts.append([])\n for i in range(len(s_name)):\n print('Ввод предметов для цикла ' + s_name[i])\n sub = InputSubjects()\n cycle_subjcts[i]=SubjectCycle(s_name[i],s_marks[i],s_grade_sys[i],sub,s_lecturers[i])\n return cycle_subjcts\n\nprint(InputCycleSubjects())"
},
{
"alpha_fraction": 0.5693215131759644,
"alphanum_fraction": 0.5693215131759644,
"avg_line_length": 17.66666603088379,
"blob_id": "de14374a90b4aec89dd91494c0bf4cd53034c60f",
"content_id": "4f61009c05c3f42b3d2a2353951c7409222f81e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 18,
"path": "/lab8_1_2.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def get_permutations(s,n):\n from itertools import permutations\n res = permutations(s,n)\n res = list(res)\n res.sort()\n return res\n\ns = input()\nn = int(input())\nrrr = get_permutations(s,n)\nkkk = list()\nfor i in rrr:\n elem = str()\n for j in range(n):\n j=(i[j])\n elem += j\n kkk.append(elem)\nprint(kkk)\n\n\n\n"
},
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 8,
"blob_id": "cb4c014a550f9ae088e75c0585ea75f5d19ee251",
"content_id": "dcd6a1a2dfe97a99d933004cf2f68b99f478a8d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 3,
"path": "/main.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "print(5)\nprint(4)\nprint(4)\n"
},
{
"alpha_fraction": 0.5520833134651184,
"alphanum_fraction": 0.5520833134651184,
"avg_line_length": 25.72222137451172,
"blob_id": "6f24f163d1155ed1ce610b3d21cd271a2fb87012",
"content_id": "e64bd673a9e62d6286e431cf3d67788abdfbc7e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 480,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 18,
"path": "/lab3_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "import shutil\nimport os\n\nshutil.unpack_archive('D:\\main.zip',\n 'D:\\main', 'zip')\nlist = []\nfor dpath, dnames, finames in os.walk('D:\\main'):\n for dirname in dnames:\n for filename in finames:\n if filename.endswith('.py') and dirname not in list:\n list.append(dirname)\n\nlist.sort()\n\nwith open('D:\\oop.txt', 'w') as file:\n for line in range(len(list)):\n file.write(list[line])\n file.write('\\n')"
},
{
"alpha_fraction": 0.49618902802467346,
"alphanum_fraction": 0.5205792784690857,
"avg_line_length": 26.20833396911621,
"blob_id": "2e9202735fa4735bea33e11392ca0843f2b91e64",
"content_id": "ace934c2da21eea85f14d5ac484f10b70ea3942a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 48,
"path": "/lab6_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "class Complex:\n\n def __init__(self, real, imag):\n self.real=real\n self.imag=imag\n\n def __add__(self,other):\n z =self.real+other.real\n i =self.imag + other.imag\n return Complex(z,i)\n\n def __sub__(self, other):\n z =self.real-other.real\n i =self.imag - other.imag\n return Complex(z,i)\n\n def __mul__(self, other):\n z = self.real - other.real\n i = self.imag + other.imag\n return Complex(z,i)\n\n def __truediv__(self, other):\n z=(self.real * other.real + self.imag * other.y) / (other.real * other.real + other.imag * other.imag)\n i=(-self.real * other.y + self.imag * other.real) / (other.real * other.real + other.imag * other.imag)\n return Complex(z,i)\n\n def __abs__(self):\n return (self.real ** 2 + self.imag ** 2)**0.5\n\n\nx1, y1 = map(int, input().split())\nx2, y2 = map(int, input().split())\na = Complex(x1, y1)\nb = Complex(x2, y2)\nz1 = a + b\nz2 = a - b\nz3 = a * b\nz4 = a / b\nprint('a + b =', z1.real, '+', z1.imag, 'i')\nprint('a - b =', z2.real, '+', z2.imag, 'i')\nprint('a * b =', z3.real, '+', z3.imag, 'i')\nprint('a / b =', z4.real, '+', z4.imag, 'i')\nMA = abs(a)\nz5a = (MA.real + MA.imag)**(1/2)\nMB = abs(b)\nz5b = (MB.real + MB.imag)**(1/2)\nprint('|a| =', z5a)\nprint('|b| =', z5b)\n\n\n\n\n\n\n"
},
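A quick sanity check for the corrected `__mul__`/`__truediv__` above is to compare with Python's built-in complex type (a test sketch, not part of the lab):

```python
# Cross-check the Complex class arithmetic against Python's built-in complex numbers.
a = complex(3, 1)
b = complex(2, -4)
print(a * b)    # (10-10j): matches (ac - bd) + (ad + bc)i
print(a / b)    # matches the conjugate-based division formula
print(abs(a))   # modulus, the same formula as __abs__
```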
{
"alpha_fraction": 0.5945122241973877,
"alphanum_fraction": 0.625,
"avg_line_length": 16.3157901763916,
"blob_id": "edb85a86770883a5c1b2b575775fd3dee1bd2fb0",
"content_id": "3f5407353e3fbf681f0c7237866f1e2a08b79a19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 19,
"path": "/lab5_2_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "class Shape():\n def __init__(self,w,h):\n self.width=w\n self.height=h\n\n\nclass Triangle(Shape):\n def area(self):\n return 0.5*self.width*self.height\n\n\nclass Rectangle(Shape):\n def area(self):\n return self.width*self.height\n\n\nfig1=Triangle(2,4)\nfig2=Rectangle(2,4)\nprint(fig1.area(),fig2.area())"
},
{
"alpha_fraction": 0.5523256063461304,
"alphanum_fraction": 0.5891472697257996,
"avg_line_length": 18.884614944458008,
"blob_id": "0c1df7d43c460b545a1f10972d2017b09b338dcb",
"content_id": "95b3462b6f9271912124a3d4a2b8914235640f49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 26,
"path": "/lab5_2_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "class Animal():\n def __init__(self,n,a):\n self.name=n\n self.age=a\n\nclass Zebra(Animal):\n def __init__(self,n,a):\n self.name=n\n self.age=a\n self.legs=4\n\nclass Dolphin(Animal):\n def __init__(self,n,a):\n self.name=n\n self.age=a\n self.tails=1\nn1='кузя'\nn2='валера'\nn3='Гриша'\na1=16\na2=4\na3=54\nanimal=Animal(n1,a1)\ndolph=Dolphin(n3,a3)\nzeb=Zebra(n2,a2)\nprint('\\n',animal.name,animal.age,'\\n',dolph.name,dolph.age,dolph.tails,'\\n',zeb.name,zeb.age,zeb.legs)"
},
{
"alpha_fraction": 0.5033467411994934,
"alphanum_fraction": 0.5261043906211853,
"avg_line_length": 25.678571701049805,
"blob_id": "743e9ed1f82063edeff85ea61d59855d4b3c3865",
"content_id": "126ae925d4b27809c4aa56d1d1bc233cecabff21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 790,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 28,
"path": "/lab6_2_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "\n# объем параллелепипеда, построенного на 3-х векторах\n\nclass Vector():\n def __init__(self, x = 0, y = 0, z = 0):\n self.x = x\n self.y = y\n self.z = z\n\n def __matmul__(self, other):\n x1 = self.y * other.z - self.z * other.y\n y1 = -self.x * other.z + self.z * other.x\n z1 = self.x * other.y - self.y * other.x\n return Vector(x1, y1, z1)\n\n def __mul__(self, other):\n return Vector(self.x * other.x, self.y * other.y, self.z * other.z)\n\n\nax, ay, az = map(int, input().split())\nbx, by, bz = map(int, input().split())\ncx, cy, cz = map(int, input().split())\nv1 = Vector(ax, ay, az)\nv2 = Vector(bx, by, bz)\nv3 = Vector(cx, cy, cz)\nc = v1 @ v2\ncc = v3 * c\nS = (cc.x + cc.y + cc.z)/3\nprint(S)"
},
{
"alpha_fraction": 0.5673575401306152,
"alphanum_fraction": 0.5829015374183655,
"avg_line_length": 18.350000381469727,
"blob_id": "70a1af4dc32a2cf431ab302c5a5eb2e0e59a56e2",
"content_id": "e5d6c87e8cdb66aa79e7fe932b8a15fbce660c73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 20,
"path": "/lab4_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "import sys\nimport argparse\n\ndef fib(n):\n a = 1\n if n > 2:\n a = fib(n - 1) + fib(n - 2)\n return a\n\ndef createParser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', default=1, type=int)\n return parser\n\n\nif __name__ == '__main__':\n parser = createParser()\n namespace = parser.parse_args(sys.argv[1:])\n val = fib(namespace)\n print(val)"
},
{
"alpha_fraction": 0.4806378185749054,
"alphanum_fraction": 0.4920273423194885,
"avg_line_length": 18.086956024169922,
"blob_id": "481a425713e2bdfc69e70286bca9343c85320fa1",
"content_id": "58f51bc588362197a297e458440addd057defffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 452,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 23,
"path": "/lab4_2.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def decorator(function):\n def wrapper(a):\n kkk=function(a)\n if kkk > 10:\n print(\"очень много\")\n elif kkk == 0:\n print(\"нет(\")\n else:\n print(len(a))\n return wrapper\n\n@decorator\ndef even_num(nums):\n obj = []\n for i in nums:\n if i % 2 == 0:\n obj.append(i)\n a=len(obj)\n return a\n\n\nnums = list(map(int, input().split()))\ndecorator(even_num(nums))\n"
},
{
"alpha_fraction": 0.6867470145225525,
"alphanum_fraction": 0.6867470145225525,
"avg_line_length": 22.714284896850586,
"blob_id": "466d215d03887be7929b9dfe7b2be0080f9694c5",
"content_id": "f547438a2ba1890a74eb65e737960e23cec731b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 7,
"path": "/lab8_1_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def get_cartesian_product(a,b):\n from itertools import product\n return list(map(list, product(a,b)))\n\na = input()\nb = input()\nprint(get_cartesian_product(a,b))\n"
},
{
"alpha_fraction": 0.4891485869884491,
"alphanum_fraction": 0.5091819763183594,
"avg_line_length": 22.038461685180664,
"blob_id": "06c52ea90a9fe0dd145d828a8c16f8b50dc36c0a",
"content_id": "cc9809b29791dcdb28b14decc0eb823d501f57a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 26,
"path": "/lab6_2_0.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "\n# центр масс\n\nclass Vector():\n def __init__(self, x=0, y=0, z=0):\n self.x = x\n self.y = y\n self.z = z\n\n def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y, self.z + other.z)\n\n def __truediv__(self, other):\n return Vector(self.x / other.x, self.y / other.y, self.z / other.z)\n\n\nn = int(input())\nax, ay, az = map(int, input().split())\nv1 = Vector(ax, ay, az)\nwhile n - 1 != 0:\n bx, by, bz = map(int, input().split())\n v2 = Vector(bx, by, bz)\n v1 = v1 + v2\n n -= 1\nnnn = Vector(n, n, n)\nc = v1 / nnn\nprint(c.x, c.y, c.z)"
},
{
"alpha_fraction": 0.5465838313102722,
"alphanum_fraction": 0.5465838313102722,
"avg_line_length": 12.5,
"blob_id": "a182938acdf56a8ee29c187627404684298bc6e2",
"content_id": "30ca2def2035910cfc8cf7654f48e29d5531d77e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 12,
"path": "/lab5_2_2.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "class Mother:\n def me(self):\n print(\"я мама\")\n\nclass Daughter(Mother):\n def me(self):\n print('я дочь')\n\nm=Mother()\nm.me()\nd=Daughter()\nd.me()"
},
{
"alpha_fraction": 0.6532257795333862,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 21.636363983154297,
"blob_id": "d6d363c136dd6d0cb653f802102cb838c9ab3749",
"content_id": "8b21f2e99d340f7f648e5ac2f8285df7ce7cd8ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 11,
"path": "/lab8_1_5.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def compress_string(s):\n from itertools import groupby\n sss = groupby(s, lambda x: x[0])\n return sss\n\n\n\n# в душе не знаю просто оставлю это так пока\nfor key, group in compress_string(s):\n for thing in group:\n print(thing[1], key)"
},
{
"alpha_fraction": 0.44813278317451477,
"alphanum_fraction": 0.46473029255867004,
"avg_line_length": 25.66666603088379,
"blob_id": "0689bfb25455a44730d8a4075abe6d9040ea165c",
"content_id": "a278d190a7bf5520c5a28ca04333f1c2cd369c9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/lab3_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "with open(\"D:\\programming.txt\", \"r\") as file:\n # print(file.read())\n for line in file:\n i=0\n while line[i]==\" \":\n line = line[1:]\n while line[len(line)-1]==\" \":\n line = line[:-1]\n print(line)\n\n"
},
{
"alpha_fraction": 0.6370967626571655,
"alphanum_fraction": 0.6451612710952759,
"avg_line_length": 61.5,
"blob_id": "21ac068c315e599b21a6ea6eb39d32a44dccd068",
"content_id": "1a2a222b0321ea4bbce105fd5b87ca9337958b13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 2,
"path": "/lab3_2.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "with open(\"D:\\programming.txt\", \"r\") as file, open(\"D:\\programming_1.txt\", \"w\") as new_file:\n new_file.write(file.read())"
},
{
"alpha_fraction": 0.5236486196517944,
"alphanum_fraction": 0.5337837934494019,
"avg_line_length": 15.5,
"blob_id": "e5dac4175cf9186790ed8da24cc4caf8ef50ad11",
"content_id": "a076a752e229a37c0e8f019ee2b4e340f1d159f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 18,
"path": "/lab4_3.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def swap(function):\n def wrapper(*args, show):\n args=args[::-1]\n if show:\n print(function(*args))\n return function(*args)\n return wrapper\n\n\n@swap\ndef div(x, y, show=False):\n res = x / y\n if show:\n print(res)\n return res\n\n\ndiv(2, 4, show=True)"
},
{
"alpha_fraction": 0.6721311211585999,
"alphanum_fraction": 0.6721311211585999,
"avg_line_length": 23.600000381469727,
"blob_id": "d3f94420481b98d080611ad5a9ef340f22836e0c",
"content_id": "75833043e171db76cadd35b844ef305bb261340a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 5,
"path": "/lab8_1.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "#def print_map(function, iterable):\n# for i in iterable:\n# print(function(i))\n\nmap(print,map(function,iterable))"
},
{
"alpha_fraction": 0.5898617506027222,
"alphanum_fraction": 0.5898617506027222,
"avg_line_length": 19.714284896850586,
"blob_id": "35edfaa6b86dc9527c61ec8c11856bb1efbc7b78",
"content_id": "2d06f0e0dd1c69676ad7a54e0ba7e1c20852284f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 21,
"path": "/lab8_1_4.py",
"repo_name": "nataliavmors/natalia_v_mors",
"src_encoding": "UTF-8",
"text": "def get_combinations_with_r(s,n):\n from itertools import combinations_with_replacement\n res = combinations_with_replacement(s,n)\n res = list(res)\n res.sort()\n return res\n\ns = input()\nn = int(input())\nrrr = get_combinations_with_r(s,n)\nkkk = list()\nfor i in rrr:\n elem = str()\n for j in range(n):\n j=(i[j])\n #print(j)\n elem += j\n #print(elem)\n kkk.append(elem)\n #print(kkk)\nprint(kkk)"
}
] | 24 |
shriawesome/Python-Concepts | https://github.com/shriawesome/Python-Concepts | 0a41eaaed3b8d6ede3d25c0eb217da7e2d3b8c78 | 1bc5d3f74539a4062ca338567f212c0dd9438646 | 6dd5a18cf664135da4ce6ff8e3103f1554601a93 | refs/heads/master | 2021-05-26T02:07:58.038418 | 2021-03-14T10:23:42 | 2021-03-14T10:23:42 | 254,011,945 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6177253723144531,
"alphanum_fraction": 0.642829954624176,
"avg_line_length": 25.55555534362793,
"blob_id": "6b8c986c6d94b4d27317593bddd2984baeab2107",
"content_id": "625618f03b369f303e4f6637681fad958c197ce4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2629,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 99,
"path": "/OOP/Polymorphism.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Polymorphism : means 1 thing that can take many forms i.e. a single object can take many forms.\n# Implementation in Python :\n# 1. Duck Typing : Same method under different classes and behaves differently.\n# 2. Operator Overloading : same '+' performs different with different objects. e.g. with int performs addition\n# and with str performs concatenation.(for coding make use of Magic Methods)\n# 3. Method Overloading : Same class with same method name but different arguments.(Not in Pyhton)\n# 4. Method Overriding : Class A and Class B with same method name and same sets of arguments.\n'''\n# Uncomment for understaning Duck typing\nclass PyCharm:\n def execute(self):\n print('Starting Pycharm')\n print('Compliling')\n print('Running')\n\n# Say another IDE\nclass MyEditor:\n def execute(self):\n print('Starting MyEditor')\n print('Auto complete')\n print('Dynamic Check')\n print('Compliling')\n print('Running')\n\nclass Laptop:\n\n def code(self,ide):\n # Type of ide depends upon the type\n ide.execute()\n\nide1=PyCharm()\nide2=MyEditor()\n\nlap1=Laptop()\nlap1.code(ide1)\nlap1.code(ide2)\n'''\n\n# Understanding Operator Overloading (Uncomment for understanding)\n'''\na,b=5,10\nprint(a+b)\nprint(int.__add__(a,b))\n# Thus in the background + is calling __add__ to perform operations.\n# same with - : __sub__() ; / : __div__() etc.\n\nclass Student:\n\n def __init__(self,m1,m2):\n self.m1=m1\n self.m2=m2\n\n # Magic fn to perform additions of their marks, and we are overloading the Operator.\n def __add__(self,other):\n m1=self.m1+other.m1\n m2=self.m2+other.m2\n s3=Student(m1,m2)\n return s3\n\n # Magic Fn for '>' operations\n def __gt__(self,other):\n v1=self.m1+self.m2\n v2=other.m1+other.m2\n\n if v1>v2:\n return True\n elif v1<v2:\n return False\n\ns1=Student(30,36)\ns2=Student(28,39)\n\n# To make this work we need to define __add__()\ns3=s1+s2\nprint(s3.m1,s3.m2)\n\n# to use > define \"__gt__()\"\nif s1 > s2:\n print(\"s1 is greater\")\nelse:\n print(\"s2 is greater\")\n'''\n\n# Understanding Method Overloading, by default if we try overloading with same method name but differnt\n# arguments it only runs the one with max number of arguments and none other. Hence to implement Method\n# Overloading in Python we can use some tricks.\nclass Student:\n\n def sum(self,a=None,b=None,c=None):\n if a!=None and b!=None and c!=None:\n return a+b+c\n\n elif a!=None and b!=None:\n return a+b\n\n else:\n return a\ns1=Student()\nprint(s1.sum(2,3))\n"
},
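Since true method overloading is unavailable (as Polymorphism.py notes), the standard library's `functools.singledispatch` gives type-based dispatch; a minimal sketch, separate from the file above:

```python
# Standard-library type-based dispatch; an alternative sketch, not from the file above.
from functools import singledispatch

@singledispatch
def describe(value):
    # fallback implementation for unregistered types
    return 'something else'

@describe.register(int)
def _(value):
    return 'an int: %d' % value

@describe.register(str)
def _(value):
    return 'a string: %s' % value

print(describe(5), '|', describe('hi'), '|', describe(2.5))
```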
{
"alpha_fraction": 0.6468628644943237,
"alphanum_fraction": 0.6540810465812683,
"avg_line_length": 22.402597427368164,
"blob_id": "9f278be4d8255ee02f451218ec67d6c528efbb60",
"content_id": "f3c044851e648b9b336431809e3f42551e67167d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 77,
"path": "/decorators_basic.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Decorators are mainly used to add some additional functionality to the already present method/function \n# by without explicitly changing the code inside that method/function.\n# Suppose we have a divide f'n\n\n# Suppose we always want a > b then in that case without changing the div() code we can do so\n# by implementing decorators.\n\n'''\ndef div_decorator(func):\n def inner(a,b):\n if a < b:\n a,b=b,a\n\n func(a,b)\n\n# when inner() is used instead it returns the result and when just 'inner' is used it waits for the function to be executed.\n return inner\n\n@div_decorator\ndef div(a,b):\n print(a/b)\n\n#div(2,4)\n\n# by addind '@div_decorator' above with the div func' it does the same work as the below 2 lines of code does.\ndiv=div_decorator(div)\ndiv(2,4)\n'''\n\n'''\n# other example i.e. to use decortors to calculate time take by the code to execute.\nimport time\nimport math\n\ndef calculate_time(func):\n # by using *args and **kwargs we can use the same decorator to more than 1 functions.\n def inner(*args,**kwargs):\n begin=time.time()\n func(*args,**kwargs)\n end=time.time()\n\n print('Total time taken in : ',func.__name__,end-begin)\n\n return inner\n\n@calculate_time\ndef factorial(n):\n time.sleep(2)\n print(math.factorial(n))\n\nfactorial(10)\n'''\n\n\n'''\n# CLASS AS A DECORATOR\n# for understanding let's execute teh div_decorator\n\nclass div_decorator_class(object):\n def __init__(self,func):\n self.func=func\n\n # This works as a wrapper function or the inner function\n def __call__(self,*args, **kwargs):\n # args is a tuple hence variables needs to be used\n a,b=args[0],args[1]\n if a<b:\n a,b=b,a\n return self.func(a,b)\n\n@div_decorator_class\ndef div(a,b):\n print(a/b)\n\n\ndiv(2,4)\n'''"
},
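One refinement the decorator examples above omit: without `functools.wraps`, the wrapped function's `__name__` becomes `'inner'`. A small sketch using the same timing idea:

```python
# Timing decorator that preserves the wrapped function's metadata.
import functools
import time

def calculate_time(func):
    @functools.wraps(func)            # keeps func.__name__ and docstring intact
    def inner(*args, **kwargs):
        begin = time.time()
        result = func(*args, **kwargs)
        print('Total time taken in:', func.__name__, time.time() - begin)
        return result                 # also forward the return value
    return inner

@calculate_time
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

print(slow_add(2, 3), slow_add.__name__)   # prints 5 and 'slow_add', not 'inner'
```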
{
"alpha_fraction": 0.5677419304847717,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 16.22222137451172,
"blob_id": "480ea48b2781008318b41b130d62c0a658f31c05",
"content_id": "55978df1dafdbcad4b03dda0b01b0d63c475ea69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/Packages/reader/compressed/gzipped.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "import gzip\nimport sys\n\nopener=gzip.open\n\nif __name__=='__main__':\n f=gzip.open(sys.argv[1],mode='wt')\n f.write(''.join(sys.argv[2:]))\n f.close()\n"
},
{
"alpha_fraction": 0.7059276103973389,
"alphanum_fraction": 0.707467257976532,
"avg_line_length": 48.96154022216797,
"blob_id": "349dd7e55274d9454a5cfbef1fb45872f4b044d2",
"content_id": "84cc7013a1b73637da7d0731bccd47a8966b43ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1299,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 26,
"path": "/Packages/Readme.md",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "## Packages\n\n * Packages are modules that contains other modules.\n * Packages are generally implemented as directories containing a special `__init__.py` file.\n * The `__init__.py` file is executed when the package is imported.\n * Packages can contain sub packages which themselves are implemented with the `__init__.py` files directories.\n\n * sys.path :\n * List of directories python checks for modules.\n * It can be used for adding user defined module entries in the path as well, so as to avoid import errors.\n * This can be done simply by ```sys.path.append('not_seached_directory')```\n\n * `PYTHONPATH` is the Environment variable listing the paths added to the sys.path\n * This can also be used to add the `not_seached_directory` to the sys.path without actually appending it.\n * For linux/Unix :\n * `export PYTHONPATH=not_seached_directory`\n * For Windows :\n * `set PYTHONPATH=not_seached_directory`\n\n * Process for Creating a Package :\n * |- Path_entry/ (Must be a sys.path)\n * |- my_Package/ (Directory containing your package)\n * * |- `__init__.py`\n\n * For demo I've created a Reader Package that reads the bz2 and zip files automatically.\n * [Snapshot](https://github.com/shriawesome/Python-Concepts/blob/master/Packages/imgs/Reader_op1.png)\n"
},
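A minimal usage sketch for the Reader package described above (assumes the package directory is on sys.path, that `__init__.py` does no re-exporting, and that `test.bz2` is a hypothetical file on disk):

```python
# Usage sketch for the reader package; the path entry and test.bz2 are hypothetical.
import sys
sys.path.append('/path/to/Path_entry')   # directory that contains reader/

from reader.reader import Reader

r = Reader('test.bz2')   # extension_map picks the bz2 opener automatically
print(r.read())
r.close()
```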
{
"alpha_fraction": 0.6381909251213074,
"alphanum_fraction": 0.643216073513031,
"avg_line_length": 18.899999618530273,
"blob_id": "37b76fb1eb3a4806eecf31a32f2a2506fbc0e32a",
"content_id": "55b0aa58c382b609a0581b45e930bfffdd056f36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 20,
"path": "/Packages/reader/reader.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom reader.compressed import bzipped,gzipped\n\nextension_map={\n'.bz2':bzipped.opener,\n'.gz':gzipped.opener,\n}\n\nclass Reader:\n def __init__(self,filename):\n extension=os.path.splitext(filename)[1]\n opener=extension_map.get(extension,open)\n self.f=opener(filename,'rt')\n\n def close(self):\n self.f.close()\n\n def read(self):\n return self.f.read()\n"
},
{
"alpha_fraction": 0.6790813207626343,
"alphanum_fraction": 0.6989447474479675,
"avg_line_length": 33.212764739990234,
"blob_id": "37cd83ea4704dd5fbcb4a58b4f2f6a36ce335423",
"content_id": "d46de2008faa5076c88b26de8380bc572087ca13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3222,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 94,
"path": "/data_structures.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Data Structure : It is the way to organise, store and manage data for efficient 'access' and 'modification'.\n# Types of Data Structures:\n# 1. Built-in DS : lists, dictionary, tuple, sets\n# 2. User-Defined DS : Arrays(stack, queue), linked list, tree, graph\n\n# 1. Lists: \n # a. List have hetrogeneous items(items with different datatypes) in it\n # b. Lists are mutable i.e. the contents within can be changed\n\nmylist=[1,'shri',2.0,True]\n\n# insert elements, append() : adds element to the end.\nmylist.append(3)\n#print(mylist)\n\n# extend() : adds elements in the list as single elements\nmylist.extend((5,6))\n#print(mylist)\n\n#insert(idx,element)\nmylist.insert(0,'First')\n#print(mylist)\n\n# REMOVING ELEMENTS : del, pop() : returns that element, remove() : won't return any value\ndel mylist[0]\na=mylist.pop(2)\n# print(a)\n\n# sorted() : actual list data is not changed, sort() : makes changes in the list itself\nlist1=[3,4,1,2,5]\n# print(sorted(list1),list1)\nlist1.sort(reverse=True)\n# print(list1)\n\n# RETURN THE INDEX OF THE ELEMENT\n# print(list1.index(3))\n# print(list1.count(3)) # return the count of element 3\n\n# 2. Tuples:\n # a. Same as lists but are not mutable i.e. once created can't be changed.\n # b. Faster than lists. How ?\n # i. Can be reused instead of copied like in list i.e. 'b=tuple(a)' is same as b, but in list 'b=list(a)' copies all the elements of a to b\n # since b can be changed.\n # ii. Since tuples are of fixed size, it can be stored more compactly than lists which need to over-allocate to make append() operations efficient. \n # iii. Tuples directly referende their elements unlike list which have an extra layer of indirection to an external array of pointers.\nmytuple=(1,2,3)\n# print(mytuple)\n# data can't be changed once created thus assignment operator is not allowed, when concatenating a new tuple is created.\n\n# tuple comprehension\nb=tuple(i for i in (1,2,3))\n#print(b)\n\n# 3. Dictionary:\n # a. Holds key, value pairs\n # b. are mutables\nmydict={i:i+1 for i in range(1,4)}\n\n# modifying value, mydict[key]=value, adding value is the same.\nmydict[1]='shri'\n#print(mydict)\n\n# removing element : del mydict[1], pop(), popitem(): returns (key, value)\n#print(mydict.popitem())\n\n# Few functions\n#print(mydict.keys()) # return all the key values\n#print(mydict.values())\n#print(mydict.items()) # returns key, value pairs.\n\n# 4. Sets:\n # a. Un-ordered collection of unique elements.\n # b. Sets are mutable\n # c. Contains unique value of elements\nset1={1,2,3,4,4,4,5}\nprint(set1)\n\n# Adding elements in sets set1.add(element)\n# Different operations : Union, intersection, difference, symmetric_difference() : both 'sets' differences is given\nset2={3,4,5,6,7}\nprint(set1.union(set2)) # same for intersection\n\n\n# II. User-defined data types\n# 1. Array (contains data of specific data types)\n# 2. Stacks:\n # a. Follows LIFO principle \n# 3. Queues :\n # same as stacks but follows FIFO principle and has a Head and the Tail\n# 4. Trees :\n # useful for defining hierarchy\n # consist of the Node that contains the data and the left and the right child\n# 5. Linked List :\n # single node consists of the data and pointer to the next value.\n \n\n"
},
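The user-defined section above describes stacks and queues without code; a minimal sketch of both:

```python
# Minimal stack (LIFO) and queue (FIFO) sketches for the user-defined section above.
from collections import deque

stack = []
stack.append(1)
stack.append(2)
stack.append(3)
print(stack.pop())        # 3 -> last in, first out

queue = deque()
queue.append(1)
queue.append(2)
queue.append(3)
print(queue.popleft())    # 1 -> first in, first out; deque makes this O(1)
```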
{
"alpha_fraction": 0.719631552696228,
"alphanum_fraction": 0.7225100994110107,
"avg_line_length": 56.900001525878906,
"blob_id": "bf1f4c00db0fdee88496a1539149f3498c34a603",
"content_id": "64919fd55492148a343854cde6730efbd47c1acd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1737,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 30,
"path": "/README.md",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Python Concepts\n**Version 1.0.0**\n- - -\n1. Abstract Classes :\n * An abstract class can be considered as a blueprint for other classes.\n * It allows you to create a set of methods that must be created within any child classes built from the abstract class.\n * It can be used to define a common API for a set of subclasses. This capability is especially useful in a situation where a third-party is going to provide implementations, such as plugins etc.\n * Python does not provide inbuilt support for abstract class.\n * It comes with a module named abc(Abstract Base Class) for the purpose.\n * Important modules :-\n * `from abc import ABCMeta,abstractmethod`\n_ _ _\n\n2. Beyond Basic Functions :\n * Function `__call__()` :\n * This is used so as to make an instance of the class created behave as an function directly.\n * For e.g. refer to AboutFunctions/resolver.py <br>\n\n * lambdas :\n * Is an expression that evaluates to the Function.\n * Unlike function defined using def() keyword it need not be defined and can be anonymous i.e. it has no name.\n * Argument list terminated by colon, separated by commas i.e. e.g. `lambda a,b,c : a+b+c`\n * It can also support zero or more arguments. For zero argument `lambda : `\n * Body contains only a single statement. More than one statement is not permitted.\n * The single statement itself is a return statement and can't contain a keyword `return`.\n * e.g. `sorted(,key=)` in python contains a key argument accepts lambda function as a value.\n * Hence, given a list of names to sort for one can sort the names based on last name using this.\n * [lambda implementation](https://github.com/shriawesome/Python-Concepts/blob/master/imgs/lambda.png)\n\n- - -\n"
},
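The README's `sorted(key=lambda ...)` point can be made concrete with a last-name sort (the names here are made up for illustration):

```python
# Sort names by last name using a lambda as the key, as the README above describes.
names = ['Guido van Rossum', 'Ada Lovelace', 'Alan Turing']
print(sorted(names, key=lambda name: name.split()[-1]))
# ['Ada Lovelace', 'Guido van Rossum', 'Alan Turing'] -- Lovelace < Rossum < Turing
```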
{
"alpha_fraction": 0.5890330076217651,
"alphanum_fraction": 0.598761796951294,
"avg_line_length": 27.504201889038086,
"blob_id": "4fe9890fc03f03cea460951a638d7951416495f8",
"content_id": "cb187c74ea2008bf568886f37023186e7b44b4ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3392,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 119,
"path": "/Data_Structures/BST.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Binary Tree Problem ?\n# -> In 'list' duplicate values will persist but when 'set' is used\n# then only a single value remains and the duplicate values are removed.\n# Thus internally to implement 'set' BST can be used.\n\n# What is BT ?\n# -> It's a normal tree with a constraint that each node can only have 2 child nodes.\n# BST is a special BT with certain order in the way elements are present like say left child node < right child node and\n# the elements are not duplicated. \n\n# Search Complexity :\n# -> Every iteration we reduce search space by 1/2\n# -> n=8(# elements) 8 -> 4 -> 2 -> 1 thus 3 iterations\n# -> log2 8 = 3, Search Complexity = O(log n)\n\n# Insertion Complexity :\n# -> Same explanation as above and Insertion Complexity = O(log n)\n\n# Different Traversals -\n# 1. Depth First Search(in order[LVR], pre order[VLR], post order traversal[LRV])\n# 2. Breadth First Search \n\nclass node:\n def __init__(self,data=None):\n self.data=data\n self.right=None\n self.left=None \n self.parent=None \n\nclass BinarySearchTree:\n def __init__(self):\n self.root=None\n\n def insert(self,data):\n if self.root==None:\n self.root=node(data) \n else: \n self._insert(data,self.root) \n\n def _insert(self,data,cur_node):\n if data<cur_node.data:\n if cur_node.left==None:\n cur_node.left=node(data)\n else:\n self._insert(data,cur_node.left)\n\n elif data>cur_node.data:\n if cur_node.right == None:\n cur_node.right=node(data)\n else:\n self._insert(data,cur_node.right)\n else:\n print(\"Value already in tree!!!\")\n\n \n def print_tree(self):\n if self.root!=None:\n self._print_tree(self.root)\n \n def _print_tree(self,cur_node):\n # In order Traversal\n if cur_node!=None:\n self._print_tree(cur_node.left)\n print(cur_node.data)\n self._print_tree(cur_node.right)\n\n\n def height(self):\n if self.root != None:\n return self._height(self.root,0)\n else:\n return 0\n \n def _height(self,cur_node,cur_height):\n if cur_node==None:\n return cur_height\n left_height=self._height(cur_node.left,cur_height+1)\n right_height=self._height(cur_node.right,cur_height+1)\n return max(left_height,right_height)\n\n def search(self,value):\n if self.root==None:\n return False\n else:\n return self._search(self.root,value)\n \n def _search(self,cur_node,value):\n if cur_node.data == value:\n return True\n elif cur_node.data < value and cur_node.right!=None:\n return self._search(cur_node.right,value)\n elif cur_node.data > value and cur_node.left!=None:\n return self._search(cur_node.left,value)\n return False\n\n\n\ndef fill_tree(tree,num_eles=25,max_int=1000):\n from random import randint\n for _ in range(num_eles):\n cur_elem = randint(0,max_int)\n tree.insert(cur_elem)\n return tree\n \ntree=BinarySearchTree()\n#tree=fill_tree(tree)\n\ntree.insert(5)\ntree.insert(1)\ntree.insert(3)\ntree.insert(6)\ntree.insert(8)\ntree.insert(9)\ntree.insert(2)\n\ntree.print_tree()\nh=tree.height()\nprint(\"Height of the tree : {}\".format(h))\nprint(tree.search(8))\n"
},
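The traversal comments in BST.py above mention breadth-first search, but only in-order is implemented; a minimal level-order (BFS) sketch that could be appended to that file (it assumes the `BinarySearchTree`/`node` classes and the `tree` instance built at the bottom):

```python
# Level-order (BFS) traversal sketch for the BinarySearchTree above.
from collections import deque

def bfs_print(tree):
    if tree.root is None:
        return
    queue = deque([tree.root])        # start from the root
    while queue:
        cur = queue.popleft()         # FIFO order yields level-by-level output
        print(cur.data)
        if cur.left:
            queue.append(cur.left)
        if cur.right:
            queue.append(cur.right)

bfs_print(tree)   # 'tree' is the instance built at the bottom of BST.py
```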
{
"alpha_fraction": 0.7167878150939941,
"alphanum_fraction": 0.7257105112075806,
"avg_line_length": 44.164180755615234,
"blob_id": "1a36b2d3c562f059dc8c89e09fcf0223d290d141",
"content_id": "97e3851dfd8f36f32a1ec582d0ffe93c4ff96b3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3026,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 67,
"path": "/OOP/Class_Objects.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Why Python ?\n# Python supports all programming paradigms including :-\n# 1. Functional Programming (original data remains unaltered during the process.\n# 2. Procedural Programming : where large code is broken down into small functions.\n# 3. Object Oriented Programming : where every thing in a program can be considered\n# as an object and can represent any real world entity.\n\n# Object ? Any real world entity can be termed as an object. For e.g. if there's a\n# company then every employee can be considered as an object.\n# Every object has certain data that can be called as an attribute(Variables) and\n# based on this data it can perform certain actions known to be the behaviour(Methods)\n# Methods :- Funcitons in OOPs is called as Methods.\n\n# Classes ? It is like a Factory that is responsible for DESIGNING different objects.\n# For e.g. A fan needs to be designed at a particular place, and there is not just one\n# fan, but many different fans of the same brand in different households. Fan can be\n# considered as an object(i.e. instance of the class) and the factory used for designing\n# can be considered as a Class. Thus, before creating an Object we need to create a Class.\n\n\n# Class is defined by 'class' keyword along with the name of the class.\nclass Computers:\n\n # Heap Memory :- Place where all the objects are stored in Memory. Whenever an object\n # is created it is stored in Heap Memory(print(id(comp1))). Now, the size allocated\n # depending upon the number of variables defined in the constructor.\n\n # For defining Variables of the class we make use of the Special Method also known as\n # the constructor of the class. It is a special method because unlike config(), it'll be\n # automatically each time an object is created.\n def __init__(self,cpu,ram):\n self.cpu=cpu\n self.ram=ram\n\n # Since every computer has a configuration.[Method]\n # 'self' is the pointer to the present object that we are passing, from line 32 self->comp1\n # self.cpu is required as CPU is not a local variable but associated with the Object.\n def config(self):\n return (\"{}, {} , 1TB\".format(self.cpu,self.ram))\n\n # Comparing 2 objects, one reference via 'self' other via the 'other'[can have any value] name.\n def compare(self,other):\n if self.cpu == other.cpu:\n return True\n else:\n return False\n\n# creating an instance/Object of the class\ncomp1=Computers(\"i5\",16)\ncomp2=Computers(\"amd A4\",8)\n\n# To use the fn of the class\nprint(Computers.config(comp1)) # if no value is given in config it shoots an error as it doesn't\nprint(Computers.config(comp2)) # know which object we are dealing with.\n\n# In place of the above code, directly this can also be used, behind the scene config takes comp1 as argument.\nprint(comp1.config())\n\n# Changing and Printing the variable values\ncomp1.cpu='i7'\nprint(comp1.cpu,comp1.ram)\n\n# Comparing 2 objects\nif comp1.compare(comp2):\n print(\"Same Machines\")\nelse:\n print(\"Different Machines\")\n"
},
{
"alpha_fraction": 0.6975036859512329,
"alphanum_fraction": 0.7055800557136536,
"avg_line_length": 30.674419403076172,
"blob_id": "3c6df6ea058834ac39d572a7b3ad7d2fe4a929e1",
"content_id": "abed360afb30e64d9045e2242c7f9bebcb5e46c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1362,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 43,
"path": "/Generators_fib.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Generators : Like iterators are used to generate a sequence. This is particularly\n# implemented by defining a function and replacing a 'return' keyword with a keyword \"yield\"\n# which unlike 'return' will not just return a value but a Generator obj.\n\n# Implementing Generators via fibonacci series\n\ndef fib(limit):\n a,b=0,1\n\n while a<limit:\n # yield keyword that returns a Generator obj, unlike 'return' it'll not terminate the fn\n yield a\n a,b=b,a+b\n\nx=fib(5)\nprint(next(x))\nprint(x.__next__())\nprint(next(x))\nprint(next(x))\nprint(next(x))\n\nprint('Using for loop')\nfor i in fib(5):\n print(i)\n\n\n# BENEFIT of using a GENERATORS : \n# 1. makes the code more readable.\n# 2. Generators come handy when we don't want the entire values in a single go in the memory,\n# and use one value at a time which can save the memory consumption.\n\n# When all the 'next()' options are exhausted then it returns an \"StopIteration\" error.\n# 'for' loop can be used in place of using 'next()' all the time.\n\n'''\n# Squaring the numbers using list comprehension to yield result in generator\n\n# By using '(',')' one can yeild result in generators, in below sqr_nums is a generator\nsqr_nums=(x*x for x in [1,2,3,4,5])\n\n# To print all the numbers together, by doing this we loose the performance benefits of using a generator\nprint(list(sqr_nums))\n'''\n"
},
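The memory point in Generators_fib.py can be made concrete with `sys.getsizeof` (a rough illustration; exact byte counts vary by Python version):

```python
# Rough illustration: a generator object stays tiny, a list holds every element.
import sys

squares_list = [x * x for x in range(100000)]
squares_gen = (x * x for x in range(100000))
print(sys.getsizeof(squares_list))   # hundreds of kilobytes
print(sys.getsizeof(squares_gen))    # on the order of 100-200 bytes, regardless of range
```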
{
"alpha_fraction": 0.6786941289901733,
"alphanum_fraction": 0.6786941289901733,
"avg_line_length": 31.33333396911621,
"blob_id": "fe8e1f173e5b986c8b7cbe2a12e7ae50dfec7ec0",
"content_id": "09532bb5f2dd97621493e211394e602d03faf6ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 18,
"path": "/AboutFunctions/ClassCallable.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# By default at the time when we are creating an instance of the class say\n# resolver=Resolver() -> () :- calls the contructor of the class that can be used\n# for initialising the values. Functionality of this can be extended as below\n\ndef sequence_class(immutable):\n if immutable:\n class_=tuple\n\n else:\n class_=list\n return class_\n\nif __name__=='__main__':\n seq=sequence_class(immutable=True)\n # seq is of type class in this particular case tuple class\n t=seq('shri')\n # Now t contains a tuple with values as each alphabet of shri\n print(t)\n"
},
{
"alpha_fraction": 0.6239849925041199,
"alphanum_fraction": 0.6433479189872742,
"avg_line_length": 24.015625,
"blob_id": "6ff88b336ad70447e5f777491e1811453d2a8ac7",
"content_id": "54d755d90bdfcd5805eed36dbbe86717eb2893fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1601,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 64,
"path": "/OOP/inheritance.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Inheritance : similar to real life all the property of the parents is inherited by\n# the Child class.\n# Parent Class also called as the Super Class and Child Class as the Sub Class.\n# Types of inheritance :\n# 1. Single level Inheritance (A<-B)\n# 2. Multilevel Inheritance (A<-B<-C)\n# 3. Multiple Inheritance (A<-C ; B<-C)\n\nclass A:\n def __init__(self):\n print('in A init')\n\n def feature1(self):\n print(\"Feature 1 working\")\n\n def feature2(self):\n print(\"Feature 2 working\")\n\n# Simplying by class B(a) , we are inheriting features of A.\n# B is the child class and A is the parent class.\nclass B:\n def __init__(self):\n # Used to call the init of class 'A'\n #super().__init__()\n print('in B init')\n\n def feature3(self):\n print(\"Feature 3 working\")\n\n def feature4(self):\n print(\"Feature 4 working\")\n\n\n\n# Multiple inheritance\nclass C(A,B):\n def __init__(self):\n # has 2 super class, hence makes use of MRO(Method Resolution Order L->R)\n super().__init__()\n print(\"in C init\")\n\n def feature5(self):\n print(\"Feature 5 working\")\n\n#a1=A()\n#a1.feature1()\n#a1.feature2()\n\n# If B inherits A and B has no contructor then it calls the constructor of A and if\n# B has its own contructor then it uses __init__ of itself.\nb1=B()\nb1.feature3()\nb1.feature4()\n\n# Now in order for B to use features of A, that's when Inheritance comes into picture.\n# Thus after Inheritance. Change the code before uncommenting\n#b1.feature1()\n#b1.feature2()\n\n# Uncomment if using C\nc=C()\n#c.feature1()\n#c.feature3()\n#c.feature5()\n"
},
{
"alpha_fraction": 0.636734664440155,
"alphanum_fraction": 0.636734664440155,
"avg_line_length": 31.633333206176758,
"blob_id": "6f09fc96cae0de2b27cb5fc1f290d9d5850fc20d",
"content_id": "b4d4341bec9ecb6bea09f254062ff7a33117e718",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/AboutFunctions/resolver.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "\n# Understanding the importance of __call__ in class definition.\nimport socket\n\nclass Resolver:\n # Constructor for the class\n def __init__(self):\n # '_cache' Private Variable to class\n self._cache={}\n\n # when a normal instance call is made it can help to act it as a function.\n def __call__(self,host):\n if host not in self._cache:\n self._cache[host]=socket.gethostbyname(host)\n return self._cache\n\n def clear(self):\n self._cache.clear()\n\n def has_host(self,host):\n return host in self._cache\n\nif __name__=='__main__':\n resolver=Resolver() # Instance is created\n print(\"Checking if google.com is present initially\")\n print(resolver.has_host('google.com'))\n print(\"Adding google.com\")\n print(resolver('google.com')) # __call__ makes it behave as a function\n print(resolver.has_host('google.com'))\n resolver.clear() # clears the dictionary\n print(resolver.has_host('google.com'))\n"
},
{
"alpha_fraction": 0.6917340159416199,
"alphanum_fraction": 0.7016780376434326,
"avg_line_length": 26.741378784179688,
"blob_id": "29cd82b4c08f87d0653ed19c917e20714cd19955",
"content_id": "9063b9a150aaf811e45d13d32b488e3635e353f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1609,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 58,
"path": "/AbstractClass/Bookabc.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Abstract Method : Method that only has a declaration but no functionality.\n# Abstract Class : The one that contains an abstract method. Objects of such classes can't be\n# created.\n# Python by defalt doesn't support Abstraction and hence to do that we can make use of\n# abc module.\n\n# Hackerrank Day 13 challenge(30 Days of Code)\n#Given a Book class and a Solution class,\n# write a MyBook class that does the following:\n# 1. Inherits from Book\n# 2. Has a parameterized constructor taking these parameters:\n# string Title\n# string author\n# int Price\n# Implements the Book class' abstract display() method\n# so it prints these lines:\n# 1. Title : <title>\n# 2. Author : <author>\n# 3. Price : <price>\n\n\nfrom abc import ABCMeta, abstractmethod\n\nclass Book(metaclass=ABCMeta):\n\tdef __init__(self,title,author):\n\t\tself.title=title\n\t\tself.author=author\n\n\t# Abstractmethod keyword tells that this is an abstract method\n\t@abstractmethod\n\tdef display(): pass\n\nclass MyBook(Book):\n\tdef __init__(self,title,author,price):\n\t\tsuper().__init__(title,author) # inherits property from the Parent class\n\t\tself.price=price\n\n # Implementing the Abstract method from the base class\n\tdef display(self):\n\t\tprint(\"Title: {}\\nAuthor: {}\\nPrice: {}\".format(self.title,self.author,self.price))\n\n\nif __name__=='__main__':\n\ttitle=input()\n\tauthor=input()\n\tprice=int(input())\n\tnew_novel=MyBook(title,author,price)\n\tnew_novel.display()\n\n\n#---------------OUTPUT------------------------\n# Shri-2:AbstractClass shri$ python Bookabc.py\n# The Alchemist\n# Paulo Coelho\n# 248\n# Title: The Alchemist\n# Author: Paulo Coelho\n# Price: 248\n"
},
{
"alpha_fraction": 0.4631391167640686,
"alphanum_fraction": 0.4726516008377075,
"avg_line_length": 21.7297306060791,
"blob_id": "1b124d85a4d79358d4fc7785f36bad8af2542ed1",
"content_id": "92dce0a14d8912d9c338f6229c9d0256993602e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1682,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 74,
"path": "/Data_Structures/Linked_List.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "class node:\n def __init__(self,data=None):\n self.data=data \n self.next=None \n\nclass linked_list:\n def __init__(self):\n self.head=node()\n\n def append(self,data):\n new_node=node(data)\n cur=self.head\n\n while cur.next!=None:\n cur=cur.next\n \n cur.next=new_node\n\n def length(self):\n l=0\n cur=self.head\n while cur.next != None:\n cur=cur.next\n l+=1\n return l\n \n def display(self):\n cur=self.head \n while cur.next!=None:\n cur=cur.next\n if cur.next!=None:\n print(cur.data,end=\",\")\n else:\n print(cur.data)\n \n def get(self,index):\n cur=self.head\n ele=0\n l=self.length()\n if (index+1)<=l:\n while index!=-1:\n cur=cur.next \n ele=cur.data\n index-=1\n else:\n #print(\"Index out of Bound\")\n return \"Index out of Bound\"\n return ele\n\n def remove(self,index):\n cur=self.head \n l=self.length()\n if (index+1)<=l:\n while index!=-1:\n last_node=cur\n cur=cur.next\n index-=1\n print(cur.data)\n last_node.next=cur.next\n \nif __name__ ==\"__main__\":\n mylist=linked_list()\n mylist.append(1)\n mylist.append(2)\n mylist.append(3)\n mylist.append(4) \n print(mylist.length())\n mylist.display()\n print(mylist.get(3))\n mylist.remove(0) \n mylist.display()\n\n# Applications :\n# 1. Dynamic memory allocations i.e. to use ll of free blocks.\n"
},
{
"alpha_fraction": 0.6355352997779846,
"alphanum_fraction": 0.6628701686859131,
"avg_line_length": 27.322580337524414,
"blob_id": "1ca1cf3e89d955ba18663907f47e585727543c44",
"content_id": "2eea5a0572b738de1b643338422c6e3016be0c43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1756,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 62,
"path": "/OOP/Types_Variables_Methods.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Types of Variables :\n# 1. Instance Variables : Different for different objects\n# 2. Class Variables : Variables whose value is same accross all the objects.\n\n# Types of Methods :\n# 1. Instance Method : any method that has 'self' as an argument.\n# Note : Accessor Methods(When we just want to access the values), Mutator Methods(Modifying the values)\n# 2. Class Method : any method that deals with class variables.\n# 3. Static Method : Methods used to add additional functionality and doesn't work with class/ instance\n# variables.\n\n# namespace : area where you create and store objects/Variables.\nclass Cars:\n # Variables defined outside init are termed as Class variables and are created in\n # Class namespace\n wheels=4\n\n # Varibale defined within init are said to be instance Variables and are created in\n # Instance namespace\n def __init__(self):\n self.mil=10\n self.com=\"BMW\"\n\nclass Student:\n\n school=\"APS\"\n\n def __init__(self,m1,m2,m3):\n self.m1=m1\n self.m2=m2\n self.m3=m3\n\n # Instance Method as it is having an argument 'self'\n def avg(self):\n return (self.m1+self.m2+self.m3)/3\n\n # Class Method : 'cls' keyword, decorator needed for 'cls'\n @classmethod\n def info(cls):\n return (cls.school)\n\n # Decorator for static methods.\n @staticmethod\n def stats_class():\n return ('This is a Student Class with 3 methods')\n\nif __name__=='__main__':\n c1=Cars()\n c2=Cars()\n\n # Changing values of instance variables.\n c1.mil=8\n\n print(c1.com,c1.mil,c1.wheels)\n print(c2.com,c2.mil,c2.wheels)\n\n s1=Student(25,36,30)\n s2=Student(40,39,42)\n print(s1.avg())\n print(s2.avg())\n print(Student.info())\n print(Student.stats_class())\n"
},
{
"alpha_fraction": 0.693417489528656,
"alphanum_fraction": 0.6988277435302734,
"avg_line_length": 32.60606002807617,
"blob_id": "833f71c663798902ce9c9161d8d11089605e2dcf",
"content_id": "659b238a1c053a472a7ce7d3a9adbd6c3e7be602",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1109,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 33,
"path": "/OOP/Encapsulation.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "# Encapsulation :\n# One of the feature of OOPs is encapsulation that prevents the access to certain\n# variables and methods and protect the same.\n# It is about putting restrictions on accessing variables and methods directly and can\n# prevent the accidental modification of data.\n# Different Levels of encapsulation :\n# 1. Protected members : Members of the class that cannot be accessed outside the class.\n# Can be achieved by using a single \"_\" before a function or variable name.\n# 2. Private members : Class members declared private should not be accessed by either\n# outside the class or by derived class.\nclass Base:\n\n def __init__(self):\n # Protected Variable\n self._a=2\n\n # Declaring a Private Variable.\n self.__b=\"Private Variable\"\n\nclass Derived(Base):\n\n def __init__(self):\n super().__init__()\n print(\"Calling the protected member a....\")\n print(self._a)\n print(\"Calling the private member\")\n print(self.__b)\n\nobj1=Derived()\n\nobj2=Base()\n# Calling protected member outside the class will result in Attribute error.\nprint(obj2.a)\n"
},
{
"alpha_fraction": 0.5359477400779724,
"alphanum_fraction": 0.5686274766921997,
"avg_line_length": 16,
"blob_id": "51f2ac36cf087596ac5cc0d7ec40502609aa7b38",
"content_id": "68d3bad100da65b9b5d15ed57d4dbe4438e48b75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 9,
"path": "/Packages/reader/compressed/bzipped.py",
"repo_name": "shriawesome/Python-Concepts",
"src_encoding": "UTF-8",
"text": "import bz2\nimport sys\n\nopener=bz2.open\n\nif __name__=='__main__':\n f=bz2.open(sys.argv[1],mode='wt')\n f.write(' '.join(sys.argv[2:]))\n f.close()\n"
}
] | 18 |
jjjjohnson/RoboND-Perception-Exercises | https://github.com/jjjjohnson/RoboND-Perception-Exercises | 592f4f74acfcee99de79cefaa9c7a18c6bd747fc | 2b1d40ff016115150f56131b6e59be33a4dfda3e | e7aa08e8f034c4f2fcfb626c9fb002159232aea6 | refs/heads/master | 2020-12-02T19:39:24.819870 | 2017-08-11T10:27:58 | 2017-08-11T10:27:58 | 96,371,111 | 0 | 0 | null | 2017-07-06T00:10:52 | 2017-07-06T00:10:55 | 2017-08-09T02:07:36 | Python | [
{
"alpha_fraction": 0.7600519061088562,
"alphanum_fraction": 0.7678340077400208,
"avg_line_length": 28.69230842590332,
"blob_id": "1d83db6d11700c3645757c51f1ffa3c006ba5c4d",
"content_id": "e348f1c234ee31c1ecdfd8b4c49ef53604abef83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 771,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 26,
"path": "/Exercise-1/outlier_remove.py",
"repo_name": "jjjjohnson/RoboND-Perception-Exercises",
"src_encoding": "UTF-8",
"text": "# Import PCL module\nimport pcl\n\n# Load Point Cloud file\ncloud = pcl.load('table_scene_lms400.pcd')\n\n# Much like the previous filters, we start by creating a filter object: \noutlier_filter = cloud.make_statistical_outlier_filter()\n\n# Set the number of neighboring points to analyze for any given point\noutlier_filter.set_mean_k(50)\n\n# Any point with a mean distance larger than global (mean distance+x*std_dev) will be considered outlier\nx = 1\noutlier_filter.set_std_dev_mul_thresh(x)\n\n# Finally call the filter function for magic\ncloud_filtered = outlier_filter.filter()\n\n# pcl.save(cloud, filename)\nfilename = 'outlier_removed.pcd'\npcl.save(cloud_filtered, filename)\n\nfilename = 'outlier.pcd'\noutlier_filter.set_negative(True)\npcl.save(outlier_filter.filter(), filename)"
},
{
"alpha_fraction": 0.7132372260093689,
"alphanum_fraction": 0.7218872904777527,
"avg_line_length": 33.0625,
"blob_id": "77ff0b5e61c5dbcd1cc8d517de8330873fd1f1ad",
"content_id": "92c971c08a8fa6d371e8aab4c1220399df43bc50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3815,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 112,
"path": "/Exercise-2/sensor_stick/scripts/segmentation.py",
"repo_name": "jjjjohnson/RoboND-Perception-Exercises",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# Import modules\nfrom pcl_helper import *\n\n# TODO: Define functions as required\n\n# Callback function for your Point Cloud Subscriber\ndef pcl_callback(pcl_msg):\n\n\t# TODO: Convert ROS msg to PCL data\n\tcloud = ros_to_pcl(pcl_msg)\n\n\t# TODO: Voxel Grid Downsampling\n\tvox = cloud.make_voxel_grid_filter()\n\tLEAF_SIZE = 0.01 \n\t# Set the voxel (or leaf) size \n\tvox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\n\t# Call the filter function to obtain the resultant downsampled point cloud\n\tcloud_filtered = vox.filter()\n\t# TODO: PassThrough Filter\n\tpassthrough = cloud_filtered.make_passthrough_filter()\n\n\t# Assign axis and range to the passthrough filter object.\n\tfilter_axis = 'z'\n\tpassthrough.set_filter_field_name (filter_axis)\n\taxis_min = 0.77\n\t#axis_min = 0.6 # leave vertical side of the table\n\taxis_max = 1.1\n\tpassthrough.set_filter_limits (axis_min, axis_max)\n\n\t# Finally use the filter function to obtain the resultant point cloud. \n\tcloud_filtered = passthrough.filter()\n\n\t# TODO: RANSAC Plane Segmentation\n\tseg = cloud_filtered.make_segmenter()\n\n\t# Set the model you wish to fit \n\tseg.set_model_type(pcl.SACMODEL_PLANE)\n\tseg.set_method_type(pcl.SAC_RANSAC)\n\n\t# Max distance for a point to be considered fitting the model\n\tmax_distance = 0.01\n\tseg.set_distance_threshold(max_distance)\n\n\t# Call the segment function to obtain set of inlier indices and model coefficients\n\tinliers, coefficients = seg.segment()\n\n\n\t# TODO: Extract inliers and outliers\n\t# Extract inliers\n\textracted_inliers = cloud_filtered.extract(inliers, negative=False)\n\t# Extract outliers\n\textracted_outliers = cloud_filtered.extract(inliers, negative=True)\n\n\t# TODO: Euclidean Clustering\n\twhite_cloud = XYZRGB_to_XYZ(extracted_outliers)# Apply function to convert XYZRGB to XYZ\n\ttree = white_cloud.make_kdtree()\n\t# Create a cluster extraction object\n\tec = white_cloud.make_EuclideanClusterExtraction()\n\t# Set tolerances for distance threshold \n\t# as well as minimum and maximum cluster size (in points)\n\tec.set_ClusterTolerance(0.015)\n\tec.set_MinClusterSize(20)\n\tec.set_MaxClusterSize(1500)\n\t# Search the k-d tree for clusters\n\tec.set_SearchMethod(tree)\n\t# Extract indices for each of the discovered clusters\n\tcluster_indices = ec.Extract()\n\n\t# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately\n\t#Assign a color corresponding to each segmented object in scene\n\tcluster_color = get_color_list(len(cluster_indices))\n\n\tcolor_cluster_point_list = []\n\n\tfor j, indices in enumerate(cluster_indices):\n\t for i, indice in enumerate(indices):\n\t color_cluster_point_list.append([white_cloud[indice][0],\n\t white_cloud[indice][1],\n\t white_cloud[indice][2],\n\t rgb_to_float(cluster_color[j])])\n\n\t#Create new cloud containing all clusters, each with unique color\n\tcluster_cloud = pcl.PointCloud_PointXYZRGB()\n\tcluster_cloud.from_list(color_cluster_point_list)\n\t# TODO: Convert PCL data to ROS messages\n\tros_cloud_objects = pcl_to_ros(cluster_cloud)\n\tros_cloud_table = pcl_to_ros(extracted_inliers)\n\n\t# TODO: Publish ROS messages\n\tpcl_objects_pub.publish(ros_cloud_objects)\n\tpcl_table_pub.publish(ros_cloud_table)\n\nif __name__ == '__main__':\n\n\t# TODO: ROS node initialization\n\trospy.init_node('clustering', anonymous=True)\n\n\t# TODO: Create Subscribers\n\tpcl_sub = rospy.Subscriber(\"/sensor_stick/point_cloud\", pc2.PointCloud2, pcl_callback, queue_size=1)\n\n\t# TODO: 
Create Publishers\n\tpcl_objects_pub = rospy.Publisher(\"/pcl_objects\", PointCloud2, queue_size=1)\n\tpcl_table_pub = rospy.Publisher(\"/pcl_table\", PointCloud2, queue_size=1)\n\n\t# Initialize color_list\n\tget_color_list.color_list = []\n\n\t# TODO: Spin while node is not shutdown\n\twhile not rospy.is_shutdown():\n\t\trospy.spin()\n"
}
] | 2 |
steve-bush/golf-tournament | https://github.com/steve-bush/golf-tournament | 30167d333b020f48d9c39809c445a406185f2ff9 | 476acafccd3f3006efc9a733968b7de6b7e3afd6 | 38c8e6a294be581003d04d78981cf44af88b284e | refs/heads/main | 2023-02-26T08:56:12.645758 | 2021-02-04T20:54:18 | 2021-02-04T20:54:18 | 313,108,326 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7924107313156128,
"alphanum_fraction": 0.7924107313156128,
"avg_line_length": 39.727272033691406,
"blob_id": "9908abaa3e3cd45d3f049582bcf0b918fdd530cd",
"content_id": "49a983163ec8a854ce859973e7a4c1ecd7d7a11e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 448,
"license_type": "permissive",
"max_line_length": 179,
"num_lines": 11,
"path": "/README.md",
"repo_name": "steve-bush/golf-tournament",
"src_encoding": "UTF-8",
"text": "# golf-tournament\n\nCreate a csv file in format:\nplayer,golfer,golfer,...\nCan calculate score for any number of golfers.\n\nThen run script to output standings and scores to terminal and as a csv.\n\nCan change number of rounds played in script for calculating scores before the tournament is over. Requires pandas for reading html leaderboard and outputting to terminal and csv.\n\nUsing ESPN for getting [scores](https://www.espn.com/golf/leaderboard).\n"
},
{
"alpha_fraction": 0.6000795960426331,
"alphanum_fraction": 0.61440509557724,
"avg_line_length": 35.95588302612305,
"blob_id": "3b50aa5d55162240dd344c010c649e32c7778dcc",
"content_id": "4de0ac8cece7d3ae0fc3bec906dfbcae3da8a000",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2513,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 68,
"path": "/golf.py",
"repo_name": "steve-bush/golf-tournament",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\n\n# Variables to be set by the user\nurl = 'https://www.espn.com/golf/leaderboard' # Website used for scores and par\nfilename = 'players.csv' # csv file in name,player,player... format\nrounds = 4 # Number of rounds to use in when calculating the score\n\n# Parse espn for par\nhtml_content = requests.get(url).text\nsoup = BeautifulSoup(html_content, 'lxml')\npar = int(soup.find(class_='Leaderboard__Course__Location__Detail n8 clr-gray-04').get_text()[3:5])\n\n# Scrape espn leaderboard\ntable = pd.read_html(url)\nresults = table[0]\n\n# Create a dictionary of lists of columns\nresults_dict = results.to_dict(orient='list')\n\n# Clean the (a) from player names if needed\nfor i in range(len(results_dict['R1'])):\n name = results_dict['PLAYER'][i]\n if ' (a)' in name:\n results_dict['PLAYER'][i] = name[:4]\n\n# Read in player info\nplayer_golfers = {}\nwith open(filename) as f:\n for line in f:\n line = line.strip('\\n')\n line_list = line.split(',')\n player_golfers[line_list[0]] = line_list[1:]\n\n# Calculate each players score\nplayers = []\nplayer_scores = []\nfor player, golfers in player_golfers.items():\n player_score = 0\n for golfer in golfers:\n # Find index of the golfer in each list\n i = results_dict['PLAYER'].index(golfer)\n # Put the round scores into a usable format\n r1 = int(results_dict['R1'][i]) if results_dict['R1'][i] != '--' else 0\n r2 = int(results_dict['R2'][i]) if results_dict['R2'][i] != '--' else 0\n r3 = int(results_dict['R3'][i]) if results_dict['R3'][i] != '--' else 0\n r4 = int(results_dict['R4'][i]) if results_dict['R4'][i] != '--' else 0\n scores = [r1, r2, r3, r4]\n for j in range(rounds):\n # If player has made the cut\n if scores[j] != 0:\n # Add the par score\n player_score += scores[j] - par\n # If player missed the cut\n else:\n # Add the worst par score\n player_score += max(scores) - par\n players.append(player)\n player_scores.append(player_score)\n\n# Sort the players by score in reverse\nsorted_players = sorted(zip(players, player_scores), reverse=True)\n\n# Put the sorted list into pandas for output and showing\nplayer_df = pd.DataFrame(sorted_players, columns=['Name', 'Score'], index=range(1,len(players)+1))\nplayer_df.to_csv('scores.csv')\nprint(player_df)\n"
}
] | 2 |
reneichhorn/gameoflife | https://github.com/reneichhorn/gameoflife | e2dc230341acdb9d6840c183340056f44415e006 | cc302a23a5dae34e6c67e9e371a31bd53db11b58 | 90ac4a9939d6736d89445001a62ec1cdb8ccffb0 | refs/heads/master | 2023-06-05T00:05:19.374698 | 2021-06-18T15:12:28 | 2021-06-18T15:12:28 | 377,220,412 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.48638635873794556,
"alphanum_fraction": 0.5328764319419861,
"avg_line_length": 25.20388412475586,
"blob_id": "9b3e3720c83da80963a656651c7baddfdd250357",
"content_id": "70811631bd79f20b95d82ac784ada4870893180a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5399,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 206,
"path": "/main.py",
"repo_name": "reneichhorn/gameoflife",
"src_encoding": "UTF-8",
"text": "\nimport pyglet\nfrom pyglet import shapes\nimport random\nfrom cell import Cell\n\n\nwidth = 2500\nheight = 1300\nwindow = pyglet.window.Window(width, height)\nscale = 10\ncols = width // scale\nrows = height // scale\nbatch = pyglet.graphics.Batch()\nwidth = width - scale\nheight = height - scale\nDEAD = [0, 0, 0]\nALIVE = [255, 255, 255]\nPAUSED = False\nLIVELIHOOD = 0.45\ncells = []\nalreadyvisited = []\nlastshownplace = None\nmousepos = 0, 0\nfigureToPlace = None\nfigures = [\n [(0, -1), (1, -1), (1, 0), (0, 0)],\n [(-4, 0), (-4, -1), (-4, 1), (-3, -2), (-3, 2), (-2, -3), (-2, 3), (-1, -3), (-1, 3), (0,0), (1, -2), (1, 2), (2, -1), (2, 0), (2, 1), (3, 0)],\n [(-7, -8), (-7, 7), (-7, -7), (-7, 6), (-7, -6), (-7, 5),\n (-6, -5), (-6, 4), (-6, -8), (-6, 7),\n (-5, -8), (-5, 7),\n (-4, -4), (-4, 3), (-4, -8), (-4, 7),\n (-3, -4), (-3, 3), (-3, -8), (-3, 7),\n (-2, -3), (-2, 2), (-2, 6), (-2, -7),\n (-1, -2), (-1, 1),\n (0, 0), (0, -1),\n (1, 1), (1, -2), (1, -4), (1, 3),\n (2, -4), (2, 3),\n (3, -3), (3, 2), (3, -2), (3, 1),\n (4, -5), (4, 4), (4, -4), (4, 3),\n (5, -6), (5, 5), (5, -5), (5, 4), (5, -4), (5, 3), (5, -2), (5, 1),\n (6, -7), (6, 6), (6, -6), (6, 5), (6, -3), (6, 2),\n (7, -7), (7, 6), (7, -5), (7, 4),\n (8, -6), (8, 5), (8, -5), (8, 4), (8, -4), (8, 3),\n (9, -6), (9, 5), (9, -5), (9, 4), (9, -4), (9, 3),\n (10, -6), (10, 5), (10, -5), (10, 4)\n ]\n\n]\n\n\nfor j in range(0, rows):\n for i in range(0, cols):\n state = DEAD\n if random.random() > LIVELIHOOD:\n state = ALIVE\n gameobject = shapes.Rectangle(0+i*scale, height-j*scale,\n scale-1, scale-1, color=state,\n batch=batch)\n cells.append(Cell(gameobject, i, j, state))\n\nfor cell in cells:\n cell.getNeighbours(cells, cols)\n\n\ndef calculateNextBoard():\n global cells\n for cell in cells:\n cell.countAliveNeighbours()\n for cell in cells:\n cell.setState()\n cell.setColor()\n\n\ndef cellCountAliveNeighbours(cells):\n for cell in cells:\n cell.countAliveNeighbours()\n\n\ndef cellSetState(cells):\n for cell in cells:\n cell.setState()\n\n\ndef cleanEverything():\n global cells\n for cell in cells:\n cell.die()\n\n\[email protected]\ndef on_draw():\n window.clear()\n batch.draw()\n\n\[email protected]\ndef on_key_press(symbol, modifiers):\n global PAUSED, figures\n if chr(symbol) == 'p':\n PAUSED = not PAUSED\n if chr(symbol) == 'c':\n PAUSED = True\n cleanEverything()\n if chr(symbol) == '1':\n PAUSED = True\n # figureToPlace = figures[1]\n showFigure(figures[0])\n if chr(symbol) == '2':\n PAUSED = True\n # figureToPlace = figures[1]\n showFigure(figures[1])\n if chr(symbol) == '3':\n PAUSED = True\n # figureToPlace = figures[1]\n showFigure(figures[2])\n\n\ndef showFigure(figure):\n global cells, mousepos, height, cols, lastshownplace\n i, j = mousepos\n x = (i // scale)\n y = (height - j + scale//2)//scale\n lookup = cols * y + x\n origin = cells[lookup]\n offsets = figure\n if lastshownplace != (x, y) and lastshownplace is not None:\n for offset in offsets:\n xoffset = offset[0]\n yoffset = offset[1]\n lastshownplacex = lastshownplace[0]\n lastshownplacey = lastshownplace[1]\n neighbourlookup = (cols * (lastshownplacey + yoffset) + (lastshownplacex + xoffset)) % len(cells) - 1\n neighbour = cells[neighbourlookup]\n neighbour.state = DEAD\n neighbour.gameobject.color = DEAD\n for offset in offsets:\n xoffset = offset[0]\n yoffset = offset[1]\n neighbourlookup = (cols * (y + yoffset) + (x + xoffset)) % len(cells) - 1\n neighbour = cells[neighbourlookup]\n neighbour.state = ALIVE\n neighbour.gameobject.color = 
ALIVE\n lastshownplace = (x, y)\n pass\n\n\[email protected]\ndef on_mouse_press(x, y, button, modifiers):\n print('mousepress')\n global height\n x = x//scale\n y = (height - y + scale//2)//scale\n global cells, cols, ALIVE, DEAD\n lookup = cols * y + x\n print(cells[lookup])\n state = cells[lookup].state\n if state != DEAD:\n state = DEAD\n elif state == DEAD:\n state = ALIVE\n print(state)\n cells[lookup].state = state\n cells[lookup].gameobject.state = state\n print(cells[lookup].state, cells[lookup].gameobject.state)\n\n\[email protected]\ndef on_mouse_drag(x, y, dx, dy, buttons, modifiers):\n print('mousedrag')\n global height, cells, cols, ALIVE, DEAD, alreadyvisited\n x = x//scale\n y = (height - y + scale//2)//scale\n lookup = cols * y + x\n if lookup not in alreadyvisited:\n state = cells[lookup].state\n if state != DEAD:\n state = DEAD\n elif state == DEAD:\n state = ALIVE\n cells[lookup].state = state\n cells[lookup].gameobject.state = state\n alreadyvisited.append(lookup)\n\n\[email protected]\ndef on_mouse_release(x, y, button, modifiers):\n global alreadyvisited\n alreadyvisited = []\n\n\[email protected]\ndef on_mouse_motion(x, y, dx, dy):\n global mousepos\n mousepos = x, y\n\n\ndef update(dt):\n window.clear()\n batch.draw()\n global PAUSED\n if not PAUSED:\n calculateNextBoard()\n\n\npyglet.clock.schedule_interval(update, 1/20)\npyglet.app.run()\n"
},
{
"alpha_fraction": 0.48203498125076294,
"alphanum_fraction": 0.5217806100845337,
"avg_line_length": 32.105262756347656,
"blob_id": "8088521684365931128f5999b07e95138ae27eaa",
"content_id": "78f53bae3ae80732c0d47a6e8ef7c82770111c0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3145,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 95,
"path": "/cell.py",
"repo_name": "reneichhorn/gameoflife",
"src_encoding": "UTF-8",
"text": "DEAD = [0, 0, 0]\nALIVE = [255, 255, 255]\nALIVESTATE1 = [255, 0, 0]\nALIVESTATE2 = [255, 165, 0]\nALIVESTATE3 = [255, 255, 0]\nALIVESTATE4 = [0, 128, 0]\nALIVESTATE5 = [0, 0, 255]\nALIVESTATE6 = [75, 0, 130]\nALIVESTATE7 = [238, 130, 238]\nALIVESTATE8 = [255, 255, 255]\nWITHCOLOR = True\n\n\nclass Cell:\n def __init__(self, gameobject, i, j, state):\n self.gameobject = gameobject\n self.i = i\n self.j = j\n self.neighbours = []\n self.hasChanged = True\n self.state = state\n self.aliveNeighbours = 0\n self.aliveTime = 0\n\n def getNeighbours(self, cells, cols):\n i = self.i\n j = self.j\n for rowoffset in range(-1, 2):\n for coloffset in range(-1, 2):\n lookup = ((cols * (j + rowoffset)) + (i + coloffset)) % (len(cells) - 1)\n if (coloffset == 0 and rowoffset==0):\n continue\n self.neighbours.append(cells[lookup])\n\n def countAliveNeighbours(self):\n alivecount = 0\n for neighbour in self.neighbours:\n if not neighbour.hasChanged and neighbour.state == DEAD:\n continue\n if neighbour.gameobject.color != DEAD:\n alivecount += 1\n if alivecount > 3:\n break\n self.aliveNeighbours = alivecount\n\n def setState(self):\n self.hasChanged = False\n if self.state != DEAD:\n if self.aliveNeighbours < 2:\n self.hasChanged = True\n self.state = DEAD\n self.aliveTime = 0\n elif self.aliveNeighbours > 3:\n self.state = DEAD\n self.hasChanged = True\n self.aliveTime = 0\n else:\n if self.aliveNeighbours == 3:\n self.state = ALIVE\n self.hasChanged = True\n if self.hasChanged:\n self.gameobject.color = self.state\n self.setColor()\n\n def die(self):\n self.state = DEAD\n self.gameobject.color = self.state\n\n def setColor(self):\n if WITHCOLOR:\n if self.state != DEAD:\n self.aliveTime += 1\n if self.aliveTime < 40:\n self.gameobject.color = ALIVESTATE1\n elif self.aliveTime < 80:\n self.gameobject.color = ALIVESTATE2\n elif self.aliveTime < 160:\n self.gameobject.color = ALIVESTATE3\n elif self.aliveTime < 320:\n self.gameobject.color = ALIVESTATE4\n elif self.aliveTime < 640:\n self.gameobject.color = ALIVESTATE5\n elif self.aliveTime < 1280:\n self.gameobject.color = ALIVESTATE6\n elif self.aliveTime < 2560:\n self.gameobject.color = ALIVESTATE7\n elif self.aliveTime < 5120:\n self.gameobject.color = ALIVESTATE8\n self.aliveTime += 1\n\n def setStateManually(self, state):\n self.hasChanged = True\n self.state = state\n self.gameobject.state = self.state\n print(self.state)\n"
}
] | 2 |
duanzhijianpanxia/myPython | https://github.com/duanzhijianpanxia/myPython | c9a2f01c9861b16925cf5e86d713e35f4334cad3 | b5fb32e7660a813a3f34ed870e9e8ab5df7e33f4 | 520230d05bd159fcc47bc173c8fbec607a6476db | refs/heads/master | 2020-05-13T21:19:47.325682 | 2019-06-27T09:28:08 | 2019-06-27T09:28:08 | 181,662,204 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7199265360832214,
"alphanum_fraction": 0.7327823638916016,
"avg_line_length": 18.03508758544922,
"blob_id": "67642957cedecbfdffef742dea0ca98f58a4c1bc",
"content_id": "906ed0d3118c89205f63b01aa69abfc41c9d033e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 57,
"path": "/myPython/spider/v6.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n任务要求和v5一样\n本案例只是利用Request来实现v5的功能\n\n利用parse模块模拟post请求\n分析百度翻译\n分析步骤:\n1. 打开相应网页并且按下F12\n2. 尝试输入单词girl,发现每敲击一个字母后都有请求\n3. 请求地址是 http://fanyi.baidu.com/sug\n4. 利用network-all-hearders,查看发现,formdata的值是kw:girl\n5. 检查返回内容格式,发现返回的是json格式内容==>需要用到json包\n'''\n\nfrom urllib import request, parse\nimport json\n\n'''\n大致流程是:\n1. 利用data构造内容,然后urlopen打开\n2. 返回一个json格式的结果\n3. 结果应该就是girl的释义\n'''\n\nbaseurl = 'https://fanyi.baidu.com/sug'\n\n# 用来模拟form数据的一定是dict格式\ndata = {\n 'kw':input(\"Please input what you want to find:\")\n}\n\n# 需要使用parse模块对data进行编码\ndata = parse.urlencode(data).encode(\"utf-8\")\nprint(type(data))\n\nheaders = {\n # 因为使用post,至少应该包含content-length字段\n \"Content-Length\":len(data)\n}\n# 构造一个Request的实例\nreq = request.Request(url=baseurl, data=data, headers=headers)\n\n# 因为已经构造了一个Request的请求实例,则所有的请求信息都可以封装在request实例中\nrsp = request.urlopen(req)\n\njson_data = rsp.read().decode(\"utf-8\")\nprint(type(json_data))\nprint(json_data)\n\n# 把json字符转换成字典\njson_data = json.loads(json_data)\nprint(type(json_data))\nprint(json_data)\n\n# 把json字符转换成字典格式\nfor item in json_data['data']:\n print(item['k'], \" ==> \", item['v'])\n\n\n\n\n"
},
{
"alpha_fraction": 0.5340670943260193,
"alphanum_fraction": 0.5479559898376465,
"avg_line_length": 26.460432052612305,
"blob_id": "68c73cbab1d4d43f4bb2e006cda54af9b1525281",
"content_id": "71e912aacd5938edf66c9c28355b2ce0179d51d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5054,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 139,
"path": "/myPython/Tkinter/项目_屏保/屏保.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import random\nimport tkinter\n\n\nclass RandomBall():\n '''\n 定义运动的球的类\n '''\n\n def __init__(self, canvas, scrnwidth, scrnheight):\n '''\n canvas: 画布,所有的内容都应该在画布上呈现出来,此处通过此变量传入\n scrnwidth/scrnheigh:屏幕宽高\n '''\n # 初始化画布\n self.canvas = canvas\n # 球出现的初始位置要随机,此处位置表示的球的圆心\n # xpos表示位置的x坐标\n self.xpos = random.randint(10, int(scrnwidth) - 20)\n self.ypos = random.randint(10, int(scrnheight) - 20)\n # 定义球运动的速度\n # 模拟运动:不断的擦掉原来画,然后在一个新的地方再从新绘制\n # 此处x_move模拟x轴方向运动\n self.x_move = random.randint(4, 20)\n self.y_move = random.randint(4, 20)\n # 定义屏幕的大小\n self.scrnwidth = scrnwidth\n self.scrnheight = scrnheight\n # 球的大小随机,用半径表示\n self.radius = random.randint(20, 120)\n\n # 定义颜色\n # RGB表示法:三个数字,每个数字的值是0-255之间,表示红绿蓝三个颜色的大小\n # 在某些系统中,之间用英文单词表示也可以,比如red, green\n # 此处用lambda表达式\n c = lambda: random.randint(0, 255)\n self.color = '#%02x%02x%02x' % (c(), c(), c())\n\n def create_ball(self):\n '''\n 用构造函数定义的变量值,在canvas上画一个球\n '''\n # tkinter没有画圆形函数\n # 只有一个画椭圆函数,画椭圆需要定义两个坐标,\n # 在一个长方形内画椭圆,我们只需要定义长方形左上角和右下角就好\n # 求两个坐标的方法是,已知圆心的坐标,则圆心坐标减去半径能求出\n # 左上角坐标,加上半径能求出右下角坐标\n x1 = self.xpos - self.radius\n y1 = self.ypos - self.radius\n x2 = self.xpos + self.radius\n y2 = self.ypos + self.radius\n # 再有两个对角坐标的前提下,可以进行画圆\n # fill表示填充颜色\n # outline是外围边框颜色\n self.item = self.canvas.create_oval(x1, y1, x2, y2, \\\n fill=self.color, \\\n outline=self.color)\n\n def move_ball(self):\n # 移动球的时候,需要控制球的方向\n # 每次移动后,球都有一个新的坐标\n self.xpos += self.x_move\n # 同理计算ypos\n self.ypos += self.y_move\n # 以下判断是会否撞墙\n # 撞了南墙就要回头\n # 注意撞墙的算法判断\n if self.xpos >= self.scrnwidth - self.radius:\n self.x_move = -self.x_move\n if self.ypos >= self.scrnheight - self.radius:\n self.y_move = -self.y_move\n if self.xpos < self.radius:\n self.x_move = abs(self.x_move)\n if self.ypos < self.radius:\n self.y_move = abs(self.y_move)\n\n\n # 在画布上挪动图画\n self.canvas.move(self.item, self.x_move, self.y_move)\n\n\nclass ScreenSaver():\n '''\n 定义屏保的类\n 可以被启动\n '''\n # 如何装随机产生的球?\n balls = []\n\n def __init__(self):\n # 每次启动球的数量随机\n self.num_balls = random.randint(6, 10)\n\n self.win = tkinter.Tk()\n self.width = self.win.winfo_screenwidth()\n self.height = self.win.winfo_screenheight()\n # 取消边框\n self.win.overrideredirect(1)\n #######################self.win.attributes('-alpha', 0.3)\n # 任何鼠标移动都需要取消\n self.win.bind('<Motion>', self.myquit)\n # 按动任何键盘都需要退出屏保\n self.win.bind('<Any-Button>', self.myquit)\n\n # 创建画布,包括画布的归属,规格\n self.canvas = tkinter.Canvas(self.win, width=self.width, height=self.height)\n self.canvas.pack()\n\n # 在画布上画球\n for i in range(self.num_balls):\n ball = RandomBall(self.canvas, scrnwidth=self.width, scrnheight=self.height)\n ball.create_ball()\n self.balls.append(ball)\n\n self.run_screen_saver()\n self.win.mainloop()\n\n def run_screen_saver(self):\n for ball in self.balls:\n ball.move_ball()\n\n # after是200毫秒后启动一个函数,需要启动的函数是第二个参数\n self.canvas.after(99, self.run_screen_saver)\n\n def myquit(self, event):\n # 此处只是利用了事件处理机制\n # 实际上并不关心事件的类型\n # 作业:\n # 此屏保程序扩展成,一旦捕获事件,则判断屏保不退出\n # 显示一个Button,Button上显示事件类型,点击Button后屏保\n # 才退出\n self.win.destroy()\n'''\ndef main():\n ScreenSaver(6)\n'''\nif __name__ == \"__main__\":\n # 启动屏保\n ScreenSaver()"
},
{
"alpha_fraction": 0.6343749761581421,
"alphanum_fraction": 0.6343749761581421,
"avg_line_length": 19.0625,
"blob_id": "bc4376f52883e9097fa16a9c5e91816ac6c789bb",
"content_id": "17f600a324141730f5f170e3eac9b6bda2ecfd3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 16,
"path": "/myPython/习题课练习/logging/01.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import logging\n\nLOG_FORMAT=(\"%(asctime)s -- %(levelname)s -- %(message)s\")\nlogging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\n\ndef log(func):\n def wrapper(*arg,**kw):\n logging.error(\"this is an error\")\n return func(*arg, **kw)\n return wrapper\n\n@log\ndef test():\n print(\"test done\")\n\ntest()"
},
{
"alpha_fraction": 0.614535391330719,
"alphanum_fraction": 0.650413990020752,
"avg_line_length": 22.148935317993164,
"blob_id": "f7336bd3f8c4f1e40ad5fc3a720108f403b54b77",
"content_id": "7657eb98cb79c43419917314e6a587b1878186f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1333,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 47,
"path": "/myPython/Tkinter/Entry输入框案例.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\n# 模拟的是登录函数\ndef reg():\n # 从相应的输入框中获取用户输入\n name = e1.get()\n pwd = e2.get()\n\n t1 = len(name)\n t2 = len(pwd)\n\n if name == \"1111\" and pwd == \"2222\":\n # 需要理解西面代码的含义\n lb3[\"text\"] = \"登录成功\"\n else:\n lb3['text'] = \"用户名或密码错误\"\n # 输入框删除用户输入的内容\n # 注意delete的两个参数,表示删除从第几个开始到第几个结束\n e1.delete(0, t1)\n e2.delete(0, t2)\n\n# 启动舞台\nbaseFrame = tkinter.Tk()\nbaseFrame.wm_title(\"输入框案例\")\n\nlb1 = tkinter.Label(baseFrame, text=\"用户名\")\nlb1.grid(row=0, column=0, sticky=tkinter.W)\n\ne1 = tkinter.Entry(baseFrame)\ne1.grid(row=0, column=1, sticky=tkinter.E)\n\nlb2 = tkinter.Label(baseFrame, text=\"密码\")\nlb2.grid(row=1,column=0, sticky=tkinter.W)\n\ne2 = tkinter.Entry(baseFrame)\ne2.grid(row=1, column=1, sticky=tkinter.E)\ne2['show'] = '*'\n\n# button参数command的意思是,当按钮被点击后启动相应的处理函数\nbtn = tkinter.Button(baseFrame, text=\"登录\", foreground=\"green\", background=\"gray\", command=reg)\nbtn.grid(row=2, column=1,sticky=tkinter.E)\n\nlb3 = tkinter.Label(baseFrame, text=\"\")\nlb3.grid(row=3, column=0, sticky=tkinter.W)\n\n# 启动主Frame\nbaseFrame.mainloop()"
},
{
"alpha_fraction": 0.5709057450294495,
"alphanum_fraction": 0.6203110814094543,
"avg_line_length": 30.257143020629883,
"blob_id": "08ff1688a2e0f2b1ae27a0cef1f359279876eed6",
"content_id": "1e6bd410eda02a42e7f06f2381069b9ba3067310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1253,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 35,
"path": "/myPython/spider/v9_UserAgent.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\nUserAgent: 用户代理,简称UA, 属于heads的一部分,服务器通过UA来判断访问者身份\n常见的UA值,使用的时候可以直接复制粘贴,也可以用浏览器访问的时候抓包\n'''\nfrom urllib import request, error\n\n\nif __name__ == '__main__':\n\n url = \"http://www.baidu.com\"\n\n try:\n # UserAgent方法一:利用headers方法添加\n # headers = {}\n # headers['User-Agent'] = 'Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'\n # req = request.Request(url, headers=headers)\n\n # 方法二:利用add_header方法\n req = request.Request(url)\n req.add_header('UserAgent',\"Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19\")\n\n rsp = request.urlopen(req)\n html = rsp.read().decode()\n print(html)\n\n except error.HTTPError as e:\n print(\"HTTPError: {0}\".format(e.reason))\n print(\"HTTPError: {0}\".format(e))\n\n except error.URLError as e:\n print(\"URLError: {0}\".format(e.reason))\n print(\"URLError: {0}\".format(e))\n\n except Exception as e:\n print(e)"
},
{
"alpha_fraction": 0.6463878154754639,
"alphanum_fraction": 0.6463878154754639,
"avg_line_length": 19.30769157409668,
"blob_id": "9420e31baa0e30d28657cbf3d9cbeeba206324c7",
"content_id": "bec40cd8def30f0fd7fcc988c4a6d44ccf346eb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 583,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 13,
"path": "/myPython/Tkinter/项目_屏保/屏保.md",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# TKinter项目实战-屏保\n### 项目分析\n- 屏保可以自己启动,也可以手动启动\n- 一旦敲击键盘或者移动鼠标后,或者其他的引发时间,则停止\n- 如果屏保是一幅画的话,则没有画框\n- 图像的动作是随机的,具有随机性,可能包括颜色,大小,多少, 运动方向,变形等\n- 整个世界的构成是:\n - ScreenSaver:\n - 需要一个canvas, 大小与屏幕一致,没有边框\n\n - Ball\n - 颜色,大小,多少, 运动方向,变形等随机\n - 球能动,可以被调用"
},
{
"alpha_fraction": 0.7160940170288086,
"alphanum_fraction": 0.7396021485328674,
"avg_line_length": 25.380952835083008,
"blob_id": "66eb5d433e0b9de9474b287747d330bd41b3d1d5",
"content_id": "192930ce3669d4f904fc79ec7bf7080f03578712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 21,
"path": "/myPython/Tkinter/Tkinter_grid布局.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\nbaseFrame = tkinter.Tk()\nbaseFrame.wm_title(\"grid布局案例\")\n\n# 下面注释掉的内容 和下面梁行代码等效\n# lb1 = tkinter.Label(baseFrame,txt=\"账号\").grid(row=0,sticky=tkinter.W)\nlb1 = tkinter.Label(baseFrame,text=\"账号\")\nlb1.grid(row=0, sticky=tkinter.W)\n\nen = tkinter.Entry(baseFrame)\nen.grid(row=0, column=1,sticky=tkinter.E)\n\nlb2 = tkinter.Label(baseFrame,text=\"密码\").grid(row=1,sticky=tkinter.W)\n\ntkinter.Entry(baseFrame).grid(row=1, column=1, sticky=tkinter.E)\n\nbtn = tkinter.Button(baseFrame,text=\"登录\").grid(row=2, column=1, sticky=tkinter.W)\n\n\ntkinter.mainloop()"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 26,
"blob_id": "75e7b42363cb042feea0f589acb029e4fe18e94b",
"content_id": "d75b3926d553e3d9bf3dc4ddd0e73942f7bee0b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 7,
"path": "/myPython/习题课练习/logging/logging01.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import logging\n\nlogging.debug(\"This is a debug\")\nlogging.info(\"This is a info\")\nlogging.warning(\"This is a warning\")\nlogging.error(\"This is a error\")\nlogging.critical(\"This is a critical\")"
},
{
"alpha_fraction": 0.6258660554885864,
"alphanum_fraction": 0.6258660554885864,
"avg_line_length": 17.08333396911621,
"blob_id": "35346c54e32f3fb7b85e8f2795aea3efeba7cec6",
"content_id": "7c2d249859a1f51d64ddeb8bde4abacc41f7bd60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 24,
"path": "/myPython/习题课练习/logging/装饰器.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# 使用装饰器根据不同的函数,传入不同的日志\nimport logging\n\nLOG_FORMAT = \"%(asctime)s -- %(levelname)s -- %(message)s\"\nlogging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\ndef log(text):\n def decorator(func):\n def wrapper(*arg,**kw):\n return func(*arg,**kw)\n return wrapper\n return decorator\n\n\n@log(\"text done\")\ndef test():\n print(\"test done\")\n\n@log(\"main done\")\ndef main():\n print(\"main done\")\n\n\ntest()\nmain()"
},
{
"alpha_fraction": 0.6490963697433472,
"alphanum_fraction": 0.7033132314682007,
"avg_line_length": 21.89655113220215,
"blob_id": "4c2e71f24eadf1a9b7483fcec16bff0a3a3593ed",
"content_id": "39c0f50210add1efe12397eb208fa6e231264559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 29,
"path": "/myPython/Tkinter/canvas/在画布上创建可移动的对象.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\nbaseFrame = tkinter.Tk()\nbaseFrame.wm_title(\"画布上的可移动对象案例\")\n\ndef btnclick(event):\n global cv\n cv.move(id_ball, 11, 3)\n cv.move('fall', 11, 7)\n\n\ncv = tkinter.Canvas(baseFrame, width=500, height=300, background=\"gray\")\ncv.pack()\ncv.bind(\"<Button-1>\", btnclick)\n\n\n# 创建组件后返回ID\n# 正方形是特殊的矩形,正圆是特殊的椭圆\nid_ball = cv.create_oval(30, 30, 50, 50, fill=\"yellow\")\n\n# 创建组件使用tag属性\ncv.create_text((123, 55), text=\"I love Wang Xiaomei\", tag='fall')\n# 创建的时候如果没有指定tag可以利用addtag_withtag添加\n# 同类函数还有addtag_all, addtag_above, addtag_xxx等等\nid_rectangle = cv.create_rectangle(78, 45, 121, 131, fill=\"purple\")\ncv.addtag_withtag('fall', id_rectangle)\n\n\nbaseFrame.mainloop()\n"
},
{
"alpha_fraction": 0.659375011920929,
"alphanum_fraction": 0.6781250238418579,
"avg_line_length": 20.399999618530273,
"blob_id": "df2e084092c07043c567e43015bcdb40b4a64e20",
"content_id": "eb03d5eb71f9e4bdb442c784ebda40074bf40623",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/myPython/Tkinter/tkinter02.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\nbase = tkinter.Tk()\nbase.wm_title(\"Just have a try\")\n\nlb1 = tkinter.Label(base, text = \"Just have a try also\")\nlb1.pack()\n\nlb2 = tkinter.Label(base, text = \"hello world\", background = \"red\")\nlb2.pack()\n\nlb3 = tkinter.Label(base, text = \"Just so so.\", background = \"yellow\")\nlb3.pack()\n\ntkinter.mainloop()"
},
{
"alpha_fraction": 0.619178056716919,
"alphanum_fraction": 0.6328766942024231,
"avg_line_length": 17.576271057128906,
"blob_id": "4a9fb41a68d5edbefafe698094f59ff7e1372e83",
"content_id": "8bc6b0952857e2b2d357c30d065899e904786160",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 59,
"path": "/myPython/spider/v14/v14.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n打印cookie\n不需要进入主页函数了\n'''\n\n\nfrom urllib import request, parse\nfrom http import cookiejar\n\n# 1. 创建cookie实例\ncookie = cookiejar.CookieJar()\n# 2. 创建cookie管理器\ncookie_handler = request.HTTPCookieProcessor(cookie)\n# 3. 创建http请求管理器\nhttp_handler = request.HTTPHandler()\n# 4. 创建https管理器\nhttps_handler = request.HTTPSHandler()\n\n# 创建请求管理器\nopener = request.build_opener(https_handler, http_handler,cookie_handler)\n\n# 定义登录函数\ndef login():\n '''\n 负责初次登录\n 需要输入用户名和密码,用来获取登录cookie登录凭证\n :return:\n '''\n\n # 此处url需要从登录form表单的action属性中获取\n url = \"http://www.renren.com/PLogin.do\"\n\n # 此处键值需要从登录form的两个对应input中提取name属性\n data = {\n \"email\" : \"15619509989\",\n \"password\" : \"123456\"\n }\n # 把数进行编码\n data = parse.urlencode(data)\n\n # 创建一个请求对象\n req = request.Request(url, data=data.encode())\n # 使用opener发起请求\n rsp = opener.open(req)\n\n\nif __name__ == '__main__':\n login()\n\n '''\n 执行完login函数,会得到授权之后的cookie\n 我们尝试把cookie打印出来\n '''\n print(cookie)\n for i in cookie:\n print(type(i))\n print(i)\n for j in dir(i):\n print(j)"
},
{
"alpha_fraction": 0.5972222089767456,
"alphanum_fraction": 0.6577380895614624,
"avg_line_length": 19.59183692932129,
"blob_id": "719235a75632ed01e3a2f45a7f7ee0e20086f527",
"content_id": "da60f02d6351e4c54a11402487e1c7f5992d2b8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1184,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 49,
"path": "/myPython/spider/homework/dailifuwuqi/piliangdaili.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n构建代理集群/队列\n每次访问服务器,随机抽取一个代理\n随机抽取可以使用random.choice\n\n分析步骤\n1. 构建代理群\n2. 每次访问,随机选取代理并执行\n'''\nfrom urllib import request, error\nimport random\n\n# 使用代理步骤\n# 1. 设置代理地址\nproxy_list = [\n {\"http\": \"223.241.119.51\"},\n {\"http\": \"218.64.69.79\"},\n {\"http\": \"112.85.170.101\"},\n {\"http\": \"112.247.171.152:8060\"},\n {\"http\": \"111.40.84.73:9797\"}\n]\n# 2. 创建proxyhandler\nproxy_handler_list = []\nfor proxy in proxy_list:\n proxy_handler = request.ProxyHandler(proxy)\n proxy_handler_list.append(proxy_handler)\n# 3. 创建opener\nopener_list = []\nopener = request.build_opener(proxy_handler)\nopener_list.append(opener)\n\nurl = \"http://www.baidu.com\"\n# 现在如果访问url,则使用代理服务器\ntry:\n # 4. 安装opener\n opener = random.choice(opener_list)\n request.install_opener(opener)\n\n req = request.Request(url)\n rsp = request.urlopen(req)\n html = rsp.read().decode(\"utf-8\")\n print(html)\n\nexcept error.URLError as e:\n print(\"URLError: \".format(e.reason))\n print(\"UULError: \".format(e))\n\nexcept Exception as e:\n print(e)"
},
{
"alpha_fraction": 0.676402747631073,
"alphanum_fraction": 0.6871637105941772,
"avg_line_length": 20,
"blob_id": "6e3ac611b1697202b22ef56871cec489f1abc41c",
"content_id": "371903c5a111d17f90697c655ced71846869e1b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1457,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 62,
"path": "/myPython/习题课练习/logging/利用四大组件完成日志.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# 利用logging的四大组件实现日志功能\n# 打印出函数的执行时间,日志等级,日志消息\n# 使用装饰器\n# 不同的日志记录不同等级的日志\nimport logging\n\nlogger = logging.getLogger(\"mylogger\")\nlogger.setLevel(logging.DEBUG)\n# handler 处理器\n# TimeRotationFileHandler是按照日期划分日志的\n# RotationFileHandler 按照日志文件的大小划分日志\n\ndebug_handler = logging.FileHandler(\"1024debug1.log\")\ndebug_handler.setLevel(logging.DEBUG)\ndebug_handler.setFormatter(logging.Formatter(\"%(asctime)s -- %(levelname)s -- %(message)s\"))\n\nerror_handler = logging.FileHandler(\"1024error1.log\")\nerror_handler.setLevel(logging.ERROR)\nerror_handler.setFormatter(logging.Formatter(\"%(asctime)s -- %(levelname)s -- %(message)s\"))\n\nlogger.addHandler(debug_handler)\nlogger.addHandler(error_handler)\n\n\ndef log(func):\n def wrapper(*arg, **kw):\n logger.debug(\"this is a debug message\")\n logger.error(\"this is an error message\")\n return func(*arg, **kw)\n\n return wrapper\n\n\ndef loghigher(text):\n def decorator(func):\n def wrapper(*arg, **kw):\n logger.debug(text)\n logger.error(text)\n return func(*arg, **kw)\n\n return wrapper\n\n return decorator\n\n\n@log\ndef test():\n print(\"test done\")\n\n\n@loghigher(\"this is test1 done\")\ndef test1():\n print(\"test1 done\")\n\n\n@loghigher(\"this is main done\")\ndef main():\n print(\"main done\")\n\n\ntest1()\nmain()"
},
{
"alpha_fraction": 0.7005254030227661,
"alphanum_fraction": 0.7022767066955566,
"avg_line_length": 17.45161247253418,
"blob_id": "72c30a97fde34d8b02b9ef007942bc8642869b25",
"content_id": "ddc7e4bef93907b759d4d6c3f33d4cea3cd8fbf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 31,
"path": "/myPython/Tkinter/菜单/弹出式菜单案例.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\n\ndef makelabel():\n global baseFrame\n lb = tkinter.Label(baseFrame, text=\"PHP是最好的语言,我用的是Python\")\n lb.pack()\n\n\nbaseFrame = tkinter.Tk()\n\nbaseFrame.wm_title(\"弹出式菜单/上下文菜单 案例\")\n\nmenubar = tkinter.Menu(baseFrame)\nfor x in ['麻辣香菇', '汽锅鸡', '东坡肘子']:\n menubar.add_separator()\n menubar.add_command(label=x)\n\nmenubar.add_command(label='重庆火锅', command=makelabel)\n# 事件处理函数一定至少有一个参数,且第一个参数表示的是系统事件\n\n\ndef pop(event):\n # 注意使用event.x 和event.x_toot 的区别\n menubar.post(event.x_root, event.y_root)\n\n\nbaseFrame.bind(\"<Button-3>\", pop)\n\n\nbaseFrame.mainloop()"
},
{
"alpha_fraction": 0.623296320438385,
"alphanum_fraction": 0.6400168538093567,
"avg_line_length": 21.814102172851562,
"blob_id": "97d4b9df12bf9ab5c7602a64331ab62ab86909de",
"content_id": "34831d96b7d5973a3d957fb2fbe34bb90f40ef4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14069,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 312,
"path": "/myPython/面向对象/OOPnote.md",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# oop-python面向对象\n-Python的面向对象\n- 面向对象编程\n - 基础\n - 公有私有\n - 继承\n - 组合,Mixin\n- 魔法函数\n - 魔法函数概述\n - 构造魔法函数\n - 运算类魔法函数 \n# 1 面向对象概述(ObjectOriented,OO)\n- OOP思想\n - 接触到任意一个任务,首先想到的是任务这个世界的构成,是有模型构成的\n- 几个名词\n - OO:面向对象\n - OOA:面向对象的分析\n - OOD:面向对象的设计\n - OOI:面向对象的实现\n - OOP:面向对象的编程\n - OOA-->OOD-->OOI:面向对象的实现过程\n- 类和对象的概念\n - 类:抽象名词,代表一个集合,共性事物\n - 对象:具体的事物,单个个体\n - 类跟对象的关系\n - 一个对象,代表一类事物的某一个个体\n - 一个类,是抽象的代表的是一类事物\n - 类中的内容,应该具有两个内容\n - 表明事物的特征,叫做属性(变量)\n - 表明事物的功能或者动作,称为成员方法(函数)\n# 2.类的基本实现\n \n\n\n- 类的命名\n - 遵循变量命名规则\n - 大驼峰(由一个或者多个单词构成,每个单词首字母大写,,单词 和单词可以直接相连)\n - 精良避免跟系统命名相似的命名\n - 如何声明一个类\n - 必须使用class关键字\n - 类由属性和方法构成,其他不允许出现\n - 成员属性定义可以直接使用变量赋值,如果没有值,许使用None\n - 实例化类\n - 案例 01.py\n 变量 = 类名() # 实例化一个对象\n - 访问对象成员\n - 使用点操作符\n \n obj.成员属性名称\n obj.成员方法\n - 可以通过默认内置变量检查类和对象的所有成员\n - 对象所有成员检查\n # 注意dict前后各有两个下划线\n obj.__dict__\n - 类左右的成员\n \n # dict前后各有两个下划线\n class_name.__dict__\n \n \n# 3. anaconda基本使用\n- anaconda主要是一个虚拟环境管理器\n- 还是一个安装包管理器\n- conda list:显示anaconda安装的包\n- conda env list:显示anaconda虚拟环境列表\n- conda create -n xxx python=3.6:创建Python版本为3.6的虚拟环境,名称为xxx \n\n# 4.类和对象的成员分析\n- 类和对象都可以存储成员,成员可以归类所有,也可以归对象所有\n- 类存储成员时使用的是与类关联的一个对象\n- 独享存储成员时存储在当前对象中\n- 对象访问一个成员时,如果对象中没有该成员,尝试访问类中的同名成员\n 如果对象中有此成员,一定使用对象中的成员\n- 创建对象的时候,类中的成员不会放在对象中,而是得到一个空对象,没有成员\n- 通过对类中成员重新赋值或者通过对象添加成员时,对应成员会保存在对象中而不会修改类成员\n# 5. 关于self\n- self在对象的方格中表示当前对象本身,如果通过对象调用一个方法, 那么该对象会自动传入\n 到当前方法的第一个参数中\n - self 并不是关键字,只是一个用于接受对象的普通参数,理论上可以使用任何一个普通\n 变量代替\n- 方法中有self形参的方法称为非绑定类的方法,可以通过对象访问,没有self的绑定类的方法\n 只能通过类访问\n- 使用类访问绑定类的方法时,如果类方法中需要访问当前类的成员,可以通过\n __dict__成员名来访问\n \n \n# 6. 面向对象的三大特性\n- 封装\n- 继承\n- 多态\n\n## 6.1 封装\n- 封装就是对对象的成员进行访问限制\n- 封装的三个级别:\n - 公开:public\n - 受保护的:protected\n - 私有的:private\n - public,protect,private不是关键字\n- [python中下划线使用](http://blog.csdn.net/handsomekang/article/details/40303207)\n- 判别对象的位置\n - 对象内部\n - 对象外部\n - 子类中\n- 私有\n - 私有成员是最高级别的封装,只能在当前类或对象中访问\n - 在成员前面加上两个下划线即可\n \n class person():\n name = 'liuying' # name 是共有成员\n __age = 19 # __age就是私有成员\n- Python的私有不是真私有,是一种称为 name mangling的改名策略\n可以使用对象._classname_attributenname访问\n\n - 受保护的封装 protect \n - 受保护的封装是将成员进行一定级别的封装,然后,在类中或者子类中都可以进行\n 访问,但是外部不能访问\n - 公共封装,公开的 public\n - 这类封装实际上对成员没有进行任何操作,任何地方都可以访问\n \n## 3.2 继承\n- 继承就是一个类可以获得另外一个类中的成员属性和成员方法\n- 作用: 减少代码,增加代码的复用功能,同事可以设置类与类直接的关系\n - 继承与被继承的概念\n - 被继承的类叫父类,也叫基类,或叫超类\n - 用于继承的类叫做子类,也叫派生类\n - 继承与被继承一定存在一个is- a 的关系,即所属关系\n- 继承的语法参见oop-2.Python\n- 继承的特性\n - 所有子类都继承自object类,即所有的类都是object的子类\n - 子类一旦继承父类,则可以使用父类中除私有成员外的所有内容\n - 子类继承父类后,并没有将父类成员完全赋值到子类中,而是通过\n 引用关系访问调用\n - 子类中可以定义独有的成员属性和方法\n - 子类中定义的成员和父类成员如果相同,则优先使用子类成员\n - 子类如果想扩充父类中的方法,可以在定义新方法的同时访问父类成员\n 来进行成代码的复用\n - 可以使用[父类名.父类成员]的格式来带哦用父类成员,也可以使用super().父类成员\n 的格式来调用\n \n- 继承查找的顺序问题\n - 优先查找自己的变量\n - 没有则查找父类\n - 构造函数如果本类没有定义,则自动调用父类构造函数\n - 如果本类有定义,则不再向上查找\n- 构造函数\n - 是一类特殊的函数,在类进行实例化之前进行调用\n - 如果定义了构造函数,则实例化是使用构造函数,粗不查找父类构造函数\n - 如果没有定义,则自动查找父类构造函数\n - 如果子类没定义,父类构造函数带有参数,则构造函数对象是的参数应该按照父类参数构造\n \n- super\n - super不是关键字,而是一个类\n - super的作用是获取MRO(MethodResolustionOrder)列表中的第一个类\n - super与父类没有直接实质性关系,但是通过super可以调用到父类\n - super使用两个方法,参见在构造函数中调用父类的构造函数\n \n- 单继承和度继承\n - 单继承:每个类只继承一个\n - 多继承:每个类允许继承多个类(不推荐使用)\n- 单继承和多继承的优缺点\n - 单继承:\n - 传承有序,逻辑清晰语法简单,隐患少\n - 功能不能无限扩展,只能在当前唯一的继承链中扩展\n - 多继承:\n - 优点:类的功能扩展方便\n - 确定:继承关系混乱\n \n- 菱形继承/钻石继承\n - 多个子类继承同一父类,这些子类由被同一个类继承,于是继承关系图形成一个菱形图\n - ([MRO](https://www.cnblogs.com/whatisfantasy/p/6046991.html)\n - 关于多继承的MRO\n - MRO 就是多继承中,用于保存继承顺序的一个列表\n - Python本身采用C3 算法来计算多继承中的菱形继承\n - MRO列表的计算原则:\n - 子类永远在父类前面\n - 如果多个父类,则根据继承语法中括号内类的书写顺序存放\n - 如果多个类继承了同一个父类,孙子类中只会选取继承语法括号中的第一个父类的父类\n \n \n- 构造函数\n 
- 在对象进行实例化的时候,系统自动调用的一个函数叫构造函数,通常次函数用来对实例对象进行初始化,\n - 构造函数一定要有,如果没有,则自动向上查找,按照MRO书序,直到找到为止\n \n## 3.3多态\n- 多态就是统一对象在不同情况下有不同的状态出现\n- 堕胎的语法,是一种设计思想\n- 多态性:一种调用方式,不同的执行结果\n- 多态:同一事物的多种形态,动物分为人类,猪类,狗类\n【多态和多态性】(https://www.cnblogs.com/luchuangao/p/6739557.html)\n\n- Mixin设计模式\n - 主要采用多继承方式对类的功能进行扩展\n - [Mixin概念](https://www.zhihu.com/question/20778853)\n - [MRO and Mixin](http://blog.csdn.net/robinjwong/article/details/48375833)\n - [Mixin模式](https://www.cnblogs.com/xybaby/p/6484262.html)\n - [Mixin MRO](http://runforever.github.io/2014-07-19/2014-07-19-python-mixin%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/)\n - [MRO](http://xiaocong.github.io/blog/2012/06/13/python-mixin-and-mro/)\n \n - 我们使用多继承语法来实现Mixin\n - 使用Mixin实现多继承的时候非常小心\n - 首先他必须代表某一单一功能,而不是某个物品\n - 职责必须单一,如果有多个功能,则写多个Mixin\n - Mixin不能依赖于子类的实现\n - 子类即使没有继承这个Mixin类,也照样工作,只是缺少了某个功能\n- 优点:\n - 使用Mixin,可以在不对类进行任何修改的情况下,扩充功能\n - 可以方便的组织和维护不同功能组件的划分\n - 可以根据需要任意调整功能类的组合\n - 可以避免创建很多新类\n \n# 4. 类相关函数\n- issubclass:检测一个类是否是另一个类的子类\n- issinstance:检测一个对象是否是一个类的实例\n- hassattr:检测一个对象是否有成员xxx\n- getattr: get attribute\n- setattr:set attribute\n- delattr;delete attribute\n- dir:获取对象的成员列表\n\n# 5.类成员的描述符(属性)\n- 类成员的描述符是为了在类中对类的成员属性进行相关操作而创建的一种方式\n - get:获取属性的操作\n - set:修改或者添加属性的操作\n - delete:删除属性的操作\n- 如果想使用类的成员描述符,大概有三种方法\n - 使用类实现描述器\n - 使用属性修饰符】\n - 用property函数 \n - property函数很简单\n - property(fget,fset,fdel,doc)\n - 案例参见jupyter notebook\n- 无论哪种修饰符都是为了对成员属性进行相应的控制\n - 类的方式:适合多个类中的多个属性公用一个描述符\n - property:适用当前类中使用,控制一个类中的一个属性\n \n# 类的内置属性\n\n __dict__:以字典的方式显示类的 成员组成\n __doc__:获取类的文档信息\n __name__:获取类的名称,如果在模块中使用,获取模块的名称\n __bases__:获取某个类的所有父类,以元组的方式显示\n \n# 7.类的常用魔术方法\n- 魔术方法就是不需要人为调用的方法,基本是在特定的是可自动触发\n- 魔术方法的统一特征,方法名被前后两个下滑线包裹\n- 操作类\n - '__init__':构造函数\n - '__new__':对象实例化方法,次函数比较特殊,一般不需要使用\n - '__call__':对象当做函数使用时触发\n - '__str__':当对象被当成字符串使用时调用\n - '__repr__':返回字符串,跟'__str__'具体区别轻百度\n- 描述符相关\n - '__set__':\n - '__get__':\n - '__delete__':\n\n- 属性操作相关\n- '__getattr__':访问一个不存在的属性是触发\n- '__setattr__':对成员属性进行设置的时候触发\n - 参数:\n - self用来获取当前对象\n - 被设置的属性名称,以字符串的形式出现\n - 需要对属性名称设置的值\n - 作用:进行属性设置的时候进行验证或者修改\n - 注意:在该方法中不能独一属性直接进行赋值操作,否则死循环\n - 参看案例\n- 运算分类相关魔数方法\n - '__gt__':进行大于判断的时候触发的函数\n - 参数:\n - self\n - 第二个参数是第二个对象\n - 返回值可以是任意值,推荐返回布尔值\n - 案例\n \n# 8. 类和对象的三种方法\n- 实例方法\n - 需要实例化对象才能使用方法,使用过程中可能需要截止对象的其他对象的方法完成\n- 静态方法\n - 不需要实例化,通过类直接访问\n- 类方法\n - 不需要实例化\n - 参见案例,三个方法的具体区别自行百度\n \n# 9. 所用软件\n- 画图软件是faststone capture\n- 有问题加群158184562\n- 官网地址 www.tulingxueyuan.com\n\n# 10. 抽象类\n- 抽象方法:没有具体实现内容的方法称为抽象方法\n- 抽象方法的主要意义是规范了子类的行为和接口\n- 抽象类的使用需要借助abc模块\n \n import abc\n \n- 抽象类:包含抽象方法的类叫做抽象类,通常称为abc类\n- 抽象类的使用\n - 抽象类可以包含抽象方法,也可以包含具体方法\n - 抽象类中可以有方法也可以有属性\n - 抽象类不允许直接实例化\n - 必须继承才可以使用,且继承的子类必须实现所有继承来的抽象方法\n - 假定子类没有实现左右继承的抽象方法,则子类也不能实例化\n - 抽象类的主要作用是设定类的标准,以便与开发的时候具有统一的规范\n \n# 11.自定义类\n- 类其实是一个类定义和各种方法的自由组合\n- 可以定义类和函数,然后自己通过类直接赋值\n- 可以借助于MethodType实现\n- 借助type实现\n- 利用元类实现- MetaClass\n - 元类是类\n - 被用来创造别的类"
},
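The notes in the file above describe Python's name mangling for "private" members and the C3/MRO order used for diamond (菱形) inheritance. A minimal, self-contained sketch of both behaviors — the class names `Person` and `A`–`D` are hypothetical illustrations, not part of the repository:

```python
class Person:
    def __init__(self):
        self.__age = 19              # name-mangled to _Person__age

class A:
    def hello(self):
        return "A"

class B(A):
    def hello(self):
        return "B"

class C(A):
    def hello(self):
        return "C"

class D(B, C):                       # diamond: D -> B, C -> A
    pass

if __name__ == "__main__":
    p = Person()
    # "private" is only a renaming convention, not true privacy:
    print(p._Person__age)                                 # 19
    # C3 linearization: subclass first, parents in declaration order
    print([cls.__name__ for cls in D.__mro__])            # ['D', 'B', 'C', 'A', 'object']
    print(D().hello())                                    # 'B' — first match along the MRO
```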
{
"alpha_fraction": 0.6257731914520264,
"alphanum_fraction": 0.6257731914520264,
"avg_line_length": 31.517240524291992,
"blob_id": "4cbef65d5b547feedcedc6baf22132a5ffd68466",
"content_id": "cd93baf1f7e1182cfe10285de4b3a823988bd087",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 29,
"path": "/myPython/习题课练习/测试代码/test_survey.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import unittest\r\nfrom survey import AnonymousSurvey\r\n\r\nclass TestAnonymousSurvey(unittest.TestCase):\r\n '''针对AnonymousSurvey类的测试'''\r\n def test_store_single_response(self):\r\n '''测试单个答案也会被妥善存储'''\r\n question = \"What language did you first learn to speak?\"\r\n my_survey = AnonymousSurvey(question)\r\n my_survey.store_response(\"Chinese\")\r\n\r\n self.assertIn('Chinese', my_survey.responses)\r\n\r\n\r\n def test_store_three_responses(self):\r\n '''测试三个答案也会被妥善存储'''\r\n question = \"What language did you first learn to speak?\"\r\n my_survey = AnonymousSurvey(question)\r\n responses = ['English', 'Chinese', 'Franch']\r\n for response in responses:\r\n my_survey.store_response(response)\r\n # 定义三个不同答案的类表,并调用存储函数将其存储\r\n\r\n for response in responses:\r\n self.assertIn(response, my_survey.responses)\r\n # 将上面的结果和方法结果做判断,是否在列表中\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()"
},
{
"alpha_fraction": 0.740963876247406,
"alphanum_fraction": 0.740963876247406,
"avg_line_length": 32.29999923706055,
"blob_id": "9da51ddb78992d869a51faaee287b7d571396225",
"content_id": "c4027880fd14cadbc50a2e4e8d46e43c80a33222",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 10,
"path": "/myPython/习题课练习/logging/logging03.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import logging\n\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\nlogging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT, filename=\"mylogging.log\")\n\nlogging.debug(\"This is a debug\")\nlogging.info(\"This is a info\")\nlogging.warning(\"This is a warning\")\nlogging.error(\"This is a error\")\nlogging.critical(\"This is a critical\")"
},
{
"alpha_fraction": 0.6213991641998291,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 16.428571701049805,
"blob_id": "6ec1329dfef3c1c4fb7832e169ae175a3264a1cf",
"content_id": "49c035eeedc5f1b689d97fde950fb8ce36819f9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 14,
"path": "/myPython/Tkinter/canvas/简单画布.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\n\nbaseFrame = tkinter.Tk()\n\nbaseFrame.wm_title(\"简单的画布\")\n\ncav = tkinter.Canvas(baseFrame, width=300, height=200)\ncav.pack()\n\ncav.create_line((33, 33), (98, 100))\ncav.create_text(99,177, text=\"I love Wang Xiaomei\")\n\nbaseFrame.mainloop()"
},
{
"alpha_fraction": 0.4167507588863373,
"alphanum_fraction": 0.4540867805480957,
"avg_line_length": 20.55434799194336,
"blob_id": "7e874d094fe757ad43de2be0a69e77d31b828726",
"content_id": "b4f58f92eac25224b89747d7d068429a1339d9e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2332,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 92,
"path": "/myPython/习题课练习/.ipynb_checkpoints/oop-进阶.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import random as r\n\n\nclass Turtle(object):\n\n def __init__(self):\n self.power = 100 # 初始化乌龟体力\n\n self.x = r.randint(0, 10)\n self.y = r.randint(0, 10) # 初始化乌龟位置\n\n def move(self):\n new_x = r.choice([1, 2, -1, -2]) + self.x\n new_y = r.choice([1, 2, -1, -2]) + self.y\n\n # 接下来判断,乌龟的移动是否超出了边界\n if new_x < 0:\n new_x = 0 - (0 - new_x)\n elif new_x > 10:\n new_x = 10 - (10 - new_x)\n else:\n self.x = new_x\n\n if new_y < 0:\n new_y = 0 - (0 - new_y)\n elif new_y > 10:\n new_y = 10 - (10 - new_y)\n else:\n self.y = new_y\n\n self.power -= 1\n return (self.x, self.y)\n\n def eat(self):\n self.power += 20\n if self.power > 100:\n self.power = 100\n\n\nclass Fish(object):\n\n def __init__(self):\n self.x = r.randint(0, 10)\n self.y = r.randint(0, 10) # 初始化鱼的位置\n\n def move(self):\n new_x = r.choice([1, -1, ]) + self.x\n new_y = r.choice([1, -1, ]) + self.y\n\n # 接下来判断,鱼的移动是否超出了边界\n if new_x < 0:\n new_x = 0 - (0 - new_x)\n elif new_x > 10:\n new_x = 10 - (10 - new_x)\n else:\n self.x = new_x\n\n if new_y < 0:\n new_y = 0 - (0 - new_y)\n elif new_y > 10:\n new_y = 10 - (10 - new_y)\n else:\n self.y = new_y\n\n return (self.x, self.y)\n\n\nturtle = Turtle()\nfish = []\nfor i in range(10):\n new_fish = Fish()\n fish.append(new_fish)\n\nwhile True:\n if not len(fish):\n print(\"鱼被吃完了,游戏结束\")\n break\n\n if not turtle.power:\n print(\"乌龟体力用完了,游戏结束\")\n break\n\n pos = turtle.move()\n\n # 在迭代中对列表中的元素进行删除操作很危险,经常会出现一些意想不到的问题,因为迭代器是直接引用列表元素的数据的操作\n # 所以为了避免这种错误出现,我们把列表拷贝一份,然后对原列表进行操作\n\n for each_fish in fish[:]:\n if each_fish.move() == pos:\n turtle.eat()\n fish.remove(each_fish)\n print(\"有一条鱼被吃掉了\")"
},
{
"alpha_fraction": 0.7383592128753662,
"alphanum_fraction": 0.7405765056610107,
"avg_line_length": 20.5238094329834,
"blob_id": "bb98cb15e8acb0dd2bcb3638ad1a862fe4c1b466",
"content_id": "9b2c79921bd04c41983e15b6e01645ed16e9224f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 637,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 21,
"path": "/myPython/Tkinter/简单的事件案例.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n# 这里定义了一个简单函数,作用是在GUI框架内显示\"谢谢点击\"\ndef basebutton(event):\n global baseFrame\n lb = tkinter.Label(text=\"谢谢点击\")\n lb.pack()\n\n\n# 画出程序总框架\nbaseFrame = tkinter.Tk()\nbaseFrame.wm_title(\"简单事件案例\")\n\nbtn = tkinter.Button(baseFrame, text=\"just test\",background=\"yellow\", foreground=\"black\")\n# button/label绑定相应的消息和处理函数\n# 自动获取鼠标左键点击就,并启动相应的处理函数basebutton\nbtn.bind(\"<Button-1>\", basebutton)\nbtn.pack()\n\n# 启动消息循环\n# 到此,表示程序开始运行\nbaseFrame.mainloop()"
},
{
"alpha_fraction": 0.4989440441131592,
"alphanum_fraction": 0.5723336935043335,
"avg_line_length": 26.071428298950195,
"blob_id": "9468e7b402a981cdb478808a78aff3321c4c91e6",
"content_id": "83b6f8cbe9c9cd16109c2683f8fdf4159666a722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2022,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 70,
"path": "/myPython/spider/v18/youdao.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n破解有道词典\nv1\n'''\n\nfrom urllib import request, parse\n\ndef youdao(key):\n\n url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n\n '''\n 多行快速加引号步骤\n 1. ctrl + r:调出正则匹配工具栏\n 2. 选中想要匹配部分\n 3. 两栏依次填写匹配公式:\n \n 4. 勾选三个选项\n 5. 替换\n \n \n 选取多行开头alt + button1\n '''\n\n data = {\n 'i':' key',\n 'from':' AUTO',\n 'to':' AUTO',\n 'smartresult':' dict',\n 'client':' fanyideskweb',\n 'salt':' 15615613028207',\n 'sign':' 8127ef1b41b5fd99f4edd9f91baa7d0c',\n 'ts':' 1561561302820',\n 'bv':' d6c3cd962e29b66abe48fcb8f4dd7f7d',\n 'doctype':' json',\n 'version':' 2.1',\n 'keyfrom':' fanyi.web',\n 'action':' FY_BY_REALTlME',\n }\n\n # 参数data需要时bytes格式\n data = parse.urlencode(data).encode()\n\n headers = {\n \"Accept\": \"application/json,text/javascript,*/*;q=0.01\",\n # \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n \"Content-Length\": \"236\",\n \"Content-Type\": \"application/x-www-form-urlencoded;charset=UTF - 8\",\n \"Cookie\": \"[email protected];OUTFOX_SEARCH_USER_ID_NCOO=1652204480.2389536;JSESSIONID=aaad7w3BQpyeYb0J8qxUw;___rl__test__cookies=1561610962238\",\n \"Host\": \"fanyi.youdao.com\",\n \"Origin\": \"http: // fanyi.youdao.com\",\n \"Referer\": \"http: // fanyi.youdao.com /\",\n \"User-Agent\": \"Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML, likeGecko)Chrome/73.0.3683.86Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n\n req = request.Request(url=url, data=data, headers=headers)\n\n rsp = request.urlopen(req)\n\n html = rsp.read().decode(\"utf-8\")\n print(html)\n with open('rsp.html', 'w', encoding='utf-8') as f:\n f.write(html)\n\n\nif __name__ == '__main__':\n youdao(\"girl\")"
},
{
"alpha_fraction": 0.5787965655326843,
"alphanum_fraction": 0.5802292227745056,
"avg_line_length": 14.886363983154297,
"blob_id": "d11ca537ba8a938cd561c13a45f6f94dcb96c4f5",
"content_id": "3af45d220c4259160278cad747917a0d082b08a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 44,
"path": "/myPython/spider/v4.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "from urllib import request, parse\nimport chardet\n\n\n'''\n掌握对url进行参数编码的方法\n要是用parse模块\n'''\n\nif __name__ == '__main__':\n url = 'https://www.baidu.com//s?'\n wd = input(\"Input your keyword:\")\n\n\n # 要想使用data,需要使用字典结构\n qs = {\n \"wd\":wd\n }\n\n # 转换URL编码\n qs = parse.urlencode(qs)\n print(qs)\n\n fullurl = url + qs\n print(fullurl)\n\n\n # 如果直接用可读参数的URL,是不能访问的\n # fullURL = ‘http://www.baidu.com/s?wd=大熊猫'\n\n rsp = request.urlopen(fullurl)\n html = rsp.read()\n\n # 利用chardet自动检测编码格式\n cs = chardet.detect(html)\n print(type(cs))\n print(cs)\n\n # 使用get取值保证不会出错\n html = html.decode(cs.get(\"encoding\", \"utf-8\"))\n\n # html = html.decode(\"ascii\")\n\n print(html)"
},
{
"alpha_fraction": 0.5363204479217529,
"alphanum_fraction": 0.5627970099449158,
"avg_line_length": 22.380952835083008,
"blob_id": "dc4d9f5848b3d29134cfc530eebe4f1f5d32a09e",
"content_id": "5553129f335f1d1935bd71bda36fe788b38f94c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1947,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 63,
"path": "/myPython/spider/homework/百度贴吧/baidutieba.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n爬取百度贴吧--西安科技大学吧\n1. 西安科技大学吧主页是 http://tieba.baidu.com/f?&kw=西安科技大学\n2. 进去之后,贴吧有很多页\n 第一页网址:http://tieba.baidu.com/f?kw=西安科技大学&ie=utf-8&pn=0\n 第二页网址:http://tieba.baidu.com/f?kw=西安科技大学&ie=utf-8&pn=50\n 第三页网址:http://tieba.baidu.com/f?kw=西安科技大学&ie=utf-8&pn=100\n 第四页网址:http://tieba.baidu.com/f?kw=西安科技大学&ie=utf-8&pn=150\n 第五页网址:http://tieba.baidu.com/f?kw=西安科技大学&ie=utf-8&pn=200\n\n3. 发现贴吧的页数和后面的pn存在这样的关系:pn=(页数-1)*50 , 根据这个关系猜测每一页显示的内容是五十条\n\n解决办法:\n1.准备构建参数字典\n 字典包含三部分,kw, ie, pn\n2.使用parse构建完整url\n3.使用for循环下载\n'''\nfrom urllib import request, parse, error\n\n\nif __name__ == '__main__':\n\n #1.准备构建参数字典\n qs = {\n \"kw\": \"西安科技大学\",\n \"ie\": \"utf-8\",\n \"pn\": 0\n }\n\n #2.使用parse构建完整的url\n # 假定只需要前十页\n urls = []\n baseurl = \"http://tieba.baidu.com/f?\"\n\n for i in range(10):\n # 构建新的qs\n pn = i * 50\n qs['pn'] = str(pn)\n # 把qs编码完成后的url和baseurl进行拼接\n # 拼接完成后转入ruls列表中\n urls.append(baseurl + parse.urlencode(qs))\n\n print(urls)\n\n #3. 使用for循环下载(HTML)\n try:\n for url in urls:\n rsp = request.urlopen(url)\n html = rsp.read().decode(\"utf-8\")\n print(url)\n print(html)\n\n with open(k, \"w\", encoding=\"utf-8\") as f:\n f.write(url)\n f.write(html)\n\n except error.URLError as e:\n print(\"URLError: {0}\".format(e.reason))\n print(\"URLError: {0}\".format(e))\n\n except Exception as e:\n print(e)\n"
},
{
"alpha_fraction": 0.7237991094589233,
"alphanum_fraction": 0.7390829920768738,
"avg_line_length": 31.75,
"blob_id": "5ce84c45aa560437901c6786d1808609d36c027c",
"content_id": "60a448573c135a78035dc24f712656c64c01ad0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/myPython/Tkinter/tkinter_pack布局案例.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# pack 布局案例\nimport tkinter #导入Tkinter\n\nbase = tkinter.Tk() # 创建布局框\nbase.wm_title(\"pack布局案例\") # 设置标题\n\nbtn1 = tkinter.Button(base, text=\"a\",background=\"yellow\", foreground=\"red\")\nbtn1.pack(side=tkinter.LEFT, expand=tkinter.YES, fill=tkinter.Y)\n\nbtn2 = tkinter.Button(base, text=\"b\")\nbtn2.pack(side=tkinter.TOP, expand=tkinter.YES, fill=tkinter.BOTH)\n\nbtn3 = tkinter.Button(base, text=\"c\")\nbtn3.pack(side=tkinter.RIGHT, expand=tkinter.YES, fill = tkinter.X)\n\nbtn4 = tkinter.Button(base, text=\"d\", foreground=\"green\", background=\"orange\")\nbtn4.pack(side=tkinter.LEFT, expand=tkinter.YES, fill=tkinter.BOTH)\n\nbtn5 = tkinter.Button(base, text=\"e\")\nbtn5.pack(side=tkinter.TOP)\n\nbtn6 = tkinter.Button(base, text=\"f\")\nbtn6.pack(side=tkinter.BOTTOM, expand=tkinter.NO, fill=tkinter.X)\n\nbtn7=tkinter.Button(base, text=\"g\", foreground=\"purple\", background=\"white\")\nbtn7.pack(side=tkinter.TOP, anchor=tkinter.SE)\n\nbase.mainloop()"
},
{
"alpha_fraction": 0.6865671873092651,
"alphanum_fraction": 0.6883230805397034,
"avg_line_length": 32.57575607299805,
"blob_id": "f9a4017a91c77fe4e4b66954150d8346a4e5020a",
"content_id": "9e87555cd23d85a116bfc2bcab25658e83adabb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1467,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 33,
"path": "/myPython/习题课练习/测试代码/test_setup.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''在前面的test_survey.py中,我们在每一个方法中都创建了一个AnonymousSurvey实例,\r\n并在每个方法中都创建了答案。unittest.TestCase类包含方法setup(),python 将先运行它,再运行各个\r\n以test打头的方法。这样你在编写的每个方法中都可以使用在方法setUp()中创建的对象了\r\n\r\n下面将利用setUp()方法创建一个问题和一组答案,供方法test_store_single_response()和\r\n方法test_store_three_response()使用'''\r\n\r\nimport unittest\r\nfrom survey import AnonymousSurvey\r\n\r\nclass TestAnonymousSurvey(unittest.TestCase):\r\n # 针对AnonymousSuvery类的测试\r\n\r\n def setUP(self):\r\n # 创建一个调查对象和一组答案\r\n question = \"What language did you first learn to speak?\"\r\n self.my_survey = AnonymousSurvey(question)\r\n self.responses = ['English', 'Chinese', 'Franch']\r\n\r\n def test_store_single_response(self):\r\n # 测试单个答案也能被妥善的保存\r\n self.my_survey .store_response(self.responses[0])\r\n self.assertIn(self.responses[0],self.my_survey.responses)\r\n\r\n def test_store_three_responses(self):\r\n # 测试三个答案会被妥善保存\r\n for response in self.responses:\r\n self.my_survey.store_response(response)\r\n for response in self.responses:\r\n self.assertIn(response, self.my_survey.responses)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()"
},
{
"alpha_fraction": 0.5987671613693237,
"alphanum_fraction": 0.6068927049636841,
"avg_line_length": 18.83333396911621,
"blob_id": "2f3c084887face0aa7fc01a886892218b3ed6914",
"content_id": "d152eee164662af4838c79cd976b9ca42b584f69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5953,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 180,
"path": "/myPython/Tkinter/tkinter.md",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "# GUI介绍\n- GraphicalUserInterface,\n- GUI for Python: Tkinter, wxPython, PyQt\n- TKinter:\n - 绑定的是TK GUI工具集,用途Python包装的Tcl代码\n- PyGTK\n - Tkinter的替代品\n- wxPython\n - 跨平台的Python GUI\n- PyQt\n - 跨平台的\n - 商业授权可能由问题\n \n- tkinter测试参见案例Tkinter.py\n\n### Tkinter 常用组件\n- 按钮\n\n Button\t\t\t\t按钮组件\n RadioButton\t\t\t单选框组件\n CheckButton\t\t\t选择按钮组件\n Listbox\t\t\t\t列表框组件\n \n- 文本输入组件\n \n Entry\t\t\t\t单行文本框组件\n Text\t\t\t\t多行文本框组件\n \n- 标签组件\n\n Label\t\t\t\t标签组件,可以显示图片和文字\n Message\t\t\t\t标签组件,可以根据内容将文字换行\n\t\n- 菜单\n \n Menu\t\t\t\t菜单组件\n MenuButton\t\t\t菜单按钮组件,可以使用Menu代替\n \n- 滚动条\n \n scale\t\t\t\t滑块组件\n Scrollbar\t\t\t滚动条组件\n \n- 其他组件\n\n Canvas\t\t\t\t画布组件\n Frame\t\t\t\t框架组件,将多个组件编组\n Toplevel\t\t\t创建子窗口容器组件\n \n### 组件的大致使用步骤\n1. 创建总面板\n2. 创建面板上的各种组件\n 1. 指定组件的父组件,即依附关系\n 2. 利用相应的属性对组件进行设置\n 3. 给组件安排布局\n \n3. 同步骤2相似,创建好多个组件\n4. 最后,启动总面板的消息循环\n\n### 组件布局\n- 控制组件的摆放方式\n- 三种布局:\n - pack: 按照方位布局\n - place: 按照坐标布局\n - grid: 网格布局\n \n- pack布局\n - 最简单,代码量最少,挨个摆放,默认从上倒下,系统自动设置\n - 通用使用方式为: 组件对象.pack(设置,,,,,,,)\n - side: 停靠方位, 可选值为LEFT,TOP,RIGHT,BOTTON\n - fill: 填充方式,X,Y,BOTH,NONE\n - expande: YES/NO\n - anchor: N,E,S,W,CENTER\n - ipadx: x方向的内边距\n - ipady: y\n - padx: x方向外边界\n - pady: y........\n \n- grid布局\n - 通用使用方式:组件对象.grid(设置,,,,,,,)\n - 利用row,column编号,都是从0开始\n - sticky: N,E,S,W表示上下左右,用来决定组件从哪个方向开始\n - 支持ipadx,padx等参数,跟pack函数含义一样\n - 支持rowspan,columnspan,表示跨行,跨列数量\n\n- place布局\n - 明确方位的摆放\n - 相对位置布局,随意改变窗口大小会导致混乱\n - 使用place函数,分为绝对布局和相对布局,绝对布局使用x,y参数\n - 相对布局使用relx,rely, relheight, relwidth\n \n \n## 消息机制\n- 消息的传递机制\n - 自动发出事件/消息\n - 消息有系统负责发送到队列\n - 由相关组件进行绑定/设置\n - 后端自动选择感兴趣的事件并做出相应反应\n- 消息格式:\n - <[modifier-]---type-[-detail]>\n - <Button-1>: Button表示一个按钮事件,1代表的是鼠标左键,2代表中键\n - <KeyPress-A>: 键盘A键位\n - <Control-Shift-KeyPress-A>: 同时按下Control,Shift,A三个键位\n - <F1>:F1键盘\n - [键位对应名称](https://infohost.nmt.edu/tcc/help/pubs/tkinter/web/key-names.html) \n \n \n## 消息机制\n- 消息的传递机制\n - 自动发出事件/消息\n - 消息由系统负责发送到队列\n - 由相关组件进行绑定/设置\n - 后端自动选择感兴趣的事件并做出相应反应\n- 消息格式:\n - <[modifier-]---type-[-detail]>\n - <Button-1>: Button表示一个按钮事件,1代表的是鼠标左键,2代表中键,3代表右键\n - <KeyPress-A>: 键盘A键位\n - <Control-Shift-KeyPress-A>: 同时按下Control,Shift,A三个键位\n - <F1>:F1键盘\n - [键位对应名称](https://infohost.nmt.edu/tcc/help/pubs/tkinter/web/key-names.html)\n \n### Tkinter的绑定\n- bind_all: 全局范围的绑定,默认的是全局快捷键,比如F1是帮助文档\n- bind_class: 接受三个参数,第一个是类名,第二个是事件,第三个是操作\n - w.bind_class(\"Entry\", \"<Control-V>, my_paste)\n- bind:单独对某一个实例绑定\n- unbind: 解绑,需要一个参数,即你要解绑哪个事件\n\n### Entry\n- 输入框,功能单一\n- entry[\"show\"] = \"*\", 设置遮挡字符\n\n\n### 菜单\n### 1. 普通菜单\n- 第一个Menu类定义的是parent\n- add_command 添加菜单项,如果菜单是顶层菜单,则从左向右添加,否则就是下拉菜单\n - label: 指定菜单项名称\n - command: 点击后相应的调用函数\n - acceletor: 快捷键\n - underline: 制定是否菜单信息下有横线\n - menu:属性制定使用哪一个作为顶级菜单\n \n \n \n### 级联菜单\n- add_cascade:级联菜单,作用是引出后面的菜单\n- add_cascade的menu属性:指明把菜单级联到哪个菜单上\n- label: 名称\n- 过程:\n 1. 建立menu实例\n 2. add_command\n 3. add_cascade\n \n### 弹出式菜单\n- 弹出菜单也叫上下文菜单\n- 实现的大致思路\n 1. 简理财单并向菜单添加各种功能\n 2. 监听鼠标右键\n 3. 如果右键点击,则根据位置判断弹出\n 4. 调用Menu的pop方法\n- add_separator: 添加分隔符\n\n\n### canvas 画布\n- 画布: 可以自由的在上面绘制图形的一个小舞台\n- 在画布上绘制对象, 通常用create_xxxx,xxxx=对象类型, 例如line,rectangle\n- 画布的作用是把一定组件画到画布上显示出来\n- 画布所支持的组件:\n - arc-->>,弧,圆弧\n - bitmap-->>位图\n - image(BitmapImage, PhotoImage)-->>图片\n - line-->>直线\n - oval-->>椭圆\n - polygon-->>多边形\n - rectangle-->>矩形\n - text-->>文本\n - winodw(组件)\n- 每次调用create_xxx都会返回一个创建的组件的ID,同时也可以用tag属性指定其标签\n- 通过调用canvas.move实现一个一次性动作"
},
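tkinter.md above walks through grid layout and event binding. A minimal runnable sketch combining the two ideas — the widget names and the `<Button-1>` handler below are illustrative, not taken from this repository:

```python
import tkinter

root = tkinter.Tk()
root.wm_title("grid + bind demo")

# label spans two grid columns (columnspan, as described in the notes)
label = tkinter.Label(root, text="click the button")
label.grid(row=0, column=0, columnspan=2)

def on_left_click(event):
    # event.x / event.y are the click coordinates inside the widget
    label["text"] = "clicked at ({0}, {1})".format(event.x, event.y)

button = tkinter.Button(root, text="click me")
button.grid(row=1, column=1, sticky=tkinter.E)   # stick to the east edge
button.bind("<Button-1>", on_left_click)         # left mouse button event

root.mainloop()
```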
{
"alpha_fraction": 0.4468750059604645,
"alphanum_fraction": 0.4468750059604645,
"avg_line_length": 44,
"blob_id": "df44b65ae74de2dbcbe5e42cad09805bb578e6a9",
"content_id": "73cf20853cbafc3d854c4920e8e6e40163c82e22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 370,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 7,
"path": "/myPython/习题课练习/测试代码/function_of _assert in_unittest_module.md",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "### 各种断言方法\r\n- assertEqual(a,b) 核实a == b\r\n- assertNotEqual(a,b) 核实a != b\r\n- assertTrue(x) 核实x为True\r\n- assertFalse(x) 核实x为False\r\n- assertIn(item, list) 核实item在list中\r\n- assertNotIn(item, list) 核实item不在list中"
},
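The assertion cheat-sheet above maps directly onto `unittest.TestCase` methods. A tiny self-contained test exercising each listed assertion (a hypothetical test class for illustration, not part of the repository):

```python
import unittest

class TestAsserts(unittest.TestCase):
    def test_assert_methods(self):
        items = ["a", "b"]
        self.assertEqual(1 + 1, 2)          # a == b
        self.assertNotEqual("a", "b")       # a != b
        self.assertTrue(bool(items))        # x is True
        self.assertFalse(items == [])       # x is False
        self.assertIn("a", items)           # item in list
        self.assertNotIn("c", items)        # item not in list

if __name__ == "__main__":
    unittest.main()
```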
{
"alpha_fraction": 0.5150793790817261,
"alphanum_fraction": 0.6777777671813965,
"avg_line_length": 41.03333282470703,
"blob_id": "cb3e902f2f66df39474db817d2c0bee50b9902d2",
"content_id": "24491e78b284495ada4b61f9ff15e02dc6644c58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1436,
"license_type": "no_license",
"max_line_length": 487,
"num_lines": 30,
"path": "/myPython/spider/v12/v12_cookes.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "from urllib import request, error\nimport chardet\n\nif __name__ == '__main__':\n\n url = 'http://www.renren.com/971280070/newsfeed/photo'\n\n try:\n headers = {\"Cookie\": \"anonymid=jxa267a0-c2vc6g; depovince=GW; _r01_=1; ick_login=babc69b3-fcf3-40b1-8674-c3391ad9de39; t=6e5bb9278060635882c075cdb88af8230; societyguester=6e5bb9278060635882c075cdb88af8230; id=971280070; xnsid=7793c3b5; jebecookies=100e647b-6873-4d35-9755-1435ab2b126b|||||; JSESSIONID=abc19eKEgkNbVc-UdBiUw; ver=7.0; loginfrom=null; jebe_key=e592f0e8-b684-4ab0-9891-7f3e57e23c60%7Cce2fd52ddd2b60ba914770b7fc7c7741%7C1561361925065%7C1%7C1561361925083; wp_fold=0\"}\n req = request.Request(url, headers=headers)\n rsp = request.urlopen(req)\n\n html = rsp.read()\n cs = chardet.detect(html)\n\n html = html.decode(cs.get(\"encoding\", \"utf-8\"))\n # print(html)\n\n # 之所以注释上面的打印函数 是因为在初次调试阶段遇到写出来的HTML页面乱码,尝试打印结果正常,去网上找了解决问题方法\n # 是因为入是,没有指明编码格式 添加了encoding=\"utf-8\" 问题成功解决\n # 得到经验 有问题还得问度娘\n with open(\"rsp.html\", \"w\", encoding=\"utf-8\") as f:\n f.write(html)\n\n except error.URLError as e:\n print(\"URLError: {0}\".format(e.reason))\n print(\"URLError: {0}\".format(e))\n\n except Exception as e:\n print(e)"
},
{
"alpha_fraction": 0.7163461446762085,
"alphanum_fraction": 0.7278845906257629,
"avg_line_length": 18.27777862548828,
"blob_id": "687547b3f06b72d7145fccf83c7de1e28a897890",
"content_id": "ffea7d96fbe51321abffbc89a9811eb4a515f3dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1542,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 54,
"path": "/myPython/spider/v5.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n利用parse模块模拟post请求\n分析百度词典\n分析步骤:\n1. 打开F12\n2. 尝试输入单词girl, 发现每敲一个字母后都有请求\n3. 请求地址是:https://fanyi.baidu.com/sug\n4.利用NetWork-all-hearders,查看,发现formdata的值是kw:girl\n5.检查返回内容格式,发现返回的是json格式内容==>需要用到json包\n'''\n\nfrom urllib import request, parse\nimport json\n\n'''\n大致流程是:\n1. 利用data构造内容,然后URLopen打开\n2. 返回一个json格式的结果\n3. 结果就应该是girl的释义\n'''\nbaseurl = 'https://fanyi.baidu.com/sug'\n\n# 存放用来模拟form的数据格式一定是dict格式\n\ndata = {\n # girl是翻译输入的英文内容, 应该是由用户输入的,此处使用硬编码\n\n 'kw':input(\"Please input what you want:\")\n}\n# 需要使用parse模块对data进行编码\ndata = parse.urlencode(data).encode(\"utf-8\")\n\nprint(type(data))\n# 我们需要构造一个请求头,请求头应该至少包含传入数据的长度\n# request要求掺入的请求头是一个dict格式\n\nheaders = {\n # 因为使用post方式,至少应该包含content-length 字段\n 'Content-Length':len(data)\n}\n# 有了headers,data,url,就可以尝试发送请求了\nres = request.urlopen(baseurl, data=data)\n\njson_data = res.read().decode(\"utf-8\")\nprint(type(json_data))\nprint(json_data)\n\n# 把json字符串转换成字典\njson_data = json.loads(json_data)\nprint(type(json_data))\nprint(json_data)\n\nfor item in json_data['data']:\n print(item['k'], \" ==> \", item['v'])"
},
{
"alpha_fraction": 0.5357142686843872,
"alphanum_fraction": 0.5549450516700745,
"avg_line_length": 23.299999237060547,
"blob_id": "5a0849c011f622b7691746d2c427b00c2d85004d",
"content_id": "43f0c73e574afc817b22bc53641bd2c7114197a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 30,
"path": "/myPython/习题课练习/多线程/简单的多线程案例.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import _thread as thread\nimport time\n\nmovie_list = ['斗破苍穹.mp4', '复仇者联盟.mp4', '九层妖塔.rmvb', '色戒.avi']\nmusic_list = ['七里香.mp3', '新贵妃醉酒.mp3']\nmovie_format = ['mp4', 'avi']\nmusic_format = ['mp3']\n\n\ndef play(playlist):\n for i in playlist:\n if i.split(\".\")[1] in movie_format:\n print('你现在收看的是{0}'.format(i))\n time.sleep(3)\n elif i.split(\".\")[1] in music_format:\n print('你现在收听的是{0}'.format(i))\n time.sleep(2)\n else:\n print(\"没有能播放的格式\")\n\n\ndef thread_run():\n thread.start_new_thread(play, (movie_list,))\n thread.start_new_thread(play, (music_list,))\n # 没有西面的代码压根就不会打印\n while True:\n time.sleep(15)\n\nif __name__ == '__main__':\n thread_run()"
},
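The demo above uses the low-level `_thread` module plus an endless `time.sleep` loop to keep the main thread alive. A hedged alternative sketch using the standard-library `threading` module, where `join()` replaces the busy wait — the playlist names below are illustrative placeholders:

```python
import threading
import time

def play(playlist):
    # pretend to "play" each item for a second
    for name in playlist:
        print("now playing {0}".format(name))
        time.sleep(1)

if __name__ == "__main__":
    movies = ["a.mp4", "b.avi"]
    music = ["c.mp3"]
    threads = [
        threading.Thread(target=play, args=(movies,)),
        threading.Thread(target=play, args=(music,)),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()   # wait for both players instead of sleeping forever
```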
{
"alpha_fraction": 0.5465980768203735,
"alphanum_fraction": 0.5643224716186523,
"avg_line_length": 18.021739959716797,
"blob_id": "e9a8ee0863bb4be6c1db880912c6867a1aeebb03",
"content_id": "06e021ac5270ed46e3cff68db0435bc7efdaf21e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2869,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 92,
"path": "/myPython/web/day1/01.html",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n\n <title>HTML常用的块级标签(块级元素)</title>\n\n</head>\n<body>\n<!--下面的我没有打注释不是忘记:-->\n常用标题标签范围h1--h6<br>\n常用的有语义块级标签特性:有默认样式,独占一行\n这是一个标题\n<h1>这是一级标题</h1>\n<h2>这是二级标题</h2>\n<h3>这是三级标题</h3>\n<h4>这是四级标题</h4>\n<h5>这是五级标题</h5>\n<h6>这是六级标题</h6>\n\n<!--段落标签-->\n<!--段落标签也是有默认样式,且独占一行-->\n<!--<br>标签是强制换行标签-->\n<p>或者有经验的开发人员,可以快速的通过日志定位到问题的根源。可见,日志的重要性不可小觑。<br>\n 日志的作用可以简单总结为以下3点:\n</p>\n<p>日志的作用可以简单总结为以下3点:</p>\n\n<!--列表-->\n<!--无序类表是页面中最常用的列表,无需列表有默认样式独占一行-->\n\n<ul>\n <li>列表项一</li>\n <li>列表项二</li>\n <li>列表项三</li>\n</ul>\n\n<!--有序列表和无序列表都属于块级标签,有默认样式独占一行-->\n<!--有序列表具有两个属性\"type\" 和 \"start\"-->\n<ol type=\"i\" start=\"3\">\n <li>列表项一</li>\n <li>列表项二</li>\n</ol>\n\n<!--dl为自定义列表作为了解-->\n<dl>\n <dt>只是自定义列表标题</dt>\n <dd>这时候自定义列表解释项</dd>\n</dl>\n\n\n<!--table声明表格,有属性th表头,tr行,td列-->\n<!--table具有属性border边框,为1是显示边框-->\n<!--cellpadding这只文本内容和边框之间的距离-->\n<!--cellspacing这只单元格和单元格之间的距离-->\n<table border=\"1\" cellpadding=\"10px\" cellspacing=\"0\" width=\"800px\" height=\"300px\">\n\n <tr align=\"center\">\n <td colspan=\"4\">这是第一行的第一列</td>\n<!-- <td>这是第一行的第二列</td>-->\n<!-- <td>这是第一行的第三列</td>-->\n<!-- <td>这是第一行的第四列</td>-->\n </tr>\n <tr align=\"center\">\n <td>这是第二行的第一列</td>\n <td>这是第二行的第二列</td>\n <td>这是第二行的第三列</td>\n <td rowspan=\"3\">这是第二行的第四列</td>\n </tralign>\n <tr valign=\"bottom\">\n <td>这是第三行的第一列</td>\n <td>这是第三行的第二列</td>\n <td>这是第三行的第三列</td>\n<!-- <td>这是第三行的第四列</td>-->\n </tr>\n <tr>\n <td>这是第四行的第一列</td>\n <td>这是第四行的第二列</td>\n <td>这是第四行的第三列</td>\n<!-- <td>这是第四行的第四列</td>-->\n </tr>\n</table>\n<!--hr标签是水平分割线-->\n<hr>\n<!--无意义的区块即标签<div>-->\n<div>这是一个div</div>\n<!--div作用是画出一块空白区域,配合CSS在布局当中使用-->\n<br>\n<br>\n</body>\n\n</html>"
},
{
"alpha_fraction": 0.7331975698471069,
"alphanum_fraction": 0.7331975698471069,
"avg_line_length": 24.789474487304688,
"blob_id": "3ce96879ed147b4ba77704690d3e7fa5ac665f7e",
"content_id": "96ee277a5dcfe75e96a3c706fe77d7fdd51ec79c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 19,
"path": "/myPython/Tkinter/tkinter_buttun01.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n\ndef showlabel():\n global baseFrame\n # 在函数中创建了一个label\n # label的父组件是baseFrame\n lb = tkinter.Label(baseFrame, text = \"显示label\", background = \"yellow\")\n lb.pack()\n\nbaseFrame = tkinter.Tk()\n# 生成一个button,\n# command参数的意义在于按下button后该执行什么函数或者命令\n# 如果没有command参数的话,所生成的button是一个死的button,除了会动之外没有任何的执行结果,参见下面的#注释\n\nbt = tkinter.Button(baseFrame, text = \"show Label\", command = showlabel)\n# bt = tkinter.Button(baseFrame, text = \"show Label\")\nbt.pack()\n\ntkinter.mainloop()\n\n"
},
{
"alpha_fraction": 0.6163522005081177,
"alphanum_fraction": 0.6331236958503723,
"avg_line_length": 21.73015785217285,
"blob_id": "86939bb28ce036c86c962a9aa9840ace28db8230",
"content_id": "009b518e31a4a75a38bff5c0e3658faef6562fff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1781,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 63,
"path": "/myPython/spider/v13/v13_CookieJar.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "from urllib import request, parse, error\nfrom http import cookiejar\n\n\n# 创建cookiejar实例\ncookie = cookiejar.CookieJar()\n# 生成cookie管理器\ncookie_handler = request.HTTPCookieProcessor(cookie)\n# 创建http请求管理器\nhttp_handler = request.HTTPHandler()\n# 生成https管理器\nhttps_handler = request.HTTPSHandler()\n\n# 创建请求管理器\nopener = request.build_opener(http_handler, https_handler,cookie_handler)\n\ndef login():\n '''\n 负责初次登录\n 需要输入用户密码,用来获取登录cookie凭证\n :return:\n '''\n # 此url需要从登录form的action属性中提取\n url = 'http://www.renren.com/PLogin.do'\n\n # 此键值需要从form的两个对应input中提取name属性\n data = {\n \"email\" : \"15619509989\",\n \"password\" : \"123456\"\n }\n\n # 把数据进行编码\n data = parse.urlencode(data)\n # 创建请求对象\n req = request.Request(url, data=data.encode())\n # 使用opener发起请求\n rsp = opener.open(req)\n\ndef getHomePage():\n url = \"http://www.renren.com/971283120/profile\"\n\n try:\n # 如果已经执行了login函数,则opener自动已经包含相应的cookie值\n rsp = opener.open(url)\n\n html = rsp.read().decode()\n\n # 写入文件的HTML页面乱码,去网上找了解决问题方法\n # 是因为入是,没有指明编码格式 添加了encoding=\"utf-8\" 问题成功解决\n # 得到经验 有问题还得问度娘\n with open(\"rsp.html\", \"w\", encoding='utf-8') as f:\n f.write(html)\n\n except error.URLError as e:\n print(\"URLError: {0}\".format(e.reason))\n print(\"URLError: {0}\".format(e))\n\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n login()\n getHomePage()"
},
{
"alpha_fraction": 0.5615183115005493,
"alphanum_fraction": 0.5863874554634094,
"avg_line_length": 20.25,
"blob_id": "d6eabf6c4fbd807cbd1fa31c9963f49087ab875c",
"content_id": "af6cc61ebf48ba3c61e7d279cb7735234b03dc09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 36,
"path": "/myPython/spider/v10_ProxyHandler.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "'''\n使用代理访问百度\n'''\n\nfrom urllib import request,error\n\nif __name__ == '__main__':\n\n url = 'http://www.baidu.com'\n\n # 代理常用网址:\n # www.xicidaili.com\n # www.goubanjia.com\n # 基本使用步骤:\n # 1.设置代理地址\n Proxy = {'http': '124.250.26.129:8080'}\n # 2.创建ProxyHandler\n Proxy_handler = request.ProxyHandler(Proxy)\n # 3.创建Opener\n openner = request.build_opener(Proxy_handler)\n # 4.安装Opener\n request.install_opener(openner)\n\n # 现在如果访问url,则使用代理服务器\n try:\n req = request.Request(url)\n rsp = request.urlopen(req)\n html = rsp.read().decode()\n print(html)\n\n except error.URLError as e:\n print(\"URLError: {0}\".format(e.reason))\n print(\"URLError: {0}\".format(e))\n\n except Exception as e:\n print(e)"
},
{
"alpha_fraction": 0.7450980544090271,
"alphanum_fraction": 0.7450980544090271,
"avg_line_length": 16.33333396911621,
"blob_id": "14c3e47032b51b2bdeaf160ec472a7ba5639f364",
"content_id": "a3c4bb06cd5dbb6356eedd2636fb43c2c4f82546",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 3,
"path": "/myPython/Tkinter/Tkinter.py",
"repo_name": "duanzhijianpanxia/myPython",
"src_encoding": "UTF-8",
"text": "import tkinter\n'''测试Tkinter包是否好使'''\ntkinter._test()"
}
] | 36 |
davidchris/googleFooBarChallenge | https://github.com/davidchris/googleFooBarChallenge | ad20dea4b6764a266367b88d3db65c1870fd328e | e00d9aeb9d3f2dfaa2104ff2aa8dba5e8fb8b963 | 8d3be638f0cbe8fcf4c0062766a7af62dcc9d5f3 | refs/heads/master | 2022-11-27T01:53:58.813568 | 2020-08-09T14:30:10 | 2020-08-09T14:30:10 | 286,254,267 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49035370349884033,
"alphanum_fraction": 0.49196141958236694,
"avg_line_length": 21.214284896850586,
"blob_id": "c50a15995769135423d50e255a5b2ddcf36afc8f",
"content_id": "3ebb70ab18164691f065a35a05d7f9360b97c13c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 28,
"path": "/solution.py",
"repo_name": "davidchris/googleFooBarChallenge",
"src_encoding": "UTF-8",
"text": "def solution(x, y):\n additional_ids_x = []\n additional_ids_y = []\n\n for i in y:\n if i not in x:\n additional_ids_x.append(i)\n else:\n additional_ids_x.append(None)\n\n for i in x:\n if i not in y:\n additional_ids_y.append(i)\n else:\n additional_ids_y.append(None)\n\n result = []\n\n if not any(additional_ids_x):\n for i in additional_ids_y:\n if i is not None:\n result.append(i)\n else:\n for i in additional_ids_x:\n if i is not None:\n result.append(i)\n\n return result[0]\n"
},
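solution.py above finds the extra ID with nested membership tests, which is O(n²) and can misreport when the extra element duplicates a value already present in both lists. A possible alternative sketch using `collections.Counter` multiset subtraction — an assumption about the intended semantics of the foobar task, not the author's method:

```python
from collections import Counter

def solution(x, y):
    # Subtracting multisets leaves exactly the one extra element,
    # even when the lists contain duplicate values.
    longer, shorter = (x, y) if len(x) > len(y) else (y, x)
    diff = Counter(longer) - Counter(shorter)
    return next(iter(diff))

assert solution([13, 5, 6, 2, 5], [5, 2, 5, 13]) == 6
```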
{
"alpha_fraction": 0.32758620381355286,
"alphanum_fraction": 0.4913793206214905,
"avg_line_length": 28,
"blob_id": "7df74d3d04d2d2441c0f1a037355aaad30ae8586",
"content_id": "df201eccc3174544aa0aaef202a11beaf74b8e53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 8,
"path": "/test_solution.py",
"repo_name": "davidchris/googleFooBarChallenge",
"src_encoding": "UTF-8",
"text": "import solution\n\nprint \"test 1\"\nassert solution.solution([13, 5, 6, 2, 5], [5, 2, 5, 13]) == 6\n\nprint \"test 2\"\nassert (solution.solution([14, 27, 1, 4, 2, 50, 3, 1],\n [2, 4, -4, 3, 1, 1, 14, 27, 50]) == -4)\n"
}
] | 2 |
jeffspry/MHOCMP | https://github.com/jeffspry/MHOCMP | a5a6ee209bc8ed3f6c8705e38360d92af00aaa4c | 2550785609a12799e328d13c821d6d7e08e5cafb | c7880ed51755e1602bcce0987d1c02c23379a0c0 | refs/heads/master | 2021-01-10T09:02:44.740087 | 2015-12-18T04:26:50 | 2015-12-18T04:26:50 | 48,214,443 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.626086950302124,
"alphanum_fraction": 0.6308300495147705,
"avg_line_length": 35.66666793823242,
"blob_id": "f7549dd7d8deb510e48db8d41b9ae44f2b1fe02b",
"content_id": "b41a7f901fbcd13a2df7acc21363f0a123e026e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2530,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 69,
"path": "/MHOCMP.py",
"repo_name": "jeffspry/MHOCMP",
"src_encoding": "UTF-8",
"text": "import time\nimport datetime\nimport praw\nimport csv\nimport os\nimport redis\n\n__author__ = '/u/spookyyz'\n__version__ = '0.1'\nuser_agent = 'MHOCMP Vote Reminder by /u/spookyyz'\nbot_signature = \"\\r\\n\\r\\n^(_MHOCMP Vote Reminder v%s created by /u/spookyyz ) ^|| ^(Feel free to message me with any ideas or problems_)\" % __version__\n\n###########Global Vars\nSUBREDDIT = 'MHOC'\nWAIT_TIME = 30\nSLEEP_TIME = 10\nDEVELOPER = True #True to print output instead of post\nSTART_TIME = time.time() #new var to monitor time of startup so posts prior to that time can be ignored. (in Unix)\nREPLIED_TO = [] #will read redis db keys into this list\n#REDIS_DB = redis.from_url(os.environ.get(\"REDIS_URL\"))\n###########\n\n###########CSV PROCESSING HOLDER (needs to be read periodically to check for updates)\n\nclass MHOC_bot(object):\n \"\"\"\n This bot will check /r/MHOCMP for any posts aged 4 days or more. If a post is found,\n it will compare users who have commented (voted) on said post and those who should have\n voted (via CSV list provided). If person on list has not voted a PM will be sent to that\n user reminding them to vote on the current issue. Submission ID will then be posted to a\n redis with a value of each person messaged.\n \"\"\"\n def __init__(self):\n self.r = praw.Reddit(user_agent=user_agent) #init praw\n if (DEVELOPER):\n print \"DEVELOPER MODE ON (NO POSTING)\"\n else:\n try:\n self.r.login(os.environ['MHOCMP_REDDIT_USER'], os.environ['MHOCMP_REDDIT_PASS'])\n except Exception, e:\n print \"ERROR(@login): \" + str(e)\n\n def submission_age(self, submission):\n \"\"\"\n Take submission object.\n RETURN: True if submission is 4 days or older, False if not\n \"\"\"\n age = datetime.datetime.utcfromtimestamp(submission.created)\n print \"[@get_submission_age]: %s\" % str(age)\n\n def scan(self):\n \"\"\"\n Workhorse, will iterate through submissions in an effort to find submissions older\n than 4 days old. If found will pass to processing method.\n RETURN: Submission object that is 4 days or older.\n \"\"\"\n try:\n sub_obj = self.r.get_subreddit(SUBREDDIT)\n if (DEVELOPER):\n print \"Getting listings for /r/%s...\" % SUBREDDIT\n except Exception, e:\n print \"ERROR(@sublisting): \" + str(e)\n\n for submission in sub_obj.get_new(limit=25):\n self.submission_age(submission)\n\n\nbot = MHOC_bot()\nbot.scan()\n"
}
] | 1 |
arpitacr/Python-Programs | https://github.com/arpitacr/Python-Programs | df4f0f82fc243f1a91a81bef93755ea4ad0ea1c5 | 1084a91a19ef3457f5c9785b9840cd8be606354e | 827184b8a5876c8ccd4facd2385edcfede69e5f1 | refs/heads/master | 2020-04-13T22:50:30.944691 | 2015-03-08T22:48:42 | 2015-03-08T22:48:42 | 31,866,634 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5740457773208618,
"alphanum_fraction": 0.6167938709259033,
"avg_line_length": 31.75,
"blob_id": "a3e24df898dff0173d7ff549a9dfe9663ad3844c",
"content_id": "9e527092e7b3cb483f6f0e3e5d75e792b92ed7be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 20,
"path": "/Earthquake_energy.py",
"repo_name": "arpitacr/Python-Programs",
"src_encoding": "UTF-8",
"text": "def richter_energy():\n\twhile(1):\n\t\tflag = 0\n\t\tinp = raw_input(\"Enter a number between 1 and 10: \")\n\t\tfor c in inp:\n\t\t\tif c.isalpha() :\n\t\t\t\tflag = 1\n\t\tif flag == 1 :\n\t\t\tprint \"Not valid, please enter a number between 1 and 10: \"\n\t\telif ( float(inp) < 1.0 or float(inp) > 10.0 ):\n\t\t\tprint \"Number not in Richter scale, please enter again: \"\n\t\telse:\n\t\t\tbreak\n\t\n\trichter = float(inp)\n\tenergy = 10 ** ((1.5 * richter) + 4.8)\n\ttons_of_tnt = energy/(4.184 * (10 ** 9))\n\tprint \"The earthquake of \" + str(richter) + \" on Richter scale released energy of \" + str(energy) + \" Joules which is worth exploding \" + str(tons_of_tnt) + \" tons of TNT\"\n\t\t\nrichter_energy()\n"
},
{
"alpha_fraction": 0.5491480827331543,
"alphanum_fraction": 0.591087818145752,
"avg_line_length": 24.433332443237305,
"blob_id": "5768297f8772ce590a98fc28b5c92eb050a915d6",
"content_id": "16621f51e70e9016400e905b996e3ef05072990a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 763,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 30,
"path": "/Windchill_index.py",
"repo_name": "arpitacr/Python-Programs",
"src_encoding": "UTF-8",
"text": "def windchill():\n\twhile(1):\n\t\tflag = 0\n\t\ttemp = raw_input(\"Enter Air temperature (in Fahrenheit): \")\n\t\tfor c in temp:\n\t\t\tif c.isalpha() :\n\t\t\t\tflag = 1\n\t\tif flag == 1 :\n\t\t\tprint \"Not valid, please enter a valid number: \"\n\t\telse:\n\t\t\tbreak\n\ttemp = float(temp)\n\twhile(1):\n\t\tflag = 0\n\t\tspeed = raw_input(\"Enter Wind speed (in MPH): \")\n\t\tfor c in speed:\n\t\t\tif c.isalpha() :\n\t\t\t\tflag = 1\n\t\tif flag == 1 :\n\t\t\tprint \"Not valid, please enter a valid number: \"\n\t\telse:\n\t\t\tbreak\n\tspeed = float(speed)\n\t\t\t\n\twct_index = 35.74 + (0.6215 * temp) - (35.75 * (speed**0.16)) + (0.4275 * temp * (speed**0.16))\n\tprint \"Temperature (degrees F): %s\" % (str(temp))\n\tprint \"Wind Speed (MPH): %s\" % (str(speed))\n\tprint \"Wind Chill Temperature Index: %s\" % (str(wct_index))\t\n\nwindchill()\n"
},
{
"alpha_fraction": 0.6121711134910583,
"alphanum_fraction": 0.6344647407531738,
"avg_line_length": 32.0533332824707,
"blob_id": "79d0a79fab801439b86c4fd87b7d101bc7e026c6",
"content_id": "183e37a47576d9dd637f12f46cb0ec2b77c969f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4979,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 150,
"path": "/DNA_Seq_Alignment.py",
"repo_name": "arpitacr/Python-Programs",
"src_encoding": "UTF-8",
"text": "# DNA Sequence Alignment Program\n\n#Scientists measure how closely related a species is by looking at the DNA sequences for key proteins and seeing how similar/dissimilar they are. \n#If the two sequences of DNA are essentially the same, \n#the two species are considered to be evolutionarily closer since there is a relationship between changes and time. \n#This process is called sequence alignment.\n#A scientist can change the alignment by assuming that an insertion or deletion, of one of the bases has occurred. \n#They could make such a change, called an indel for short, to see if it improves the alignment\n#Assuming two indels, marked as two dashes(-), the alignment is greatly improved. \n#The scientist would assume that two changes happened, one change in each species.\n\n# This program supports researchers to do DNA alignment by hand\n# This program gives researchers the option to Add an indel, Delete an indel, Score (matches and mismatches between 2 DNA strings are calculated,\n# Matched DNA bases are displayed in lower case while Mismatches are denoted by upper case) and Quit the program\n\n\nimport sys\ndef dna_seq():\n# Block for prompting users to enter 2 valid DNA strings\n\twhile(1):\n\t\tflag1 = 0\n\t\tflag2 = 0\n\t\tstr1 = raw_input(\"Enter first string containing only A, T, C or G: \")\n\t\tstr2 = raw_input(\"Enter second string containing only A, T, C or G: \")\n\t\tfor ch in str1:\n\t\t\tif (ch != 'a' and ch!= 'A' and ch != 't' and ch!= 'T' and ch != 'c' and ch!= 'C' and ch != 'g' and ch!= 'G'):\n\t\t\t\tflag1 = 1\n\t\tfor ch in str2:\n\t\t\tif (ch != 'a' and ch!= 'A' and ch != 't' and ch!= 'T' and ch != 'c' and ch!= 'C' and ch != 'g' and ch!= 'G'):\n\t\t\t\tflag2 = 1\n\t\tif flag1 == 1 or flag2 == 1 :\n\t\t\tprint \"Not valid, enter again\"\n\t\telse:\n\t\t\tbreak\n# Block for prompting users to enter a valid operation to perform\n\twhile(1):\n\t\twhile(1):\n\t\t\tchoice = raw_input( \"What do you want to do: \\n a - add indel \\n d - delete indel \\n s - score \\n q - quit \\n\")\n\t\t\tif (choice!= 'a' and choice!= 'd' and choice!= 's' and choice!= 'q'):\n\t\t\t\tprint \"Not valid choice, please enter choice again\"\n\t\t\telse:\n\t\t\t\tbreak\n# Adding an indel\n\t\tif choice == 'a':\n# Accepting valid string to operate upon\n\t\t\twhile(1):\n\t\t\t\tstrchoice = raw_input(\"Which string do you want to modify - str1 or str2 : \")\n\t\t\t\tif (strchoice != 'str1' and strchoice != 'str2'):\n\t\t\t\t\tprint \"Enter a valid option for string\"\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\tif strchoice == 'str1':\n\t\t\t\tstrman = str1\n\t\t\telse:\n\t\t\t\tstrman = str2\n\t\t#print len(strman)\n# Prompting user to input valid index value\n\t\t\twhile(1):\n\t\t\t\tind = raw_input(\"Before which index: \")\n\t\t\t\tind = int(ind)\n\t\t\t\tif(ind < 1 or ind > len(strman)):\n\t\t\t\t\tprint \"Not valid index, enter index again\"\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\t\n# DNA manipulation\n\t\t\tif ind == 1:\n\t\t\t\tfinalstr = '-' + strman\n\t\t\telse:\n\t\t\t\tfinalstr = strman[:ind-1] + '-' + strman[ind-1:]\n\t\t\tif strman == str1:\n\t\t\t\tstr1 = finalstr\n\t\t\telse:\n\t\t\t\tstr2 = finalstr\n# Deleting an indel\t\t\t\n\t\telif choice == 'd':\n# Accepting valid string to operate upon\n\t\t\twhile(1):\n\t\t\t\tstrchoice = raw_input(\"Which string do you want to modify - str1 or str2: \")\n\t\t\t\tif (strchoice != 'str1' and strchoice != 'str2'):\n\t\t\t\t\tprint \"Enter a valid option for string \"\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\tif strchoice == 
'str1':\n\t\t\t\tstrman = str1\n\t\t\telse:\n\t\t\t\tstrman = str2\n# Prompting user to input valid index value\n\t\t\twhile(1):\n\t\t\t\tind = raw_input(\"At which index:\")\n\t\t\t\tind = int(ind)\n\t\t\t\tif(ind < 0 or ind > (len(strman) - 1) or strman[ind] != '-'):\n\t\t\t\t\tprint \"Not valid index, enter index of indel to be deleted again\"\n\t\t\t\telse:\n\t\t\t\t\tbreak\n# DNA Manipulation\n\t\t\tif ind == 0:\n\t\t\t\tfinalstr = strman[:ind] + strman[ind+1:]\n\t\t\telif ind == len(strman)-1:\n\t\t\t\tfinalstr = strman[:ind]\n\t\t\telse:\n\t\t\t\tfinalstr = strman[:ind] + strman[ind+1:]\n\t\t\tif strman == str1:\n\t\t\t\tstr1 = finalstr\n\t\t\telse:\n\t\t\t\tstr2 = finalstr\n# Scoring\n\t\telif choice == 's':\n# Comparing lengths of 2 DNA strings and adding appropriate number of '-'s to shorter string\n\t\t\tlen1 = len(str1)\n\t\t\tlen2 = len(str2)\n\t\t\tif len1 < len2:\n\t\t\t\tstr1 = str1 + ((len2 - len1) * '-')\n\t\t\telse:\n\t\t\t\tstr2 = str2 + ((len1 - len2) * '-')\n\t\t\n# Calculating matches and mismatches\t\t\n\t\t\tlength = len(str1)\n\t\t\tfinalstr1 = \"\"\n\t\t\tfinalstr2 = \"\"\n\t\t\tmatches = 0\n\t\t\tmismatches = 0\n\t\t\tfor item in range(0,length):\n\t\t\t\tif str1[item] == str2[item]:\n\t\t\t\t\tfinalstr1 = finalstr1 + str1[item]\n\t\t\t\t\tfinalstr2 = finalstr2 + str2[item]\n\t\t\t\t\tmatches += 1\n\t\t\t\telse:\n\t\t\t\t\tmismatches += 1\n\t\t\t\t\tif str1[item] == '-':\n\t\t\t\t\t\tfinalstr1 = finalstr1 + str1[item]\n\t\t\t\t\t\tfinalstr2 = finalstr2 + str2[item].upper()\n\t\t\t\t\telif str2[item] == '-':\n\t\t\t\t\t\tfinalstr1 = finalstr1 + str1[item].upper()\n\t\t\t\t\t\tfinalstr2 = finalstr2 + str2[item]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfinalstr1 = finalstr1 + str1[item].upper()\n\t\t\t\t\t\tfinalstr2 = finalstr2 + str2[item].upper()\n\t\t\tprint \"Matches : \",matches\n\t\t\tprint \"Mismatches : \",mismatches\n\t\t\tprint \"String 1: %s\" % (finalstr1)\n\t\t\tprint \"String 2: %s\" % (finalstr2)\n\t\t\t\n\t\n# Quit\n\t\telse:\n\t\t\tbreak\n\t\t\tsys.exit(0)\n\t\ndna_seq()\n\t\n\t\n\t\t\n\t\t\n\t\t\n\t\n\t\n\t\n\t\n"
},
{
"alpha_fraction": 0.6771728992462158,
"alphanum_fraction": 0.7005730867385864,
"avg_line_length": 34.033897399902344,
"blob_id": "dfab2e043393c764bd05985a53fda210c0493d61",
"content_id": "4b2fb0b2f805ececb490b41464824006fe1ff3ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2094,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 59,
"path": "/Palindrome_196.py",
"repo_name": "arpitacr/Python-Programs",
"src_encoding": "UTF-8",
"text": "# 196-algorithm based Palindrome Generator\n\n#The 196-algorithm is a procedure for creating a palindromic integer: an integer that has the same value when examined forwards or backwards.\n#The 196-algorithm is as follows:\n#\t\t1. If the integers is a palindrome, then print that integer\n#\t\t2. Otherwise, take the integer and its reversal and add them together.\n#\t\t3. With the sum, repeat the process starting at step 1.\n#It is called the 196-algorithm because the integer 196 is the first number that, it appears, does not converge to a palindromic number.\n#Such a number is called a Lychrel number.\n\n#This programs takes a range from the user and reports Natural palindromes, lychrel numbers and non-lychrel numbers within that range\n\ndef palindrome_196():\n# Prompts user to input starting of the range to check\t\n\twhile(1):\n\t\tstart = raw_input(\"Enter starting of the range of numbers to check: \")\n\t\tif not start.isdigit():\n\t\t\tprint \"not valid, please enter again\"\n\t\telse:\n\t\t\tbreak\n# Prompts user to input ending of the range to check\t\t\t\t\n\twhile(1):\n\t\tstop = raw_input(\"Enter ending of the range of numbers to check: \")\n\t\tif not stop.isdigit():\n\t\t\tprint \"not valid, please enter again\"\n\t\telse:\n\t\t\tbreak\n\tnat_pal = 0\n\tnon_lych = 0\n\tlych = 0\n\t\n\tfor item in range( int(start),int(stop)+1):\n# checking for natural palindromes\n\t\tnewitem = str(item)\n\t\tif newitem == newitem[::-1]:\n\t\t\tnat_pal += 1\n\t\telse:\n\t\t\tcount = 0\n\t\t\titem1 = newitem\n\t\t\twhile(count<60):\n\t\t\t\ttotal = int(item1) + int(item1[::-1])\n\t\t\t\tcount += 1\n\t\t\t\titem1s = str(total)\n\t\t\t\titem1 = item1s\n# checking for non lychrel numbers\n\t\t\t\tif item1s == item1s[::-1]:\n\t\t\t\t\tnon_lych += 1\n\t\t\t\t\tbreak\n\t\t\telse:\n# checking for lychrel numbers and printing them\n\t\t\t\tlych += 1\n\t\t\t\tprint newitem + \" is a Lychrel number\"\n\t\n# printing total count of natural palindromes, non-lychrel numbers and lychrel numbers\n\tprint \"The total number of natural palindromes are \" + str(nat_pal)\n\tprint \"The total number of non Lychrel numbers are \" + str(non_lych)\n\tprint \"The total number of Lychrel numbers are \" + str(lych)\n\t\npalindrome_196()\n\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\n\t\n"
}
] | 4 |
rahulkris1/SSW810 | https://github.com/rahulkris1/SSW810 | 6cba2892211f2b34d3e00982770fd4a9fbe6173e | a88634aabe82e2439cee0d7eb6bacabe0a22b78f | d1fffb8ab83561b4dc588b7d87ed0a16d440a8a3 | refs/heads/master | 2020-09-07T17:19:23.889631 | 2019-11-29T18:34:11 | 2019-11-29T18:34:11 | 220,858,008 | 0 | 0 | null | 2019-11-10T22:10:50 | 2019-11-11T02:17:43 | 2019-11-29T18:34:11 | Python | [
{
"alpha_fraction": 0.6935897469520569,
"alphanum_fraction": 0.6974359154701233,
"avg_line_length": 30.15999984741211,
"blob_id": "a8dd63d37641bf5d2dbd04c750a68c9fc500b906",
"content_id": "e02a97d758f77db7ae22cd29f731d4be4db9128d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4688,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 150,
"path": "/HW09_ReSub_Rahul_Kampati.py",
"repo_name": "rahulkris1/SSW810",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 26 November 2019, 08:00\n@author: Kampati Rahul\n\nCreation of a data repository for students and instructors to keep track of data\n\"\"\"\n\nimport os\nfrom collections import defaultdict\nfrom prettytable import PrettyTable\n\nclass Student:\n\tdef __init__(self, cwid, name, major):\n\t\t\"\"\" Students class to hold students data\"\"\"\n\t\tself.cwid = cwid\n\t\tself.name = name\n\t\tself.major = major\n\t\tself.course_grade_dict = defaultdict(str)\n\n\tdef course_grade_student(self, course, grade):\n\t\t\"\"\" Assign grade of each course\"\"\"\n\t\tself.course_grade_dict[course] = grade\n\n\tdef prettyTable_student(self):\n\t\t\"\"\" Structuring data for pretty table for students\"\"\"\n\t\treturn [self.cwid, self.name, sorted(self.course_grade_dict.keys())]\n\nclass Instructor:\n\tdef __init__(self, cwid, name, dept):\n\t\t\"\"\" instructors class to hold students data\"\"\"\n\t\tself.cwid = cwid\n\t\tself.name = name\n\t\tself.dept = dept\n\t\tself.course_inst_dict = defaultdict(int)\n\n\tdef num_course_students(self, course):\n\t\t\"\"\" Assign number of students under each professor\"\"\"\n\t\tself.course_inst_dict[course] += 1\n\n\tdef prettyTable_instructor(self):\n\t\t\"\"\" Structuring data for pretty table for students\"\"\"\n\t\tfor course in self.course_inst_dict:\n\t\t\tyield [self.cwid, self.name, self.dept, course, self.course_inst_dict[course]]\n\nclass Repository:\n\tdef __init__(self, directory):\n\t\t\"\"\" repository class to hold the students, instructors and grades data\"\"\"\n\t\tself.directory = directory\n\t\tself.student_dict = {}\n\t\tself.instructor_dict = {}\n\t\tself.student_analyser()\n\t\tself.instructor_analyser()\n\t\tself.grades_analyser()\n\t\tself.students_summary()\n\t\tself.instructors_summary()\n\n\tdef student_analyser(self):\n\t\t\"\"\" Analyse Students.txt data file\"\"\"\n\t\tif not os.path.exists(self.directory):\n\t\t\traise FileNotFoundError(\"Directory not found\")\n\n\t\tfile_students = os.path.join(self.directory, 'students.txt')\n\n\t\tfor cwid, name, major in self.file_reading_gen(file_students, 3, \"\\t\", False):\n\t\t\tself.student_dict[cwid] = Student(cwid, name, major)\n\t\t\n\n\tdef instructor_analyser(self):\n\t\t\"\"\" Analyse Instructors.txt data file\"\"\"\n\t\tif not os.path.exists(self.directory):\n\t\t\traise FileNotFoundError(\"Directory not found\")\n\n\t\tfile_instructors = os.path.join(self.directory, 'instructors.txt')\n\n\t\tfor cwid, name, dept in self.file_reading_gen(file_instructors, 3, \"\\t\", False):\n\t\t\tself.instructor_dict[cwid] = Instructor(cwid, name, dept)\n\n\tdef grades_analyser(self):\n\t\t\"\"\" Analyse grades.txt data file\"\"\"\n\t\tif not os.path.exists(self.directory):\n\t\t\traise FileNotFoundError(\"Directory not found\")\n\n\t\tfile_grades = os.path.join(self.directory, 'grades.txt')\n\n\t\tfor studentCwid, course, grade, instructorCwid in self.file_reading_gen(file_grades, 4, \"\\t\", False):\n\t\t\tif studentCwid in self.student_dict.keys():\n\t\t\t\tself.student_dict[studentCwid].course_grade_student(course, grade)\n\t\t\telse:\n\t\t\t\tprint(f\"Invalid student cwid {studentCwid}\")\n\n\t\t\tif instructorCwid in self.instructor_dict.keys():\n\t\t\t\tself.instructor_dict[instructorCwid].num_course_students(course)\n\n\t\t\telse:\n\t\t\t\tprint(f\"Invalid Instructor id {instructorCwid}\")\n\n\n\tdef file_reading_gen(self, path, fields, sep, header=False):\n\t\t\"\"\"Generator function that reads a flie and returns 
one line at a time.\"\"\"\n\n\t\ttry:\n\t\t\tfp = open(path, 'r')\n\n\t\texcept FileNotFoundError:\n\t\t\traise FileNotFoundError(\"Unable to open the file path provided\")\n\n\t\telse:\n\t\t\twith fp:\n\t\t\t\tif header:\n\t\t\t\t\theader_info = next(fp)\n\t\t\t\t\tif len(header_info.split(sep)) != fields:\n\t\t\t\t\t\traise ValueError(f\"File path has {len(header_info.split(sep))} invalid number of fields instead of {fields}\")\n\n\t\t\t\tfor line in fp:\n\t\t\t\t\tif len(line.split(sep)) != fields:\n\t\t\t\t\t\traise ValueError(f\" file has {len(next(fp.split(sep)))} fields instead of {fields} \")\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tline = line.strip().split(sep)\n\t\t\t\t\t\tyield tuple(line)\n\n\n\tdef students_summary(self):\n\t\t\"\"\" Summarising the students data\"\"\"\n\t\ttb_student = PrettyTable(field_names = [\"CWID\", \"Name\", \"Completed Courses\"])\n\t\tfor inst_student in self.student_dict.values():\n\t\t\ttb_student.add_row(inst_student.prettyTable_student())\n\t\tprint(\"Student Summary\")\n\t\tprint(tb_student)\n\n\tdef instructors_summary(self):\n\t\t\"\"\" Summarising the Instructors data\"\"\"\n\t\ttb_instructor = PrettyTable(field_names = [\"CWID\", \"Name\", \"Dept\", \"Course\", \"Students\"])\n\t\tfor inst_instructor in self.instructor_dict.values():\n\t\t\tfor instructor_data in inst_instructor.prettyTable_instructor():\n\t\t\t\ttb_instructor.add_row(instructor_data)\n\t\tprint(\"Instructor Summary\")\n\t\tprint(tb_instructor)\n\n\ndef main():\n try:\n Repository(\"C:/Users/HP/Desktop/redo/file_09\")\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n main() \n\n\n\n\n\n"
},
{
"alpha_fraction": 0.609375,
"alphanum_fraction": 0.6442307829856873,
"avg_line_length": 24.212121963500977,
"blob_id": "514eeccf45a38d1cdfe822266eda23d7f0bf82a7",
"content_id": "a210f324c1138dca7ca314aa57fc525e1ef9d725",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 33,
"path": "/HW09_ReSub_Test_Rahul_Kampati.py",
"repo_name": "rahulkris1/SSW810",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 26 November 2019, 09:30:00\n\n@author: Kampati Rahul\n\nTesting the Creation of data repository of courses, students, faculty members.\n\"\"\"\n\nimport unittest\nfrom HW09_ReSub_Rahul_Kampati import Repository\n\n\nfp = \"C:/Users/HP/Desktop/redo/file_09/test\" \n\n\nclass TestRepository(unittest.TestCase):\n \"\"\" Testing the File Generators \"\"\"\n\n def test_student_dict(self):\n \"\"\" Test the info of the student\"\"\"\n test = Repository(fp)\n self.assertEqual(list(test.student_dict.keys()), [\"10103\"])\n\n def test_instructor_info_dict(self):\n \"\"\" Test the info of the instructor \"\"\"\n test = Repository(fp)\n self.assertEqual(list(test.insructor_dict.keys()), [\"98765\"])\n \n\nif __name__ == \"__main__\":\n unittest.main(exit=False, verbosity=2)\n"
}
] | 2 |
Love-YGX-and-D/2019.03.04_MySQL_xlrd_python | https://github.com/Love-YGX-and-D/2019.03.04_MySQL_xlrd_python | d820e2082fa1810c404f266da0b286cece2eb6c0 | c1c2ac53d5a95dc3e6c953cfb7c4ebb8ead497e9 | 6e887eb15ebe3c6f841492c863ed4067a96ecfec | refs/heads/master | 2020-04-26T17:39:49.150991 | 2019-03-04T10:11:58 | 2019-03-04T10:11:58 | 173,720,477 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6120826601982117,
"alphanum_fraction": 0.6200317740440369,
"avg_line_length": 28,
"blob_id": "3d3853f8ff08bcd537392bb69f5f16d208bc7cb4",
"content_id": "387e929945fbe63ccda4e1ce3bcf0e1091b60fcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1286,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 43,
"path": "/InsertMysql/insert.py",
"repo_name": "Love-YGX-and-D/2019.03.04_MySQL_xlrd_python",
"src_encoding": "UTF-8",
"text": "import pymysql\ndb=pymysql.connect(\n host='localhost',\n user='root',\n password='1002',\n db='zhangzhishi',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\ncursor=db.cursor()\n\nimport xlrd\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','xls','xlsx'])\n\n# 判断是否是允许上传的文件类型\ndef allowed_file(filename):\n return '.' in filename and filename.split('.', 1)[1] in ALLOWED_EXTENSIONS\n\ncursor.execute(\"select id,name from zhishi_type\")\nresult=cursor.fetchall()\nobj={}\nfor item in result:\n obj[item[\"name\"]]=item[\"id\"]\n \n\n\ndef up():\n book=xlrd.open_workbook(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\test.xlsx\")\n sheet=book.sheet_by_index(0)\n arr=[]\n import time\n for item in range(1,sheet.nrows):\n con=sheet.row_values(item)\n types=obj[con[0]]\n question=con[1]\n answer=con[2]\n c_time=time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n u_time=time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime());\n arr.append((types,question,answer,c_time,u_time))\n print(arr[:10]) \n cursor.executemany(\"insert into zhishi_testquestions (types_id,question,answer,c_time,u_time) values (%s,%s,%s,%s,%s)\",(arr))\n db.commit()\n db.close() \nup()\n "
},
{
"alpha_fraction": 0.6530612111091614,
"alphanum_fraction": 0.8163265585899353,
"avg_line_length": 23.5,
"blob_id": "a1903674cd2742ed4244f5790741fac075f12c55",
"content_id": "e78a59fa31a87b87a4b7c3c674d5dcaa4c0399b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Love-YGX-and-D/2019.03.04_MySQL_xlrd_python",
"src_encoding": "UTF-8",
"text": "# 2019.03.04_MySQL_xlrd_python\nxlrd读取xlsx文件导入数据库\n"
}
] | 2 |
Cydt/Pareidolia | https://github.com/Cydt/Pareidolia | 5763a105f53ef868f56da84346c5ae1d75683db2 | 1217fc9dbc2562d535245d26a6f3d2b73d2a781f | 82f46d96bf662055374b9cb8aa777ca24ddc8fa2 | refs/heads/master | 2020-12-07T19:12:25.746089 | 2016-08-30T15:24:15 | 2016-08-30T15:24:15 | 66,862,517 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7593985199928284,
"alphanum_fraction": 0.7593985199928284,
"avg_line_length": 25.600000381469727,
"blob_id": "b2481de2b3c5a9884532f94b1e06e5d60ccf3596",
"content_id": "f65ddf38a3ca8fa2c5410e980aa00236c928105c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 5,
"path": "/pareidolia/types.py",
"repo_name": "Cydt/Pareidolia",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\n\nSize = namedtuple('Size', 'width height')\nDimensions = namedtuple('Dimensions', 'rows columns')\n"
},
{
"alpha_fraction": 0.6567834615707397,
"alphanum_fraction": 0.6594789028167725,
"avg_line_length": 31.735294342041016,
"blob_id": "5eeb8dcdd0a19dc46107b0f2e0b07f1fd8d18a4e",
"content_id": "ced2daf8159a4b4967388164b132e60bc82a61e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 34,
"path": "/par",
"repo_name": "Cydt/Pareidolia",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport argparse\n\nfrom pareidolia import combine\nfrom pareidolia.types import Size, Dimensions\n\n\nparser = argparse.ArgumentParser(description='Create image for pareidolia')\nparser.add_argument('--images', '-i', nargs='+', dest='images',\n help='images to be processed')\nparser.add_argument('--number', '-n', dest='number', type=int,\n help='number of images to be created')\nparser.add_argument('--size', '-s', dest='size',\n help='size of the output images (format WxH)')\nparser.add_argument('--dimensions', '-d', dest='dimensions',\n help='rows x columns (format RxC)')\n\nargs = parser.parse_args()\n\ndef parse_dimensions(dimension_string, kind=tuple):\n \"\"\"\n x-separated numbers\n \"\"\"\n if dimension_string is None:\n return None\n\n dim_list = dimension_string.split('x')\n return kind(int(dim_list[0]), int(dim_list[1]))\n\ndimensions = parse_dimensions(args.dimensions, Dimensions)\nsize = parse_dimensions(args.size, Size)\n\ncombine(args.images, number=args.number, dimensions=dimensions, size=size)\n"
},
{
"alpha_fraction": 0.6147087812423706,
"alphanum_fraction": 0.6257728338241577,
"avg_line_length": 23.576000213623047,
"blob_id": "6cf05a44067a24d52c5799c6a8517a2dee146ae2",
"content_id": "9438db14f37f70bb8dcc2bb8f691b645ebb11f1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3073,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 125,
"path": "/pareidolia/combiner.py",
"repo_name": "Cydt/Pareidolia",
"src_encoding": "UTF-8",
"text": "import random\nimport math\n\nfrom PIL import Image, ImageFilter\nfrom PIL.ImageOps import autocontrast\n\nfrom .types import Size, Dimensions\n\n\n# PIL wrappers\n\ndef image(filename):\n return Image.open(filename)\n\ndef make_grayscale(image):\n return image.convert(\"L\")\n\n\ndef combine(filenames, size=None, number=None, dimensions=None):\n \"\"\"\n Create a random image from the passed files\n images: list\n size: (x, y)\n \"\"\"\n # some guards\n if filenames is None or len(filenames) == 0:\n print('Not enough files provided')\n return\n\n if number is None:\n number = 1\n\n # dimensions overrules number\n if dimensions is None:\n dimensions = Dimensions(1, number)\n else:\n number = dimensions.rows * dimensions.columns\n\n if size is None:\n size = Size(400, 200) \n\n # copy and shuffle\n shuffled = filenames[:]\n random.shuffle(shuffled)\n \n # pick one base image to fill the canvas\n base = shuffled[0]\n rest = shuffled[1:]\n\n # create grayscale versions\n images = map(image, shuffled)\n grayscales = list(map(make_grayscale, images))\n\n # create a new image and paste the grayscales\n combined = list()\n for _ in range(number):\n combined.append(combine_images(grayscales, size=size))\n\n show_collage(combined, dimensions)\n\n\ndef show_collage(images, dimensions):\n width, height = images[0].size\n rows, columns = dimensions\n\n padding = 10\n\n collage_size = (\n width * columns + padding * (columns-1),\n height * rows + padding * (rows-1)\n )\n collage = Image.new('L', collage_size)\n \n for row in range(rows):\n top = row * (height + padding)\n for col in range(columns):\n left = col * (width + padding)\n idx = row*columns + col\n collage.paste(images[idx], ((left, top)))\n\n collage.show()\n\ndef crop_square(image, size):\n \"\"\"\n crop a square from a random location in image\n \"\"\"\n width, height = image.size\n top = random.randint(0, max(0, height-size))\n left = random.randint(0, max(0, width-size))\n bottom = min(top + size, height)\n right = min(left + size, width)\n\n return image.crop((left, top, right, bottom))\n\ndef pythagoras(width, height):\n return math.ceil(math.sqrt(math.pow(width,2) + math.pow(height,2)))\n\ndef combine_images(images, size=None):\n\n width, height = size\n\n # size for the crop\n radius = pythagoras(*size)\n \n # locations for the paste\n left = int((width - radius) / 2)\n top = int((height - radius) / 2)\n\n # reusable mask (because opacity is fixed)\n opacity = 100 # out of 255\n mask = Image.new('L', (radius, radius), opacity)\n\n combined = Image.new('L', size, 'gray')\n for img in images:\n rotation = random.randint(0, 359)\n\n cropped = crop_square(img, radius)\n rotated = cropped.rotate(rotation, resample=Image.BICUBIC)\n rotated_mask = mask.rotate(rotation)\n\n combined.paste(rotated, (left, top), rotated_mask)\n\n combined = autocontrast(combined)\n\n return combined\n\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 29,
"blob_id": "8ea01ec08b1ef3556c05f563a717dde763eaa686",
"content_id": "b26c8c1219e96cdc47e89870fbcb4566b891d076",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 30,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 1,
"path": "/pareidolia/__init__.py",
"repo_name": "Cydt/Pareidolia",
"src_encoding": "UTF-8",
"text": "from .combiner import combine\n"
}
] | 4 |
MonkeyDad/decoratorworks | https://github.com/MonkeyDad/decoratorworks | fc4f22f3a9a4c94420274077db8580ccbcd326a8 | c2a74ce6cc6986fb7074490d475ccf2b38e577ce | cad47ce40e470e140f71f9e01cacf3f18d25bb99 | refs/heads/master | 2016-08-12T16:34:15.381025 | 2016-02-17T09:50:41 | 2016-02-17T09:50:41 | 51,911,451 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.614814817905426,
"alphanum_fraction": 0.6197530627250671,
"avg_line_length": 17.409090042114258,
"blob_id": "e8c0a4bc410fcbc3462adc9b43aa8a311c4873e7",
"content_id": "5964aaa7f7d3c3eeb8df8e48862d892d64371c4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 22,
"path": "/main.py",
"repo_name": "MonkeyDad/decoratorworks",
"src_encoding": "UTF-8",
"text": "#python3\n\nfrom functools import partial, wraps\n\ndef debug(func=None, *, prefix=''):\n if func is None:\n return partial(debug, prefix=prefix)\n\n msg = prefix + func.__qualname__\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(msg)\n return func(*args, **kwargs)\n return wrapper\n\n@debug\ndef test(a):\n return a\n\n@debug(prefix='aaaaaaaaaaa')\ndef test2(a):\n return a\n"
}
] | 1 |
The-Real-Thisas/slippers | https://github.com/The-Real-Thisas/slippers | 9fe37db065d9c49ede801c7cd4d8f1565665afad | 5e0c8f3250dcf5c8fd657e4e6aad2e601ff65754 | e797336d0831184cf5790bb8dc995326c055edc2 | refs/heads/master | 2022-04-10T03:09:13.632208 | 2020-04-02T13:24:40 | 2020-04-02T13:24:40 | 260,272,524 | 0 | 1 | MIT | 2020-04-30T17:17:26 | 2020-04-27T00:10:55 | 2020-04-02T13:24:42 | null | [
{
"alpha_fraction": 0.5933610200881958,
"alphanum_fraction": 0.5933610200881958,
"avg_line_length": 25.77777862548828,
"blob_id": "30cfb57bd16a9c5ae46f3be1cdf6880dfc10ee32",
"content_id": "507208a3b9afd4d6e175f8de88e95596b62cd567",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 9,
"path": "/world/dataHandler/pluginManager/Plugin.py",
"repo_name": "The-Real-Thisas/slippers",
"src_encoding": "UTF-8",
"text": "class Plugin(object):\n \"\"\"\n A base for a Slippers plugin, used to respond to DataHandler events.\n \"\"\"\n\n def __init__(self, users, rooms, packet):\n self.users = users\n self.rooms = rooms\n self.packet = packet\n"
},
{
"alpha_fraction": 0.40447959303855896,
"alphanum_fraction": 0.4268774688243866,
"avg_line_length": 29.979591369628906,
"blob_id": "567ce0880f95ad58558d820a1b1995846b4422d6",
"content_id": "bf742e9bbf197ad969be7af9cb11149ec927edcc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1518,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 49,
"path": "/www/php/login.php",
"repo_name": "The-Real-Thisas/slippers",
"src_encoding": "UTF-8",
"text": "<?php\n // Create connection\n $db = new mysqli(\"127.0.0.1\", \"root\", \"password\", \"slippers\");\n // Check connection\n if ($db->connect_error) {\n die(\"&e=0\");\n }\n\n // Query user data from the database\n $data = $db->query(\"SELECT * FROM users WHERE username = '\" . $db->real_escape_string($_POST[\"Username\"]) . \"'\");\n\n if ($data->num_rows > 0) { // If the user was found\n $user = mysqli_fetch_assoc($data);\n } else { // If the user was not found\n die(\"&e=100\");\n }\n\n // Authenticate login\n if (password_verify($_POST[\"Password\"], $user[\"password\"])) { // If the password is correct\n // Formats item array\n $items = explode(\",\", $user[\"items\"]);\n $items = str_replace(array(\"[\", \"]\", \"\\\"\", \" \"), \"\", $items);\n $items = implode(\"|\", $items);\n $member = \"1\";\n\n die(\"&s=0&rt=0&str=0&crumb=\" .\n $user[\"id\"] . \"|\" .\n $user[\"username\"] . \"|\" .\n $user[\"color\"] . \"|\" .\n $user[\"head\"] . \"|\" .\n $user[\"face\"] . \"|\" .\n $user[\"neck\"] . \"|\" .\n $user[\"body\"] . \"|\" .\n $user[\"hand\"] . \"|\" .\n $user[\"feet\"] . \"|\" .\n $user[\"pin\"] . \"|\" .\n $user[\"background\"] .\n \"|0|0|0|\" . $member . \"|0\" .\n \"&il=\" . $items .\n \"&c=\" . $user[\"coins\"] .\n \"&bl=\" .\n \"&nl=\" .\n \"&k1=\" . $user[\"loginKey\"] .\n \"&ed=\" . \"86400\" .\n \"&jd=\" . \"2018-1-1\");\n } else { // If the password is incorrect\n die(\"&e=101\");\n }\n?>\n"
}
] | 2 |
tommywenjiezhang/flask_auth_starter | https://github.com/tommywenjiezhang/flask_auth_starter | ad14780032f89b35a61d278aa836561b1039cc0d | bd1688d82335c977050051ef5483d5d49033ac79 | 3bf0dfa660be8b01d79dc95ef5da180dd5c35d8d | refs/heads/master | 2023-02-22T20:39:03.296229 | 2020-05-23T00:47:20 | 2020-05-23T00:47:20 | 266,235,683 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6564416885375977,
"alphanum_fraction": 0.6638036966323853,
"avg_line_length": 23.727272033691406,
"blob_id": "8e65f8dd52f8096ea9b3179b6463d378e669030b",
"content_id": "3bf198919ea304f82bcb6eee10f106d4fddfa7ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 33,
"path": "/flaskr/__init__.py",
"repo_name": "tommywenjiezhang/flask_auth_starter",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\nfrom flask_login import LoginManager\ndef create_app():\n app = Flask(__name__,instance_relative_config=False)\n\n\n login_manager = LoginManager()\n login_manager.login_view = \"auth.auth_bp\"\n login_manager.init_app(app)\n\n \n\n\n app.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\n\n db.init_app(app)\n\n from .model import User\n\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(int(user_id))\n\n with app.app_context():\n from .home import home\n from .auth import auth\n app.register_blueprint(home.home_bp, url_prefix='/home')\n app.register_blueprint(auth.auth_bp)\n \n return app"
},
{
"alpha_fraction": 0.6710138916969299,
"alphanum_fraction": 0.672275960445404,
"avg_line_length": 32.47887420654297,
"blob_id": "d9825782aed2ce90e6f0e69d4b3a7072d95a1b3a",
"content_id": "efab3ffedcb447341015a8aae6d711e21a4dfab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2377,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 71,
"path": "/flaskr/auth/auth.py",
"repo_name": "tommywenjiezhang/flask_auth_starter",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, render_template, url_for, request, redirect, flash\nimport os\nfrom flask import current_app as app\nfrom ..model import User\nfrom .. import db\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import login_user\n\nauth_bp = Blueprint('auth_bp', __name__,template_folder='templates',static_folder='static')\n\n@auth_bp.context_processor\ndef override_url_for():\n return dict(url_for=dated_url_for)\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n@auth_bp.route('/login')\ndef login():\n return render_template(\"login.html\")\n\n@auth_bp.route('/login',methods=['POST'])\ndef login_post():\n email = request.form['email']\n password = request.form['password']\n\n user = User.query.filter_by(email=email).first()\n\n if not user or not check_password_hash(user.password, password):\n flash('Please check your login details and try again.', 'errors')\n return redirect(url_for('auth_bp.login'))\n login_user(user)\n flash('Login Successfully', 'success')\n return redirect(url_for('home_bp.profile'))\n\n\n@auth_bp.route('/signup')\ndef signup():\n return render_template('signup.html')\n\n@auth_bp.route('/signup', methods=['POST'])\ndef signup_post():\n email = request.form.get('email')\n name = request.form.get('name')\n password = request.form.get('password')\n\n user = User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database\n\n if user: # if a user is found, we want to redirect back to signup page so user can try again\n flash(\"Email address already exist\", 'errors')\n return redirect(url_for('auth_bp.signup'))\n\n # create new user with the form data. Hash the password so plaintext version isn't saved.\n new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))\n\n # add the new user to the database\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(url_for('auth_bp.login'))\n\n\n@auth_bp.route('/logout')\ndef logout():\n return 'Logout'\n"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.7708333134651184,
"avg_line_length": 15,
"blob_id": "cc1b39d20fb9a8d46faaf61dc31466756964b988",
"content_id": "970dd7eef38168da7c0b1ccb3bff4b5aa343e4b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/README.md",
"repo_name": "tommywenjiezhang/flask_auth_starter",
"src_encoding": "UTF-8",
"text": "# flask_auth_starter\n\nadd the basic auth router\n"
},
{
"alpha_fraction": 0.6891385912895203,
"alphanum_fraction": 0.6891385912895203,
"avg_line_length": 25.799999237060547,
"blob_id": "13c3b0d08ce9c09e954d8786f5fb8585a1c1d3e3",
"content_id": "d60faf2d8f990f3a939708d463b204960de4adf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 267,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/flaskr/home/home.py",
"repo_name": "tommywenjiezhang/flask_auth_starter",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint , render_template\n\nhome_bp = Blueprint('home_bp',__name__,template_folder='templates')\n\n@home_bp.route('/',methods=['GET'])\ndef index():\n return render_template(\"body.html\")\n@home_bp.route('/profile')\ndef profile():\n return 'Profile'"
}
] | 4 |
Harshitchourasiya/MINE | https://github.com/Harshitchourasiya/MINE | 023de4d81820b45f30d35caaa2a6fbf58845b421 | e544a33fb3c43eb027959087582608369d6559a2 | fb409e139d313dbc49e9f63c9a0735616cab5163 | refs/heads/master | 2020-03-17T20:44:02.711532 | 2019-03-09T07:18:09 | 2019-03-09T07:18:09 | 133,926,453 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5518518686294556,
"alphanum_fraction": 0.5518518686294556,
"avg_line_length": 22.545454025268555,
"blob_id": "70f6300c5287282a94d8c86947658c7a5a0f3fc2",
"content_id": "249a2d6e955665e4ec8ac09b6476659dd7bc86d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/3rd day/fourth.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "with open(\"new.txt\",'w') as f:\r\n f.write(\"my name is harshit\\n\")\r\n f.write(\"my name is \\n\")\r\n f.write(\"my name \\n\")\r\nname_list = [] \r\nwith open(\"new.txt\",'r') as f:\r\n for a_single_line in f:\r\n \r\n name_list.append(a_single_line)\r\n\r\nprint name_list\r\n"
},
{
"alpha_fraction": 0.5433070659637451,
"alphanum_fraction": 0.5748031735420227,
"avg_line_length": 23.399999618530273,
"blob_id": "0233947d8ebe4cb3cc7f966017445a96336f149e",
"content_id": "3e818c23ab6ff5538b1cfbcf5c6b06783c11e419",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/3rd day/sec(append).py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "f= open(\"new1.txt\",'a')\r\nf.write(\"\\n APPEND HELLO WORLD\")\r\nf.close()\r\nwith open (\"new1.txt\",'r') as f1:\r\n print(f1.read())\r\n"
},
{
"alpha_fraction": 0.6395348906517029,
"alphanum_fraction": 0.6395348906517029,
"avg_line_length": 15.199999809265137,
"blob_id": "131e551e6d6e1b0f42b8b90051a78d23141a1e86",
"content_id": "5b7d208c481cc21f96355ebce77d32f2e32b0ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 10,
"path": "/1st day/third_gui.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "#window\r\n# title\r\n# geometry\r\n#label inside windows\r\n# text=calculator\r\n# grid=row,column\r\n#label - enter a\r\n#label - enter b\r\n#button\r\n#label - show value of a+b\r\n"
},
{
"alpha_fraction": 0.5362318754196167,
"alphanum_fraction": 0.5942028760910034,
"avg_line_length": 20.33333396911621,
"blob_id": "8acb2dec1373b4cc2c56c0ab992db7092d9a4058",
"content_id": "f751ca8278e62b004c5b5395d4217e83b6ec4370",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/3rd day/sdidbdqgtdqkb,jqwfhu.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "\r\nfile1 = open(\"new1.txt\",'r')\r\nprint (file1.read())\r\nfile1.close()\r\n"
},
{
"alpha_fraction": 0.5152838230133057,
"alphanum_fraction": 0.528384268283844,
"avg_line_length": 20.700000762939453,
"blob_id": "744cc0488f96e6811ec659aa546147b5047490e2",
"content_id": "3c5976ae1c9a2ddabeebf742cc2076c1b82eb398",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 10,
"path": "/1st day/jasbcjhsbhj.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "def namemethodfunction(a):\r\n for i in range(a):\r\n print \"hello world\"\r\n \r\nnamemethodfunction(5)\r\nprint\"........................\"\r\ndef anotherMethod(a,b):\r\n c = a + b\r\n return c\r\nprint anotherMethod(5,7)\r\n\r\n"
},
{
"alpha_fraction": 0.5562701225280762,
"alphanum_fraction": 0.5755627155303955,
"avg_line_length": 19.064516067504883,
"blob_id": "be63daa297cb221fe0419f4adb612a41b5714dcf",
"content_id": "e4e76fa71c77e8cc61e3ad71c176e686491ceba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 31,
"path": "/FaceRecognition/onlineFace/FacebyApi.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "import requests\nimport cv2\n\nheaders = { \"app_id\": \"cb84ee90\", \"app_key\": \"c0507e42e24b9b72990f34667b96266d\"}\n\n###########################################\n#Camera operation and save to file\ncamera = cv2.VideoCapture(0)\n\nfor i in xrange(10):\n\ttemp = camera.read()[1]\n\nimage = camera.read()[1]\n\ncv2.imwrite(\"detect.jpg\", image)\n\n\n###########################################\n#Using our API\n\n#url of our api to do a post request\nurl = \"https://api.kairos.com/recognize\"\n\nfiles = { 'image': open(\"detect.jpg\", 'rb')}\n\nvalues = {\"gallery_name\": \"Class\"}\n\n\n#making request\nr = requests.post(url, data = values, headers=headers, files = files)\nprint r.text\n"
},
{
"alpha_fraction": 0.4864864945411682,
"alphanum_fraction": 0.4864864945411682,
"avg_line_length": 30.375,
"blob_id": "06719f72fd92098ed5c4f572c7f4ce18dbc7a16c",
"content_id": "926377125e962c8d5d48ae76b96f7ed015c87c85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/3rd day/third.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "with open (\"new.txt\",'a') as f:\r\n f.write(\"hey dear\")\r\n f.write(\"shut up bro\")\r\nwith open (\"new.txt\",'r') as f:\r\n for a_single_line in f:\r\n print\"Name : \" +a_single_line\r\nprint \"all names printed\"\r\nprint \"...................................\"\r\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6451612710952759,
"avg_line_length": 25.899999618530273,
"blob_id": "117ab69d8b233e5cd22456aed1f10dfc1faa55c9",
"content_id": "9ca682e326b7e1530ff053fc24ab93c89295a94d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 10,
"path": "/1st day/b.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "a=input(\" enter first side oftriangle\")\r\nb= input(\"enter second side of traiangle\")\r\nc= input(\"enter third side of triangle\")\r\n\r\nif(a==b) and (b==c):\r\n print\" equilateral tiangle\"\r\nelif (a!=b) and (b!=c):\r\n print\" scalene triangle \"\r\nelse :\r\n print \"isoceles triangle\"\r\n"
},
{
"alpha_fraction": 0.5873684287071228,
"alphanum_fraction": 0.6315789222717285,
"avg_line_length": 17.79166603088379,
"blob_id": "a2ee181f237dc385ca0f19f08b7352974b6d5320",
"content_id": "3ffa21021245fa632840e34c50dcb1b30a67a31b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 48,
"path": "/1st day/cal_gui.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "from Tkinter import *\r\n\r\nwindow = Tk()\r\n\r\nwindow.title(\"Calci\")\r\nwindow.geometry('480x620')\r\nwindow.configure(background = \"red\")\r\n\r\nenterPrompt= Label (window,text=\"enter first value\")\r\nenterPrompt.grid(column=0, row=0)\r\n\r\nentrybox1 = Entry(window,width = 5)\r\nentrybox1.grid(column=1, row=0)\r\nentrybox1.focus()\r\n\r\nenterPrompt= Label (window,text=\"enter second value\")\r\nenterPrompt.grid(column=0, row=25)\r\n\r\nentrybox2 = Entry(window,width = 5)\r\nentrybox2.grid(column=1, row=25)\r\nentrybox2.focus()\r\n\r\nshow=Label(window,text='')\r\nshow.grid(column=361,row=360)\r\n\r\ndef clicked():\r\n\r\n a= entrybox1.get()\r\n a= int(a)\r\n\r\n b= entrybox2.get()\r\n b= int(b)\r\n \r\n c= a + b\r\n c= str(c)\r\n\r\n show.configure(text=c)\r\n\r\n window.configure(background = \"blue\")\r\n window.title(\"Calculated\")\r\n\r\n \r\n \r\nbtn = Button(window, text=\"Enter\", bg = \"#2196f3\", command = clicked)\r\nbtn.grid(column=36,row=160)\r\n\r\n\r\nwindow.mainloop()\r\n"
},
{
"alpha_fraction": 0.6563557386398315,
"alphanum_fraction": 0.6645722389221191,
"avg_line_length": 34.050846099853516,
"blob_id": "dff143d91bb1c5018a3fae3ea376fa450242a540",
"content_id": "569a799052769ab493c8a8c94df30aa70367fcbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2069,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 59,
"path": "/Sprint_backlog1_Image/face2_rec.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "# facerec.py\nimport cv2, sys, numpy, os,pyttsx,time\nfrom PIL import Image\nsize = 4\nfn_haar = 'haarcascade_frontalface_default.xml'\nfn_dir = 'database'\n\n\n# Part 1: Create fisherRecognizer\nprint('Training...')\n# Create a list of images and a list of corresponding names\n(images, lables, names, id) = ([], [], {}, 0)\nfor (subdirs, dirs, files) in os.walk(fn_dir):\n for subdir in dirs:\n names[id] = subdir\n subjectpath = os.path.join(fn_dir, subdir)\n for filename in os.listdir(subjectpath):\n path = subjectpath + '/' + filename\n lable = id\n\t images.append(cv2.imread(path, 0))\n lables.append(int(lable))\n id += 1\n(im_width, im_height) = (112, 92)\n\n# Create a Numpy array from the two lists above\n(images, lables) = [numpy.array(lis) for lis in [images, lables]]\n\n# OpenCV trains a model from the images\n\nRecognizer = cv2.face.FisherFaceRecognizer_create()\ndetector = cv2.CascadeClassifier(\"fn_haar\")\n\ndef getImagesAndLabels(path):\n\t #get the path of all the files in the folder\n imagePaths=[os.path.join(path,f) for f in os.listdir(path)] \n #create empth face list\n faceSamples=[]\n #create empty ID list\n Ids=[]\n #now looping through all the image paths and loading the Ids and the images\n for imagePath in imagePaths:\n #loading the image and converting it to gray scale\n pilImage=Image.open(imagePath).convert('L')\n #Now we are converting the PIL image into numpy array\n imageNp=numpy.array(pilImage,'uint8')\n #getting the Id from the image\n Id=int(os.path.split(imagePath)[-1].split(\".\")[1])\n # extract the face from the training image sample\n faces=detector.detectMultiScale(imageNp)\n #If a face is there then append that in the list as well as Id of it\n for (x,y,w,h) in faces:\n faceSamples.append(imageNp[y:y+h,x:x+w])\n Ids.append(Id)\n return faceSamples,Ids\n\n\nfaces,Id = getImagesAndLabels('database/Harshit')\nRecognizer.train(images, lables)\nRecognizer.save('trainer/trainer.yml')\n\n"
},
{
"alpha_fraction": 0.6635446548461914,
"alphanum_fraction": 0.6865994334220886,
"avg_line_length": 27.85416603088379,
"blob_id": "27e8dd2381f0491ae5e1b16d9fd04b9476f8fe8b",
"content_id": "984a94aa0def09fb8ab26656c1f98dd654bc5c6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1388,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 48,
"path": "/FaceRecognition/notworking/dataSheet.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "import cv2\n\n#initiate Camera instance\ncamera = cv2.VideoCapture(0)\ndetector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\n#Prompt for the id number of the sample\n#sampleNum for counter\nId = raw_input(\"Enter your id : \")\nsampleNum = 0\n\n\nwhile(True):\n\ttry:\n\t\timageFromCamera = camera.read()[1]\n\t\timageGray = cv2.cvtColor(imageFromCamera,cv2.COLOR_BGR2GRAY)\n\n\t\t#Detect face from the imageGray above\n\t\t# detectMultiScale takes arguments image, scaleFactor, minNeighbour\n\t\tfaces = detector.detectMultiScale(imageGray, 1.3 , 5)\n\t\tif(len(faces) == 1):\n\t\t\tfor (x,y,w,h) in faces:\n\t\t\t\tcv2.rectangle(imageFromCamera, (x, y), (x + w, y + w), (255,0,0), 2)\n\n\t\t\t\t#Now after we have already detected face, we will save\n\t\t\t\t#in our dataSet Folder, and increment sampleNum\n\t\t\t\tsampleNum = sampleNum + 1\n\t\t\t\tfileName = \"dataSet/user.\" + str(Id) + \".\" + str(sampleNum) + \".jpeg\"\n\t\t\t\tcv2.imwrite(fileName, imageGray[y:y + h, x:x + w])\n\t\t\t\tprint \"Sample : \" + str(sampleNum)\n\t\t\t\tcv2.imshow('Face Detection', imageFromCamera)\n\t\telse:\n\t\t\tprint \"More than one face to register.\"\n\t\t#Wait for keypress and \n\t\tif cv2.waitKey(100) & 0xFF == ord('q'):\n\t\t\tprint \"q pressed\"\n\t\t\tbreak\n\n\t\telif sampleNum > 40:\n\t\t\tbreak\n\texcept KeyboardInterrupt:\n\t\tprint \"^c Pressed :\"\n\t\tcamera.release()\n\t\tcv2.destroyAllWindows()\n\texcept:\n\t\tprint \"Exception occured : \"\n\t\tcamera.release()\n\t\tcv2.destroyAllWindows()\n\t\n\n"
},
{
"alpha_fraction": 0.6346483826637268,
"alphanum_fraction": 0.6535162925720215,
"avg_line_length": 17.25,
"blob_id": "2c4d04fcc7fd6f44a50488dd30ab1a57299e4b1f",
"content_id": "043b21985ef02a8ef2b5827e0c5fb88258d18ea1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 32,
"path": "/FaceRecognition/onlineFace/enroll.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "import requests, json\nimport cv2\n\n\nurl = \"https://api.kairos.com/enroll\"\n\n\n#subject_id = raw_input(\"Enter name of student to enroll : \")\n\nvalues = {\"subject_id\" : \"Prashant\", \"gallery_name\" : \"Class\"}\n\nheaders = { 'Content-Type' : 'application/json', 'app_id' : 'cb84ee90', 'app_key' : 'c0507e42e24b9b72990f34667b96266d'}\n\ncamera = cv2.VideoCapture(0)\n\nfor i in xrange(10):\n\ttemp = camera.read()\n\nimage = camera.read()[1]\n\ncamera.release()\n\nprint \"Image Taken\"\n\ncv2.imwrite(\"image.jpg\", image)\n\n\nfiles = { 'image': open('image.jpg','rb') }\n\nr = requests.post( url, headers=headers, files=files )\n\nprint r.text"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 10.5,
"blob_id": "e1590afc00fa8411b1e2bc4f53585c6b356b914e",
"content_id": "f80d0e23078be401e8081d76e1991becc176ea25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "# MINE\nSUMMER TRAINING\n"
},
{
"alpha_fraction": 0.609375,
"alphanum_fraction": 0.625,
"avg_line_length": 10.800000190734863,
"blob_id": "3232a66b728eafe33d1b515f847dd55989f1ca86",
"content_id": "4de4013bbae16b47926a4bf45b415e179960f568",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 5,
"path": "/1st day/a.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "a=5\r\nprint a\r\nprint\"hello:\"\r\nb= input(\"enter any no\")\r\nprint b\r\n"
},
{
"alpha_fraction": 0.5760869383811951,
"alphanum_fraction": 0.5760869383811951,
"avg_line_length": 28,
"blob_id": "6ccc723c2997af605764fc3ba81097e59b648b80",
"content_id": "8e36e8525dd4616a6282c46dea235404a3787503",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 3,
"path": "/2nd Day/test.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "\r\ntest = {\"annmol\":\"pass\",\"deepak\":\"fail\",\"manish\":\"pass\"}\r\nfor i in test:\r\n print test\r\n"
},
{
"alpha_fraction": 0.48571428656578064,
"alphanum_fraction": 0.48571428656578064,
"avg_line_length": 15.5,
"blob_id": "a2c7266848bacaa5203e3391d2e5da1414d9eefb",
"content_id": "df6bab7526270b3065a5f41e356d9dd3b3ff324e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 4,
"path": "/1st day/jf gjf.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "def test():\r\n \"\"\"I'm a function\"\"\"\r\n pass\r\nprint(test.__doc__)\r\n"
},
{
"alpha_fraction": 0.6774193644523621,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 13.5,
"blob_id": "da8ea6331257e35f231bde51fca05537b2534345",
"content_id": "056c04d0397f6eb4d916ea599501deeb8aaae795",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/5th day/first.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "from harshitlib import *\r\na()\r\n"
},
{
"alpha_fraction": 0.5799999833106995,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 10.5,
"blob_id": "318603c249ac3e07522586e74fec3fce1b87b8ce",
"content_id": "a179348552879f199015954e118f2282daebc514",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 4,
"path": "/1st day/zxcds.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "a=raw_input(\"Enter the no\")\r\nb=3\r\nc=a+b\r\nprint c\r\n"
},
{
"alpha_fraction": 0.6857143044471741,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 17.090909957885742,
"blob_id": "4109099701343f4b5bb0371eb2f2bbf323c228bf",
"content_id": "e319d954e590d4eb7a6b812c78daa0bf545bdfbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 22,
"path": "/1st day/second_gui.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "from Tkinter import *\r\n\r\n\r\nwindow = Tk()\r\n\r\n\r\nwindow.title(\"attendence system\")\r\nwindow.geometry('480x620')\r\nwindow.configure(background='red')\r\n\r\nenterPrompt= Label (window,text=\"enter your roll no:\")\r\nenterPrompt.grid(column=0, row=0)\r\n\r\nnameLabel=Label(window, text=\"hello\")\r\nnameLabel.grid(column=0,row=2)\r\n\r\nattendanceLabel=Label(window,text=\"world\")\r\nattendanceLabel.grid(column=0,row=3)\r\n\r\n\r\n\r\nwindow.mainloop()\r\n"
},
{
"alpha_fraction": 0.6826417446136475,
"alphanum_fraction": 0.6890707015991211,
"avg_line_length": 35.425533294677734,
"blob_id": "4c5e01ec78a5d37458eab47b7999a3c073538f8b",
"content_id": "627c98e5afcc18dd3541908b147a2603adb75779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1711,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 47,
"path": "/Sprint_backlog1_Image/face3_rec.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "# include header files\nimport cv2\nimport numpy as np \nfrom PIL import Image\nimport os\n\n#load the face detection cascade\nfn_haar = 'haarcascade_frontalface_default.xml'\n\nfaceCascade = cv2.CascadeClassifier(fn_haar)\n\n#creating the face Recognizer object\nRecognizer = cv2.face.LBPHFaceRecognizer_create()\n\n#function to prepare training sets\ndef get_images_and_labels(path):\n\timage_paths = [os.path.join(path, f) for f in os.listdir(path) if f in os.listdir(path) if not f.endswith('.sad')]\n\timages = []\n\tlabels = []\n\tfor image_path in image_paths:\n\t\t# Read the image and convert to grayscale\n\t\timage_pil = Image.open(image_path).convert('L')\n # Convert the image format into numpy array\n image = np.array(image_pil, 'uint8')\n # Get the label of the image\n nbr = int(os.path.split(image_path)[1].split(\".\")[0].replace(\"subject\", \"\"))\n # Detect the face in the image\n faces = faceCascade.detectMultiScale(image)\n # If face is detected, append the face to images and the label to labels\n for (x, y, w, h) in faces:\n images.append(image[y: y + h, x: x + w])\n labels.append(nbr)\n cv2.imshow(\"Adding faces to traning set...\", image[y: y + h, x: x + w])\n cv2.waitKey(50)\n # return the images list and labels list\n\treturn images, labels\n\t\t\n#preparing the training sets\n#The folder database is in the same folder as this python script is.\npath = 'yalefaces'\n\n#Now call get_images_and_labels function and get the face images and corresponding _labels\nimages , labels = get_images_and_labels(path)\ncv2.destroyAllWindows()\n\n#Now perform the training session for the available data\nRecognizer.train(images, np.array(labels))"
},
{
"alpha_fraction": 0.6218905448913574,
"alphanum_fraction": 0.6343283653259277,
"avg_line_length": 24.799999237060547,
"blob_id": "bcf86ced0bfb9339be9059abdb3297dee61c1e8f",
"content_id": "1fd82eacd2fef905f11b9ba131bb21145d872890",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 402,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 15,
"path": "/3rd day/six.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\r\nimport xlrd\r\nimport xlwt\r\nfrom xlutils.copy import copy\r\n\r\ndef write_xl(fields):\r\n rb = xlrd.open_workbook(\"traffic.xls\",formating_info=True)\r\n r_sheet = rb.sheet_by_index(0)\r\n r = r_sheet.\r\n wb = copy(rb)\r\n sheet = wb.get_sheet(0)\r\n sheet.write(r,0,fields[\"LP\"])\r\n sheet.write(r,1,fields[\"speed\"])\r\n sheet.write(r,2,fields[\"time\"])\r\n wb.save\r\n"
},
{
"alpha_fraction": 0.5740072131156921,
"alphanum_fraction": 0.5848375558853149,
"avg_line_length": 17.785715103149414,
"blob_id": "d604355fc6ab5406e773aeb5ae058ec14fb6e299",
"content_id": "ab1d4c7d53464dc43a447b6cea4f84647b92606c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 14,
"path": "/3rd day/fifth.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "name_list = []\r\nwith open(\"new.txt\",'r') as f:\r\n for a_single_line in f:\r\n \r\n name_list.append(a_single_line)\r\n\r\nprint name_list\r\n\r\nwith open(\"new1.txt\",'w') as f:\r\n f.writelines(name_list)\r\n\r\nwith open(\"new1.txt\",'r') as f:\r\n f.readlines(3)\r\nprint name_list\r\n"
},
{
"alpha_fraction": 0.5629138946533203,
"alphanum_fraction": 0.5761589407920837,
"avg_line_length": 14.777777671813965,
"blob_id": "067a0a7f56c3cefa4b3079e2844f0c4d8a3cd0d1",
"content_id": "164dea51b243f704c0caf5906071cf911acad6d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 9,
"path": "/3rd day/first.py",
"repo_name": "Harshitchourasiya/MINE",
"src_encoding": "UTF-8",
"text": "f=open(\"new.txt\",'w')\r\nf.write(\"hello world\\n\")\r\nf.write(\"this is new\\n\")\r\nf.close()\r\nf=open(\"new.txt\",'r')\r\n\r\nprint(f.read(10))\r\nf.close()\r\nprint(n)\r\n"
}
] | 23 |
Maettador/Central-Park-Squirrel-Colors | https://github.com/Maettador/Central-Park-Squirrel-Colors | efc8c186f039e432bc4ced8ab4fc2541d01b192f | 94fab48458db9f4ead598cc9d749636e8d8710cd | 58160ba5b2921ea979a8a57f3ba26ab75b666027 | refs/heads/main | 2023-07-21T23:24:45.629074 | 2021-08-31T19:56:28 | 2021-08-31T19:56:28 | 401,825,182 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7988826632499695,
"alphanum_fraction": 0.7988826632499695,
"avg_line_length": 58.66666793823242,
"blob_id": "1fb300bdfc6fb6f2ab67336f8a0b575389d4879b",
"content_id": "19276385b77ab949f0833fbc08a9d7c04e110dfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Maettador/Central-Park-Squirrel-Colors",
"src_encoding": "UTF-8",
"text": "# Central-Park-Squirrel-Colors\n\nThis code extracts data from the squirrel survey and creates a new csv with a table of the squirrels colors and how many of each color were found.\n"
},
{
"alpha_fraction": 0.6266666650772095,
"alphanum_fraction": 0.6577777862548828,
"avg_line_length": 29,
"blob_id": "9917e5604a1641b7466cdc1cd6069a2704405209",
"content_id": "6bf1b3b243183c807ed789dccca516722f2f3dad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 15,
"path": "/main.py",
"repo_name": "Maettador/Central-Park-Squirrel-Colors",
"src_encoding": "UTF-8",
"text": "import csv\nimport pandas as pd\n\ndata = pd.read_csv(\"2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv\")\ngray_count = len(data[data[\"Primary Fur Color\"] == \"Gray\"])\nred_count = len(data[data[\"Primary Fur Color\"] == \"Cinnamon\"])\nblack_count = len(data[data[\"Primary Fur Color\"] == \"Black\"])\n\ndata_dict = {\n \"Fur Color\": [\"Gray\", \"Cinnamon\", \"Black\"],\n \"Count\": [2473, 392, 103]\n}\n\ndf = pd.DataFrame(data_dict)\ndf.to_csv(\"squirrel_count.csv\")\n"
}
] | 2 |
madiradu/Django-bookstore | https://github.com/madiradu/Django-bookstore | abf1f4a1b5027073e0dfe7e256ffe76df6e34192 | decc45407b549f19a8275ff501cec9e676e98d2d | e554db91ef18790bc8fc1077cf2271002acf55fd | refs/heads/master | 2020-09-19T14:20:51.566409 | 2019-11-26T16:27:13 | 2019-11-26T16:27:13 | 224,235,951 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7363636493682861,
"alphanum_fraction": 0.7363636493682861,
"avg_line_length": 26.5,
"blob_id": "36f7f888370671a6761bfa6403f16c92198dafa6",
"content_id": "798c0c51593f4922cdbbb1fa0f84a2f2404d03a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/bookstore/bookstore/bookshelf/serializers.py",
"repo_name": "madiradu/Django-bookstore",
"src_encoding": "UTF-8",
"text": "from bookstore.bookshelf.models import Book\nfrom rest_framework import serializers\n\n\nclass BookSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Book\n fields = ['name', 'author']\n"
},
{
"alpha_fraction": 0.7879418134689331,
"alphanum_fraction": 0.7879418134689331,
"avg_line_length": 35.92307662963867,
"blob_id": "8cf573eed7915288be32785186a7fb6d7d170f8a",
"content_id": "b32ceaa7711103f83a0c7f3100618c37d238b7fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 26,
"path": "/bookstore/bookstore/bookshelf/views.py",
"repo_name": "madiradu/Django-bookstore",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom bookstore.bookshelf.models import Book\nfrom rest_framework import viewsets\nfrom bookstore.bookshelf.serializers import BookSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n# Create your views here.\n\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import permissions as permissions\nfrom rest_framework import renderers\n\nclass BookViewSet(viewsets.ModelViewSet):\n queryset = Book.objects.all()\n serializer_class = BookSerializer\n #permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n @action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])\n def highlight(self, request, *args, **kwargs):\n book = self.get_object()\n return Response(book.highlighted)\n\n def perform_create(self, serializer):\n serializer.save()\n\n\n"
}
] | 2 |
haru77/web | https://github.com/haru77/web | ca8ae6b5e3afdc88c17dfbeef14568a4b9944f4f | 0f60e7b1cbf89471b4f02998cc006e6cccb971b2 | 8e3dc71e6f6554c6f1ae2d90e5ce190213df73dc | refs/heads/master | 2020-07-05T01:44:14.332892 | 2019-08-12T06:19:49 | 2019-08-12T06:19:49 | 202,486,180 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5571428537368774,
"alphanum_fraction": 0.5795918107032776,
"avg_line_length": 22.380952835083008,
"blob_id": "9d3c65a66883973cb224b4f0231d65052ae6505e",
"content_id": "e3aadc1625722d4f5d0e433d67a67cc621446e85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 562,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 21,
"path": "/django/FIRST_APP/pages/templates/match.html",
"repo_name": "haru77/web",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\n <title>Document</title>\n</head>\n<body>\n <h1> XXX님과 YYY님의 궁합은 ZZ%입니다! </h1>\n <!-- <h1>{{I}}님과 {{you}}님의 궁합은 ZZ%입니다! </h1> -->\n <h1> {{me}}님과 {{you}}님의 궁합은 {{goonghap}}%입니다! </h1>\n\n <h2>request 객체 살펴보기</h2>\n <!-- path -->\n {{test}}\n {{request.POST}}\n \n {{request.method}}\n</body>\n</html>"
},
{
"alpha_fraction": 0.5670545101165771,
"alphanum_fraction": 0.5811390280723572,
"avg_line_length": 24.936508178710938,
"blob_id": "a56a2c19e5b9a613d0481b5a916509b1b946bb43",
"content_id": "a255007c7aef53446206c0d0ae4bd36704dcc36a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1777,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 63,
"path": "/django/FIRST_APP/pages/views.py",
"repo_name": "haru77/web",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\ndef index(request):\n return render(request,\"index.html\")\n\ndef home(request):\n # return HttpResponse('<h1>홈페이지</h1>') #pass\n name = '이한얼'\n data = ['이한얼','정의진']\n empty_data = ['엄복동', '클레멘타인']\n matrix = [[1,2,3],[4,5,6]]\n context = {\n 'name': name,\n 'data': data,\n 'empty_data': empty_data,\n 'matrix': matrix,\n }\n\n # return render(request, 'home.html', name=name)\n return render(request, 'home.html', context, {'name2': '이한얼'})\n\nimport random\ndef lotto(request):\n # lottoList = random.choices()\n # lottoList=list(map(str,random.randint(range(1,46),6)))\n lottoList=random.sample(range(1,46),6)\n nums = lottoList\n context = {\n 'nums': nums,\n 'number': 'lotto!',\n }\n return render(request, 'lotto.html', context)\n\ndef cube(request, num):\n result = num ** 3\n context = {\n 'result': result,\n }\n return render(request, 'cube.html', context)\n\n# input 과 form을 이용해서 넘기기\ndef match(request):\n import random\n goonghap = random.randint(50,100)\n # me = request.GET.get('me')\n # you = request.GET.get('you')\n print(request)\n print(request.GET)\n print(request.POST)\n me = request.POST.get('me') # flask에서의 request.get('me') 와 유사!\n you = request.POST.get('you')\n\n test = request.get_host() # host에 대한 값이 들어감\n # print(request.get())\n # 딕셔너리와 비슷한 쿼리 딕트가 들어옴, 'me', 'you'라는 객체가 들어온다.\n context = {\n 'goonghap': goonghap,\n 'me': me,\n 'you': you,\n 'test': test,\n }\n return render(request,'match.html', context)"
}
] | 2 |
steven1227/Drello | https://github.com/steven1227/Drello | a8a00ebd7b56adef1b66fdc542b1266e821ec15c | bea9c7d9709610a2e1ec3871f70fb8d9285916d1 | 2914f9e20d2bc529c182549ae20c33921fd89ed8 | refs/heads/master | 2021-01-21T13:14:57.617818 | 2016-04-18T14:01:37 | 2016-04-18T14:01:37 | 55,696,416 | 0 | 1 | null | 2016-04-07T13:22:34 | 2016-04-18T00:44:06 | 2016-04-18T14:01:37 | Python | [
{
"alpha_fraction": 0.6916666626930237,
"alphanum_fraction": 0.6916666626930237,
"avg_line_length": 22.899999618530273,
"blob_id": "8aa8fde807b99b2ea060e9150434fc58e0dd2b0d",
"content_id": "bb7d70133116323fcaa9e8118556a29dbd910e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 10,
"path": "/config.py",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nWTF_CSRF_ENABLED = True\nDATABASE = '/tmp/drello.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\nMONGODB_SETTINGS = {'DB': \"Drello\"}\n\n"
},
{
"alpha_fraction": 0.5436681509017944,
"alphanum_fraction": 0.5502183437347412,
"avg_line_length": 29.53333282470703,
"blob_id": "b7615cdc4515c80aa47ce73996ba70071227ed94",
"content_id": "606834855514d4bc09abb2877e72bf4eedd96661",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 458,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 15,
"path": "/static/app/IndexApp.js",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "var app = angular.module('IndexApp', ['ngMaterial', 'authentication']);\n\napp.controller('LoginControl', ['AuthService', function(AuthService) {\n this.username = \"[email protected]\"\n this.password = 123;\n this.login = function() {\n AuthService.login(this.username, this.password)\n .then(function() {\n \tconsole.log(\"success\")\n })\n .catch(function(){\n \tconsole.log(\"Error\")\n })\n }\n}]);\n"
},
{
"alpha_fraction": 0.6917293071746826,
"alphanum_fraction": 0.6928034424781799,
"avg_line_length": 23.5,
"blob_id": "ac67523ec825ba0cf100ebafc540934c5f9cfe99",
"content_id": "55f0d8b80d0e1edae3173290f80c8bf0731e5e15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 38,
"path": "/Task.md",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "## Tasks\n\n### Components\n- [ ] Doctor\n- [ ] Patient\n- [ ] Nurse\n- [ ] Bed\n- [ ] Blood Bank: BloodBankDonner and BloodBankStatus\n- [ ] Report\n- [ ] Message between Doctor and Patient\n- [ ] Appointment\n- [ ] Invoice\n- [ ] Event\n- [ ] Department\n- [ ] Admin\n- [ ] User\n- [ ] Different Dashboard\n- [ ] Other user type\n\n### Authentication\n\n- Registeration\n- Third party Login\n\n\n### Dashboard\n- The dashboard should be card-based\n- It contains to-do list, doing list, and done list\n- Each task in a list should be displayed as a card\n- User could add a task in each of the list, and task would be stored in the database\n- Each task can be dragged through these three lists\n- Each task should have a deadline, and when deadline is < 1 day, there should be a notification\n\n###Customization\n- Change the color of the board, card, add labels..\n- There must be a side bar, which allows user to review the task(action) history\n\n### Team Mode?\n"
},
{
"alpha_fraction": 0.5136612057685852,
"alphanum_fraction": 0.7103825211524963,
"avg_line_length": 15.545454978942871,
"blob_id": "a6ff364676832d83f01bef48366f8db78f547fda",
"content_id": "4ab3979c0f0d577f3405c7cc79dc7794ffe9e09d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 11,
"path": "/requirement.txt",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "Flask==0.10.1\nflask-mongoengine==0.7.5\nFlask-WTF==0.12\nitsdangerous==0.24\nJinja2==2.8\nMarkupSafe==0.23\nmongoengine==0.10.6\npymongo==3.2.2\nWerkzeug==0.11.7\nwheel==0.24.0\nWTForms==2.1\n\n"
},
{
"alpha_fraction": 0.738916277885437,
"alphanum_fraction": 0.738916277885437,
"avg_line_length": 15.75,
"blob_id": "85c75788237f733926dd773bf01005d3906bad96",
"content_id": "d39b148d4b33621c2285a2a0bfad333d9b1e58f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 12,
"path": "/app/__init__.py",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask.ext.mongoengine import MongoEngine\n\n\n\napp = Flask(__name__)\napp.config.from_object('config')\ndb = MongoEngine(app)\n\n\nfrom app.views import *\nfrom app.models import *\n\n\n"
},
{
"alpha_fraction": 0.6303030252456665,
"alphanum_fraction": 0.6343434453010559,
"avg_line_length": 21.930233001708984,
"blob_id": "cd4c50e33573aceeaa01c36e4492de6227b3e6b1",
"content_id": "5838ffad1c52ed49724de70a19da1a84289e2443",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 43,
"path": "/drello.py",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "# all the imports\nimport sqlite3\nfrom contextlib import closing\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash, jsonify\n\n# configuration\nDATABASE = '/tmp/flaskr.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\n# create our little application :)\napp = Flask(__name__)\napp.config.from_object(__name__)\n \n\n# email: email, password: password\n\[email protected]('/')\ndef index():\n return app.send_static_file('index.html')\n\[email protected]('/api/login', methods=['POST'])\ndef login():\n # todo database \n json_data = request.json\n if json_data['email'] == '[email protected]' and json_data['password']==123:\n session['loggedin'] = True\n status = True\n else:\n status = False\n return jsonify({'result':status}) \n\[email protected]('/api/logout')\ndef logout():\n session.pop('loggedin', None)\n return jsonify({'result': 'success'})\n\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n"
},
{
"alpha_fraction": 0.7102823257446289,
"alphanum_fraction": 0.7251184582710266,
"avg_line_length": 36.338462829589844,
"blob_id": "b1c84b5ee3c5245d45cbab4b5b548c850305395b",
"content_id": "7a3dc280913b98e3beac38079d63202b36bec9ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4853,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 130,
"path": "/app/models.py",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "from app import db\nfrom flask import url_for\nimport datetime\n\n\nclass Comment(db.EmbeddedDocument):\n created_at = db.DateTimeField(default=datetime.datetime.now, required=True)\n body = db.StringField(verbose_name=\"Comment\", required=True)\n author = db.StringField(verbose_name=\"Name\", max_length=255, required=True)\n\n\nclass Post(db.Document):\n created_at = db.DateTimeField(default=datetime.datetime.now, required=True)\n title = db.StringField(max_length=255, required=True)\n slug = db.StringField(max_length=255, required=True)\n body = db.StringField(required=True)\n comments = db.ListField(db.EmbeddedDocumentField('Comment'))\n\n def get_absolute_url(self):\n return url_for('post', kwargs={\"slug\": self.slug})\n\n def __unicode__(self):\n return self.title\n\n def __repr__(self):\n return '<Post %r>' % (self.body)\n\n meta = {\n 'allow_inheritance': True,\n 'indexes': ['-created_at', 'slug'],\n 'ordering': ['-created_at']\n }\n\nclass User(db.DynamicDocument):\n account_type = db.StringField(max_length=255, required=True)\n email = db.EmailField(required=True,unique=True)\n password = db.StringField(max_length=255, required=True)\n name = db.StringField(max_length=255, required=True)\n image = db.ImageField(size=(500,500,True))\n address = db.StringField(max_length=255, required=True)\n phone = db.StringField(max_length=255,unique=True)\n\n#Can be intergate into User\n#Todo\nclass Patient(db.Document):\n user_id = db.ReferenceField(db.ObjectId,required=True)\n sex = db.StringField(max_length=255, required=True)\n birthdate = db.DateTimeField(required=True);\n name = db.StringField(max_length=255, required=True)\n blood = db.ReferenceField(db.ObjectId)\n\n\nclass Admin(db.Document):\n user_id = db.ReferenceField(db.ObjectId)\n\nclass Department(db.Document):\n name = db.StringField(max_length=255, required=True)\n description = db.StringField(max_length=255, required=True)\n\nclass Appointment(db.Document):\n date = db.DateTimeField(required=True);\n patient = db.EmbeddedDocumentField('Patient')\n\nclass Precription(db.Document):\n date = db.DateTimeField(required=True);\n patient = db.EmbeddedDocumentField('Patient')\n\nclass Bed(db.Document):\n bed_number = db.IntField(required=True)\n bed_type = db.StringField(max_length=255, required=True)\n patient = db.EmbeddedDocumentField('Patient',required=True)\n allotment_date = db.DateTimeField(required=True)\n discharge_date = db.DateTimeField(required=True)\n\nclass BloodBankDonner(db.Document):\n name = db.StringField(max_length=255, required=True)\n sex = db.StringField(max_length=255, required=True)\n blood = db.ReferenceField(db.ObjectId)\n last_donation_date = db.DateTimeField(required=True)\n\nclass BloodBankStatus(db.Document):\n blood_group = db.StringField(max_length=255, required=True,unique=True)\n status = db.IntField(required=True)\n\nclass Report(db.Document):\n type = db.StringField(max_length=255, required=True)\n description = db.StringField(max_length=255, required=True)\n date = db.DateTimeField(required=True)\n patient = db.EmbeddedDocumentField('Patient')\n\nclass Message(db.Document):\n from_who = db.EmbeddedDocumentField('User')\n content = db.StringField(max_length=255, required=True)\n\nclass Event(db.Document):\n date = db.DateTimeField(required=True)\n content = db.StringField(max_length=255, required=True)\n\n#Can be intergate into User\nclass Doctor(db.Document):\n user_id = db.ReferenceField(db.ObjectId,require=True)\n department = db.EmbeddedDocumentField('Department')\n appointments = 
db.ListField(db.EmbeddedDocumentField('Appointment'))\n precriptions = db.ListField(db.EmbeddedDocumentField('Precription'))\n patients = db.ListField(db.EmbeddedDocumentField('Patient'))\n reports = db.ListField(db.EmbeddedDocumentField('Report'))\n messages = db.ListField(db.EmbeddedDocumentField('Message'))\n\nclass Invoice(db.Document):\n invoice_number = db.IntField(required=True)\n title = db.StringField(max_length=255, required=True)\n patient = db.EmbeddedDocumentField('Patient')\n creation_date = db.DateTimeField(required=True)\n due_date = db.DateTimeField(required=True)\n vat_per = db.IntField(required=True)\n discount_amount = db.IntField(required=True)\n status = db.StringField(max_length=255, required=True)\n\n#Can be intergate into User\nclass accountant(db.Document):\n user_id = db.ReferenceField(db.ObjectId,require=True)\n invoice = db.ListField(db.EmbeddedDocumentField('Invoice'))\n\n#Can be intergate into User\nclass Nurse(db.Document):\n user_id = db.ReferenceField(db.ObjectId,require=True)\n patients = db.ListField(db.EmbeddedDocumentField('Patient'))\n reports = db.ListField(db.EmbeddedDocumentField('Report'))\n\n#Todo Pharmacist and Receptionist"
},
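The reference fields above can be exercised with a short, hypothetical snippet; it assumes an initialized flask_mongoengine connection, and the names and values here are illustrative only, not part of the repository:

    # Hypothetical sketch: link an Appointment to a Patient by reference.
    from bson import ObjectId
    import datetime

    patient = Patient(user_id=ObjectId(), sex="F",
                      birthdate=datetime.datetime(1990, 1, 1), name="Jane Doe").save()
    appointment = Appointment(date=datetime.datetime.now(), patient=patient).save()
    # MongoEngine dereferences the stored ObjectId back to the Patient document.
    assert appointment.patient.name == "Jane Doe"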
{
"alpha_fraction": 0.7135922312736511,
"alphanum_fraction": 0.7330096960067749,
"avg_line_length": 67.66666412353516,
"blob_id": "d8eeeb838040ffd88666c203429557e318fbf483",
"content_id": "9796628cbe90a86f613cf45f60f562ad138e76d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 3,
"path": "/README.md",
"repo_name": "steven1227/Drello",
"src_encoding": "UTF-8",
"text": "# Drello\nMini Forum for photos, books, movies and musics, using Flask + AngularJS + Foundation + MongoDB \nby: [Steven](https://github.com/steven1227),[ShenjunMa](https://github.com/mashenjun),[Bluebig]()\n"
}
] | 8 |
pakalnis92/property_manager | https://github.com/pakalnis92/property_manager | 31f7eab4f3beaae72b4e24098223785bc4282468 | a9735d422b3a77caeb49e0ca8aa830d810537a3d | 05bf977985c1de3822b3b4f37959347f62d3dcda | refs/heads/master | 2022-05-22T15:18:32.305195 | 2019-10-28T10:25:18 | 2019-10-28T10:25:18 | 217,870,518 | 0 | 1 | null | 2019-10-27T14:53:29 | 2019-10-28T10:25:21 | 2022-04-22T22:38:10 | Python | [
{
"alpha_fraction": 0.6342105269432068,
"alphanum_fraction": 0.6342105269432068,
"avg_line_length": 26.214284896850586,
"blob_id": "512c8969b335a1268cab9c8a5aa5441dce641aa9",
"content_id": "8241b5334e83bdd7484596d14ee314b3916debe8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 14,
"path": "/propmanager/forms.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from django.forms import forms\n\nfrom propmanager.models import Property, Owner\n\n\nclass PropertyForm(forms.ModelForm):\n class Meta:\n model = Property\n fields = ('address', 'value', 'location', 'property_type', 'owner')\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['owner'].queryset = Owner.objects.none()"
},
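A hedged sketch of how a caller might fill in that empty queryset; the decision to allow all owners is an assumption for illustration, not something the repository defines:

    # Hypothetical: give the form a real set of owners before rendering it.
    form = PropertyForm()
    form.fields['owner'].queryset = Owner.objects.all()  # or any narrowed queryset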
{
"alpha_fraction": 0.682634711265564,
"alphanum_fraction": 0.682634711265564,
"avg_line_length": 29.454545974731445,
"blob_id": "af4d445174d66a36525e581b1ce17e6a685a3002",
"content_id": "0ae89cda542c2a44b95bdab9c009cd7fa9efaaa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 11,
"path": "/propmanager/urls.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom propmanager.views import *\n\nfrom . import views\n\nurlpatterns = [\n # path('', views.index, name='index'),\n # path('edit/<int:room_id>/', EditProperty.as_view(), name='show-price_change_data-on-map'),\n path('properties-list', PropertiesList.as_view(), name='show-price_change_data-on-map'),\n\n]"
},
{
"alpha_fraction": 0.5444785356521606,
"alphanum_fraction": 0.5685071349143982,
"avg_line_length": 42.46666717529297,
"blob_id": "980964c5f1e6abff8eff47990641c93072b0d828",
"content_id": "68539af9dcb3e225248162436a1c3f2de0ea9a42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1956,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 45,
"path": "/propmanager/migrations/0001_initial.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.6 on 2019-10-27 11:42\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Owner',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(default='N/A', max_length=100)),\n ('last_name', models.CharField(default='N/A', max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='PropertyType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(default='N/A', max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Property',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('address_line_1', models.CharField(default='N/A', max_length=100)),\n ('address_line_2', models.CharField(default='N/A', max_length=100)),\n ('city_town', models.CharField(default='N/A', max_length=100)),\n ('county', models.CharField(default='N/A', max_length=100)),\n ('post_code', models.CharField(default='N/A', max_length=100)),\n ('value', models.DecimalField(decimal_places=2, max_digits=20)),\n ('location', models.CharField(default='N/A', max_length=100)),\n ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='propmanager.Owner')),\n ('property_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='propmanager.PropertyType')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.621575653553009,
"alphanum_fraction": 0.6240317225456238,
"avg_line_length": 39.404579162597656,
"blob_id": "886c8a335773e4c17a3b06dc437bf4bffef7f3fe",
"content_id": "535cc776c8caeebb5631c6a143e5ec6d1eb570cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5293,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 131,
"path": "/propmanager/views.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\n\n# Create your views here.\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import TemplateView\nfrom propmanager.models import *\n\n\nclass PropertiesList(TemplateView):\n template_name = \"PropertyList.html\"\n\n def post(self, request, *args, **kwargs):\n if self.request.POST.get('go_edit'):\n return HttpResponseRedirect(reverse('add-property'))\n elif self.request.POST.get('main_menu'):\n return HttpResponseRedirect(reverse('main-menu'))\n\n def get_context_data(self, room_id=None, **kwargs):\n all_properties = Property.objects.all()\n\n context = {'properties': all_properties}\n\n return context\n\n\nclass EditPropertyDetails(TemplateView):\n template_name = \"edit.html\"\n\n def post(self, request, *args, **kwargs):\n if self.request.POST.get('edit'):\n property_id = int(self.kwargs['property_id'])\n\n owner_pk = int(request.POST.get(\"owner\", None))\n owner = Owner.objects.get(pk=owner_pk)\n\n type_pk = request.POST.get(\"type\", None)\n type = PropertyType.objects.get(pk=type_pk)\n value = request.POST.get(\"value\", None)\n\n edited_property = Property.objects.get(pk=property_id)\n edited_property.owner = owner\n edited_property.value = value\n edited_property.property_type = type\n\n edited_property.save(update_fields=['owner', 'value', 'property_type'])\n messages.success(request, 'Property details was successfully updated')\n\n return HttpResponseRedirect(f'/edit_property/{property_id}/')\n\n if self.request.POST.get('delete'):\n property_id = int(self.kwargs['property_id'])\n Property.objects.get(pk=property_id).delete()\n messages.success(request, 'Property was successfully deleted')\n return HttpResponseRedirect(reverse('properties-list'))\n if self.request.POST.get('list'):\n return HttpResponseRedirect(reverse('properties-list'))\n\n if self.request.POST.get('main'):\n return HttpResponseRedirect(reverse('main-menu'))\n\n def get_context_data(self, property_id=None, **kwargs):\n selected_property = Property.objects.get(pk=property_id)\n all_owners = Owner.objects.all()\n\n # All owners final excludes the owner which is linked to selected property, so no duplicates are displayed.\n all_owners_final = all_owners.exclude(pk=selected_property.owner.pk)\n all_property_types = PropertyType.objects.all()\n # All properties final excludes the property type which is linked to selected property, so no duplicates are displayed.\n\n all_property_types_final = all_property_types.exclude(name=selected_property.property_type.name)\n\n context = {'property': selected_property,\n 'owners': all_owners_final,\n 'property_types': all_property_types_final\n }\n return context\n\n\nclass MainMenu(TemplateView):\n template_name = \"main.html\"\n\n def post(self, request, *args, **kwargs):\n if self.request.POST.get('list'):\n return HttpResponseRedirect('properties-list/')\n elif self.request.POST.get('add_new'):\n return HttpResponseRedirect('add-property/')\n\n\nclass AddProperty(TemplateView):\n template_name = \"new_property.html\"\n\n def post(self, request, *args, **kwargs):\n if self.request.POST.get('create_property'):\n address1 = request.POST.get(\"address1\", None)\n address2 = request.POST.get(\"address2\", None)\n city_town = request.POST.get(\"city_town\", None)\n county = request.POST.get(\"county\", None)\n post_code = request.POST.get(\"post_code\", None)\n\n location = request.POST.get(\"location\", 
None)\n value = request.POST.get(\"value\", None)\n\n owner_pk = int(request.POST.get(\"owner\", None))\n owner = Owner.objects.get(pk=owner_pk)\n\n type_pk = request.POST.get(\"type\", None)\n type = PropertyType.objects.get(pk=type_pk)\n\n Property.objects.create(address_line_1=address1, address_line_2=address2, city_town=city_town,\n county=county,\n post_code=post_code, location=location, property_type=type, value=value,\n owner=owner)\n messages.success(request,\n f'Property with address: {address1} {address2} {city_town} {county} {post_code} was added to the system.')\n return HttpResponseRedirect(reverse('properties-list'))\n elif self.request.POST.get('main_menu'):\n return HttpResponseRedirect(reverse('main-menu'))\n\n\n def get_context_data(self, **kwargs):\n all_properties = Property.objects.all()\n all_owners = Owner.objects.all()\n all_property_types = PropertyType.objects.all()\n\n context = {'properties': all_properties,\n 'owners': all_owners,\n 'property_types': all_property_types,\n }\n return context\n"
},
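For contrast, a minimal sketch (my addition, not code from the repository) of the same listing written with Django's generic ListView, which replaces the hand-rolled get_context_data:

    from django.views.generic import ListView
    from propmanager.models import Property

    class PropertiesListSketch(ListView):  # hypothetical name
        model = Property
        template_name = "PropertyList.html"
        context_object_name = "properties"  # the template iterates 'properties' as before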
{
"alpha_fraction": 0.7304581999778748,
"alphanum_fraction": 0.7304581999778748,
"avg_line_length": 25.5,
"blob_id": "5268c7514a96d0889cddfee1b3256760b52b38f2",
"content_id": "1953979247652904794a1d9341580bc37a94a038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/propmanager/admin.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom propmanager.models import *\n\n# Register your models here.\[email protected](Owner)\nclass OwnerAdmin(admin.ModelAdmin):\n model = Owner\n list_display = ('first_name', 'last_name', 'email', 'telephone')\n\n\[email protected](PropertyType)\nclass PropertyTypeAdmin(admin.ModelAdmin):\n model = PropertyType\n list_display = ('name',)\n"
},
{
"alpha_fraction": 0.5085574388504028,
"alphanum_fraction": 0.6968215107917786,
"avg_line_length": 16.04166603088379,
"blob_id": "7570c4703e73eadec671e7e4afceef7a9a10bca2",
"content_id": "912b3787cb66ceb1ab7e4a7dc1d85a5110b076a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 24,
"path": "/requirements.txt",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "backcall==0.1.0\ndecorator==4.4.0\nDjango==2.2.6\ndjango-enumfield==1.5\nipykernel==5.1.3\nipython==7.8.0\nipython-genutils==0.2.0\njedi==0.15.1\njupyter-client==5.3.4\njupyter-core==4.6.0\nparso==0.5.1\npexpect==4.7.0\npickleshare==0.7.5\nprompt-toolkit==2.0.10\nptyprocess==0.6.0\nPygments==2.4.2\npython-dateutil==2.8.0\npytz==2019.3\npyzmq==18.1.0\nsix==1.12.0\nsqlparse==0.3.0\ntornado==6.0.3\ntraitlets==4.3.3\nwcwidth==0.1.7\n"
},
{
"alpha_fraction": 0.6471801996231079,
"alphanum_fraction": 0.671939492225647,
"avg_line_length": 34.43902587890625,
"blob_id": "b8aa7cd9124dd7c7659d53442fe4b91b04cfe488",
"content_id": "7d7fce4c431c3ce78950364b2e9ecf27ee17734e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1454,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 41,
"path": "/propmanager/models.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nfrom django.db import models\n\n\n# Create your models here.\n\nclass Owner(models.Model):\n first_name = models.CharField(max_length=50, default=\"N/A\")\n last_name = models.CharField(max_length=50, default=\"N/A\")\n email = models.CharField(max_length=20, default=\"N/A\")\n telephone = models.CharField(max_length=20, default=\"N/A\")\n\n\n def __str__(self):\n return 'Owner full name: %s %s' % (self.first_name, self.last_name)\n\n\nclass PropertyType(models.Model):\n name = models.CharField(max_length=100, default=\"N/A\")\n\n def __str__(self):\n return ' %s ' % (self.name)\n\n\nclass Property(models.Model):\n \"\"\"\n Model to represent the property.\n \"\"\"\n address_line_1 = models.CharField(max_length=100, default=\"N/A\")\n address_line_2 = models.CharField(max_length=100, default=\"N/A\")\n city_town = models.CharField(max_length=100, default=\"N/A\")\n county = models.CharField(max_length=100, default=\"N/A\")\n post_code = models.CharField(max_length=100, default=\"N/A\")\n value = models.DecimalField(max_digits=20, decimal_places=2)\n location = models.CharField(max_length=100, default=\"N/A\")\n property_type = models.ForeignKey(PropertyType, on_delete=models.CASCADE)\n owner = models.ForeignKey(Owner, on_delete=models.CASCADE)\n\n def __str__(self):\n return 'Property at %s %s %s %s %s' % (self.address_line_1, self.address_line_2, self.city_town, self.county, self.post_code)\n\n"
},
{
"alpha_fraction": 0.6902502179145813,
"alphanum_fraction": 0.6971527338027954,
"avg_line_length": 38.965518951416016,
"blob_id": "f66a1cf1f3979193e3e70d545d53d5ad0550e3b8",
"content_id": "72149ac221e416152d2a39588a68dd18871645e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1159,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 29,
"path": "/prop_manage/urls.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "\"\"\"prop_manage URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom propmanager.views import *\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # path('propmanager/', include('propmanager.urls')),\n path('properties-list/', PropertiesList.as_view(), name='properties-list'),\n path('add-property/', AddProperty.as_view(), name='add-property'),\n path('edit_property/<int:property_id>/', EditPropertyDetails.as_view(), name='edit-property'),\n path('main', MainMenu.as_view(), name='main-menu'),\n\n]\n"
},
{
"alpha_fraction": 0.7731958627700806,
"alphanum_fraction": 0.7731958627700806,
"avg_line_length": 18.399999618530273,
"blob_id": "9b5483a1f863521993efce0d1ee9f8e050cedacc",
"content_id": "715450a9c7879ffb35ddbc6937f918e71f515792",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 5,
"path": "/propmanager/apps.py",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass PropmanagerConfig(AppConfig):\n name = 'propmanager'\n"
},
{
"alpha_fraction": 0.7770897746086121,
"alphanum_fraction": 0.785345733165741,
"avg_line_length": 32.41379165649414,
"blob_id": "76f52d2f9174ec30e680baeca2a015683d1d3cea",
"content_id": "188f835e9c1fbb08a4e0bbc1fd5bf3b8184ede91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 969,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 29,
"path": "/README.md",
"repo_name": "pakalnis92/property_manager",
"src_encoding": "UTF-8",
"text": "# property_manager\n\nProject was made using Database SQLlite, to save time.\n\nNew Owner can be added through admin page.\n\nProperty also could be added using admin page.\n\nApplication main page is http://localhost:8000/main (locahost might be different)\n\nFrom main page, user can got to Properties List page or Add new property page.\n\nOn properties list page, user can click property price and it will take to edit page. On Edit page user can edit value,\nuser and property type. User also can delete on edit page.\n\nProperty owners has separate models, and One to Many relationship is set by ForeigKey.\n\nSome HTML validation was used and messages about succesfull operations will be desplayed.\n\nProperty type and owner to be selected through drop down, so options are valid only from database.\n\nUsed class based views.\n\nBefore trying to add new property, user needs to be added through admin page.\n\nhttp://localhost:8000/admin.\nLogin details: \n username: xx\n password: xx\n"
}
] | 10 |
king-phyte/aoc2020 | https://github.com/king-phyte/aoc2020 | 060f43c8790132f354ebb3e5123983515d037a18 | d0089216fce13bbf92d61afe466d073a98c449d5 | 280a27571133fc0dc08c12a3884d0a318dc1a435 | refs/heads/main | 2023-05-01T10:38:45.703120 | 2021-02-20T01:30:24 | 2021-02-20T01:30:24 | 324,241,419 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7005903124809265,
"alphanum_fraction": 0.7090908885002136,
"avg_line_length": 32.61111068725586,
"blob_id": "78646377bb316ae0f2a878675c175fbbccf181bb",
"content_id": "05fa60a1db3abfa5a86a093948fe960a08be5166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4235,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 126,
"path": "/Day 24/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 24: Lobby Layout ---\n\nYour raft makes it to the tropical island; it turns out that the small crab was an excellent navigator.\nYou make your way to the resort.\n\nAs you enter the lobby, you discover a small problem: the floor is being renovated.\nYou can't even reach the check-in desk until they've finished installing the new tile floor.\n\nThe tiles are all hexagonal; they need to be arranged in a hex grid with a very specific color pattern.\nNot in the mood to wait, you offer to help figure out the pattern.\n\nThe tiles are all white on one side and black on the other.\nThey start with the white side facing up. The lobby is large enough to fit whatever pattern might need to appear there.\n\nA member of the renovation crew gives you a list of the tiles that need to be flipped over (your puzzle input).\nEach line in the list identifies a single tile that needs to be flipped by giving a series of steps\nstarting from a reference tile in the very center of the room. (Every line starts from the same reference tile.)\n\nBecause the tiles are hexagonal, every tile has six neighbors:\neast, southeast, southwest, west, northwest, and northeast.\nThese directions are given in your list, respectively, as e, se, sw, w, nw, and ne.\nA tile is identified by a series of these directions with no delimiters;\nfor example, esenee identifies the tile you land on if you start at the reference tile and then move one\ntile east, one tile southeast, one tile northeast, and one tile east.\n\nEach time a tile is identified, it flips from white to black or from black to white.\nTiles might be flipped more than once. For example, a line like esew flips a tile immediately adjacent to the\nreference tile, and a line like nwwswee flips the reference tile itself.\n\nHere is a larger example:\n\nsesenwnenenewseeswwswswwnenewsewsw\nneeenesenwnwwswnenewnwwsewnenwseswesw\nseswneswswsenwwnwse\nnwnwneseeswswnenewneswwnewseswneseene\nswweswneswnenwsewnwneneseenw\neesenwseswswnenwswnwnwsewwnwsene\nsewnenenenesenwsewnenwwwse\nwenwwweseeeweswwwnwwe\nwsweesenenewnwwnwsenewsenwwsesesenwne\nneeswseenwwswnwswswnw\nnenwswwsewswnenenewsenwsenwnesesenew\nenewnwewneswsewnwswenweswnenwsenwsw\nsweneswneswneneenwnewenewwneswswnese\nswwesenesewenwneswnwwneseswwne\nenesenwswwswneneswsenwnewswseenwsese\nwnwnesenesenenwwnenwsewesewsesesew\nnenewswnwewswnenesenwnesewesw\neneswnwswnwsenenwnwnwwseeswneewsenese\nneswnwewnwnwseenwseesewsenwsweewe\nwseweeenwnesenwwwswnew\n\nIn the above example, 10 tiles are flipped once (to black),\nand 5 more are flipped twice (to black, then back to white).\nAfter all of these instructions have been followed, a total of 10 tiles are black.\n\nGo through the renovation crew's list and determine which tiles they need to flip.\nAfter all of the instructions have been followed, how many tiles are left with the black side up?\n\n\"\"\"\nfrom typing import Sequence, Tuple, Set\n\n\ndef move(direction: str, magnitude: int) -> Tuple[int, int]:\n if direction == 'e':\n return 1, 0\n\n if direction == 'se':\n if magnitude % 2:\n return 0, -1\n return 1, -1\n\n if direction == 'sw':\n if magnitude % 2:\n return -1, -1\n return 0, -1\n\n if direction == 'w':\n return -1, 0\n\n if direction == 'nw':\n if magnitude % 2:\n return -1, 1\n return 0, 1\n elif magnitude % 2:\n return 0, 1\n\n return 1, 1\n\n\ndef tiles_with_black_side_up(instructions: Sequence[str]) -> Set[Tuple[int, int]]:\n flipped = set()\n moves = ('e', 'se', 'sw', 'w', 'nw', 'ne')\n\n for instruction in instructions:\n direction = 
''\n x, y = (0, 0)\n\n for char in instruction:\n direction += char\n\n if direction in moves:\n change_in_x, change_in_y = move(direction, y)\n x += change_in_x\n y += change_in_y\n direction = ''\n\n if (x, y) in flipped:\n flipped.remove((x, y))\n else:\n flipped.add((x, y))\n\n return flipped\n\n\ndef main():\n with open('./input.txt') as f:\n puzzle_input = [line.strip() for line in f.readlines()]\n\n tiles = tiles_with_black_side_up(puzzle_input)\n print(len(tiles)) # Answer = 266\n\n\nif __name__ == '__main__':\n main()\n"
},
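The two one-line examples in the puzzle text above make a cheap sanity check: esew flips a tile adjacent to the reference tile and nwwswee flips the reference tile itself, so two distinct tiles end up black.

    # Both flips land on different tiles, so both stay black.
    assert len(tiles_with_black_side_up(["esew", "nwwswee"])) == 2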
{
"alpha_fraction": 0.4499192237854004,
"alphanum_fraction": 0.4567851424217224,
"avg_line_length": 42.612613677978516,
"blob_id": "7d2487249ccc21f6e0745fdd08878c05a889f6b9",
"content_id": "5db6473e2fec9f8c5fb405abe95eaaad7b38d894",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4952,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 111,
"path": "/Day 3/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 3: Toboggan Trajectory ---\r\n\r\nWith the toboggan login problems resolved, you set off toward the airport. While travel by toboggan might be easy,\r\nit's certainly not safe: there's very minimal steering and the area is covered in trees.\r\nYou'll need to see which angles will take you near the fewest trees.\r\n\r\nDue to the local geology, trees in this area only grow on exact integer coordinates in a grid.\r\nYou make a map (your puzzle input) of the open squares (.) and trees (#) you can see. For example:\r\n\r\n..##.......\r\n#...#...#..\r\n.#....#..#.\r\n..#.#...#.#\r\n.#...##..#.\r\n..#.##.....\r\n.#.#.#....#\r\n.#........#\r\n#.##...#...\r\n#...##....#\r\n.#..#...#.#\r\n\r\nThese aren't the only trees, though; due to something you read about once involving arboreal genetics and biome\r\nstability, the same pattern repeats to the right many times:\r\n\r\n..##.........##.........##.........##.........##.........##....... --->\r\n#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..\r\n.#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.\r\n..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#\r\n.#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.\r\n..#.##.......#.##.......#.##.......#.##.......#.##.......#.##..... --->\r\n.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#\r\n.#........#.#........#.#........#.#........#.#........#.#........#\r\n#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...\r\n#...##....##...##....##...##....##...##....##...##....##...##....#\r\n.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.# --->\r\n\r\nYou start on the open square (.) in the top-left corner and need to reach the bottom\r\n(below the bottom-most row on your map).\r\n\r\nThe toboggan can only follow a few specific slopes (you opted for a cheaper model that prefers rational numbers);\r\nstart by counting all the trees you would encounter for the slope right 3, down 1:\r\n\r\nFrom your starting position at the top-left, check the position that is right 3 and down 1.\r\nThen, check the position that is right 3 and down 1 from there, and so on until you go past the bottom of the map.\r\n\r\nThe locations you'd check in the above example are marked here with O where there was an open square and\r\nX where there was a tree:\r\n\r\n..##.........##.........##.........##.........##.........##....... --->\r\n#..O#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..\r\n.#....X..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.\r\n..#.#...#O#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#\r\n.#...##..#..X...##..#..#...##..#..#...##..#..#...##..#..#...##..#.\r\n..#.##.......#.X#.......#.##.......#.##.......#.##.......#.##..... 
--->\r\n.#.#.#....#.#.#.#.O..#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#\r\n.#........#.#........X.#........#.#........#.#........#.#........#\r\n#.##...#...#.##...#...#.X#...#...#.##...#...#.##...#...#.##...#...\r\n#...##....##...##....##...#X....##...##....##...##....##...##....#\r\n.#..#...#.#.#..#...#.#.#..#...X.#.#..#...#.#.#..#...#.#.#..#...#.# --->\r\n\r\nIn this example, traversing the map using this slope would cause you to encounter 7 trees.\r\n\r\nStarting at the top-left corner of your map and following a slope of right 3 and down 1,\r\nhow many trees would you encounter?\r\n\r\n\"\"\"\r\nfrom typing import Sequence\r\n\r\n\r\ndef encountered_tree(data: Sequence[str], x_movement: int, y_movement: int) -> int:\r\n \"\"\"\r\n Checks if a tree (#) is encountered in a data.\r\n The data in transversed to the right by x_movements and downwards by y_movements.\r\n It returns the total number of trees encountered in the data in the order of transversal.\r\n\r\n :param data: Sequence[str] - A list of lines to be transversed.\r\n :param x_movement: int - The movement in the x-axis along the line in the data.\r\n :param y_movement: int - The movement in the y-axis along the line in the data.\r\n :return: int - The total number of trees encountered.\r\n \"\"\"\r\n number_of_trees_encountered = 0\r\n index = 0\r\n\r\n for line in data:\r\n line_index = data.index(line) + 1\r\n\r\n if (y_movement != 2) or (line_index % y_movement != 0):\r\n if (index >= 0) and (line[index] == \"#\"):\r\n number_of_trees_encountered += 1\r\n elif (y_movement == 2) and (line_index % y_movement == 0):\r\n continue\r\n\r\n index += x_movement\r\n\r\n return number_of_trees_encountered\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n # Duplicate the lines side by side 39 times to improve the convenience of transversing the lines.\r\n # There is no specific reason for choosing 39. It just was convenient at the time.\r\n # To stay safe though, use a multiple of 13 greater than or equal to 39\r\n convenient_input = [(line.strip() * 39) for line in puzzle_input]\r\n print(encountered_tree(convenient_input, 3, 1)) # Answer = 173\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
},
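A small check against the 11-row sample map from the puzzle text, which yields 7 trees on the right-3/down-1 slope; the 39x widening mirrors what main() does:

    sample = [
        "..##.......", "#...#...#..", ".#....#..#.", "..#.#...#.#",
        ".#...##..#.", "..#.##.....", ".#.#.#....#", ".#........#",
        "#.##...#...", "#...##....#", ".#..#...#.#",
    ]
    assert encountered_tree([row * 39 for row in sample], 3, 1) == 7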
{
"alpha_fraction": 0.4448609948158264,
"alphanum_fraction": 0.46485474705696106,
"avg_line_length": 22.888059616088867,
"blob_id": "2b31050f63141f059151ef90a3bba9c3f89a4508",
"content_id": "1765aceb630dc4a75e3892b6839123371fba66f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6402,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 268,
"path": "/Day 20/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 20: Jurassic Jigsaw ---\n\nThe high-speed train leaves the forest and quickly carries you south. You can even see a desert in the distance!\nSince you have some spare time, you might as well see if there was anything interesting in the image the\nMythical Information Bureau satellite captured.\n\nAfter decoding the satellite messages, you discover that the data actually contains many small images created by the\nsatellite's camera array. The camera array consists of many cameras; rather than produce a single square image,\nthey produce many smaller square image tiles that need to be reassembled back into a single image.\n\nEach camera in the camera array returns a single monochrome image tile with a random unique ID number. The tiles\n(your puzzle input) arrived in a random order.\n\nWorse yet, the camera array appears to be malfunctioning: each image tile has been rotated and flipped to a random\norientation. Your first task is to reassemble the original image by orienting the tiles so they fit together.\n\nTo show how the tiles should be reassembled, each tile's image data includes a border that should line up exactly with\nits adjacent tiles. All tiles have this border, and the border lines up exactly when the tiles are both\noriented correctly. Tiles at the edge of the image also have this border, but the outermost edges won't line up\nwith any other tiles.\n\nFor example, suppose you have the following nine tiles:\n\nTile 2311:\n..##.#..#.\n##..#.....\n#...##..#.\n####.#...#\n##.##.###.\n##...#.###\n.#.#.#..##\n..#....#..\n###...#.#.\n..###..###\n\nTile 1951:\n#.##...##.\n#.####...#\n.....#..##\n#...######\n.##.#....#\n.###.#####\n###.##.##.\n.###....#.\n..#.#..#.#\n#...##.#..\n\nTile 1171:\n####...##.\n#..##.#..#\n##.#..#.#.\n.###.####.\n..###.####\n.##....##.\n.#...####.\n#.##.####.\n####..#...\n.....##...\n\nTile 1427:\n###.##.#..\n.#..#.##..\n.#.##.#..#\n#.#.#.##.#\n....#...##\n...##..##.\n...#.#####\n.#.####.#.\n..#..###.#\n..##.#..#.\n\nTile 1489:\n##.#.#....\n..##...#..\n.##..##...\n..#...#...\n#####...#.\n#..#.#.#.#\n...#.#.#..\n##.#...##.\n..##.##.##\n###.##.#..\n\nTile 2473:\n#....####.\n#..#.##...\n#.##..#...\n######.#.#\n.#...#.#.#\n.#########\n.###.#..#.\n########.#\n##...##.#.\n..###.#.#.\n\nTile 2971:\n..#.#....#\n#...###...\n#.#.###...\n##.##..#..\n.#####..##\n.#..####.#\n#..#.#..#.\n..####.###\n..#.#.###.\n...#.#.#.#\n\nTile 2729:\n...#.#.#.#\n####.#....\n..#.#.....\n....#..#.#\n.##..##.#.\n.#.####...\n####.#.#..\n##.####...\n##..#.##..\n#.##...##.\n\nTile 3079:\n#.#.#####.\n.#..######\n..#.......\n######....\n####.#..#.\n.#...#.##.\n#.#####.##\n..#.###...\n..#.......\n..#.###...\n\nBy rotating, flipping, and rearranging them, you can find a square arrangement that causes\nall adjacent borders to line up:\n\n#...##.#.. ..###..### #.#.#####.\n..#.#..#.# ###...#.#. .#..######\n.###....#. ..#....#.. ..#.......\n###.##.##. .#.#.#..## ######....\n.###.##### ##...#.### ####.#..#.\n.##.#....# ##.##.###. .#...#.##.\n#...###### ####.#...# #.#####.##\n.....#..## #...##..#. ..#.###...\n#.####...# ##..#..... ..#.......\n#.##...##. ..##.#..#. ..#.###...\n\n#.##...##. ..##.#..#. ..#.###...\n##..#.##.. ..#..###.# ##.##....#\n##.####... .#.####.#. ..#.###..#\n####.#.#.. ...#.##### ###.#..###\n.#.####... ...##..##. .######.##\n.##..##.#. ....#...## #.#.#.#...\n....#..#.# #.#.#.##.# #.###.###.\n..#.#..... .#.##.#..# #.###.##..\n####.#.... .#..#.##.. .######...\n...#.#.#.# ###.##.#.. .##...####\n\n...#.#.#.# ###.##.#.. .##...####\n..#.#.###. 
..##.##.## #..#.##..#\n..####.### ##.#...##. .#.#..#.##\n#..#.#..#. ...#.#.#.. .####.###.\n.#..####.# #..#.#.#.# ####.###..\n.#####..## #####...#. .##....##.\n##.##..#.. ..#...#... .####...#.\n#.#.###... .##..##... .####.##.#\n#...###... ..##...#.. ...#..####\n..#.#....# ##.#.#.... ...##.....\n\nFor reference, the IDs of the above tiles are:\n\n1951 2311 3079\n2729 1427 2473\n2971 1489 1171\n\nTo check that you've assembled the image correctly, multiply the IDs of the four corner tiles together.\nIf you do this with the assembled tiles from the example above, you get 1951 * 3079 * 2971 * 1171 = 20899048083289.\n\nAssemble the tiles into an image. What do you get if you multiply together the IDs of the four corner tiles?\n\n\"\"\"\nfrom functools import reduce\nfrom collections import defaultdict\nfrom itertools import product\nfrom typing import Sequence, Dict, Tuple, List, Union, DefaultDict, Any\n\n\ndef digits_in(line: str) -> Union[List[int], list]:\n result = []\n for char in line:\n if char.isdigit():\n result.append(char)\n\n return [int(\"\".join(result))] if result else []\n\n\ndef find_edges(tile: Union[Sequence[str], Sequence[Sequence[str]]]) -> Tuple[str, str, str, str]:\n first = \"\".join(tile[0])\n last = \"\".join(tile[-1])\n first_in_row = []\n last_in_row = []\n for row in range(len(tile)):\n first_in_row.append(tile[row][0])\n last_in_row.append(tile[row][-1])\n return first, last, \"\".join(first_in_row), \"\".join(last_in_row)\n\n\ndef multiply_all(numbers: Sequence[int]) -> int:\n return reduce(lambda x, y: x * y, numbers)\n\n\ndef matches(x: str, y: str) -> bool:\n return (x == y) or (x == \"\".join(reversed(y)))\n\n\ndef find_id_of_corners(corners: Dict[int, int]):\n return [k for k, v in corners.items() if v == 2]\n\n\ndef find_corner_tiles(tiles: Dict[int, Sequence[str]]) -> DefaultDict[Any, int]:\n sides = {}\n tiles_and_number_of_matches = defaultdict(int)\n\n for tile_id, tile in tiles.items():\n sides[tile_id] = find_edges(tile)\n\n for a, b in product(sides.keys(), repeat=2):\n if a == b:\n continue\n\n for a_side, b_side in product(sides[a], sides[b]):\n if matches(a_side, b_side):\n tiles_and_number_of_matches[a] += 1\n\n return tiles_and_number_of_matches\n\n\ndef parse_input(data: Sequence[str]) -> Dict[int, List[str]]:\n tiles = {}\n tile = []\n tile_id = -1\n\n for line in data:\n if line.strip():\n if digits_in(line):\n tile_id = digits_in(line)[0]\n else:\n tile.append(line.strip())\n else:\n tiles[tile_id] = tile\n tile_id = -1\n tile = []\n\n return tiles\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = f.readlines()\n\n tiles = parse_input(puzzle_input)\n\n four_corners = find_corner_tiles(tiles)\n id_of_corners = find_id_of_corners(four_corners)\n print(multiply_all(id_of_corners)) # Answer = 84_116_744_709_593\n\n\nif __name__ == '__main__':\n main()\n"
},
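A tiny illustration of find_edges on a made-up 3x3 tile (the real tiles are 10x10): it returns the top row, bottom row, left column and right column, in that order.

    tile = ["#..",
            ".#.",
            "..#"]
    assert find_edges(tile) == ("#..", "..#", "#..", "..#")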
{
"alpha_fraction": 0.6677543520927429,
"alphanum_fraction": 0.6763571500778198,
"avg_line_length": 32.366336822509766,
"blob_id": "7a7bbd5bb055ccccdba8ebb861f38ac6c1086473",
"content_id": "919c535af94f724279eda3c873c151650fcd92b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3371,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 101,
"path": "/Day 2/part_2.cpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "/*\n--- Part Two ---\n\nWhile it appears you validated the passwords correctly, they don't seem to be what the\nOfficial Toboggan Corporate Authentication System is expecting.\n\nThe shopkeeper suddenly realizes that he just accidentally explained the password policy rules from his old job\nat the sled rental place down the street! The Official Toboggan Corporate Policy actually works a little differently.\n\nEach policy actually describes two positions in the password, where 1 means the first character,\n2 means the second character, and so on. (Be careful; Toboggan Corporate Policies have no concept of \"index zero\"!)\nExactly one of these positions must contain the given letter. Other occurrences of the letter are irrelevant\nfor the purposes of policy enforcement.\n\nGiven the same example list from above:\n\n 1-3 a: abcde is valid: position 1 contains a and position 3 does not.\n 1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.\n 2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.\n\nHow many passwords are valid according to the new interpretation of the policies?\n\n*/\n#include <iostream>\n#include <vector>\n#include <string>\n#include <fstream>\n#include \"../cppheaders/functions.hpp\"\n\n/**\nThe function checks whether a password is valid or not.\nThe validation is such that, the password must contain the character to be validated at least once in either of the positions specified.\nIf the above holds true, the function returns True. Else, it returns False.\n\n@param pwd: str - The password to be validated.\n@param char: Union[int, str] - The character to be used for validation.\n@param first_position: int - Positon of character to be validated.\n@param last_position: int - Position of character to be validated.\n@return: bool\n*/\nbool is_valid(const std::string pwd, const char char_, const int first_position, const int last_position)\n{\n if (pwd.find(char_) == pwd.npos)\n {\n return false;\n }\n if ((char_ == pwd[first_position - 1]) && (char_ != pwd[last_position - 1]))\n {\n return true;\n }\n else if ((char_ != pwd[first_position - 1]) && (char_ == pwd[last_position - 1]))\n {\n return true;\n }\n return false;\n \n}\n\n\nint find_number_of_valid_passwords(std::vector<std::string> data)\n{\n int number_of_valid_passwords = 0;\n\n for (auto line : data)\n {\n std::vector<std::string> line_content = functions::split(line, \" \");\n std::vector<std::string> min_and_max_occurence = functions::split(line_content[0], \"-\");\n int minimum_occurence = std::stoi(min_and_max_occurence[0]);\n int maximum_occurence = std::stoi(min_and_max_occurence[1]);\n char character_to_validate = line_content[1][0];\n std::string password = line_content[2];\n\n if (is_valid(password, character_to_validate\n , minimum_occurence, maximum_occurence))\n {\n ++number_of_valid_passwords;\n }\n }\n return number_of_valid_passwords;\n}\n\nint main()\n{\n std::string line;\n std::vector<std::string> puzzle_input;\n std::ifstream f (\"./input.txt\");\n\n if (f.is_open())\n {\n while (getline(f, line))\n {\n puzzle_input.push_back(line);\n }\n \n f.close();\n }\n\n std::cout << find_number_of_valid_passwords(puzzle_input) << std::endl; // Answer = 708\n\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.5972404479980469,
"alphanum_fraction": 0.6268068552017212,
"avg_line_length": 28.843137741088867,
"blob_id": "007530ce6971226e3dd2b48de3ef49ced0e7f0e2",
"content_id": "304393e358c7c2e7b9cf2b526bd37e1e4f673bbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1522,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 51,
"path": "/Day 1/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nThe Elves in accounting are thankful for your help; one of them even offers you\na starfish coin they had left over from a past vacation. They offer\nyou a second one if you can find three numbers in your expense\nreport that meet the same criteria.\n\nUsing the above example again, the three entries that sum to 2020 are 979,\n366, and 675. Multiplying them together produces the answer, 241861950.\n\nIn your expense report, what is the product of the three entries\nthat sum to 2020?\n\"\"\"\nfrom typing import Sequence\n\nTARGET = 2020\n\n\ndef find_target(numbers: Sequence[int]) -> int:\n\n index_of_second_number = 0\n index_of_third_number = 1\n\n for number in set(numbers):\n while index_of_second_number < len(numbers):\n while index_of_third_number < len(numbers):\n second_number = numbers[index_of_second_number]\n third_number = numbers[index_of_third_number]\n\n sum_ = number + second_number + third_number\n\n if sum_ == TARGET:\n return (number * numbers[index_of_second_number]\n * numbers[index_of_third_number])\n\n index_of_third_number += 1\n index_of_second_number += 1\n index_of_third_number = 1\n index_of_second_number = 0\n\n\ndef main():\n with open(\"./input.txt\") as f:\n numbers = [int(line.strip()) for line in f.readlines()]\n\n print(find_target(numbers)) # Answer = 212428694\n\n\nif __name__ == '__main__':\n main()\n"
},
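The nested scans above are cubic; a sketch of the usual quadratic alternative (my addition, not part of the solution) fixes two entries and looks the third up in a set, reusing the module's TARGET constant:

    def find_target_quadratic(numbers):
        # For each pair, check whether the third number needed to reach TARGET exists.
        pool = set(numbers)
        for i, a in enumerate(numbers):
            for b in numbers[i + 1:]:
                c = TARGET - a - b
                # Assumes the three entries are distinct values, which holds for this input.
                if c in pool and c != a and c != b:
                    return a * b * c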
{
"alpha_fraction": 0.5775423049926758,
"alphanum_fraction": 0.6303240060806274,
"avg_line_length": 29.668750762939453,
"blob_id": "0383c65ea32bcc3579ef5b9925968d0f7f8b923b",
"content_id": "997a2cdaf4f93590ca0844ef6b8bb346a1b5be43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4907,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 160,
"path": "/Day 23/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 23: Crab Cups ---\n\nThe small crab challenges you to a game!\nThe crab is going to mix up some cups, and you have to predict where they'll end up.\n\nThe cups will be arranged in a circle and labeled clockwise (your puzzle input).\nFor example, if your labeling were 32415, there would be five cups in the circle;\ngoing clockwise around the circle from the first cup, the cups would be labeled 3, 2, 4, 1, 5, and then back to 3 again.\n\nBefore the crab starts, it will designate the first cup in your list as the current cup.\nThe crab is then going to do 100 moves.\n\nEach move, the crab does the following actions:\n\n The crab picks up the three cups that are immediately clockwise of the current cup.\n They are removed from the circle; cup spacing is adjusted as necessary to maintain the circle.\n The crab selects a destination cup: the cup with a label equal to the current cup's label minus one.\n If this would select one of the cups that was just picked up, the crab will keep subtracting one until\n it finds a cup that wasn't just picked up.\n If at any point in this process the value goes below the lowest value on any cup's label,\n it wraps around to the highest value on any cup's label instead.\n The crab places the cups it just picked up so that they are immediately clockwise of the destination cup.\n They keep the same order as when they were picked up.\n The crab selects a new current cup: the cup which is immediately clockwise of the current cup.\n\nFor example, suppose your cup labeling were 389125467.\nIf the crab were to do merely 10 moves, the following changes would occur:\n\n-- move 1 --\ncups: (3) 8 9 1 2 5 4 6 7\npick up: 8, 9, 1\ndestination: 2\n\n-- move 2 --\ncups: 3 (2) 8 9 1 5 4 6 7\npick up: 8, 9, 1\ndestination: 7\n\n-- move 3 --\ncups: 3 2 (5) 4 6 7 8 9 1\npick up: 4, 6, 7\ndestination: 3\n\n-- move 4 --\ncups: 7 2 5 (8) 9 1 3 4 6\npick up: 9, 1, 3\ndestination: 7\n\n-- move 5 --\ncups: 3 2 5 8 (4) 6 7 9 1\npick up: 6, 7, 9\ndestination: 3\n\n-- move 6 --\ncups: 9 2 5 8 4 (1) 3 6 7\npick up: 3, 6, 7\ndestination: 9\n\n-- move 7 --\ncups: 7 2 5 8 4 1 (9) 3 6\npick up: 3, 6, 7\ndestination: 8\n\n-- move 8 --\ncups: 8 3 6 7 4 1 9 (2) 5\npick up: 5, 8, 3\ndestination: 1\n\n-- move 9 --\ncups: 7 4 1 5 8 3 9 2 (6)\npick up: 7, 4, 1\ndestination: 5\n\n-- move 10 --\ncups: (5) 7 4 1 8 3 9 2 6\npick up: 7, 4, 1\ndestination: 3\n\n-- final --\ncups: 5 (8) 3 7 4 1 9 2 6\n\nIn the above example, the cups' values are the labels as they appear moving clockwise around the circle;\nthe current cup is marked with ( ).\n\nAfter the crab is done, what order will the cups be in?\nStarting after the cup labeled 1,\ncollect the other cups' labels clockwise into a single string with no extra characters;\neach number except 1 should appear exactly once. In the above example, after 10 moves,\nthe cups clockwise from 1 are labeled 9, 2, 6, 5, and so on, producing 92658374.\nIf the crab were to complete all 100 moves, the order after cup 1 would be 67384529.\n\nUsing your labeling, simulate 100 moves. 
What are the labels on the cups after cup 1?\n\nYour puzzle input is 318946572.\n\"\"\"\n\n\ndef find_labels_on_cup(labeling: str, moves: int) -> str:\n cups = [int(x) for x in labeling]\n number_of_cups = len(cups)\n highest = max(cups)\n current_index = 0\n\n for _ in range(moves):\n current_value = cups[current_index]\n picked = []\n\n for i in range(current_index + 1, current_index + 4):\n picked.append(cups[i % number_of_cups])\n\n destination_label = cups[current_index] - 1\n\n start = 0 if (current_index < number_of_cups - 3) else (current_index - number_of_cups + 4)\n cups = cups[start:current_index + 1] + cups[current_index + 4:]\n\n while (destination_label not in cups) and (destination_label > 1):\n destination_label -= 1\n\n if destination_label not in cups:\n destination_label = highest\n while destination_label not in cups:\n destination_label -= 1\n\n for i in range(number_of_cups - 3):\n if cups[i] == destination_label:\n cups = cups[:i + 1] + picked + cups[i + 1:]\n break\n\n if cups[-1] == current_value:\n current_index = 0\n else:\n for i, cup in enumerate(cups):\n if cup == current_value:\n current_index = i + 1\n break\n\n labels_on_cup = []\n found_one = False\n\n for cup in cups:\n if cup == 1:\n found_one = True\n elif found_one:\n labels_on_cup.append(str(cup))\n\n for cup in cups:\n if cup == 1:\n break\n labels_on_cup.append(str(cup))\n\n return ''.join(labels_on_cup)\n\n\ndef main():\n print(find_labels_on_cup('318946572', 100)) # Answer = 52_864_379\n\n\nif __name__ == '__main__':\n main()\n"
},
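A direct check of find_labels_on_cup against the worked example in the puzzle text: 389125467 gives 92658374 after 10 moves and 67384529 after 100.

    assert find_labels_on_cup('389125467', 10) == '92658374'
    assert find_labels_on_cup('389125467', 100) == '67384529'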
{
"alpha_fraction": 0.5194174647331238,
"alphanum_fraction": 0.5339806079864502,
"avg_line_length": 24.224489212036133,
"blob_id": "59bbbd59614d525d6ac43b96e5ce05c0aab7aacb",
"content_id": "dba3a18efbd8d71155c507e895929ebadc8198fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1236,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 49,
"path": "/Day 1/part_2.cpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <fstream>\n#include <string>\n#include <vector>\n\nconst int TARGET = 2020;\n\nint find_target(const std::vector<int> numbers)\n{\n int index_of_second_number = 0, index_of_third_number = 1;\n\n for (int number : numbers)\n {\n while (index_of_second_number < numbers.size())\n {\n while (index_of_third_number < numbers.size())\n {\n int second_number = numbers[index_of_second_number];\n int third_number = numbers[index_of_third_number];\n int sum = number + second_number + third_number;\n\n if (sum == TARGET)\n {\n return number * numbers[index_of_second_number] * numbers[index_of_third_number];\n }\n ++index_of_third_number;\n }\n ++index_of_second_number;\n index_of_third_number = 1;\n }\n index_of_second_number = 0;\n }\n}\n\nint main()\n{\n std::ifstream f(\"./input.txt\");\n std::vector<int> numbers;\n std::string line;\n\n while (getline(f, line))\n {\n numbers.push_back(std::stoi(line));\n }\n\n std::cout << find_target(numbers) << std::endl; // Answer = 212_428_694\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6315789222717285,
"alphanum_fraction": 0.6750398874282837,
"avg_line_length": 32,
"blob_id": "6344adf8652410616d7ff39726ab8b959a8afd7d",
"content_id": "2f888aaee961ce91417549b6b8812404d5370396",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2508,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 76,
"path": "/Day 23/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nDue to what you can only assume is a mistranslation (you're not exactly fluent in Crab),\nyou are quite surprised when the crab starts arranging many cups in a circle on your raft -\none million (1000000) in total.\n\nYour labeling is still correct for the first few cups;\nafter that, the remaining cups are just numbered in an increasing fashion starting from the number\nafter the highest number in your list and proceeding one by one until one million is reached.\n(For example, if your labeling were 54321, the cups would be numbered 5, 4, 3, 2, 1, and then start counting up\nfrom 6 until one million is reached.) In this way, every number from one through one million is used exactly once.\n\nAfter discovering where you made the mistake in translating Crab Numbers,\nyou realize the small crab isn't going to do merely 100 moves; the crab is going to do ten million (10000000) moves!\n\nThe crab is going to hide your stars - one each - under the two cups that will end up immediately clockwise of cup 1.\nYou can have them if you predict what the labels on those cups will be when the crab is finished.\n\nIn the above example (389125467), this would be 934001 and then 159792;\nmultiplying these together produces 149245887792.\n\nDetermine which two cups will end up immediately clockwise of cup 1.\nWhat do you get if you multiply their labels together?\n\n\"\"\"\nfrom typing import Dict\n\n\ndef product_of_cups_labels(cups) -> int:\n return cups[1] * cups[cups[1]]\n\n\ndef find_cups(labeling: str, moves: int) -> Dict[int, int]:\n successor = {}\n\n for x, label in enumerate(labeling):\n if x > 0:\n successor[int(labeling[x - 1])] = int(label)\n\n successor[int(labeling[-1])] = 10\n\n for i in range(10, 10 ** 6):\n successor[i] = i + 1\n\n successor[10 ** 6] = int(labeling[0])\n n = len(successor)\n current = int(labeling[0])\n\n for _ in range(moves):\n a = successor[current]\n b = successor[a]\n c = successor[b]\n d = successor[c]\n\n moving = [a, b, c]\n successor[current] = d\n destination = ((current - 2) % n) + 1\n\n while destination in moving:\n destination = ((destination - 2) % n) + 1\n\n successor[c] = successor[destination]\n successor[destination] = a\n current = successor[current]\n\n return successor\n\n\ndef main():\n cups = find_cups('318946572', 10 ** 7)\n print(product_of_cups_labels(cups)) # Answer = 11_591_415_792\n\n\nif __name__ == '__main__':\n main()\n"
},
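The successor dictionary acts as a singly linked list, so each move is O(1) pointer surgery instead of list slicing. The puzzle's own example gives a grounded (if slow, ten million moves in CPython) check:

    # From the puzzle text: 389125467 -> the two cups after cup 1 multiply to 149245887792.
    assert product_of_cups_labels(find_cups('389125467', 10 ** 7)) == 149_245_887_792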
{
"alpha_fraction": 0.559440553188324,
"alphanum_fraction": 0.7181429862976074,
"avg_line_length": 37.906978607177734,
"blob_id": "15a89b959d0cb61a70342b3aff9f0b900be0e7d3",
"content_id": "c7042273e0cff4b61e601305118c8d4a4ee16ba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5148,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 129,
"path": "/Day 14/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nFor some reason, the sea port's computer system still can't communicate with your ferry's docking program.\r\nIt must be using version 2 of the decoder chip!\r\n\r\nA version 2 decoder chip doesn't modify the values being written at all. Instead, it acts as a memory address decoder.\r\nImmediately before a value is written to memory, each bit in the bitmask modifies the corresponding bit of the\r\ndestination memory address in the following way:\r\n\r\n If the bitmask bit is 0, the corresponding memory address bit is unchanged.\r\n If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.\r\n If the bitmask bit is X, the corresponding memory address bit is floating.\r\n\r\nA floating bit is not connected to anything and instead fluctuates unpredictably. In practice, this means the floating\r\nbits will take on all possible values, potentially causing many memory addresses to be written all at once!\r\n\r\nFor example, consider the following program:\r\n\r\nmask = 000000000000000000000000000000X1001X\r\nmem[42] = 100\r\nmask = 00000000000000000000000000000000X0XX\r\nmem[26] = 1\r\n\r\nWhen this program goes to write to memory address 42, it first applies the bitmask:\r\n\r\naddress: 000000000000000000000000000000101010 (decimal 42)\r\nmask: 000000000000000000000000000000X1001X\r\nresult: 000000000000000000000000000000X1101X\r\n\r\nAfter applying the mask, four bits are overwritten, three of which are different, and two of which are floating.\r\nFloating bits take on every possible combination of values; with two floating bits, four actual memory\r\naddresses are written:\r\n\r\n000000000000000000000000000000011010 (decimal 26)\r\n000000000000000000000000000000011011 (decimal 27)\r\n000000000000000000000000000000111010 (decimal 58)\r\n000000000000000000000000000000111011 (decimal 59)\r\n\r\nNext, the program is about to write to memory address 26 with a different bitmask:\r\n\r\naddress: 000000000000000000000000000000011010 (decimal 26)\r\nmask: 00000000000000000000000000000000X0XX\r\nresult: 00000000000000000000000000000001X0XX\r\n\r\nThis results in an address with three floating bits, causing writes to eight memory addresses:\r\n\r\n000000000000000000000000000000010000 (decimal 16)\r\n000000000000000000000000000000010001 (decimal 17)\r\n000000000000000000000000000000010010 (decimal 18)\r\n000000000000000000000000000000010011 (decimal 19)\r\n000000000000000000000000000000011000 (decimal 24)\r\n000000000000000000000000000000011001 (decimal 25)\r\n000000000000000000000000000000011010 (decimal 26)\r\n000000000000000000000000000000011011 (decimal 27)\r\n\r\nThe entire 36-bit address space still begins initialized to the value 0 at every address, and you still need the sum of\r\nall values left in memory at the end of the program. In this example, the sum is 208.\r\n\r\nExecute the initialization program using an emulator for a version 2 decoder chip. What is the sum of all values left\r\nin memory after it completes?\r\n\r\n\"\"\"\r\nfrom collections import defaultdict\r\nimport itertools\r\nfrom typing import List, Sequence\r\nfrom part_1 import parse_instruction_set\r\n\r\nmemory_block = defaultdict(int)\r\n\r\n\r\ndef flip_bits(address: str) -> List[str]:\r\n \"\"\"\r\n Finds a list of all addresses in an address after it is masked.\r\n Returns a list of all addresses in binary (base 2)\r\n :param address: 36-bit memory address. 
Eg: 0000011011111X1001101X1011X1001111X1 - str\r\n :return: List of all addresses in the input address after masking - List[str]\r\n \"\"\"\r\n all_addresses = []\r\n address = list(address)\r\n\r\n indices = [i for i, element in enumerate(address) if element == \"X\"]\r\n combinations = itertools.product([\"0\", \"1\"], repeat=len(indices))\r\n\r\n for items in combinations:\r\n for index, item in zip(indices, items):\r\n address[index] = item\r\n all_addresses.append(\"\".join(address))\r\n return all_addresses\r\n\r\n\r\ndef execute_instructions(instruction_set: Sequence[str]) -> None:\r\n mask = instruction_set[0].split(\"= \")[1]\r\n instructions = instruction_set[1:]\r\n\r\n for instruction in instructions:\r\n memory_address, value = instruction.split(\" = \")\r\n memory_address = memory_address[4:-1]\r\n binary_memory_address = bin(int(memory_address))[2:]\r\n full_binary_memory_address = (\"0\" * (len(mask) - len(binary_memory_address))) + binary_memory_address\r\n\r\n new_binary_address = \"\"\r\n\r\n for b, m in zip(full_binary_memory_address, mask):\r\n if m == \"0\":\r\n new_binary_address += b\r\n elif m == \"1\":\r\n new_binary_address += m\r\n elif m == \"X\":\r\n new_binary_address += m\r\n\r\n addresses = flip_bits(new_binary_address)\r\n for address in addresses:\r\n memory_block[address] = int(value)\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n convenient_input = parse_instruction_set(puzzle_input)\r\n for group in convenient_input:\r\n execute_instructions(group)\r\n\r\n print(sum(memory_block.values())) # Answer = 3_817_372_618_036\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
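Editor's note: the record above builds a 36-character mask result and then expands the floating "X" positions with itertools.product. The following is a minimal standalone sketch of the same expansion idea (the function name and integer-based interface are this note's own, not the repository's), checked against the 42 -> {26, 27, 58, 59} example quoted in the docstring:

import itertools

def masked_addresses(address: int, mask: str) -> list:
    # '0' keeps the address bit, '1' forces a 1, 'X' floats (takes both values).
    bits = list(format(address, "036b"))
    floating = []
    for i, m in enumerate(mask):
        if m == "1":
            bits[i] = "1"
        elif m == "X":
            floating.append(i)
    out = []
    for combo in itertools.product("01", repeat=len(floating)):
        for i, b in zip(floating, combo):
            bits[i] = b
        out.append(int("".join(bits), 2))
    return out

assert sorted(masked_addresses(42, "X1001X".rjust(36, "0"))) == [26, 27, 58, 59]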
{
"alpha_fraction": 0.6073211431503296,
"alphanum_fraction": 0.6593177914619446,
"avg_line_length": 26.632183074951172,
"blob_id": "7777c6be41bbc332022acb5037878e219426cafa",
"content_id": "0ff3061c5134614e46f998a79732182162e94272",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2404,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 87,
"path": "/Day 24/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nThe tile floor in the lobby is meant to be a living art exhibit.\nEvery day, the tiles are all flipped according to the following rules:\n\n Any black tile with zero or more than 2 black tiles immediately adjacent to it is flipped to white.\n Any white tile with exactly 2 black tiles immediately adjacent to it is flipped to black.\n\nHere, tiles immediately adjacent means the six tiles directly touching the tile in question.\n\nThe rules are applied simultaneously to every tile; put another way, it is first determined which tiles need to be\nflipped, then they are all flipped at the same time.\n\nIn the above example, the number of black tiles that are facing up after the given number of days has passed is as\nfollows:\n\nDay 1: 15\nDay 2: 12\nDay 3: 25\nDay 4: 14\nDay 5: 23\nDay 6: 28\nDay 7: 41\nDay 8: 37\nDay 9: 49\nDay 10: 37\n\nDay 20: 132\nDay 30: 259\nDay 40: 406\nDay 50: 566\nDay 60: 788\nDay 70: 1106\nDay 80: 1373\nDay 90: 1844\nDay 100: 2208\n\nAfter executing this process a total of 100 times, there would be 2208 black tiles facing up.\n\nHow many tiles will be black after 100 days?\n\n\"\"\"\nfrom collections import Counter\nfrom typing import Set, Tuple\n\nfrom part_1 import tiles_with_black_side_up\n\n\ndef hexagonal_neighbours(right: int, center: int) -> Set[Tuple[int, int]]:\n neighbours = {(right, center + 1), (right, center - 1)}\n\n if right % 2:\n neighbours |= {(right + 1, center - 1), (right + 1, center), (right - 1, center - 1), (right - 1, center)}\n else:\n neighbours |= {(right + 1, center), (right + 1, center + 1), (right - 1, center + 1), (right - 1, center)}\n\n return neighbours\n\n\ndef black_tiles_after_days(tiles: Set[Tuple[int, int]], days: int) -> Set[Tuple[int, int]]:\n for _ in range(days):\n c = Counter()\n\n for x, y in tiles:\n for ny, nx in hexagonal_neighbours(y, x):\n c[(nx, ny)] += 1\n\n tiles = {\n coordinates for coordinates in c if c[coordinates] == 2 or (coordinates in tiles and c[coordinates] == 1)\n }\n\n return tiles\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = [line.strip() for line in f.readlines()]\n\n tiles = tiles_with_black_side_up(puzzle_input)\n days = 100\n black_tiles_after_100_days = black_tiles_after_days(tiles, days)\n print(len(black_tiles_after_100_days)) # Answer = 3627\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.39304158091545105,
"alphanum_fraction": 0.5778744220733643,
"avg_line_length": 34.425743103027344,
"blob_id": "43343c8a2360c6b9b9b46e70fb7c1b6dd7df9b3d",
"content_id": "05ff7ef6afa7d66442f282573f84b10d5b862e45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3679,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 101,
"path": "/Day 10/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nTo completely determine whether you have enough adapters, you'll need to figure out how many different ways they can be\r\narranged. Every arrangement needs to connect the charging outlet to your device. The previous rules about when adapters\r\ncan successfully connect still apply.\r\n\r\nThe first example above (the one that starts with 16, 10, 15) supports the following arrangements:\r\n\r\n(0), 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 5, 6, 7, 10, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 5, 7, 10, 11, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 5, 7, 10, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 6, 7, 10, 11, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 6, 7, 10, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 7, 10, 11, 12, 15, 16, 19, (22)\r\n(0), 1, 4, 7, 10, 12, 15, 16, 19, (22)\r\n\r\n(The charging outlet and your device's built-in adapter are shown in parentheses.) Given the adapters from the first\r\nexample, the total number of arrangements that connect the charging outlet to your device is 8.\r\n\r\nThe second example above (the one that starts with 28, 33, 18) has many arrangements. Here are a few:\r\n\r\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\r\n32, 33, 34, 35, 38, 39, 42, 45, 46, 47, 48, 49, (52)\r\n\r\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\r\n32, 33, 34, 35, 38, 39, 42, 45, 46, 47, 49, (52)\r\n\r\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\r\n32, 33, 34, 35, 38, 39, 42, 45, 46, 48, 49, (52)\r\n\r\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\r\n32, 33, 34, 35, 38, 39, 42, 45, 46, 49, (52)\r\n\r\n(0), 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 17, 18, 19, 20, 23, 24, 25, 28, 31,\r\n32, 33, 34, 35, 38, 39, 42, 45, 47, 48, 49, (52)\r\n\r\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\r\n46, 48, 49, (52)\r\n\r\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\r\n46, 49, (52)\r\n\r\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\r\n47, 48, 49, (52)\r\n\r\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\r\n47, 49, (52)\r\n\r\n(0), 3, 4, 7, 10, 11, 14, 17, 20, 23, 25, 28, 31, 34, 35, 38, 39, 42, 45,\r\n48, 49, (52)\r\n\r\nIn total, this set of adapters can connect the charging outlet to your device in 19208 distinct arrangements.\r\n\r\nYou glance back down at your bag and try to remember why you brought so many adapters; there must be more than a\r\ntrillion valid ways to arrange them! 
Surely, there must be an efficient way to count the arrangements.\r\n\r\nWhat is the total number of distinct ways you can arrange the adapters to connect the charging outlet to your device?\r\n\r\n\"\"\"\r\nfrom typing import Dict, Sequence\r\nfrom collections import defaultdict\r\nimport part_1\r\n\r\n\r\ndef count_sequences(differences: Sequence[int]) -> Dict[int, int]:\r\n counter = 0\r\n result = defaultdict(int)\r\n for number in differences:\r\n if number == 1:\r\n counter += 1\r\n else:\r\n if counter > 1:\r\n result[counter] += 1\r\n counter = 0\r\n if counter > 1:\r\n result[counter] += 1\r\n return dict(result)\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [int(line.strip()) for line in f.readlines()]\r\n convenient_input = [0] + puzzle_input\r\n convenient_input.sort()\r\n\r\n differences = part_1.calculate_differences(convenient_input)\r\n sequence = count_sequences(differences)\r\n\r\n multiples = {2: 2, 3: 4, 4: 7, 5: 13}\r\n total = 1\r\n\r\n for key, value in sequence.items():\r\n total *= multiples[key] ** value\r\n\r\n print(total) # Answer = 6_908_379_398_144\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
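Editor's note: the multipliers {2: 2, 3: 4, 4: 7, 5: 13} used in the record above follow a tribonacci-style recurrence, since at each adapter in a run of 1-jolt gaps you can reach at most the next three adapters. A small verification sketch (illustrative only, not repository code):

def arrangements_for_run(n: int) -> int:
    # Ways to cross a run of n consecutive 1-jolt gaps:
    # f(0) = f(1) = 1, f(2) = 2, then f(n) = f(n-1) + f(n-2) + f(n-3).
    if n < 2:
        return 1
    a, b, c = 1, 1, 2
    for _ in range(n - 2):
        a, b, c = b, c, a + b + c
    return c

assert [arrangements_for_run(n) for n in (2, 3, 4, 5)] == [2, 4, 7, 13]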
{
"alpha_fraction": 0.6437098383903503,
"alphanum_fraction": 0.7061524391174316,
"avg_line_length": 23.75,
"blob_id": "ac41b412a89fd5f3e098150eab375e27fd0244c5",
"content_id": "05b753c864014dcecfb5453000078a202d338c8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 44,
"path": "/Day 1/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\nBefore you leave, the Elves in accounting just need you to fix your expense\nreport (your puzzle input); apparently, something isn't quite adding up.\n\nSpecifically, they need you to find the two entries that sum to 2020 and then\nmultiply those two numbers together.\n\nFor example, suppose your expense report contained the following:\n\n1721\n979\n366\n299\n675\n1456\n\nIn this list, the two entries that sum to 2020 are 1721 and 299. Multiplying\nthem together produces 1721 * 299 = 514579, so the correct answer is 514579.\n\nOf course, your expense report is much larger. Find the two entries that sum\nto 2020; what do you get if you multiply them together?\n\n\"\"\"\nfrom typing import Sequence\n\nTARGET = 2020\n\n\ndef find_target(numbers: Sequence[int]) -> int:\n for number in numbers:\n remainder = TARGET - number\n if remainder in numbers:\n return remainder * number\n\n\ndef main():\n with open(\"./input.txt\") as f:\n numbers = [int(line.strip()) for line in f.readlines()]\n\n print(find_target(numbers)) # Answer = 567171\n\n\nif __name__ == '__main__':\n main()\n"
},
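Editor's note: find_target above rescans the whole list for each remainder and could, in principle, match a lone 1010 against itself. A set-based variant (an editor's sketch, not the repository's code) runs in linear time and only pairs a number with entries seen earlier:

def two_sum_product(numbers, target=2020):
    seen = set()
    for n in numbers:
        if target - n in seen:  # only earlier entries, so n never pairs with itself
            return n * (target - n)
        seen.add(n)
    return None

assert two_sum_product([1721, 979, 366, 299, 675, 1456]) == 514579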
{
"alpha_fraction": 0.7045454382896423,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 22.294116973876953,
"blob_id": "9cf7afa9741a169441992f0e88e808ae7e8ee1dc",
"content_id": "19a7fa85e4a9b3d6178fc3d13585c2c552056ce3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 17,
"path": "/cppheaders/functions.hpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "#ifndef FUNCTIONS_HPP\n#define FUNCTIONS_HPP\n#include <vector>\n#include <string>\n\nnamespace functions\n{\n template <typename T>\n void print(const std::vector<T> list);\n\n int count(std::string str, const std::string value);\n int count(std::string str, const char value);\n\n std::vector<std::string> split(std::string str, const std::string delimeter);\n} // namespace functions\n\n#endif\n"
},
{
"alpha_fraction": 0.4972057044506073,
"alphanum_fraction": 0.5193798542022705,
"avg_line_length": 15.07826042175293,
"blob_id": "b35185d6b178af15e10fdd722a9a5853a1fc9a47",
"content_id": "576c1ec94b5db0fe284d3253d94b747191112cb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5547,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 345,
"path": "/Day 17/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nFor some reason, your simulated results don't match what the experimental energy source engineers expected.\nApparently, the pocket dimension actually has four spatial dimensions, not three.\n\nThe pocket dimension contains an infinite 4-dimensional grid. At every integer 4-dimensional coordinate (x,y,z,w),\nthere exists a single cube (really, a hypercube) which is still either active or inactive.\n\nEach cube only ever considers its neighbors: any of the 80 other cubes where any of their coordinates differ by at\nmost 1. For example, given the cube at x=1,y=2,z=3,w=4, its neighbors include the cube at x=2,y=2,z=3,w=3,\nthe cube at x=0,y=2,z=3,w=4, and so on.\n\nThe initial state of the pocket dimension still consists of a small flat region of cubes. Furthermore, the same rules\nfor cycle updating still apply: during each cycle, consider the number of active neighbors of each cube.\n\nFor example, consider the same initial state as in the example above. Even though the pocket dimension is\n4-dimensional, this initial state represents a small 2-dimensional slice of it. (In particular, this initial state\ndefines a 3x3x1x1 region of the 4-dimensional space.)\n\nSimulating a few cycles from this initial state produces the following configurations, where the result of each cycle\nis shown layer-by-layer at each given z and w coordinate:\n\nBefore any cycles:\n\nz=0, w=0\n.#.\n..#\n###\n\n\nAfter 1 cycle:\n\nz=-1, w=-1\n#..\n..#\n.#.\n\nz=0, w=-1\n#..\n..#\n.#.\n\nz=1, w=-1\n#..\n..#\n.#.\n\nz=-1, w=0\n#..\n..#\n.#.\n\nz=0, w=0\n#.#\n.##\n.#.\n\nz=1, w=0\n#..\n..#\n.#.\n\nz=-1, w=1\n#..\n..#\n.#.\n\nz=0, w=1\n#..\n..#\n.#.\n\nz=1, w=1\n#..\n..#\n.#.\n\n\nAfter 2 cycles:\n\nz=-2, w=-2\n.....\n.....\n..#..\n.....\n.....\n\nz=-1, w=-2\n.....\n.....\n.....\n.....\n.....\n\nz=0, w=-2\n###..\n##.##\n#...#\n.#..#\n.###.\n\nz=1, w=-2\n.....\n.....\n.....\n.....\n.....\n\nz=2, w=-2\n.....\n.....\n..#..\n.....\n.....\n\nz=-2, w=-1\n.....\n.....\n.....\n.....\n.....\n\nz=-1, w=-1\n.....\n.....\n.....\n.....\n.....\n\nz=0, w=-1\n.....\n.....\n.....\n.....\n.....\n\nz=1, w=-1\n.....\n.....\n.....\n.....\n.....\n\nz=2, w=-1\n.....\n.....\n.....\n.....\n.....\n\nz=-2, w=0\n###..\n##.##\n#...#\n.#..#\n.###.\n\nz=-1, w=0\n.....\n.....\n.....\n.....\n.....\n\nz=0, w=0\n.....\n.....\n.....\n.....\n.....\n\nz=1, w=0\n.....\n.....\n.....\n.....\n.....\n\nz=2, w=0\n###..\n##.##\n#...#\n.#..#\n.###.\n\nz=-2, w=1\n.....\n.....\n.....\n.....\n.....\n\nz=-1, w=1\n.....\n.....\n.....\n.....\n.....\n\nz=0, w=1\n.....\n.....\n.....\n.....\n.....\n\nz=1, w=1\n.....\n.....\n.....\n.....\n.....\n\nz=2, w=1\n.....\n.....\n.....\n.....\n.....\n\nz=-2, w=2\n.....\n.....\n..#..\n.....\n.....\n\nz=-1, w=2\n.....\n.....\n.....\n.....\n.....\n\nz=0, w=2\n###..\n##.##\n#...#\n.#..#\n.###.\n\nz=1, w=2\n.....\n.....\n.....\n.....\n.....\n\nz=2, w=2\n.....\n.....\n..#..\n.....\n.....\n\nAfter the full six-cycle boot process completes, 848 cubes are left in the active state.\n\nStarting with your given initial configuration, simulate six cycles in a 4-dimensional space. 
How many cubes are left\nin the active state after the sixth cycle?\n\n\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Dict, Iterator, Set, Tuple\n\nCoordinates = Tuple[int, int, int, int]\n\n\ndef iter_neighbors(x: int, y: int, z: int, w: int) -> Iterator[Coordinates]:\n for nx in [x - 1, x, x + 1]:\n for ny in [y - 1, y, y + 1]:\n for nz in [z - 1, z, z + 1]:\n for nw in [w - 1, w, w + 1]:\n if (nx, ny, nz, nw) == (x, y, z, w):\n continue\n\n yield nx, ny, nz, nw\n\n\n@dataclass\nclass PocketDimension:\n active_cubes: Set[Coordinates]\n\n def is_active(self, x: int, y: int, z: int, w: int) -> bool:\n return (x, y, z, w) in self.active_cubes\n\n def copy(self):\n return PocketDimension(self.active_cubes.copy())\n\n def step(self) -> 'PocketDimension':\n \"\"\"\n Returns a copy of this pocket dimension evolved by 1 step.\n \"\"\"\n new_active_cubes = set()\n # Maps each inactive which can be potentially activated to the number of active neighbors it has\n activation_candidates: Dict[Coordinates, int] = {}\n for x, y, z, w in self.active_cubes:\n neighbors_count = 0\n for nx, ny, nz, nw in iter_neighbors(x, y, z, w):\n if self.is_active(nx, ny, nz, nw):\n neighbors_count += 1\n else:\n activation_candidates.setdefault((nx, ny, nz, nw), 0)\n activation_candidates[(nx, ny, nz, nw)] += 1\n\n if neighbors_count == 2 or neighbors_count == 3:\n new_active_cubes.add((x, y, z, w))\n\n for (x, y, z, w), neighbors_count in activation_candidates.items():\n if neighbors_count == 3:\n new_active_cubes.add((x, y, z, w))\n\n return PocketDimension(new_active_cubes)\n\n\ndef parse_pocket_dimensions(content: str) -> PocketDimension:\n content = content.strip()\n\n active_cubes = set()\n for line_ix, line in enumerate(content.split('\\n')):\n line = line.strip()\n for char_ix, char in enumerate(line):\n if char == '.':\n continue\n elif char == '#':\n active_cubes.add((char_ix, line_ix, 0, 0))\n\n return PocketDimension(active_cubes)\n\n\ndef main():\n with open('./input.txt') as f:\n puzzle_input = f.read()\n original_dimension = parse_pocket_dimensions(puzzle_input)\n\n dimension = original_dimension\n cycles = 6\n\n for i in range(cycles):\n dimension = dimension.step()\n print(len(dimension.active_cubes)) # Answer = 2240\n\n\nif __name__ == \"__main__\":\n main() # Original solution by Oleg Yam\n"
},
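Editor's note: a quick sanity check (illustrative sketch, not repository code) that an iter_neighbors-style enumeration yields the 80 neighbours quoted in the puzzle text for four dimensions, and 26 for the three-dimensional part 1:

from itertools import product

def neighbour_count(dims: int) -> int:
    # All offset vectors in {-1, 0, 1}**dims except the all-zero vector.
    return sum(1 for d in product((-1, 0, 1), repeat=dims) if any(d))

assert neighbour_count(3) == 26 and neighbour_count(4) == 80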
{
"alpha_fraction": 0.5346630811691284,
"alphanum_fraction": 0.5684748291969299,
"avg_line_length": 41.73404312133789,
"blob_id": "b5b47100b2cad470a0824a0c73b04ff6c78a7dc1",
"content_id": "3dbb9595fcc57d9ea8b0e0297fabfe3a31e3ec91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4111,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 94,
"path": "/Day 13/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 13: Shuttle Search ---\r\n\r\nYour ferry can make it safely to a nearby port, but it won't get much further. When you call to book another ship,\r\nyou discover that no ships embark from that port to your vacation island.\r\nYou'll need to get from the port to the nearest airport.\r\n\r\nFortunately, a shuttle bus service is available to bring you from the sea port to the airport!\r\nEach bus has an ID number that also indicates how often the bus leaves for the airport.\r\n\r\nBus schedules are defined based on a timestamp that measures the number of minutes since some fixed reference point in\r\nthe past. At timestamp 0, every bus simultaneously departed from the sea port.\r\nAfter that, each bus travels to the airport, then various other locations, and finally returns to the sea port to\r\nrepeat its journey forever.\r\n\r\nThe time this loop takes a particular bus is also its ID number: the bus with ID 5 departs from the sea port at\r\ntimestamps 0, 5, 10, 15, and so on. The bus with ID 11 departs at 0, 11, 22, 33, and so on.\r\nIf you are there when the bus departs, you can ride that bus to the airport!\r\n\r\nYour notes (your puzzle input) consist of two lines. The first line is your estimate of the earliest timestamp you\r\ncould depart on a bus. The second line lists the bus IDs that are in service according to the shuttle company;\r\nentries that show x must be out of service, so you decide to ignore them.\r\n\r\nTo save time once you arrive, your goal is to figure out the earliest bus you can take to the airport.\r\n(There will be exactly one such bus.)\r\n\r\nFor example, suppose you have the following notes:\r\n\r\n939\r\n7,13,x,x,59,x,31,19\r\n\r\nHere, the earliest timestamp you could depart is 939, and the bus IDs in service are 7, 13, 59, 31, and 19.\r\nNear timestamp 939, these bus IDs depart at the times marked D:\r\n\r\ntime bus 7 bus 13 bus 59 bus 31 bus 19\r\n929 . . . . .\r\n930 . . . D .\r\n931 D . . . D\r\n932 . . . . .\r\n933 . . . . .\r\n934 . . . . .\r\n935 . . . . .\r\n936 . D . . .\r\n937 . . . . .\r\n938 D . . . .\r\n939 . . . . .\r\n940 . . . . .\r\n941 . . . . .\r\n942 . . . . .\r\n943 . . . . .\r\n944 . . D . .\r\n945 D . . . .\r\n946 . . . . .\r\n947 . . . . .\r\n948 . . . . .\r\n949 . D . . .\r\n\r\nThe earliest bus you could take is bus ID 59. 
It doesn't depart until timestamp 944,\r\nso you would need to wait 944 - 939 = 5 minutes before it departs.\r\nMultiplying the bus ID by the number of minutes you'd need to wait gives 295.\r\n\r\nWhat is the ID of the earliest bus you can take to the airport multiplied by the number of minutes you'll need to wait\r\nfor that bus?\r\n\r\n\"\"\"\r\nfrom typing import Sequence\r\n\r\n\r\ndef find_soonest_bus(data: Sequence[str]) -> int:\r\n \"\"\"\r\n The soonest bus is the one greater than but closest to the timestamp.\r\n Multiply each bus number by the ceiling division of the timestamp and bus.\r\n Find the bus by the index of the smallest time.\r\n Find the wait time my subtracting the timestamp from the smallest time.\r\n Return the product of the bus number and the wait time.\r\n \"\"\"\r\n timestamp = int(data[0].strip())\r\n buses = [int(x) for x in data[1].split(\",\") if x.isdecimal()]\r\n times = [bus * (-(timestamp // -bus)) for bus in buses]\r\n min_time = min(times)\r\n bus = buses[times.index(min_time)]\r\n wait_time = min_time - timestamp\r\n return bus * wait_time\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n print(find_soonest_bus(puzzle_input)) # Answer = 3269\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
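Editor's note: the expression -(timestamp // -bus) in the record above is integer ceiling division. Run on the worked example quoted in the docstring (timestamp 939, buses 7, 13, 59, 31, 19), the technique reproduces the quoted answer of 295; a short illustrative sketch:

timestamp = 939
buses = [7, 13, 59, 31, 19]
# Next departure at or after the timestamp, per bus.
departures = {bus: bus * -(timestamp // -bus) for bus in buses}
bus, leaves_at = min(departures.items(), key=lambda kv: kv[1])
assert (bus, leaves_at) == (59, 944)
assert bus * (leaves_at - timestamp) == 295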
{
"alpha_fraction": 0.6426166296005249,
"alphanum_fraction": 0.655363142490387,
"avg_line_length": 32.3636360168457,
"blob_id": "a67be2c9ea0cf35818e8d38ae637701c71bbc715",
"content_id": "e1fa8abfbb3245527b0b051934b24dfc02a77dba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4158,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 121,
"path": "/Day 8/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nAfter some careful analysis, you believe that exactly one instruction is corrupted.\r\n\r\nSomewhere in the program, either a jmp is supposed to be a nop, or a nop is supposed to be a jmp.\r\n(No acc instructions were harmed in the corruption of this boot code.)\r\n\r\nThe program is supposed to terminate by attempting to execute an instruction immediately after the last instruction in\r\nthe file. By changing exactly one jmp or nop, you can repair the boot code and make it terminate correctly.\r\n\r\nFor example, consider the same program from above:\r\n\r\nnop +0\r\nacc +1\r\njmp +4\r\nacc +3\r\njmp -3\r\nacc -99\r\nacc +1\r\njmp -4\r\nacc +6\r\n\r\nIf you change the first instruction from nop +0 to jmp +0, it would create a single-instruction infinite loop,\r\nnever leaving that instruction. If you change almost any of the jmp instructions, the program will still eventually\r\nfind another jmp instruction and loop forever.\r\n\r\nHowever, if you change the second-to-last instruction (from jmp -4 to nop -4), the program terminates! The instructions\r\nare visited in this order:\r\n\r\nnop +0 | 1\r\nacc +1 | 2\r\njmp +4 | 3\r\nacc +3 |\r\njmp -3 |\r\nacc -99 |\r\nacc +1 | 4\r\nnop -4 | 5\r\nacc +6 | 6\r\n\r\nAfter the last instruction (acc +6), the program terminates by attempting to run the instruction below the last\r\ninstruction in the file. With this change, after the program terminates, the accumulator contains the value 8\r\n(acc +1, acc +1, acc +6).\r\n\r\nFix the program so that it terminates normally by changing exactly one jmp (to nop) or nop (to jmp).\r\nWhat is the value of the accumulator after the program terminates?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence, Optional, Tuple\r\nimport copy\r\n\r\n\r\ndef find_correct_loop(instructions_set: List[Tuple[str, ...]]) -> Optional[int]:\r\n \"\"\"\r\n Checks the index of the instruction to the length of the entire instruction list.\r\n # If the index is greater than or equal to the length of the instruction list, return the accumulator.\r\n :param instructions_set: List[Tuple[str, ...]] - A list of tuples of the instructions in the puzzle input.\r\n :return: Optional[int] - Accumulator.\r\n \"\"\"\r\n length_of_instruction_set = len(instructions_set)\r\n accumulator = 0\r\n visited_indices = [0]\r\n while True:\r\n index = visited_indices[-1]\r\n instruction = instructions_set[index]\r\n\r\n if instruction[0] == 'jmp':\r\n index = index + int(instruction[1])\r\n\r\n if instruction[0] == 'acc':\r\n accumulator += int(instruction[1])\r\n index += 1\r\n\r\n if instruction[0] == 'nop':\r\n index += 1\r\n\r\n if index in visited_indices:\r\n return\r\n # Break if the index >= the total length of the instruction list and return the accumulated value.\r\n if index >= length_of_instruction_set:\r\n return accumulator\r\n\r\n visited_indices.append(index)\r\n\r\n\r\ndef fix_infinite_loop(puzzle_input: Sequence[str]) -> Optional[int]:\r\n \"\"\"\r\n Changes instances of \"jmp\" and \"nop\" and runs to see which one runs the whole set of instructions without repeating.\r\n The function returns the accumulator if the code runs without infinite loop.\r\n :param puzzle_input: Sequence[str] - The puzzle input.\r\n :return: Optional[int] - The accumulator.\r\n \"\"\"\r\n instruction_set = [tuple(i.split(\" \")) for i in puzzle_input]\r\n\r\n for i in range(0, len(puzzle_input)):\r\n # Making a shallow copy of the instruction_set.\r\n copy_of_instructions = copy.copy(instruction_set)\r\n 
signed_integer = copy_of_instructions[i][1]\r\n\r\n if copy_of_instructions[i][0] == 'jmp':\r\n copy_of_instructions[i] = ('nop', signed_integer)\r\n\r\n elif copy_of_instructions[i][0] == 'nop':\r\n copy_of_instructions[i] = ('jmp', signed_integer)\r\n\r\n accumulator = find_correct_loop(copy_of_instructions)\r\n\r\n if accumulator:\r\n return accumulator\r\n\r\n\r\ndef main():\r\n\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n print(fix_infinite_loop(puzzle_input)) # Answer = 2060\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.7768836617469788,
"alphanum_fraction": 0.7841989994049072,
"avg_line_length": 79.35294342041016,
"blob_id": "7845a9e794b82b35b76f992bbaef50da23a004a0",
"content_id": "444980b37de5809cd8d2064adafb7e57afbb0c9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 17,
"path": "/README.md",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "# aoc2020\n\nMy solutions to `Advent of Code 2020`. \nI participated in and completed the challenges in the required time, but some solutions were slow, inefficient or not conventional.\nI upload the files after I have improved the algorithms and or naming conventions -- Not to say I have the best solutions. Some are still very slow TBH.\n\n\n**The emphasis of the code in this repository lies in verbosity (to a certain degree) and not speed.**\nNot to say speed was not necessary, but if possible, speed was traded off for understanding.\n\n\nAnd now, the introduction from the AoC challenge.\n\n> After saving Christmas five years in a row, you've decided to take a vacation at a nice resort on a tropical island. Surely, Christmas will go on without you.\nThe tropical island has its own currency and is entirely cash-only. The gold coins used there have a little picture of a starfish; the locals just call them stars. None of the currency exchanges seem to have heard of them, but somehow, you'll need to find fifty of these coins by the time you arrive so you can pay the deposit on your room.\nTo save your vacation, you need to get all fifty stars by December 25th.\nCollect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!\n\n"
},
{
"alpha_fraction": 0.6234350204467773,
"alphanum_fraction": 0.6375601887702942,
"avg_line_length": 33,
"blob_id": "ea872561c1e9b933e064c6d4044f7ef2582296e6",
"content_id": "d371d6fdbf813915521dc9dfa2302ca0d778c693",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3115,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 89,
"path": "/Day 7/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nIt's getting pretty expensive to fly these days - not because of ticket prices, but because of the ridiculous number of\r\nbags you need to buy!\r\n\r\nConsider again your shiny gold bag and the rules from the above example:\r\n\r\n faded blue bags contain 0 other bags.\r\n dotted black bags contain 0 other bags.\r\n vibrant plum bags contain 11 other bags: 5 faded blue bags and 6 dotted black bags.\r\n dark olive bags contain 7 other bags: 3 faded blue bags and 4 dotted black bags.\r\n\r\nSo, a single shiny gold bag must contain 1 dark olive bag (and the 7 bags within it) plus 2 vibrant plum bags\r\n(and the 11 bags within each of those): 1 + 1*7 + 2 + 2*11 = 32 bags!\r\n\r\nOf course, the actual rules have a small chance of going several levels deeper than this example;\r\nbe sure to count all of the bags, even if the nesting becomes topologically impractical!\r\n\r\nHere's another example:\r\n\r\nshiny gold bags contain 2 dark red bags.\r\ndark red bags contain 2 dark orange bags.\r\ndark orange bags contain 2 dark yellow bags.\r\ndark yellow bags contain 2 dark green bags.\r\ndark green bags contain 2 dark blue bags.\r\ndark blue bags contain 2 dark violet bags.\r\ndark violet bags contain no other bags.\r\n\r\nIn this example, a single shiny gold bag must contain 126 other bags.\r\n\r\nHow many individual bags are required inside your single shiny gold bag?\r\n\r\n\"\"\"\r\n\r\nfrom typing import Dict, Union\r\n\r\nwith open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n\r\ndef bags_in(desired_bag: str) -> Union[Dict[str, int], dict]:\r\n \"\"\"\r\n Finds all the bags in a desired bag.\r\n Returns a dictionary of {bag_name : number_of_bags_it_can_contain}.\r\n If the desired bag does not contain any bag, it returns an empty dictionary.\r\n :param desired_bag: str - The bags inside this bag will be found.\r\n :return: Union[Dict[str, int], dict] - All bags inside the desired bag.\r\n \"\"\"\r\n bags = {}\r\n for line in puzzle_input:\r\n if line.startswith(desired_bag) and (\"no other\" in line):\r\n return {}\r\n if line.startswith(desired_bag):\r\n contain_end_index = line.index(\" contain \") + len(\" contain \")\r\n content = line[contain_end_index:-1] # -1 to remove trailing .\r\n content = content.split(\", \")\r\n for bag in content:\r\n can_contain = int(bag[0])\r\n bag = bag[2: bag.index(\" bag\")]\r\n bags[bag] = can_contain\r\n return bags\r\n \r\n \r\ndef count_bags_inside(bag: str) -> int:\r\n \"\"\"\r\n Recursively counts the number of bags inside a bag.\r\n\r\n :param bag: str - The bag to be whose contents should be counted.\r\n :return: int - The total number of bags inside the bag.\r\n \"\"\"\r\n\r\n bags = bags_in(bag)\r\n count = 0\r\n if bags == {}:\r\n return 0\r\n for bag in bags:\r\n count += bags[bag]\r\n count += bags[bag] * count_bags_inside(bag)\r\n return count\r\n\r\n\r\ndef main():\r\n my_bag = \"shiny gold\"\r\n print(count_bags_inside(my_bag)) # Answer = 176035\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
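Editor's note: the second example quoted above is a chain that doubles at each of its six nested colours (dark red through dark violet), so the recursion reduces to a geometric series. A one-line check (editor's arithmetic, not repository code):

# 2 dark red + 4 dark orange + ... + 64 dark violet = 2**7 - 2 = 126 bags.
assert sum(2 ** k for k in range(1, 7)) == 2 ** 7 - 2 == 126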
{
"alpha_fraction": 0.6423887610435486,
"alphanum_fraction": 0.678922712802887,
"avg_line_length": 42.0206184387207,
"blob_id": "e22c607f3ff1ed452448c2cdae292e9f1293c520",
"content_id": "842eab85373371fe4ce2de2e2d767f5b8778be5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4270,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 97,
"path": "/Day 15/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 15: Rambunctious Recitation ---\r\n\r\nYou catch the airport shuttle and try to book a new flight to your vacation island. Due to the storm,\r\nall direct flights have been cancelled, but a route is available to get around the storm. You take it.\r\n\r\nWhile you wait for your flight, you decide to check in with the Elves back at the North Pole.\r\nThey're playing a memory game and are ever so excited to explain the rules!\r\n\r\nIn this game, the players take turns saying numbers. They begin by taking turns reading from a list of starting numbers\r\n(your puzzle input). Then, each turn consists of considering the most recently spoken number:\r\n\r\n If that was the first time the number has been spoken, the current player says 0.\r\n Otherwise, the number had been spoken before; the current player announces how many turns apart the number is from\r\n when it was previously spoken.\r\n\r\nSo, after the starting numbers, each turn results in that player speaking aloud either 0 (if the last number is new) or\r\nan age (if the last number is a repeat).\r\n\r\nFor example, suppose the starting numbers are 0,3,6:\r\n\r\n Turn 1: The 1st number spoken is a starting number, 0.\r\n Turn 2: The 2nd number spoken is a starting number, 3.\r\n Turn 3: The 3rd number spoken is a starting number, 6.\r\n Turn 4: Now, consider the last number spoken, 6. Since that was the first time the number had been spoken,\r\n the 4th number spoken is 0.\r\n Turn 5: Next, again consider the last number spoken, 0. Since it had been spoken before, the next number to speak\r\n is the difference between the turn number when it was last spoken (the previous turn, 4) and the turn number of the\r\n time it was most recently spoken before then (turn 1). Thus, the 5th number spoken is 4 - 1, 3.\r\n Turn 6: The last number spoken, 3 had also been spoken before, most recently on turns 5 and 2. So, the 6th number\r\n spoken is 5 - 2, 3.\r\n Turn 7: Since 3 was just spoken twice in a row, and the last two turns are 1 turn apart, the 7th number spoken is 1.\r\n Turn 8: Since 1 is new, the 8th number spoken is 0.\r\n Turn 9: 0 was last spoken on turns 8 and 4, so the 9th number spoken is the difference between them, 4.\r\n Turn 10: 4 is new, so the 10th number spoken is 0.\r\n\r\n(The game ends when the Elves get sick of playing or dinner is ready, whichever comes first.)\r\n\r\nTheir question for you is: what will be the 2020th number spoken?\r\nIn the example above, the 2020th number spoken will be 436.\r\n\r\nHere are a few more examples:\r\n\r\n Given the starting numbers 1,3,2, the 2020th number spoken is 1.\r\n Given the starting numbers 2,1,3, the 2020th number spoken is 10.\r\n Given the starting numbers 1,2,3, the 2020th number spoken is 27.\r\n Given the starting numbers 2,3,1, the 2020th number spoken is 78.\r\n Given the starting numbers 3,2,1, the 2020th number spoken is 438.\r\n Given the starting numbers 3,1,2, the 2020th number spoken is 1836.\r\n\r\nGiven your starting numbers, what will be the 2020th number spoken?\r\n\r\nYour puzzle input is 15,5,1,4,7,0.\r\n\"\"\"\r\n\r\nfrom typing import Union, Sequence\r\n\r\n\r\ndef play_numbers_game(starting_numbers: Sequence[int], end_at_turn: int) -> int:\r\n turns_played = 0\r\n last_number: Union[int, None] = None\r\n # Maps each number to the last two turns when it was spoken. 
Each turn number is 0-based.\r\n spoken_at_turns = {}\r\n\r\n def say(_number: int) -> None:\r\n nonlocal last_number\r\n last_number = _number\r\n\r\n _, _last_turn = spoken_at_turns.get(_number, (None, None))\r\n spoken_at_turns[_number] = (_last_turn, turns_played)\r\n\r\n for number in starting_numbers:\r\n say(number)\r\n turns_played += 1\r\n\r\n while turns_played < end_at_turn:\r\n second_to_last_turn, last_turn = spoken_at_turns[last_number]\r\n if second_to_last_turn is None:\r\n say(0)\r\n turns_played += 1\r\n continue\r\n else:\r\n age = last_turn - second_to_last_turn\r\n say(age)\r\n turns_played += 1\r\n continue\r\n\r\n return last_number\r\n\r\n\r\ndef main():\r\n starting_numbers = [15, 5, 1, 4, 7, 0]\r\n print(play_numbers_game(starting_numbers, 2020)) # Answer = 1259\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
},
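Editor's note: the record above stores the last two turns per number; the same game can be driven by a single dict of last-seen turns. A compact sketch (the function and variable names are this note's own), checked against two of the examples quoted in the docstring:

def numbers_game(start, end_at_turn):
    # Map each starting number (except the most recent) to the 1-based turn it was said.
    last_seen = {n: i + 1 for i, n in enumerate(start[:-1])}
    last = start[-1]
    for turn in range(len(start), end_at_turn):
        nxt = turn - last_seen[last] if last in last_seen else 0
        last_seen[last] = turn
        last = nxt
    return last

assert numbers_game([0, 3, 6], 2020) == 436
assert numbers_game([3, 1, 2], 2020) == 1836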
{
"alpha_fraction": 0.6923540234565735,
"alphanum_fraction": 0.699578583240509,
"avg_line_length": 38.5121955871582,
"blob_id": "f2cb1ae28b4923c8c820f3fe3191f4d2357497b7",
"content_id": "736e2f3c31124261743714b8efbabd9eca5a17cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3322,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 82,
"path": "/Day 2/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 2: Password Philosophy ---\r\n\r\nYour flight departs in a few days from the coastal airport; the easiest way down to the coast from here is via toboggan.\r\n\r\nThe shopkeeper at the North Pole Toboggan Rental Shop is having a bad day. \"Something's wrong with our computers;\r\nwe can't log in!\" You ask if you can take a look.\r\n\r\nTheir password database seems to be a little corrupted: some of the passwords wouldn't have been allowed by the\r\nOfficial Toboggan Corporate Policy that was in effect when they were chosen.\r\n\r\nTo try to debug the problem, they have created a list (your puzzle input) of passwords\r\n(according to the corrupted database) and the corporate policy when that password was set.\r\n\r\nFor example, suppose you have the following list:\r\n\r\n1-3 a: abcde\r\n1-3 b: cdefg\r\n2-9 c: ccccccccc\r\n\r\nEach line gives the password policy and then the password. The password policy indicates the lowest and\r\nhighest number of times a given letter must appear for the password to be valid.\r\nFor example, 1-3 a means that the password must contain a at least 1 time and at most 3 times.\r\n\r\nIn the above example, 2 passwords are valid. The middle password, cdefg, is not; it contains no instances of b,\r\nbut needs at least 1. The first and third passwords are valid:\r\nthey contain one a or nine c, both within the limits of their respective policies.\r\n\r\nHow many passwords are valid according to their policies?\r\n\r\n\"\"\"\r\nfrom typing import Union, Sequence\r\n\r\n\r\ndef is_valid(pwd: str, char: Union[str, int], minimum: int, maximum: int) -> bool:\r\n \"\"\"\r\n The function checks whether a password is valid or not.\r\n The validation is such that, the password must contain the character to be validated at least a certain number\r\n of times but not more than a certain number of times.\r\n Hence, the character occurrence is such that: minimum <= character occurrence <= maximum\r\n If the above holds true, the function returns True. Else, it returns False.\r\n :param pwd: str - The password to be validated.\r\n :param char: Union[int, str] - The character to be used for validation.\r\n :param minimum: int - Minimum occurrence of the character to be validated.\r\n :param maximum: int - Maximum occurrence of the character to be validated.\r\n :return: bool\r\n \"\"\"\r\n\r\n if char not in pwd:\r\n return False\r\n character_occurrence = pwd.count(char)\r\n if character_occurrence < minimum or character_occurrence > maximum:\r\n return False\r\n return True\r\n\r\n\r\ndef find_number_of_valid_passwords(data: Sequence[str]) -> int:\r\n number_of_valid_passwords = 0\r\n\r\n for line in data:\r\n line_content = line.split()\r\n min_and_max_occurrence = line_content[0].split(\"-\")\r\n minimum_occurrence = int(min_and_max_occurrence[0])\r\n maximum_occurrence = int(min_and_max_occurrence[1])\r\n character_to_validate = line_content[1][:-1]\r\n password = line_content[2]\r\n\r\n if is_valid(password, character_to_validate, minimum_occurrence, maximum_occurrence):\r\n number_of_valid_passwords += 1\r\n\r\n return number_of_valid_passwords\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n print(find_number_of_valid_passwords(puzzle_input)) # Answer: 519\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
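Editor's note: the three example lines quoted in the docstring above make a convenient smoke test for the counting policy. A compressed sketch of the same validation (illustrative only, not the repository's code):

def valid(line: str) -> bool:
    policy, char, pwd = line.split()
    low, high = map(int, policy.split("-"))
    return low <= pwd.count(char.rstrip(":")) <= high

assert [valid(x) for x in ("1-3 a: abcde", "1-3 b: cdefg", "2-9 c: ccccccccc")] == [True, False, True]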
{
"alpha_fraction": 0.48934218287467957,
"alphanum_fraction": 0.5649782419204712,
"avg_line_length": 40.359222412109375,
"blob_id": "fbd2bd5a9153a13f5e014492e6f4d6044baa93f1",
"content_id": "ed69fa8e41034c9b512bc32fffa5f6eaa419bcfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4363,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 103,
"path": "/Day 13/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nThe shuttle company is running a contest: one gold coin for anyone that can find the earliest timestamp such that the\r\nfirst bus ID departs at that time and each subsequent listed bus ID departs at that subsequent minute.\r\n(The first line in your input is no longer relevant.)\r\n\r\nFor example, suppose you have the same list of bus IDs as above:\r\n\r\n7,13,x,x,59,x,31,19\r\n\r\nAn x in the schedule means there are no constraints on what bus IDs must depart at that time.\r\n\r\nThis means you are looking for the earliest timestamp (called t) such that:\r\n\r\n Bus ID 7 departs at timestamp t.\r\n Bus ID 13 departs one minute after timestamp t.\r\n There are no requirements or restrictions on departures at two or three minutes after timestamp t.\r\n Bus ID 59 departs four minutes after timestamp t.\r\n There are no requirements or restrictions on departures at five minutes after timestamp t.\r\n Bus ID 31 departs six minutes after timestamp t.\r\n Bus ID 19 departs seven minutes after timestamp t.\r\n\r\nThe only bus departures that matter are the listed bus IDs at their specific offsets from t.\r\nThose bus IDs can depart at other times, and other bus IDs can depart at those times.\r\nFor example, in the list above, because bus ID 19 must depart seven minutes after the timestamp at which bus ID\r\n7 departs, bus ID 7 will always also be departing with bus ID 19 at seven minutes after timestamp t.\r\n\r\nIn this example, the earliest timestamp at which this occurs is 1068781:\r\n\r\ntime bus 7 bus 13 bus 59 bus 31 bus 19\r\n1068773 . . . . .\r\n1068774 D . . . .\r\n1068775 . . . . .\r\n1068776 . . . . .\r\n1068777 . . . . .\r\n1068778 . . . . .\r\n1068779 . . . . .\r\n1068780 . . . . .\r\n1068781 D . . . .\r\n1068782 . D . . .\r\n1068783 . . . . .\r\n1068784 . . . . .\r\n1068785 . . D . .\r\n1068786 . . . . .\r\n1068787 . . . D .\r\n1068788 D . . . D\r\n1068789 . . . . .\r\n1068790 . . . . .\r\n1068791 . . . . .\r\n1068792 . . . . .\r\n1068793 . . . . .\r\n1068794 . . . . .\r\n1068795 D D . . .\r\n1068796 . . . . .\r\n1068797 . . . . .\r\n\r\nIn the above example, bus ID 7 departs at timestamp 1068788 (seven minutes after t). 
This is fine; the only requirement\r\non that minute is that bus ID 19 departs then, and it does.\r\n\r\nHere are some other examples:\r\n\r\n The earliest timestamp that matches the list 17,x,13,19 is 3417.\r\n 67,7,59,61 first occurs at timestamp 754018.\r\n 67,x,7,59,61 first occurs at timestamp 779210.\r\n 67,7,x,59,61 first occurs at timestamp 1261476.\r\n 1789,37,47,1889 first occurs at timestamp 1202161486.\r\n\r\nHowever, with so many bus IDs in your list, surely the actual earliest timestamp will be larger than 100000000000000!\r\n\r\nWhat is the earliest timestamp such that all of the listed bus IDs depart at offsets matching their positions\r\nin the list?\r\n\r\n\"\"\"\r\nfrom typing import Sequence\r\n\r\n\r\ndef find_subsequent_departures(data: Sequence[str]) -> int:\r\n \"\"\"\r\n This solution jumps the increment up by the bus number after finding a timestamp\r\n that satisfies the (timestamp + offset) % bus requirement.\r\n By the time the last bus is calculated, the timestamp will satisfy all the requirements.\r\n \"\"\"\r\n array = data[1].strip().split(\",\")\r\n bus_schedule = [(int(x), array.index(x)) for x in array if x.isdecimal()]\r\n timestamp = 0\r\n increment = 1\r\n for (bus, offset) in bus_schedule:\r\n while (timestamp + offset) % bus:\r\n timestamp += increment\r\n increment *= bus\r\n return timestamp\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n print(find_subsequent_departures(puzzle_input)) # Answer = 672_754_131_923_874\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
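Editor's note: the increment-jumping sieve above amounts to solving simultaneous congruences; assuming, as in this puzzle's inputs, the bus IDs are pairwise coprime, the Chinese Remainder Theorem guarantees a unique answer modulo their product. A sketch of the same sieve (standalone rewrite by the editor) run on examples quoted in the docstring:

def earliest_timestamp(schedule: str) -> int:
    buses = [(int(b), i) for i, b in enumerate(schedule.split(",")) if b != "x"]
    t, step = 0, 1
    for bus, offset in buses:
        while (t + offset) % bus:
            t += step
        step *= bus  # t now satisfies this bus forever after
    return t

assert earliest_timestamp("7,13,x,x,59,x,31,19") == 1068781
assert earliest_timestamp("17,x,13,19") == 3417
assert earliest_timestamp("1789,37,47,1889") == 1202161486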
{
"alpha_fraction": 0.4280954897403717,
"alphanum_fraction": 0.4314269721508026,
"avg_line_length": 19.23595428466797,
"blob_id": "81dbede7f3fc8a5a31ad3444d851fe347b91c707",
"content_id": "ef41df830fd421bf6d31ff14d42e49876e4b7443",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 89,
"path": "/cppheaders/functions.cpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "#include \"./functions.hpp\"\n#include <iostream>\n\nint functions::count(std::string str, const std::string value)\n{\n int number = 0;\n\n do\n {\n if ((str.find(value)) == str.npos)\n {\n return number;\n }\n\n int pos = str.find(value);\n ++number;\n str = str.substr(pos + 1);\n } while (true);\n return number;\n}\n\nint functions::count(std::string str, const char value)\n{\n int number = 0;\n\n do\n {\n if ((str.find(value)) == str.npos)\n {\n return number;\n }\n\n int pos = str.find(value);\n ++number;\n str = str.substr(pos + 1);\n } while (true);\n return number;\n}\n\nstd::vector<std::string> functions::split(std::string str, const std::string delimeter = \" \")\n{\n std::vector<std::string> results;\n\n while (true)\n {\n\n if (str.find(delimeter) == str.npos)\n {\n results.push_back(str);\n return results;\n }\n else\n {\n std::string buffer = \"\";\n for (int i = 0; i < str.find(delimeter); i++)\n {\n buffer += str[i];\n }\n if (!buffer.empty())\n {\n results.push_back(buffer);\n std::string new_string = \"\";\n\n for (int j = str.find(delimeter) + 1; j < (str.length()) ; j++)\n {\n new_string += str[j];\n }\n str = new_string;\n }\n }\n }\n}\n\n\ntemplate <typename T>\nvoid functions::print(const std::vector<T> list)\n{\n std::cout << \"[\";\n for (auto item: list)\n {\n std::cout << \"\\\"\" << item << \"\\\"\";\n\n if (!(item == list.back()))\n {\n std::cout << \", \";\n }\n }\n std::cout << \"]\\n\";\n}\n"
},
{
"alpha_fraction": 0.5982710123062134,
"alphanum_fraction": 0.6095624566078186,
"avg_line_length": 22.134693145751953,
"blob_id": "2d0d286a5b4c65e9b3d102123eb28055e6a1155e",
"content_id": "35b33381d3e8f49a4a75e66a332a696c3051e577",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5668,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 245,
"path": "/Day 17/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 17: Conway Cubes ---\n\nAs your flight slowly drifts through the sky, the Elves at the Mythical Information Bureau at the North Pole contact you\nThey'd like some help debugging a malfunctioning experimental energy source aboard one of their super-secret imaging\nsatellites.\n\nThe experimental energy source is based on cutting-edge technology: a set of Conway Cubes contained in a\npocket dimension! When you hear it's having problems, you can't help but agree to take a look.\n\nThe pocket dimension contains an infinite 3-dimensional grid. At every integer 3-dimensional coordinate (x,y,z),\nthere exists a single cube which is either active or inactive.\n\nIn the initial state of the pocket dimension, almost all cubes start inactive.\nThe only exception to this is a small flat region of cubes (your puzzle input); the cubes in this region start in the\nspecified active (#) or inactive (.) state.\n\nThe energy source then proceeds to boot up by executing six cycles.\n\nEach cube only ever considers its neighbors:\nany of the 26 other cubes where any of their coordinates differ by at most 1.\nFor example, given the cube at x=1,y=2,z=3, its neighbors include the cube at x=2,y=2,z=2, the cube at x=0,y=2,z=3,\nand so on.\n\nDuring a cycle, all cubes simultaneously change their state according to the following rules:\n\n If a cube is active and exactly 2 or 3 of its neighbors are also active, the cube remains active.\n Otherwise, the cube becomes inactive.\n If a cube is inactive but exactly 3 of its neighbors are active, the cube becomes active.\n Otherwise, the cube remains inactive.\n\nThe engineers responsible for this experimental energy source would like you to simulate the pocket dimension and\ndetermine what the configuration of cubes should be at the end of the six-cycle boot process.\n\nFor example, consider the following initial state:\n\n.#.\n..#\n###\n\nEven though the pocket dimension is 3-dimensional, this initial state represents a small 2-dimensional slice of it.\n(In particular, this initial state defines a 3x3x1 region of the 3-dimensional space.)\n\nSimulating a few cycles from this initial state produces the following configurations, where the result of each cycle\nis shown layer-by-layer at each given z coordinate (and the frame of view follows the active cells in each cycle):\n\nBefore any cycles:\n\nz=0\n.#.\n..#\n###\n\n\nAfter 1 cycle:\n\nz=-1\n#..\n..#\n.#.\n\nz=0\n#.#\n.##\n.#.\n\nz=1\n#..\n..#\n.#.\n\n\nAfter 2 cycles:\n\nz=-2\n.....\n.....\n..#..\n.....\n.....\n\nz=-1\n..#..\n.#..#\n....#\n.#...\n.....\n\nz=0\n##...\n##...\n#....\n....#\n.###.\n\nz=1\n..#..\n.#..#\n....#\n.#...\n.....\n\nz=2\n.....\n.....\n..#..\n.....\n.....\n\n\nAfter 3 cycles:\n\nz=-2\n.......\n.......\n..##...\n..###..\n.......\n.......\n.......\n\nz=-1\n..#....\n...#...\n#......\n.....##\n.#...#.\n..#.#..\n...#...\n\nz=0\n...#...\n.......\n#......\n.......\n.....##\n.##.#..\n...#...\n\nz=1\n..#....\n...#...\n#......\n.....##\n.#...#.\n..#.#..\n...#...\n\nz=2\n.......\n.......\n..##...\n..###..\n.......\n.......\n.......\n\nAfter the full six-cycle boot process completes, 112 cubes are left in the active state.\n\nStarting with your given initial configuration, simulate six cycles.\nHow many cubes are left in the active state after the sixth cycle?\n\n\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Dict, Iterator, Set, Tuple\n\nCoordinates = Tuple[int, int, int]\n\n\ndef iter_neighbors(x: int, y: int, z: int) -> Iterator[Coordinates]:\n for nx in [x - 1, x, x 
+ 1]:\n for ny in [y - 1, y, y + 1]:\n for nz in [z - 1, z, z + 1]:\n if (nx, ny, nz) == (x, y, z):\n continue\n\n yield nx, ny, nz\n\n\n@dataclass\nclass PocketDimension:\n active_cubes: Set[Coordinates]\n\n def is_active(self, x: int, y: int, z: int) -> bool:\n return (x, y, z) in self.active_cubes\n\n def copy(self):\n return PocketDimension(self.active_cubes.copy())\n\n def step(self) -> 'PocketDimension':\n \"\"\"\n Returns a copy of this pocket dimension evolved by 1 step.\n \"\"\"\n new_active_cubes = set()\n # Maps each inactive which can be potentially activated to the number of active neighbors it has\n activation_candidates: Dict[Coordinates, int] = {}\n for x, y, z in self.active_cubes:\n neighbors_count = 0\n for nx, ny, nz in iter_neighbors(x, y, z):\n if self.is_active(nx, ny, nz):\n neighbors_count += 1\n else:\n activation_candidates.setdefault((nx, ny, nz), 0)\n activation_candidates[(nx, ny, nz)] += 1\n\n if neighbors_count == 2 or neighbors_count == 3:\n new_active_cubes.add((x, y, z))\n\n for (x, y, z), neighbors_count in activation_candidates.items():\n if neighbors_count == 3:\n new_active_cubes.add((x, y, z))\n\n return PocketDimension(new_active_cubes)\n\n\ndef parse_pocket_dimensions(content: str) -> PocketDimension:\n content = content.strip()\n\n active_cubes = set()\n for line_ix, line in enumerate(content.split('\\n')):\n line = line.strip()\n for char_ix, char in enumerate(line):\n if char == '.':\n continue\n elif char == '#':\n active_cubes.add((char_ix, line_ix, 0))\n\n return PocketDimension(active_cubes)\n\n\ndef main():\n with open('./input.txt') as f:\n puzzle_input = f.read()\n original_dimension = parse_pocket_dimensions(puzzle_input)\n\n cycles = 6\n\n dimension = original_dimension\n for _ in range(cycles):\n dimension = dimension.step()\n print(len(dimension.active_cubes)) # Answer = 284\n\n\nif __name__ == \"__main__\":\n main() # Original Solution by Oleg Yam\n"
},
{
"alpha_fraction": 0.57503342628479,
"alphanum_fraction": 0.5830625295639038,
"avg_line_length": 28.59064292907715,
"blob_id": "bb7e07af644fe271f68b14a9bc8ab1c5897cf5c0",
"content_id": "94e9a27833ee6535f135348b947a89a5dbeacdde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5231,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 171,
"path": "/Day 11/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 11: Seating System ---\r\n\r\nYour plane lands with plenty of time to spare. The final leg of your journey is a ferry that goes directly to the\r\ntropical island where you can finally start your vacation. As you reach the waiting area to board the ferry, you\r\nrealize you're so early, nobody else has even arrived yet!\r\n\r\nBy modeling the process people use to choose (or abandon) their seat in the waiting area, you're pretty sure you can\r\npredict the best place to sit. You make a quick map of the seat layout (your puzzle input).\r\n\r\nThe seat layout fits neatly on a grid. Each position is either floor (.), an empty seat (L), or an occupied seat (#).\r\nFor example, the initial seat layout might look like this:\r\n\r\nL.LL.LL.LL\r\nLLLLLLL.LL\r\nL.L.L..L..\r\nLLLL.LL.LL\r\nL.LL.LL.LL\r\nL.LLLLL.LL\r\n..L.L.....\r\nLLLLLLLLLL\r\nL.LLLLLL.L\r\nL.LLLLL.LL\r\n\r\nNow, you just need to model the people who will be arriving shortly. Fortunately, people are entirely predictable and\r\nalways follow a simple set of rules. All decisions are based on the number of occupied seats adjacent to a given seat\r\n(one of the eight positions immediately up, down, left, right, or diagonal from the seat).\r\nThe following rules are applied to every seat simultaneously:\r\n\r\n If a seat is empty (L) and there are no occupied seats adjacent to it, the seat becomes occupied.\r\n If a seat is occupied (#) and four or more seats adjacent to it are also occupied, the seat becomes empty.\r\n Otherwise, the seat's state does not change.\r\n\r\nFloor (.) never changes; seats don't move, and nobody sits on the floor.\r\n\r\nAfter one round of these rules, every seat in the example layout becomes occupied:\r\n\r\n#.##.##.##\r\n#######.##\r\n#.#.#..#..\r\n####.##.##\r\n#.##.##.##\r\n#.#####.##\r\n..#.#.....\r\n##########\r\n#.######.#\r\n#.#####.##\r\n\r\nAfter a second round, the seats with four or more occupied adjacent seats become empty again:\r\n\r\n#.LL.L#.##\r\n#LLLLLL.L#\r\nL.L.L..L..\r\n#LLL.LL.L#\r\n#.LL.LL.LL\r\n#.LLLL#.##\r\n..L.L.....\r\n#LLLLLLLL#\r\n#.LLLLLL.L\r\n#.#LLLL.##\r\n\r\nThis process continues for three more rounds:\r\n\r\n#.##.L#.##\r\n#L###LL.L#\r\nL.#.#..#..\r\n#L##.##.L#\r\n#.##.LL.LL\r\n#.###L#.##\r\n..#.#.....\r\n#L######L#\r\n#.LL###L.L\r\n#.#L###.##\r\n\r\n#.#L.L#.##\r\n#LLL#LL.L#\r\nL.L.L..#..\r\n#LLL.##.L#\r\n#.LL.LL.LL\r\n#.LL#L#.##\r\n..L.L.....\r\n#L#LLLL#L#\r\n#.LLLLLL.L\r\n#.#L#L#.##\r\n\r\n#.#L.L#.##\r\n#LLL#LL.L#\r\nL.#.L..#..\r\n#L##.##.L#\r\n#.#L.LL.LL\r\n#.#L#L#.##\r\n..L.L.....\r\n#L#L##L#L#\r\n#.LLLLLL.L\r\n#.#L#L#.##\r\n\r\nAt this point, something interesting happens: the chaos stabilizes and further applications of these rules cause no\r\nseats to change state! 
Once people stop moving around, you count 37 occupied seats.\r\n\r\nSimulate your seating area by applying the seating rules repeatedly until no seats change state.\r\nHow many seats end up occupied?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence\r\n\r\n\r\ndef switches(matrix: Sequence[Sequence[str]]) -> List[List[str]]:\r\n \"\"\"\r\n Makes an array out of each seat surrounding the current seat and checks to see if there is >= 4 or 0\r\n occupied seats and updates the seats as needed.\r\n\r\n :param matrix:\r\n :return: List[List[str]] - New Matrix\r\n \"\"\"\r\n new_matrix = []\r\n for (i, x) in enumerate(matrix):\r\n new_row = []\r\n for (j, y) in enumerate(x):\r\n adjacent_seats = [\r\n matrix[i - 1][j - 1] if ((i - 1 >= 0) and (j - 1 >= 0)) else None,\r\n matrix[i - 1][j] if (i - 1 >= 0) else None,\r\n matrix[i - 1][j + 1] if ((i - 1 >= 0) and (j + 1 < len(x))) else None,\r\n matrix[i][j - 1] if (j - 1 >= 0) else None,\r\n matrix[i][j + 1] if (j + 1 < len(x)) else None,\r\n matrix[i + 1][j - 1] if (i + 1 < len(matrix) and (j - 1 >= 0)) else None,\r\n matrix[i + 1][j] if (i + 1 < len(matrix)) else None,\r\n matrix[i + 1][j + 1] if (i + 1 < len(matrix) and (j + 1 < len(x))) else None\r\n ]\r\n if (adjacent_seats.count(\"#\") >= 4) and (y == \"#\"):\r\n new_row.append(\"L\")\r\n elif (adjacent_seats.count(\"#\") == 0) and (y == \"L\"):\r\n new_row.append(\"#\")\r\n else:\r\n new_row.append(y)\r\n new_matrix.append(new_row)\r\n return new_matrix\r\n\r\n\r\ndef switch_loop(matrix: Sequence[Sequence[str]], old_matrix: Sequence[Sequence[str]]) -> List[List[str]]:\r\n \"\"\"\r\n Loops through permutations until we get the same seating arrangement twice.\r\n Returns the matrix if it is equal to the old matrix.\r\n\r\n :param matrix:\r\n :param old_matrix:\r\n :return: List[List[str]] - Matrix\r\n \"\"\"\r\n return matrix if (matrix == old_matrix) else switch_loop(switches(matrix), matrix)\r\n\r\n\r\ndef occupied_seats(data: Sequence[str]) -> int:\r\n \"\"\"\r\n Returns the number of # in the final matrix\r\n :param data: Puzzle input - Sequence[str]\r\n :return: Number of # found - int\r\n \"\"\"\r\n matrix = [list(x) for x in data]\r\n final_matrix = switch_loop(switches(matrix), matrix)\r\n\r\n return sum(x.count(\"#\") for x in final_matrix)\r\n\r\n\r\ndef main():\r\n with open(\"input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n print(occupied_seats(puzzle_input)) # Answer = 2418\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
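Editor's note: the eight explicit bounds-checked lookups in switches above can also be written as offset pairs with the bounds check factored out; a short illustrative sketch (names are this note's own):

OFFSETS = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1) if (di, dj) != (0, 0)]

def occupied_neighbours(matrix, i, j):
    # Count '#' among in-bounds neighbours of seat (i, j).
    return sum(
        1
        for di, dj in OFFSETS
        if 0 <= i + di < len(matrix)
        and 0 <= j + dj < len(matrix[i + di])
        and matrix[i + di][j + dj] == "#"
    )

assert occupied_neighbours(["##", "#L"], 1, 1) == 3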
{
"alpha_fraction": 0.7231025695800781,
"alphanum_fraction": 0.7256046533584595,
"avg_line_length": 29.743589401245117,
"blob_id": "bcfc060a9af21ba834641534fa394f1e427fdc54",
"content_id": "be8a09f7bc5707a8cf50abddc1374308728efe69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1199,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 39,
"path": "/Day 21/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nNow that you've isolated the inert ingredients, you should have enough information to figure out which ingredient\ncontains which allergen.\n\nIn the above example:\n\n mxmxvkd contains dairy.\n sqjhc contains fish.\n fvjkl contains soy.\n\nArrange the ingredients alphabetically by their allergen and separate them by commas to produce your canonical\ndangerous ingredient list. (There should not be any spaces in your canonical dangerous ingredient list.)\nIn the above example, this would be mxmxvkd,sqjhc,fvjkl.\n\nTime to stock your raft with supplies. What is your canonical dangerous ingredient list?\n\n\"\"\"\nfrom typing import Dict, Set\nfrom part_1 import parse_input, find_allergens\n\n\ndef find_dangerous_ingredients(allergens: Dict[Set[str], Set[str]]) -> str:\n return ','.join(list(pair[1])[0] for pair in sorted((k, v) for k, v in allergens.items()))\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = f.readlines()\n\n lines = parse_input(puzzle_input)\n allergens = find_allergens(lines)\n\n print(find_dangerous_ingredients(allergens)) # Answer = xncgqbcp,frkmp,qhqs,qnhjhn,dhsnxr,rzrktx,ntflq,lgnhmx\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6939560174942017,
"alphanum_fraction": 0.7002747058868408,
"avg_line_length": 35.767677307128906,
"blob_id": "cb67207ef48e6b6baf88f1886c536a13e122a5d6",
"content_id": "9ac8b2aab4307ad75ebf4c20f3acdd892a5ec41f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3640,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 99,
"path": "/Day 2/part_1.cpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "/**\n--- Day 2: Password Philosophy ---\n\nYour flight departs in a few days from the coastal airport; the easiest way down to the coast from here is via toboggan.\n\nThe shopkeeper at the North Pole Toboggan Rental Shop is having a bad day. \"Something's wrong with our computers;\nwe can't log in!\" You ask if you can take a look.\n\nTheir password database seems to be a little corrupted: some of the passwords wouldn't have been allowed by the\nOfficial Toboggan Corporate Policy that was in effect when they were chosen.\n\nTo try to debug the problem, they have created a list (your puzzle input) of passwords\n(according to the corrupted database) and the corporate policy when that password was set.\n\nFor example, suppose you have the following list:\n\n1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc\n\nEach line gives the password policy and then the password. The password policy indicates the lowest and\nhighest number of times a given letter must appear for the password to be valid.\nFor example, 1-3 a means that the password must contain a at least 1 time and at most 3 times.\n\nIn the above example, 2 passwords are valid. The middle password, cdefg, is not; it contains no instances of b,\nbut needs at least 1. The first and third passwords are valid:\nthey contain one a or nine c, both within the limits of their respective policies.\n\nHow many passwords are valid according to their policies?\n\n*/\n\n#include <iostream>\n#include <fstream>\n#include \"../cppheaders/functions.hpp\"\n\n\n\n/**\nThe function checks whether a password is valid or not.\nThe validation is such that, the password must contain the character to be validated at least a certain number of times but not more than a certain number of times.\nHence, the character occurrence is such that: minimum <= character occurrence <= maximum\nIf the above holds true, the function returns True. 
Else, it returns False.\n@param pwd: str - The password to be validated.\n@param char: Union[int, str] - The character to be used for validation.\n@param minimum: int - Minimum occurrence of the character to be validated.\n@param maximum: int - Maximum occurrence of the character to be validated.\n@return: bool\n*/\nbool is_valid(const std::string pwd, const char char_, const int minimum, const int maximum)\n{\n if (pwd.find(char_) == pwd.npos)\n {\n return false;\n }\n int character_occurrence = functions::count(pwd, char_);\n if ((character_occurrence < minimum) || (character_occurrence > maximum))\n {\n return false;\n }\n return true;\n}\n\nint find_number_of_valid_passwords(std::vector<std::string> data)\n{\n int number_of_valid_passwords = 0;\n for (auto line : data)\n {\n std::vector<std::string> line_content = functions::split(line, \" \");\n std::vector<std::string> min_and_max_occurrence = functions::split(line_content[0], \"-\");\n int minimum_occurrence = std::stoi(min_and_max_occurrence[0]);\n int maximum_occurrence = std::stoi(min_and_max_occurrence[1]);\n char character_to_validate = line_content[1][0];\n std::string password = line_content[2];\n\n if (is_valid(password, character_to_validate, minimum_occurrence, maximum_occurrence))\n {\n ++number_of_valid_passwords;\n }\n }\n return number_of_valid_passwords;\n}\n\nint main(){\n std::string line;\n std::vector<std::string> puzzle_input;\n std::ifstream f (\"./input.txt\");\n\n if (f.is_open())\n {\n while (getline(f, line))\n {\n puzzle_input.push_back(line);\n }\n f.close();\n }\n\n std::cout << find_number_of_valid_passwords(puzzle_input) << std::endl; // Answer = 519\n}\n"
},
{
"alpha_fraction": 0.6026048064231873,
"alphanum_fraction": 0.6099110841751099,
"avg_line_length": 24.016529083251953,
"blob_id": "eac3548d30feea148404ce662dc24e8e0ee98e68",
"content_id": "1a50136a999979fb93fa43d1933cbe0a15bd3c8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3148,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 121,
"path": "/Day 6/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nAs you finish the last group's customs declaration, you notice that you misread one word in the instructions:\r\n\r\nYou don't need to identify the questions to which anyone answered \"yes\";\r\nyou need to identify the questions to which everyone answered \"yes\"!\r\n\r\nUsing the same example as above:\r\n\r\nabc\r\n\r\na\r\nb\r\nc\r\n\r\nab\r\nac\r\n\r\na\r\na\r\na\r\na\r\n\r\nb\r\n\r\nThis list represents answers from five groups:\r\n\r\n In the first group, everyone (all 1 person) answered \"yes\" to 3 questions: a, b, and c.\r\n In the second group, there is no question to which everyone answered \"yes\".\r\n In the third group, everyone answered yes to only 1 question, a. Since some people did not answer \"yes\" to b or c,\r\n they don't count.\r\n In the fourth group, everyone answered yes to only 1 question, a.\r\n In the fifth group, everyone (all 1 person) answered \"yes\" to 1 question, b.\r\n\r\nIn this example, the sum of these counts is 3 + 0 + 1 + 1 + 1 = 6.\r\n\r\nFor each group, count the number of questions to which everyone answered \"yes\". What is the sum of those counts?\r\n\r\n\"\"\"\r\nfrom typing import List, Tuple, Sequence\r\n\r\n\r\ndef restart_search(chars: List[str], group_size: int, yes: int) -> Tuple[List[str], int]:\r\n for char in chars:\r\n if chars.count(char) == group_size:\r\n yes += 1\r\n while char in chars:\r\n chars.pop(chars.index(char))\r\n else:\r\n while char in chars:\r\n chars.pop(chars.index(char))\r\n\r\n return chars, yes\r\n\r\n\r\ndef number_of_yes_in_group(group: Sequence[str]) -> int:\r\n \"\"\"\r\n Finds and returns the number of yeses in a group.\r\n :param group:\r\n :return:\r\n \"\"\"\r\n group_size = len(group)\r\n if group_size == 1:\r\n return len(group[0])\r\n\r\n yeses = 0\r\n\r\n chars = []\r\n for string in group:\r\n for char in string:\r\n chars.append(char)\r\n\r\n while len(chars) > 0:\r\n c, y = restart_search(chars, group_size, yeses)\r\n chars = c\r\n yeses = y\r\n\r\n return yeses\r\n\r\n\r\ndef make_input_convenient(raw_input: Sequence[str]) -> List[str]:\r\n \"\"\"\r\n Makes an input convenient for further processing.\r\n This is done by flattening out the list.\r\n The function returns a list of strings convenient for processing.\r\n\r\n\r\n :param raw_input: Sequence[str] - A list of strings to be made convenient.\r\n :return: - List[str] - Convenient output\r\n \"\"\"\r\n convenient = []\r\n\r\n for lst in raw_input:\r\n convenient.append(lst)\r\n\r\n return convenient\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [i.strip() for i in f.readlines()]\r\n groups: List[list] = []\r\n\r\n while \"\" in puzzle_input:\r\n index = puzzle_input.index(\"\")\r\n groups.append(make_input_convenient(puzzle_input[:index]))\r\n puzzle_input = puzzle_input[index + 1:]\r\n else:\r\n groups.append(make_input_convenient(puzzle_input))\r\n\r\n total_number_of_yeses = 0\r\n\r\n for group in groups:\r\n total_number_of_yeses += number_of_yes_in_group(group)\r\n\r\n print(total_number_of_yeses) # Answer = 3158\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6301853656768799,
"alphanum_fraction": 0.6393056511878967,
"avg_line_length": 31,
"blob_id": "91ee3c76879128000ab806c78ad58f2630e6424a",
"content_id": "dd2145890a608742003e607d15e03334407dcbd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6798,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 206,
"path": "/Day 16/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nNow that you've identified which tickets contain invalid values, discard those tickets entirely.\r\nUse the remaining valid tickets to determine which field is which.\r\n\r\nUsing the valid ranges for each field, determine what order the fields appear on the tickets.\r\nThe order is consistent between all tickets: if seat is the third field, it is the third field on every ticket,\r\nincluding your ticket.\r\n\r\nFor example, suppose you have the following notes:\r\n\r\nclass: 0-1 or 4-19\r\nrow: 0-5 or 8-19\r\nseat: 0-13 or 16-19\r\n\r\nyour ticket:\r\n11,12,13\r\n\r\nnearby tickets:\r\n3,9,18\r\n15,1,5\r\n5,14,9\r\n\r\nBased on the nearby tickets in the above example, the first position must be row, the second position must be class,\r\nand the third position must be seat; you can conclude that in your ticket, class is 12, row is 11, and seat is 13.\r\n\r\nOnce you work out which field is which, look for the six fields on your ticket that start with the word departure.\r\nWhat do you get if you multiply those six values together?\r\n\r\n\"\"\"\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Dict, List, Set, Tuple\r\n\r\n\r\n@dataclass\r\nclass Field:\r\n name: str\r\n ranges: List[Tuple[int, int]] = field(default_factory=list)\r\n\r\n def is_valid(self, value: int) -> bool:\r\n for range_from, range_to in self.ranges:\r\n if range_from <= value <= range_to:\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef parse_field(line: str) -> Field:\r\n line = line.strip()\r\n\r\n try:\r\n name, raw_ranges = line.split(\": \")\r\n raw_ranges = raw_ranges.split(\" or \")\r\n ranges = []\r\n for raw_range in raw_ranges:\r\n lower, upper = raw_range.split('-')\r\n ranges.append((int(lower), int(upper)))\r\n except ValueError:\r\n raise ValueError(f\"Invalid field: {line!r}\")\r\n\r\n return Field(name, ranges)\r\n\r\n\r\n@dataclass\r\nclass Ticket:\r\n # THE NUMBERS MASON!\r\n numbers: List[int]\r\n\r\n\r\ndef parse_ticket(line: str) -> Ticket:\r\n return Ticket([int(n) for n in line.strip().split(',')])\r\n\r\n\r\ndef parse_notes(notes: str) -> Tuple[List[Field], Ticket, List[Ticket]]:\r\n \"\"\"\r\n Parses the notes (the puzzle input) and returns the field, your ticket\r\n and nearby tickets in order.\r\n \"\"\"\r\n sections = notes.strip().split('\\n\\n')\r\n\r\n fields_lines = sections[0].split('\\n')\r\n fields = [parse_field(line) for line in fields_lines]\r\n\r\n your_ticket_lines = sections[1].split('\\n')\r\n your_ticket = parse_ticket(your_ticket_lines[1])\r\n\r\n nearby_tickets_lines = sections[2].split('\\n')\r\n nearby_tickets = [parse_ticket(line) for line in nearby_tickets_lines[1:]]\r\n\r\n return fields, your_ticket, nearby_tickets\r\n\r\n\r\ndef find_invalid_values(fields: List[Field], tickets: List[Ticket]) -> List[int]:\r\n \"\"\"\r\n Returns the list of values from the tickets that are invalid for all of the fields.\r\n \"\"\"\r\n invalid_values = []\r\n for ticket in tickets:\r\n for value in ticket.numbers:\r\n is_invalid = all(not _field.is_valid(value) for _field in fields)\r\n if is_invalid:\r\n invalid_values.append(value)\r\n\r\n return invalid_values\r\n\r\n\r\ndef ticket_is_valid(fields: List[Field], ticket: Ticket) -> bool:\r\n for value in ticket.numbers:\r\n is_invalid = all(not _field.is_valid(value) for _field in fields)\r\n if is_invalid:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef find_matching_fields(fields: List[Field], tickets: List[Ticket]) -> Dict[int, Set[str]]:\r\n \"\"\"\r\n For each 
value index in the tickets finds fields which match it. A field matches\r\n a value index if all values at said index in all tickets are valid for this field.\r\n Returns a dict with value indices as keys and sets of field names as value.\r\n \"\"\"\r\n index_fields: Dict[int, Set[str]] = {}\r\n\r\n for ticket in tickets:\r\n for value_index, value in enumerate(ticket.numbers):\r\n matching_fields = set(_field.name for _field in fields if _field.is_valid(value))\r\n if value_index in index_fields:\r\n index_fields[value_index] &= matching_fields\r\n else:\r\n index_fields[value_index] = matching_fields\r\n\r\n return index_fields\r\n\r\n\r\ndef resolve_matching_fields(field_matches: Dict[int, Set[str]]) -> Dict[int, str]:\r\n \"\"\"\r\n Given a dict that matches each index to a set of matching fields, tries to find\r\n a valid selection of one field for each index. Raises ValueError if there are\r\n multiple solutions.\r\n Returns a mapping from each index to the selected field name.\r\n \"\"\"\r\n if len(field_matches) == 0:\r\n return {}\r\n\r\n # Find an index for which there is already one matching field.\r\n solved_index = None\r\n solved_field = None\r\n for index, fields in field_matches.items():\r\n if len(fields) == 1:\r\n solved_index = index\r\n solved_field, = fields # Unpack the only value from the set\r\n break\r\n\r\n if solved_index is None:\r\n raise ValueError(f\"Multiple solutions for field matches: {field_matches}\")\r\n\r\n # Make a copy of field_matches without solved_index and solved_field\r\n reduced_field_matches = {index: fields.copy() for index, fields in field_matches.items()}\r\n del reduced_field_matches[solved_index]\r\n for index, fields in reduced_field_matches.items():\r\n if solved_field in fields:\r\n fields.remove(solved_field)\r\n\r\n resolved_matches = resolve_matching_fields(reduced_field_matches)\r\n resolved_matches[solved_index] = solved_field\r\n\r\n return resolved_matches\r\n\r\n\r\ndef determine_field_order(fields: List[Field], tickets: List[Ticket]) -> List[Field]:\r\n \"\"\"\r\n Returns the fields in the order they appear in the tickets.\r\n \"\"\"\r\n # Filter out invalid tickets\r\n tickets = [t for t in tickets if ticket_is_valid(fields, t)]\r\n\r\n # Map each index to the matching field name\r\n index_fields = resolve_matching_fields(find_matching_fields(fields, tickets))\r\n\r\n # Sort matches by index\r\n sorted_index_fields = sorted(index_fields.items(), key=lambda pair: pair[0])\r\n\r\n ordered_fields = []\r\n for index, name in sorted_index_fields:\r\n for f in fields:\r\n if f.name == name:\r\n ordered_fields.append(f)\r\n\r\n return ordered_fields\r\n\r\n\r\ndef main():\r\n with open('./input.txt') as f:\r\n fields, your_ticket, nearby_tickets = parse_notes(f.read())\r\n\r\n ordered_fields = determine_field_order(fields, nearby_tickets)\r\n answer_mult = 1\r\n for index, f in enumerate(ordered_fields):\r\n if f.name.startswith(\"departure\"):\r\n answer_mult *= your_ticket.numbers[index]\r\n print(answer_mult) # Answer = 964_373_157_673\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main() # Solution from Oleg Yam\r\n"
},
{
"alpha_fraction": 0.5737212896347046,
"alphanum_fraction": 0.6339914798736572,
"avg_line_length": 31.569766998291016,
"blob_id": "5bff1a3f8d4828ccaedbc9a54ec479f19641bdc4",
"content_id": "35e7a2e4c787b5ad934744b9d37ebe36e36414c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8661,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 258,
"path": "/Day 4/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nThe line is moving more quickly now, but you overhear airport security talking about how passports with invalid data\r\nare getting through. Better add some data validation, quick!\r\n\r\nYou can continue to ignore the cid field, but each other field has strict rules about what values are valid for\r\nautomatic validation:\r\n\r\n byr (Birth Year) - four digits; at least 1920 and at most 2002.\r\n iyr (Issue Year) - four digits; at least 2010 and at most 2020.\r\n eyr (Expiration Year) - four digits; at least 2020 and at most 2030.\r\n hgt (Height) - a number followed by either cm or in:\r\n If cm, the number must be at least 150 and at most 193.\r\n If in, the number must be at least 59 and at most 76.\r\n hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.\r\n ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.\r\n pid (Passport ID) - a nine-digit number, including leading zeroes.\r\n cid (Country ID) - ignored, missing or not.\r\n\r\nYour job is to count the passports where all required fields are both present and valid according to the above rules.\r\nHere are some example values:\r\n\r\nbyr valid: 2002\r\nbyr invalid: 2003\r\n\r\nhgt valid: 60in\r\nhgt valid: 190cm\r\nhgt invalid: 190in\r\nhgt invalid: 190\r\n\r\nhcl valid: #123abc\r\nhcl invalid: #123abz\r\nhcl invalid: 123abc\r\n\r\necl valid: brn\r\necl invalid: wat\r\n\r\npid valid: 000000001\r\npid invalid: 0123456789\r\n\r\nHere are some invalid passports:\r\n\r\neyr:1972 cid:100\r\nhcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\r\n\r\niyr:2019\r\nhcl:#602927 eyr:1967 hgt:170cm\r\necl:grn pid:012533040 byr:1946\r\n\r\nhcl:dab227 iyr:2012\r\necl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\r\n\r\nhgt:59cm ecl:zzz\r\neyr:2038 hcl:74454a iyr:2023\r\npid:3556412378 byr:2007\r\n\r\nHere are some valid passports:\r\n\r\npid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\r\nhcl:#623a2f\r\n\r\neyr:2029 ecl:blu cid:129 byr:1989\r\niyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\r\n\r\nhcl:#888785\r\nhgt:164cm byr:2001 iyr:2015 cid:88\r\npid:545766238 ecl:hzl\r\neyr:2022\r\n\r\niyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\r\n\r\nCount the number of valid passports - those that have all required fields and valid values. Continue to treat cid as\r\noptional. In your batch file, how many passports are valid?\r\n\r\n\"\"\"\r\n\r\nfrom typing import List\r\nfrom part_1 import group_passport_data\r\n\r\n\r\ndef birth_year_is_valid(year: int) -> bool:\r\n \"\"\"\r\n Checks if a birth year is valid.\r\n A birth year is valid if it is such that: 1920 <= year <= 2002.\r\n Examples:\r\n - Valid: 1920, 2002, 1995\r\n - Invalid: 1919 (less than 1920), 2003 (greater than 2002)\r\n The function returns True if it is valid. Otherwise, it returns False.\r\n\r\n :param year: int - Year to be validated\r\n :return: bool\r\n \"\"\"\r\n return True if (1920 <= year <= 2002) else False\r\n\r\n\r\ndef expiration_year_is_valid(year: int) -> bool:\r\n \"\"\"\r\n Checks if a passport's expiration year is valid.\r\n The expiration year is valid if it is such that: 2020 <= year <= 2030.\r\n Examples:\r\n - Valid: 2020, 2030, 2026\r\n - Invalid: 2019 (less than 2020), 2031 (greater than 2030)\r\n The function returns True if it is valid. 
Otherwise, it returns False.\r\n\r\n :param year: int - Year to be validated\r\n :return: bool\r\n \"\"\"\r\n return True if (2020 <= year <= 2030) else False\r\n\r\n\r\ndef issue_year_is_valid(year: int) -> bool:\r\n \"\"\"\r\n Checks if a passport's year of issue is valid.\r\n The year of issue is valid if it is such that: 2010 <= year <= 2020.\r\n Examples:\r\n - Valid: 2010, 2018, 2020\r\n - Invalid: 2009 (less than 2010), 2021 (greater than 2020)\r\n The function returns True if it is valid. Otherwise, it returns False.\r\n\r\n :param year: int - Year to be validated\r\n :return: bool\r\n \"\"\"\r\n return True if (2010 <= year <= 2020) else False\r\n\r\n\r\ndef height_is_valid(height: str) -> bool:\r\n \"\"\"\r\n Checks if a person's height is valid.\r\n For a person's height to be valid, it must satisfy the following:\r\n - It must be a number followed by either cm or in:\r\n - If cm, the number must be at least 150 and at most 193.\r\n - If in, the number must be at least 59 and at most 76.\r\n Examples:\r\n - Valid: 150cm, 76in\r\n - Invalid: 150 (No unit), 77in (\"in\" must not be greater than 76)\r\n The function returns True if the height is valid and False if it is not.\r\n\r\n :param height: int - Height to be validated\r\n :return: bool\r\n \"\"\"\r\n height_unit = height[-2:]\r\n\r\n if (height_unit != \"cm\") and (height_unit != \"in\"):\r\n return False\r\n\r\n if height_unit == \"cm\":\r\n return True if (150 <= int(height[:-2]) <= 193) else False\r\n\r\n elif height_unit == \"in\":\r\n return True if (59 <= int(height[:-2]) <= 76) else False\r\n\r\n\r\ndef eye_color_is_valid(color: str) -> bool:\r\n \"\"\"\r\n Checks if a person's eye color is valid.\r\n For a person's eye color to be valid, it must have exactly one of \"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\" or \"oth\".\r\n The function returns True if the eye color is valid and False if it is not.\r\n\r\n :param color: str - Color to be validated\r\n :return: bool\r\n \"\"\"\r\n colors = (\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\")\r\n return True if (color in colors) else False\r\n\r\n\r\ndef hair_color_is_valid(hexadecimal_color: str) -> bool:\r\n \"\"\"\r\n Checks if a Hexadecimal RGB is valid or not.\r\n For the color to be valid, it must begin with a # followed by exactly six characters 0-9 or a-f.\r\n Examples:\r\n - Valid: #1ef014, #000000, #aaaaaa\r\n - Invalid: 123410 (No #), #31 (less than 6 chars after #), #12r32c (r is not within a-f)\r\n The function returns True if the color is valid and False if it is not.\r\n\r\n :param hexadecimal_color: str - Hexadecimal color\r\n :return: bool\r\n \"\"\"\r\n valid_characters: List[str] = [str(i) for i in range(10)]\r\n valid_characters.extend((\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"))\r\n\r\n if not hexadecimal_color.startswith(\"#\") and (len(hexadecimal_color) <= 7):\r\n return False\r\n\r\n for character in hexadecimal_color[1:]:\r\n if character not in valid_characters:\r\n return False\r\n return True\r\n\r\n\r\ndef passport_id_is_valid(passport_id: str) -> bool:\r\n \"\"\"\r\n Checks if a passport ID is valid.\r\n The ID is valid if it's length is 9\r\n The function return True if the ID is valid else it returns False.\r\n\r\n :param passport_id: str - The Passport ID to be validated.\r\n :return: bool\r\n \"\"\"\r\n return True if (len(passport_id) == 9) else False\r\n\r\n\r\ndef passport_is_valid(passport: dict) -> bool:\r\n \"\"\"\r\n Checks whether a passport is valid or not.\r\n A passport is considered valid if all the 
functions called by this function returns True.\r\n If a required field is not found in the passport and it raises a KeyError, the passport is considered invalid.\r\n If a passport is valid, the function returns True. Otherwise, it returns False.\r\n\r\n :param passport: dict\r\n :return: bool\r\n \"\"\"\r\n try:\r\n birth_year = int(passport[\"byr\"])\r\n issue_year = int(passport[\"iyr\"])\r\n expiration_year = int(passport[\"eyr\"])\r\n height = passport[\"hgt\"]\r\n hair_color = passport[\"hcl\"]\r\n eye_color = passport[\"ecl\"]\r\n passport_id = passport[\"pid\"]\r\n except KeyError:\r\n return False\r\n\r\n if birth_year_is_valid(birth_year) \\\r\n and issue_year_is_valid(issue_year) \\\r\n and expiration_year_is_valid(expiration_year) \\\r\n and height_is_valid(height) \\\r\n and eye_color_is_valid(eye_color) \\\r\n and hair_color_is_valid(hair_color) \\\r\n and passport_id_is_valid(passport_id):\r\n return True\r\n return False\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n content: List[str] = [line.strip() for line in f.readlines()]\r\n passports: List[dict] = []\r\n number_of_valid_passports = 0\r\n\r\n # Use newlines (\"\") to find sections of info belonging to a passport\r\n while \"\" in content:\r\n index = content.index(\"\")\r\n passports.append(group_passport_data(content[:index]))\r\n # Remove the contents which has been grouped\r\n content = content[index + 1:]\r\n else: # Take care of the last section of info left which has no \"\"\r\n passports.append(group_passport_data(content))\r\n\r\n for passport_info in passports:\r\n if passport_is_valid(passport_info):\r\n number_of_valid_passports += 1\r\n\r\n print(number_of_valid_passports) # Answer = 137\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.677956223487854,
"alphanum_fraction": 0.6864233613014221,
"avg_line_length": 38.29411697387695,
"blob_id": "8b5738e9d2bfb05e5f6f19ed6fa831c82abfa104",
"content_id": "b22f344c919bfb67ae151e0015c63ac71407c514",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3425,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 85,
"path": "/Day 2/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nWhile it appears you validated the passwords correctly, they don't seem to be what the\r\nOfficial Toboggan Corporate Authentication System is expecting.\r\n\r\nThe shopkeeper suddenly realizes that he just accidentally explained the password policy rules from his old job\r\nat the sled rental place down the street! The Official Toboggan Corporate Policy actually works a little differently.\r\n\r\nEach policy actually describes two positions in the password, where 1 means the first character,\r\n2 means the second character, and so on. (Be careful; Toboggan Corporate Policies have no concept of \"index zero\"!)\r\nExactly one of these positions must contain the given letter. Other occurrences of the letter are irrelevant\r\nfor the purposes of policy enforcement.\r\n\r\nGiven the same example list from above:\r\n\r\n 1-3 a: abcde is valid: position 1 contains a and position 3 does not.\r\n 1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.\r\n 2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.\r\n\r\nHow many passwords are valid according to the new interpretation of the policies?\r\n\r\n\"\"\"\r\nfrom typing import Union, Sequence\r\n\r\n\r\ndef is_valid(pwd: str, char: Union[str, int], first_position: int, last_position: int) -> bool:\r\n \"\"\"\r\n The function checks whether a password is valid or not.\r\n The validation is such that, the password must contain the character to be validated at least a certain number\r\n of times but not more than a certain number of times.\r\n Hence, the character occurrence is such that: minimum <= character occurrence <= maximum\r\n If the above holds true, the function returns True. Else, it returns False.\r\n\r\n :param pwd: str - The password to be validated.\r\n :param char: Union[int, str] - The character to be used for validation.\r\n :param first_position: int - Minimum occurrence of the character to be validated.\r\n :param last_position: int - Maximum occurrence of the character to be validated.\r\n :return: bool\r\n \"\"\"\r\n\r\n if char not in pwd:\r\n return False\r\n if (char == pwd[first_position - 1]) and (char != pwd[last_position - 1]):\r\n return True\r\n elif (char != pwd[first_position - 1]) and (char == pwd[last_position - 1]):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef find_number_of_valid_passwords(data: Sequence[str]) -> int:\r\n \"\"\"\r\n Finds the number of valid passwords in the data argument.\r\n It takes all elements in the data and checks their validity with the is_valid function.\r\n The function return the total number of valid passwords found.\r\n\r\n :param data:\r\n :return:\r\n \"\"\"\r\n number_of_valid_passwords = 0\r\n\r\n for line in data:\r\n line_content = line.split()\r\n min_and_max_occurrence = line_content[0].split(\"-\")\r\n minimum_occurrence = int(min_and_max_occurrence[0])\r\n maximum_occurrence = int(min_and_max_occurrence[1])\r\n character_to_validate = line_content[1][:-1]\r\n password = line_content[2]\r\n\r\n if is_valid(password, character_to_validate, minimum_occurrence, maximum_occurrence):\r\n number_of_valid_passwords += 1\r\n\r\n return number_of_valid_passwords\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n print(find_number_of_valid_passwords(puzzle_input)) # Answer: 708\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5765830874443054,
"alphanum_fraction": 0.6045462489128113,
"avg_line_length": 38.8776969909668,
"blob_id": "06bc7d09ff11cf8d635bdd9da622fa9be1ac2bcd",
"content_id": "61ce85632c62f579d42d6a4ae62d30f5300ea192",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5543,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 139,
"path": "/Day 18/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 18: Operation Order ---\n\nAs you look out the window and notice a heavily-forested continent slowly appear over the horizon,\nyou are interrupted by the child sitting next to you. They're curious if you could help them with their math homework.\n\nUnfortunately, it seems like this \"math\" follows different rules than you remember.\n\nThe homework (your puzzle input) consists of a series of expressions that consist of\naddition (+), multiplication (*), and parentheses ((...)).\nJust like normal math, parentheses indicate that the expression inside must be evaluated before it can be used\nby the surrounding expression.\nAddition still finds the sum of the numbers on both sides of the operator, and multiplication still finds the product.\n\nHowever, the rules of operator precedence have changed. Rather than evaluating multiplication before addition,\nthe operators have the same precedence, and are evaluated left-to-right regardless of the order in which they appear.\n\nFor example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are as follows:\n\n1 + 2 * 3 + 4 * 5 + 6\n 3 * 3 + 4 * 5 + 6\n 9 + 4 * 5 + 6\n 13 * 5 + 6\n 65 + 6\n 71\n\nParentheses can override this order; for example, here is what happens if parentheses are added to form\n1 + (2 * 3) + (4 * (5 + 6)):\n\n1 + (2 * 3) + (4 * (5 + 6))\n1 + 6 + (4 * (5 + 6))\n 7 + (4 * (5 + 6))\n 7 + (4 * 11 )\n 7 + 44\n 51\n\nHere are a few more examples:\n\n 2 * 3 + (4 * 5) becomes 26.\n 5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 437.\n 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 12240.\n ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 13632.\n\nBefore you can help with the homework, you need to understand it yourself.\nEvaluate the expression on each line of the homework; what is the sum of the resulting values?\n\"\"\"\nfrom simpleeval import simple_eval\n\n\ndef evaluate_operation(expression: str) -> int:\n \"\"\"\n Uses a \"better\" form of (built-in) eval (, security-wise) to evaluate an expression and return an integer\n \"\"\"\n return simple_eval(expression)\n\n\ndef extract_bracket(expression: str) -> str:\n \"\"\"\n Extracts and returns the contents of the first and outermost bracket.\n Examples:\n extract_bracket(\"2 + (3 * 4)\") => 3 * 4\n extract_bracket(\"(1 + (2 + 3))\") => 1 + (2 + 3))\n \"\"\"\n level_within_bracket = 0\n expression_within_bracket = \"\"\n\n for char in expression[expression.index(\"(\"):]:\n if char == \"(\":\n level_within_bracket += 1\n if level_within_bracket:\n expression_within_bracket += char\n if char == \")\" and level_within_bracket:\n level_within_bracket -= 1\n if not level_within_bracket:\n return expression_within_bracket[1:-1] # Remove parentheses from expression with [1:-1]\n\n\ndef evaluate_expression(expression: str) -> int:\n \"\"\"\n Evaluates an expression from left to right with no regard for precedence of some operators.\n Returns the value of the expression after evaluation\n \"\"\"\n value_of_expression = \"\"\n index_of_current_char = 0\n while True:\n if expression.strip().isdigit():\n return int(expression)\n current_char = expression[index_of_current_char]\n next_operand_index = index_of_current_char + 2\n next_operand = expression[next_operand_index]\n\n if current_char in (\"*\", \"+\"):\n if next_operand.isdigit():\n value_of_expression = evaluate_operation(\" \".join(expression.split()[:3]))\n expression = str(value_of_expression) + \" \" + \" \".join(expression.split()[3:])\n index_of_current_char = 0\n elif next_operand == \"(\":\n 
expression_in_bracket = extract_bracket(expression)\n value_of_bracket_expression = evaluate_expression(expression_in_bracket)\n bracket_starts_from = next_operand_index\n bracket_ends_at = next_operand_index + len(expression_in_bracket) + 1\n expression = expression[:bracket_starts_from] \\\n + str(value_of_bracket_expression) \\\n + expression[bracket_ends_at + 1:]\n index_of_current_char = 0\n\n elif current_char == \"(\":\n expression_in_bracket = extract_bracket(expression)\n value_of_bracket_expression = evaluate_expression(expression_in_bracket)\n if expression.startswith(\"(\"):\n # If the expression starts with a (, the value of the expression must\n # replace the bracket accordingly (from the start)\n bracket_ends_at = len(expression_in_bracket) + 1\n expression = str(value_of_bracket_expression) + expression[bracket_ends_at + 1:]\n else:\n bracket_starts_from = next_operand_index\n bracket_ends_at = next_operand_index + len(expression_in_bracket) + 1\n expression = expression[:bracket_starts_from] \\\n + str(value_of_bracket_expression) \\\n + expression[bracket_ends_at + 1:]\n index_of_current_char = 0\n\n index_of_current_char += 1\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = [line.strip() for line in f.readlines()]\n\n sum_of_values = 0\n\n for line in puzzle_input:\n sum_of_values += evaluate_expression(line)\n\n print(sum_of_values) # Answer = 4_696_493_914_530\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6372889876365662,
"alphanum_fraction": 0.6586395502090454,
"avg_line_length": 38.68687057495117,
"blob_id": "6b499aa0ed4ad918329d2898071705d9c793db8d",
"content_id": "2ec5e89ee94638bebf68a3f280b626ec797bd9f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4028,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 99,
"path": "/Day 12/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 12: Rain Risk ---\r\n\r\nYour ferry made decent progress toward the island, but the storm came in faster than anyone expected.\r\nThe ferry needs to take evasive actions!\r\n\r\nUnfortunately, the ship's navigation computer seems to be malfunctioning; rather than giving a route directly to safety,\r\nit produced extremely circuitous instructions. When the captain uses the PA system to ask if anyone can help,\r\nyou quickly volunteer.\r\n\r\nThe navigation instructions (your puzzle input) consists of a sequence of single-character actions paired with integer\r\ninput values. After staring at them for a few minutes, you work out what they probably mean:\r\n\r\n Action N means to move north by the given value.\r\n Action S means to move south by the given value.\r\n Action E means to move east by the given value.\r\n Action W means to move west by the given value.\r\n Action L means to turn left the given number of degrees.\r\n Action R means to turn right the given number of degrees.\r\n Action F means to move forward by the given value in the direction the ship is currently facing.\r\n\r\nThe ship starts by facing east. Only the L and R actions change the direction the ship is facing.\r\n(That is, if the ship is facing east and the next instruction is N10, the ship would move north 10 units, but would\r\nstill move east if the following action were F.)\r\n\r\nFor example:\r\n\r\nF10\r\nN3\r\nF7\r\nR90\r\nF11\r\n\r\nThese instructions would be handled as follows:\r\n\r\n F10 would move the ship 10 units east (because the ship starts by facing east) to east 10, north 0.\r\n N3 would move the ship 3 units north to east 10, north 3.\r\n F7 would move the ship another 7 units east (because the ship is still facing east) to east 17, north 3.\r\n R90 would cause the ship to turn right by 90 degrees and face south; it remains at east 17, north 3.\r\n F11 would move the ship 11 units south to east 17, south 8.\r\n\r\nAt the end of these instructions, the ship's Manhattan distance (sum of the absolute values of its east/west position\r\nand its north/south position) from its starting position is 17 + 8 = 25.\r\n\r\nFigure out where the navigation instructions lead. 
What is the Manhattan distance between that location and the ship's\r\nstarting position?\r\n\r\n\"\"\"\r\nfrom typing import Union, List, Tuple, Sequence\r\n\r\n\r\ndef move(_direction: str, amount: int, _location: Tuple[int, int]) -> Tuple[int, int]:\r\n \"\"\"\r\n Updates the _location based on +x, -y positioning.\r\n \"\"\"\r\n if _direction == \"N\":\r\n return _location[0], _location[1] + amount\r\n if _direction == \"S\":\r\n return _location[0], _location[1] - amount\r\n if _direction == \"E\":\r\n return _location[0] + amount, _location[1]\r\n if _direction == \"W\":\r\n return _location[0] - amount, _location[1]\r\n\r\n\r\ndef ship_movements(data: Sequence[str]) -> int:\r\n directions = [(d[:1], int(d[1:])) for d in data]\r\n facing = \"E\"\r\n origin = (0, 0)\r\n location = (0, 0)\r\n\r\n def turn(_direction: Tuple[str, int]) -> Union[str, List[str]]:\r\n \"\"\"\r\n Jumps to the new direction based on the current facing direction + the direction degrees divided by 90.\r\n \"\"\"\r\n options = (\"E\", \"S\", \"W\", \"N\", \"E\", \"S\", \"W\", \"N\")\r\n jumps = (_direction[1] // 90) if (_direction[0] == \"R\") else (- _direction[1] // 90)\r\n return options[options.index(facing) + jumps]\r\n\r\n for direction in directions:\r\n if (direction[0] == \"R\") or (direction[0] == \"L\"):\r\n facing = turn(direction)\r\n elif direction[0] == \"F\": # \"F\" direction moves in the \"facing\" direction.\r\n location = move(facing, direction[1], location)\r\n else:\r\n location = move(direction[0], direction[1], location)\r\n\r\n return abs(origin[0] - location[0]) + abs(origin[1] - location[1])\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n print(ship_movements(puzzle_input)) # Answer = 521\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6875845193862915,
"alphanum_fraction": 0.6946172714233398,
"avg_line_length": 44.212501525878906,
"blob_id": "c63cfb9e671168d5d8c0a072330242f0cee751c5",
"content_id": "e6ed1ed75699593478c0cb20dc170144d40d3889",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3697,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 80,
"path": "/Day 7/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 7: Handy Haversacks ---\r\n\r\nYou land at the regional airport in time for your next flight. In fact, it looks like you'll even have time to grab some\r\n food: all flights are currently delayed due to issues in luggage processing.\r\n\r\nDue to recent aviation regulations, many rules (your puzzle input) are being enforced about bags and their contents;\r\nbags must be color-coded and must contain specific quantities of other color-coded bags.\r\nApparently, nobody responsible for these regulations considered how long they would take to enforce!\r\n\r\nFor example, consider the following rules:\r\n\r\nlight red bags contain 1 bright white bag, 2 muted yellow bags.\r\ndark orange bags contain 3 bright white bags, 4 muted yellow bags.\r\nbright white bags contain 1 shiny gold bag.\r\nmuted yellow bags contain 2 shiny gold bags, 9 faded blue bags.\r\nshiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.\r\ndark olive bags contain 3 faded blue bags, 4 dotted black bags.\r\nvibrant plum bags contain 5 faded blue bags, 6 dotted black bags.\r\nfaded blue bags contain no other bags.\r\ndotted black bags contain no other bags.\r\n\r\nThese rules specify the required contents for 9 bag types. In this example, every faded blue bag is empty,\r\nevery vibrant plum bag contains 11 bags (5 faded blue and 6 dotted black), and so on.\r\n\r\nYou have a shiny gold bag. If you wanted to carry it in at least one other bag, how many different bag colors would be\r\nvalid for the outermost bag? (In other words: how many colors can, eventually, contain at least one shiny gold bag?)\r\n\r\nIn the above rules, the following options would be available to you:\r\n\r\n A bright white bag, which can hold your shiny gold bag directly.\r\n A muted yellow bag, which can hold your shiny gold bag directly, plus some other bags.\r\n A dark orange bag, which can hold bright white and muted yellow bags, either of which could then hold\r\n your shiny gold bag.\r\n A light red bag, which can hold bright white and muted yellow bags, either of which could then hold\r\n your shiny gold bag.\r\n\r\nSo, in this example, the number of bag colors that can eventually contain at least one shiny gold bag is 4.\r\n\r\nHow many bag colors can eventually contain at least one shiny gold bag? (The list of rules is quite long;\r\nmake sure you get all of it.)\r\n\r\n\"\"\"\r\n\r\n\r\ndef can_contain_a_desired_bag(line: str, desired_bag: str = \"shiny gold\") -> bool:\r\n \"\"\"\r\n Check if a bag can contain a desired bag.\r\n The function returns True if it can. 
Else, it returns False.\r\n\r\n :param line: str - A line in which a desired bag will be searched for.\r\n :param desired_bag: str - Desired bag to be found in the line\r\n :return: bool\r\n \"\"\"\r\n return True if (desired_bag in line) and (not line.startswith(desired_bag)) else False\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n bags_which_can_contain_shiny_gold_bags = []\r\n for line in puzzle_input:\r\n if can_contain_a_desired_bag(line, desired_bag=\"shiny gold\"):\r\n bag = line[:line.index(\"bags\") - 1]\r\n bags_which_can_contain_shiny_gold_bags.append(bag)\r\n\r\n for i in range(5):\r\n for line in puzzle_input:\r\n for bag in bags_which_can_contain_shiny_gold_bags:\r\n if can_contain_a_desired_bag(line, desired_bag=bag):\r\n bag = line[:line.index(\"bags\") - 1]\r\n if bag not in bags_which_can_contain_shiny_gold_bags:\r\n bags_which_can_contain_shiny_gold_bags.append(bag)\r\n\r\n print(len(bags_which_can_contain_shiny_gold_bags)) # Answer = 242\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6700195670127869,
"alphanum_fraction": 0.6761665344238281,
"avg_line_length": 29.95535659790039,
"blob_id": "d3778574c14a9ba4f6c4cc6e2f4c19cf20b28f55",
"content_id": "6c4bf764a39b7b382fe66ad125851bf62fe532b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3579,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 112,
"path": "/Day 6/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 6: Custom Customs ---\r\n\r\nAs your flight approaches the regional airport where you'll switch to a much larger plane,\r\ncustoms declaration forms are distributed to the passengers.\r\n\r\nThe form asks a series of 26 yes-or-no questions marked a through z. All you need to do is identify the questions\r\nfor which anyone in your group answers \"yes\". Since your group is just you, this doesn't take very long.\r\n\r\nHowever, the person sitting next to you seems to be experiencing a language barrier and asks if you can help.\r\nFor each of the people in their group, you write down the questions for which they answer \"yes\", one per line.\r\n\r\nFor example:\r\n\r\nabcx\r\nabcy\r\nabcz\r\n\r\nIn this group, there are 6 questions to which anyone answered \"yes\": a, b, c, x, y, and z.\r\n(Duplicate answers to the same question don't count extra; each question counts at most once.)\r\n\r\nAnother group asks for your help, then another, and eventually you've collected answers from every group on the plane\r\n(your puzzle input). Each group's answers are separated by a blank line, and within each group, each person's answers\r\nare on a single line. For example:\r\n\r\nabc\r\n\r\na\r\nb\r\nc\r\n\r\nab\r\nac\r\n\r\na\r\na\r\na\r\na\r\n\r\nb\r\n\r\nThis list represents answers from five groups:\r\n\r\n The first group contains one person who answered \"yes\" to 3 questions: a, b, and c.\r\n The second group contains three people; combined, they answered \"yes\" to 3 questions: a, b, and c.\r\n The third group contains two people; combined, they answered \"yes\" to 3 questions: a, b, and c.\r\n The fourth group contains four people; combined, they answered \"yes\" to only 1 question, a.\r\n The last group contains one person who answered \"yes\" to only 1 question, b.\r\n\r\nIn this example, the sum of these counts is 3 + 3 + 3 + 1 + 1 = 11.\r\n\r\nFor each group, count the number of questions to which anyone answered \"yes\". 
What is the sum of those counts?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence\r\n\r\n\r\ndef number_of_yes_in_group(group: Sequence[str]) -> int:\r\n \"\"\"\r\n Finds the number of yeses in a group.\r\n Since duplicate yeses are not counted, they are removed with \"set()\" function\r\n The number of yeses corresponds to the size of the group without duplicates.\r\n The function returns the number of yeses as described above.\r\n\r\n :param group: Sequence[str] - Answers from a group\r\n :return: int - Number of yeses in a group\r\n \"\"\"\r\n return len(set(group))\r\n\r\n\r\ndef make_input_convenient(raw_input: Sequence[str]) -> List[str]:\r\n \"\"\"\r\n Makes an input convenient for further processing.\r\n This is done by flattening out the list.\r\n The function returns a list of strings convenient for processing.\r\n\r\n\r\n :param raw_input: List[str] - A list of strings to be made convenient.\r\n :return: - List[str] - Convenient output\r\n \"\"\"\r\n convenient = []\r\n\r\n for lst in raw_input:\r\n for char in lst:\r\n convenient.append(char)\r\n\r\n return convenient\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [i.strip() for i in f.readlines()]\r\n\r\n groups: List[list] = []\r\n\r\n while \"\" in puzzle_input:\r\n index = puzzle_input.index(\"\")\r\n groups.append(make_input_convenient(puzzle_input[:index]))\r\n puzzle_input = puzzle_input[index + 1:]\r\n else:\r\n groups.append(make_input_convenient(puzzle_input))\r\n\r\n total_number_of_yeses = 0\r\n\r\n for group in groups:\r\n total_number_of_yeses += number_of_yes_in_group(group)\r\n\r\n print(total_number_of_yeses) # Answer = 6297\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6224663853645325,
"alphanum_fraction": 0.6855465173721313,
"avg_line_length": 37.69173049926758,
"blob_id": "99fc7d61797c2d8c6e2baa4be1791bacc34c828a",
"content_id": "70c3f72d04fa2682b4c956e9321fd6f801301beb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5279,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 133,
"path": "/Day 14/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 14: Docking Data ---\r\n\r\nAs your ferry approaches the sea port, the captain asks for your help again.\r\nThe computer system that runs this port isn't compatible with the docking program on the ferry, so the docking\r\nparameters aren't being correctly initialized in the docking program's memory.\r\n\r\nAfter a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its\r\ninitialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!\r\n\r\nThe initialization program (your puzzle input) can either update the bitmask or write a value to memory.\r\nValues and memory addresses are both 36-bit unsigned integers.\r\nFor example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.\r\n\r\nThe bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the\r\nleft and the least significant bit (2^0, that is, the 1s bit) on the right.\r\nThe current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the\r\ncorresponding bit in the value, while an X leaves the bit in the value unchanged.\r\n\r\nFor example, consider the following program:\r\n\r\nmask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\r\nmem[8] = 11\r\nmem[7] = 101\r\nmem[8] = 0\r\n\r\nThis program starts by specifying a bitmask (mask = ....).\r\nThe mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s\r\nbit is overwritten with 1.\r\n\r\nThe program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits,\r\nthe mask is applied as follows:\r\n\r\nvalue: 000000000000000000000000000000001011 (decimal 11)\r\nmask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\r\nresult: 000000000000000000000000000001001001 (decimal 73)\r\n\r\nSo, because of the mask, the value 73 is written to memory address 8 instead.\r\nThen, the program tries to write 101 to address 7:\r\n\r\nvalue: 000000000000000000000000000001100101 (decimal 101)\r\nmask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\r\nresult: 000000000000000000000000000001100101 (decimal 101)\r\n\r\nThis time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set.\r\nFinally, the program tries to write 0 to address 8:\r\n\r\nvalue: 000000000000000000000000000000000000 (decimal 0)\r\nmask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\r\nresult: 000000000000000000000000000001000000 (decimal 64)\r\n\r\n64 is written to address 8 instead, overwriting the value that was there previously.\r\n\r\nTo initialize your ferry's docking program, you need the sum of all values left in memory after the initialization\r\nprogram completes. (The entire 36-bit address space begins initialized to the value 0 at every address.)\r\nIn the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) -\r\nproducing a sum of 165.\r\n\r\nExecute the initialization program. 
What is the sum of all values left in memory after it completes?\r\n(Do not truncate the sum to 36 bits.)\r\n\r\n\"\"\"\r\nfrom collections import defaultdict\r\nfrom typing import List, Sequence\r\n\r\nmemory_block = defaultdict(int)\r\n\r\n\r\ndef binary_to_decimal(binary: str) -> int:\r\n \"\"\"Converts a binary (base 2) number to decimal (base 10) \"\"\"\r\n return int(\"0b\" + binary, base=2)\r\n\r\n\r\ndef execute_instruction(instruction_set: Sequence[str]) -> None:\r\n \"\"\"\r\n Writes values into the memory block\r\n \"\"\"\r\n mask = instruction_set[0].split(\"= \")[1]\r\n instructions = instruction_set[1:]\r\n for instruction in instructions:\r\n memory_address, value = instruction.split(\" = \")\r\n binary = bin(int(value))[2:]\r\n full_binary = (\"0\" * (len(mask) - len(binary))) + binary\r\n\r\n new_binary = \"\"\r\n for b, m in zip(full_binary, mask):\r\n if m == \"X\":\r\n new_binary += b\r\n continue\r\n elif m == b:\r\n new_binary += b\r\n continue\r\n elif (m == \"1\") and (b == \"0\"):\r\n new_binary += m\r\n elif (m == \"0\") and (b == \"1\"):\r\n new_binary += m\r\n\r\n decimal = binary_to_decimal(new_binary)\r\n memory_block[memory_address[3:]] = decimal\r\n\r\n\r\ndef parse_instruction_set(data: Sequence[str]) -> List[List[str]]:\r\n \"\"\"\r\n Groups masks and related memory instructions for convenience of further processing\r\n :param data: Puzzle input - List[str]\r\n :return: Convenient input - List[List[str]]\r\n \"\"\"\r\n convenient_input = []\r\n from_index_1 = data[1:]\r\n for line in from_index_1:\r\n if line.startswith(\"mask\"):\r\n convenient_input.append(data[:data.index(line)])\r\n data = data[data.index(line):]\r\n else: # For the test file\r\n convenient_input.append([line for line in data])\r\n\r\n return convenient_input\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n convenient_input = parse_instruction_set(puzzle_input)\r\n\r\n for group in convenient_input:\r\n execute_instruction(group)\r\n\r\n print(sum(list(memory_block.values()))) # Answer = 10_035_335_144_067\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5307530164718628,
"alphanum_fraction": 0.5482928156852722,
"avg_line_length": 43.08247375488281,
"blob_id": "54b9abb44ac8cf05122cfb73c35da13f3cca9f26",
"content_id": "69c2ab6c902cade8da89d10fa6d93eb4b850731e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8552,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 194,
"path": "/Day 18/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nYou manage to answer the child's questions and they finish part 1 of their homework\nbut get stuck when they reach the next section: advanced math.\n\nNow, addition and multiplication have different precedence levels, but they're not the ones you're familiar with.\nInstead, addition is evaluated before multiplication.\n\nFor example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are now as follows:\n\n1 + 2 * 3 + 4 * 5 + 6\n 3 * 3 + 4 * 5 + 6\n 3 * 7 * 5 + 6\n 3 * 7 * 11\n 21 * 11\n 231\n\nHere are the other examples from above:\n\n 1 + (2 * 3) + (4 * (5 + 6)) still becomes 51.\n 2 * 3 + (4 * 5) becomes 46.\n 5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 1445.\n 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 669060.\n ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 23340.\n\nWhat do you get if you add up the results of evaluating the homework problems using these new rules?\n\n\"\"\"\n# TODO : Complete the program\nfrom part_1 import evaluate_operation, extract_bracket\nimport part_1\n\nADDITION_OPERATOR = \"+\"\n\n\ndef evaluate_expression(expression: str) -> int:\n \"\"\"\n 1 + 2 * 3 + 4 * 5 + 6\n 1 + (2 * 3) + (4 * (5 + 6))\n 2 * 3 + (4 * 5)\n 5 + (8 * 3 + 9 + 3 * 4 * 3)\n 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))\n ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2\n Check if \"+\" is in expression\n If none is in the expression\n Evaluate the expression and return the answer\n Else\n Find the first \"+\"\n Obtain the left and right operands\n Evaluate them and replace the section of the operation with the value of the operation\n \"\"\"\n\n while ADDITION_OPERATOR in expression:\n print(f\"Before => {expression}\")\n expression = expression.split()\n\n addition_operator_index = expression.index(ADDITION_OPERATOR)\n left_operand_index = addition_operator_index - 1\n right_operand_index = addition_operator_index + 1\n left_operand = expression[left_operand_index]\n right_operand = expression[right_operand_index]\n\n if expression[0].startswith(\"(\"):\n bracket = extract_bracket(\" \".join(expression))\n value_of_expression = evaluate_expression(bracket)\n expression = str(value_of_expression) + \" \" + \" \".join(expression[len(bracket.split()):])\n print(f\"After => {expression}\")\n\n elif left_operand.isdigit() and right_operand.isdigit():\n value_of_expression = evaluate_operation(f\"{left_operand} + {right_operand}\")\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(value_of_expression) + \" \" \\\n + \" \".join(expression[right_operand_index + 1:])\n expression = expression.strip()\n print(f\"After => {expression}\")\n\n elif left_operand.isdigit() and right_operand.startswith(\"(\"):\n \"\"\"\n Extract the bracket to the right, evaluate it and return the result\n \"\"\"\n to_right = \" \".join(expression[right_operand_index:])\n bracket_to_right = extract_bracket(to_right)\n result_of_bracket = evaluate_expression(bracket_to_right)\n value_of_expression = evaluate_operation(f\"{left_operand} + {result_of_bracket}\")\n\n if to_right[len(bracket_to_right.split())].endswith(\"))\"):\n print(\"Yes\")\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(value_of_expression) + \") \" \\\n + \" \".join(expression[right_operand_index + len(bracket_to_right.split()):])\n else:\n\n print(\"No\")\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(value_of_expression) + \" \" \\\n + \" \".join(expression[right_operand_index + len(bracket_to_right.split()):])\n 
expression = expression.strip()\n\n print(f\"After => {expression}\")\n\n elif left_operand.endswith(\")\") and right_operand.isdigit():\n last_bracket_index: int\n for index, item in enumerate(expression):\n if item.startswith(\"(\"):\n last_bracket_index = index\n value = extract_bracket(\" \".join(expression[last_bracket_index:]))\n som = evaluate_expression(value)\n expression = \" \".join(expression[:left_operand_index - len(value.split()) + 1]) \\\n + \" \" + str(som) + \" \" \\\n + \" \".join(expression[addition_operator_index:])\n print(f\"After => {expression}\")\n\n elif left_operand.startswith(\"(\") and right_operand.startswith(\"(\"):\n bracket = extract_bracket(\" \".join(expression[left_operand_index:]))\n value_of_expression = evaluate_expression(bracket)\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(value_of_expression) + \" \" \\\n + \" \".join(expression[left_operand_index + len(bracket.split()):])\n print(f\"After => {expression}\")\n\n elif left_operand.endswith(\")\") and right_operand.startswith(\"(\"):\n last_bracket_index: int\n for index, item in enumerate(expression):\n if item.startswith(\"(\"):\n last_bracket_index = index\n\n left = extract_bracket(\" \".join(expression[last_bracket_index:]))\n right = extract_bracket(\" \".join(expression[right_operand_index:]))\n value_of_expression = evaluate_expression(f\"{left} + {right}\")\n expression = \" \".join(expression[:left_operand_index - len(left.split()) + 1]) \\\n + \" \" + str(value_of_expression) + \" \" \\\n + \" \".join(expression[right_operand_index + len(right.split()):])\n print(f\"After => {expression}\")\n\n elif left_operand.isdigit() and right_operand.endswith(\")\"):\n detailed_right_operand = list(right_operand)\n digits = \"\".join([x for x in detailed_right_operand if x.isdigit()])\n brackets = \"\".join([x for x in detailed_right_operand if not x.isdigit()])\n value_of_expression = evaluate_operation(f\"{left_operand} + {digits}\")\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(value_of_expression) + f\"{brackets} \" \\\n + \" \".join(expression[right_operand_index + 1:])\n\n elif left_operand.startswith(\"(\") and right_operand.endswith(\")\"):\n if left_operand.startswith(\"((\"):\n bracket = extract_bracket(\" \".join(expression[left_operand_index:right_operand_index + 1])[1:])\n else:\n bracket = extract_bracket(\" \".join(expression[left_operand_index:right_operand_index + 1]))\n result_of_bracket = evaluate_expression(bracket)\n expression = \" \".join(expression[:left_operand_index]) \\\n + \" \" + str(result_of_bracket) + \" \" \\\n + \" \".join(expression[right_operand_index + 1:])\n expression = expression.strip()\n\n if left_operand.startswith(\"((\"):\n expression = \"(\" + expression\n\n print(f\"After => {expression}\")\n\n elif left_operand.startswith(\"(\") and right_operand.isdigit():\n detailed_left_operand = list(left_operand)\n digits = \"\".join([x for x in detailed_left_operand if x.isdigit()])\n brackets = \"\".join([x for x in detailed_left_operand if not x.isdigit()])\n value_of_expression = evaluate_operation(f\"{digits} + {right_operand}\")\n expression = \" \".join(expression[:left_operand_index]) \\\n + f\" {brackets}\" + str(value_of_expression) + \" \" \\\n + \" \".join(expression[right_operand_index + 1:])\n expression = expression.strip()\n print(f\"After => {expression}\")\n\n return part_1.evaluate_expression(expression)\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = 
f.readlines()\n\n with open(\"./test.txt\") as f:\n puzzle_input = f.readlines()\n\n for line in puzzle_input:\n print(evaluate_expression(line.strip()))\n\n # sum_of_values = 0\n\n # for line in puzzle_input:\n # sum_of_values += evaluate_expression(line.strip())\n #\n # print(sum_of_values)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5818053483963013,
"alphanum_fraction": 0.639633297920227,
"avg_line_length": 20.870967864990234,
"blob_id": "c16cb43802ec7b00db21fbd834118c4bf6572361",
"content_id": "0ae4233a1dcec4a4ec2f3625b806cbe2f71ea21f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1418,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 62,
"path": "/Day 9/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nThe final step in breaking the XMAS encryption relies on the invalid number you just found: you must find a contiguous\r\nset of at least two numbers in your list which sum to the invalid number from step 1.\r\n\r\nAgain consider the above example:\r\n\r\n35\r\n20\r\n15\r\n25\r\n47\r\n40\r\n62\r\n55\r\n65\r\n95\r\n102\r\n117\r\n150\r\n182\r\n127\r\n219\r\n299\r\n277\r\n309\r\n576\r\n\r\nIn this list, adding up all of the numbers from 15 through 40 produces the invalid number from step 1, 127. (Of course,\r\nthe contiguous set of numbers in your actual list might be much longer.)\r\n\r\nTo find the encryption weakness, add together the smallest and largest number in this contiguous range; in this example,\r\nthese are 15 and 47, producing 62.\r\n\r\nWhat is the encryption weakness in your XMAS-encrypted list of numbers?\r\n\r\n\"\"\"\r\nimport part_1\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [int(line.strip()) for line in f.readlines()]\r\n\r\n target = part_1.main()\r\n\r\n contiguous_set = []\r\n\r\n from_line = 0\r\n for _ in range(1000):\r\n for i in range(from_line, len(puzzle_input)):\r\n contiguous_set.append(puzzle_input[i])\r\n if sum(contiguous_set) == target:\r\n return min(contiguous_set), max(contiguous_set)\r\n from_line += 1\r\n contiguous_set = []\r\n\r\n\r\nif __name__ == '__main__':\r\n min_, max_ = main()\r\n print(min_ + max_) # Answer = 219202240\r\n"
},
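The restart loop in part_2.py above rescans the list from every starting index. Because all values in the XMAS stream are positive, the same contiguous run can be found in a single pass with a two-pointer sliding window; a minimal sketch, where find_weakness is a hypothetical helper taking the parsed numbers and the part-1 target:

from typing import List, Optional, Tuple


def find_weakness(numbers: List[int], target: int) -> Optional[Tuple[int, int]]:
    # Grow the window on the right while the sum is too small and shrink it
    # from the left while it is too large; positive values guarantee that
    # both pointers only ever move forward.
    left, window_sum = 0, 0
    for right, value in enumerate(numbers):
        window_sum += value
        while window_sum > target and left < right:
            window_sum -= numbers[left]
            left += 1
        if window_sum == target and right > left:
            window = numbers[left:right + 1]
            return min(window), max(window)
    return None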
{
"alpha_fraction": 0.6334888339042664,
"alphanum_fraction": 0.6511145830154419,
"avg_line_length": 35.096153259277344,
"blob_id": "64fc3b68d93ac8bd0ebccb2d22596c82c4365078",
"content_id": "a7b0d18fad7ba3d85447d7d244fcc88d4ecd79f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3858,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 104,
"path": "/Day 8/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 8: Handheld Halting ---\r\n\r\nYour flight to the major airline hub reaches cruising altitude without incident.\r\nWhile you consider checking the in-flight menu for one of those drinks that come with a little umbrella,\r\nyou are interrupted by the kid sitting next to you.\r\n\r\nTheir handheld game console won't turn on! They ask if you can take a look.\r\n\r\nYou narrow the problem down to a strange infinite loop in the boot code (your puzzle input) of the device.\r\nYou should be able to fix it, but first you need to be able to run the code in isolation.\r\n\r\nThe boot code is represented as a text file with one instruction per line of text.\r\nEach instruction consists of an operation (acc, jmp, or nop) and an argument (a signed number like +4 or -20).\r\n\r\n acc increases or decreases a single global value called the accumulator by the value given in the argument.\r\n For example, acc +7 would increase the accumulator by 7. The accumulator starts at 0. After an acc instruction,\r\n the instruction immediately below it is executed next.\r\n jmp jumps to a new instruction relative to itself. The next instruction to execute is found using the argument as\r\n an offset from the jmp instruction; for example, jmp +2 would skip the next instruction, jmp +1 would continue to\r\n the instruction immediately below it, and jmp -20 would cause the instruction 20 lines above to be executed next.\r\n nop stands for No OPeration - it does nothing. The instruction immediately below it is executed next.\r\n\r\nFor example, consider the following program:\r\n\r\nnop +0\r\nacc +1\r\njmp +4\r\nacc +3\r\njmp -3\r\nacc -99\r\nacc +1\r\njmp -4\r\nacc +6\r\n\r\nThese instructions are visited in this order:\r\n\r\nnop +0 | 1\r\nacc +1 | 2, 8(!)\r\njmp +4 | 3\r\nacc +3 | 6\r\njmp -3 | 7\r\nacc -99 |\r\nacc +1 | 4\r\njmp -4 | 5\r\nacc +6 |\r\n\r\nFirst, the nop +0 does nothing. Then, the accumulator is increased from 0 to 1 (acc +1) and jmp +4 sets the next\r\ninstruction to the other acc +1 near the bottom. After it increases the accumulator from 1 to 2, jmp -4 executes,\r\nsetting the next instruction to the only acc +3. It sets the accumulator to 5, and jmp -3 causes the program to\r\ncontinue back at the first acc +1.\r\n\r\nThis is an infinite loop: with this sequence of jumps, the program will run forever. The moment the program tries to\r\nrun any instruction a second time, you know it will never terminate.\r\n\r\nImmediately before the program would run an instruction a second time, the value in the accumulator is 5.\r\n\r\nRun your copy of the boot code. 
Immediately before any instruction is executed a second time, what value is in the\r\naccumulator?\r\n\r\n\"\"\"\r\nfrom typing import List\r\n\r\n\r\ndef find_accumulator(data: List[List[str]]) -> int:\r\n\r\n accumulator = 0\r\n index = 0\r\n\r\n while True:\r\n instruction_set = data[index]\r\n if \"run\" in instruction_set:\r\n return accumulator\r\n else:\r\n instruction = instruction_set[0]\r\n signed_integer = instruction_set[1]\r\n sign = signed_integer[0]\r\n integer = int(signed_integer[1:])\r\n\r\n if instruction == \"nop\":\r\n index += 1\r\n elif (instruction == \"acc\") and (sign == \"+\"):\r\n accumulator += integer\r\n index += 1\r\n elif (instruction == \"acc\") and (sign == \"-\"):\r\n accumulator -= integer\r\n index += 1\r\n elif (instruction == \"jmp\") and (sign == \"+\"):\r\n index += integer\r\n elif (instruction == \"jmp\") and (sign == \"-\"):\r\n index -= integer\r\n\r\n instruction_set.append(\"run\")\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip().split() for line in f.readlines()]\r\n\r\n print(find_accumulator(puzzle_input)) # Answer = 1801\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6435391306877136,
"alphanum_fraction": 0.676002562046051,
"avg_line_length": 36.317073822021484,
"blob_id": "53cf7c389ec05915e2fc8540c280868b57f91429",
"content_id": "033397800b626dc4bbfacc9f8c43568fd1c57222",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1571,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 41,
"path": "/Day 3/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"--- Part Two ---\r\n\r\nTime to check the rest of the slopes - you need to minimize the probability of a sudden arboreal stop, after all.\r\n\r\nDetermine the number of trees you would encounter if, for each of the following slopes, you start at the top-left corner\r\nand traverse the map all the way to the bottom:\r\n\r\n Right 1, down 1.\r\n Right 3, down 1. (This is the slope you already checked.)\r\n Right 5, down 1.\r\n Right 7, down 1.\r\n Right 1, down 2.\r\n\r\nIn the above example, these slopes would find 2, 7, 3, 4, and 2 tree(s) respectively; multiplied together,\r\nthese produce the answer 336.\r\n\r\nWhat do you get if you multiply together the number of trees encountered on each of the listed slopes?\r\n\"\"\"\r\nfrom part_1 import encountered_tree\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n # Duplicate the lines side by side 78 times to improve the convenience of transversing the lines.\r\n # There is no specific reason for choosing 78. It just was convenient at the time.\r\n # To stay safe, use multiple of 13 greater than or equal to 78\r\n convenient_input = [line.strip() * (13 * 6) for line in puzzle_input]\r\n\r\n product = 1\r\n product *= encountered_tree(convenient_input, 1, 1)\r\n product *= encountered_tree(convenient_input, 3, 1)\r\n product *= encountered_tree(convenient_input, 5, 1)\r\n product *= encountered_tree(convenient_input, 7, 1)\r\n product *= encountered_tree(convenient_input, 1, 2)\r\n print(product) # Answer = 4385176320\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
},
{
"alpha_fraction": 0.6235827803611755,
"alphanum_fraction": 0.6326530575752258,
"avg_line_length": 26.45161247253418,
"blob_id": "f880b1a021a4265abce952b5c8279d07eba0ad33",
"content_id": "0b93d2e94d252952d6ece724a156b5749815b46e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 31,
"path": "/Day 5/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nDing! The \"fasten seat belt\" signs have turned on. Time to find your seat.\r\n\r\nIt's a completely full flight, so your seat should be the only missing boarding pass in your list.\r\nHowever, there's a catch: some of the seats at the very front and back of the plane don't exist on this aircraft,\r\nso they'll be missing from your list as well.\r\n\r\nYour seat wasn't at the very front or back, though; the seats with IDs +1 and -1 from yours will be in your list.\r\n\r\nWhat is the ID of your seat?\r\ns\r\n\"\"\"\r\n\r\nfrom part_1 import find_seat_id\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n seat_ids = [find_seat_id(seat.strip()) for seat in puzzle_input]\r\n\r\n for i in range(len(seat_ids)):\r\n if i not in seat_ids and i > 39:\r\n print(i) # Answer = 607\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6247043609619141,
"alphanum_fraction": 0.6651774048805237,
"avg_line_length": 31.973215103149414,
"blob_id": "21e90eb6ebc347b69b33da49574fbe23132ff22d",
"content_id": "f50ba80c75457f4cf994c8e7393126f3e9b4ca08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3805,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 112,
"path": "/Day 9/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 9: Encoding Error ---\r\n\r\nWith your neighbor happily enjoying their video game, you turn your attention to an open data port on the little screen\r\nin the seat in front of you.\r\n\r\nThough the port is non-standard, you manage to connect it to your computer through the clever use of several paperclips.\r\nUpon connection, the port outputs a series of numbers (your puzzle input).\r\n\r\nThe data appears to be encrypted with the eXchange-Masking Addition System (XMAS) which, conveniently for you,\r\nis an old cypher with an important weakness.\r\n\r\nXMAS starts by transmitting a preamble of 25 numbers. After that, each number you receive should be the sum of any two\r\nof the 25 immediately previous numbers. The two numbers will have different values,\r\nand there might be more than one such pair.\r\n\r\nFor example, suppose your preamble consists of the numbers 1 through 25 in a random order. To be valid,\r\nthe next number must be the sum of two of those numbers:\r\n\r\n 26 would be a valid next number, as it could be 1 plus 25 (or many other pairs, like 2 and 24).\r\n 49 would be a valid next number, as it is the sum of 24 and 25.\r\n 100 would not be valid; no two of the previous 25 numbers sum to 100.\r\n 50 would also not be valid; although 25 appears in the previous 25 numbers, the two numbers in the pair\r\n must be different.\r\n\r\nSuppose the 26th number is 45, and the first number (no longer an option, as it is more than 25 numbers ago) was 20.\r\nNow, for the next number to be valid, there needs to be some pair of numbers among 1-19, 21-25, or 45 that add up to it:\r\n\r\n 26 would still be a valid next number, as 1 and 25 are still within the previous 25 numbers.\r\n 65 would not be valid, as no two of the available numbers sum to it.\r\n 64 and 66 would both be valid, as they are the result of 19+45 and 21+45 respectively.\r\n\r\nHere is a larger example which only considers the previous 5 numbers (and has a preamble of length 5):\r\n\r\n35\r\n20\r\n15\r\n25\r\n47\r\n40\r\n62\r\n55\r\n65\r\n95\r\n102\r\n117\r\n150\r\n182\r\n127\r\n219\r\n299\r\n277\r\n309\r\n576\r\n\r\nIn this example, after the 5-number preamble, almost every number is the sum of two of the previous 5 numbers;\r\nthe only number that does not follow this rule is 127.\r\n\r\nThe first step of attacking the weakness in the XMAS data is to find the first number in the list (after the preamble)\r\nwhich is not the sum of two of the 25 numbers before it. 
What is the first number that does not have this property?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence\r\n\r\n\r\ndef permute(lst: Sequence[int]) -> List[tuple]:\r\n \"\"\"\r\n Returns a list of tuples of all permutations (without repeated values).\r\n Note:\r\n - Eg: (1, 3) and (3, 1) are considered repeated and hence only one will be added to the list.\r\n :param lst:\r\n :return:\r\n \"\"\"\r\n permuted = []\r\n for i in lst:\r\n for j in lst:\r\n if ((i, j) not in lst) and ((j, i) not in lst):\r\n permuted.append((i, j))\r\n return permuted\r\n\r\n\r\ndef number_in_sum(num: int, lst: Sequence[int]) -> bool:\r\n \"\"\"\r\n Returns True if :param num is equal to the sum of any of the permutations in :param lst.\r\n Else, it returns false.\r\n :return: bool\r\n \"\"\"\r\n to_be_matched = permute(lst)\r\n for i, j in to_be_matched:\r\n if (i + j) == num:\r\n return True\r\n return False\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [int(line.strip()) for line in f.readlines()]\r\n\r\n preamble = 25\r\n start = 0\r\n stop = start + 25\r\n for line in puzzle_input[preamble:]:\r\n if number_in_sum(line, puzzle_input[start:stop]):\r\n start += 1\r\n stop += 1\r\n continue\r\n else:\r\n return line\r\n\r\n\r\nif __name__ == '__main__':\r\n print(main()) # Answer = 1639024365\r\n"
},
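The fixed permute above still performs an O(n^2) membership check against the growing result list. itertools.combinations yields each unordered pair of positions exactly once, so number_in_sum can be written without materialising the pairs at all; a sketch under the same distinct-values rule (an equivalent rewrite, not the repo's own code):

from itertools import combinations
from typing import Sequence


def number_in_sum(num: int, window: Sequence[int]) -> bool:
    # combinations() walks unordered pairs of positions; the a != b check
    # keeps the puzzle's rule that the two values must differ.
    return any(a + b == num for a, b in combinations(window, 2) if a != b)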
{
"alpha_fraction": 0.593638002872467,
"alphanum_fraction": 0.6552419066429138,
"avg_line_length": 24.803468704223633,
"blob_id": "d2c67f9c0a7a5a6d36e9dda3c0422ae510a5759e",
"content_id": "b6918526f80bf5cbbbcf9d13bb322bfbd1c82881",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4464,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 173,
"path": "/Day 22/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 22: Crab Combat ---\n\nIt only takes a few hours of sailing the ocean on a raft for boredom to sink in.\nFortunately, you brought a small deck of space cards!\nYou'd like to play a game of Combat, and there's even an opponent available:\na small crab that climbed aboard your raft before you left.\n\nFortunately, it doesn't take long to teach the crab the rules.\n\nBefore the game starts, split the cards so each player has their own deck (your puzzle input).\nThen, the game consists of a series of rounds: both players draw their top card, and the player with the higher-valued\ncard wins the round. The winner keeps both cards, placing them on the bottom of their own deck so that the winner's\ncard is above the other card. If this causes a player to have all of the cards, they win, and the game ends.\n\nFor example, consider the following starting decks:\n\nPlayer 1:\n9\n2\n6\n3\n1\n\nPlayer 2:\n5\n8\n4\n7\n10\n\nThis arrangement means that player 1's deck contains 5 cards, with 9 on top and 1 on the bottom;\nplayer 2's deck also contains 5 cards, with 5 on top and 10 on the bottom.\n\nThe first round begins with both players drawing the top card of their decks: 9 and 5. Player 1 has the higher card,\nso both cards move to the bottom of player 1's deck such that 9 is above 5.\nIn total, it takes 29 rounds before a player has all of the cards:\n\n-- Round 1 --\nPlayer 1's deck: 9, 2, 6, 3, 1\nPlayer 2's deck: 5, 8, 4, 7, 10\nPlayer 1 plays: 9\nPlayer 2 plays: 5\nPlayer 1 wins the round!\n\n-- Round 2 --\nPlayer 1's deck: 2, 6, 3, 1, 9, 5\nPlayer 2's deck: 8, 4, 7, 10\nPlayer 1 plays: 2\nPlayer 2 plays: 8\nPlayer 2 wins the round!\n\n-- Round 3 --\nPlayer 1's deck: 6, 3, 1, 9, 5\nPlayer 2's deck: 4, 7, 10, 8, 2\nPlayer 1 plays: 6\nPlayer 2 plays: 4\nPlayer 1 wins the round!\n\n-- Round 4 --\nPlayer 1's deck: 3, 1, 9, 5, 6, 4\nPlayer 2's deck: 7, 10, 8, 2\nPlayer 1 plays: 3\nPlayer 2 plays: 7\nPlayer 2 wins the round!\n\n-- Round 5 --\nPlayer 1's deck: 1, 9, 5, 6, 4\nPlayer 2's deck: 10, 8, 2, 7, 3\nPlayer 1 plays: 1\nPlayer 2 plays: 10\nPlayer 2 wins the round!\n\n...several more rounds pass...\n\n-- Round 27 --\nPlayer 1's deck: 5, 4, 1\nPlayer 2's deck: 8, 9, 7, 3, 2, 10, 6\nPlayer 1 plays: 5\nPlayer 2 plays: 8\nPlayer 2 wins the round!\n\n-- Round 28 --\nPlayer 1's deck: 4, 1\nPlayer 2's deck: 9, 7, 3, 2, 10, 6, 8, 5\nPlayer 1 plays: 4\nPlayer 2 plays: 9\nPlayer 2 wins the round!\n\n-- Round 29 --\nPlayer 1's deck: 1\nPlayer 2's deck: 7, 3, 2, 10, 6, 8, 5, 9, 4\nPlayer 1 plays: 1\nPlayer 2 plays: 7\nPlayer 2 wins the round!\n\n\n== Post-game results ==\nPlayer 1's deck:\nPlayer 2's deck: 3, 2, 10, 6, 8, 5, 9, 4, 7, 1\n\nOnce the game ends, you can calculate the winning player's score.\nThe bottom card in their deck is worth the value of the card multiplied by 1, the second-from-the-bottom card is worth\nthe value of the card multiplied by 2, and so on.\nWith 10 cards, the top card is worth the value on the card multiplied by 10.\nIn this example, the winning player's score is:\n\n 3 * 10\n+ 2 * 9\n+ 10 * 8\n+ 6 * 7\n+ 8 * 6\n+ 5 * 5\n+ 9 * 4\n+ 4 * 3\n+ 7 * 2\n+ 1 * 1\n= 306\n\nSo, once the game ends, the winning player's score is 306.\n\nPlay the small crab in a game of Combat using the two decks you just dealt. 
What is the winning player's score?\n\n\"\"\"\nfrom typing import List, Sequence, Tuple\n\n\ndef find_winner(player1: Sequence[int], player2: Sequence[int]) -> List[int]:\n while player1 and player2:\n a = player1[0]\n b = player2[0]\n\n if a > b:\n player1 = player1[1:] + [a, b]\n player2 = player2[1:]\n else:\n player2 = player2[1:] + [b, a]\n player1 = player1[1:]\n\n winner = player1 if player1 else player2\n\n return winner\n\n\ndef sum_winner_score(winner: Sequence[int]) -> int:\n return sum(winner[x] * (len(winner) - x) for x in range(len(winner)))\n\n\ndef find_players_deck(data: Sequence[str]) -> Tuple[List[int], List[int]]:\n broken = False\n player1, player2 = [], []\n for line in data:\n if not line.rstrip():\n broken = True\n elif broken and line[0].isdigit():\n player2.append(int(line))\n elif line[0].isdigit():\n player1.append(int(line))\n return player1, player2\n\n\ndef main():\n with open('./input.txt') as f:\n puzzle_input = f.readlines()\n\n player1, player2 = find_players_deck(puzzle_input)\n\n winner = find_winner(player1, player2)\n print(sum_winner_score(winner)) # Answer = 32495\n\n\nif __name__ == '__main__':\n main()\n"
},
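find_winner above copies both decks through list slicing on every round. Since each round only pops from the top and appends to the bottom, collections.deque gives O(1) draws; a sketch of the same game loop (an equivalent rewrite, not the file's own code):

from collections import deque
from typing import List, Sequence


def find_winner(player1: Sequence[int], player2: Sequence[int]) -> List[int]:
    deck1, deck2 = deque(player1), deque(player2)
    while deck1 and deck2:
        a, b = deck1.popleft(), deck2.popleft()
        # The round's winner stacks the higher card above the lower one.
        if a > b:
            deck1.extend((a, b))
        else:
            deck2.extend((b, a))
    return list(deck1 or deck2)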
{
"alpha_fraction": 0.7257600426673889,
"alphanum_fraction": 0.7479457855224609,
"avg_line_length": 45.36190414428711,
"blob_id": "608571247662b7263d009c74d91112ffbccc9773",
"content_id": "d3ed7535824c5516395286cf924c68731035f327",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4868,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 105,
"path": "/Day 25/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 25: Combo Breaker ---\n\nYou finally reach the check-in desk.\nUnfortunately, their registration systems are currently offline, and they cannot check you in.\nNoticing the look on your face, they quickly add that tech support is already on the way!\nThey even created all the room keys this morning; you can take yours now and give them your room deposit once the\nregistration system comes back online.\n\nThe room key is a small RFID card. Your room is on the 25th floor and the elevators are also temporarily out of service,\nso it takes what little energy you have left to even climb the stairs and navigate the halls.\nYou finally reach the door to your room, swipe your card, and - beep - the light turns red.\n\nExamining the card more closely, you discover a phone number for tech support.\n\n\"Hello! How can we help you today?\" You explain the situation.\n\n\"Well, it sounds like the card isn't sending the right command to unlock the door.\nIf you go back to the check-in desk, surely someone there can reset it for you.\"\nStill catching your breath, you describe the status of the elevator and the exact number of stairs you just had to climb\n\n\"I see! Well, your only other option would be to reverse-engineer the cryptographic handshake the card does with the\ndoor and then inject your own commands into the data stream, but that's definitely impossible.\"\nYou thank them for their time.\n\nUnfortunately for the door, you know a thing or two about cryptographic handshakes.\n\nThe handshake used by the card and the door involves an operation that transforms a subject number.\nTo transform a subject number, start with the value 1.\nThen, a number of times called the loop size, perform the following steps:\n\n Set the value to itself multiplied by the subject number.\n Set the value to the remainder after dividing the value by 20201227.\n\nThe card always uses a specific, secret loop size when it transforms a subject number.\nThe door always uses a different, secret loop size.\n\nThe cryptographic handshake works like this:\n\n The card transforms the subject number of 7 according to the card's secret loop size.\n The result is called the card's public key.\n The door transforms the subject number of 7 according to the door's secret loop size.\n The result is called the door's public key.\n The card and door use the wireless RFID signal to transmit the two public keys (your puzzle input) to the\n other device. 
Now, the card has the door's public key, and the door has the card's public key.\n Because you can eavesdrop on the signal, you have both public keys, but neither device's loop size.\n The card transforms the subject number of the door's public key according to the card's loop size.\n The result is the encryption key.\n The door transforms the subject number of the card's public key according to the door's loop size.\n The result is the same encryption key as the card calculated.\n\nIf you can use the two public keys to determine each device's loop size, you will have enough information to\ncalculate the secret encryption key that the card and door use to communicate; this would let you send the unlock\ncommand directly to the door!\n\nFor example, suppose you know that the card's public key is 5764801.\nWith a little trial and error, you can work out that the card's loop size must be 8, because transforming the initial\nsubject number of 7 with a loop size of 8 produces 5764801.\n\nThen, suppose you know that the door's public key is 17807724.\nBy the same process, you can determine that the door's loop size is 11, because transforming the initial subject\nnumber of 7 with a loop size of 11 produces 17807724.\n\nAt this point, you can use either device's loop size with the other device's public key to calculate the encryption key.\nTransforming the subject number of 17807724 (the door's public key) with a loop size of 8 (the card's loop size)\nproduces the encryption key, 14897079. (Transforming the subject number of 5764801 (the card's public key) with a loop\nsize of 11 (the door's loop size) produces the same encryption key: 14897079.)\n\nWhat encryption key is the handshake trying to establish?\n\n\"\"\"\n\n\ndef find_encryption_key(number, loop_size):\n value = 1\n\n for _ in range(loop_size):\n value *= number\n value = value % 20201227\n\n return value\n\n\ndef find_loop_size(number):\n loop_size = 0\n subject_number = 7\n value = 1\n while True:\n value *= subject_number\n value = value % 20201227\n loop_size += 1\n if value == number:\n return loop_size\n\n\ndef main():\n with open(\"./input.txt\") as f:\n card_public_key, door_public_key = [int(key.strip()) for key in f.readlines()]\n\n door_loop_size = find_loop_size(door_public_key)\n print(find_encryption_key(card_public_key, door_loop_size))\n\n\nif __name__ == \"__main__\":\n main()\n"
},
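The transform loop in find_encryption_key above is modular exponentiation, so once a loop size is known, Python's three-argument pow computes the key directly; a sketch using the puzzle's modulus 20201227:

def find_encryption_key(number: int, loop_size: int) -> int:
    # Transforming a subject number loop_size times is
    # number ** loop_size modulo 20201227.
    return pow(number, loop_size, 20201227)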
{
"alpha_fraction": 0.5289392471313477,
"alphanum_fraction": 0.533470630645752,
"avg_line_length": 23.552631378173828,
"blob_id": "f683fea2b6d82c35921fccc7dee002ef79d6b882",
"content_id": "0abea6bf1824983f82c02537d7aaa6369ba40e71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4855,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 190,
"path": "/Day 11/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nAs soon as people start to arrive, you realize your mistake. People don't just care about adjacent seats\r\nThey care about the first seat they can see in each of those eight directions!\r\n\r\nNow, instead of considering just the eight immediately adjacent seats, consider the first seat in each of those eight\r\ndirections. For example, the empty seat below would see eight occupied seats:\r\n\r\n.......#.\r\n...#.....\r\n.#.......\r\n.........\r\n..#L....#\r\n....#....\r\n.........\r\n#........\r\n...#.....\r\n\r\nThe leftmost empty seat below would only see one empty seat, but cannot see any of the occupied ones:\r\n\r\n.............\r\n.L.L.#.#.#.#.\r\n.............\r\n\r\nThe empty seat below would see no occupied seats:\r\n\r\n.##.##.\r\n#.#.#.#\r\n##...##\r\n...L...\r\n##...##\r\n#.#.#.#\r\n.##.##.\r\n\r\nAlso, people seem to be more tolerant than you expected: it now takes five or more visible occupied seats for an\r\noccupied seat to become empty (rather than four or more from the previous rules). The other rules still apply:\r\nempty seats that see no occupied seats become occupied, seats matching no rule don't change, and floor never changes.\r\n\r\nGiven the same starting layout as above, these new rules cause the seating area to shift around as follows:\r\n\r\nL.LL.LL.LL\r\nLLLLLLL.LL\r\nL.L.L..L..\r\nLLLL.LL.LL\r\nL.LL.LL.LL\r\nL.LLLLL.LL\r\n..L.L.....\r\nLLLLLLLLLL\r\nL.LLLLLL.L\r\nL.LLLLL.LL\r\n\r\n#.##.##.##\r\n#######.##\r\n#.#.#..#..\r\n####.##.##\r\n#.##.##.##\r\n#.#####.##\r\n..#.#.....\r\n##########\r\n#.######.#\r\n#.#####.##\r\n\r\n#.LL.LL.L#\r\n#LLLLLL.LL\r\nL.L.L..L..\r\nLLLL.LL.LL\r\nL.LL.LL.LL\r\nL.LLLLL.LL\r\n..L.L.....\r\nLLLLLLLLL#\r\n#.LLLLLL.L\r\n#.LLLLL.L#\r\n\r\n#.L#.##.L#\r\n#L#####.LL\r\nL.#.#..#..\r\n##L#.##.##\r\n#.##.#L.##\r\n#.#####.#L\r\n..#.#.....\r\nLLL####LL#\r\n#.L#####.L\r\n#.L####.L#\r\n\r\n#.L#.L#.L#\r\n#LLLLLL.LL\r\nL.L.L..#..\r\n##LL.LL.L#\r\nL.LL.LL.L#\r\n#.LLLLL.LL\r\n..L.L.....\r\nLLLLLLLLL#\r\n#.LLLLL#.L\r\n#.L#LL#.L#\r\n\r\n#.L#.L#.L#\r\n#LLLLLL.LL\r\nL.L.L..#..\r\n##L#.#L.L#\r\nL.L#.#L.L#\r\n#.L####.LL\r\n..#.#.....\r\nLLL###LLL#\r\n#.LLLLL#.L\r\n#.L#LL#.L#\r\n\r\n#.L#.L#.L#\r\n#LLLLLL.LL\r\nL.L.L..#..\r\n##L#.#L.L#\r\nL.L#.LL.L#\r\n#.LLLL#.LL\r\n..#.L.....\r\nLLL###LLL#\r\n#.LLLLL#.L\r\n#.L#LL#.L#\r\n\r\nAgain, at this point, people stop shifting around and the seating area reaches equilibrium. 
Once this occurs,\r\nyou count 26 occupied seats.\r\n\r\nGiven the new visibility method and the rule change for occupied seats becoming empty, once equilibrium is reached,\r\nhow many seats end up occupied?\r\n\r\n\"\"\"\r\nfrom typing import List\r\n\r\n\r\ndef switches(matrix):\r\n new_matrix = []\r\n for (i, x) in enumerate(matrix):\r\n new_row = []\r\n for (j, y) in enumerate(x):\r\n left = list(reversed(x[:j]))\r\n right = x[j + 1:]\r\n up = [matrix[i - k][j] for k in range(1, i + 1)]\r\n down = [matrix[k][j] for k in range(i + 1, len(matrix))]\r\n up_left = list(filter(lambda g: g, [matrix[i - k][j - k] if j - k >= 0 else None for k in range(1, i + 1)]))\r\n up_right = list(\r\n filter(lambda g: g, [matrix[i - k][j + k] if j + k < len(x) else None for k in range(1, i + 1)]))\r\n down_left = list(filter(\r\n lambda g: g, [matrix[i + k][j - k] if j - k >= 0 else None for k in range(1, len(matrix) - i)]))\r\n down_right = list(filter(\r\n lambda g: g, [matrix[i + k][j + k] if j + k < len(x) else None for k in range(1, len(matrix) - i)]))\r\n directions = [left, right, up, down, up_left, up_right, down_left, down_right]\r\n first_seats = []\r\n for d in directions:\r\n first_seat = [f for f in d if f != \".\"][0:1]\r\n first_seats.append(\"\".join(first_seat))\r\n\r\n if (first_seats.count(\"#\") >= 5) and (y == \"#\"):\r\n new_row.append(\"L\")\r\n elif (first_seats.count(\"#\") == 0) and (y == \"L\"):\r\n new_row.append(\"#\")\r\n else:\r\n new_row.append(y)\r\n new_matrix.append(new_row)\r\n return new_matrix\r\n\r\n\r\ndef switch_loop(matrix: List[List[str]], old_matrix: List[list]) -> List[List[str]]:\r\n \"\"\"\r\n Loops through permutations until we get the same seating arrangement twice.\r\n \"\"\"\r\n if matrix == old_matrix:\r\n return matrix\r\n else:\r\n return switch_loop(switches(matrix), matrix)\r\n\r\n\r\ndef occupied_seats(data: List[str]) -> int:\r\n \"\"\"\r\n Makes an array of arrays of every seat on the diagonal then it finds the first seat in each seat array.\r\n Then it counts the number of occupied seats and updates.\r\n \"\"\"\r\n matrix = [list(x) for x in data]\r\n final_matrix = switch_loop(switches(matrix), matrix)\r\n\r\n return sum(x.count(\"#\") for x in final_matrix)\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n print(occupied_seats(puzzle_input)) # Answer = 2144s\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6525768637657166,
"alphanum_fraction": 0.6778932809829712,
"avg_line_length": 32.29457473754883,
"blob_id": "00f58a561ff8da9e9dc2b2a3feb19f57aef265cf",
"content_id": "2d8067cef70d2a98f79166ba13ccea41d6f474e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4424,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 129,
"path": "/Day 4/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 4: Passport Processing ---\r\n\r\nYou arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport.\r\nWhile these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore\r\naren't actually valid documentation for travel in most of the world.\r\n\r\nIt seems like you're not the only one having problems, though; a very long line has formed for the automatic passport\r\nscanners, and the delay could upset your travel itinerary.\r\n\r\nDue to some questionable network security, you realize you might be able to solve both of these problems\r\nat the same time.\r\n\r\nThe automatic passport scanners are slow because they're having trouble detecting which passports have\r\nall required fields. The expected fields are as follows:\r\n\r\n byr (Birth Year)\r\n iyr (Issue Year)\r\n eyr (Expiration Year)\r\n hgt (Height)\r\n hcl (Hair Color)\r\n ecl (Eye Color)\r\n pid (Passport ID)\r\n cid (Country ID)\r\n\r\nPassport data is validated in batch files (your puzzle input). Each passport is represented as a sequence\r\nof key:value pairs separated by spaces or newlines. Passports are separated by blank lines.\r\n\r\nHere is an example batch file containing four passports:\r\n\r\necl:gry pid:860033327 eyr:2020 hcl:#fffffd\r\nbyr:1937 iyr:2017 cid:147 hgt:183cm\r\n\r\niyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\r\nhcl:#cfa07d byr:1929\r\n\r\nhcl:#ae17e1 iyr:2013\r\neyr:2024\r\necl:brn pid:760753108 byr:1931\r\nhgt:179cm\r\n\r\nhcl:#cfa07d eyr:2025 pid:166559648\r\niyr:2011 ecl:brn hgt:59in\r\n\r\nThe first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt\r\n(the Height field).\r\n\r\nThe third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials,\r\nnot a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields.\r\nTreat this \"passport\" as valid.\r\n\r\nThe fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not,\r\nso this passport is invalid.\r\n\r\nAccording to the above rules, your improved system would report 2 valid passports.\r\n\r\nCount the number of valid passports - those that have all required fields. 
Treat cid as optional.\r\nIn your batch file, how many passports are valid?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence, Dict\r\n\r\n\r\ndef group_passport_data(data: Sequence[str]) -> Dict[str, str]:\r\n \"\"\"\r\n Groups a list of string values into a dictionary.\r\n It splits the data by spaces and a colon(:) and generates a key-value pair.\r\n Returns a dictionary of passport information.\r\n\r\n :param data: list - Data to be added to the dictionary.\r\n :return: dict - A dictionary of passport info.\r\n \"\"\"\r\n passport = {}\r\n\r\n for string in data:\r\n for group in string.split():\r\n item = group.split(\":\")\r\n property_ = item[0]\r\n value = item[1]\r\n passport[property_] = value\r\n\r\n return passport\r\n\r\n\r\ndef passport_is_valid(passport: Dict[str, str]) -> bool:\r\n \"\"\"\r\n Checks if a passport is valid or not.\r\n A passport is valid if it has all of the following keys:\r\n \"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"\r\n A passport can also have a \"cid\" key but it is not a requirement for a passport to be valid.\r\n The function returns True if a password is valid and False if it is not.\r\n\r\n :param passport: dict - Information about a passport\r\n :return: bool\r\n \"\"\"\r\n passport_fields = (\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\", \"cid\")\r\n\r\n required = passport_fields[:-1] # cid is optional (not required).\r\n\r\n for field in required:\r\n if field not in passport.keys():\r\n return False\r\n return True\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n content: List[str] = [line.strip() for line in f.readlines()]\r\n\r\n passports = []\r\n\r\n while \"\" in content:\r\n index = content.index(\"\")\r\n passports.append(group_passport_data(content[:index]))\r\n content = content[index + 1:]\r\n else:\r\n passports.append(group_passport_data(content))\r\n\r\n number_of_valid_passports = 0\r\n\r\n for passport_info in passports:\r\n if passport_is_valid(passport_info):\r\n number_of_valid_passports += 1\r\n\r\n print(number_of_valid_passports) # Answer = 202\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6344361305236816,
"alphanum_fraction": 0.6677539348602295,
"avg_line_length": 44.15053939819336,
"blob_id": "d374ff25d4fde876ad29f3ab7323475efc4aeec0",
"content_id": "f603d59dfe2c9365df9397f927bff1caa44fb3b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4292,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 93,
"path": "/Day 12/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Part Two ---\r\n\r\nBefore you can give the destination to the captain, you realize that the actual action meanings were printed on the\r\nback of the instructions the whole time.\r\n\r\nAlmost all of the actions indicate how to move a waypoint which is relative to the ship's position:\r\n\r\n Action N means to move the waypoint north by the given value.\r\n Action S means to move the waypoint south by the given value.\r\n Action E means to move the waypoint east by the given value.\r\n Action W means to move the waypoint west by the given value.\r\n Action L means to rotate the waypoint around the ship left (counter-clockwise) the given number of degrees.\r\n Action R means to rotate the waypoint around the ship right (clockwise) the given number of degrees.\r\n Action F means to move forward to the waypoint a number of times equal to the given value.\r\n\r\nThe waypoint starts 10 units east and 1 unit north relative to the ship. The waypoint is relative to the ship; that is,\r\nif the ship moves, the waypoint moves with it.\r\n\r\nFor example, using the same instructions as above:\r\n\r\n F10 moves the ship to the waypoint 10 times (a total of 100 units east and 10 units north), leaving the ship at\r\n east 100, north 10. The waypoint stays 10 units east and 1 unit north of the ship.\r\n N3 moves the waypoint 3 units north to 10 units east and 4 units north of the ship.\r\n The ship remains at east 100, north 10.\r\n F7 moves the ship to the waypoint 7 times (a total of 70 units east and 28 units north),\r\n leaving the ship at east 170, north 38. The waypoint stays 10 units east and 4 units north of the ship.\r\n R90 rotates the waypoint around the ship clockwise 90 degrees, moving it to 4 units east and 10 units south\r\n of the ship. The ship remains at east 170, north 38.\r\n F11 moves the ship to the waypoint 11 times (a total of 44 units east and 110 units south), leaving the ship at east\r\n 214, south 72. The waypoint stays 4 units east and 10 units south of the ship.\r\n\r\nAfter these operations, the ship's Manhattan distance from its starting position is 214 + 72 = 286.\r\n\r\nFigure out where the navigation instructions actually lead. 
What is the Manhattan distance between that location and\r\nthe ship's starting position?\r\n\r\n\"\"\"\r\nfrom typing import Tuple, Sequence\r\n\r\n\r\ndef move_waypoint(direction: str, amount: int, location: Tuple[int, int]) -> Tuple[int, int]:\r\n if direction == \"N\":\r\n return location[0], location[1] + amount\r\n if direction == \"S\":\r\n return location[0], location[1] - amount\r\n if direction == \"E\":\r\n return location[0] + amount, location[1]\r\n if direction == \"W\":\r\n return location[0] - amount, location[1]\r\n\r\n\r\ndef turns(direction: Tuple[str, int], location: Tuple[int, int]) -> Tuple[int, int]:\r\n \"\"\"\r\n Rotating the waypoint around the ship is just negating the coordinates in the case of 180,\r\n or flipping both and negating one if rotation is a quarter turn.\r\n \"\"\"\r\n degree = direction[1] if (direction[0] == \"R\") else -direction[1]\r\n if abs(degree) == 180:\r\n return - location[0], - location[1]\r\n elif (degree == 90) or (degree == -270):\r\n return location[1], - location[0]\r\n elif (degree == -90) or (degree == 270):\r\n return - location[1], location[0]\r\n\r\n\r\ndef waypoint_movements(data: Sequence[str]) -> int:\r\n directions = [(d[:1], int(d[1:])) for d in data]\r\n origin = (0, 0)\r\n waypoint = (10, 1)\r\n location = (0, 0)\r\n\r\n for direction in directions:\r\n if (direction[0] == \"R\") or (direction[0] == \"L\"):\r\n waypoint = turns(direction, waypoint)\r\n # \"F\" now moves towards the waypoint from the current location, multiplied by the number in the direction.\r\n elif direction[0] == \"F\":\r\n location = (location[0] + direction[1] * waypoint[0]), (location[1] + direction[1] * waypoint[1])\r\n else:\r\n waypoint = move_waypoint(direction[0], direction[1], waypoint)\r\n\r\n return abs(origin[0] - location[0]) + abs(origin[1] - location[1])\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [line.strip() for line in f.readlines()]\r\n\r\n print(waypoint_movements(puzzle_input)) # Answer = 22848\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
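The quarter-turn case analysis in turns above follows from viewing the waypoint as a complex number: a 90-degree left rotation is multiplication by 1j and a right rotation by -1j. A sketch of the same rotation under that view (rotate is a hypothetical stand-in, not the file's own helper, and assumes the non-negative multiples of 90 used by the puzzle input):

def rotate(waypoint: complex, direction: str, degrees: int) -> complex:
    # Each quarter turn multiplies by 1j (left) or -1j (right).
    quarter_turns = (degrees // 90) % 4
    factor = 1j if direction == "L" else -1j
    return waypoint * factor ** quarter_turns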
{
"alpha_fraction": 0.6779900789260864,
"alphanum_fraction": 0.7166784405708313,
"avg_line_length": 31.91200065612793,
"blob_id": "cc831e2473dc0e21a11b49154a5c891549781355",
"content_id": "b855455e4c435b52500776c6f39a530485d82d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4239,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 125,
"path": "/Day 10/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 10: Adapter Array ---\r\n\r\nPatched into the aircraft's data port, you discover weather forecasts of a massive tropical storm. Before you can figure\r\nout whether it will impact your vacation plans, however, your device suddenly turns off!\r\n\r\nIts battery is dead.\r\n\r\nYou'll need to plug it in. There's only one problem: the charging outlet near your seat produces the wrong number\r\nof jolts. Always prepared, you make a list of all of the joltage adapters in your bag.\r\n\r\nEach of your joltage adapters is rated for a specific output joltage (your puzzle input). Any given adapter can take an\r\ninput 1, 2, or 3 jolts lower than its rating and still produce its rated output joltage.\r\n\r\nIn addition, your device has a built-in joltage adapter rated for 3 jolts higher than the highest-rated adapter in your\r\nbag. (If your adapter list were 3, 9, and 6, your device's built-in adapter would be rated for 12 jolts.)\r\n\r\nTreat the charging outlet near your seat as having an effective joltage rating of 0.\r\n\r\nSince you have some time to kill, you might as well test all of your adapters. Wouldn't want to get to your resort and\r\nrealize you can't even charge your device!\r\n\r\nIf you use every adapter in your bag at once, what is the distribution of joltage differences between the charging\r\noutlet, the adapters, and your device?\r\n\r\nFor example, suppose that in your bag, you have adapters with the following joltage ratings:\r\n\r\n16\r\n10\r\n15\r\n5\r\n1\r\n11\r\n7\r\n19\r\n6\r\n12\r\n4\r\n\r\nWith these adapters, your device's built-in joltage adapter would be rated for 19 + 3 = 22 jolts, 3 higher than the\r\nhighest-rated adapter.\r\n\r\nBecause adapters can only connect to a source 1-3 jolts lower than its rating, in order to use every adapter, you'd need\r\nto choose them like this:\r\n\r\n The charging outlet has an effective rating of 0 jolts, so the only adapters that could connect to it directly would\r\n need to have a joltage rating of 1, 2, or 3 jolts. Of these, only one you have is an adapter rated 1 jolt\r\n (difference of 1).\r\n From your 1-jolt rated adapter, the only choice is your 4-jolt rated adapter (difference of 3).\r\n From the 4-jolt rated adapter, the adapters rated 5, 6, or 7 are valid choices. 
However, in order to not skip any\r\n adapters, you have to pick the adapter rated 5 jolts (difference of 1).\r\n Similarly, the next choices would need to be the adapter rated 6 and then the adapter rated 7\r\n (with difference of 1 and 1).\r\n The only adapter that works with the 7-jolt rated adapter is the one rated 10 jolts (difference of 3).\r\n From 10, the choices are 11 or 12; choose 11 (difference of 1) and then 12 (difference of 1).\r\n After 12, only valid adapter has a rating of 15 (difference of 3), then 16 (difference of 1), then 19\r\n (difference of 3).\r\n Finally, your device's built-in adapter is always 3 higher than the highest adapter, so its rating is 22 jolts\r\n (always a difference of 3).\r\n\r\nIn this example, when using every adapter, there are 7 differences of 1 jolt and 5 differences of 3 jolts.\r\n\r\nHere is a larger example:\r\n\r\n28\r\n33\r\n18\r\n42\r\n31\r\n14\r\n46\r\n20\r\n48\r\n47\r\n24\r\n23\r\n49\r\n45\r\n19\r\n38\r\n39\r\n11\r\n1\r\n32\r\n25\r\n35\r\n8\r\n17\r\n7\r\n9\r\n4\r\n2\r\n34\r\n10\r\n3\r\n\r\nIn this larger example, in a chain that uses all of the adapters, there are 22 differences of 1 jolt and 10 differences\r\nof 3 jolts.\r\n\r\nFind a chain that uses all of your adapters to connect the charging outlet to your device's built-in adapter and count\r\nthe joltage differences between the charging outlet, the adapters, and your device. What is the number of 1-jolt\r\ndifferences multiplied by the number of 3-jolt differences?\r\n\r\n\"\"\"\r\nfrom typing import List, Sequence\r\nfrom collections import Counter\r\n\r\n\r\ndef calculate_differences(data: Sequence[int]) -> List[int]:\r\n return [data[i] - data[i - 1] for i in range(1, len(data))]\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = [int(line.strip()) for line in f.readlines()]\r\n convenient_input = [0] + puzzle_input\r\n convenient_input.sort()\r\n\r\n differences = calculate_differences(convenient_input)\r\n counts = Counter(differences)\r\n print(counts[1] * (counts[3] + 1)) # Answer = 2080\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6501984000205994,
"alphanum_fraction": 0.671230137348175,
"avg_line_length": 38,
"blob_id": "6c4af27e715b31fcf0db4306cbde1d0aae0db76e",
"content_id": "f39250b21871f32fc1c474145e43239ecd59b967",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5040,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 126,
"path": "/Day 5/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n--- Day 5: Binary Boarding ---\r\n\r\nYou board your plane only to discover a new problem: you dropped your boarding pass!\r\nYou aren't sure which seat is yours, and all of the flight attendants are busy with the flood of people that\r\nsuddenly made it through passport control.\r\n\r\nYou write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input);\r\nperhaps you can find your seat through process of elimination.\r\n\r\nInstead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like\r\nFBFBBFFRLR, where F means \"front\", B means \"back\", L means \"left\", and R means \"right\".\r\n\r\nThe first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane\r\n(numbered 0 through 127). Each letter tells you which half of a region the given seat is in.\r\nStart with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63)\r\nor the back (64 through 127). The next letter indicates which half of that region the seat is in,\r\nand so on until you're left with exactly one row.\r\n\r\nFor example, consider just the first seven characters of FBFBBFFRLR:\r\n\r\n Start by considering the whole range, rows 0 through 127.\r\n F means to take the lower half, keeping rows 0 through 63.\r\n B means to take the upper half, keeping rows 32 through 63.\r\n F means to take the lower half, keeping rows 32 through 47.\r\n B means to take the upper half, keeping rows 40 through 47.\r\n B keeps rows 44 through 47.\r\n F keeps rows 44 through 45.\r\n The final F keeps the lower of the two, row 44.\r\n\r\nThe last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane\r\n(numbered 0 through 7). The same process as above proceeds again, this time with only three steps.\r\nL means to keep the lower half, while R means to keep the upper half.\r\n\r\nFor example, consider just the last 3 characters of FBFBBFFRLR:\r\n\r\n Start by considering the whole range, columns 0 through 7.\r\n R means to take the upper half, keeping columns 4 through 7.\r\n L means to take the lower half, keeping columns 4 through 5.\r\n The final R keeps the upper of the two, column 5.\r\n\r\nSo, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.\r\n\r\nEvery seat also has a unique seat ID: multiply the row by 8, then add the column.\r\nIn this example, the seat has ID 44 * 8 + 5 = 357.\r\n\r\nHere are some other boarding passes:\r\n\r\n BFFFBBFRRR: row 70, column 7, seat ID 567.\r\n FFFBBBFRRR: row 14, column 7, seat ID 119.\r\n BBFFBBFRLL: row 102, column 4, seat ID 820.\r\n\r\nAs a sanity check, look through your list of boarding passes. 
What is the highest seat ID on a boarding pass?\r\n\r\n\"\"\"\r\nfrom typing import List\r\n\r\n\r\ndef find_row(characters: str) -> int:\r\n \"\"\"\r\n Find the specific row in the airplane from the code (characters).\r\n There are 128 rows in total.\r\n The characters that determine the specific row are F (front) and B (back).\r\n - If a character is F, the lower half of the seats are taken.\r\n - If a character is B, the upper half of the seats remaining are taken.\r\n The function returns the last number in the list of rows.\r\n\r\n :param characters: str\r\n :return: int - Specific row\r\n \"\"\"\r\n rows: List[int] = [i for i in range(128)]\r\n for char in characters:\r\n midpoint = int(len(rows) / 2)\r\n if char == \"F\":\r\n rows = rows[:midpoint]\r\n elif char == \"B\":\r\n rows = rows[midpoint:]\r\n return rows[0]\r\n\r\n\r\ndef find_column(characters: str) -> int:\r\n \"\"\"\r\n Find the specific column in the airplane from the code (characters).\r\n There are 8 columns in total.\r\n The characters that determine the specific row are L (left) and R (right).\r\n - If a character is L, the lower half of the seats remaining are taken.\r\n - If a character is R, the upper half of the seats remaining are taken.\r\n The function returns the last number in the list of columns.\r\n\r\n :param characters: str\r\n :return: int - Specific column\r\n \"\"\"\r\n columns: List[int] = [i for i in range(8)]\r\n for char in characters:\r\n midpoint = int(len(columns) / 2)\r\n if char == \"L\":\r\n columns = columns[:midpoint]\r\n elif char == \"R\":\r\n columns = columns[midpoint:]\r\n return columns[0]\r\n\r\n\r\ndef find_seat_id(characters: str) -> int:\r\n \"\"\"\r\n The function returns the seat ID given the code (characters).\r\n The seat ID is obtained by multiplying the row by 8, then adding the column.\r\n\r\n :param characters: str\r\n :return: int - Seat ID\r\n \"\"\"\r\n row = find_row(characters[:8])\r\n column = find_column(characters[-3:])\r\n\r\n return (row * 8) + column\r\n\r\n\r\ndef main():\r\n with open(\"./input.txt\") as f:\r\n puzzle_input = f.readlines()\r\n\r\n seat_ids = [find_seat_id(seat.strip()) for seat in puzzle_input]\r\n print(max(seat_ids)) # Answer: 980\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
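Because F/L always pick the lower half and B/R the upper half, a boarding pass is just a 10-bit binary number, and the row * 8 + column formula is that number read directly; a sketch of find_seat_id reduced to a base-2 parse (an equivalent rewrite, not the file's own code):

def find_seat_id(characters: str) -> int:
    # F and L select the 0 half, B and R the 1 half, so the whole
    # pass is the seat ID written in binary.
    bits = characters.strip().translate(str.maketrans("FBLR", "0101"))
    return int(bits, 2)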
{
"alpha_fraction": 0.691535472869873,
"alphanum_fraction": 0.6949577927589417,
"avg_line_length": 35.525001525878906,
"blob_id": "1a3b859d2a04a126e6bcebd78cd2ff01b3ce5cc1",
"content_id": "228fdf95937de9ce5435d37f7822a7b93074d51f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4383,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 120,
"path": "/Day 21/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 21: Allergen Assessment ---\n\nYou reach the train's last stop and the closest you can get to your vacation island without getting wet.\nThere aren't even any boats here, but nothing can stop you now: you build a raft.\nYou just need a few days' worth of food for your journey.\n\nYou don't speak the local language, so you can't read any ingredients lists.\nHowever, sometimes, allergens are listed in a language you do understand.\nYou should be able to use this information to determine which ingredient contains which allergen\nand work out which foods are safe to take with you on your trip.\n\nYou start by compiling a list of foods (your puzzle input), one food per line.\nEach l5ine includes that food's ingredients list followed by some or all of the allergens the food contains.\n\nEach allergen is found in exactly one ingredient.\nEach ingredient contains zero or one allergen.\nAllergens aren't always marked; when they're listed (as in (contains nuts, shellfish) after an ingredients list),\nthe ingredient that contains each listed allergen will be somewhere in the corresponding ingredients list.\nHowever, even if an allergen isn't listed, the ingredient that contains that allergen could still be present:\nmaybe they forgot to label it, or maybe it was labeled in a language you don't know.\n\nFor example, consider the following list of foods:\n\nmxmxvkd kfcds sqjhc nhms (contains dairy, fish)\ntrh fvjkl sbzzf mxmxvkd (contains dairy)\nsqjhc fvjkl (contains soy)\nsqjhc mxmxvkd sbzzf (contains fish)\n\nThe first food in the list has four ingredients (written in a language you don't understand):\nmxmxvkd, kfcds, sqjhc, and nhms.\nWhile the food might contain other allergens, a few allergens the food definitely contains are listed afterward:\ndairy and fish.\n\nThe first step is to determine which ingredients can't possibly contain any of the allergens in any food in your list.\nIn the above example, none of the ingredients kfcds, nhms, sbzzf, or trh can contain an allergen.\nCounting the number of times any of these ingredients appear in any ingredients list produces 5:\nthey all appear once each except sbzzf, which appears twice.\n\nDetermine which ingredients cannot possibly contain any of the allergens in your list.\nHow many times do any of those ingredients appear?\n\n\"\"\"\nfrom typing import List, Sequence, Tuple, Set, Dict\nfrom functools import reduce\n\n\ndef count_ingredients_without_allergens(allergens: Dict[Set[str], Set[str]],\n lines: Sequence[Tuple[Set[str], Set[str]]]) -> int:\n used = set()\n\n for allergen in allergens.values():\n used |= allergen\n\n count = 0\n\n for ingredients, _ in lines:\n for ingredient in ingredients:\n if ingredient not in used:\n count += 1\n\n return count\n\n\ndef find_allergens(lines: Sequence[Tuple[Set[str], Set[str]]]) -> Dict[Set[str], Set[str]]:\n ingredients = [line[0] for line in lines]\n allergens = [line[1] for line in lines]\n ingredients = reduce(lambda a, b: a | b, ingredients)\n allergens = {allergen: set(ingredients) for allergen in reduce(lambda a, b: a | b, allergens)}\n\n for ingredient, _allergens in lines:\n for allergen in _allergens:\n allergens[allergen] &= ingredient\n\n definite = set()\n\n for ingredient in allergens.values():\n if len(ingredient) == 1:\n definite |= ingredient\n\n while True:\n reduct_dict = {}\n\n for key, ingredient in allergens.items():\n if len(ingredient - definite) == 1:\n reduct_dict[key] = ingredient - definite\n\n if not reduct_dict:\n break\n\n for k, v in 
reduct_dict.items():\n allergens[k] = v\n definite |= v\n\n return allergens\n\n\ndef parse_input(data: Sequence[str]) -> List[Tuple[Set[str], Set[str]]]:\n parsed_input = []\n\n for line in data:\n a, b = line.strip().split(\" (contains \")\n ingredients = set(a.split())\n allergens = set([word[:-1] for word in b.split()])\n parsed_input.append((ingredients, allergens))\n\n return parsed_input\n\n\ndef main():\n with open('./input.txt') as f:\n puzzle_input = f.readlines()\n\n lines = parse_input(puzzle_input)\n allergens = find_allergens(lines)\n print(count_ingredients_without_allergens(allergens, lines)) # Answer = 2635\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5299727320671082,
"alphanum_fraction": 0.5449591279029846,
"avg_line_length": 17.820512771606445,
"blob_id": "57a47d78492712049dc2cacb9cf64f1d64c15904",
"content_id": "83ad7bf42709b5753a6518f0e9e6df1f953c66f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 39,
"path": "/Day 1/part_1.cpp",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <fstream>\n#include <string>\n#include <vector>\n#include <algorithm>\n\nconst int TARGET = 2020;\n\nint find_target(const std::vector<int> numbers)\n{\n for (int number : numbers)\n {\n int remainder = TARGET - number;\n if (std::find(numbers.begin(), numbers.end(), remainder) != numbers.end())\n {\n return remainder * number;\n }\n }\n}\n\nint main()\n{\n std::vector<int> numbers;\n std::ifstream f (\"./input.txt\");\n std::string line;\n\n if (f.is_open())\n {\n while (getline(f, line))\n {\n numbers.push_back(std::stoi(line));\n }\n f.close();\n }\n\n std::cout << find_target(numbers); // Answer = 567171\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.640618622303009,
"alphanum_fraction": 0.6691086888313293,
"avg_line_length": 41,
"blob_id": "25facf11a050ffbf580ad25840128f7779fbf78c",
"content_id": "672f6e2bfca13dcfe9866c5d1240b31f1c324eec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4914,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 117,
"path": "/Day 16/part_1.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Day 16: Ticket Translation ---\n\nAs you're walking to yet another connecting flight, you realize that one of the legs of your re-routed trip coming up\nis on a high-speed train. However, the train ticket you were given is in a language you don't understand.\nYou should probably figure out what it says before you get to the train station after the next flight.\n\nUnfortunately, you can't actually read the words on the ticket. You can, however, read the numbers,\nand so you figure out the fields these tickets must have and the valid ranges for values in those fields.\n\nYou collect the rules for ticket fields, the numbers on your ticket, and the numbers on other nearby tickets for the\nsame train service (via the airport security cameras) together into a single document\nyou can reference (your puzzle input).\n\nThe rules for ticket fields specify a list of fields that exist somewhere on the ticket and the valid ranges of values\nfor each field.\nFor example, a rule like class: 1-3 or 5-7 means that one of the fields in every ticket is named class and can be any\nvalue in the ranges 1-3 or 5-7 (inclusive, such that 3 and 5 are both valid in this field, but 4 is not).\n\nEach ticket is represented by a single line of comma-separated values. The values are the numbers on the ticket in the\norder they appear; every ticket has the same format. For example, consider this ticket:\n\n.--------------------------------------------------------.\n| ????: 101 ?????: 102 ??????????: 103 ???: 104 |\n| |\n| ??: 301 ??: 302 ???????: 303 ??????? |\n| ??: 401 ??: 402 ???? ????: 403 ????????? |\n'--------------------------------------------------------'\n\nHere, ? represents text in a language you don't understand.\nThis ticket might be represented as 101,102,103,104,301,302,303,401,402,403; of course, the actual train tickets you're\nlooking at are much more complicated. In any case, you've extracted just the numbers in such a way that the first\nnumber is always the same specific field, the second number is always a different specific field, and so on - you just\ndon't know what each position actually means!\n\nStart by determining which tickets are completely invalid; these are tickets that contain values which aren't valid for\nany field. Ignore your ticket for now.\n\nFor example, suppose you have the following notes:\n\nclass: 1-3 or 5-7\nrow: 6-11 or 33-44\nseat: 13-40 or 45-50\n\nyour ticket:\n7,1,14\n\nnearby tickets:\n7,3,47\n40,4,50\n55,2,20\n38,6,12\n\nIt doesn't matter which position corresponds to which field; you can identify invalid nearby tickets by considering\nonly whether tickets contain values that are not valid for any field.\nIn this example, the values on the first nearby ticket are all valid for at least one field.\nThis is not true of the other three nearby tickets: the values 4, 55, and 12 are are not valid for any field.\nAdding together all of the invalid values produces your ticket scanning error rate: 4 + 55 + 12 = 71.\n\nConsider the validity of the nearby tickets you scanned. 
What is your ticket scanning error rate?\n\n\"\"\"\nfrom typing import Sequence, List, Tuple\n\nall_fields = []\n\n\ndef parse_input(puzzle_input: Sequence[str]) -> Tuple[List[str], List[int], List[List[int]]]:\n \"\"\"\n Parses a sequence of strings (the puzzle input).\n Returns a tuple containing fields, your ticket and nearby tickets respectively.\n :param puzzle_input: The puzzle input - Sequence[str]\n :return: Tuple i.e (fields, your ticket, nearby tickets) - Tuple[List[str], List[int], List[List[int]]]\n \"\"\"\n grouped_data = []\n while \"\" in puzzle_input:\n space_index = puzzle_input.index(\"\")\n grouped_data.append(puzzle_input[:space_index])\n puzzle_input = puzzle_input[space_index + 2:]\n else:\n grouped_data.append(puzzle_input)\n\n fields = []\n for field in grouped_data[0]:\n fields.extend(field.split(\": \")[1].split(\" or \"))\n\n your_tickets = [int(num) for num in grouped_data[1][0].split(\",\")]\n\n nearby_tickets = []\n for line in grouped_data[2]:\n numbers = line.split(\",\")\n nearby_tickets.append([int(number) for number in numbers])\n\n return fields, your_tickets, nearby_tickets\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = [line.strip() for line in f.readlines()]\n fields, your_ticket, nearby_tickets = parse_input(puzzle_input)\n\n for start, stop in [field.split(\"-\") for field in fields]:\n for i in range(int(start), int(stop)+1):\n if i not in all_fields:\n all_fields.append(i)\n\n ticket_scanning_error_rate = 0\n for ticket in nearby_tickets:\n for field in ticket:\n if field not in all_fields:\n ticket_scanning_error_rate += field\n\n print(ticket_scanning_error_rate) # Answer = 23925\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.519002377986908,
"alphanum_fraction": 0.6935867071151733,
"avg_line_length": 29.071428298950195,
"blob_id": "cefbb94ac207fed1f28ed6eb46138b9fd1c34dd2",
"content_id": "fe3210f68d0989c2318ef677b66dbc84528f5566",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 28,
"path": "/Day 15/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nImpressed, the Elves issue you a challenge: determine the 30000000th number spoken.\nFor example, given the same starting numbers as above:\n\n Given 0,3,6, the 30000000th number spoken is 175594.\n Given 1,3,2, the 30000000th number spoken is 2578.\n Given 2,1,3, the 30000000th number spoken is 3544142.\n Given 1,2,3, the 30000000th number spoken is 261214.\n Given 2,3,1, the 30000000th number spoken is 6895259.\n Given 3,2,1, the 30000000th number spoken is 18.\n Given 3,1,2, the 30000000th number spoken is 362.\n\nGiven your starting numbers, what will be the 30000000th number spoken?\n\n\"\"\"\n\nfrom part_1 import play_numbers_game\n\n\ndef main():\n starting_numbers = [15, 5, 1, 4, 7, 0]\n print(play_numbers_game(starting_numbers, 30_000_000)) # Answer = 689\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.41609978675842285,
"alphanum_fraction": 0.423417866230011,
"avg_line_length": 27.619468688964844,
"blob_id": "3e6f534799ac289667465e9fea51068b5af471f2",
"content_id": "62248184606d19d6078d6e7f18168bc38a01f821",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9702,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 339,
"path": "/Day 20/part_2.py",
"repo_name": "king-phyte/aoc2020",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--- Part Two ---\n\nNow, you're ready to check the image for sea monsters.\n\nThe borders of each tile are not part of the actual image; start by removing them.\n\nIn the example above, the tiles become:\n\n.#.#..#. ##...#.# #..#####\n###....# .#....#. .#......\n##.##.## #.#.#..# #####...\n###.#### #...#.## ###.#..#\n##.#.... #.##.### #...#.##\n...##### ###.#... .#####.#\n....#..# ...##..# .#.###..\n.####... #..#.... .#......\n\n#..#.##. .#..###. #.##....\n#.####.. #.####.# .#.###..\n###.#.#. ..#.#### ##.#..##\n#.####.. ..##..## ######.#\n##..##.# ...#...# .#.#.#..\n...#..#. .#.#.##. .###.###\n.#.#.... #.##.#.. .###.##.\n###.#... #..#.##. ######..\n\n.#.#.### .##.##.# ..#.##..\n.####.## #.#...## #.#..#.#\n..#.#..# ..#.#.#. ####.###\n#..####. ..#.#.#. ###.###.\n#####..# ####...# ##....##\n#.##..#. .#...#.. ####...#\n.#.###.. ##..##.. ####.##.\n...###.. .##...#. ..#..###\n\nRemove the gaps to form the actual image:\n\n.#.#..#.##...#.##..#####\n###....#.#....#..#......\n##.##.###.#.#..######...\n###.#####...#.#####.#..#\n##.#....#.##.####...#.##\n...########.#....#####.#\n....#..#...##..#.#.###..\n.####...#..#.....#......\n#..#.##..#..###.#.##....\n#.####..#.####.#.#.###..\n###.#.#...#.######.#..##\n#.####....##..########.#\n##..##.#...#...#.#.#.#..\n...#..#..#.#.##..###.###\n.#.#....#.##.#...###.##.\n###.#...#..#.##.######..\n.#.#.###.##.##.#..#.##..\n.####.###.#...###.#..#.#\n..#.#..#..#.#.#.####.###\n#..####...#.#.#.###.###.\n#####..#####...###....##\n#.##..#..#...#..####...#\n.#.###..##..##..####.##.\n...###...##...#...#..###\n\nNow, you're ready to search for sea monsters! Because your image is monochrome, a sea monster will look like this:\n\n #\n# ## ## ###\n # # # # # #\n\nWhen looking for this pattern in the image, the spaces can be anything; only the # need to match. Also,\nyou might need to rotate or flip your image before it's oriented correctly to find sea monsters. In the above image,\nafter flipping and rotating it to the appropriate orientation, there are two sea monsters (marked with O):\n\n.####...#####..#...###..\n#####..#..#.#.####..#.#.\n.#.#...#.###...#.##.O#..\n#.O.##.OO#.#.OO.##.OOO##\n..#O.#O#.O##O..O.#O##.##\n...#.#..##.##...#..#..##\n#.##.#..#.#..#..##.#.#..\n.###.##.....#...###.#...\n#.####.#.#....##.#..#.#.\n##...#..#....#..#...####\n..#.##...###..#.#####..#\n....#.##.#.#####....#...\n..##.##.###.....#.##..#.\n#...#...###..####....##.\n.#.##...#.##.#.#.###...#\n#.###.#..####...##..#...\n#.###...#.##...#.##O###.\n.O##.#OO.###OO##..OOO##.\n..O#.O..O..O.#O##O##.###\n#.#..##.########..#..##.\n#.#####..#.#...##..#....\n#....##..#.#########..##\n#...#.....#..##...###.##\n#..###....##.#...##.##.#\n\nDetermine how rough the waters are in the sea monsters' habitat by counting the number of # that are not part of a\nsea monster. 
In the above example, the habitat's water roughness is 273.\n\nHow many # are not part of a sea monster?\n\n\"\"\"\nimport re # Couldn't escape re forever :(\nfrom collections import defaultdict\nfrom itertools import product\nfrom typing import Dict, List, Sequence, Set, Tuple\nfrom part_1 import matches, find_edges\n\nMONSTER = (\n ' # ',\n '# ## ## ###',\n ' # # # # # # '\n)\n\n\ndef find_integers(line: str) -> List[int]:\n pattern = re.compile(r'-?\\d+')\n\n return [int(val) for val in re.findall(pattern, line) if val]\n\n\ndef flip_vertically(image: Sequence[Sequence[str]]) -> Tuple[Tuple[str], ...]:\n return tuple(tuple(row) for row in image[::-1])\n\n\ndef rotate(image: Sequence[str]) -> Tuple[Tuple[str]]:\n rotated_image = []\n row = []\n\n for x in range(len(image[0])):\n for y in range(len(image)):\n row.append(image[y][x])\n rotated_image.append(row[::-1])\n row = []\n\n if row:\n rotated_image.append(row)\n\n return tuple(tuple(row) for row in rotated_image)\n\n\ndef flip_turns(image: Tuple[str, str, str]) -> Set[Tuple[Tuple[str, ...], ...]]:\n flipped_turn = set()\n\n for _ in range(4):\n image = rotate(image)\n flipped_turn.add(image)\n flipped_turn.add(flip_vertically(image))\n\n return flipped_turn\n\n\ndef count_monsters(image: Sequence[Sequence[str]]) -> int:\n monster_flips = flip_turns(MONSTER)\n\n squares = {(y, x) for y in range(len(image)) for x in range(len(image[0])) if image[y][x] == '#'}\n monstered = set()\n\n height = len(image)\n width = len(image[0])\n\n for monster in monster_flips:\n monster_height = len(monster)\n monster_width = len(monster[0])\n\n for y in range(height - monster_height + 1):\n for x in range(width - monster_width + 1):\n match = True\n hit = set()\n\n for dy in range(monster_height):\n for dx in range(monster_width):\n if monster[dy][dx] != '#':\n continue\n if image[y + dy][x + dx] != '#':\n match = False\n else:\n hit.add((y + dy, x + dx))\n\n if match:\n monstered |= hit\n\n return len(squares - monstered)\n\n\ndef find_monsters(tiles: Dict[int, Sequence[str]]) -> List[List[str]]:\n sides = {}\n\n for tile_id, tile in tiles.items():\n sides[tile_id] = find_edges(tile)\n\n graph = defaultdict(list)\n\n for a, b in product(sides.keys(), repeat=2):\n if a == b:\n continue\n\n for a_side, b_side in product(sides[a], sides[b]):\n if matches(a_side, b_side):\n graph[a].append(b)\n\n corners = [k for k, v in graph.items() if len(v) == 2]\n\n image = {corners[0]: (0, 0)}\n node = corners[0]\n y = 0\n x = 1\n corner_count = 1\n\n while any((n not in image and len(graph[n]) < 4) for n in graph[node]):\n for neighbour in graph[node]:\n if (neighbour not in image) and (len(graph[neighbour]) < 4):\n image[neighbour] = (y, x)\n node = neighbour\n\n if len(graph[node]) == 2:\n corner_count += 1\n\n if corner_count == 1:\n x += 1\n elif corner_count == 2:\n y += 1\n elif corner_count == 3:\n x -= 1\n elif corner_count == 4:\n y -= 1\n\n break\n\n height = max(val[0] for val in image.values()) + 1\n width = max(val[1] for val in image.values()) + 1\n\n for x in range(1, width - 1):\n for y in range(1, height - 1):\n left = [k for k, v in image.items() if v == (y, x - 1)][0]\n up = [k for k, v in image.items() if v == (y - 1, x)][0]\n\n intersection = [\n tile_id for tile_id in graph.keys() if (tile_id in graph[left])\n and (tile_id in graph[up])\n and (tile_id not in image)][0]\n\n image[intersection] = (y, x)\n\n coord_to_id = {v: k for k, v in image.items()}\n\n tile_height = len(tiles[corners[0]])\n tile_width = len(tiles[corners[0]][0])\n\n canvas 
= [[' ' for _ in range((tile_width - 2) * width)] for _ in range((tile_height - 2) * height)]\n\n for y in range(height):\n for x in range(width):\n tile_id = coord_to_id[(y, x)]\n tile = tiles[tile_id]\n\n up, down, left, right = find_edges(tile)\n\n if x < width - 1:\n right_tile_id = coord_to_id[(y, x + 1)]\n right_tile = tiles[right_tile_id]\n right_tile_edges = find_edges(right_tile)\n\n while not any(matches(right, edge) for edge in right_tile_edges):\n tile = rotate(tile)\n up, down, left, right = find_edges(tile)\n\n else:\n left_tile_id = coord_to_id[(y, x - 1)]\n left_tile = tiles[left_tile_id]\n left_tile_edges = find_edges(left_tile)\n\n while not any(matches(left, edge) for edge in left_tile_edges):\n tile = rotate(tile)\n up, down, left, right = find_edges(tile)\n\n if y < height - 1:\n down_tile_id = coord_to_id[(y + 1, x)]\n down_tile = tiles[down_tile_id]\n down_tile_edges = find_edges(down_tile)\n\n if not any(matches(down, edge) for edge in down_tile_edges):\n tile = flip_vertically(tile)\n\n else:\n up_tile_id = coord_to_id[(y - 1, x)]\n up_tile = tiles[up_tile_id]\n up_tile_edges = find_edges(up_tile)\n\n if not any(matches(up, edge) for edge in up_tile_edges):\n tile = flip_vertically(tile)\n\n start_y = y * (tile_height - 2)\n start_x = x * (tile_width - 2)\n\n for dy in range(tile_height - 2):\n for dx in range(tile_width - 2):\n canvas[start_y + dy][start_x + dx] = tile[dy + 1][dx + 1]\n\n return canvas\n\n\ndef parse_input(data: Sequence[str]) -> Dict[int, List[str]]:\n \"\"\"\n Parses the input into a dictionary of numbers (tile IDs) and tiles\n \"\"\"\n tiles = {}\n tile = []\n tile_id = -1\n\n for line in data:\n if line.strip():\n if find_integers(line):\n tile_id = find_integers(line)[0]\n else:\n tile.append(line.strip())\n else:\n tiles[tile_id] = tile\n tile_id = -1\n tile = []\n\n tiles[tile_id] = tile\n return tiles\n\n\ndef main():\n with open(\"./input.txt\") as f:\n puzzle_input = f.readlines()\n\n tiles = parse_input(puzzle_input)\n\n canvas = find_monsters(tiles)\n print(count_monsters(canvas)) # Answer = 1957\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 53 |
fagan2888/yalehackhealth2018 | https://github.com/fagan2888/yalehackhealth2018 | 4974e852c5eb85c5b580de6918074d254b234d1d | 0922bcf16c12529b37563272a8e7d39ae310b1ed | 7f7fcfb5dfbf5c72379847796a4b6443a53891a3 | refs/heads/master | 2022-01-11T20:34:14.424641 | 2019-05-21T14:54:30 | 2019-05-21T14:54:30 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7631579041481018,
"alphanum_fraction": 0.7789473533630371,
"avg_line_length": 46.5,
"blob_id": "6ab8609f402275911567d66fe4500112232a2958",
"content_id": "62b0cd640a0b196373927bb59db21c0250d59a3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 8,
"path": "/README.md",
"repo_name": "fagan2888/yalehackhealth2018",
"src_encoding": "UTF-8",
"text": "# Welcome to the Yale Hackathon 2018\n\nThis repository can be used as a starting point for challenges #1 or #2 as published in [here.](http://www.yalehackhealth.org)\n\nI am loading pretrained model with interface to access class probabilities like the following:\n```\npython hackathon_example.py evaluate --input_file=butterfly_dataset/test/test.csv --export_dir=newLikelihood \n```\n"
},
{
"alpha_fraction": 0.45816734433174133,
"alphanum_fraction": 0.7051792740821838,
"avg_line_length": 15.733333587646484,
"blob_id": "9a417347532a8a1530f346f106114602ef513a6e",
"content_id": "f29eadf7606d6a07692ae3cf5fd227ca65b62023",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 15,
"path": "/requirements.txt",
"repo_name": "fagan2888/yalehackhealth2018",
"src_encoding": "UTF-8",
"text": "bleach==1.5.0\nclick==6.7\nenum34==1.1.6\nhtml5lib==0.9999999\nMarkdown==2.6.11\nnumpy==1.14.0\npandas==0.22.0\nprotobuf==3.5.1\npython-dateutil==2.6.1\npytz==2017.3\nsix==1.11.0\ntensorflow>=1.12.1\ntensorflow-tensorboard==0.4.0rc3\ntqdm==4.19.5\nWerkzeug==0.14.1\n"
},
{
"alpha_fraction": 0.5502175688743591,
"alphanum_fraction": 0.5589648485183716,
"avg_line_length": 33.474998474121094,
"blob_id": "05386edc46db7a27269825902388201bee5fa1af",
"content_id": "2c8e0d34a915be44b6a57bed25a504559037ef22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22064,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 640,
"path": "/hackathon_example.py",
"repo_name": "fagan2888/yalehackhealth2018",
"src_encoding": "UTF-8",
"text": "# Copyright 2012-2018 (C) Butterfly Network, Inc.\n\nimport logging\nimport os\nimport shutil\nimport tarfile\nimport urllib.request\n\nimport click\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim.nets import inception\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\n\n# Create top level logger\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\n\n# Add console handler using our custom ColoredFormatter\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlog.addHandler(ch)\n\n# Global parameters\n\n# The image labels.\nLABEL_NAMES = np.array([\n 'morisons_pouch',\n 'bladder',\n 'plax',\n '4ch',\n '2ch',\n 'ivc',\n 'carotid',\n 'lungs',\n 'thyroid',\n])\nNUM_CLASSES = len(LABEL_NAMES)\n\n# The size of the raw ultrasound images.\nIMAGE_WIDTH = 436\nIMAGE_HEIGHT = 512\n\n# The default image size as required by the inception v1 model\nTARGET_IMAGE_WIDTH = TARGET_IMAGE_HEIGHT = 224\n\nLARGE_DATASET_URLS = [\n 'https://github.com/ButterflyNetwork/YaleHackathon2018/releases/download'\n '/v.0.0.1/butterfly_dataset_test.tar.gz',\n 'https://github.com/ButterflyNetwork/YaleHackathon2018/releases/download'\n '/v.0.0.1/butterfly_dataset_training1.tar.gz',\n 'https://github.com/ButterflyNetwork/YaleHackathon2018/releases/download'\n '/v.0.0.1/butterfly_dataset_training2.tar.gz',\n]\n\nMINI_DATASET_URLS = [\n 'https://github.com/ButterflyNetwork/YaleHackathon2018'\n '/releases/download/v.0.0.1/butterfly_mini_dataset.tar.gz'\n]\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected](\n '--dest_dir',\n required=True,\n default=os.getcwd(),\n type=click.Path(exists=True, dir_okay=True)\n)\[email protected]('--large', default=False, is_flag=True)\[email protected]()\ndef download_dataset(dest_dir, large):\n \"\"\" Download and extract the mini dataset.\n\n :param dest_dir: The directory where the dataset will be extracted.\n :params large: Indicate whether to download the large dataset.\n\n Example:\n python hackathon_example.py download_dataset\n\n To download the large dataset use:\n python hackathon_example.py download_dataset --large\n \"\"\"\n urls = MINI_DATASET_URLS\n dataset_name = 'butterfly_mini_dataset'\n if large:\n urls = LARGE_DATASET_URLS\n dataset_name = 'butterfly_dataset'\n\n downloaded_files = []\n\n for url in urls:\n filename = os.path.basename(url)\n downloaded_files.append(filename)\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n class TqdmUpTo(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=os.path.basename(filename)) as progress_bar:\n urllib.request.urlretrieve(url, filename=filepath,\n reporthook=progress_bar.update_to)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded {} {} bytes.'.format(\n filename, statinfo.st_size))\n else:\n log.info('Data segment is already available here: {}'.format(\n dest_dir))\n\n extracted_dir_path = os.path.join(\n dest_dir,\n dataset_name\n )\n\n if os.path.exists(extracted_dir_path):\n shutil.rmtree(extracted_dir_path)\n\n for downloaded_file in downloaded_files:\n log.info('Extracting the data from {}'.format(downloaded_file))\n tarfile.open(downloaded_file, 'r:gz').extractall(\n extracted_dir_path\n )\n\n # If large dataset, combined the training folders into a single folder.\n if large:\n os.chdir(extracted_dir_path)\n 
os.rename('training1', 'training')\n # Copy content of trianig2 into training\n for src in os.listdir(\"training2\"):\n if os.path.isdir(src):\n dst = os.path.join('training', os.path.basename(src))\n shutil.copytree(src, dst)\n else:\n dst = 'training'\n shutil.move(os.path.join('training2', src), dst)\n os.rmdir(\"training2\")\n\n log.info('The dataset {} is now available here {}.'.format(\n dataset_name, extracted_dir_path))\n\n\[email protected]()\[email protected](\n '--input_file',\n required=True,\n type=click.Path(exists=True, dir_okay=False)\n)\[email protected]('--batch_size', required=False, type=click.INT, default=1)\[email protected]('--vis_dir', required=False, type=click.Path(exists=False, dir_okay=True), default=\"vis\")\[email protected](\n '--export_dir',\n required=True,\n type=click.Path(exists=True, dir_okay=True)\n)\ndef evaluate(input_file, batch_size, vis_dir, export_dir):\n \"\"\" Evaluates the given dataset\n\n :param input_file: the csv file containing the training set.\n :param batch_size: the batch size used for training\n :param export_dir: the checkpoint directory from which the model should\n be restored.\n\n Example:\n python hackathon_example.py evaluate\n --input_file=butterfly_mini_dataset/test/test.csv\n --export_dir=models\n \"\"\"\n import os\n try:\n os.mkdir(vis_dir)\n except:\n pass\n\n dataset_image_paths, dataset_labels = load_data_from_csv(input_file)\n\n with tf.Graph().as_default() as graph:\n\n # Define the data iterator.\n image_paths = tf.placeholder(tf.string, [None])\n label_data = tf.placeholder(tf.int32, [None])\n\n data_iterator = create_dataset_iterator(\n image_paths,\n label_data,\n batch_size,\n )\n next_test_batch = data_iterator.get_next()\n\n saver = tf.train.import_meta_graph(\n os.path.join(export_dir, 'butterfly-model.meta')\n )\n images = graph.get_tensor_by_name(\"images:0\")\n labels = graph.get_tensor_by_name(\"labels:0\")\n predictions = graph.get_tensor_by_name(\"predictions:0\")\n likelihood=graph.get_tensor_by_name(\"likelihood:0\")\n all_likelihood=graph.get_tensor_by_name(\"all_likelihood:0\")\n # Add an accuracy node.\n accuracy_to_value, accuracy_update_op = tf.metrics.accuracy(\n predictions,\n labels,\n )\n local_init_op = tf.local_variables_initializer()\n\n # Load from check-point\n with tf.Session() as session:\n\n # Restore model.\n saver.restore(session, tf.train.latest_checkpoint(export_dir))\n\n # Initialize the iterator.\n session.run([local_init_op, data_iterator.initializer],\n feed_dict={\n image_paths: dataset_image_paths,\n label_data: dataset_labels,\n })\n\n for i in range(10):\n try:\n # Read the next batch.\n batch_images, batch_labels = session.run(next_test_batch)\n img = batch_images.reshape((224,224,3))\n import scipy.misc\n scipy.misc.imsave(os.path.join(vis_dir,'{}.png'.format(i)), img)\n #plt.imshow(img)\n current_label = batch_labels[0]\n\n predicted_class = session.run(predictions,\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n prob_predicted_class = session.run(likelihood,\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n class_probs = session.run(all_likelihood,\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n class_probs_list = class_probs.reshape((9,)).tolist()\n\n # draw_bargraph\n bar_graph_indices = [0,1,2,3,4,5,6,7,8]\n fig, ax = plt.subplots()\n barlist = ax.barh(np.arange(len(class_probs_list)), class_probs_list,align='center',alpha=0.5)\n barlist[predicted_class[0]].set_color('r')\n 
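# Mark the ground-truth class in green (the predicted class was colored red above).\n                    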
barlist[current_label].set_color('g')\n ax.set_yticks(np.arange(len(class_probs_list)))\n ax.set_yticklabels(['morisons','bladder','plax','4ch','2ch','ivc','carotid','lungs','thyroid'])\n plt.savefig(os.path.join(vis_dir,'{}_class_probs.png'.format(i)))\n plt.close()\n\n # Evaluating the model.\n session.run(accuracy_update_op,\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n\n # generate partially patched images\n x_dim, y_dim = 32,32\n result = np.zeros(shape=(224, 224))\n for j in range(y_dim):\n x = 224//y_dim*j\n for k in range(x_dim):\n img_copy = np.copy(img)\n y = 224//x_dim*k\n for n in range(224//y_dim):\n for m in range(224//x_dim):\n img_copy[x+n,y+m,0] = 0.5\n img_copy[x+n,y+m,1] = 0.5\n img_copy[x+n,y+m,2] = 0.5\n #scipy.misc.imsave(os.path.join(vis_dir,'{}_masked_{}_{}.png'.format(i,j,k)), img_copy)\n class_probs = session.run(all_likelihood,\n feed_dict={\n images: img_copy.reshape((1,224,224,3)),\n labels: batch_labels,\n })\n class_probs_list = class_probs.reshape((9,)).tolist()\n #print(j,k)\n #print(class_probs_list[predicted_class[0]])\n for n in range(224//y_dim):\n for m in range(224//x_dim):\n result[x+n][y+m] = 1-class_probs_list[predicted_class[0]]\n plt.imshow(img,alpha=0.7)\n plt.imshow(result, cmap='plasma', interpolation='nearest',alpha=0.6)\n plt.savefig(os.path.join(vis_dir,'{}_heatmap.png'.format(i)))\n plt.close()\n\n\n\n \n\n\n except tf.errors.OutOfRangeError:\n break\n\n accuracy = session.run(accuracy_to_value)\n log.info('test accuracy: {:.1%}'.format(accuracy))\n\n\[email protected]()\[email protected](\n '--input_file',\n required=True,\n type=click.Path(exists=True, dir_okay=False)\n)\[email protected]('--batch_size', required=False, type=click.INT, default=36)\[email protected]('--number_of_epochs', required=False, type=click.INT, default=10)\[email protected](\n '--export_dir',\n required=True,\n type=click.Path(exists=False, dir_okay=True)\n)\ndef train(input_file, batch_size, number_of_epochs, export_dir):\n \"\"\"\n :param input_file: the csv file containing the training set.\n :param batch_size: the batch size used for training.\n :param number_of_epochs: the number of times the model will be trained\n on all the dataset.\n :param export_dir: The directory where the model will be saved.\n\n Example:\n python hackathon_example.py train\n --input_file=butterfly_mini_dataset/training/training.csv\n --export_dir=models\n \"\"\"\n\n train_image_paths, train_labels, valid_image_paths, valid_labels = \\\n load_data_from_csv(input_file, split=True)\n\n # Define the data iterators.\n image_paths = tf.placeholder(tf.string, [None])\n label_data = tf.placeholder(tf.int32, [None])\n\n training_iterator = create_dataset_iterator(\n image_paths,\n label_data,\n batch_size,\n )\n next_train_batch = training_iterator.get_next()\n\n valid_iterator = create_dataset_iterator(\n image_paths,\n label_data,\n batch_size,\n )\n next_valid_batch = valid_iterator.get_next()\n\n # Define input and output to the inception v1 model.\n images = tf.placeholder(\n tf.float32,\n [None, TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT, 3],\n name=\"images\"\n )\n labels = tf.placeholder(tf.int64, [None], name=\"labels\")\n\n # Define inception v1 and return ops to load pre-trained model (trained on\n # ImageNet).\n\n restore_op, feed_dict, train_op, metrics_to_values, metrics_to_updates \\\n = create_model(images, labels)\n\n init_local_op = tf.local_variables_initializer()\n init_op = tf.group(tf.global_variables_initializer(), init_local_op)\n\n # Start the 
training validation loop.\n with tf.Session() as session:\n\n session.run(init_op)\n session.run(restore_op, feed_dict=feed_dict)\n\n # Define a ModelSaver\n saver = tf.train.Saver()\n saver.save(session, os.path.join(\"unintended\",\n 'butterfly-model'))\n print(\"Done\")\n\n best_validation_accuracy = None\n\n # Running training loop.\n for _ in range(number_of_epochs):\n\n session.run([init_local_op, training_iterator.initializer],\n feed_dict={\n image_paths: train_image_paths,\n label_data: train_labels,\n }\n )\n\n while True:\n try:\n # Read the next batch.\n batch_images, batch_labels = session.run(next_train_batch)\n\n \n # Train the model.\n session.run([metrics_to_updates, train_op],\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n except tf.errors.OutOfRangeError:\n break\n\n metrics_values = session.run(metrics_to_values)\n\n accuracy = metrics_values['accuracy']\n mean_loss = metrics_values['mean_loss']\n log.info('training accuracy: {:.1%}, '\n 'training mean loss: {}'.format(accuracy, mean_loss))\n\n # Running validation loop.\n\n session.run(\n [init_local_op, valid_iterator.initializer],\n feed_dict={\n image_paths: valid_image_paths,\n label_data: valid_labels,\n })\n\n while True:\n try:\n # Read the next batch.\n batch_images, batch_labels = session.run(next_valid_batch)\n\n # Train the model.\n session.run(\n metrics_to_updates,\n feed_dict={\n images: batch_images,\n labels: batch_labels,\n })\n except tf.errors.OutOfRangeError:\n break\n\n metrics_values = session.run(metrics_to_values)\n accuracy = metrics_values['accuracy']\n mean_loss = metrics_values['mean_loss']\n log.info(\n 'validation accuracy: {:.1%}, validation mean loss: {}'.format(\n accuracy,\n mean_loss))\n # Save model if accuracy improved.\n if (\n (not best_validation_accuracy) or\n best_validation_accuracy < accuracy\n ):\n best_validation_accuracy = accuracy\n saver.save(session, os.path.join(export_dir,\n 'butterfly-model'))\n\n\ndef create_model(images, labels):\n \"\"\"\n This methods initialize the inception v1 model with weights generated\n from training on the ImageNet dataset for all layers expect the last.\n The last layer is adjusted to output only 9 classes (instead of the\n 1000 required for ImageNet). 
Note also that the methods set the model\n for fine-tuning meaning that during training only the last layer's\n weights can change.\n\n :param images: A tensor containing the images.\n :param labels: A tensor representing the correct labels for the images.\n\n :return restore_op: The operation used to restore the weights of the model.\n :return feed_dict: The feed_dict used for restoring the model.\n :return train_op: The train_op used to train the model.\n :return metrics_to_values: The metrics collected when training.\n :return metrics_to_updates: The metrics update op used when training.\n \"\"\"\n with slim.arg_scope(inception.inception_v1_arg_scope()):\n # Load the deep learning model.\n logits, end_points = inception.inception_v1(\n images,\n num_classes=NUM_CLASSES,\n is_training=False\n )\n\n # We are going to train only the last layer of the model.\n trainable_layer = 'InceptionV1/Logits/Conv2d_0c_1x1'\n\n variables_to_restore = slim.get_variables_to_restore(\n )\n variables_to_train = slim.get_variables_by_suffix('', trainable_layer)\n\n # Transform the labels into one hot encoding.\n one_hot_labels = tf.one_hot(\n labels,\n NUM_CLASSES,\n )\n\n # Define the loss function.\n loss = tf.losses.softmax_cross_entropy(\n one_hot_labels,\n end_points['Logits'],\n )\n\n # Select the optimizer.\n optimizer = tf.train.AdamOptimizer(1e-4)\n\n # Create a train op.\n train_op = tf.contrib.training.create_train_op(\n loss,\n optimizer,\n variables_to_train=variables_to_train,\n )\n\n likelihood = tf.reduce_max(end_points['Predictions'], name=\"likelihood\")\n all_likelihood = tf.abs(end_points['Predictions'], name=\"all_likelihood\")\n\n\n predictions = tf.argmax(\n end_points['Predictions'], 1, name=\"predictions\"\n )\n metrics_to_values, metrics_to_updates = \\\n slim.metrics.aggregate_metric_map({\n 'accuracy': tf.metrics.accuracy(predictions, labels),\n 'mean_loss': tf.metrics.mean(loss),\n })\n\n # Define load predefined model operation.\n restore_op, feed_dict = slim.assign_from_checkpoint(\n 'models/butterfly-model',\n variables_to_restore\n )\n\n return (\n restore_op,\n feed_dict,\n train_op,\n metrics_to_values,\n metrics_to_updates,\n )\n\n\ndef create_dataset_iterator(\n image_placeholder,\n label_placeholder,\n batch_size,\n):\n \"\"\"\n\n :param image_placeholder: A placeholder for the images.\n :param label_placeholder: A placeholder for the labels.\n :param batch_size: The batch size used by the iterator.\n :return: A tensorflow iterator that can be used to iterate over the\n dataset.\n \"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(\n (image_placeholder, label_placeholder)\n )\n dataset = dataset.map(load_image)\n dataset = dataset.cache()\n dataset = dataset.batch(batch_size)\n return dataset.make_initializable_iterator()\n\n\ndef load_data_from_csv(filename, split=False, split_percentage=0.8):\n \"\"\"\n :param filename: The path to the file to be loaded.\n :param split: whether to split the data into train and validation.\n :param split_percentage: The percentage that will be retained as\n :return: A tuple containing 2 lists in case there is no split: one with\n the image paths and one with the corresponding labels. 
If split is true\n the method returns 4 lists (2 for training and 2 for validation).\n \"\"\"\n df = pd.read_csv(filename)\n df = df.sample(frac=1).reset_index(drop=True)\n if split:\n mask = np.random.rand(len(df)) < split_percentage\n training_set = df[mask]\n validation_set = df[~mask]\n return (\n training_set['image_file_path'].tolist(),\n training_set['label'].tolist(),\n validation_set['image_file_path'].tolist(),\n validation_set['label'].tolist(),\n )\n else:\n return (\n df['image_file_path'].tolist(),\n df['label'].tolist(),\n )\n\n\ndef load_image(filepath, label):\n \"\"\"\n\n :param filepath: A tensor representing the filepath of the image\n :param label: The label for the image.\n :return: A tensor representing the image ready to be used in the inception\n model and its label.\n \"\"\"\n\n image_string = tf.read_file(filepath)\n image_decoded = tf.image.decode_image(\n image_string,\n channels=1\n )\n image_resized = tf.image.resize_image_with_crop_or_pad(\n image_decoded,\n IMAGE_WIDTH,\n IMAGE_HEIGHT\n )\n image_resized = tf.image.resize_images(\n image_resized,\n (\n TARGET_IMAGE_WIDTH,\n TARGET_IMAGE_HEIGHT\n )\n )\n # Normalize the image.\n image_normalized = image_resized / 255\n image = tf.reshape(\n tf.cast(image_normalized, tf.float32),\n shape=(TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT)\n )\n # Stack the image 3 times since the pre-trained inception model\n # required a 3 channel image. This can be optimized by instantiating\n # inception with 1 channel and retrain the first layer from scratch.\n return tf.stack([image, image, image], axis=2), label\n\n\n# This setup the script so it can be used with different command groups from\n# command line.\nif __name__ == '__main__':\n cli()\n"
}
] | 3 |
aravindk2604/self-driving-car-nd | https://github.com/aravindk2604/self-driving-car-nd | 15aab990a7fe38e3bf185cc230aa71af89cf3548 | c7d8200a6743f05535fffea93ecc525cac571c1e | d00000449d272ede923763be0928596eecf65756 | refs/heads/master | 2021-04-29T20:25:59.642052 | 2018-02-26T06:15:00 | 2018-02-26T06:15:00 | 121,597,127 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7733333110809326,
"alphanum_fraction": 0.7733333110809326,
"avg_line_length": 36.5,
"blob_id": "46dc9d43d17fa7e92a42d8f5cca4592a49db4130",
"content_id": "b1612f59e9d360b29f3bc6f0322be11e227cd2dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/README.md",
"repo_name": "aravindk2604/self-driving-car-nd",
"src_encoding": "UTF-8",
"text": "# self-driving-car-nd\na repo to store my code for the Udacity SDCND course\n"
},
{
"alpha_fraction": 0.6559839844703674,
"alphanum_fraction": 0.6880320310592651,
"avg_line_length": 27.542856216430664,
"blob_id": "36c31e6f49ee6340e51c00fcbad8296eddea1fbd",
"content_id": "4181d5d209a439f612d2a3465893afede8906984",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1997,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 70,
"path": "/cv_fundamentals/colorAndRegion.py",
"repo_name": "aravindk2604/self-driving-car-nd",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n# Read in the image and print out some stats\nimage = mpimg.imread('test.jpg')\n#print('This image is: ', type(image), 'with dimensions: ', image.shape)\n\n# Grab the x and y size and make a copy of the image \nysize = image.shape[0]\nxsize = image.shape[1]\nprint (xsize, ysize)\n# Note: always make a copy rather than simply using \"=\"\ncolor_select = np.copy(image)\nline_image = np.copy(image)\n\n# Define color selection criteria\n\nred_threshold = 200\ngreen_threshold = 200\nblue_threshold = 190\n\nrgb_threshold = [red_threshold, green_threshold, blue_threshold]\n\n\n# Define a triangular region of interest\nleft_bottom = [0, 390]\nright_bottom = [600, 390]\napex = [280, 180]\n\n# connect these points using a straight line formula\n\nfit_left = np.polyfit((left_bottom[0], apex[0]),(left_bottom[1], apex[1]),1 )\nfit_right = np.polyfit((right_bottom[0], apex[0]),(right_bottom[1], apex[1]),1 )\nfit_bottom = np.polyfit((left_bottom[0], right_bottom[0]),(left_bottom[1], right_bottom[1]),1 )\n\n\n# Mask the pixels below the threshold\ncolor_thresholds = (image[:,:,0] < rgb_threshold[0]) \\\n\t\t\t | (image[:,:,1] < rgb_threshold[1]) \\\n\t\t\t | (image[:,:,2] < rgb_threshold[2]) \n\n\n\n# creating meshgrid\nXX, YY = np.meshgrid(np.arange(0,xsize),np.arange(0,ysize))\nregion_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \\\n\t\t\t\t\t(YY > (XX*fit_right[0] + fit_right[1])) & \\\n\t\t\t\t\t(YY < (XX*fit_bottom[0] + fit_bottom[1]))\n\n\n# mask color selection\ncolor_select[color_thresholds] = [0,0,0]\n\n# FInd where image is both colored right and in the region\nline_image[~color_thresholds & region_thresholds] = [255,0,0]\n\n\n#Display the image\nplt.imshow(color_select)\n#plt.show()\nplt.imshow(line_image)\n#plt.savefig(\"region-mask-lanes.jpg\")\n#plt.show()\nplt.imshow(region_thresholds)\n#plt.show()\n\n#inside anaconda env the imsave option doesn't save the \n#image as jpg but as png. So save it as png \nmpimg.imsave(\"region-mask-lanes.png\", line_image)"
},
{
"alpha_fraction": 0.6936842203140259,
"alphanum_fraction": 0.714736819267273,
"avg_line_length": 24.70270347595215,
"blob_id": "e578354eed5cdd7a1650f317ca914a1ba396cceb",
"content_id": "75db11476d273e3d7108ee352d9bb73f1d17b91c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 37,
"path": "/cv_fundamentals/firstCode.py",
"repo_name": "aravindk2604/self-driving-car-nd",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n# Read in the image and print out some stats\nimage = mpimg.imread('test.jpg')\n#print('This image is: ', type(image), 'with dimensions: ', image.shape)\n\n# Grab the x and y size and make a copy of the image \nysize = image.shape[0]\nxsize = image.shape[1]\n\n# Note: always make a copy rather than simply using \"=\"\ncolor_select = np.copy(image)\n\n# Define color selection criteria\n\nred_threshold = 200\ngreen_threshold = 200\nblue_threshold = 190\n\nrgb_threshold = [red_threshold, green_threshold, blue_threshold]\n\n# Identify pixels below the threshold\nthresholds = (image[:,:,0] < rgb_threshold[0]) \\\n\t\t\t| (image[:,:,1] < rgb_threshold[1]) \\\n\t\t\t| (image[:,:,2] < rgb_threshold[2]) \n\nprint thresholds.shape\n#print color_select[thresholds]\ncolor_select[thresholds] = [0,0,0]\n\n#Display the image\nplt.imshow(color_select)\nplt.show()\n\nmpimg.imsave(\"test-after.jpg\", color_select)"
},
{
"alpha_fraction": 0.6537867188453674,
"alphanum_fraction": 0.6893354058265686,
"avg_line_length": 31.375,
"blob_id": "4a5475a2a8e3adfeb142ad3e275b2e546baaeece",
"content_id": "328ff2673168d1c203a4ed4c808d6c6d44c2ba18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1294,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 40,
"path": "/cv_fundamentals/regionMask.py",
"repo_name": "aravindk2604/self-driving-car-nd",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n# Read in the image and print out some stats\nimage = mpimg.imread('test.jpg')\n#print('This image is: ', type(image), 'with dimensions: ', image.shape)\n\n# Grab the x and y size and make a copy of the image \nysize = image.shape[0]\nxsize = image.shape[1]\nprint xsize, ysize\n# Note: always make a copy rather than simply using \"=\"\nregion_select = np.copy(image)\n\n# Define a triangular region of interest\nleft_bottom = [0, 390]\nright_bottom = [600, 390]\napex = [300, 150]\n\n# connect these points using a straight line formula\n\nfit_left = np.polyfit((left_bottom[0], apex[0]),(left_bottom[1], apex[1]),1 )\nfit_right = np.polyfit((right_bottom[0], apex[0]),(right_bottom[1], apex[1]),1 )\nfit_bottom = np.polyfit((left_bottom[0], right_bottom[0]),(left_bottom[1], right_bottom[1]),1 )\n\n# creating meshgrid\nXX, YY = np.meshgrid(np.arange(0,xsize),np.arange(0,ysize))\nregion_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \\\n\t\t\t\t\t(YY > (XX*fit_right[0] + fit_right[1])) & \\\n\t\t\t\t\t(YY < (XX*fit_bottom[0] + fit_bottom[1]))\n\n# make the region of interest red in color\nregion_select[region_thresholds] = [255,0,0]\n\n#Display the image\nplt.imshow(region_select)\nplt.show()\n\nmpimg.imsave(\"region-mask.jpg\", region_select)"
},
{
"alpha_fraction": 0.6758090853691101,
"alphanum_fraction": 0.7191442847251892,
"avg_line_length": 30.44827651977539,
"blob_id": "8b724cccf16641d8714885343d1ef07eb053d5f0",
"content_id": "5df1d711fb345a984312dde1899864647e8560bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1823,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 58,
"path": "/cv_fundamentals/houghLaneLines.py",
"repo_name": "aravindk2604/self-driving-car-nd",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n# Read in and grayscale the image\nimage = mpimg.imread(\"highway.jpg\")\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n# Define a kernel size and apply Gaussian smoothing\nkernel_size = 3\nblur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)\n\n# Define our parameters for Canny and apply\nlow_threshold = 45\nhigh_threshold = 135\nedges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n\n# Next we'll create a masked edges image using cv2.fillPoly()\nmask = np.zeros_like(edges)\nignore_mask_color = 255\n\n# This time we are defining a four sided polygon to mask\nimshape = image.shape\ntop_left = [222, 440]\ntop_right = [336, 440]\nbottom_right = [645, 600]\nvertices = np.array([[(0, imshape[0]),(top_left[0], top_left[1]),(top_right[0], top_right[1]), (bottom_right[0], bottom_right[1])]], dtype=np.int32)\ncv2.fillPoly(mask, vertices, ignore_mask_color)\nmasked_edges = cv2.bitwise_and(edges, mask)\n\n#plt.imshow(masked_edges)\n#plt.show()\n\n# Define Hough transform parameters\n# Make a blank the same size as our image to draw on\nrho = 2\ntheta = np.pi/180\nthreshold = 18\nmin_line_length = 40\nmax_line_gap = 20\nline_image = np.copy(image)*0 # creating a blank to draw lines on\n\n# Run Hough on edge detected image\nlines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)\n\n# Iterate over the output \"lines\" and draw lines on the blank\nfor line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(line_image, (x1,y1), (x2,y2), (255,0,0), 6)\n\n# Create a \"color\" binary image to combine with line image\ncolor_edges = np.dstack((edges, edges, edges))\n\n# Draw the lines on the edge image\ncombo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)\nplt.imshow(combo)\nplt.show()"
}
] | 5 |
sdaza/i_milestone | https://github.com/sdaza/i_milestone | 1fa1dd06e0d951ff3fbe6f46fac47a92770884bc | ef837a173421d919c46f7c47e293d1e1cb2db2aa | b4990c6cdce7cd73744bc4d4e7b8e71d03bc2a08 | refs/heads/master | 2021-01-19T20:30:00.659012 | 2017-08-24T02:17:03 | 2017-08-24T02:17:03 | 101,227,654 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5973496437072754,
"alphanum_fraction": 0.60550457239151,
"avg_line_length": 31.340660095214844,
"blob_id": "f753995d2c6b8e7ea863548935502ea74819587c",
"content_id": "ad0ecdcd7465c2fe9570d7ee1bddca3d932d8cbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2943,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 91,
"path": "/app.py",
"repo_name": "sdaza/i_milestone",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\nfrom bokeh.io import curdoc\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource, Select, CheckboxGroup, Legend, HoverTool\nimport pandas as pd\nimport datetime\nimport dateutil.relativedelta\nfrom bokeh.layouts import column, widgetbox\nfrom bokeh.embed import components\nimport quandl\n\napp = Flask(__name__)\n\n# get date and list of tickers\n\n# get dates\ntoday = datetime.datetime.today()\ntoday = today - dateutil.relativedelta.relativedelta(days=1)\nstart = today - dateutil.relativedelta.relativedelta(months=1)\ntoday_str = today.strftime('%Y-%m-%d')\nstart_str = start.strftime('%Y-%m-%d')\n\n# get list of tickers\nquandl.ApiConfig.api_key = '5Yj_Fcq66uKbMsC5-9PJ'\ndf = quandl.get_table('WIKI/PRICES',\n qopts = { 'columns': ['ticker'] },\n # ticker = ['AAPL', 'MSFT'],\n date = {today_str}, paginate=True)\nlist_tickers = list(df.ticker.unique())\n\n# create plot function\ndef create_figure(selected_ticker):\n df = quandl.get_table('WIKI/PRICES',\n qopts = { 'columns': ['ticker', 'date', 'open','close'] },\n ticker = [selected_ticker],\n date = {'gte': start_str, 'lte': today_str}, paginate=True)\n\n source = ColumnDataSource(\n data = {\n 'x' : df['date'],\n 'o' : df['open'],\n 'c' : df['close']\n })\n\n title = selected_ticker + ' Quandl WIKI Stock Prices, ' + start_str + ' to ' + today_str\n plot = figure(title = title, plot_height = 400, plot_width = 700,\n x_axis_type = 'datetime', toolbar_location = None)\n\n line_open = plot.line(x='x', y='o', source = source, color = 'blue', line_alpha=0.6,\n line_width = 1.5)\n line_close = plot.line(x='x', y='c', source = source, color = 'red', line_alpha=0.6,\n line_width = 1.5)\n\n lines = [line_open, line_close]\n\n legend = Legend(items=[\n ('Opening price', [line_open]),\n ('Closing price', [line_close])],\n location=(0, -30))\n\n plot.add_layout(legend, 'right')\n\n hover = HoverTool(tooltips =[\n ('Opening', '@o'),\n ('Closing', '@c')])\n\n plot.add_tools(hover)\n plot.xaxis.axis_label = \"Date\"\n plot.yaxis.axis_label = \"Stock price\"\n return plot\n\n# Index page\[email protected]('/')\ndef index():\n # Determine the selected feature\n\tselected_ticker = request.args.get(\"feature_ticker\")\n\tif selected_ticker == None:\n\t\tselected_ticker = \"FB\"\n\n # Create the plot\n\tplot = create_figure(selected_ticker)\n\n\t# Embed plot into HTML via Flask Render\n\tscript, div = components(plot)\n\treturn render_template(\"index.html\", script=script, div=div,\n\t\tlist_tickers=list_tickers, selected_ticker=selected_ticker)\n\n# With debug=True, Flask server will auto-reload\n# when there are code changes\nif __name__ == '__main__':\n app.run(port=33507)\n"
},
{
"alpha_fraction": 0.7250000238418579,
"alphanum_fraction": 0.7416666746139526,
"avg_line_length": 23.200000762939453,
"blob_id": "da2750b8107d89b9479c8e3346ad68522e000db0",
"content_id": "b1773788fe6bcb0d2318949ca50f799a6184444a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 5,
"path": "/README.md",
"repo_name": "sdaza/i_milestone",
"src_encoding": "UTF-8",
"text": "### 12 days program milestone\n\n- Flask app\n- Bokeh interactive plot\n- App in action: https://sdaza-stocks.herokuapp.com/"
},
{
"alpha_fraction": 0.45783132314682007,
"alphanum_fraction": 0.6867470145225525,
"avg_line_length": 12.833333015441895,
"blob_id": "7bc4327f3a70fa882ffa3e31eba6d3fc94fc29f4",
"content_id": "a6af6d6c812ecad30916e219bb1ad8cac4f8d22f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "sdaza/i_milestone",
"src_encoding": "UTF-8",
"text": "bokeh==0.12.6\npandas==0.20.3\nquandl==3.2.0\nflask==0.12.0\ngunicorn==19.7.1\ndatetime\n"
}
] | 3 |
Pav-H/MD5_collisions | https://github.com/Pav-H/MD5_collisions | 2f3fba99056f48fe3a1984043900fdcf8797512d | ec0a945dee12377b44c5a6d0c08fd7db478848f1 | 3c30c2b8fb42bd8d71687bac180bb1c06579c064 | refs/heads/master | 2023-08-11T20:38:08.146088 | 2021-09-25T11:52:56 | 2021-09-25T11:52:56 | 410,259,800 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5541542768478394,
"alphanum_fraction": 0.5645400881767273,
"avg_line_length": 25.959999084472656,
"blob_id": "c94539f130d1966047659745902f45b9dc5a4c00",
"content_id": "2982f1a41d7a7c13a8d62a15dc1f27a51c05cbd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1348,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 50,
"path": "/main.py",
"repo_name": "Pav-H/MD5_collisions",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf8 -*-\nimport itertools\nimport hashlib\nimport unicodedata as ucd\nimport sys\n\n\n# UTF-8 symbols generator\ndef dumpEncoding(enc):\n arr = []\n for i in range(sys.maxunicode):\n u = chr(i)\n try:\n s = u.encode(enc)\n except UnicodeEncodeError:\n continue\n try:\n name = ucd.name(u)\n except:\n name = '?'\n arr.append(s)\n return arr\n\n\n# tuple to string\ndef tupToStr(tup: tuple):\n flatten = [str(item) for sub in tup for item in sub]\n s = \"\".join(flatten)\n return s\n\n\ndef getLastNbait(s: str, n: int):\n s_new = \"\"\n for i in range(1, n*2 + 1):\n s_new += s[-i]\n return s_new\n\n\nif __name__ == \"__main__\":\n my_string = \"Hello World\"\n my_hashed_string = hashlib.md5(my_string.encode(\"UTF-8\")).hexdigest()\n print(f\"Hello World = {my_hashed_string}\")\n n = input(\"Choose n ( 0 < n < 16) baits to broot hash: \")\n char_str = dumpEncoding(\"UTF-8\")\n for i in range(1, len(my_string)):\n for c in itertools.combinations_with_replacement(char_str, i):\n comb = tupToStr(c)\n hashed_comb = hashlib.md5(comb.encode(\"UTF-8\")).hexdigest()\n if getLastNbait(hashed_comb, int(n)) == getLastNbait(my_hashed_string, int(n)):\n print(f\"Found collision: {comb} = {hashed_comb}\")\n"
}
] | 1 |
skrishnak7/Query-Expansion | https://github.com/skrishnak7/Query-Expansion | d6a1960654ca7e00020a52894fd0414a4aba18e5 | 4f341e2cb287daf7b9be2aae463d46a2a473ebe4 | 11537a67c45fa94fedcc88898c7212994b449fc5 | refs/heads/master | 2021-01-20T12:20:50.372827 | 2017-02-21T07:41:07 | 2017-02-21T07:41:07 | 82,649,777 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6083640456199646,
"alphanum_fraction": 0.6294055581092834,
"avg_line_length": 16.55609703063965,
"blob_id": "9f1ea8eb8685a7bd6ca572935a22cd2e959c154c",
"content_id": "cd7ed3e2d5ed3b4a9f461b358b0f92ffb694ac2b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3802,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 205,
"path": "/roc.py",
"repo_name": "skrishnak7/Query-Expansion",
"src_encoding": "UTF-8",
"text": "from __future__ import division\r\nimport re\r\nimport sys\r\nimport math\r\nimport json\r\nimport operator\r\nfrom nltk.stem.porter import*\r\nfrom operator import add\r\nfrom collections import Counter\r\n\r\ndef encode(text):\r\n\treturn text.encode('utf-8')\r\n\r\ndef getWordTF(word,doc):\r\n\tcount=0\r\n\tfor i in doc:\r\n\t\tif i==word:\r\n\t\t\tcount=count+1\r\n\r\n\treturn count\r\n\r\ndef getmaxtf(doc):\r\n\tterms=[]\r\n\tz=[]\r\n\tmaxtf=0\r\n\tfor term in doc:\r\n\t\tterms.append(term)\r\n\tz=Counter(terms)\r\n\tsorted_key = reversed(sorted(z.items(),key = operator.itemgetter(1)))\r\n\tfor i,j in sorted_key:\r\n\t\tmaxtf=j\r\n\t\tbreak\r\n\r\n\treturn maxtf\r\n\r\ndef getdocFreq(word,documents):\r\n\tcount=0\r\n\tfor doc in documents:\r\n\t\tif word in doc:\r\n\t\t\tcount+=1\r\n\treturn count\r\ndef getStopWords(filename):\r\n\tstopwords = []\r\n\tfp= open(filename,'r')\r\n\tline = fp.readline()\r\n\twhile line:\r\n\t\tword =line.strip()\r\n\t\tstopwords.append(word)\r\n\t\tline = fp.readline()\r\n\r\n\tfp.close()\r\n\treturn stopwords\r\n\r\ndef weight1(term,document,documents):\r\n tf= getWordTF(term,document)\r\n maxtf = getmaxtf(document)\r\n if term in document:\r\n \tw1= (0.4+0.6*math.log(tf+0.5)/math.log(maxtf+1.0))\r\n else:\r\n w1=0\t\r\n return w1\r\n\r\ndef weight11(term,document,documents):\r\n tf= getWordTF(term,document)\r\n maxtf = getmaxtf(document)\r\n cs=10\r\n df=getdocFreq(term,documents)\r\n if term in document:\r\n \tw1= (0.4+0.6*math.log( tf+0.5)/math.log(maxtf+1.0))*(math.log(cs/df)/math.log(cs)) \r\n else:\r\n w1=0\r\n\r\n return w1\r\n\r\n\r\nalpha = 1\r\nbeta = 0.8\r\n#gamma = 0\r\n\r\ndef getVector(doc,documents,terms):\r\n\tdummy=[]\r\n\tfor term in terms:\r\n\t\tif term in doc:\r\n\t\t\tw1=weight11(term,doc,documents)\r\n\t\t\tdummy.append(w1)\r\n\t\telse:\r\n\t\t\tdummy.append(0)\r\n\treturn dummy \t\t\r\n\r\ndef getVector1(doc,documents,terms):\r\n\tdummy=[]\r\n\tfor term in terms:\r\n\t\tif term in doc:\r\n\t\t\tw1=weight1(term,doc,documents)\r\n\t\t\tdummy.append(w1)\r\n\t\telse:\r\n\t\t\tdummy.append(0)\r\n\treturn dummy\r\n\r\ndef rocchio(query,documents,terms):\r\n\t\r\n\tdocvecs=[]\r\n\tfor doc in documents:\r\n\t\tc=getVector(doc,documents,terms)\r\n\t\tdocvecs.append(c)\r\n\r\n\tdocsum = [sum(x) for x in zip(*docvecs)]\r\n\tdocsum = [x/len(documents) for x in docsum]\r\n\tdocsum = [x*beta for x in docsum]\r\n\r\n\tqv = getVector1(query,documents,terms)\r\n\tqv = [x*alpha for x in qv]\r\n\r\n\tqm = map(add,qv,docsum)\r\n\treturn qm\r\n \r\ndocs=[]\r\n\r\n\r\n\r\nwith open('1.json') as data_file: \r\n data = json.load(data_file)\r\n\r\n#pprint(data)\r\n\r\nfor doc in data:\r\n\t#print \"content:\",encode(doc[\"content\"])\r\n\tdocs.append(encode(doc[\"content\"]))\r\n\r\np = PorterStemmer()\r\nstopwords = getStopWords('stopwords')\r\ncommonwords = getStopWords('common_words')\r\n\r\ndocs1=[]\r\nterms=[]\r\nfor doc in docs:\r\n\tdoc = re.sub(r'[^\\sa-zA-Z]','',doc)\r\n\tdoc = doc.split()\r\n\tdoc = [term.lower() for term in doc]\r\n\t#doc = [p.stem(term) for term in doc]\r\n\tdoc = [x for x in doc if x not in stopwords]\r\n\tdoc = [x for x in doc if x not in commonwords]\r\n\tdocs1.append(doc)\t\r\n\r\nfor doc in docs1:\r\n\tfor term in doc:\r\n\t\tterms.append(term)\t \r\n\r\nz= Counter(terms)\r\nz1 = sorted(z.items(),key=operator.itemgetter(0))\r\n\r\nvocab=[]\r\nfor i,j in z1:\r\n\tvocab.append(i)\r\n\r\n\r\nfor doc in docs1:\r\n\tl=getVector(doc,docs1,terms)\r\n\t#print len(l),l\r\n\r\n#print len(terms)\r\n\r\nquery = \"classic 
cars\"\r\nq1=query.split()\r\n\r\nqvec=getVector1(query,docs1,vocab)\r\n\r\n#print \"qvec\",qvec\r\n\r\n#print len(rocchio(q1,docs1,vocab))\r\n#print (rocchio(q1,docs1,vocab))\r\n\r\nl= rocchio(q1,docs1,vocab)\r\n\r\ndic = {}\r\ni=0\r\nfor term in vocab:\r\n\tdic[term]=l[i]\r\n\ti=i+1\r\n\r\n#print dic\t\r\n\r\nf=sorted(dic.items(),key=operator.itemgetter(1),reverse=True)\r\n\r\n#print f\r\n#print dic\r\n#print len(dic)\r\n\r\n#print z1[art]\r\n\r\nfor i,j in f:\r\n\td=0\r\n\texpandedterm=\"\"\r\n\tif i in q1:\r\n\t\t#print i,\"#There#\"\r\n\t\tcontinue\r\n\telse:\r\n\t\t#print i,\"NOtT\"\r\n\t\texpandedterm=expandedterm+i\r\n\t\td=d+1\r\n\t\tbreak\r\n\r\nprint f[d][0],f[d+1][0]\r\n#print query\r\nprint query + \" \" + f[d][0]"
},
{
"alpha_fraction": 0.6568511128425598,
"alphanum_fraction": 0.6720494031906128,
"avg_line_length": 20.8858699798584,
"blob_id": "a4c12ad514f4c2fa012cfc3ead21dfdd94c3e375",
"content_id": "00ff390ab59b1c17c83c1a17b9e2fe42f2878716",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4211,
"license_type": "permissive",
"max_line_length": 190,
"num_lines": 184,
"path": "/met.py",
"repo_name": "skrishnak7/Query-Expansion",
"src_encoding": "UTF-8",
"text": "from __future__ import division\r\nimport re\r\nimport sys\r\nimport operator\r\nimport math\r\nimport json\r\nfrom pprint import pprint\r\nfrom nltk.stem.porter import*\r\nfrom collections import Counter\r\nfrom itertools import islice\r\n\r\n#Function to encode the retrieved json content \r\ndef encode(text):\r\n\treturn text.encode('utf-8')\r\n\r\n#Take out the first two elements of the dictionary \r\ndef take(n, iterable):\r\n return list(islice(iterable, n))\r\n\r\n#Retrive stopwords from the file named stopwords \r\ndef getStopWords(filename):\r\n\tstopwords = []\r\n\tfp= open(filename,'r')\r\n\tline = fp.readline()\r\n\twhile line:\r\n\t\tword =line.strip()\r\n\t\tstopwords.append(word)\r\n\t\tline = fp.readline()\r\n\r\n\tfp.close()\r\n\treturn stopwords\r\n\r\n\r\ndef dist(u,v,doc):\r\n\tcountu=0\r\n\tcountv=0\r\n\tcount=0\r\n\tfor term in doc:\r\n\t\tif term == u:\r\n\t\t\tcountu=count\r\n\t\tif term == v:\r\n\t\t\tcountv=count\r\n\t\tcount=count+1\r\n\treturn countu-countv\t\r\n\r\n#Correlation factor for Association Clustering \r\ndef CorrelationFactor(u,v,mat1,z):\r\n\tuindex=0\r\n\tvindex=0\r\n\tcuv=0\r\n\tcount = 0\r\n\tfor i,j in z.items():\r\n\t\tif i==u :\r\n\t\t\tuindex=count \r\n\r\n\t\tif i==v :\r\n\t\t\tvindex = count\r\n\r\n\t\tcount = count + 1\t\t\r\n\r\n\tfor row in mat1:\r\n\t\tcuv=cuv+row[uindex]*row[vindex]\r\n \r\n\treturn cuv \t\r\n\r\n\r\n#Correlation factor for Metric Clustering\r\ndef MCorrelationFactor(u,v,documents):\r\n\tcuv=0\r\n\tfor doc in documents:\r\n\t\tif ((u in doc) and (v in doc)) :\r\n\t\t\tr=abs(dist(u,v,doc))\r\n\t\telse:\r\n\t\t\tr=0\r\n\t\tif(r!=0):\t\r\n\t\t cuv=cuv+(1/r)\r\n\r\n\treturn cuv\t\r\n\r\n\r\n\r\n\r\n#Metric Cluster for word U in the relevant documents obtained\r\ndef metric(u,documents,z):\r\n\ts={}\r\n\tfor i,j in z.items():\r\n\t\tsuv=MCorrelationFactor(u,i,documents)\r\n\t\ts[i]=suv\r\n\r\n\tslist= reversed(sorted(s.items(),key=operator.itemgetter(1)))\r\n\ttop2= take(20,slist)\r\n\treturn top2\t\r\n\r\n\t\t\t\t\r\ndocs=[]\r\n\r\n\r\nD1 = \"The Mustang, built in Flat Rock, Mich., is still a novelty in Europe, where it went on sale for the first time two years ago.\"\r\nD2 = \"Food allergies have been on the rise in recent years and are currently estimated to affect up to eight percent of children worldwide.\"\r\nD3 = \"Adobe has released an emergency update to its Flash Player after security researchers discovered a bug that allows attackers to take over and then crash users' machines.\"\r\nD4 = \"The worst attack on Adobe software came in 2013, when hackers managed to access personal data for nearly 3 million customers.\"\r\nD5 = \"The Mustang is relatively expensive in Italy compared to the United States. 
There are few cars available in Italy that offer the Mustang's performance capabilities at a similar price.\"\r\nD6 = \"Adobe appear flaw active exploit system run Window Flash Player statement Adobe flaw critical vulnerability urg user update soon possible\"\r\nD7 = \"Active avoidance of food allergens in baby's diets did not protect them from developing food allergies\"\r\nD8 = \"Flash Player wide use watch video animation multimedia.\"\r\n\r\n#docs.append(D1)\r\n#docs.append(D2)\r\n#docs.append(D3)\r\n#docs.append(D4)\r\n#docs.append(D5)\r\ndocs.append(D6)\r\n#docs.append(D7)\r\ndocs.append(D8)\r\n\r\n\r\n#with open('1.json') as data_file: \r\n # data = json.load(data_file)\r\n\r\n#pprint(data)\r\n\r\n#for doc in data:\r\n\t#print \"content:\",encode(doc[\"content\"])\r\n#\tdocs.append(encode(doc[\"content\"]))\r\n\r\n\r\nstopwords = getStopWords('stopwords')\r\ncommonwords = getStopWords('common_words')\r\np = PorterStemmer()\r\n\r\nterms = [] \r\n\r\ndocs1=[]\r\nfor doc in docs:\r\n\tdoc = re.sub(r'[^\\sa-zA-Z]', '', doc)\r\n\tdoc = doc.split()\r\n\tdoc = [term.lower() for term in doc]\r\n\t#doc = [p.stem(term) for term in doc]\r\n\tdocs1.append(doc)\r\n\r\n\r\nfor doc in docs1:\r\n\tprint doc\r\n\tfor term in doc:\r\n\t\tif (term in stopwords) or (term in commonwords):\r\n\t\t\tcontinue\r\n\t\telse:\t \r\n\t\t\tterms.append(term)\t \r\n\r\nz= Counter(terms)\r\nk=1\r\n\r\nz1 = sorted(z.items(),key=operator.itemgetter(0))\r\n#for i,j in z1:\r\n#\tprint i\r\nfor i, j in z1:\r\n\tprint k,i,j\r\n\tk=k+1\r\n\r\n \r\nmat1 = []\r\n\r\nfor doc in docs1:\r\n\trow = []\r\n\ty=Counter(doc)\r\n\tfor term,count in z.items():\r\n\t\tif term in doc:\r\n\t\t\tc=y[term]\r\n\t\t\trow.append(c)\r\n\t\telse:\r\n\t\t\trow.append(0)\r\n\tmat1.append(row)\t\t\t\r\n\r\nfor row in mat1:\r\n\tprint len(row),row\r\n\r\nquery=\"video security\"\r\nq1=query.split()\r\n#q1=[p.stem(term) for term in q1]\r\n\r\nfor q in q1:\r\n\tl=metric(q,docs1,z)\r\n\t#print q,len(l),l\r\n\tprint l[0][0]\r\n"
},
{
"alpha_fraction": 0.6052111983299255,
"alphanum_fraction": 0.6194236278533936,
"avg_line_length": 15.713286399841309,
"blob_id": "961936441796bab0acb42b76e2f988e21ecada62",
"content_id": "cc734d7183e9295b4cecc4b4a0404498b996dc99",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2533,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 143,
"path": "/local.py",
"repo_name": "skrishnak7/Query-Expansion",
"src_encoding": "UTF-8",
"text": "from __future__ import division\r\nimport sys\r\nimport math\r\nimport operator\r\nimport json\r\nfrom pprint import pprint\r\nfrom nltk.stem.porter import*\r\nfrom itertools import islice\r\nfrom collections import Counter\r\n\r\ndef take(n, iterable):\r\n return list(islice(iterable, n))\r\n\r\ndef encode(text):\r\n\treturn text.encode('utf-8')\r\n\r\ndef getStopWords(filename):\r\n\tstopwords = []\r\n\tfp= open(filename,'r')\r\n\tline = fp.readline()\r\n\twhile line:\r\n\t\tword =line.strip()\r\n\t\tstopwords.append(word)\r\n\t\tline = fp.readline()\r\n\r\n\tfp.close()\r\n\treturn stopwords\r\n\r\n\r\ndef CorrelationFactor(u,v,mat1,z):\r\n\tuindex=0\r\n\tvindex=0\r\n\tcuv=0\r\n\tcount = 0\r\n\tfor i,j in z.items():\r\n\t\tif i==u :\r\n\t\t\tuindex=count \r\n\r\n\t\tif i==v :\r\n\t\t\tvindex = count\r\n\r\n\t\tcount = count + 1\t\t\r\n\r\n\tfor row in mat1:\r\n\t\tcuv=cuv+row[uindex]*row[vindex]\r\n \r\n\treturn cuv \t\r\n\r\n\r\ndef StemCLuster(u,mat1,z):\r\n\ts={}\r\n\tfor i,j in z.items():\r\n\t\tsuv = (CorrelationFactor(u,i,mat1,z) / ((CorrelationFactor(u,u,mat1,z)) + CorrelationFactor(i,i,mat1,z)+ CorrelationFactor(u,i,mat1,z))) \r\n\t\t#print suv\r\n\t\ts[i]= suv\r\n\r\n\tsorteds= sorted(s.items(),key=operator.itemgetter(1),reverse=True)\r\n\t#firsttwoitems = take(3,sorteds)\r\n\t#print firsttwoitems\r\n\treturn sorteds \r\n\r\ndocs=[]\r\n\r\n\r\nwith open('1.json') as data_file: \r\n data = json.load(data_file)\r\n\r\n#pprint(data)\r\n\r\nfor doc in data:\r\n\t#print \"content:\",encode(doc[\"content\"])\r\n\tdocs.append(encode(doc[\"content\"]))\r\n\r\n#print len(data)\r\n\r\n\r\nstopwords = getStopWords('stopwords')\r\ncommonwords = getStopWords('common_words')\r\n\r\np = PorterStemmer()\r\n\r\nterms = [] \r\n\r\ndocs1=[]\r\nfor doc in docs:\r\n\tdoc = re.sub(r'[^\\sa-zA-Z]', '', doc)\r\n\tdoc = doc.split()\r\n\tdoc = [term.lower() for term in doc]\r\n\t#doc = [p.stem(term) for term in doc]\r\n\tdocs1.append(doc)\r\n\r\n#for doc in docs1:\r\n#\tprint doc\r\n\r\nfor doc in docs1:\r\n\tfor term in doc:\r\n\t\tif (term in stopwords) or (term in commonwords):\r\n\t\t\tcontinue\r\n\t\telse:\t \r\n\t\t\tterms.append(term)\t \r\n\r\nz= Counter(terms)\r\nz1 = sorted(z.items(),key=operator.itemgetter(0))\r\n\r\n\r\nmat1 = []\r\n\r\nfor doc in docs1:\r\n\trow = []\r\n\ty=Counter(doc)\r\n\tfor term,count in z.items():\r\n\t\tif term in doc:\r\n\t\t\tc=y[term]\r\n\t\t\trow.append(c)\r\n\t\telse:\r\n\t\t\trow.append(0)\r\n\tmat1.append(row)\t\t\t\r\n \r\n#for row in mat1:\r\n#\tprint row\r\n\r\nquery= \"car rental\"\r\nq1=query.split()\r\n\r\nf=StemCLuster(query,mat1,z)\r\n\r\nfor i,j in f:\r\n\td=0\r\n\texpandedterm=\"\"\r\n\tif i in q1:\r\n\t\t#print i,\"#There#\"\r\n\t\tcontinue\r\n\telse:\r\n\t\t#print i,\"NOtT\"\r\n\r\n\t\texpandedterm=expandedterm+i\r\n\t\td=d+1\r\n\t\tbreak\r\n\r\nprint f\r\n#print f[d][0],f[d+1][0]\r\n#print query\r\nprint query + \" \" + f[d][0]\r\n"
},
{
"alpha_fraction": 0.6821467280387878,
"alphanum_fraction": 0.6967835426330566,
"avg_line_length": 27.291006088256836,
"blob_id": "84217213c7b7dfc2e9d7cd1af492a68bfed14ee1",
"content_id": "b74a039e5fc2d650a50f7994f9c151f6ea884cfb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5534,
"license_type": "permissive",
"max_line_length": 228,
"num_lines": 189,
"path": "/sample.py",
"repo_name": "skrishnak7/Query-Expansion",
"src_encoding": "UTF-8",
"text": "from __future__ import division\r\nimport re\r\nimport sys\r\nimport math\r\nimport json\r\nimport operator\r\nfrom nltk.stem.porter import*\r\nfrom operator import add\r\nfrom collections import Counter\r\n\r\ndef encode(text):\r\n\treturn text.encode('utf-8')\r\n\r\ndef take(n, iterable):\r\n return list(islice(iterable, n))\r\n\r\ndef getStopWords(filename):\r\n\tstopwords = []\r\n\tfp= open(filename,'r')\r\n\tline = fp.readline()\r\n\twhile line:\r\n\t\tword =line.strip()\r\n\t\tstopwords.append(word)\r\n\t\tline = fp.readline()\r\n\r\n\tfp.close()\r\n\treturn stopwords\r\n\r\ndef getITF(doc,documents):\r\n\tlen1=0\r\n\tfor doc1 in documents:\r\n\t\tlen1 = len1 + len(doc1)\r\n\r\n\titf= math.log(len1/len(doc))\t \r\n\treturn itf \r\n\r\ndef getfreq(term,doc):\r\n\tcount = 0\r\n\tfor i in doc:\r\n\t\tif (i == term) :\r\n\t\t\tcount = count+1\r\n\treturn count \t\t\r\n\r\n\r\ndef getmaxfreq(u,documents):\r\n\tfreqlist=[]\r\n\tfor doc in documents:\r\n\t\tc=getfreq(u,doc)\r\n\t\tfreqlist.append(c)\r\n\t#print freqlist,\"#####\"\r\n\treturn max(freqlist)\t\r\n\r\n\r\ndef getweightvector(u,documents):\r\n\tvec=[]\r\n\tmaxfreq= getmaxfreq(u,documents)\r\n\tsumweights=0\r\n\tfor doc in documents:\r\n\t\twij = (0.5 + 0.5*(getfreq(u,doc))/ maxfreq*getITF(doc,documents)) \r\n\t\tw= wij*wij\r\n\t\tsumweights = sumweights + w\r\n\t\r\n\tfor doc in documents:\r\n\t\twij = (0.5 + 0.5*(getfreq(u,doc))/ maxfreq*getITF(doc,documents))\r\n\t\t#print wij\r\n\t\tweight = wij / math.sqrt(sumweights)\r\n\t\tvec.append(weight)\r\n\r\n\treturn vec\t\r\n\r\n\r\ndef getQueryWeight(query):\r\n\t#q1=query.split()\r\n\tw=1/math.sqrt(len(q1))\r\n\treturn w\r\n\r\n#def getqueryvector(q1,docs1):\r\n#\tqweight=[]\r\n#\tqw=getQueryWeight(query)\r\n#\tfor word in q1:\r\n#\t\tc= getweightvector(word,docs1)\r\n#\t\tqweight.append(c)\r\n#\r\n#\tq2 = [sum(x) for x in zip(*qweight)]\r\n#\tq2 = [x*qw for x in q2]\t\r\n#\treturn q2 \r\n\r\ndef CUV(u,v,documents):\r\n\ta = getweightvector(u,documents)\r\n\tb = getweightvector(v,documents)\r\n\tab= [x*y for x,y in zip(a,b)]\r\n\tab1 = sum(ab)\r\n\treturn ab1\r\n\r\ndef sim(q,v,documents,terms):\r\n\tqw=getQueryWeight(q)\r\n\tl=0\r\n\tfor term in terms:\r\n\t\tif term in q:\r\n\t\t\tl=l+qw*CUV(term,v,documents)\r\n\treturn l\t\t\r\n\r\ndocs=[]\r\n'''\r\ndoc1 = \"The girl is studying in Paris while her brother lives in London. She likes a lot of girlish, artistic Parisian things\"\r\ndoc2 = \"The big museums in London are mostly free to the public, unlike those in Paris. This is why the girl goes to visits her brother often. There are more art lovers in Paris than in London, all youngsters.\"\r\ndoc3 = \"Girls like to visit Paris since they are young, whereas boys do not want to hear about Paris at all. London, with the tennis and soccer attractions, is more interesting to them. Boys like sports more.\"\r\ndoc4 = \"London is a bigger city than Paris, and they both have metro lines. Lots of people take the metro to go to work or to visit museums in their spare time. More people ride the metro in Paris than in London.\"\r\ndoc5 = \"London has a terrific theater scene, unlike Paris. In Paris people have interest in the movies. But they appreciate all sorts of art. 
Both cities have a great music scene, that often attracts young boys and girls.\"\r\n\r\ndocs.append(doc1)\r\ndocs.append(doc2)\r\ndocs.append(doc3)\r\ndocs.append(doc4)\r\ndocs.append(doc5)\r\n'''\r\nD1 = \"The Mustang, built in Flat Rock, Mich., is still a novelty in Europe, where it went on sale for the first time two years ago.\"\r\nD2 = \"Food allergies have been on the rise in recent years and are currently estimated to affect up to eight percent of children worldwide.\"\r\nD3 = \"Adobe has released an emergency update to its Flash Player after security researchers discovered a bug that allows attackers to take over and then crash users' machines.\"\r\nD4 = \"The worst attack on Adobe software came in 2013, when hackers managed to access personal data for nearly 3 million customers.\"\r\nD5 = \"The Mustang is relatively expensive in Italy compared to the United States. There are few cars available in Italy that offer the Mustang's performance capabilities at a similar price.\"\r\nD6 = \"Adobe said it appeared that the flaw was being actively exploited on systems running Windows with Flash Player. In a statement, Adobe called the flaw a critical vulnerability and urged users to update as soon as possible.\"\r\nD7 = \"Active avoidance of food allergens in baby's diets did not protect them from developing food allergies\"\r\nD8 = \"Flash Player is widely used for watching video animations and other multimedia\"\r\n\r\n#docs.append(D1)\r\n#docs.append(D2)\r\n#docs.append(D3)\r\n#docs.append(D4)\r\n#docs.append(D5)\r\ndocs.append(D6)\r\n#docs.append(D7)\r\ndocs.append(D8)\r\n\r\np = PorterStemmer()\r\nstopwords = getStopWords('stopwords')\r\ncommonwords = getStopWords('common_words')\r\n\r\ndocs1=[]\r\nterms=[]\r\nfor doc in docs:\r\n\tdoc = re.sub(r'[^\\sa-zA-Z]','',doc)\r\n\tdoc = doc.split()\r\n\tdoc = [term.lower() for term in doc]\r\n\t#doc = [p.stem(term) for term in doc]\r\n\tdoc = [x for x in doc if x not in stopwords]\r\n\tdoc = [x for x in doc if x not in commonwords]\r\n\tdocs1.append(doc)\t\r\n\r\nfor doc in docs1:\r\n\tfor term in doc:\r\n\t\tterms.append(term)\t \r\n\r\nz= Counter(terms)\r\nz1 = sorted(z.items(),key=operator.itemgetter(0))\r\n\r\nquery = \"video security\"\r\nq1=query.split()\r\n\r\n\r\nfinalsim={}\r\nfor term in terms:\r\n\tl=sim(q1,term,docs1,terms)\r\n\tprint term,l\r\n\tfinalsim[term]=l\r\n\r\n\r\nf= sorted(finalsim.items(),key=operator.itemgetter(1),reverse=True)\r\nprint f\r\n\r\nfor i,j in f:\r\n\td=0\r\n\texpandedterm=\"\"\r\n\tif i in q1:\r\n\t\t#print i,j,\"#There#\"\r\n\t\tcontinue\r\n\telse:\r\n\t\t#print i,\"NOtT\"\r\n\t\texpandedterm=expandedterm+i\r\n\t\td=d+1\r\n\t\tbreak\r\n\r\n, \r\nprint query\r\nprint query + \" \" + f[d-1][0]+ \" \"+f[d][0]\t\r\nl=\"video\"\r\nj=\"security\"\r\nprint getweightvector(l,docs1)\r\nprint getweightvector(j,docs1)"
},
{
"alpha_fraction": 0.8010505437850952,
"alphanum_fraction": 0.8010505437850952,
"avg_line_length": 37.9487190246582,
"blob_id": "082b84d3f47ab98db9c96e59764352eca6976087",
"content_id": "7a476237f574bd7abe7dbf2568b14ab71ef213cc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1531,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 39,
"path": "/README.md",
"repo_name": "skrishnak7/Query-Expansion",
"src_encoding": "UTF-8",
"text": "# Query-Expansion\n\n\nAs association cluster is based on the co-occurrence of stems (or terms) inside\ndocuments. The idea is that stems which co-occur frequently inside documents\nhave a synonymity association. \n\nAssociation clusters are based on the frequency of co-occurrence of pairs of\nterms in documents and do not take into account where the terms occur in\na document.\nSince two terms which occur in the same sentence seem more\ncorrelated than two terms which occur far apart in a document, it\nmight be worthwhile to factor in the distance between two terms in the\ncomputation of their correlation factor. Metric clusters are based on this\nidea.\n\nOne additional form of deriving a synonymity relationship between\ntwo local stems (or terms) su\nand sv\nis by comparing the sets Su\n(n)\nand Sv\n(n).\n The idea is that two stems with similar neighborhoods have some\nsynonymity relationship.\n In this case we say that the relationship is indirect or induced by the\nneighborhood.\n One way of quantifying such neighborhood relationships is to\narrange all correlation values su,I in a vector , to arrange all\ncorrelation values sv,I in another vector , and to compare these\nvectors through a scalar measure.\n For instance, the cosine of the angle between the two vectors is\na popular scalar similarity measure. \n\n\nlocal.py ->Query expansion using Scalar clustering \nroc.py -> Query expansion using Rocchio Algorithm\nsample.py ->Query expansion using Association Clustering\nmet.py -> Query expansion using Metric Clustering \n\n\n"
}
] | 5 |
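The Query-Expansion README above describes scalar (neighborhood) clustering as comparing two vectors of term-correlation values through a scalar measure such as the cosine of the angle between them. A minimal, self-contained sketch of that measure; the vector values and stem names below are invented for illustration and are not taken from the repo's data:

```python
import math

def cosine(u, v):
    # cosine of the angle between two correlation-value vectors
    dot = sum(a * b for a, b in zip(u, v))
    norm_u = math.sqrt(sum(a * a for a in u))
    norm_v = math.sqrt(sum(b * b for b in v))
    return dot / (norm_u * norm_v) if norm_u and norm_v else 0.0

# hypothetical correlation vectors for two stems s_u and s_v
s_u = [3.0, 1.0, 0.0, 2.0]
s_v = [2.5, 0.5, 0.0, 2.0]
print(cosine(s_u, s_v))  # a value near 1.0 suggests similar neighborhoods
```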
Pingze-github/pyRequest | https://github.com/Pingze-github/pyRequest | 47ae50c399019d08b33f3d3c6e31e09f29a1b799 | 59fc2e6bf69857b266b672afbf85442d2f0a9d3c | aaac7d66e649740746e9333c42d0166d78bcc53e | refs/heads/master | 2021-08-19T08:41:34.167254 | 2017-11-25T15:14:40 | 2017-11-25T15:14:40 | 110,262,955 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.48422330617904663,
"alphanum_fraction": 0.4939320385456085,
"avg_line_length": 29.5,
"blob_id": "422749e0363128394779b567a110ce4d582f36a8",
"content_id": "4b8639a0799e1a30b28831a0bd45d179982a9ba9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1662,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 54,
"path": "/lib/sqlite.py",
"repo_name": "Pingze-github/pyRequest",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\nclass ReqLogDB:\n def __init__(self):\n (self.conn, self.cursor) = self.createConn()\n\n def createConn(self):\n conn = sqlite3.connect('reqlog.db')\n print('sqlite 数据库连接成功')\n cursor = conn.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS LOG\n (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n METHOD TEXT NOT NULL,\n URL TEXT NOT NULL,\n QUERY TEXT NOT NULL,\n BODY TEXT NOT NULL);''')\n conn.commit()\n return (conn, cursor)\n\n def execute(self, sql):\n self.cursor.execute(sql)\n self.conn.commit()\n\n def insert(self, log):\n self.cursor.execute('INSERT INTO LOG (METHOD,URL,QUERY,BODY) VALUES (?,?,?,?)', (log['method'], log['url'], log['query'], log['body']))\n self.conn.commit()\n rows = self.cursor.execute('SELECT ID FROM LOG ORDER BY ID DESC LIMIT 1')\n return list(rows)[0][0]\n\n def selectAll(self):\n logs = []\n rows = self.cursor.execute('SELECT * FROM LOG ORDER BY ID DESC')\n for row in rows:\n logs.append({\n 'id': row[0],\n 'method': row[1],\n 'url': row[2],\n 'query': row[3],\n 'body': row[4],\n })\n return logs\n\n\n def selectOne(self, id):\n rows = self.cursor.execute('SELECT * FROM LOG WHERE ID=' + str(id))\n row = list(rows)[0]\n log = {\n 'id': row[0],\n 'method': row[1],\n 'url': row[2],\n 'query': row[3],\n 'body': row[4],\n }\n return log\n\n"
},
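A short usage sketch for the ReqLogDB class above; the log values are made up for illustration (the class itself creates reqlog.db in the working directory on first use):

```python
from lib.sqlite import ReqLogDB

db = ReqLogDB()
# insert() returns the auto-incremented ID of the new row
new_id = db.insert({'method': 'GET', 'url': 'http://example.com',
                    'query': 'ip 59.41.95.234', 'body': ''})
print(db.selectOne(new_id))   # the row just written, as a dict
print(len(db.selectAll()))    # all logged requests, newest first
```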
{
"alpha_fraction": 0.8387096524238586,
"alphanum_fraction": 0.8709677457809448,
"avg_line_length": 14.5,
"blob_id": "bfa6a39e81e66e4fd4ab2f25432a3b6804bb72ba",
"content_id": "ef5025fd9f613c1bd339fb4e5602443eebacdda2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Pingze-github/pyRequest",
"src_encoding": "UTF-8",
"text": "# pyRequest\n基于PyQt5开发的桌面端口测试工具\n"
},
{
"alpha_fraction": 0.5965728163719177,
"alphanum_fraction": 0.6059567332267761,
"avg_line_length": 31.84986686706543,
"blob_id": "a4fca62038e2f7cdc326f22e2819aac8017c9b77",
"content_id": "ff7329a8d86a5ed96b87913c952ba14e739040cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12489,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 373,
"path": "/index.py",
"repo_name": "Pingze-github/pyRequest",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport sys\nimport time\nimport re\nimport json\nfrom urllib.parse import urlparse\nfrom collections import OrderedDict\nimport requests\nfrom PyQt5.QtWidgets import QApplication,QWidget,QVBoxLayout,QHBoxLayout,QTabWidget,QPushButton,QTextEdit,QPlainTextEdit,QLineEdit,QLabel,QComboBox,QListWidget,QListWidgetItem\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtCore import QUrl,QThread,pyqtSignal\nfrom PyQt5.QtGui import QIcon,QFont,QFontDatabase\n\n# 全局异常捕获\ndef printErrors(exc_type, exc_value, traceback):\n sys.stderr.write(traceback)\nsys.excepthook = printErrors\n\nfrom lib.sqlite import ReqLogDB\nreqLogDB = ReqLogDB()\n\n# 超时\n# 请求详细信息\n# 支持多方法\n# 支持动态body/query\n# TODO 支持Headers\n# 编码问题\n# 美化、字体\n# TODO 增加记录\n# TODO 增加侧边栏\n# TODO debug 报错\n# 成功打包icon和字体\n# TODO 增加菜单\n# 增加查看快捷键\n# url和query联动\n# TODO 尾部状态栏\n# TODO Request标签页 自动切换\n\n# 全局变量\n\ndataPath = 'data.pkl'\n\nlogs = reqLogDB.selectAll()\n\ndef jsonPretty(jstr):\n return json.dump(json.loads(jstr), indent=2)\n\nclass RequestThread(QThread):\n finishSignal = pyqtSignal(dict)\n\n def __init__(self, window):\n super().__init__()\n self._window = window\n self.headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n }\n def __request(self, options):\n response = requests.request(options['method'], options['url'], headers=options['headers'], data=window.body, params=window.query)\n return response\n def run(self):\n start = time.time()\n url = self._window.reqUrlInput.text()\n text = ''\n try:\n print('Request Sending:', url)\n method = window.reqMethodCombo.currentText()\n print(method)\n response = self.__request({\n 'url': url,\n 'method': method,\n 'headers': self.headers\n })\n charsetPatt = re.compile('charset=[\"\\']{0,1}([A-Za-z0-9\\-]+)[\"\\']', re.IGNORECASE)\n matches = charsetPatt.search(str(response.text))\n if matches :\n response.encoding = matches.group(1)\n text = response.text\n print(response.__getattribute__('encoding'))\n print(response.status_code)\n print(response.headers)\n print(response.cookies)\n stats = 'Status: Success \\n'\n stats += '{}: {}\\n'.format('Code', response.status_code)\n stats += '{}: {:.3f}s\\n'.format('ResponseTime', time.time() - start)\n stats += '{}: {}\\n'.format('Encoding', response.encoding)\n stats += '{}: {}\\n'.format('Headers', json.dumps(dict(response.headers), indent=2))\n print('Request Success:', response.url)\n except Exception as e:\n #print('Request Failed:', e)\n stats = 'Status: Failed \\n' + 'Error: ' + str(e)\n print('请求耗时:', time.time() - start)\n try :\n text = json.dumps(json.loads(text), ensure_ascii=False)\n except:\n pass\n sigData = {\n 'url': url,\n 'text': text,\n 'stats': stats\n }\n self.finishSignal.emit(sigData)\n\ndef formatParamParse(paramText):\n param = OrderedDict()\n paramLines = str.split(paramText, '\\n')\n for line in paramLines:\n items = str.split(line)\n if len(items) == 2:\n param[str(items[0])] = str(items[1])\n return param\n\ndef formatParamStringify(param):\n paramFormat = ''\n for k in param:\n paramFormat += '{} {}\\n'.format(k, param[k])\n return paramFormat\n\ndef paramParse(paramStr):\n paramPats = str.split(paramStr, '&')\n param = OrderedDict()\n for paramPat in paramPats:\n equalIndex = paramPat.find('=')\n if equalIndex > 0:\n param[paramPat[:equalIndex]] = paramPat[equalIndex+1:]\n return param\n\ndef urlencodeFromMap(m):\n us = ''\n for k in m:\n us += k + '=' + str(m[k]) + '&'\n return 
us[:-1]\n\n\n\nclass Window(QWidget):\n query = {}\n reqStatsObj = {}\n zoom = 1.2\n\n def __init__(self):\n super().__init__()\n self.__render()\n self.show()\n self.reqUrlInput.setFocus()\n self.requestThread = RequestThread(self)\n self.requestThread.finishSignal.connect(self.__setRes)\n\n # 渲染组件\n def __render(self):\n self.setWindowTitle('PyRequest')\n self.__renderSelf()\n self.__renderMain()\n self.__renderLeft()\n self.reqUrlInput.textEdited.emit(self.reqUrlInput.text())\n self.queryEdit.textChanged.emit()\n self.bodyEdit.textChanged.emit()\n\n def __renderSelf(self):\n self.leftLayout = QVBoxLayout()\n self.mainLayout = QVBoxLayout()\n layout = QHBoxLayout(self)\n layout.addLayout(self.leftLayout)\n layout.addLayout(self.mainLayout)\n self.setWindowIcon(QIcon('assets/icon.ico'))\n self.resize(900 * self.zoom, 600 * self.zoom)\n\n def __renderLeft(self):\n self.reqList = QListWidget()\n self.reqList.itemClicked.connect(self.__logItemClicked)\n self.reqList.setMaximumWidth(300)\n for log in logs:\n logItem = QListWidgetItem(log['method'] + ' ' + log['url'])\n logItem.setData(99, log['id'])\n self.reqList.addItem(logItem)\n self.leftLayout.addWidget(self.reqList)\n\n def __logItemClicked(self, item):\n id = item.data(99)\n log = reqLogDB.selectOne(id)\n print(log)\n self.reqMethodCombo.setCurrentText(log['method'])\n self.reqUrlInput.setText(log['url'])\n self.queryEdit.setPlainText(log['query'])\n self.bodyEdit.setPlainText(log['body'])\n self.__clearRes()\n\n def __appendLog(self, log):\n logItem = QListWidgetItem(log['method'] + ' ' + log['url'])\n logItem.setData(99, log['id'])\n self.reqList.insertItem(0, logItem)\n\n def __renderMain(self):\n # input\n self.reqMethodCombo = QComboBox()\n self.reqMethodCombo.addItems(['GET', 'POST'])\n self.reqMethodCombo.currentTextChanged.connect(self.__methodChange)\n self.reqUrlInput = QLineEdit()\n self.reqUrlInput.setText('http://ip.taobao.com/service/getIpInfo.php?ip=59.41.95.234')\n self.reqUrlInput.textEdited.connect(self.__urlChanged)\n self.reqButton = QPushButton()\n self.reqButton.setText('SEND')\n self.reqButton.clicked.connect(self.__request)\n inputLayout = QHBoxLayout()\n inputLayout.addWidget(self.reqMethodCombo)\n inputLayout.addWidget(self.reqUrlInput)\n inputLayout.addWidget(self.reqButton)\n # body&query\n self.queryLabel = QLabel('Query')\n self.queryEdit = QPlainTextEdit()\n self.queryEdit.textChanged.connect(self.__queryEditChanged)\n self.bodyLabel = QLabel('Body')\n self.bodyEdit = QPlainTextEdit()\n self.bodyEdit.textChanged.connect(self.__bodyEditChanged)\n queryLayout = QVBoxLayout()\n queryLayout.addWidget(self.queryLabel)\n queryLayout.addWidget(self.queryEdit)\n bodyLayout = QVBoxLayout()\n bodyLayout.addWidget(self.bodyLabel)\n bodyLayout.addWidget(self.bodyEdit)\n self.bodyEdit.hide()\n self.bodyLabel.hide()\n paramLayout = QHBoxLayout()\n paramLayout.addLayout(queryLayout)\n paramLayout.addLayout(bodyLayout)\n self.mainLayout.addLayout(inputLayout)\n self.mainLayout.addLayout(paramLayout)\n # response\n self.resTab = self.__createResTab()\n self.mainLayout.addWidget(self.resTab)\n\n def __createResTab(self):\n resTab = QTabWidget()\n self.reqStats = QTextEdit()\n self.resStats = QTextEdit()\n self.resText = QTextEdit()\n self.resJSON = QTextEdit()\n self.resView = QWebEngineView()\n resTab.addTab(self.reqStats, 'req')\n resTab.addTab(self.resStats, 'res')\n resTab.addTab(self.resText, 'text')\n resTab.addTab(self.resJSON, 'json')\n resTab.addTab(self.resView, 'view')\n return resTab\n\n # 发起请求\n def 
__request(self):\n self.__clearRes()\n bodyRaw = self.bodyEdit.toPlainText()\n self.body = formatParamParse(bodyRaw)\n self.resView.setHtml('')\n # self.resView.setUrl(QUrl(self.reqUrlInput.text()))\n self.requestThread.start()\n\n # 处理返回\n def __setRes(self, res):\n if (self.resTab.currentIndex() == 0):\n self.resTab.setCurrentIndex(1)\n self.resStats.setPlainText(res['stats'])\n self.resText.setPlainText(res['text'])\n try :\n jsonstr = json.dumps(json.loads(res['text']), indent=2, ensure_ascii=False)\n self.resJSON.setPlainText(jsonstr)\n except Exception as e:\n print(e)\n self.resJSON.setPlainText('Not a JSON string')\n self.resView.setHtml(res['text'])\n self.__log()\n\n # 请求记录\n def __log(self):\n log = {}\n log['method'] = self.reqMethodCombo.currentText()\n log['url'] = self.reqUrlInput.text()\n log['query'] = self.queryEdit.toPlainText()\n log['body'] = self.bodyEdit.toPlainText()\n id = reqLogDB.insert(log)\n log['id'] = id\n self.__appendLog(log)\n\n # 清空返回栏\n def __clearRes(self):\n self.resStats.setText('')\n self.resText.setText('')\n self.resView.setHtml('')\n self.resJSON.setText('')\n\n # 方法切换\n def __methodChange(self, text):\n if text == 'GET':\n self.bodyEdit.hide()\n self.bodyLabel.hide()\n else:\n self.bodyEdit.show()\n self.bodyLabel.show()\n\n # 快捷键\n def keyPressEvent(self, event):\n print(event.key())\n key = event.key()\n if key in (16777268, 16777220, 16777221):\n self.__request()\n if key >= 49 and key <= 53:\n self.resTab.setCurrentIndex(key - 49)\n if key == 71:\n self.reqMethodCombo.setCurrentText('GET')\n if key == 80:\n self.reqMethodCombo.setCurrentText('POST')\n if key == 87:\n if (self.reqMethodCombo.currentText() == 'GET'):\n self.reqMethodCombo.setCurrentText('POST')\n elif (self.reqMethodCombo.currentText() == 'POST'):\n self.reqMethodCombo.setCurrentText('GET')\n\n # query/body/reqStats 联动\n def __queryEditChanged(self):\n queryRaw = self.queryEdit.toPlainText()\n self.__querySetFromFormat(queryRaw)\n\n def __bodyEditChanged(self):\n bodyRaw = self.bodyEdit.toPlainText()\n self.__reqStatsChanged({'body': formatParamParse(bodyRaw)})\n\n def __urlChanged(self, url):\n self.__querySetFromUrl(url)\n\n def __querySetFromFormat(self, queryRaw):\n query = formatParamParse(queryRaw)\n self.query = query\n self.__reqStatsChanged({'query': query})\n queryStr = urlencodeFromMap(query)\n if queryStr:\n url = self.reqUrlInput.text()\n urlParts = urlparse(url)\n url = '{}://{}{}?{}'.format(urlParts.scheme, urlParts.netloc, urlParts.path, queryStr)\n self.reqUrlInput.setText(url)\n self.__reqStatsChanged({'url': url})\n\n def __querySetFromUrl(self, url):\n queryStr = urlparse(url).query\n query = paramParse(queryStr)\n self.__reqStatsChanged({'url': url})\n self.__reqStatsChanged({'query': query})\n queryFormat = formatParamStringify(query)\n self.queryEdit.blockSignals(True)\n self.queryEdit.setPlainText(queryFormat)\n self.queryEdit.blockSignals(False)\n\n def __reqStatsChanged(self, things):\n for k in things:\n thing = things[k]\n self.reqStatsObj[k] = thing\n reqStatsStr = json.dumps(self.reqStatsObj, indent=2, ensure_ascii=False)\n self.reqStats.setPlainText(reqStatsStr)\n\n\ndef getfont():\n fontId = QFontDatabase.addApplicationFont('assets/MSYHMONO.ttf')\n if fontId != -1:\n fontFamilies = QFontDatabase.applicationFontFamilies(fontId)\n font = QFont()\n font.setFamily(fontFamilies[0])\n font.setPixelSize(12)\n return font\n\napp = QApplication(sys.argv)\n\nfont = getfont()\nif font:\n app.setFont(font)\n\nwindow = Window()\n\nsys.exit(app.exec_())\n\n"
}
] | 3 |
martinverr/CodeWars | https://github.com/martinverr/CodeWars | 1f365ee1d4a35b9ed13695b7cdd70c8ddcf2f859 | e24c140304d45a2cb73703c22370d72164d0eaf0 | cf8e7051638e67b71fe8b330dfaac8e357e96f80 | refs/heads/master | 2022-07-23T12:03:16.359716 | 2020-05-20T22:29:37 | 2020-05-20T22:29:37 | 262,686,410 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.690319299697876,
"alphanum_fraction": 0.7085656523704529,
"avg_line_length": 97.6500015258789,
"blob_id": "f19ae7eee180cc4f5c58cefdffb7a262da02038e",
"content_id": "e9219ff023c2db60ca50d7a30e80bae47e201110",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3946,
"license_type": "no_license",
"max_line_length": 336,
"num_lines": 40,
"path": "/Codewars style ranking system/Instructions.md",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "# Codewars style ranking system\n <p>Write a class called User that is used to calculate the amount that a user will progress through a ranking system similar to the one Codewars uses. </p>\n <h3>Business Rules:</h3>\n <ul>\n <li>A user starts at rank -8 and can progress all the way to 8.</li>\n <li>There is no 0 (zero) rank. The next rank after -1 is 1.</li>\n <li>Users will complete activities. These activities also have ranks.</li>\n <li>Each time the user completes a ranked activity the users rank progress is updated based off of the activity's rank</li>\n <li>The progress earned from the completed activity is relative to what the user's current rank is compared to the rank of the activity</li>\n <li>A user's rank progress starts off at zero, each time the progress reaches 100 the user's rank is upgraded to the next level</li>\n <li>Any remaining progress earned while in the previous rank will be applied towards the next rank's progress (we don't throw any progress away). The exception is if there is no other rank left to progress towards (Once you reach rank 8 there is no more progression). </li>\n <li>A user cannot progress beyond rank 8. </li>\n <li>The only acceptable range of rank values is -8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8. Any other value should raise an error. </li>\n </ul>\n The progress is scored like so:\n <ul>\n <li>Completing an activity that is ranked the same as that of the user's will be worth 3 points</li>\n <li>Completing an activity that is ranked one ranking lower than the user's will be worth 1 point</li>\n <li>Any activities completed that are ranking 2 levels or more lower than the user's ranking will be ignored</li>\n <li>Completing an activity ranked higher than the current user's rank will accelerate the rank progression. The greater the difference between rankings the more the progression will be increased. The formula is <code>10 * d * d</code> where <code>d</code> equals the difference in ranking between the activity and the user. 
</li>\n </ul>\n <h3>Logic Examples:</h3>\n <ul>\n <li>If a user ranked -8 completes an activity ranked -7 they will receive 10 progress</li>\n <li>If a user ranked -8 completes an activity ranked -6 they will receive 40 progress</li>\n <li>If a user ranked -8 completes an activity ranked -5 they will receive 90 progress</li>\n <li>If a user ranked -8 completes an activity ranked -4 they will receive 160 progress, resulting in the user being upgraded to rank -7 and having earned 60 progress towards their next rank</li>\n <li>If a user ranked -1 completes an activity ranked 1 they will receive 10 progress (remember, zero rank is ignored)</li>\n </ul>\n<h3>Usage Examples:</h3>\n<pre>\n<code class=\"language-python\"><span class=\"cm-variable\">user</span> <span class=\"cm-operator\">=</span> <span class=\"cm-variable\">User</span>()\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">rank</span> <span class=\"cm-comment\"># => -8</span>\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">progress</span> <span class=\"cm-comment\"># => 0</span>\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">inc_progress</span>(<span class=\"cm-operator\">-</span><span class=\"cm-number\">7</span>)\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">progress</span> <span class=\"cm-comment\"># => 10</span>\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">inc_progress</span>(<span class=\"cm-operator\">-</span><span class=\"cm-number\">5</span>) <span class=\"cm-comment\"># will add 90 progress</span>\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">progress</span> <span class=\"cm-comment\"># => 0 # progress is now zero</span>\n<span class=\"cm-variable\">user</span>.<span class=\"cm-property\">rank</span> <span class=\"cm-comment\"># => -7 # rank was upgraded to -7</span></code>\n</pre>\n"
},
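The progression rules above reduce to a small scoring function. A minimal sketch (the function name is mine; the rank list and the 10 * d * d formula come from the kata description, and the asserts restate its logic examples):

```python
RANKS = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8]

def progress_for(user_rank, activity_rank):
    # rank distance measured on the list, which skips the nonexistent rank 0
    d = RANKS.index(activity_rank) - RANKS.index(user_rank)
    if d == 0:
        return 3           # same rank as the user
    if d == -1:
        return 1           # one rank below the user
    if d < -1:
        return 0           # two or more ranks below: ignored
    return 10 * d * d      # higher-ranked activity accelerates progress

assert progress_for(-8, -4) == 160  # matches the logic examples above
assert progress_for(-1, 1) == 10    # the zero rank is skipped
```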
{
"alpha_fraction": 0.46204620599746704,
"alphanum_fraction": 0.507013201713562,
"avg_line_length": 27.51764678955078,
"blob_id": "83264e03c873b9721e9a4a48846f4d2a6dae9c2b",
"content_id": "0e6a12d40cc77caea04e2ca68e5b554b88581390",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2424,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 85,
"path": "/Sudoku Resolver/sudoku.py",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "# NOTE: This kata appeared on my codewars dashboard. I already know how to\n# do it in C (see \"sudokubacktrack.c\"), I wanted to do it in Python\n\n\ndef isPossible(puzzle, row, col):\n currVal = puzzle[row][col]\n # check row\n for c in range(9):\n if puzzle[row][c] == currVal and c != col:\n return False\n # check col\n for r in range(9):\n if puzzle[r][col] == currVal and r != row:\n return False\n\n # check 3x3 block where currVal is in\n rBlock = 3 * (row//3)\n cBlock = 3 * (col//3)\n for r in range(rBlock, rBlock+3):\n for c in range(cBlock, cBlock+3):\n if r != row and c != col and puzzle[r][c] == currVal:\n return False\n # else\n return True\n\n\ndef bruteforce(puzzle, row, col):\n # if we finished (last recursive call), return the puzzle resolved\n if row == 9:\n \"\"\"\n print(\"Found solution:\")\n for row in puzzle:\n print(row)\n print()\n \"\"\"\n return puzzle\n\n # set next col and row\n nextRow = row\n nextCol = col + 1\n if nextCol == 9: # if col exceed, next row\n nextRow += 1\n nextCol = 0\n\n # IF: value not set, DO: try every number 1->9, each go next recursive call\n # ELSE DO: don't change the value, go to next recursive call\n if puzzle[row][col] == 0:\n for k in range(1, 10):\n puzzle[row][col] = k\n if isPossible(puzzle, row, col):\n if bruteforce(puzzle, nextRow, nextCol):\n return puzzle\n puzzle[row][col] = 0\n else:\n if bruteforce(puzzle, nextRow, nextCol):\n return puzzle\n\n\ndef sudoku(puzzle):\n # check if puzzle is 9x9\n if len(puzzle) != 9:\n return False\n for row in puzzle:\n if len(row) != 9:\n return False\n # call the resolver (recursive backtracking algorithm)\n return bruteforce(puzzle, 0, 0)\n\n\n\"\"\"\n# MAIN_______________________\npuzzle = [[5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]]\nprint(\"resolving...\")\nresolvedPuzzle = sudoku(puzzle)\nfor row in resolvedPuzzle:\n print(row)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6075851321220398,
"alphanum_fraction": 0.650928795337677,
"avg_line_length": 67,
"blob_id": "31d0b8f7781d645dc953abe2765c008a4ef5e5cd",
"content_id": "739c49f95a99472baa668f82449bd9c782d0f9a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 339,
"num_lines": 19,
"path": "/Roman Numerals Helper/Instructions.md",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "<h2 id=\"task\">Task</h2>\n<p>Create a RomanNumerals class that can convert a roman numeral to and from an integer value. It should follow the API demonstrated in the examples below. Multiple roman numeral values will be tested for each helper method. </p>\n<p>Modern Roman numerals are written by expressing each digit separately starting with the left most digit and skipping any digit with a value of zero. In Roman numerals 1990 is rendered: 1000=M, 900=CM, 90=XC; resulting in MCMXC. 2008 is written as 2000=MM, 8=VIII; or MMVIII. 1666 uses each Roman symbol in descending order: MDCLXVI.</p>\n<h2 id=\"examples\">Examples</h2>\n<pre style=\"display: none;\"><code class=\"language-javascript\"><span class=\"cm-variable\">RomanNumerals</span>.<span class=\"cm-property\">toRoman</span>(<span class=\"cm-number\">1000</span>); <span class=\"cm-comment\">// should return 'M'</span>\n<span class=\"cm-variable\">RomanNumerals</span>.<span class=\"cm-property\">fromRoman</span>(<span class=\"cm-string\">'M'</span>); <span class=\"cm-comment\">// should return 1000</span></code></pre>\n\n<h2>Help</h2>\n<pre>\n| Symbol | Value |\n|-------------------|\n| I | 1 |\n| V | 5 |\n| X | 10 |\n| L | 50 |\n| C | 100 |\n| D | 500 |\n| M | 1000 |</p>\n</pre>\n"
},
{
"alpha_fraction": 0.5237998962402344,
"alphanum_fraction": 0.5403388738632202,
"avg_line_length": 19.831932067871094,
"blob_id": "ab432be7ed8d58b9d4109fc44183f51a9cd83f6e",
"content_id": "598c5ce4e2eed9af4052bab78507b369dccf7da0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4958,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 238,
"path": "/Sudoku Resolver/sudokubacktrack.c",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "/**\n***\tsudokuSolve.c\n***\n***\tLetta in input una matrice 9x9 di numeri interi da un file,\n***\tverifica con un procedimento esaustiva l'esistenza di una\n***\tsoluzione della partita.\n***\n***\n**/\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n\n#define N 9\n\n/*\n *\tleggiFile(s, M)\n *\n *\tLegge da file di testo ASCII una configurazione del Sudoku\n *\te la memorizza nella matrice di interi.\n *\n *\ts: stringa contenente il nome del file da aprire; se la stringa\n *\t e' nulla viene richiesto il nome del file all'utente.\n *\n *\tM: matrice di interi 9x9 in cui viene memorizzata la configurazione.\n *\n *\tRestituisce 1 se la lettura del file e` andata a buon fine,\n *\t0 altrimenti.\n *\n */\n\nint leggiFile(char *s, int M[N][N]) {\n\tFILE *f;\n\tint i, j, rc;\n\n\twhile (s[0] == '\\0') {\n\t\tprintf(\"Nome del file: \");\n\t\tscanf(\"%s\", s);\n\t}\n\tif ((f = fopen(s, \"rt\"))) {\n\t\tfor (i=0; i<N && !feof(f); i++) {\n\t\t\tfor (j=0; j<N && !feof(f); j++) {\n\t\t\t\tfscanf(f, \"%d\", &M[i][j]);\n\t\t\t}\n\t\t}\n\t\tif (i==N && j==N) {\n\t\t\trc = 1;\n\t\t} else {\n\t\t\trc = 0;\n\t\t}\n\t\tfclose(f);\n\t} else {\n\t\tfprintf(stderr, \"ERRORE: il file %s non puo' essere aperto in lettura\\n\\n\", s);\n\t\trc = 0;\n\t}\n\treturn(rc);\n}\n\n/*\n *\tstampaConfigurazione(M)\n *\n *\tStampa la matrice M 9x9 con una configurazione della\n *\ttavola di gioco.\n *\n *\tM: matrice 9x9 di interi\n *\n */\n\nvoid stampaConfigurazione(int M[N][N]) {\n\tint i, j, h, k;\n\n\tprintf(\"\\n+---------+---------+---------+\\n\");\n\tfor (i=0; i<3; i++) {\n\t\tfor (j=3*i; j<3*(i+1); j++) {\n\t\t\tprintf(\"|\");\n\t\t\tfor (k=0; k<3; k++) {\n\t\t\t\tfor (h=3*k; h<3*(k+1); h++) {\n\t\t\t\t\tprintf(\"%2d \", M[j][h]);\n\t\t\t\t}\n\t\t\t\tprintf(\"|\");\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}\n\t\tprintf(\"+---------+---------+---------+\\n\");\n\t}\n\treturn;\n}\n\n\n/*\n *\tverifica(M, i, j, debug)\n *\n *\tVerifica che l'elemento M[i][j] contenga un valore compatibile\n *\tcon quelli presenti nella colonna j, nella riga i e nel riquadro\n *\t3x3 in cui si trova l'elemento M[i][j].\n *\n *\tM: matrice 9x9 di numeri interi con la configurazione di gioco.\n *\n *\ti: indice di riga dell'elemento da verificare.\n *\n *\tj: indice di colonna dell'elemento da verificare.\n *\n *\tdebug: se debug=1 visualizza messaggi di debug per seguire il\n *\t procedimento di calcolo.\n *\n *\tLa funzione restituisce 1 se la il valore di M[i][j] e'\n *\tcompatibile con il resto della matrice, 0 altrimenti.\n *\n */\n\nint verifica(int m[N][N], int i, int j, int debug) {\n\tint h, k, rc;\n\n\n//\tprintf(\"Verifica di m[%d][%d]=%d\\n\", i, j, m[i][j]);\n\trc = 1;\n\tfor (h=0; h<N && rc == 1; h++) {\n\t\tif (h != j && m[i][h] == m[i][j]) {\n\t\t\trc = 0;\n\t\t\tif (debug)\n\t\t\t\tprintf(\"m[%d][%d]=%d ... no: doppione in riga.\\n\", i, j, m[i][j]);\n\t\t} else {\n\t\t\tif (h != i && m[h][j] == m[i][j]) {\n\t\t\t\trc = 0;\n\t\t\t\tif (debug)\n\t\t\t\t\tprintf(\"m[%d][%d]=%d ... no: doppione in colonna.\\n\", i, j, m[i][j]);\n\t\t\t}\n\t\t}\n\t}\n\tfor (h=3*(i/3); h<3*(i/3+1) && rc == 1; h++) {\n\t\tfor (k=3*(j/3); k<3*(j/3+1) && rc == 1; k++) {\n\t\t\tif ((h != i || k != j) && m[h][k] == m[i][j]) {\n\t\t\t\trc = 0;\n\t\t\t\tif (debug)\n\t\t\t\t\tprintf(\"m[%d][%d]=%d ... 
no: doppione nel quadrato!\\n\", i, j, m[i][j]);\n\t\t\t}\n\t\t}\n\t}\n\tif (debug) {\n\t\tif (rc==1) {\n\t\t\tprintf(\"Il valore m[%d][%d]=%d e' compatibile.\\n\", i, j, m[i][j]);\n\t\t} else {\n\t\t\tprintf(\"Il valore m[%d][%d]=%d non e' compatibile.\\n\", i, j, m[i][j]);\n\t\t}\n\t}\n\treturn(rc);\n}\n\n/*\n *\tsudokuSolve(M, debug)\n *\n *\tFunzione ricorsiva per la ricerca della configurazione risolutiva\n *\tche prova a collocare nelle posizioni vuote tutti valori\n *\tk=1, 2, ..., 9.\n *\n *\tM: matrice 9x9 di interi con la configurazione corrente della\n *\t griglia di gioco.\n *\n *\tdebug: se debug=1 visualizza i messaggi di debug per seguire\n *\t l'evoluzione del processo di ricerca della soluzione.\n *\n *\tLa funzione restituisce 1 se la funzione riesce ad individuare\n *\tuna configurazione finale, 0 altrimenti (se la configurazione\n *\tiniziale non ammette alcuna soluzione).\n *\n */\n\nvoid sudokuSolve(int m[N][N], int debug, int i, int j, int *sol) {\n\tint k;\n\n\tif(i==N){\n (*sol)++;\n\t\tprintf(\"SUDOKU risolto (%d)!\\n\", *sol);\n\t\tstampaConfigurazione(m);\n\t\tgetchar();\n\t return;\n\n\t}\n\n\tif (debug) {\n\t\tprintf(\"\\nProviamo a risolvere questa configurazione:\");\n\t\tstampaConfigurazione(m);\n\t}\n\n int ii=i;\n int jj=j+1;\n if(jj==N) {\n\t ii++;\n\t jj=0;\n }\n\tif(m[i][j] == 0){\n\t for (k=1; k<10; k++) {\n\t\t m[i][j] = k;\n \tif (debug) {\n printf(\"Provo m[%d][%d]=%d\\n\", i, j, m[i][j]);\n \t}\n\t\t if (verifica(m, i, j, debug) == 1) {\n\t \t\tif (debug) getchar();\n\t\t sudokuSolve(m, debug, ii, jj, sol);\n\t }\n\t m[i][j] = 0;\n\t }\n\t}\n\telse{\n \tif (debug) {\n printf(\"Non cambio m[%d][%d]=%d\\n\", i, j, m[i][j]);\n \t}\n\t sudokuSolve(m, debug, ii, jj, sol);\n\n\t}\n\treturn;\n}\n\n/*\n *\tmain\n *\n *\tFunzione principale.\n *\n */\n\nint main(int argc, char *argv[]) {\n\tchar s[100];\n\tint m[N][N], i, debug = 0, sol=0;\n\n\ts[0] = '\\0';\n\n\tif (leggiFile(s, m) == 1) {\n\t\tstampaConfigurazione(m);\n\t\tgetchar();\n\t\tgetchar();\n\t\tsudokuSolve(m, debug, 0, 0, &sol);\n\n\t}\n\n\tgetchar();\n\treturn(0);\n}\n"
},
{
"alpha_fraction": 0.5160142183303833,
"alphanum_fraction": 0.530757486820221,
"avg_line_length": 27.507246017456055,
"blob_id": "cea7eb1925ce3dfc18e687f7a54fe6977a96a4e8",
"content_id": "2c37f942d81e28bb55a355741d59ffc26d618784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1967,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 69,
"path": "/Roman Numerals Helper/RomanNumerals.py",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "# TODO: to_roman()\n# NOTE: 1- from_roman pass all 2 + 5 test case on codewars\n# 2- This was not meant to be OOP, I created the functions as functions,\n# then joined them into a class.\n\n\nclass RomanNaturals:\n symbols = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n\n @classmethod\n def hasValidRomanLetters(cls, input):\n for letter in input:\n if letter not in cls.symbols:\n return False\n return True\n\n @classmethod\n def from_roman(cls, input, debug=False):\n if input is None or not cls.hasValidRomanLetters(input):\n return 0\n\n decimal = 0\n currblock = 0 # symbols[input[0]]\n\n for index in range(len(input)):\n thisVal = cls.symbols[input[index]]\n currblock += thisVal\n\n # get nextval\n if index < len(input) - 1:\n nextVal = cls.symbols[input[index+1]]\n else:\n decimal += currblock\n break\n if debug: # debug if active\n print(thisVal, nextVal, currblock)\n\n # if next letter is equal\n if thisVal == nextVal:\n pass\n\n # if next letter value is smaller\n if thisVal > nextVal:\n decimal += currblock\n currblock = 0\n\n # if next letter value is greater\n if thisVal < nextVal:\n decimal -= currblock\n currblock = 0\n\n # debug if active\n if debug is True:\n print(\"curr input analized:\", input[:index+1])\n print(\"currblock:\", currblock, \" | currVal:\", decimal)\n print()\n\n return decimal\n\n @classmethod\n def to_roman(cls, num):\n pass\n\n\n# MAIN\nprint(RomanNaturals.from_roman(\"CXII\"))\nprint(RomanNaturals.from_roman(\"CMXXIV\"))\nprint(RomanNaturals.from_roman(\"C\"))\nprint(RomanNaturals.from_roman(\"III\"))\n"
},
{
"alpha_fraction": 0.5912408828735352,
"alphanum_fraction": 0.5912408828735352,
"avg_line_length": 18.571428298950195,
"blob_id": "6a7a0140cde3c00ca30c373dc066df1b398373c5",
"content_id": "6fee425f7001b9068c13ac8194e6efe7b84b68f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 7,
"path": "/Roman Numerals Helper/TODO.md",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "# TODO\n<ul>\n <li>\n <b>Add RomanNaturals.to_roman(cls, num):</b> Take \"num\" and converts it from decimal to Roman.\n \n </li>\n</ul>\n"
},
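The TODO above asks for the missing to_roman conversion that is still a stub in RomanNumerals.py. A possible greedy implementation, sketched here rather than taken from the repo's code; it assumes a positive integer input and standard subtractive notation:

```python
def to_roman(num):
    # value/symbol pairs in descending order, including subtractive forms
    pairs = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
             (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
             (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
    roman = ""
    for value, symbol in pairs:
        while num >= value:
            roman += symbol
            num -= value
    return roman

assert to_roman(1990) == "MCMXC"   # example from the kata description
assert to_roman(2008) == "MMVIII"
```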
{
"alpha_fraction": 0.6335797309875488,
"alphanum_fraction": 0.6895459294319153,
"avg_line_length": 29.54838752746582,
"blob_id": "9f15c9c45c21e1e7a4128d60babbe401731d5750",
"content_id": "6f02ffee7086f3de5b8294b93bd3f7dbd439178a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 31,
"path": "/Codewars style ranking system/TEST.py",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "import UserClass\n\n\nuser = UserClass.User()\n\n# Test case - Instruction test case\nprint(\"\\nTest case - Instruction test case\")\nprint(user.rank) # => -8\nprint(user.progress) # => 0\nprint(user.inc_progress(-7))\nprint(user.progress) # => 10\nprint(user.inc_progress(-5)) # will add 90 progress\nprint(user.progress) # => 0 # progress is now zero\nprint(user.rank) # => -7 # rank was upgraded to -7\n\n# Test case - Multipromotion, simple\nprint(\"\\nTest case - Multipromotion, simple\")\nuser.setRank(-8)\nuser.setProgress(0)\nprint(user)\nuser.inc_progress(-4) # 4*4*10 progress = 160 progress\nprint(user) # rank = -7, progress = 60\nuser.inc_progress(1) # 7*7*10 + 60 progress = 550 progress\nprint(user) # rank = -2, progress = 50\nuser.setRank(7)\nuser.setProgress(99)\nuser.inc_progress(8) # 10 + 99 progress = 109 progress\nprint(user) # rank = 8, progress = 0\n\n# Test case - Error Handle (invalid ranks)\n# No need, passed 255 test cases at this point\n"
},
{
"alpha_fraction": 0.5349276065826416,
"alphanum_fraction": 0.553178071975708,
"avg_line_length": 32.808509826660156,
"blob_id": "f1dfc1f63ebd610d02504c009c8918845b4524ed",
"content_id": "8ad72c47c06a1fc258c1029942c61a9793c79bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3178,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 94,
"path": "/Codewars style ranking system/UserClass.py",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "class User:\n # class properties by default\n rank = -8\n progress = 0\n # data\n ranks = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8]\n\n def __init__(self, rank=None, progress=None):\n if rank is not None and self.isValidRank(rank):\n self.rank = rank\n if progress is not None and self.isValidProgress(progress):\n self.progress = progress\n\n def __str__(self):\n return f\"Rank: {self.rank}\\nProgress: {self.progress}\"\n\n @classmethod\n def isValidRank(cls, rank):\n if rank in User.ranks:\n return True\n else:\n return False\n\n @staticmethod\n def isValidProgress(progress):\n return True if progress >= 0 and progress <= 100 else False\n\n def setRank(self, rank):\n if rank is not None and self.isValidRank(rank):\n self.rank = rank\n else:\n raise Exception(\"Err: in User.setRank(self, rank)\"\n \"passed an invalid rank\")\n\n def setProgress(self, progress):\n if progress is not None and self.isValidProgress(progress):\n self.progress = progress\n else:\n raise Exception(\"Err: in User.setProgress(self, rank)\"\n \"passed an invalid progress\")\n\n def getRankDistFrom(self, rank2):\n rank1 = self.rank\n if not self.isValidRank(rank2):\n raise Exception(\"Err: in getRankDist(self, rank2), rank2 is not a\"\n \"valid rank\")\n return User.ranks.index(rank2) - User.ranks.index(rank1)\n\n def calculatePointsFrom(self, rank2):\n dist = self.getRankDistFrom(rank2)\n if dist == 0:\n return 3\n if dist == -1:\n return 1\n if dist < -1:\n return 0\n if dist > 0:\n return dist * dist * 10\n\n def promotionRanks(self, ranks):\n while ranks > 0:\n nextrank = self.rank + 1\n if self.isValidRank(nextrank):\n self.setRank(nextrank)\n else: # possibile not valid nextrank\n if self.rank == -1:\n self.setRank(nextrank + 1)\n elif self.rank == 8:\n break\n else:\n raise Exception(\"Undefined behaviour during promotion\")\n ranks -= 1\n\n def promotionByProgress(self, progress):\n advanceRanks = progress//100\n progressSurplus = progress % 100\n self.promotionRanks(advanceRanks)\n if(self.rank == 8):\n self.setProgress(0)\n return\n self.setProgress(progressSurplus)\n\n def inc_progress(self, rankActivity):\n if rankActivity is None or not self.isValidRank(rankActivity):\n raise Exception(\"Err: in inc_progress(self, rankActivity), \"\n \"rankActivity is not a valid rank\")\n if self.rank == 8:\n return\n totalProgress = self.progress + self.calculatePointsFrom(rankActivity)\n\n if totalProgress >= 100: # promotion\n self.promotionByProgress(totalProgress)\n else: # no promorion\n self.setProgress(totalProgress)\n"
},
{
"alpha_fraction": 0.7583333253860474,
"alphanum_fraction": 0.7583333253860474,
"avg_line_length": 29,
"blob_id": "563a8a5c54e9ac9febf86641030f671e21faf550",
"content_id": "98d472cbea2c7237d7ed73cccdfd8f24dfb5be2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 4,
"path": "/README.md",
"repo_name": "martinverr/CodeWars",
"src_encoding": "UTF-8",
"text": "# CodeWars\nSome of www.codewars.com katas I find intresting\n<br><br>\nMy profile: https://www.codewars.com/users/rotcod/\n"
}
] | 9 |
sumanthm-git/mongo_pipe | https://github.com/sumanthm-git/mongo_pipe | 143ec15e1b89aa2acdc30db4f65705b09b4cfa01 | a65d822cb683c69fdad2575b3c18e9c214b0a807 | 826878c417ddb8a7b240dadf847d40fe349f0ce1 | refs/heads/master | 2023-07-31T18:40:41.156327 | 2021-09-19T21:12:23 | 2021-09-19T21:12:23 | 408,238,286 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.722878634929657,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 30.066667556762695,
"blob_id": "bc5962325c155630438defc6ac372ebd413a5192",
"content_id": "a9a8dd83f2be071bd6610571fa99eaa447553f2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 30,
"path": "/csv_json.py",
"repo_name": "sumanthm-git/mongo_pipe",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom pandas.io import json\n\n#reading the sample CSV\ndf = pd.read_csv('sales.csv', dtype=object)\n\n#making the _id field for json to use as unique field in MongoDB\ndf.index = np.arange(1, len(df)+1)\ndf['_id'] = df.index\ncopy_index = df.pop('_id')\ndf.insert(0,'_id',copy_index)\n\n#converting onject to datetime64[ns]\ndf['Transaction_date'] = pd.to_datetime(df['Transaction_date'])\ndf['Account_Created'] = pd.to_datetime(df['Account_Created'])\ndf['Last_Login'] = pd.to_datetime(df['Last_Login'])\n#print(df.dtypes)\n#print(df)\n\n#encoding df using records formatted json ans using iso dateformat\nres = df.to_json(orient='records',date_format='iso')\n\n#loads take file-like object, reads the data from that object and use that string to create a json object\nparsed = json.loads(res)\n#print(parsed)\n\n#dumps take an json object and produces a string\nstr_res = json.dumps(parsed, indent=4)\n#print(str_res)"
},
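A minimal sketch of the `to_json` -> `loads` round-trip used above, on a toy frame and with the stdlib `json` module (which exposes the same `loads`/`dumps` names as `pandas.io.json`):

```python
import json
import pandas as pd

df = pd.DataFrame({"_id": [1, 2], "Price": ["1200", "3600"]})
records = json.loads(df.to_json(orient="records"))  # -> list of dicts
assert records[0]["_id"] == 1
print(json.dumps(records, indent=4))                # pretty-printed string
```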
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.752525269985199,
"avg_line_length": 23.75,
"blob_id": "82c519186071914f26ab3002534da7c8671dee05",
"content_id": "d549e92f2a716b042ae201e2bcef4425f54cfab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 8,
"path": "/json_into_mongo.py",
"repo_name": "sumanthm-git/mongo_pipe",
"src_encoding": "UTF-8",
"text": "import csv_json as cj\nimport pymongo\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\nmydb = myclient[\"mydatabase\"]\nmycol = mydb[\"sales\"]\nmycol.insert_many(cj.parsed)\n#print(cj.parsed)\n"
}
] | 2 |
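A hypothetical follow-up check for the insert above, assuming a local `mongod` is running and `json_into_mongo.py` has been executed:

```python
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017/")
col = client["mydatabase"]["sales"]
print(col.count_documents({}))   # number of documents that landed
print(col.find_one({"_id": 1}))  # _id values came from the DataFrame index
```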
tcmcgee/elm-test-runner | https://github.com/tcmcgee/elm-test-runner | 022ae6ad7aa4ac815179815fa78ce21058437969 | 57677d6ec421058fb21143842fbd2e46b54de0b2 | d2421587e1cc83bfc74d17243c21046790e4829d | refs/heads/master | 2021-01-13T08:57:18.913962 | 2016-09-21T23:46:24 | 2016-09-21T23:46:24 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5882787108421326,
"alphanum_fraction": 0.58912193775177,
"avg_line_length": 26.90294075012207,
"blob_id": "a184db8070ef998592672c836d7978245a13381a",
"content_id": "60739c66dcac43da3713309de316ee5d2317bc30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9487,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 340,
"path": "/run_single_elm_test.py",
"repo_name": "tcmcgee/elm-test-runner",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nfrom __future__ import print_function\n\nimport json\nimport argparse\nimport os\nimport os.path\nimport re\n\n\ndef find_files(directory):\n for root, dirs, files in os.walk(directory):\n if 'elm-stuff' in root:\n continue\n\n for basename in files:\n if basename.startswith('_') or basename == 'Test.elm':\n continue\n\n if basename.endswith('.elm'):\n filename = os.path.join(root, basename)\n yield filename\n\ndef find_specs_importing_module(root_folder, module_names):\n specs = []\n\n dir = os.getcwd()\n folder = os.path.join(dir, root_folder)\n\n for file in find_files(folder):\n with open(file, 'r') as f:\n text = f.read()\n\n for module_name in module_names:\n if 'import ' + module_name in text:\n specs.append(file[len(dir) + 1:])\n\n return specs\n\ndef get_module_name(root_folder, spec_file):\n \"\"\"\n Takes a root folder, and a spec file, returns the elm module name\n\n >>> get_module_name(\"spec/elm\", \"spec/elm/Assignment/DashboardSpec.elm\")\n 'Assignment.DashboardSpec'\n\n >>> get_module_name(\"\", \"Assignment/DashboardSpec.elm\")\n 'Assignment.DashboardSpec'\n\n >>> get_module_name(\"spec/elm\", \"Assignment/DashboardSpec.elm\")\n 'Assignment.DashboardSpec'\n \"\"\"\n\n without_root = spec_file.lstrip(root_folder)\n without_elm = without_root.rstrip('.elm')\n\n return without_elm.replace('/', '.')\n\n\ndef find_exposed_names(text):\n \"\"\"\n ## If everything is exposed, then return '..'\n\n >>> find_exposed_names(\"module Robot (..) where\")\n '..'\n\n >>> find_exposed_names(\"module Robot where\")\n '..'\n\n >>> find_exposed_names(\"\")\n '..'\n\n\n ## Otherwise, only return the exposed names\n\n >>> find_exposed_names(\"module Robot (cat) where\")\n ['cat']\n\n >>> find_exposed_names(\"module Robot (cat, dog) where\")\n ['cat', 'dog']\n\n >>> find_exposed_names(\"module Robot \\\\n(cat\\\\n, dog)\\\\n where\")\n ['cat', 'dog']\n\n >>> find_exposed_names(\"module Robot \\\\n(cat\\\\n, dog)\\\\n where\\\\n f = (2 + 2)\\\\n wheret = 5\")\n ['cat', 'dog']\n \"\"\"\n\n if 'module' not in text:\n return '..'\n\n between_bits = re.findall('module(.+?)where', text, re.DOTALL)\n\n if len(between_bits) == 0:\n return '..'\n\n between_bits = between_bits[0]\n\n if '(' not in between_bits or '..' in between_bits:\n return '..'\n\n open_bracket_index = between_bits.index(\"(\")\n close_bracket_index = between_bits.index(\")\")\n\n exposed_names = between_bits[open_bracket_index + 1\n : close_bracket_index\n ].split(',')\n\n stripped_names = [ name.strip() for name in exposed_names ]\n\n return stripped_names\n\n\ndef is_a_spec_line(line):\n \"\"\"\n >>> is_a_spec_line(\"spec : Test\")\n True\n >>> is_a_spec_line(\"donald : Test\")\n True\n >>> is_a_spec_line(\"ashdasd\")\n False\n >>> is_a_spec_line(\"spec = blob\")\n True\n \"\"\"\n return 'spec =' in line or ': Test' in line\n\n\ndef get_identifier_name(line):\n \"\"\"\n Get the idenifier name from a line. 
Idents are considered to be the left-hand names\n\n >>> get_identifier_name(\"dave =\")\n 'dave'\n\n >>> get_identifier_name(\"dave=\")\n 'dave'\n\n >>> get_identifier_name(\"dave x=\")\n 'dave'\n\n >>> get_identifier_name(\"sausage : Test\")\n 'sausage'\n\n >>> get_identifier_name(\"sausage:Test\")\n 'sausage'\n \"\"\"\n\n # TODO: this is dumb and slow\n # but the code was fast to write\n valid_splits = ['=', ':']\n\n for split in valid_splits:\n line = line.replace(split, ' ')\n\n return line.split(' ')[0]\n\n\ndef find_spec_names(text):\n \"\"\"\n Spec names are those with the type test or `spec` as the name\n\n >>> find_spec_names(\"dave : Test\\\\n\\\\n\\\\nsausage = blob\\\\n\\\\nspec = something\")\n ['dave', 'spec']\n \"\"\"\n names = []\n\n for line in text.split('\\n'):\n if is_a_spec_line(line):\n name = get_identifier_name(line)\n\n if name not in names:\n names.append(name)\n\n return names\n\ndef imports():\n return \"\"\"\nimport Signal exposing (..)\nimport ElmTest exposing (..)\nimport Console exposing (IO, run)\nimport Task exposing (Task)\n\"\"\"\n\ndef runner():\n return \"\"\"\nport runner : Signal (Task.Task x ())\nport runner = run (consoleRunner tests)\n\"\"\"\n\ndef generate_imports(module_name):\n \"\"\"\n >>> generate_imports(\"Dog\")\n 'import Dog'\n \"\"\"\n\n return 'import ' + module_name\n\ndef generate_test_lines(spec_names):\n \"\"\"\n >>> generate_test_lines({\"Dog\" :[\"spec\"]})\n 'tests = suite \"Dog tests\" [ Dog.spec ]'\n >>> generate_test_lines({\"Dog\": [\"spec\", \"sausage\"]})\n 'tests = suite \"Dog tests\" [ Dog.spec, Dog.sausage ]'\n \"\"\"\n\n module_names = ', '.join(module_name for module_name in spec_names)\n\n tests_part = \"tests = suite \\\"{module_name} tests\\\" [ \".format(module_name=module_names)\n end = \" ]\"\n\n prepend_module_name = []\n\n for (module_name, specs) in spec_names.items():\n prepend_module_name.extend(\n '{module}.{func}'.format(module=module_name, func=spec_name) for spec_name in specs\n )\n\n joined = ', '.join(prepend_module_name)\n\n return tests_part + joined + end\n\n\ndef generate_runner(spec_names):\n \"\"\"\n >>> 'import Dog' in generate_runner({\"Dog\": [\"spec\"]})\n True\n >>> 'port runner = run' in generate_runner({\"Dog\": [\"spec\"]})\n True\n >>> 'tests = suite \"Dog tests\" [ Dog.spec ]' in generate_runner({\"Dog\": [\"spec\"]})\n True\n \"\"\"\n extra_imports = [ generate_imports(module_name) for module_name in spec_names ]\n\n top = imports() + '\\n'.join(extra_imports)\n tests = generate_test_lines(spec_names)\n bottom = runner()\n\n return top + '\\n' + tests + '\\n' + bottom\n\n\ndef run_elm_test_on_files(bin_location, root_folder, spec_files, output_file_name):\n \"\"\"\n run elm test on multiple files as described in the description\n \"\"\"\n spec_file_names = {}\n\n for spec_file in spec_files:\n with open(spec_file) as f:\n read_text = f.read()\n\n exposed_names = find_exposed_names(read_text)\n spec_names = find_spec_names(read_text)\n\n if exposed_names == '..':\n spec_file_names[spec_file] = spec_names\n else:\n valid_names = [ name for name in spec_names if name in exposed_names ]\n spec_file_names[spec_file] = valid_names\n\n spec_names = {}\n\n for (spec_file, names) in spec_file_names.items():\n module_name = get_module_name(root_folder, spec_file)\n spec_names[module_name] = names\n\n runner_code = generate_runner(spec_names)\n\n current_dir = os.getcwd()\n\n os.chdir(root_folder)\n\n with open(output_file_name, 'w') as f:\n f.write(runner_code)\n\n run_elm_test(current_dir, 
bin_location, output_file_name)\n\n\ndef run_elm_test(master_dir, bin_location, spec_file):\n \"\"\"\n Run elm test on a spec file\n Run elm-package in the current dir\n Uses the binaries from global if bin_location is not set, Otherwise\n `master_dir/bin_location/<bin_name>`\n \"\"\"\n from subprocess import call\n\n # default to global\n if bin_location is None:\n bin_location = ''\n master_dir = ''\n\n elm_package_path = os.path.join(master_dir, bin_location, \"elm-package\")\n elm_test_path = os.path.join(master_dir, bin_location, \"elm-test\")\n\n call([elm_package_path, \"install\", \"--yes\"])\n call([elm_test_path, spec_file])\n\ndef test():\n import doctest\n doctest.testmod()\n\nLONG_DESCRIPTION = '''\nRun tests in files manually by just giving their name. Give multiple names if you want. Globs too.\n\nUsing the `--module` flag will create a test suite generated from every module that imports that module.\n\nFiles called `Test.elm` or starting with `_` will be ignored as these are common entry points\n\nThis script will:\n - Grab the files\n - Find the exposed names from a file\n - Group all the ones with the type `: Test` or the name `spec`\n - Create a test suite composed from all the found tests in each file\n - Run them!\n'''\n\ndef main():\n parser = argparse.ArgumentParser(description=LONG_DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('spec_file', help='give relative path to the spec file you want to run or the name of the module', nargs='+')\n parser.add_argument('--module', help='use a module name', action='store_true', default=False)\n parser.add_argument('--root', help='define the root folder where your elm-package lives', default='spec/elm')\n parser.add_argument('--bin', help='define a custom binary location - if none provided, default to global', default=None)\n parser.add_argument('--output', help='define a custom output file', default='_Temp.elm')\n\n args = parser.parse_args()\n\n if args.module:\n spec_files = find_specs_importing_module(args.root, args.spec_file)\n print('Found the module {module} imported by:'.format(module=','.join(args.spec_file)))\n print('---------------------------------------')\n for file in spec_files:\n print(file)\n else:\n spec_files = args.spec_file\n\n run_elm_test_on_files(args.bin, args.root, spec_files, args.output)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6030120253562927,
"alphanum_fraction": 0.6963855624198914,
"avg_line_length": 45.11111068725586,
"blob_id": "273a91907eae7acf73a7c3633212c9fabed9622d",
"content_id": "749bc860364207f37f19fe7021cf3d4590511af7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1660,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 36,
"path": "/README.md",
"repo_name": "tcmcgee/elm-test-runner",
"src_encoding": "UTF-8",
"text": "# elm-test-runner\n\n\nOnly run and compile some test files at a time. Only rules are that you need to expose `spec : Test` in any files you want to have things run from.\n\n\n\n\n\n\n```\nusage: run_single_elm_test.py [-h] [--module] [--root ROOT] [--bin BIN]\n [--output OUTPUT]\n spec_file [spec_file ...]\n```\nRun tests in files manually by just giving their name. Give multiple names if you want. Globs too.\n\nUsing the `--module` flag will create a test suite generated from every module that imports that module.\n```\nThis script will:\n - Grab the files\n - Find the exposed names from a file\n - Group all the ones with the type `: Test` or the name `spec`\n - Create a test suite composed from all the found tests in each file\n - Run them!\n\npositional arguments:\n spec_file give relative path to the spec file you want to run or the name of the module\n\noptional arguments:\n -h, --help show this help message and exit\n --module use a module name\n --root ROOT define the root folder where your elm-package lives\n --bin BIN define a custom binary location - if none provided, default to global\n --output OUTPUT define a custom output file\n```\n"
}
] | 2 |
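The docstring examples in `run_single_elm_test.py` above are plain doctests; a minimal way to exercise them, assuming the module is importable under that name:

```python
import doctest
import run_single_elm_test

# Returns TestResults(failed=0, attempted=N) when all docstring examples pass.
print(doctest.testmod(run_single_elm_test, verbose=False))
```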
yegane-AI/object-localization | https://github.com/yegane-AI/object-localization | 0b2c4069c1d3effbe4179a2f66c244ddbb09e04f | 0781b70d20cde5bfaab35e7f80e2b2b2aa7f7c0f | 7291bf2b74f50ae02d3068d9e145c35f6121b453 | refs/heads/main | 2023-08-14T00:57:26.258218 | 2021-09-20T19:24:46 | 2021-09-20T19:24:46 | 408,572,966 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.668579638004303,
"alphanum_fraction": 0.6865136027336121,
"avg_line_length": 27.4489803314209,
"blob_id": "69a869131b1af59aa941d2775f2488107fa477d9",
"content_id": "a326586950122594641869bb953dad8728803f63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1394,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 49,
"path": "/image-color-detection.py",
"repo_name": "yegane-AI/object-localization",
"src_encoding": "UTF-8",
"text": "!pip install opencv-python scikit-learn numpy matplotlib \n\n\nfrom collections import Counter\nfrom sklearn.cluster import KMeans\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\n\nimage = cv2.imread('tulips.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nplt.imshow(image)\n\ndef rgb_to_hex(rgb_color):\n hex_color = \"#\"\n for i in rgb_color:\n i = int(i)\n hex_color += (\"{:02x}\".format(i))\n return hex_color\n \n\ndef prep_image(raw_img):\n modified_img = cv2.resize(raw_img, (900, 600), interpolation = cv2.INTER_AREA)\n modified_img = modified_img.reshape(modified_img.shape[0]*modified_img.shape[1], 3)\n return modified_img\n \n \n \ndef color_analysis(img):\n clf = KMeans(n_clusters = 10)\n color_labels = clf.fit_predict(img)\n center_colors = clf.cluster_centers_\n counts = Counter(color_labels)\n ordered_colors = [center_colors[i] for i in counts.keys()]\n hex_colors = [rgb_to_hex(ordered_colors[i]) for i in counts.keys()]\n plt.figure(figsize = (12, 8))\n plt.pie(counts.values(), labels = hex_colors, colors = hex_colors, autopct='%1.1f%%')\n plt.savefig(\"color_analysis_report.png\")\n print(hex_colors)\n\n\n\nmodified_image = prep_image(image)\n#color_analysis(modified_image)\n#plt.imshow(image)\nprint(plt.imshow(image), color_analysis(modified_image))\n#plt.savefig('saved_figure.png')\n"
},
{
"alpha_fraction": 0.6770306825637817,
"alphanum_fraction": 0.7011271119117737,
"avg_line_length": 27.577777862548828,
"blob_id": "ebe83ccd990a4e046cc916dae4b0325e9fafeb81",
"content_id": "8d0102cc5493f65ed823c395d707d07778c2a45e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2573,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 90,
"path": "/auto_data_generator.py",
"repo_name": "yegane-AI/object-localization",
"src_encoding": "UTF-8",
"text": "\n# Commented out IPython magic to ensure Python compatibility.\n# import warnings\n# warnings.filterwarnings('ignore')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nfrom keras import models,layers\nfrom keras import applications\n# import glob2 as glob\nfrom numpy import random\n\nfrom keras.datasets import mnist\nimport numpy as np\n\n(x_train, _), (x_test, _) = mnist.load_data() #underscore for unanimous label that we don't\n # want to keep im memory\n#Normalization\n\nx_train = x_train.astype('float32')/255.0\nx_test = x_test.astype('float32')/255.0\n\nx_train = x_train.reshape((-1,784)) #to go from (60000,28,28) to new shape and -1 let\n #numpy to calculate the number for you\nx_test = x_test.reshape((-1,784))\n\nprint(x_train.shape,x_test.shape)\n\n# dimensionality of the latents space \nembedding_dim = 32 \n\n#Input layer\ninput_img = layers.Input(shape=(784,)) \n\n#Encoding layer\nencoded = layers.Dense(embedding_dim, activation='relu')(input_img)\n\n#Decoding layer\ndecoded = layers.Dense(784,activation='sigmoid')(encoded) \n\n#Autoencoder --> in this API Model, we define the Input tensor and the output layer\n#wraps the 2 layers of Encoder e Decoder\nautoencoder = models.Model(input_img,decoded)\nautoencoder.summary()\n\n#Encoder\nencoder = models.Model(input_img,encoded)\nencoder.summary()\n\n#Decoder\nencoded_input = layers.Input(shape=(embedding_dim,))\ndecoder_layers = autoencoder.layers[-1] #applying the last layer\ndecoder = models.Model(encoded_input,decoder_layers(encoded_input))\n\nprint(input_img)\nprint(encoded)\n\nautoencoder.compile(\n optimizer='adadelta', #backpropagation Gradient Descent\n loss='binary_crossentropy'\n)\n\nhistory = autoencoder.fit(x_train,x_train,epochs=5,batch_size=256,shuffle=True,\n validation_data=(x_test,x_test))\n\nplt.plot(history.history['loss'],label='loss')\nplt.plot(history.history['val_loss'],label='val_loss')\nplt.legend()\nplt.show()\nplt.close()\n\nencoded_imgs = encoder.predict(x_test) \ndecoded_imgs = decoder.predict(encoded_imgs) \nprint(encoded_imgs.shape,decoded_imgs.shape)\n\nn = 10\nplt.figure(figsize=(20,4))\nfor i in range(n):\n ax = plt.subplot(2, n, i+1)\n plt.imshow(x_test[i].reshape((28,28)),cmap='gray')\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\n \n ax = plt.subplot(2,n,i+1+n)\n plt.imshow(decoded_imgs[i].reshape((28,28)),cmap='gray')\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\n \nplt.show()\nplt.close()\n"
}
] | 2 |
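A quick sanity check of the `rgb_to_hex` helper used in `image-color-detection.py` above — restated here so the snippet is self-contained; the input tuples are arbitrary:

```python
def rgb_to_hex(rgb_color):
    # each channel becomes two lowercase hex digits
    return "#" + "".join("{:02x}".format(int(c)) for c in rgb_color)

assert rgb_to_hex((255, 0, 128)) == "#ff0080"
assert rgb_to_hex((0, 0, 0)) == "#000000"
```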
muhedin998/CryptoPriceAPI-in-Flask | https://github.com/muhedin998/CryptoPriceAPI-in-Flask | 08ba351b8e938fe73a17f96c6e2ef6463fe6c4ac | a90fd0f834157fb9d8ec3aee33d4dbc0619e7a64 | d38b9b7fd1f05fca177fea143e77a0e348c671c2 | refs/heads/master | 2023-08-17T12:07:38.726000 | 2021-10-08T18:35:44 | 2021-10-08T18:35:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5735944509506226,
"alphanum_fraction": 0.5837018489837646,
"avg_line_length": 25.847457885742188,
"blob_id": "709ae387d93025b23cdfbbfd6bb1f58342d78c7d",
"content_id": "11571390f81ebce9fe6bd683cbf024803b1f51c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1583,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 59,
"path": "/app.py",
"repo_name": "muhedin998/CryptoPriceAPI-in-Flask",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom requests import Request, Session\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\nimport json, requests\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n req = requests.get(\"http://data.fixer.io/api/latest?access_key=fa21cfc26f5ef926bd678abe21fb2b4f\").json()\n data = req[\"rates\"]\n lst = []\n lst2 = []\n for i in data:\n lst.append(i)\n lst2.append(req[\"rates\"][f\"{i}\"])\n \n ziped = zip(lst,lst2)\n\n return render_template('home.html', data=ziped) \[email protected]('/crypto')\ndef crypto():\n url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\n parameters = {\n 'start':'1',\n 'limit':'50',\n 'convert':'USD'\n }\n headers = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': 'd5fb6795-691a-4694-88b2-213cf6424b52',\n }\n\n session = Session()\n session.headers.update(headers)\n\n try:\n response = session.get(url, params=parameters)\n data = json.loads(response.text)\n print(data)\n kripto = data[\"data\"]\n naziv = []\n cena = []\n for b in kripto:\n naziv.append(b[\"name\"])\n cena.append(round((b[\"quote\"][\"USD\"][\"price\"]), 2))\n \n ziped_crypto = zip(naziv,cena)\n\n except (ConnectionError, Timeout, TooManyRedirects) as e:\n print(e)\n\n return render_template('crypto.html', data = ziped_crypto )\[email protected]('/register')\ndef register():\n return render_template('register.html')\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000 )"
}
] | 1 |
urbaneriver426/Heap | https://github.com/urbaneriver426/Heap | f25200bb370229e4fc9784d1260ee5a812580e23 | f146a7eee3906ee56c4e497e893fcf463deff397 | 8ad328e8b6d4fbab61e5d5d37e0f62017ba67ab4 | refs/heads/master | 2021-02-18T20:58:15.087192 | 2020-03-09T11:23:35 | 2020-03-09T11:23:35 | 245,236,208 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5690389275550842,
"alphanum_fraction": 0.6082724928855896,
"avg_line_length": 21.83333396911621,
"blob_id": "a8ba89b950dd3335228749cbbe9c9825e99ec2ee",
"content_id": "ec9e2283629600552cc9e0b01842da58f98d5c00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3288,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 144,
"path": "/heap_test.py",
"repo_name": "urbaneriver426/Heap",
"src_encoding": "UTF-8",
"text": "import unittest\n\nclass Heap:\n\n\tdef __init__(self):\n\t\tself.HeapArray = []\n\t\tself.Length = 0\n\t\t\n\tdef MakeHeap(self, a, depth):\n\t\tself.Length = 2**depth-1\n\t\tfor i in range (len(a)):\n\t\t\tif i < self.Length:\n\t\t\t\tself.HeapArray.append(a[i])\n\t\t\t\twhile i > 0: \n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2] = (self.HeapArray[i//2], \n\t\t\t\t\t\t\tself.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2-1]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2-1] = (self.HeapArray[i//2-1], \n\t\t\t\t\t\t\tself.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2-1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\telse:\n\t\t\t\treturn\n\n\tdef GetMax(self):\n\t\tif len(self.HeapArray) > 0:\n\t\t\treturn self.HeapArray[0]\n\t\telse:\n\t\t\treturn -1\n\n\tdef Add(self, key):\n\t\tif len(self.HeapArray) < self.Length:\n\t\t\tself.HeapArray.append(key)\n\t\t\ti = len(self.HeapArray)-1\n\t\t\twhile i > 0: \n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2] = (self.HeapArray[i//2], \n\t\t\t\t\t\t\tself.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2-1]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2-1] = (self.HeapArray[i//2-1], \n\t\t\t\t\t\t\tself.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2-1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\nclass TestHeap(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.heap = Heap()\n\n\tdef testMakeEmptyHeap(self):\n\t\tx = [1]\n\t\tself.heap.MakeHeap(x,0)\n\t\tassert len(self.heap.HeapArray) == 0\n\n\tdef testMakeHalfFullHeap(self):\n\t\tx = [1,2,3,4]\n\t\tself.heap.MakeHeap(x,3)\n\t\tassert self.heap.Length == 7\n\t\tassert len(self.heap.HeapArray) == 4\n\n\tdef testMakeFullHeap(self):\n\t\tx = [1,2,3,4,5,6,7]\n\t\tself.heap.MakeHeap(x,3)\n\t\tassert self.heap.Length == 7\n\t\tassert len(self.heap.HeapArray) == 7\n\n\tdef testMakeFullHeapPlus(self):\n\t\tx = [1,2,3,4,5,6,7,8,9]\n\t\tself.heap.MakeHeap(x,3)\n\t\tassert self.heap.Length == 7\n\t\tassert len(self.heap.HeapArray) == 7\n\t\tassert 9 not in self.heap.HeapArray\n\t\tassert 8 not in self.heap.HeapArray\n\n\tdef testAddBiggest(self):\n\t\tx = [1,2,3,4]\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.Add(20)\n\t\tassert len(self.heap.HeapArray) == 5\n\t\tassert self.heap.HeapArray[0] == 20\n\t\tassert y is True\n\n\tdef testAddLowest(self):\n\t\tx = [2,3,4,5]\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.Add(1)\n\t\tassert len(self.heap.HeapArray) == 5\n\t\tassert self.heap.HeapArray[4] == 1\n\t\tassert y is True\n\n\tdef testAddSomewhere(self):\n\t\tx = [2,6,4,15]\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.Add(5)\n\t\tassert len(self.heap.HeapArray) == 5\n\t\tassert 5 in self.heap.HeapArray\n\t\tassert y is True\n\n\tdef testAddInEmpty(self):\n\t\tx = []\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.Add(10)\n\t\tassert self.heap.Length == 7\n\t\tassert len(self.heap.HeapArray) == 1\n\t\tassert 10 in self.heap.HeapArray\n\t\tassert y is True\n\n\tdef testAddInFull(self):\n\t\tx = [1,2,3,4,5,6,7]\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.Add(8)\n\t\tassert y is False\n\t\tassert 8 not in self.heap.HeapArray\n\n\tdef testGetMaxEmpty(self):\n\t\tx = 
[]\n\t\tself.heap.MakeHeap(x,0)\n\t\ty = self.heap.GetMax()\n\t\tassert y == -1\n\n\tdef testGetMax(self):\n\t\tx = [1,2,3,4]\n\t\tself.heap.MakeHeap(x,3)\n\t\ty = self.heap.GetMax()\n\t\tassert y == 4\n\nif __name__ == '__main__':\n\tunittest.main()\n"
},
{
"alpha_fraction": 0.5354464650154114,
"alphanum_fraction": 0.5637354850769043,
"avg_line_length": 28.04950523376465,
"blob_id": "903e62e068b7441e85670ad111e38b9a96bcb430",
"content_id": "c311e97ea2144b1fcde80dd68a35dd0ef1494b41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2938,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 101,
"path": "/heap.py",
"repo_name": "urbaneriver426/Heap",
"src_encoding": "UTF-8",
"text": "class Heap:\n\n\tdef __init__(self):\n\t\tself.HeapArray = []\n\t\tself.Length = 0\n\t\t\n\tdef MakeHeap(self, a, depth):\n\t\tself.Length = 2**(depth+1)-1\n\t\tfor i in range (len(a)):\n\t\t\tif i < self.Length:\n\t\t\t\tself.HeapArray.append(a[i])\n\t\t\t\twhile i > 0: \n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2] = (\n\t\t\t\t\t\t\t\tself.HeapArray[i//2], self.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2-1]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2-1] = (\n\t\t\t\t\t\t\t\tself.HeapArray[i//2-1], self.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2-1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\telse:\n\t\t\t\treturn\n\n\tdef GetMax(self):\n\t\tif len(self.HeapArray) > 0:\n\t\t\tself.HeapArray[0], self.HeapArray[len(self.HeapArray)-1] = (\n\t\t\t\tself.HeapArray[len(self.HeapArray)-1], self.HeapArray[0])\n\t\t\tresult = self.HeapArray.pop(len(self.HeapArray)-1)\n\t\t\tif len(self.HeapArray) <= 1:\n\t\t\t\treturn result\n\t\t\telse:\n\t\t\t\ttest_node = self.HeapArray[0]\n\t\t\t\ttest_index = 0\n\t\t\t\twhile test_node:\n\t\t\t\t\tif 2*test_index+2 <= len(self.HeapArray)-1: \n\t\t\t\t\t\tif (self.HeapArray[2*test_index+1] > \n\t\t\t\t\t\t\tself.HeapArray[2*test_index+2]):\n\t\t\t\t\t\t\tif (self.HeapArray[2*test_index+1] > \n\t\t\t\t\t\t\t\tself.HeapArray[test_index]):\n\t\t\t\t\t\t\t\t(self.HeapArray[2*test_index+1], \n\t\t\t\t\t\t\t\t\tself.HeapArray[test_index]) = (\n\t\t\t\t\t\t\t\t\tself.HeapArray[test_index], \n\t\t\t\t\t\t\t\t\tself.HeapArray[2*test_index+1])\n\t\t\t\t\t\t\t\ttest_index = 2*test_index+1\n\t\t\t\t\t\t\t\ttest_node = self.HeapArray[test_index]\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\treturn result\n\t\t\t\t\t\telse: # если \n\t\t\t\t\t\t\tif (self.HeapArray[2*test_index+2] > \n\t\t\t\t\t\t\t\tself.HeapArray[test_index]):\n\t\t\t\t\t\t\t\t(self.HeapArray[2*test_index+2],\n\t\t\t\t\t\t\t\t\tself.HeapArray[test_index]) = (\n\t\t\t\t\t\t\t\t\tself.HeapArray[test_index], \n\t\t\t\t\t\t\t\t\tself.HeapArray[2*test_index+2])\n\t\t\t\t\t\t\t\ttest_index = 2*test_index+2\n\t\t\t\t\t\t\t\ttest_node = self.HeapArray[test_index]\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\treturn result\n\t\t\t\t\telif 2*test_index+1 <= len(self.HeapArray)-1:\n\t\t\t\t\t\tif (self.HeapArray[2*test_index+1] > \n\t\t\t\t\t\t\t\tself.HeapArray[test_index]):\n\t\t\t\t\t\t\t(self.HeapArray[2*test_index+1],\n\t\t\t\t\t\t\t\tself.HeapArray[test_index]) = (\n\t\t\t\t\t\t\t\tself.HeapArray[test_index], \n\t\t\t\t\t\t\t\tself.HeapArray[2*test_index+1])\n\t\t\t\t\t\t\ttest_index = 2*test_index+1\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\treturn result\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn result\n\t\telse:\n\t\t\treturn -1\n\n\tdef Add(self, key):\n\t\tif len(self.HeapArray) < self.Length:\n\t\t\tself.HeapArray.append(key)\n\t\t\ti = len(self.HeapArray)-1\n\t\t\twhile i > 0: \n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2] = (\n\t\t\t\t\t\t\t\tself.HeapArray[i//2], self.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.HeapArray[i] > self.HeapArray[i//2-1]:\n\t\t\t\t\t\t\tself.HeapArray[i], self.HeapArray[i//2-1] = (\n\t\t\t\t\t\t\t\tself.HeapArray[i//2-1], self.HeapArray[i])\n\t\t\t\t\t\t\ti = i//2-1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti = 0\n\t\t\treturn 
True\n\t\telse:\n\t\t\treturn False\n"
}
] | 2 |
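A cross-check idea (not part of the repo): compare the destructive `GetMax` from `heap.py` above against the stdlib `heapq`, which is a min-heap — negating the values flips it into a max-heap:

```python
import heapq

values = [2, 6, 4, 15, 5]
mirror = [-v for v in values]   # negate so the min-heap pops the max first
heapq.heapify(mirror)

h = Heap()                      # assumes heap.py's Heap is in scope
h.MakeHeap(values, 3)           # depth 3 -> room for 2**(3+1)-1 = 15 keys
while mirror:
    assert h.GetMax() == -heapq.heappop(mirror)
```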
cpdugenio/yotted | https://github.com/cpdugenio/yotted | 028c33d5a630b83d4e515272744a61280895726d | 695049b1b959ca2e30254affc3729a4a93469100 | a0aeafdbd7500e6be12116a2ec5e9bffbf204435 | refs/heads/master | 2018-09-30T21:08:41.963003 | 2017-02-18T10:51:45 | 2017-02-18T10:51:45 | 82,379,951 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5788553953170776,
"alphanum_fraction": 0.5858453512191772,
"avg_line_length": 26.25,
"blob_id": "2170990694977705ee02ebd5a000d5afcf723419",
"content_id": "55dd8ffd467194e7d08925735be2066b008d42bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2289,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 84,
"path": "/yotted.py",
"repo_name": "cpdugenio/yotted",
"src_encoding": "UTF-8",
"text": "import sys\nimport json\nimport argparse\nimport tabulate\nfrom yelp.client import Client\nfrom yelp.oauth1_authenticator import Oauth1Authenticator\n\nMAX_DISTANCE = 800 # half a mile in meters?\nMIN_COUNT = 50 # high snobbery\nMIN_RATING = 3.5 # high snobbery\n\n# read API config\nyelp_client = None\ntry:\n with open('yelp-config.json') as config:\n auth = Oauth1Authenticator(**json.load(config))\n yelp_client = Client(auth)\nexcept Exception as e:\n sys.stderr.write(\"OOPS! Failed to create client!\\n\")\n raise e\n\n# parser\nparser = argparse.ArgumentParser(description=\"Search yelp\")\nparser.add_argument(\n \"coords\", type=str, help=\"Text file with coordinates to search\")\nparser.add_argument(\n \"search\", type=str, help=\"Yelp search\")\nargs = parser.parse_args()\n\n# parse coords\nraw_coords = []\nwith open(args.coords) as coords:\n raw_coords = coords.readlines()\ncoords = []\nfor coord in raw_coords:\n chash = coord.find('#')\n if chash != -1:\n coord = coord[:chash]\n coord = coord.strip()\n coords.append(coord)\n\nparams = {\n 'term': args.search,\n 'sort': 2, # best rated\n 'radius_filter': MAX_DISTANCE,\n}\n\nfindings = dict()\n\nfields = [\n \"Rate\",\n \"Ct\",\n \"Name\",\n \"City\",\n \"Dist\",\n \"Categories\",\n \"URL\",\n ]\nfor coord in coords:\n coord = coord.split(',')\n results = yelp_client.search_by_coordinates(*coord, **params)\n for business in results.businesses:\n if business.is_closed:\n continue\n\n # snob filter\n if(business.distance < MAX_DISTANCE and\n business.review_count >= MIN_COUNT and\n business.rating >= MIN_RATING):\n findings[business.id] = [\n business.rating,\n business.review_count,\n business.name.encode('ascii', 'ignore'),\n business.location.city,\n int(business.distance),\n ', '.join(\n [c.name.encode('ascii', 'ignore')\n for c in business.categories or []]),\n business.url[:business.url.find('?')],\n ]\n\nvalues = findings.values()\nvalues.sort(key=lambda x: (x[0], x[3], x[2], x[1]))\nprint tabulate.tabulate([fields] + values)\n"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.682539701461792,
"avg_line_length": 14.75,
"blob_id": "0662127fd827a371297e839e7007771992d9dddd",
"content_id": "f5637ca67256432f979c79d4b96c9d2c34aef524",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "cpdugenio/yotted",
"src_encoding": "UTF-8",
"text": "appdirs==1.4.0\nhttplib2==0.10.3\noauth2==1.9.0.post1\npackaging==16.8\npyparsing==2.1.10\nsix==1.10.0\ntabulate==0.7.7\nyelp==1.0.2\n"
},
{
"alpha_fraction": 0.7168141603469849,
"alphanum_fraction": 0.7176991105079651,
"avg_line_length": 29.54054069519043,
"blob_id": "457f5f3848f2f7f96654b71cfcd1dffcb8726b7a",
"content_id": "2e0ff6f3902745ba2a757dffb4ae7b5275abbeec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1130,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 37,
"path": "/README.md",
"repo_name": "cpdugenio/yotted",
"src_encoding": "UTF-8",
"text": "# YOTTED\n\nI wrote this because I don't have a car and I have a metro unlimited\npass - I would like to at least explore around metro stops.\n\nInput is a search term and a text file with lat/long coordinates.\n\nOutput is list of business around about half a mile of each station with\na few snobbery filters (min rating, min review count)\n\n# Usage\n\nCreate `yelp-config.json` like so (see [Yelp: Manage API Keys](https://www.yelp.com/developers/v2/manage_api_keys)):\n\n ./yotted$ cat yelp-config.json\n {\n \"consumer_key\": \"YOUR_CONSUMER_KEY\",\n \"consumer_secret\": \"YOUR_CONSUMER_SECRET\",\n \"token\": \"YOUR_TOKEN\",\n \"token_secret\": \"YOUR_TOKEN_SECRET\"\n }\n\nRecommend using virtualenv:\n\n virtualenv venv\n source venv/bin/activate\n pip install -r requirements.txt\n python yotted.py metro.coords 'coffee'\n\nCoordinate text file should be in the form of `lat,long` on each line (See\n[metro.coords](metro.coords))\n\nSee [coffee.search](coffee.search) for current sample output\n\n# Metro Information\n\nTaken from [developer.metro.net](http://developer.metro.net/introduction/gis-data/download-gis-data/)\n"
}
] | 3 |
chzbrgr71/flight-predictor | https://github.com/chzbrgr71/flight-predictor | 26cb050ae135ff7007f1e948a9fd9d21abf535af | 5d0e7d0a0b954848c39d13ddcfeb9bd7430e8ee9 | 2db654ef06062261d25e2f5e152d906709baec44 | refs/heads/master | 2020-05-07T19:21:25.928891 | 2019-04-11T15:49:57 | 2019-04-11T15:49:57 | 180,810,304 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7096773982048035,
"alphanum_fraction": 0.7281106114387512,
"avg_line_length": 17.16666603088379,
"blob_id": "ff4f0f0e27d6edb9c60504a526f4e5a6420668ec",
"content_id": "4ebd8a537752e2733a7bb1ab50a5ef29e55de2bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 12,
"path": "/Dockerfile",
"repo_name": "chzbrgr71/flight-predictor",
"src_encoding": "UTF-8",
"text": "FROM tensorflow/tensorflow:2.0.0a0\n\nWORKDIR /\n\nCOPY ./requirements.txt /requirements.txt\n\nRUN pip install -r requirements.txt\n\nCOPY ./flights.py /flights.py\nCOPY ./data/*.* /data/\n\nENTRYPOINT [ \"python\",\"flights.py\" ]"
},
{
"alpha_fraction": 0.6900646090507507,
"alphanum_fraction": 0.7103195190429688,
"avg_line_length": 53.5523796081543,
"blob_id": "e3003841aef2b56bf7117bc92826c69055627e3d",
"content_id": "1fd9af8a0ddbd2bf68a0aa0a7480201c974ac012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5727,
"license_type": "no_license",
"max_line_length": 473,
"num_lines": 105,
"path": "/flights-summary.py",
"repo_name": "chzbrgr71/flight-predictor",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow import keras\nimport os\nfrom tensorflow.keras import layers\nimport pandas as pd\n\ncolumn_names = ['YEAR','MONTH','DAY','DAY_OF_WEEK','AIRLINE','FLIGHT_NUMBER','TAIL_NUMBER','ORIGIN_AIRPORT','DESTINATION_AIRPORT','SCHEDULED_DEPARTURE','DEPARTURE_TIME','DEPARTURE_DELAY','TAXI_OUT','WHEELS_OFF','SCHEDULED_TIME','ELAPSED_TIME','AIR_TIME','DISTANCE','WHEELS_ON','TAXI_IN','SCHEDULED_ARRIVAL','ARRIVAL_TIME','ARRIVAL_DELAY','DIVERTED','CANCELLED','CANCELLATION_REASON','AIR_SYSTEM_DELAY','SECURITY_DELAY','AIRLINE_DELAY','LATE_AIRCRAFT_DELAY','WEATHER_DELAY']\ndataset = pd.read_csv('./data/flights-small.csv', delimiter = ',', names=column_names)\n\n#print(dataset.head(4))\n#print(dataset.sample(5))\n#print(dataset[['AIRLINE','FLIGHT_NUMBER','ORIGIN_AIRPORT','DESTINATION_AIRPORT','SCHEDULED_DEPARTURE','DEPARTURE_TIME']])\n\nairlines = pd.read_csv('./data/airlines.csv')\nairports= pd.read_csv('./data/airports.csv')\nflights = pd.read_csv('./data/flights-small.csv', low_memory=False, names=column_names)\n\nflights_v1 = pd.merge(flights, airlines, left_on='AIRLINE', right_on='IATA_CODE', how='left')\nflights_v1.drop('IATA_CODE', axis=1, inplace=True)\nflights_v1.rename(columns={'AIRLINE_x': 'AIRLINE_CODE','AIRLINE_y': 'AIRLINE'}, inplace=True)\n\nairport_mean_delays = pd.DataFrame(pd.Series(flights['ORIGIN_AIRPORT'].unique()))\nairport_mean_delays.set_index(0, drop = True, inplace = True)\nabbr_companies = airlines.set_index('IATA_CODE')['AIRLINE'].to_dict()\nidentify_airport = airports.set_index('IATA_CODE')['CITY'].to_dict()\n\n# function that extract statistical parameters from a grouby objet:\ndef get_stats(group):\n return {'min': group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean()}\n#___________________________________________________________\n\nfor carrier in abbr_companies.keys():\n fg1 = flights[flights['AIRLINE'] == carrier]\n test = fg1['DEPARTURE_DELAY'].groupby(flights['ORIGIN_AIRPORT']).apply(get_stats).unstack()\n airport_mean_delays[carrier] = test.loc[:, 'mean'] \n\nairline_rank_v09 = pd.DataFrame(flights_v1.groupby(['AIRLINE'])['AIR_SYSTEM_DELAY', 'AIRLINE_DELAY', 'LATE_AIRCRAFT_DELAY', 'WEATHER_DELAY'].sum()).reset_index()\nairline_rank_v09['total'] = airline_rank_v09['AIR_SYSTEM_DELAY'] + airline_rank_v09['AIRLINE_DELAY'] + airline_rank_v09['LATE_AIRCRAFT_DELAY'] + airline_rank_v09['WEATHER_DELAY']\nairline_rank_v09['pcnt_LATE_AIRCRAFT_DELAY'] = (airline_rank_v09['LATE_AIRCRAFT_DELAY']/airline_rank_v09['total'])\nairline_rank_v09['pcnt_AIRLINE_DELAY'] = (airline_rank_v09['AIRLINE_DELAY']/airline_rank_v09['total'])\nairline_rank_v09['pcnt_AIR_SYSTEM_DELAY'] = (airline_rank_v09['AIR_SYSTEM_DELAY']/airline_rank_v09['total'])\nairline_rank_v09['pcnt_WEATHER_DELAY'] = (airline_rank_v09['WEATHER_DELAY']/airline_rank_v09['total'])\n\nairline_rank_v01 = pd.DataFrame({'flight_volume' : flights_v1.groupby(['AIRLINE'])['FLIGHT_NUMBER'].count()}).reset_index()\nairline_rank_v01.sort_values(\"flight_volume\", ascending=True, inplace=True)\nflight_volume_total = airline_rank_v01['flight_volume'].sum()\nairline_rank_v01['flight_pcnt'] = airline_rank_v01['flight_volume']/flight_volume_total\n\nairline_rank_v02 = pd.DataFrame({'cancellation_rate' : flights_v1.groupby(['AIRLINE'])['CANCELLED'].mean()}).reset_index()\nairline_rank_v02.sort_values(\"cancellation_rate\", ascending=False, inplace=True)\nairline_rank_v03 = pd.DataFrame({'divertion_rate' : 
flights_v1.groupby(['AIRLINE'])['DIVERTED'].mean()}).reset_index()\nairline_rank_v03.sort_values(\"divertion_rate\", ascending=False, inplace=True)\nairline_rank_v1 = pd.merge(airline_rank_v01, airline_rank_v02, left_on='AIRLINE', right_on='AIRLINE', how='left')\nairline_rank_v1 = pd.merge(airline_rank_v1, airline_rank_v03, left_on='AIRLINE', right_on='AIRLINE', how='left')\n\nairline_rank_v07 = pd.DataFrame({'avg_arrival_delay' : flights_v1.groupby(['AIRLINE'])['ARRIVAL_DELAY'].mean()}).reset_index()\nairline_rank_v08 = pd.DataFrame({'avg_departure_delay' : flights_v1.groupby(['AIRLINE'])['DEPARTURE_DELAY'].mean()}).reset_index()\nairline_rank_v1 = pd.merge(airline_rank_v1, airline_rank_v07, left_on='AIRLINE', right_on='AIRLINE', how='left')\nairline_rank_v1 = pd.merge(airline_rank_v1, airline_rank_v08, left_on='AIRLINE', right_on='AIRLINE', how='left')\n\nprint(airline_rank_v1.head(25))\n\n\nday_of_week_vocab = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\" ]\nday_of_week_column = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"day_of_week\", vocabulary_list=day_of_week_vocab)\n\nairline_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(\n key=\"airline\",\n vocabulary_file=\"./data/airlines.txt\",\n vocabulary_size=14)\n\nairport_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(\n key=\"airport\",\n vocabulary_file=\"./data/airports.txt\",\n vocabulary_size=322)\n\nfeature_columns = [ day_of_week_column, airline_feature_column, airport_feature_column ]\n\n\nairline_one_hot = pd.get_dummies(flights_v1['ORIGIN_AIRPORT'])\n\nprint(airline_one_hot.info())\nprint(\"First row values: \" + str(airline_one_hot.values[0]))\n\n# build model\nmodel = keras.Sequential([\n layers.Dense(64, activation='relu', input_shape=[len(train_ds.keys())]),\n layers.Dense(64, activation='relu'),\n layers.Dense(1)\n])\n\noptimizer = tf.keras.optimizers.RMSprop(0.001)\n\nmodel.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])\n\nmodel.summary()\n\nEPOCHS = 10\n\n# setup TB callback\ntensorboard_cbk = keras.callbacks.TensorBoard(log_dir='/chzbrgr71/flight-delays/logs')\n\nmodel.fit(train_ds, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[tensorboard_cbk])"
},
{
"alpha_fraction": 0.6805555820465088,
"alphanum_fraction": 0.7480158805847168,
"avg_line_length": 20.913043975830078,
"blob_id": "61d4946a7335157073108aa6d59e81d882a5e291",
"content_id": "9c2d7e2552c5a140466f4e667396f88b305dd35d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 23,
"path": "/README.md",
"repo_name": "chzbrgr71/flight-predictor",
"src_encoding": "UTF-8",
"text": "# Flight Predictor Tensorflow\n\n### Running in Docker\n\ndocker run -it --rm --name tf \\\n --publish 6006:6006 \\\n --volume /home/me:/me \\\n --workdir /me \\\n tensorflow/tensorflow:2.0.0a0 bash\n\n\n\n### Links\n\nhttps://www.kaggle.com/usdot/flight-delays \n\nhttps://www.kaggle.com/giovamata/airlinedelaycauses \n\nhttps://github.com/sabbadini/FlightDelay \n\nhttps://gist.github.com/martinwicke/6838c23abdc53e6bcda36ed9f40cff39 \n\nhttps://developers.googleblog.com/2017/11/introducing-tensorflow-feature-columns.html "
},
{
"alpha_fraction": 0.6191393733024597,
"alphanum_fraction": 0.6362663507461548,
"avg_line_length": 48.680850982666016,
"blob_id": "ed7592de25719cab4cdc8d522a0d3edc727125d8",
"content_id": "dbe699a699dafa44cdc044b3c7d751f5299d117a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4671,
"license_type": "no_license",
"max_line_length": 473,
"num_lines": 94,
"path": "/flights.py",
"repo_name": "chzbrgr71/flight-predictor",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow import keras\nimport os\nfrom tensorflow.keras import layers\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\ncolumn_names = ['YEAR','MONTH','DAY','DAY_OF_WEEK','AIRLINE','FLIGHT_NUMBER','TAIL_NUMBER','ORIGIN_AIRPORT','DESTINATION_AIRPORT','SCHEDULED_DEPARTURE','DEPARTURE_TIME','DEPARTURE_DELAY','TAXI_OUT','WHEELS_OFF','SCHEDULED_TIME','ELAPSED_TIME','AIR_TIME','DISTANCE','WHEELS_ON','TAXI_IN','SCHEDULED_ARRIVAL','ARRIVAL_TIME','ARRIVAL_DELAY','DIVERTED','CANCELLED','CANCELLATION_REASON','AIR_SYSTEM_DELAY','SECURITY_DELAY','AIRLINE_DELAY','LATE_AIRCRAFT_DELAY','WEATHER_DELAY']\n\nairlines = pd.read_csv('./data/airlines.csv')\nabbr_companies = airlines.set_index('IATA_CODE')['AIRLINE'].to_dict()\nairports= pd.read_csv('./data/airports.csv')\nflights_v1 = pd.read_csv('./data/flights-small.csv', low_memory=False, names=column_names)\n\n#flights_v1 = pd.merge(flights, airlines, left_on='AIRLINE', right_on='IATA_CODE', how='left')\n#flights_v1.drop('IATA_CODE', axis=1, inplace=True)\n#flights_v1.rename(columns={'AIRLINE_x': 'AIRLINE_CODE','AIRLINE_y': 'AIRLINE'}, inplace=True)\n\n#_________________________________________________________\n# Function that convert the 'HHMM' string to datetime.time\ndef format_heure(chaine):\n if pd.isnull(chaine):\n return np.nan\n else:\n if chaine == 2400: chaine = 0\n chaine = \"{0:04d}\".format(int(chaine))\n heure = datetime.time(int(chaine[0:2]), int(chaine[2:4]))\n return heure\n#_____________________________________________________________________\n# Function that combines a date and time to produce a datetime.datetime\ndef combine_date_heure(x):\n if pd.isnull(x[0]) or pd.isnull(x[1]):\n return np.nan\n else:\n return datetime.datetime.combine(x[0],x[1])\n#_______________________________________________________________________________\n# Function that combine two columns of the dataframe to create a datetime format\ndef create_flight_time(df, col): \n liste = []\n for index, cols in df[['DATE', col]].iterrows(): \n if pd.isnull(cols[1]):\n liste.append(np.nan)\n elif float(cols[1]) == 2400:\n cols[0] += datetime.timedelta(days=1)\n cols[1] = datetime.time(0,0)\n liste.append(combine_date_heure(cols))\n else:\n cols[1] = format_heure(cols[1])\n liste.append(combine_date_heure(cols))\n return pd.Series(liste)\n\nflights_v1['DATE'] = pd.to_datetime(flights_v1[['YEAR','MONTH', 'DAY']])\nflights_v1['SCHEDULED_DEPARTURE'] = create_flight_time(flights_v1, 'SCHEDULED_DEPARTURE')\nflights_v1['DEPARTURE_TIME'] = flights_v1['DEPARTURE_TIME'].apply(format_heure)\nflights_v1['SCHEDULED_ARRIVAL'] = flights_v1['SCHEDULED_ARRIVAL'].apply(format_heure)\nflights_v1['ARRIVAL_TIME'] = flights_v1['ARRIVAL_TIME'].apply(format_heure)\ndays = {1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday',7:'Sunday'}\nmonths = {1:'January',2:'February',3:'March',4:'April',5:'May',6:'June',7:'July',8:'August',9:'September',10:'October',11:'November',12:'December'}\nflights_v1['DAY_OF_WEEK'] = flights_v1['DAY_OF_WEEK'].apply(lambda x: days[x])\nflights_v1['MONTH'] = flights_v1['MONTH'].apply(lambda x: months[x])\n\nvariables_to_remove = ['TAXI_OUT', 'TAXI_IN', 'WHEELS_ON', 'WHEELS_OFF', 'YEAR', \n 'DAY','DATE', 'AIR_SYSTEM_DELAY',\n 'SECURITY_DELAY', 'AIRLINE_DELAY', 'LATE_AIRCRAFT_DELAY',\n 'WEATHER_DELAY', 'DIVERTED', 'CANCELLED', 'CANCELLATION_REASON',\n 'FLIGHT_NUMBER', 'TAIL_NUMBER', 'AIR_TIME', 
'SCHEDULED_DEPARTURE', \n 'DEPARTURE_TIME', 'SCHEDULED_ARRIVAL', 'ARRIVAL_TIME', \n 'SCHEDULED_TIME', 'ELAPSED_TIME', 'ARRIVAL_DELAY']\nflights_v1.drop(variables_to_remove, axis = 1, inplace = True)\nflights_v1 = flights_v1[['AIRLINE','ORIGIN_AIRPORT','DESTINATION_AIRPORT','MONTH','DAY_OF_WEEK','DEPARTURE_DELAY']]\n\n# show sample 5 rows\nprint(flights_v1.head(5))\n\nflights_shuffled = shuffle(flights_v1)\n\ntrain, test = train_test_split(flights_shuffled, test_size=0.2)\n\nprint(\"Training dataset size: \" + str(len(train)))\nprint(\"Test dataset size: \" + str(len(test)))\n\ndef df_to_dataset(dataframe, shuffle=True):\n dataframe = dataframe.copy()\n labels = dataframe.pop('DEPARTURE_DELAY')\n ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))\n if shuffle:\n ds = ds.shuffle(buffer_size=len(dataframe))\n return ds\n\ntrain_ds = df_to_dataset(train, shuffle=False)\ntest_ds = df_to_dataset(test, shuffle=False)\n\n"
}
] | 4 |
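One note on the tf.data pipeline built in `flights.py` above: Keras expects a batched dataset, so a batched variant of `df_to_dataset` would typically be used before `model.fit` — a sketch, assuming TF 2.x and the `train` DataFrame from the script:

```python
import tensorflow as tf

def df_to_batched_dataset(dataframe, shuffle=True, batch_size=32):
    dataframe = dataframe.copy()
    labels = dataframe.pop('DEPARTURE_DELAY')
    ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(dataframe))
    return ds.batch(batch_size)  # yields (dict of feature batches, labels)
```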
032-shi/pass-release-test | https://github.com/032-shi/pass-release-test | 24576dbbee64209eadc4a75eff8e3200589758be | 0c9fba31b4b7932a9d95b363cacd45ca1c2ecef0 | 712fd8a710387ceb6fdc4beb1439ec02fd0e5bf0 | refs/heads/master | 2023-07-09T22:56:54.697935 | 2021-08-17T13:50:58 | 2021-08-17T13:50:58 | 394,259,387 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6235154271125793,
"alphanum_fraction": 0.6318289637565613,
"avg_line_length": 27.100000381469727,
"blob_id": "e091205215184f12854e08143f69514d233f08f8",
"content_id": "c043487450d32765a08225592ef2b8d921b2d1fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 30,
"path": "/main.py",
"repo_name": "032-shi/pass-release-test",
"src_encoding": "UTF-8",
"text": "# coding: UTF-8\nimport zipfile\n\ndef open_zip(password):\n with zipfile.ZipFile(\"sampleFolder.zip\",\"r\") as zip_file:\n try: #まずは、tryの処理が行われる。\n zip_file.extractall(path=\"./unZipFolder\", pwd=password.encode()) #path=に解凍後のファイルを保存するフォルダーを指定する\n print(\"zipを解凍できました\")\n print(\"パスワード:{}\".format(password))\n exit()\n except Exception as e: #例外発生時に行う処理\n pass #例外発生後に何も処理を行わない\n #print(e)\n #print(\"zipを開けませんでした\")\n\ndef main():\n numbers = list(range(10))\n #print(numbers)\n my_passwords = []\n\n for a in numbers: #1桁目の数字の生成\n for b in numbers: #2桁目の数字の生成\n for c in numbers: #3桁目の数字の生成\n for d in numbers: #4桁目の数字の生成\n prediction_password = str(a) + str(b) + str(c) + str(d)\n print(prediction_password)\n open_zip(password=prediction_password)\n\nif __name__ == \"__main__\":\n main()"
}
] | 1 |
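The four nested loops in `main.py` above enumerate every candidate from "0000" to "9999"; an equivalent, shorter enumeration with the stdlib (sketch only):

```python
from itertools import product

candidates = ("".join(digits) for digits in product("0123456789", repeat=4))
assert next(candidates) == "0000"   # same order as the nested loops
```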
donsol007/EmailVeifier | https://github.com/donsol007/EmailVeifier | 3d436fd110d22511050dc5233a7022a801db7187 | ada4abecb6144404decac7bc2c3386968f26343c | 11d1541f8ebedb01a87a707848207048b366f447 | refs/heads/main | 2023-01-30T12:13:25.535278 | 2020-12-16T09:22:24 | 2020-12-16T09:22:24 | 321,930,153 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5512920618057251,
"alphanum_fraction": 0.5669537782669067,
"avg_line_length": 28.452381134033203,
"blob_id": "04f4f9d61946dce2ea360f985e5db9d4e919de72",
"content_id": "e43f0ee81ed373234f3d47c7ac852a47cda6b5cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1277,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 42,
"path": "/verify.py",
"repo_name": "donsol007/EmailVeifier",
"src_encoding": "UTF-8",
"text": "import re\r\nimport dns.resolver\r\nimport socket\r\nimport smtplib\r\n\r\n#Email address to verify is store in the array email_address\r\nemail_address = ['[email protected]','[email protected]','[email protected]','[email protected]', '[email protected]']\r\ndef validate_email(email):\r\n match = re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$', email)\r\n if (match !=None):\r\n return email\r\n\r\ndef check_email(email):\r\n try:\r\n # SMTP lib setup (use debug level for full output)\r\n server = smtplib.SMTP()\r\n server.set_debuglevel(0)\r\n\r\n # SMTP Conversation\r\n host = email.split('@')[1]\r\n server.connect(mxRecord)\r\n server.helo(host)\r\n server.mail('change_me_to_email address that can send message')\r\n code, message = server.rcpt(str(email))\r\n server.quit()\r\n # Assume 250 as Success\r\n if code == 250:\r\n return email\r\n except:\r\n pass\r\n\r\nfor email in email_address:\r\n try:\r\n validated_email = validate_email(email)\r\n domain = validated_email.split('@')[1]\r\n result = dns.resolver.resolve(domain, 'MX')\r\n mxRecord = result[0].exchange\r\n mxRecord = str(mxRecord)\r\n #print(mxRecord)\r\n print(check_email(validated_email))\r\n except:\r\n pass"
}
] | 1 |
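A tiny MX-lookup sketch with dnspython, the same `dns.resolver.resolve` call the script above relies on; the domain here is a placeholder:

```python
import dns.resolver

answers = dns.resolver.resolve("example.com", "MX")
for rdata in answers:
    print(rdata.preference, rdata.exchange)  # priority and mail host
```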
arunpogula/Dvara_project | https://github.com/arunpogula/Dvara_project | 088165ab9c84e0a637ebb8f2486b2a001b67bdaa | 2fa67a75c77bb9bebcec3bd7174e2c1ee480bba3 | 4cdb928a3f3ca0afd60db943bd6557c0b69e228e | refs/heads/master | 2023-03-11T09:25:35.762921 | 2021-02-28T07:27:18 | 2021-02-28T07:27:18 | 343,063,467 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5440053343772888,
"alphanum_fraction": 0.5609431862831116,
"avg_line_length": 37.126583099365234,
"blob_id": "cb881aba00bb702c201bb3d52b37b7617dd5270d",
"content_id": "be5b6ccb8f451c5e63da63abe3fb8ebe27e5bdb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3011,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 79,
"path": "/Taskapp/templates/index.html",
"repo_name": "arunpogula/Dvara_project",
"src_encoding": "UTF-8",
"text": "<html>\n\n<head>\n <title>Dvara Django Task</title>\n <link href=\"//maxcdn.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap.min.css\" rel=\"stylesheet\" id=\"bootstrap-css\" />\n <script src=\"//maxcdn.bootstrapcdn.com/bootstrap/4.1.1/js/bootstrap.min.js\"></script>\n <style>\n body {\n padding: 50px;\n background-color: #fcfcfc;\n color: #000;\n }\n \n .center-box {\n margin: 0 auto;\n max-width: 600px;\n height: 170px;\n background-color: #ddd;\n border-radius: 3px;\n }\n </style>\n</head>\n\n<body class=\"center-box\">\n <h1>Dvara Task</h1>\n\n {% if form %}\n <h3>Provide the data to the database by uploading the file</h3>\n <p>Functionality:-</p>\n <ul>\n <li>Read the give Excel Sheet</li>\n <li>check table exist or not for the given sheet names. if yes</li>\n <li>load the data in there respective tables</li>\n <li>\n Right now data duplication is allowed because of to test multi load of the same data. But relationship won't disturb\n </li>\n <li>Dropdown functionality was implement by the ajax call</li>\n </ul>\n <p>Read the given Excel sheet</p>\n <form method=\"POST\" , enctype=\"multipart/form-data\">\n {% csrf_token %} {{form}}\n <button>submit</button>\n </form>\n <select name=\"course\" id=\"select2\" data-fees-url=\"{% url 'sub_category_list' %}\" required class=\"form-control\">\n <option value=\"\">Select category</option>\n {% for course in category %}\n <option value=\"{{course.id}}\">{{course.category_name}}</option>\n {% endfor %}\n </select>\n <select name=\"total_fee\" id=\"select3\" required class=\"form-control\">\n <option value=\"\">Select Fee</option>\n </select> {%elif data %}\n\n <p>sucessful</p>\n <input id=\"btntest\" type=\"button\" value=\"refresh\" onclick=\"window.location.href = 'http://127.0.0.1:8000/'\" /> {%else%} data\n <p>failure</p>\n <input id=\"btntest\" type=\"button\" value=\"refresh\" onclick=\"window.location.href = 'http://127.0.0.1:8000/'\" /> {%endif%}\n <script src=\"https://code.jquery.com/jquery-3.3.1.min.js\"></script>\n <script>\n $(\"#select2\").change(function() {\n var url = $(\"#select2\").attr(\"data-fees-url\"); // get the url of the ajax_load_course_fees view\n var cat_id = $(this).val(); // get the selected course pk from the HTML input\n\n $.ajax({\n // initialize an AJAX request\n url: url, // set the url of the request\n data: {\n cat_id: cat_id, // add the course pk to the GET parameters\n },\n success: function(data) {\n // `data` is the return of the `ajax_course_fees` view function\n $(\"#select3\").html(data); // replace the contents of the fees select with the data that came from the server\n },\n });\n });\n </script>\n</body>\n\n</html>"
},
{
"alpha_fraction": 0.6577777862548828,
"alphanum_fraction": 0.6577777862548828,
"avg_line_length": 27.125,
"blob_id": "420b8d84d47daed8bc8700da0b3e7db8904873a0",
"content_id": "8045daac5a128958cb6d0ce6f782ce644590027a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 8,
"path": "/Taskapp/urls.py",
"repo_name": "arunpogula/Dvara_project",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views import Home, sub_category_list\n\nurlpatterns = [\n path('', Home.as_view(), name=\"home\"),\n path('ajax/sub_category_list/', sub_category_list,\n name='sub_category_list'),\n]\n"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 21,
"blob_id": "b9f651a12e8395253ff97b262beec8c620582ff9",
"content_id": "e45fdcb9b7400ca08a47323391652f4395d18d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 6,
"path": "/Taskapp/forms.py",
"repo_name": "arunpogula/Dvara_project",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom .models import Category, SubCategory\n\n\nclass UploadFileForm(forms.Form):\n file = forms.FileField()\n"
},
{
"alpha_fraction": 0.5742056369781494,
"alphanum_fraction": 0.5771962404251099,
"avg_line_length": 34.66666793823242,
"blob_id": "86135a21daf5c3570179570438239275b76b54c9",
"content_id": "b8a05a44959ecc75f2dedc41c221f4fde4204141",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2675,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 75,
"path": "/Taskapp/views.py",
"repo_name": "arunpogula/Dvara_project",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, HttpResponse\nfrom django.views import View\nfrom .forms import UploadFileForm\nfrom .models import *\nimport pandas as pd\nfrom django.db import connection\nfrom django.conf import settings\n# Create your views here.\n\n\ndef sub_category_list(request):\n sub_category_list = SubCategory.objects.filter(\n cat_id=request.GET.get('cat_id')).values_list('cat_id', 'subCategory_name')\n\n # generate an html template for the specific option\n html_code = \"\"\n for sub_category in sub_category_list:\n var = f\"<option value = '{sub_category[0]}' > {sub_category[1]} </option>\"\n html_code += var\n\n return HttpResponse(html_code)\n\n\nclass Home(View):\n form_obj = UploadFileForm\n\n def inserting_rows(self, file_data):\n tables_list = connection.introspection.table_names()\n df = pd.ExcelFile(file_data)\n sheet_names = df.sheet_names\n\n all_tables_exist = False\n for sheet in sheet_names:\n if sheet.lower() in tables_list:\n all_tables_exist = True\n else:\n all_tables_exist = False\n\n if all_tables_exist:\n try:\n df1 = pd.read_excel(df, 'category')\n df2 = pd.read_excel(df, 'subcategory', names=[\n 'id', 'subcategory'])\n dff = pd.merge(df1, df2, on=\"id\")\n category_obj = Category()\n for i in dff.categories.unique():\n Category.objects.create(category_name=i)\n Category.save\n filter_data = dff.loc[dff['categories'] == i]\n for j in filter_data.subcategory.unique():\n SubCategory.objects.create(\n cat_id=Category.objects.latest('id'), subCategory_name=j)\n SubCategory.save\n return \"Data loaded sucessfully\"\n except Exception as error:\n return f\"Error arise while loading the data,{error}\"\n\n else:\n return \"Please check the sheet names, no tables are present \"\n\n return \"Data was not loaded\"\n\n def get(self, request):\n category_data = Category.objects.all()\n return render(request, 'index.html', {'form': self.form_obj(), 'category': category_data})\n\n def post(self, request):\n form = self.form_obj(request.POST, request.FILES)\n\n if form.is_valid():\n file_data = request.FILES['file']\n resp = self.inserting_rows(file_data)\n return render(request, 'index.html', {\"data\": resp})\n\n return HttpResponse(\"<h1>not working</h1>\")\n"
},
{
"alpha_fraction": 0.6563106775283813,
"alphanum_fraction": 0.6660194396972656,
"avg_line_length": 20.45833396911621,
"blob_id": "4afd46b373a283abdd6d776ca5c1fc494b541ef7",
"content_id": "e734ae15f3e869177c3d43dead532e73bbae4a3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 24,
"path": "/Taskapp/models.py",
"repo_name": "arunpogula/Dvara_project",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\n\nclass Category(models.Model):\n category_name = models.CharField(max_length=50)\n\n def __str__(self):\n return self.category_name\n\n class Meta:\n db_table = \"category\"\n\n\nclass SubCategory(models.Model):\n cat_id = models.ForeignKey(Category, on_delete=models.CASCADE)\n subCategory_name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.subCategory_name\n\n class Meta:\n db_table = \"subcategory\"\n"
}
] | 5 |
arzavj/detectCV | https://github.com/arzavj/detectCV | 1eac637d70ff5a9ba5a5a384dc0e0f914149d70c | f328a1f8ef8f1ebff320cae2b8e0a14500ca2729 | 32031a874b396476dd31d09bb96b15dee8bdd185 | refs/heads/master | 2020-04-12T11:55:06.469188 | 2015-03-16T23:03:15 | 2015-03-16T23:03:15 | 30,740,416 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7420381903648376,
"alphanum_fraction": 0.7452229261398315,
"avg_line_length": 15.526315689086914,
"blob_id": "4eba56fefd34c62a7bcf93ab98774c0747bac242",
"content_id": "b95c7b2d62b59563870725172c739989004cec8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 19,
"path": "/ObjectExtractor.h",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#ifndef OBJECTEXTRACTOR_H\n#define OBJECTEXTRACTOR_H\n#pragma once\n\n#include <opencv2/opencv.hpp>\n\nusing namespace cv;\nusing namespace std;\n\nclass ObjectExtractor\n{\npublic:\n ObjectExtractor();\n vector<Rect> extractBoxes(Mat frame);\nprivate:\n vector<Rect> cluster(Mat frame);\n};\n\n#endif // OBJECTEXTRACTOR_H\n"
},
{
"alpha_fraction": 0.5678954720497131,
"alphanum_fraction": 0.6126924753189087,
"avg_line_length": 31.469696044921875,
"blob_id": "c293d9e83a02d2bc6c1d20521577ed94afb7bf12",
"content_id": "03101566423a0ba000905eaf0cddad5ecfc3de3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2143,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 66,
"path": "/tools/download_imagenet_data.py",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "import tarfile\nimport urllib\nimport os\nimport argparse\n\npwd = os.path.abspath(os.path.dirname(__file__))\ntmp_folder = pwd + '/../tmp/'\ntmp_tars = tmp_folder + 'tarfiles/'\nbase_image_url = \"http://www.image-net.org/download/synset?wnid=%s&username=%s&accesskey=%s&release=latest&src=stanford\"\nbase_mapping_url = \"http://www.image-net.org/api/text/imagenet.synset.geturls.getmapping?wnid=%s\"\nlabel_wnids = {\n 'bike': ['n02834778', 'n03792782', 'n03853924', 'n04126066'],\n 'bus': ['n04212165', 'n04146614'],\n 'truck': ['n03256166', 'n03417042', 'n04467665'],\n 'skateboard': ['n04225987'],\n 'stroller': ['n02766534'],\n 'car': ['n02958343']\n}\n\ndef download_images(wnid):\n url = base_image_url % wnid\n local_filename = tmp_tars + wnid + '.tar'\n try:\n urllib.urlretrieve(url, local_filename)\n except Exception,e:\n print \"Error downloading tar file at url: %s\" % url\n print e\n return\n \n # tar file path to extract\n extractTarPath = tmp_folder\n \n # open the tar file\n tfile = tarfile.open(local_filename)\n \n if tarfile.is_tarfile(local_filename):\n # extract all contents\n tfile.extractall(extractTarPath)\n else:\n print local_filename + \" is not a tarfile.\"\n\ndef download_mappings(wnid, label):\n url = base_mapping_url % wnid\n local_filename = pwd + '/' + label + '_' + wnid + '_imageurls.txt'\n try:\n urllib.urlretrieve(url, local_filename)\n except Exception,e:\n print \"Error downloading mappings file at url: %s\" % url\n print e\n\ndef main(args):\n for label, wnids in label_wnids.items():\n for wnid in wnids:\n if args.images:\n download_images(wnid)\n if args.mappings:\n download_mappings(wnid, label)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--images\", help=\"download imagenet images\",\n action=\"store_true\")\n parser.add_argument(\"-m\", \"--mappings\", help=\"download image url mappings\",\n action=\"store_true\")\n args = parser.parse_args()\n main(args)\n"
},
{
"alpha_fraction": 0.7487046718597412,
"alphanum_fraction": 0.7512953281402588,
"avg_line_length": 19.3157901763916,
"blob_id": "107772cf088c9c5ee7eb4653aadd8099a44837ec",
"content_id": "a44f80b1bd24fd293ccab1467e262e23e9449552",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 19,
"path": "/nonmaxsuppression.h",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#ifndef NONMAXSUPPRESSION_H\n#define NONMAXSUPPRESSION_H\n\n#include <vector>\n#include <utility>\n#include <tuple>\n#include \"opencv2/core/core.hpp\"\n\nusing namespace std;\nusing namespace cv;\n\nclass NonMaxSuppression\n{\npublic:\n NonMaxSuppression();\n vector<tuple<Rect, float, int>> suppress(vector<Rect> windows, vector<pair<float, int>> scoreLabels);\n};\n\n#endif // NONMAXSUPPRESSION_H\n"
},
{
"alpha_fraction": 0.7301587462425232,
"alphanum_fraction": 0.7349206209182739,
"avg_line_length": 20.724138259887695,
"blob_id": "7fdf22dbd55300f1ce86e3a3698fabb4ec57b3de",
"content_id": "a31c004772346d62337033c35ad05de6f56efd95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 630,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 29,
"path": "/latte.h",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#ifndef LATTE_H\n#define LATTE_H\n\n#include <string>\n#include <vector>\n\n#include \"caffe/caffe.hpp\"\n#include \"caffe/util/io.hpp\"\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n\nusing namespace caffe;\nusing namespace cv;\nusing namespace std;\n\nclass Latte\n{\npublic:\n Latte(bool useGPU, string modelFilename, string weightsFilename);\n vector<float> getScores(Mat frame);\n pair<float, int> getMaxScoreAndLabel(Mat frame);\n vector<pair<float, int> > getScoresAndLabels(Mat frame, vector<Rect> windows);\n\nprivate:\n Net<float> *caffe_net;\n};\n\n#endif // LATTE_H\n"
},
{
"alpha_fraction": 0.566150426864624,
"alphanum_fraction": 0.5904867053031921,
"avg_line_length": 33.372623443603516,
"blob_id": "dd134ea84c8ae5c961ceb6f4e32eb74566d922f2",
"content_id": "0742dd5f53b26de24db49e2a2f504856aef9da48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9040,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 263,
"path": "/ObjectExtractor.cpp",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#include \"ObjectExtractor.h\"\n#include <math.h>\n#include <unordered_set>\n\n#define SW_H 200 // bike\n#define SW_W 120 // bike\n//#define SW_H 150 // train station\n//#define SW_W 80 // train station\n#define SW_A SW_H*SW_W\n#define THRESH 0.1\n#define THRESH_NUMPX (int)(THRESH*SW_A)\n#define DX 30\n#define DY 10\n\n#define BOUNDING_BOX_AREA_THRESH 1600\n#define NUM_WINDOW_SIZES 5\n\nstruct SW {\n static const pair<int, int> SIZES[NUM_WINDOW_SIZES];\n};\n\nconst pair<int, int> SW::SIZES[NUM_WINDOW_SIZES] = {\n make_pair(40, 75), // small pedestrian\n make_pair(80, 150), // big pedestrian\n make_pair(170, 170), // bike\n make_pair(80, 180), // skateboarder\n make_pair(300, 100) // car\n};\n\nstruct box{\n int minY, maxY, minX, maxX;\n};\n\nObjectExtractor::ObjectExtractor()\n{\n\n}\n\nstruct Point2iHash {\n size_t operator() (const Point2i &p) const {\n return p.x+7919*p.y;\n }\n};\n\nstatic void unordered_set_intersect(unordered_set<Point2i, Point2iHash> &out,\n const unordered_set<Point2i, Point2iHash> &in1, const unordered_set<Point2i, Point2iHash> &in2)\n{\n if (in2.size() < in1.size()) {\n unordered_set_intersect(out, in2, in1);\n return;\n }\n for (unordered_set<Point2i, Point2iHash>::const_iterator it = in1.begin(); it != in1.end(); it++)\n {\n if (in2.find(*it) != in2.end())\n out.insert(*it);\n }\n}\n\nstatic bool hasSufficientWhitePixels(Point2i& topLeft, unordered_set<Point2i, Point2iHash>* rowWhites,\n int numRows, unordered_set<Point2i, Point2iHash>* colWhites,\n int numCols){\n int lastWindowCol = topLeft.x+SW_W;\n int lastWindowRow = topLeft.y+SW_H;\n if (topLeft.x < 0 || topLeft.y < 0 || lastWindowCol >= numCols || lastWindowRow >= numRows) {\n return false;\n }\n\n unordered_set<Point2i, Point2iHash> colPixels;\n unordered_set<Point2i, Point2iHash> whitePixels;\n for (int col = topLeft.x; col < lastWindowCol; col++){\n colPixels.insert(colWhites[col].begin(), colWhites[col].end());\n }\n for (int row = topLeft.y; row < lastWindowRow; row++){\n unordered_set_intersect(whitePixels, rowWhites[row], colPixels);\n }\n\n return whitePixels.size() > THRESH_NUMPX;\n\n}\n\nstatic vector<Rect> extractBoxesSlow(Mat frame) {\n unordered_set<Point2i, Point2iHash>* rowWhites = new unordered_set<Point2i, Point2iHash>[frame.rows];\n unordered_set<Point2i, Point2iHash>* colWhites = new unordered_set<Point2i, Point2iHash>[frame.cols];\n vector<int>* rowWhiteVectors = new vector<int>[frame.rows];\n for (int y = 0; y < frame.rows; y++){\n for (int x = 0; x < frame.cols; x++){\n uchar pxVal = frame.at<uchar>(y, x);\n\n if (pxVal > 200) {\n Point2i pt(x, y);\n rowWhites[y].insert(pt);\n colWhites[x].insert(pt);\n rowWhiteVectors[y].push_back(x);\n }\n }\n }\n\n vector<Rect> rects;\n for (int y = 0; y < frame.rows; y += DY){\n for (int i = 0; i < rowWhiteVectors[y].size(); i += DY) {\n Point2i top(rowWhiteVectors[y][i], y);\n for (int dx = 0; dx < SW_W; dx += DX) {\n Point2i topLeft(top.x - dx, top.y);\n if (hasSufficientWhitePixels(topLeft, rowWhites, frame.rows, colWhites, frame.cols)){\n Point2i bottomRight(topLeft.x + SW_W, topLeft.y + SW_H);\n rects.push_back(Rect(topLeft, bottomRight));\n }\n }\n }\n }\n\n delete [] rowWhites;\n delete [] colWhites;\n delete [] rowWhiteVectors;\n\n return rects;\n}\n\nstatic void getSlidingWindows(Rect boundingBox, vector<Rect>& slidingWindows, Size frameSize) {\n Point topLeft = boundingBox.tl();\n Point bottomRight = boundingBox.br();\n\n for (int i = 0; i < NUM_WINDOW_SIZES; i++) {\n int width = SW::SIZES[i].first;\n int height = 
SW::SIZES[i].second;\n int y = topLeft.y;\n do {\n int x = topLeft.x;\n do {\n if ((x + width) < frameSize.width && (y + height) < frameSize.height) {\n slidingWindows.push_back(Rect(x, y, width, height));\n }\n x += DX;\n } while(x <= (bottomRight.x - width));\n y += DY;\n } while(y <= (bottomRight.y - height));\n }\n}\n\nstatic vector<Rect> extractContourBoxes(Mat frame) {\n vector<vector<Point> > contours;\n vector<Vec4i> hierarchy;\n\n Mat blur_out;\n GaussianBlur(frame, blur_out, Size(1,1), 2.0, 2.0);\n\n blur_out = blur_out >= 200;\n int erosion_size = 1;\n Mat element = getStructuringElement(MORPH_RECT, Size( 2*erosion_size + 1, 2*erosion_size+1 ));\n\n Mat morphedFrame;\n morphologyEx(blur_out, morphedFrame, MORPH_CLOSE, element);\n// dilate(blur_out, morphedFrame, element, Point(-1, -1), 1);\n// erode(morphedFrame, morphedFrame, element, Point(-1, -1), 2);\n// dilate(morphedFrame, morphedFrame, element, Point(-1, -1), 1);\n\n findContours(morphedFrame, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );\n\n // Approximate contours to get bounding rects\n vector<vector<Point> > contours_poly( contours.size() );\n vector<Rect> boundRects( contours.size() );\n\n for( int i = 0; i < contours.size(); i++ )\n {\n approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );\n boundRects[i] = boundingRect( Mat(contours_poly[i]) );\n }\n\n vector<Rect> slidingWindows;\n for (Rect boundingBox: boundRects) {\n// rectangle(morphedFrame, boundingBox, Scalar(255, 255, 255));\n// Size s = boundingBox.size();\n// char text[21];\n// sprintf(text, \"%d x %d\", s.width, s.height);\n// putText(morphedFrame, text, boundingBox.tl(), FONT_HERSHEY_SIMPLEX, 0.1, Scalar(255, 255, 255));\n if (boundingBox.area() > BOUNDING_BOX_AREA_THRESH) {\n getSlidingWindows(boundingBox, slidingWindows, frame.size());\n rectangle(frame, boundingBox, Scalar(255, 255, 255));\n cout << \"BB of size = \" << boundingBox.size().width << \", \" << boundingBox.size().height << endl;\n }\n }\n imshow(\"BGS Frame\", frame);\n return slidingWindows;\n}\n\nvector<Rect> ObjectExtractor::extractBoxes(Mat frame) {\n // return extractBoxesSlow(frame);\n return extractContourBoxes(frame);\n}\n\n\nvector<Rect> ObjectExtractor::cluster(Mat frame){\n // First allocate a vector of all the active points\n vector<Vec2i> whitePoints;\n for (int y = 0; y < frame.rows; y++){\n for (int x = 0; x < frame.cols; x++){\n uchar pxVal = frame.at<uchar>(y, x);\n if (pxVal > 200) {\n Vec2i pt(x, y);\n whitePoints.push_back(pt);\n }\n }\n }\n\n vector<Rect> rects;\n int clusterCount = 6;\n if (whitePoints.size() < clusterCount) {\n std::cout << \"Too few white points to cluster! 
\\n\";\n return rects;\n }\n\n // Construct a Mat with all the points\n Mat samples(whitePoints.size(), 2, CV_32F);\n for (int i = 0; i < whitePoints.size(); i++){\n //samples.at<Vec2b>(i) = whitePoints[i];\n int x = whitePoints[i][0];\n samples.at<float>(i, 0) = whitePoints[i][0];\n samples.at<float>(i, 1) = whitePoints[i][1];\n }\n\n // Perform k-means\n Mat clusterAssignments;\n int attempts = 2;\n Mat centers;\n double error = kmeans(samples, clusterCount, clusterAssignments, TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers);\n\n /*\n double prevError = 0;\n for (clusterCount = 1; clusterCount < 10; clusterCount++){ //sketchy, using MLPack later\n Mat newClusterAssignments;\n double error = kmeans(samples, clusterCount, newClusterAssignments, TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers);\n error = sqrt(error / whitePoints.size());\n cout << clusterCount << \" \" << error << \" \" << prevError / error << endl;\n if (prevError / error != 0.0 && prevError / error < 2.0) break;\n prevError = error;\n clusterAssignments = newClusterAssignments;\n }*/\n\n vector<box> boxes(clusterCount);\n for (int i = 0; i < clusterCount; i++){\n boxes[i].minY = frame.rows;\n boxes[i].maxY = 0;\n boxes[i].minX = frame.cols;\n boxes[i].maxX = 0;\n }\n for (int i = 0; i < whitePoints.size(); i++){\n int clusterId = clusterAssignments.at<int>(i, 0);\n Vec2i pt = whitePoints[i];\n int x = pt[0];\n int y = pt[1];\n if (y < boxes[clusterId].minY) boxes[clusterId].minY = y;\n if (x < boxes[clusterId].minX) boxes[clusterId].minX = x;\n if (y > boxes[clusterId].maxY) boxes[clusterId].maxY = y;\n if (x > boxes[clusterId].maxX) boxes[clusterId].maxX = x;\n }\n\n for (box b : boxes){\n Point2i minPt(b.minX, b.minY);\n Point2i maxPt(b.maxX, b.maxY);\n rects.push_back(Rect(minPt, maxPt));\n }\n return rects;\n}\n"
},
{
"alpha_fraction": 0.624482274055481,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 34.04838562011719,
"blob_id": "47c52d54dd0c8dd431b918f5712b1bc91a617fc2",
"content_id": "6717227fc9cef34e797febb2922b298af2d17375",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2173,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 62,
"path": "/nonmaxsuppression.cpp",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#include \"nonmaxsuppression.h\"\n#include <algorithm>\n#include <numeric>\n\n#define OVERLAP_THRESH 0.5\n#define SCORE_THRESH 0.8\n\n/*\n * Obtained from\n * http://stackoverflow.com/questions/17074324/how-can-i-sort-two-vectors-in-the-same-way-with-criteria-that-uses-only-one-of */\n\nstatic vector<int> sort_permutation(vector<pair<float, int>> const& scoreLabels)\n{\n vector<int> p(scoreLabels.size());\n iota(p.begin(), p.end(), 0);\n sort(p.begin(), p.end(), [&](int i, int j){ return scoreLabels[i].first > scoreLabels[j].first; });\n return p;\n}\n\nstatic vector<Rect> apply_permutation(vector<Rect> const& windows, vector<int> const& sortedIndices)\n{\n vector<Rect> sortedWindows(sortedIndices.size());\n transform(sortedIndices.begin(), sortedIndices.end(), sortedWindows.begin(),\n [&](int i){ return windows[i]; });\n return sortedWindows;\n}\n\nstatic double getOverlap(Rect w1, Rect w2)\n{\n int intersectionArea = (w1 & w2).area();\n int unionArea = w1.area() + w2.area() - intersectionArea;\n return ((double) intersectionArea) / unionArea; //TODO: pick smaller or bigger area?\n}\n\nNonMaxSuppression::NonMaxSuppression()\n{\n}\n\nvector<tuple<Rect, float, int>> NonMaxSuppression::suppress(vector<Rect> windows,\n vector<pair<float, int>> scoreLabels)\n{\n vector<int> sortedIndices = sort_permutation(scoreLabels);\n vector<Rect> sortedWindows = apply_permutation(windows, sortedIndices);\n vector<bool> keepWindows(sortedWindows.size(), true);\n vector<tuple<Rect, float, int>> result;\n\n for (int i = 0; i < sortedWindows.size(); i++) {\n int sortedIdx = sortedIndices[i];\n if (!keepWindows[i] || scoreLabels[sortedIdx].first < SCORE_THRESH) {\n continue;\n }\n for (int j = i + 1; j < sortedWindows.size(); j++) {\n if (getOverlap(sortedWindows[i], sortedWindows[j]) > OVERLAP_THRESH) {\n keepWindows[j] = false;\n }\n }\n result.push_back(make_tuple(sortedWindows[i], scoreLabels[sortedIdx].first,\n scoreLabels[sortedIdx].second));\n }\n\n return result;\n}\n"
},
{
"alpha_fraction": 0.597091555595398,
"alphanum_fraction": 0.6073567271232605,
"avg_line_length": 25.56818199157715,
"blob_id": "0f2e92990e87790856cfe8ed63d892b47335ab71",
"content_id": "f95382e3d2970bbe0ee10fb9bd55b809124d7816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 44,
"path": "/tools/multiList.py",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport os\nimport numpy as np\nimport re\nnTest = 100\nnVal = 100\nprint ('Using nTest = %d, nVal = %d\\n' % (nTest, nVal))\npwd = os.getcwd()\nf_train = open('train_listfile.txt', 'wb')\nf_test = open('test_listfile.txt', 'wb')\nf_val = open('val_listfile.txt', 'wb')\n\nlabels = ['bike', 'bus', 'car', 'truck', 'others', 'pedestrian', 'skateboard', 'stroller']\ncounts = [0]*len(labels)\n\nfor file in os.listdir(\".\"):\n for i in xrange(len(labels)):\n if file.startswith(labels[i]):\n counts[i] += 1\n break\n\ntestSets = []\nvalSets = []\nfor i in xrange(len(labels)):\n perms = np.random.permutation(counts[i])+1\n testSets.append(perms[0: nTest])\n valSets.append(perms[nTest: nTest+nVal])\n\nassignCounts = [0]*len(labels)\nregex = re.compile(r'\\d+')\nfor file in os.listdir(\".\"):\n for i in xrange(len(labels)):\n if file.startswith(labels[i]):\n num = int(regex.findall(file)[0])\n if num in testSets[i]:\n f_test.write('%s %d\\n' % (file, i))\n elif num in valSets[i]:\n f_val.write('%s %d\\n' % (file, i))\n else:\n f_train.write('%s %d\\n' % (file, i))\n\nf_test.close()\nf_val.close()\nf_train.close()\n"
},
{
"alpha_fraction": 0.7262569665908813,
"alphanum_fraction": 0.7411545515060425,
"avg_line_length": 27.263158798217773,
"blob_id": "1a0729e7763d8d57211907634022305b0cceedf2",
"content_id": "017a135fd983b7f6fd038133d92297dfbb15771b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 38,
"path": "/BGSConvertor.h",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "//\n// bgs_convert.h\n// OpenCVTutorial2\n//\n// Created by Chung Yu Wang on 2/10/15.\n// Copyright (c) 2015 Jacky Wang. All rights reserved.\n//\n\n#ifndef __OpenCVTutorial2__bgs_convert__\n#define __OpenCVTutorial2__bgs_convert__\n\n#include <stdio.h>\n#include \"opencv2/highgui/highgui.hpp\"\n#include \"opencv2/imgproc/imgproc.hpp\"\n#include \"opencv2/core/core.hpp\"\n#include \"package_bgs/FrameDifferenceBGS.h\"\n#include \"package_bgs/WeightedMovingMeanBGS.h\"\n#include \"package_bgs/WeightedMovingVarianceBGS.h\"\n#include \"package_bgs/AdaptiveBackgroundLearning.h\"\n#include \"package_bgs/AdaptiveSelectiveBackgroundLearning.h\"\n#include \"package_bgs/dp/DPEigenbackgroundBGS.h\"\n#include \"package_bgs/dp/DPZivkovicAGMMBGS.h\"\n\nusing namespace cv;\n\nclass BGSConvertor {\nprivate:\n string bgs_class;\n IBGS *bgs;\npublic:\n BGSConvertor(std::string bgs_class);\n ~BGSConvertor();\n \n VideoCapture convert_video(VideoCapture capture, std::string store_path);\n Mat convert_image(Mat frame, Mat img_bkgmodel, std::string store_path);\n};\n\n#endif /* defined(__OpenCVTutorial2__bgs_convert__) */\n"
},
{
"alpha_fraction": 0.802395224571228,
"alphanum_fraction": 0.802395224571228,
"avg_line_length": 166,
"blob_id": "e4e944179c6d3ebaafb6551fdc66c2e78ff158b9",
"content_id": "65b6c6a125568bb4c130acf1a7a7b0d2b2f12207",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 1,
"path": "/data/README.txt",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "The folder is tracked in git only to be a placeholder. All images and videos in this directory should not be version controlled. Please use other means to share data.\n"
},
{
"alpha_fraction": 0.6564885377883911,
"alphanum_fraction": 0.8396946787834167,
"avg_line_length": 64.625,
"blob_id": "767781c5ee4881f68241131f628064b7af100526",
"content_id": "2f744e11e26fd14e2fc694583f7254ec44a5e28d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 524,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 8,
"path": "/tools/rename_mappings.sh",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/bash\n\ncat truck_n03256166_imageurls.txt truck_n03417042_imageurls.txt truck_n04467665_imageurls.txt > truck_imageurls.txt\nmv stroller_n02766534_imageurls.txt stroller_imageurls.txt\nmv skateboard_n04225987_imageurls.txt skateboard_imageurls.txt\nmv car_n02958343_imageurls.txt car_imageurls.txt\ncat bus_n04146614_imageurls.txt bus_n04212165_imageurls.txt > bus_imageurls.txt\ncat bike_n02834778_imageurls.txt bike_n03792782_imageurls.txt bike_n03853924_imageurls.txt bike_n04126066_imageurls.txt > bike_imageurls.txt"
},
{
"alpha_fraction": 0.5885084271430969,
"alphanum_fraction": 0.596053421497345,
"avg_line_length": 27.71666717529297,
"blob_id": "88fb113fec7d95161b075ae372d01dc6fa34e76e",
"content_id": "aad5960dbd912101d90fbc0fc6b09c4b68cc2204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3446,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 120,
"path": "/BGSConvertor.cpp",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "//\n// bgs_convert.cpp\n// OpenCVTutorial2\n//\n// Created by Chung Yu Wang on 2/10/15.\n// Copyright (c) 2015 Jacky Wang. All rights reserved.\n//\n\n#include \"BGSConvertor.h\"\n#include <assert.h>\n\nBGSConvertor::BGSConvertor(std::string bgs_class) {\n assert(bgs_class.length() != 0);\n this->bgs_class = bgs_class;\n if (bgs_class.compare(\"FrameDifferenceBGS\") == 0) {\n bgs = new FrameDifferenceBGS();\n return;\n }\n \n if (bgs_class.compare(\"WeightedMovingVarianceBGS\") == 0) {\n bgs = new WeightedMovingVarianceBGS();\n return;\n }\n \n if (bgs_class.compare(\"WeightedMovingMeanBGS\") == 0) {\n bgs = new WeightedMovingMeanBGS();\n return;\n }\n \n if (bgs_class.compare(\"AdaptiveBackgroundLearning\") == 0) {\n bgs = new AdaptiveBackgroundLearning();\n return;\n }\n \n if (bgs_class.compare(\"AdaptiveSelectiveBackgroundLearning\") == 0) {\n bgs = new AdaptiveSelectiveBackgroundLearning();\n return;\n }\n\n if (bgs_class.compare(\"DPEigenbackgroundBGS\") == 0) {\n bgs = new DPEigenbackgroundBGS();\n return;\n }\n \n if (bgs_class.compare(\"DPZivkovicAGMMBGS\") == 0) {\n bgs = new DPZivkovicAGMMBGS();\n return;\n }\n\n std::cout << \"No matching BGS Type specified in bgs_convert constructor\\n\";\n}\n\nBGSConvertor::~BGSConvertor() {\n delete bgs;\n}\n\nVideoCapture BGSConvertor::convert_video(VideoCapture capture, std::string store_path) {\n std::cout << bgs_class << \" converting video, storing to \" << store_path << \"\\n\";\n \n if (!capture.isOpened()) {\n printf(\"input video not opened\\n\");\n return NULL;\n }\n \n // Intrinsic properties of input\n double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);\n double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);\n int fps = capture.get(CV_CAP_PROP_FPS);\n Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));\n int fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));\n \n // Create output video with same properties as input\n VideoWriter output_cap(store_path, CV_FOURCC('m', 'p', '4', 'v'), fps, frameSize, true);\n if(!output_cap.isOpened()) { // check if we succeeded\n printf(\"output video not opened\\n\");\n return NULL;\n }\n \n\n Mat frame;\n Mat imageGrey;\n Mat img_mask;\n Mat img_bkgmodel;\n while (1)\n {\n capture >> frame;\n if (!frame.data) break;\n bgs->process(frame, img_mask, img_bkgmodel);\n if (img_mask.data) {\n // Attempt to fix Mac OS gray scale not able to be written\n Mat imageArr[] {img_mask, img_mask, img_mask};\n merge(imageArr, 3, imageGrey);\n output_cap.write(imageGrey);\n }\n if (cvWaitKey(1) >= 0)\n break;\n }\n \n output_cap.release();\n\n std::cout << bgs_class << \" finished converting bgs video\\n\";\n return VideoCapture(store_path);\n}\n\nMat BGSConvertor::convert_image(Mat frame, Mat img_bkgmodel, std::string store_path) {\n Mat imageGrey;\n Mat img_mask;\n Mat img_bkgmodel1;\n if (frame.data) {\n bgs->process(frame, img_mask, img_bkgmodel1);\n if (img_mask.data) {\n // Attempt to fix Mac OS gray scale not able to be written\n Mat imageArr[] {img_mask, img_mask, img_mask};\n merge(imageArr, 3, imageGrey);\n imwrite(store_path, img_mask);\n }\n }\n \n return img_mask;\n}\n"
},
{
"alpha_fraction": 0.62269127368927,
"alphanum_fraction": 0.6306068897247314,
"avg_line_length": 34.092594146728516,
"blob_id": "faa56096045cafea3cc97c424979425315a45cc6",
"content_id": "263f9beb6dce9056cda7e6b72e887862848dc66c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1895,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 54,
"path": "/latte.cpp",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#include \"latte.h\"\n\nLatte::Latte(bool useGPU, string modelFilename, string weightsFilename)\n{\n Caffe::set_mode(Caffe::CPU);\n caffe_net = new Net<float>(modelFilename, TEST);\n caffe_net->CopyTrainedLayersFrom(weightsFilename);\n}\n\nvector<pair<float, int> > Latte::getScoresAndLabels(Mat originalFrame, vector<Rect> windows)\n{\n vector<pair<float, int> > scoreLabels(windows.size());\n for (int i = 0; i< windows.size(); i++) {\n scoreLabels[i] = getMaxScoreAndLabel(originalFrame(windows[i]));\n }\n return scoreLabels;\n}\n\npair<float, int> Latte::getMaxScoreAndLabel(Mat frame)\n{\n vector<float> scores = getScores(frame);\n float maxScore = 0.0;\n int mostLikelyLabel = -1;\n cout << \"Window of size = \" << frame.size().width << \", \" << frame.size().height << endl;\n for (int label = 0; label < scores.size(); label++) {\n if (scores[label] > maxScore) {\n mostLikelyLabel = label;\n maxScore = scores[label];\n }\n }\n cout << \"Label = \" << mostLikelyLabel << \" Score = \" << maxScore << endl;\n return make_pair(maxScore, mostLikelyLabel);\n}\n\nvector<float> Latte::getScores(Mat frame)\n{\n Mat resized;\n resize(frame, resized, Size(256, 256));\n// cout << \"resized_col = \" << resized.cols << \" resized_row = \" << resized.rows << endl;\n Datum datum;\n CVMatToDatum(resized, &datum);\n vector<Datum> datum_vector;\n datum_vector.push_back(datum);\n boost::dynamic_pointer_cast<MemoryDataLayer<float> >(caffe_net->layers()[0])->AddDatumVector(datum_vector);\n vector<Blob<float>* > bottom_vec;\n float iter_loss;\n const vector<Blob<float>*>& result = caffe_net->Forward(bottom_vec, &iter_loss);\n const float* result_vec = result[1]->cpu_data();\n vector<float> scores(result[1]->count());\n for (int k = 0; k < scores.size(); ++k) {\n scores[k] = result_vec[k];\n }\n return scores;\n}\n"
},
{
"alpha_fraction": 0.5996615886688232,
"alphanum_fraction": 0.6131979823112488,
"avg_line_length": 33.156070709228516,
"blob_id": "7f74b1001d175537ee7f754fd01459c2d8d51776",
"content_id": "6aa29410f5674155b0cfd43f2de737bd997e29ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5910,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 173,
"path": "/main.cpp",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <iomanip>\n#include \"opencv2/highgui/highgui.hpp\"\n#include \"opencv2/imgproc/imgproc.hpp\"\n#include \"opencv2/core/core.hpp\"\n#include \"BGSConvertor.h\"\n#include \"ObjectExtractor.h\"\n#include \"caffe/util/io.hpp\"\n#include \"caffe/caffe.hpp\"\n#include \"caffe/blob.hpp\"\n#include \"latte.h\"\n#include \"nonmaxsuppression.h\"\n//#define WRITE_TO_FILE\n\nusing namespace caffe;\nusing namespace cv;\nusing namespace std;\n\n/* Prototypes */\nvoid TestBGSVideoConvertor();\n\nconst int MAX_N_CLASSES = 9;\nString CLASS_LIST[2][MAX_N_CLASSES] = {{\"bike\", \"bus\", \"car\", \"dog\", \"motorbike\", \"others\", \"pedestrian\", \"skateboard\", \"stroller\"}, {\"pedestrian\", \"non-pedestrian\"}};\nString PROTOTXT_LIST[2] = {\"models/multiclass_train_val.prototxt\", \"models/binary_train_val.prototxt\"};\nString MODEL_LIST[2] = {\"models/equalTrainingMultiClass100000.caffemodel\", \"models/binaryFinetune100000.caffemodel\"};\n\nint main(int, char **)\n{\n bool isMultiClass = true;\n // Mapping from label number to label text\n String *classes;\n Latte caffeModel(false, PROTOTXT_LIST[isMultiClass ? 0 : 1], MODEL_LIST[isMultiClass ? 0 : 1]);\n classes = CLASS_LIST[isMultiClass ? 0 : 1];\n\n string originalVideoName = \"data/train.avi\";\n// VideoCapture bgsVideo(\"data/DPZivkovicAGMMBGS.avi\");\n\n VideoCapture inputVideo(originalVideoName);\n if (!inputVideo.isOpened()) {\n std::cout << \"input video not opened\\n\";\n exit(1);\n }\n\n // Create output video with same properties as input\n String video_store_path = \"campusBoundingBox5000pixelMin2.avi\";\n\n // Intrinsic properties of input\n double dWidth = inputVideo.get(CV_CAP_PROP_FRAME_WIDTH);\n double dHeight = inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT);\n int fps = inputVideo.get(CV_CAP_PROP_FPS);\n Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));\n\n /*\n VideoWriter outputVideo(video_store_path, CV_FOURCC('m', 'p', '4', 'v'), fps, frameSize, true);\n if(!outputVideo.isOpened()) { // check if we succeeded\n printf(\"output video not opened\\n\");\n return NULL;\n }\n*/\n //IBGS *bgs = new DPEigenbackgroundBGS();\n //IBGS *bgs = new DPZivkovicAGMMBGS();\n IBGS *bgs = new FrameDifferenceBGS();\n\n Mat frame;\n// Mat bgsFrame;\n ObjectExtractor extractor;\n Mat img_mask;\n Mat img_bkgmodel;\n Scalar WHITE(255, 255, 255);\n Scalar RED(0, 0, 255);\n\n NonMaxSuppression nms;\n float counts[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};\n float countTotal = 0;\n int count = 0;\n while(true) {\n inputVideo >> frame;\n// bgsVideo >> bgsFrame;\n if (!frame.data) {\n break;\n }\n#ifdef WRITE_TO_FILE\n imwrite( \"originalOutput/image\" + to_string(count) + \".jpg\", frame);\n#endif\n bgs->process(frame, img_mask, img_bkgmodel);\n\n//#ifdef WRITE_TO_FILE\n imwrite( \"bgsOutput/DifImage\" + to_string(count) + \".jpg\", img_mask);\n//#endif\n\n vector<Rect> windows = extractor.extractBoxes(img_mask);\n#ifdef WRITE_TO_FILE\n Mat windowWithoutLabel = frame.clone();\n for (Rect w : windows) {\n rectangle(windowWithoutLabel, w, WHITE);\n }\n#endif\n\n#ifdef WRITE_TO_FILE\n imwrite(\"withoutLabel/image\" + to_string(count) + \".jpg\", windowWithoutLabel);\n#endif\n vector<pair<float, int>> scoreLabels = caffeModel.getScoresAndLabels(frame, windows);\n#ifdef WRITE_TO_FILE\n for (int i = 0; i < scoreLabels.size(); i++) {\n Rect& w = windows[i];\n int label = scoreLabels[i].second;\n putText(windowWithoutLabel, classes[label], w.tl(), FONT_HERSHEY_SIMPLEX, 0.5, RED);\n }\n#endif\n\n string windowDirName = 
\"windowImages/\";\n //TODO: remove below loop\n for (int i = 0; i < scoreLabels.size(); i++) {\n Rect& w = windows[i];\n string label = classes[scoreLabels[i].second];\n string score = to_string(scoreLabels[i].first);\n string filename = windowDirName + label + \"_\" + score + \"_\" + to_string(i);\n imwrite(filename, frame(w));\n }\n\n#ifdef WRITE_TO_FILE\n imwrite(\"withLabel/image\" + to_string(count) + \".jpg\", windowWithoutLabel);\n#endif\n vector<tuple<Rect, float, int>> boxes = nms.suppress(windows, scoreLabels);\n for (tuple<Rect, float, int> boxTuple : boxes) {\n int label = get<2>(boxTuple);\n Rect& box = get<0>(boxTuple);\n // imwrite(\"outputImages/\" + classes[label] + to_string((int) countTotal) + \".jpg\", frame(box));\n rectangle(frame, box, WHITE);\n putText(frame, classes[label], box.tl(), FONT_HERSHEY_SIMPLEX, 0.5, RED);\n putText(frame, to_string(get<1>(boxTuple)), box.br(), FONT_HERSHEY_SIMPLEX, 0.5, RED);\n counts[label]++;\n countTotal++;\n for (int count_i = 0; count_i < sizeof(counts)/sizeof(counts[0]); count_i++) {\n cout << \"label \" << count_i << \" % = \" << counts[count_i] / countTotal << endl;\n }\n }\n imshow(\"Final Output after NMS\", frame);\n printf(\"Done drawing %d boxes\\n\", boxes.size());\n#ifdef WRITE_TO_FILE\n imwrite( \"nonMaxOutput/image\" + to_string(count) + \".jpg\", frame);\n#endif\n count++;\n\n //outputVideo.write(frame);\n\n if (cvWaitKey(10) >= 0)\n break;\n }\n// outputVideo.release();\n return 0;\n}\n\nvoid TestBGSVideoConvertor() {\n VideoCapture capture(\"data/campus1.mov\"); // open the default camera\n BGSConvertor *convert = new BGSConvertor(\"DPZivkovicAGMMBGS\");\n\n // Test video convert\n VideoCapture result = convert->convert_video(capture, \"data/campus1ZivkovicOutput.mp4\");\n Mat vidTest;\n while(1) {\n result >> vidTest;\n if (!vidTest.data) {\n cout << \"no result data\\n\";\n break;\n }\n\n imshow(\"Video Test\", vidTest);\n\n if (cvWaitKey(1) >= 0)\n break;\n }\n}\n\n"
},
{
"alpha_fraction": 0.4590231776237488,
"alphanum_fraction": 0.4664735198020935,
"avg_line_length": 39.21666717529297,
"blob_id": "fde50f28439882e6f0cf2b48570ce9d44cfbe0e0",
"content_id": "9077a3ed9ccef65a861e29637c25add5cc209b67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2416,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 60,
"path": "/tools/create_bbox_images.py",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "\nimport cv2\nimport urllib\nimport sys, os\nimport xml.etree.ElementTree as ET\n\npwd = os.path.abspath(os.path.dirname(__file__))\nimage_dir_name = pwd + '/../images/'\ntmp_folder = pwd + '/../tmp/'\n\nif __name__ == '__main__':\n\n for x in os.walk(pwd):\n if (x[0] != pwd):\n label = x[0].split('/')[-1]\n print 'Writing bbox images for %s' % label\n\n image_urls_mapping = {}\n with open(label + \"_imageurls.txt\") as f:\n for line in f:\n if line.strip():\n (key, val) = line.split()\n image_urls_mapping[key] = val\n\n print 'Image URL mapping created for %s' % label\n\n count = 0\n for xml_filename in x[2]:\n if not xml_filename.endswith('.xml'): continue\n tree = ET.parse(label + '/' + xml_filename)\n root = tree.getroot()\n filename = root.findall('./filename')[0].text\n local_filename = tmp_folder + '%s.JPEG' % filename\n img = cv2.imread(local_filename)\n if img is None:\n url = image_urls_mapping.get(filename)\n if url is not None:\n try:\n urllib.urlretrieve(url, local_filename)\n img = cv2.imread(local_filename)\n except Exception,e:\n print \"Error downloading image file at url: %s\" % url\n print e\n\n if img is not None:\n xmin = int(root.findall('./object/bndbox/xmin')[0].text)\n ymin = int(root.findall('./object/bndbox/ymin')[0].text)\n xmax = int(root.findall('./object/bndbox/xmax')[0].text)\n ymax = int(root.findall('./object/bndbox/ymax')[0].text)\n\n count = count + 1\n bbox_img = img[ymin:ymax, xmin:xmax]\n bbox_filename = image_dir_name + label + '_' + filename + \"_bbox.jpg\"\n cv2.imwrite(bbox_filename, bbox_img)\n if count % 50 == 0:\n print '%s bbox images downloaded so far' % str(count)\n else:\n print local_filename + ' could not be read'\n\n print 'Done'\n print 'You can now delete the folder %s' % tmp_folder\n\n\n"
},
{
"alpha_fraction": 0.6933333277702332,
"alphanum_fraction": 0.7190476059913635,
"avg_line_length": 34,
"blob_id": "6f8cbe8e4ea234c44ac1609c0847673cdbcc6f86",
"content_id": "cbb74c2c510c86f86d12d40bf39776d1033acdcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1050,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 30,
"path": "/README.md",
"repo_name": "arzavj/detectCV",
"src_encoding": "UTF-8",
"text": "<h1>detectCV</h1>\nAn application that detects and labels objects in a video taken from an RGB camera.\n\n<h3>Setup Instructions</h3>\n\n<i>You need to have OpenCV installed and built, as well as Qt Creator.</i>\n\n1. Clone the repo.\n2. On a Mac you might have to do:\n<ul>\n<li>```cd Qt5.3.2/5.3/clang_64/mkspecs/```</li>\n<li>```sudo vim qdevice.pri```</li>\n<li>change 10.8 to 10.9</li>\n</ul>\n3. **Launch Qt creator from the terminal.**\n4. Open Existing project by selecting the detectCV.pro file in the repo.\n5. Click on Projects (on the left pane) -> Run (on top) and then change the Working Directory to the detectCV folder that you cloned.\n6. Click Run.\n\n<h3>QT Creator Setup for Mac</h3>\nFollow http://stanford.edu/~rawatson/lair/mac.html\n\n<h3>Importing Caffe into QT Creator</h3>\n<ul>\n<li> $CAFFEDIR/make\n<li> Copy folder $CAFFEDIR/include/caffe to /usr/local/include\n<li> Copy folder $CAFFEDIR/build/include/caffe/proto to /usr/local/include/caffe\n<li> brew install openblas\n<li> Copy folder $CAFFEDIR/build/lib/* (.dylib, .a) to /usr/local/lib\n</ul>\n"
}
] | 15 |
meighanv/05-Python-Programming | https://github.com/meighanv/05-Python-Programming | 0072051149b8ed3b668250b2c1d4d5503715b0b6 | 5493fc2adfbf5fbf5a03a1e027c269646b639332 | 0a89b8ecca5912cbf086f17de25d543c17d225a0 | refs/heads/master | 2020-08-29T14:21:19.646870 | 2020-03-13T18:36:07 | 2020-03-13T18:36:07 | 218,059,331 | 0 | 0 | null | 2019-10-28T13:58:16 | 2019-10-28T03:54:05 | 2019-10-28T03:54:03 | null | [
{
"alpha_fraction": 0.5433025360107422,
"alphanum_fraction": 0.5479214787483215,
"avg_line_length": 26.95161247253418,
"blob_id": "747414adc00bf381c0ad9f269ad9f6190c40cbb9",
"content_id": "fd16bb37448d4f739782bc2b98958d5caa4753d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1732,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 62,
"path": "/LABS/Classes/superhero.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This is a class to create a super hero\nclass SuperHero:\n\n # This __init__ accepts an argument to set the hero's name\n def __init__(self, name):\n self.__name = name\n\n # This accepts an argument to set the hero's real name\n def set_real_name(self, realName):\n self.__real_name = realName\n\n # This accepts an argument to set the hero's powers\n def set_powers(self):\n self.__powers = []\n power = ''\n while power.upper() != 'DONE':\n power = input('What are your hero\\'s powers? (Enter one at a time. Type \\'DONE\\' when finished)')\n if power.upper() != 'DONE':\n self.__powers.append(power)\n \n # This accepts an argument to set the hero's colors\n def set_colors(self):\n self.__colors = []\n color = ''\n while color.upper() != 'DONE':\n color = input('What colors does your hero wear? (Enter one at a time. Type \\'DONE\\' when finished)')\n if color.upper() != 'DONE':\n self.__powers.append(color)\n\n def get_name(self):\n print('Hero name: ', str(self.__name))\n print()\n \n def get_real_name(self):\n print('Hero\\'s real name: ', str(self.__real_name))\n print()\n\n def get_powers(self):\n print('Hero\\'s powers: ')\n for i in self.__powers:\n print(i)\n print()\n\n def get_colors(self):\n print('Hero\\'s colors: ')\n for i in self.__colors:\n print(i)\n print()\n\ndef main():\n hero1 = SuperHero('Wolverine')\n hero1.set_real_name('Logan')\n hero1.set_powers()\n hero1.set_colors()\n\n hero1.get_name()\n hero1.get_real_name()\n hero1.get_powers()\n hero1.get_colors()\n\n\nmain()"
},
{
"alpha_fraction": 0.6040723919868469,
"alphanum_fraction": 0.6485671401023865,
"avg_line_length": 14.785714149475098,
"blob_id": "8d71e554351b3bfd2fc63dd3da33e3b11a5f8712",
"content_id": "a7bfae155761574f25aa14c53b257a15ca1aefd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1334,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 84,
"path": "/03_Flow_Control/07_break_continue.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Break and Continue\n\nIf further examples are needed of how Break/Continue works, please reference PyDocs.\n\n### Break\n\nThe break statement simply breaks out of the innermost enclosing loop.\n\n```python\nfor i in range(1, 101):\n if i == 10:\n print(\"WE BROKE IT!\")\n break\n print(i)\n```\n\n**Output:**\n\n```text\n1\n2\n3\n4\n5\n6\n7\n8\n9\nWE BROKE IT!\n```\n\n### Continue\n\nThe continue statement continues with the next iteration of the loop.\n\n```python\nfor i in range(1, 100):\n if i % 2 == 0:\n print(\"{} is an even number!\".format(i))\n continue #prevents second print from running\n print(\"{} is an odd number!\".format(i))\n```\n\n**Output:**\n\n```text\n1 is an odd number!\n2 is an even number!\n3 is an odd number!\n4 is an even number!\n5 is an odd number!\n6 is an even number!\n7 is an odd number!\n8 is an even number!\n9 is an odd number!\n10 is an even number!\n11 is an odd number!\n12 is an even number!\n13 is an odd number!\n14 is an even number!\n15 is an odd number!\n16 is an even number!\n17 is an odd number!\n18 is an even number!\n19 is an odd number!\n20 is an even number!\n....\n```\n\nNotice how the second print is skipped entirely for even numbers! \n\n---\n\n**Continue to Performance Lab:** 3F\n\n---\n\n|[Lab 3F](/03_Flow_Control/lab3f.md)|\n|---|\n"
},
{
"alpha_fraction": 0.7191234827041626,
"alphanum_fraction": 0.724103569984436,
"avg_line_length": 31.354839324951172,
"blob_id": "2fb118a62547d065adac78ea481b32971e52e018",
"content_id": "9ddab4bee98f09159609cf84adb56490fa451735",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1006,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 31,
"path": "/Practice_Test/C_Types.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import ctypes, os\nfrom ctypes import *\n\n\"\"\"\nThis question will test your knowledge of using Ctypes and C-DLL in python. You should use Ctype data types and Ctype functions when possible\nSteps:\n\n•\tGiven \"Sum.c\" program that is used to produce \"SumDLL.dll\", you are required to:\n\n o\tCreate a method sum_numbers that takes an array of integers (as an input parameter) and return its sum\n\n o\tThe method sum_numbers should utilize sum_numbers from the given SumDLL.dll\n\n###If performing this on a linux platform, may need to recompile the source into a *nix so###\n- gcc -fPIC -c Sum.c\n- gcc -shared -o libSum.so Sum.o\n\n\"\"\"\n\n\n\ndef sum_numbers(numbers):\n # Load custom cdll after creating .so from orig C source\n libc = cdll.LoadLibrary(\"./libSum.so\")\n # Cast original python list as c array\n arr = (ctypes.c_int * len(numbers))(*numbers)\n #return the result of shared library function to calling program\n return libc.sum_numbers(len(numbers), arr)\n\n# nums = [1, 2, 4, 5, 7]\n# print(sum_numbers(nums))\n\n"
},
{
"alpha_fraction": 0.6959064602851868,
"alphanum_fraction": 0.6959064602851868,
"avg_line_length": 33,
"blob_id": "41c9dc9faaf3146d28c8791cf31c8b93a702be84",
"content_id": "202490dc0856f81f10f694ed22b36dda79e9dd23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 10,
"path": "/LABS/Socket/domain_name.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#from tld import get_tld # this gives the com ending and usually gave the full google.com \nfrom tld import get_fld # new way to get the full google.com\n\ndef get_domain_name(url):\n\t\n #domain_name = get_tld(url) # tld is the old way. \n domain_name = get_fld(url)\n return domain_name\n\nprint(get_domain_name('https://www.google.com'))\n\n\n"
},
{
"alpha_fraction": 0.6689519882202148,
"alphanum_fraction": 0.6723800301551819,
"avg_line_length": 43.41304397583008,
"blob_id": "11fbcfd7fa221cccbb29bb6c115b4a4abee7903d",
"content_id": "59f6f81ad154aa46681912726a6c19c81aa382e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 46,
"path": "/LABS/Classes/cashregister.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" \n7. Cash Register\n This exercise assumes that you have created the RetailItem class for Programming\n Exercise 5. Create a CashRegister class that can be used with the RetailItem class. The\n CashRegister class should be able to internally keep a list of RetailItem objects. The\n class should have the following methods:\n • A method named purchase_item that accepts a RetailItem object as an argument.\n Each time the purchase_item method is called, the RetailItem object that is passed as\n an argument should be added to the list.\n • A method named get_total that returns the total price of all the RetailItem objects\n stored in the CashRegister object’s internal list.\n • A method named show_items that displays data about the RetailItem objects stored\n in the CashRegister object’s internal list.\n • A method named clear that should clear the CashRegister object’s internal list.\n Demonstrate the CashRegister class in a program that allows the user to select several\n items for purchase. When the user is ready to check out, the program should display a list\n of all the items he or she has selected for purchase, as well as the total price.\n \"\"\"\nfrom functools import reduce\nclass CashRegister:\n\n def __init__ (self):\n self.__purchase = []\n self.__total = 0.0\n\n def set_total(self):\n # This code below was replaced by the uncommented lambda function\n # total = 0\n # for i in self.__purchase:\n # total += float(i.get_price()) * float(i.get_unitCount())\n receipt = (item.get_price()*item.get_unitCount() for item in self.__purchase)\n self.__total = reduce((lambda x,y: x + y), receipt)\n\n def purchase_item(self,item):\n self.__purchase.append(item)\n\n def get_total(self):\n return self.__total\n \n def show_items(self):\n for i in self.__purchase:\n print(i.get_desc())\n\n def clear_register(self):\n self.__purchase = []\n self.__total = 0.0"
},
{
"alpha_fraction": 0.6425926089286804,
"alphanum_fraction": 0.6496296525001526,
"avg_line_length": 33.628204345703125,
"blob_id": "028ce49a2a14f8f8297412c85234334c69a69f83",
"content_id": "989dfbf49602379dd1f32cdc0b8f4a54d79803f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2700,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 78,
"path": "/LABS/Socket/full-chat-svr.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#Server for multithreaded (asynchronous) chat application.\nfrom socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread\n\n\ndef accept_incoming_connections():\n #Sets up handling for incoming clients.\n while True:\n client, client_address = SERVER.accept()\n print(f\"{client_address}:{client_address} has connected.\")\n # Send greeting upon successful connection\n client.send(bytes(\"Greetings from the cave! Now type your name and press enter!\", \"utf8\"))\n # Adding to the addresses dictionary\n addresses[client] = client_address\n # Create and start the thread calling the handle_client function\n # and the necessary arguments \n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client): # Takes client socket as argument.\n #Handles a single client connection.\n # Set name to info in the socket buffer from the response from\n # the prompt in accept_incoming_connections \n name = client.recv(BUFSIZ).decode(\"utf8\")\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name\n # Welcoming the user\n client.send(bytes(welcome, \"utf8\"))\n # Creating the msg for broadcast\n msg = \"%s has joined the chat!\" % name\n # Calls broadcast function to send message to all users\n broadcast(bytes(msg, \"utf8\"))\n # Adding the name to the clients dictionary with the socket \n # as the key\n clients[client] = name\n\n # Loop to continue broadcasting new messages\n while True:\n # Setting mesg to broadcast\n msg = client.recv(BUFSIZ)\n # as long as the message is not {quit}\n if msg != bytes(\"{quit}\", \"utf8\"):\n broadcast(msg, name+\": \")\n # if it is {quit} then close connection and notify other users\n else:\n client.send(bytes(\"{quit}\", \"utf8\"))\n client.close()\n # Remove the quitting client\n del clients[client]\n broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\n break\n\n# prefix is for name identification.\ndef broadcast(msg, prefix=\"\"): \n # Broadcasts a message to all the clients.\n # Iterate through each socket in the clients dictionary to send msg\n for sock in clients:\n sock.send(bytes(prefix, \"utf8\")+msg)\n\n \nclients = {}\naddresses = {}\n\nHOST = ''\nPORT = 33000\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\nSERVER = socket(AF_INET, SOCK_STREAM)\nSERVER.bind(ADDR)\n\nif __name__ == \"__main__\":\n SERVER.listen(5)\n print(\"Waiting for connection...\")\n ACCEPT_THREAD = Thread(target=accept_incoming_connections)\n ACCEPT_THREAD.start()\n ACCEPT_THREAD.join()\n SERVER.close()"
},
{
"alpha_fraction": 0.6391304135322571,
"alphanum_fraction": 0.654347836971283,
"avg_line_length": 18.16666603088379,
"blob_id": "3926ac869680911709d0b628bc6eaa140df80d18",
"content_id": "1c7007416c57e7a4aa47f3f6c93d8b61e93d1935",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 24,
"path": "/README.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Introduction\n\n\n\n**Note:** Please use 00-Table-of-Contents to navigate this curriculum. That will ensure that you accomplish the topics in the correct order. \n\n## Chapters:\n\n* **Features**\n* **Data Types**\n* **Control Flow**\n* **Functions**\n* **Modules and Classes**\n* **Ctypes and Misc**\n\n---\n\n|[Next Topic](/01_python_features/README.md)|\n|---|\n"
},
{
"alpha_fraction": 0.6912280917167664,
"alphanum_fraction": 0.7473683953285217,
"avg_line_length": 24.909090042114258,
"blob_id": "6a193dc48f2286a26af34c8bbb8dd36ff9d6f9cd",
"content_id": "434f0c96d4176297977e192db229824f41f59122",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 11,
"path": "/Networking/mac3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport subprocess\n\ninterface = \"eth0\"\n\nprint(\"Changing Mac Changer address: \" + interface)\n\nsubprocess.call(\"ifconfig eth0 down\", shell=True)\nsubprocess.call(\"ifconfig eth0 hw ether 00:22:33:44:55:66\", shell=True)\nsubprocess.call(\"ifconfig eth0 up\", shell=True)\n"
},
{
"alpha_fraction": 0.5771144032478333,
"alphanum_fraction": 0.5945273637771606,
"avg_line_length": 24.967741012573242,
"blob_id": "4dd8f15c56f90f69918f5cf671d6dbe72219e5b6",
"content_id": "a9b761d4413cd8ecb3d175278e0ec963af35b03a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 31,
"path": "/LABS/Socket/rps-clt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\ndef playGame(PLAYER, BUFFER):\n choices = [0, 1, 2]\n\n print('Let\\'s play Rock, Paper, Scissors!')\n while True:\n GUESS = -1\n while int(GUESS) not in choices:\n GUESS = input('Select 0 for Rock, 1 for Paper, 2 for Scissors: ')\n PLAYER.send(GUESS.encode())\n ANSWER = PLAYER.recv(BUFFER).decode()\n print('>', ANSWER)\n if ANSWER != 'TIE':\n break\n\ndef main():\n HOSTNAME = 'localhost' # on same host\n PORTNUMBER = 11267 # same port number\n BUFFER = 80 # size of the buffer\n\n DEALER = (HOSTNAME, PORTNUMBER)\n PLAYER = Socket(AF_INET, SOCK_STREAM)\n PLAYER.connect(DEALER)\n\n playGame(PLAYER, BUFFER)\n\n PLAYER.close()\n\nmain()"
},
{
"alpha_fraction": 0.6656976938247681,
"alphanum_fraction": 0.6792635917663574,
"avg_line_length": 42,
"blob_id": "3eedf636ee804bdee6e11668a2765c195158e6d9",
"content_id": "14fa231c28039aaa571714ee1be2d0a1044be0b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1032,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 24,
"path": "/Practice_Test/binary_to_decimal.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n# Write a function binary_to_decimal that takes an integer containing 0s and 1s (i.e., a \"binary\" integer)\n# and returns its decimal equivalent.\n# For example, the decimal equivalent of binary 1101 is 13\n# If the input value is not a binary number, the function should return \"Invalid Input\"\n\"\"\"\n\ndef binary_to_decimal(binaryNumber):\n #Cast the int value as string\n string = str(binaryNumber)\n #Start try accept to handle value error\n try:\n #store the decimal value as a base 2 (binary) int\n decimal = int(string,2)\n # Unit test does not want ato convert negative numbers as they are represented differently in binary\n if decimal < 0:\n # Return invalid input if the decimal is less than 0 (negative)\n return f'Invalid Input'\n else:\n # Otherwise return the decimal value\n return decimal\n #In the case of int value containing anything other than 1's and 0's return 'Invalid Input'\n except ValueError:\n return f'Invalid Input'\n"
},
{
"alpha_fraction": 0.5987085103988647,
"alphanum_fraction": 0.5987085103988647,
"avg_line_length": 20.27450942993164,
"blob_id": "d00a40884ba51cd5e12a01541e6dd828b884088a",
"content_id": "ba5aec7721946fa7c2b8e61d70686ae4a60662a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 51,
"path": "/LABS/Classes/animals.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Mammal:\n # Initializing Mammal object attributes\n def __init__(self, species):\n self.__species = species\n\n def show_species(self):\n print('I am a ', self.__species)\n\n def make_sound(self):\n print('Grrr')\n\n\nclass Dog(Mammal):\n # Initializing Dog subclass object attributes\n def __init__(self):\n Mammal.__init__(self, 'Dog')\n\n # This is an overide because it has the same name \n # as above, but this one will overwrite the 'Grrr'\n # sound\n def make_sound(self):\n print('Woof')\n\n\nclass Cat(Mammal):\n # Initializing Dog subclass object attributes\n def __init__(self):\n Mammal.__init__(self, 'Cat')\n\n # This is an overide because it has the same name \n # as above, but this one will overwrite the 'Grrr'\n # sound\n def make_sound(self):\n print('Meow')\n\n# Define Main\n\ndef main():\n cat = Cat()\n cat.make_sound()\n cat.show_species()\n\n dog = Dog()\n dog.make_sound()\n dog.show_species()\n\n mammal = Mammal('fish')\n mammal.make_sound()\n mammal.show_species()\n\nmain()"
},
{
"alpha_fraction": 0.713798999786377,
"alphanum_fraction": 0.7301533222198486,
"avg_line_length": 38.13333511352539,
"blob_id": "f417109164675d39a3ac7351fed80b5261bb17aa",
"content_id": "dbfb903e81092d0f7e955efd1bfb86e587d94fd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2935,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 75,
"path": "/LABS/Socket/weblibs.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# # Request Library\n# import requests\nlink = 'http://www.github.com'\n# r = requests.get(link)\n# print(r.status_code) #status\n# #print(r.content[0:400])#actual content [0:400] prints the first 401 bytes in bytes\n# print(r.url) #The URL queried\n# print(r.history) #Shows historical r codes\n# print(r.headers) # Provides server information as a dictionary ; r.headers['<Category>']\n# print(r.request.headers) # provides client header infor as dict including cookie info\n# print(r.encoding) #Provides page encoding\n# #print(r.text) #prints content as string\n# # r = requests.get(link, stream=True) #raw r \n# # r.raw.read(100) #shows the first 100 bytes in hex\n# # There Doesn't appear any parsing in this library\n\n\"\"\"\nurllib module\n\"\"\"\n# Any of the following libraries can be passed by name into dir() to get info\nimport urllib.request as request\nimport urllib.error as error\nimport urllib.parse as urlparse\nimport urllib.robotparser as robot\n\ndir(request) # list of the features\nr = request.urlopen(link) #open link\nc = request.urlopen(link).read() #open link and read content (in bytes) could have also done r.read()\nr.getcode() # Obtain the response status code\nr.code # same as above\nr.geturl # states what was requested\nr._method # shows the request method\nr.getheaders() #Gets the response headers (server info)\nr.getheader()['Content-Type'] #get a value for a specific key in head dict\nr.info()['Content-Type'] # Same as above\n\n# TRY EXCEPT FOR OPENING A URL\ntry:\n request.urlopen(\"https://www.pyrthon.ogr\")\nexcept error.URLError as e:\n print(e.reason)\n\n### PARSING ###\namazon = \"https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3dstripbooks-intl-ship&field-keywords=Pearson+Books\"\nprint(urlparse.urlsplit(amazon))\n# prints the output: SplitResult(scheme='https', netloc='www.amazon.com', path='/s/ref=nb_sb_noss', query='url=search-alias%3dstripbooks-intl-ship&field-keywords=Pearson+Books', fragment='')\nprint(urlparse.urlparse(amazon)) #basically same as above\n\ndata = {'param1':'value1','param2':'value2'}\nurlparse.urlencode(data)\n# Output 'param1=value1¶m2=value2'\nurlparse.parse_qs(urlparse.urlencode(data))\n# Output: {'param1': ['value1'], 'param2': ['value2']}\nurlparse.urlencode(data).encode('UTF-8')\n# Output: b'param1=value1¶m2=value2'\nurlparse.urljoin('http://localhost:8080/~cache/', 'data file') #create a url\n# Output: 'http://localhost:8080/~cache/data file'\npar = robot.RobotFileParser()\npar.set_url('https://www.samsclub.com/robots.txt')\npar.read() #reading the URL content\nprint(par)\n# User-agent: *\n# Allow: /ads.txt\n# Allow: /sams/account/signin/createSession.jsp\n# Disallow: /cgi-bin/\n# Disallow: /sams/checkout/\n# Disallow: /sams/account/\n# Disallow: /sams/cart/\n# Disallow: /sams/search/\n# Disallow: /sams/eValues/clubInsiderOffers.jsp\n# Disallow: /friend\n# Disallow: /s/\n# Disallow: /%2ASCDC%3D1%2A\n# Allow: /sams/account/referal/\npar.can_fetch('*','https://www.samsclub.com/friend/')\n"
},
{
"alpha_fraction": 0.5676127076148987,
"alphanum_fraction": 0.5717862844467163,
"avg_line_length": 28.975000381469727,
"blob_id": "a977b5a15ab690fc0e20ca9c106b53b73db29ff5",
"content_id": "3cdb8212c753cbaaad4975b37a7695158858e9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1198,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 40,
"path": "/LABS/Socket/queryTags.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import re\nimport requests\nimport argparse\nimport bs4\nfrom bs4 import BeautifulSoup as bs\n\ndef main(tag,url):\n r = requests.get(url)\n soup = bs(r.content, 'html.parser')\n for t in tag:\n tags = soup.find_all(t)\n if len(tags) > 0:\n print(f'\\'{t}\\' tags:')\n getText(tags)\n print()\n else: \n print(f'The website \\'{url}\\' does not contain any \\'{t}\\' tags.')\n\ndef getText(tags):\n for i in tags:\n for j in i.contents:\n if isinstance(j, bs4.element.NavigableString):\n # print(type(j))\n # print(dir(j))\n if len(j) > 1:\n print(j)\n\n\nif __name__ == '__main__': \n # This series of statements allows for in-line arguments\n parser = argparse.ArgumentParser (description='TCP Socket Client Example') \n parser.add_argument('--url', action=\"store\", dest=\"url\", type=str, required=True) \n # This was testing how to add additional, optional arguments\n parser.add_argument('--tag', action=\"store\", dest=\"tag\", required=True, nargs='+')\n given_args = parser.parse_args() \n url = given_args.url \n tag = given_args.tag\n \n\nmain(tag,url)"
},
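Example invocation for the script above (the URL and tag names are placeholders):

```
python queryTags.py --url https://example.com --tag p h1 title
```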
{
"alpha_fraction": 0.61689293384552,
"alphanum_fraction": 0.6250377297401428,
"avg_line_length": 34.559139251708984,
"blob_id": "e8666a9080ebfd0b8d398558830acb97bd665a84",
"content_id": "e173643892f0be1e2e37424e8aa6f5039aa1b30e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3333,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 93,
"path": "/LABS/PerfExam/prompt3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n3. \n(The Triangle class) Design a class named Triangle that extends the\nGeometricObject class defined below. The Triangle class contains:\n - Three float data fields named side1, side2, and side3 to denote the three\n sides of the triangle.\n - A constructor that creates a triangle with the specified side1, side2, and\n side3 with default values 1.0.\n - The accessor methods for all three data fields.\n - A method named getArea() that returns the area of this triangle.\n - A method named getPerimeter() that returns the perimeter of this triangle.\n - A method named __str__() that returns a string description for the triangle.\n\n\n class GeometricObject:\n def __init__(self, color = \"green\", filled = True):\n self.color = color\n self.filled = filled\n\n def getColor(self):\n return self.color\n\n def setColor(self, color):\n self.color = color\n\n def isFilled(self):\n return self.filled\n\n def setFilled(self, filled):\n self.filled = filled\n \n def toString(self):\n return \"color: \" + self.color + \" and filled: \" + str(self.filled)\n\n\n Write a test program that prompts the user to enter the three sides of the \n triangle, a color, and 1 or 0 to indicate whether the triangle is filled. \n The program should create a Triangle object with these sides and set the \n color and filled properties using the input. The program should display the \n triangle’s area, perimeter, color, and True or False to indicate whether the \n triangle is filled or not.\n # Do this last\n\n\"\"\"\nimport geometricObj\nfrom math import sqrt\n\ndef main():\n # Collecting triangle data\n sides = getSides()\n color = input('What color is the triangle?\\n')\n filled = fill()\n \n # creating Triangle object\n triangle = geometricObj.Triangle(color, filled, sides[0], sides[1], sides[2])\n print(triangle)\n\n# Method to get the user input for sides\ndef getSides():\n # Empty list for side input\n sides = []\n # Collect input for each possible side of triangle\n for i in range(3):\n # Tracks whether or not valid input has been input\n valid = False\n while valid == False:\n try:\n # getting input for current side\n side = float(input(f'Please provide the length of side {i+1} for your triangle:\\n'))\n valid = True\n sides.append(side)\n except ValueError:\n print('Error: Please provide a number value (ex: 5 or 5.3)!')\n # return the list of sides\n return sides\n\n# Getting input for filled status with input validation\ndef fill():\n # Tracks whether or not valid input has been input\n valid = False\n while valid == False:\n try:\n # getting input for filled status\n validOptions = [0,1]\n fill = int(input('Would you like to fill this triangle? (1 = yes, 2 = no)\\n'))\n while int(fill) not in validOptions:\n fill = int(input('Incorrect input.\\nWould you like to fill this triangle? (1 = yes, 2 = no)\\n'))\n valid = True\n return fill\n except ValueError:\n print('Error: Please provide a number value (1 or 0)!')\n\nmain()\n "
},
{
"alpha_fraction": 0.6896918416023254,
"alphanum_fraction": 0.7183846831321716,
"avg_line_length": 31.482759475708008,
"blob_id": "5b125ffc646fb8d2762adc8ad2b07d1e6e06d849",
"content_id": "7b7581fe8a73ad85c487d4b0245a559aada7e14c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 941,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 29,
"path": "/LABS/Labs-3-1/lab3-1-10.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Stock Purchase\n#Declaring variables from available data\nsharesBought = float(1000)\npurchasePrice = float(32.87)\ncommission = 0.02\nsharesSold = float(1000)\nsalePrice = 33.92\n\n#calculating money spent and made with commission considerations\npurchase = purchasePrice * sharesBought\npurchaseComm = purchase * commission\nsale = salePrice * sharesSold\nsaleComm = sale * commission\nprofit = purchase - sale - commission - commission\n\n#Statements of information about transactions \nprint('Joe paid ${:.2f} for the stock.'.format(purchase))\nprint('Joe paid ${:.2f} commission to his broker on purchase.'.format(purchaseComm))\nprint('Joe sold the stock for ${:.2f}.'.format(sale))\nprint('Joe paid ${:.2f} commission to his broker on sale.'.format(saleComm))\n\n#Test \nif profit > 0:\n print('Joe profitted by ${:.2f}'.format(profit))\nelse:\n if profit < 0:\n print('Joe lost ${:.2f}'.format(profit))\n else:\n print('Joe broke even.')"
},
{
"alpha_fraction": 0.6164020895957947,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 22.625,
"blob_id": "12dda2f00ff36a5f2d72f7099b2a83faf7a44b7d",
"content_id": "02e5d97f036c83f8b25de6dd924a62be0d2b256b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 16,
"path": "/Algorithms/countfib.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Prints the # of calls of a recursive Fibonacci \n# function w/ problem sizes that double\n\nfrom counter import counter\n\ndef fib(n, counter):\n # count the number of calls of the fib function\n counter.increment()\n if n < 3:\n return 1\n else:\n return fib(n-1, counter) + fib(n -2, counter)\n\n problemSize = 2\n for count in range(%):\n counter = "
},
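countfib.py above imports a counter module that is not in the snippet; a minimal sketch of what it presumably provides (the class name and interface are assumptions inferred from the calls):

```python
# counter.py -- hypothetical; just a tally with the interface countfib.py uses.
class Counter:
    def __init__(self):
        self._count = 0

    def increment(self, amount=1):
        self._count += amount

    def __str__(self):
        return str(self._count)
```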
{
"alpha_fraction": 0.663914144039154,
"alphanum_fraction": 0.6787778735160828,
"avg_line_length": 34.64706039428711,
"blob_id": "c7eb28b0f8deb814e3418f494bf5fa762b3e01c7",
"content_id": "4b4bce315534bc86db9bebca868d5827e6b225fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1211,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 34,
"path": "/LABS/Labs-3-4/lab3-4-7-bookClub.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining constant dictionary for the amount of book purchased to the amount of points for the month\nCLUB_POINTS = {'0': 0,\n '1': 5,\n '2': 15,\n '3': 30,\n '4': 60}\n\ndef main():\n #requesting user input for the number of books purchased\n numBooks = getBooks() \n\n #References the constant dictionary to determine the amount of points awarded\n calcPoints(numBooks)\n\n#Getting input for number of books bought \ndef getBooks():\n #getting user input as string\n books = input('How many books did you buy last month:\\n')\n #Input validation to ensure the books entered is greater than 0\n while int(books) < 0:\n books = input('How many books did you buy last month:\\n')\n #Checking if the books purchase was more than four\n if int(books) > 4:\n #if it is more than four no more points can be earned, therefore we set the string to 4\n return '4'\n else:\n #Otherwise we return the string of the number of books to match a dictionary key\n return str(books)\n\ndef calcPoints(count):\n #Prints the points earned by looking up their string in the dict as the key \n print('You have earned {} points this month!'.format(CLUB_POINTS[count]))\n\nmain()"
},
{
"alpha_fraction": 0.622107982635498,
"alphanum_fraction": 0.622107982635498,
"avg_line_length": 24.933332443237305,
"blob_id": "fbb22f5fa643a313967e58be01b8c85490121e69",
"content_id": "9452430eedbc09bd0f25269f8e8673aac2a92e77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 15,
"path": "/LABS/Socket/pingprompt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import subprocess\n\ndef main():\n ping()\n\ndef ping():\n targ = input('What address would you like to ping?\\n')\n count = input(f'How many times would you like to ping {targ}?\\n')\n data = subprocess.Popen(['ping', '-c', count, targ ], stdout = subprocess.PIPE)\n output = data.communicate()\n formatted = str(output).split('\\n')\n for i in formatted:\n print(i)\n\nmain()\n"
},
{
"alpha_fraction": 0.5859788656234741,
"alphanum_fraction": 0.6067019104957581,
"avg_line_length": 29.246665954589844,
"blob_id": "182d09e63811401d444a45fe7a66694e7cd9de6d",
"content_id": "8d976a048c4afc84eb334079439c8d4e7a3198b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4536,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 150,
"path": "/Algorithms/sorting_algs.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" \nBasic Sort Algorithms\n\n The algorithms examined here are easy to write but inefficien\n Each alg we discuss here will utilize a list of integers and the \n swap function defined below \n\"\"\"\nimport timeit\ndef wrapper(func, *args, **kwargs):\n def wrapped():\n return func(*args, **kwargs)\n return wrapped\n\ndef swap(myList, i , j):\n # Exchanges the positions of two items in a list\n temp = myList[i]\n myList[i] = myList[j]\n myList[j] = temp\n\n# *************************Selection Sort*************************\n# Each pass through the main loop selets a single item to be moved\n# Searches the list for the position of the smallest item\n# If that position is not the first position it swaps the items at those\n# positions \n# Ih then finds the next smallest itm and swaps the item at the second \n# position\n\ndef selectionSort(myList):\n i = 0\n # Do n-1 searches for the smallest item \n while i < len(myList) - 1:\n minIndex = i\n j = i + 1\n while j < len(myList):\n if myList[j] < myList[minIndex]:\n minIndex = j\n j += 1\n #Exchange if needed\n if minIndex != i:\n swap(myList, minIndex, i)\n i += 1\n\n\"\"\"\nBig O: O(n^2)\n(1/2)n^2 - (1/2)n\n\nBecause data items are swapped only in the outer loop, this additional \ncost for selection sort is linear in the worst and average cases.\n\"\"\"\n\n##############################BUBBLE SORT##############################\n\"\"\"\nThe strategy is to start at the beginning of the list and compare pairs\nof data items as it moves down to the end. Each time the pairs are out\nof order, the algorithm swaps them. The largest item will swap them.\nThe largest item will eventually\"bubble out to the end of the list.\n\"\"\"\n\ndef bubSort(myList):\n n = len(myList)\n # do n - 1 searches \n while n > 1:\n i = 1\n #start each bubble\n while i < n:\n if myList[i] < myList[i-1]:\n #exchange if needed\n swap(myList, i , i-1)\n i += 1\n n -= 1\n\n\"\"\"\n(1/2)n^2 -(1/2)n\n\nO(n^2)\n\"\"\"\n\n# Update bubble sort to linear best case check\n# In best case list is already sorted; there are no swaps\n# We can modify alg to be more efficient\n\ndef bubbleSort(myList):\n n = len(myList)\n # do n - 1 searches \n while n > 1:\n swapped = False\n i = 1\n #start each bubble\n while i < n:\n if myList[i] < myList[i-1]:\n #exchange if needed\n swap(myList, i , i-1)\n swapped = True\n i += 1\n if not swapped: return \n n -= 1\n\n\n#sortedList = [i for i in range(10000)]\nunsorted = [3,2,6,99,46,22,85,12,98]\n\n# print(unsorted)\n# bubbleSort(unsorted)\n# print(unsorted)\n\n# unsorted = [3,2,6,99,46,22,85,12,98]\n# print(unsorted)\n# bubSort(unsorted)\n# print(unsorted)\n\n# wrapped = wrapper(bubSort, sortedList)\n# print(f'bubSort of sorted: {timeit.timeit(wrapped, number=1)}')\n# wrapped = wrapper(bubSort, unsorted)\n# print(f'bubSort of unsorted: {timeit.timeit(wrapped, number=1000)}')\n\n# wrapped = wrapper(bubbleSort, sortedList)\n# print(f'bubbleSort of sorted: {timeit.timeit(wrapped, number=1000)}')\n# wrapped = wrapper(bubbleSort, unsorted)\n# print(f'bubbleSort of unsorted: {timeit.timeit(wrapped, number=1000)}')\n\n##############################INSERTION SORT###########################\n# On the ith pass through the list, where i ranges from 1 to n-1, the ith\n# item should be inserted into its propoer place in the list amoung the \n# first i items in the list. After the ith pass, the first i items should \n# be in sorted order. This process is analagous to the way in which many \n# people organize playing cards . 
That is, if you hold the first --1 cards\n# in order, you can pick the ith cadrd and compare it to these cards until \n# its proper spot is found\n# Insertion sort consists of two loops. The outer loops traverse the \n# positions from 1 to n-1. For each position i in this loop, you save the\n# item and start the inner loop at position i-1. For each position J in \n# this loop, you move the item to position j+1 until youfind the insertion\n# point for the saved(ith) item.\n# \n\ndef insertionSort(myList):\n i = 1\n while i < len(myList):\n itemToInsert = myList[j]:\n j = i-1\n while j >= 0:\n if itemToInsert < myList[j]:\n myList[j+1] = myList[j]\n j -= 1\n else:\n break\n myList[j+1] = itemToInsert\n i += 1\n\ninsertionSort(unsorted)"
},
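A quick sanity check for the four sorts above (run in the same file; Python's built-in sorted() serves as the reference):

```python
data = [3, 2, 6, 99, 46, 22, 85, 12, 98]
for sort in (selectionSort, bubSort, bubbleSort, insertionSort):
    trial = data[:]          # sort a copy so every algorithm sees the same input
    sort(trial)
    assert trial == sorted(data), sort.__name__
print('all four sorts agree with sorted()')
```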
{
"alpha_fraction": 0.6411698460578918,
"alphanum_fraction": 0.6614173054695129,
"avg_line_length": 29.517240524291992,
"blob_id": "386f08d28d74c76a68be8a76d791bea0a41b1f45",
"content_id": "2c6010aaad2e145cc9f611d06ee1e45b2e5398f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 889,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 29,
"path": "/Practice_Test/counting_pieces_re.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import re\n\n\"\"\"\n# Write a function count_pieces that counts the number of digits, non-digit characters (excluding white spaces),\n# whitespace characters and words in a string. The function takes a string as input and returns\n# a list of integers that represents the counts.\nFor example, \"We have 8 digits.\", the output list will be [1, 13, 3, 4],\n1: digits (ONLY 8)\n13: non-digit characters (excluding white spaces)\n3: white spaces\n4: words\n\n\"\"\"\n\n\ndef count_pieces(testString):\n whitespace = re.compile(r'\\s')\n nonDigit = re.compile(r'[^\\d\\s:]')\n digits = re.compile(r'\\d')\n words = re.compile(r'[A-Za-z0-9]+')\n \n outputList = [len(digits.findall(testString)),\n len(nonDigit.findall(testString)), \n len(whitespace.findall(testString)), \n len(words.findall(testString))]\n\n return outputList\n\n#print(count_pieces(\"1 2 3 4\"))\n\n\n\n\n"
},
{
"alpha_fraction": 0.5602836608886719,
"alphanum_fraction": 0.5957446694374084,
"avg_line_length": 15.529411315917969,
"blob_id": "7589bc01a7bf2a1d21078b2d97c85fe0e356a648",
"content_id": "2e96f189c85b413cbc1dd84c3ce379b07710aed5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 17,
"path": "/LABS/Projects/lecture-tuples.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#A tuple is an immutable list\nmy_tuple = (1, 2, 3, 4, 5)\n\nprint(my_tuple)\n\nnames = ('Holly', 'Warren', 'Ashley')\nfor n in names:\n print(n)\n\nfor i in range(len(names)):\n print(names[i])\n\n#Convert tuple to list\n\"\"\" list = [2, 4, 5, 1, 6] \ntype(list)\ntuple(list)\ntype(list) \"\"\"\n\n"
},
{
"alpha_fraction": 0.6704067587852478,
"alphanum_fraction": 0.6802244186401367,
"avg_line_length": 34.70000076293945,
"blob_id": "4094e2d535578ffd715cbef8ffd9c31c607a76e1",
"content_id": "d4d2c94699302f034639b77cca1286a9f91ad524",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 20,
"path": "/LABS/Socket/remoteip.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# This program is optimized for Python 2.7.12 and Python 3.5.2.\n# It may run on any other version with/without modifications.\n\nimport socket\n\ndef get_remote_machine_info():\n remote_host = 'www.dakjldksajkj.org'\n try:\n print (\"IP address of %s: %s\" %(remote_host, socket.gethostbyname(remote_host)))\n # There are situations where even nonsensical domains will be \n # redirected by the ISP when a valid suffix is provided which will\n # return an IP that actually points to something like DNSSearch.com \n # or an ISP webpage \n except socket.error as err_msg:\n print (\"%s: %s\" %(remote_host, err_msg))\n\nif __name__ == '__main__':\n get_remote_machine_info()"
},
{
"alpha_fraction": 0.5854460000991821,
"alphanum_fraction": 0.6065727472305298,
"avg_line_length": 33.35483932495117,
"blob_id": "db66a04d1fab004b6bb3dba54e7afb41f15bee4b",
"content_id": "913721d381f363582020f52401ac65b79cc9e7e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2130,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 62,
"path": "/Practice_Test/c_types_rectangle.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis program will test your knowledge of using Ctypes in python.\nYou should use ctype data types and ctype functions when possible\n\nSteps:\n * You are provided with the POINT class that has two integers: x-coordinate and y-coordinate\n * Create a class RECT that represents a rectangle with two fields: upperleft and lowerright \n each of type POINT\n * The class RECT should have:\n - a constructor to initialize its corners\n - a method compute_Area to compute the area of the rectangle\n\n * Create a function quickSort (NOT part of the RECT class):\n - The quickSort function will take an array of integers (as an input parameter) and \n sort the elements\n - the quickSort function should utilize ***libc.qsort*** to sort the items\n\n\"\"\"\nfrom ctypes import *\nlib = cdll.LoadLibrary(\"libc.so.6\")\n\nclass POINT(Structure):\n _fields_ = [(\"x\", c_int),\n (\"y\", c_int)]\n\nclass RECT(Structure):\n _fields_ = [(\"lowerleft\", POINT),\n (\"upperright\", POINT)]\n def compute_Area(self):\n return (self.upperright.x - self.lowerleft.x) * (self.upperright.y - self.lowerleft.y) \n \n\n# def py_cmp_func(a, b):\n# print(\"py_cmp_func\", a, b)\n# return 0\n\ndef quickSort(IntArray):\n ascending = lambda x, y: x.contents.value > y.contents.value\n CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))\n cmp_func = CMPFUNC(ascending)\n lib.qsort(IntArray, len(IntArray), sizeof(c_int), cmp_func)\n for i in IntArray:\n print(i)\n return 0\n\n\n# r1 = RECT(POINT(2, 2), POINT(7, 6)) # Area = 20\n# r2 = RECT(POINT(1, 2), POINT(3, 4)) # Area = 4\n# r3 = RECT(POINT(1, 3), POINT(4, 9)) # Area = 18\n# r4 = RECT(POINT(3, 4), POINT(5, 8)) # Area = 8\n# r5 = RECT(POINT(4, 6), POINT(7, 11)) # Area = 15\n\n# IntArray5 = c_int * 5\n# areas = IntArray5(r1.compute_Area(),\n# r2.compute_Area(),\n# r3.compute_Area(),\n# r4.compute_Area(),\n# r5.compute_Area())\n# print(\"Hello\")\n# areasSorted = quickSort(areas)\n# # for a in areasSorted:\n# # print(a)\n"
},
{
"alpha_fraction": 0.5721518993377686,
"alphanum_fraction": 0.6025316715240479,
"avg_line_length": 17.809524536132812,
"blob_id": "7732fbb18daeda7979189f6c86b2082b424332fa",
"content_id": "8c90a8d29256bed80e631c3c19b8ed99eaf63043",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 21,
"path": "/LABS/Socket/server6.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This is the code for the server side\nfrom socket import *\nsize = 512\nhost = ''\nport = 9898\n\n# Create server socket\ns = socket(AF_INET6, SOCK_STREAM)\ns.bind((host, port))\ns.listen(5)\nc,a = s.accept()\ndata = c.recv(size)\n\nif data:\n f = open(\"storage.dat\", '+w')\n print(\"connection from: \", a[0])\n f.write(a[0])\n f.write(\":\")\n f.write(data.decode(\"utf-8\"))\n f.close()\ns.close()\n"
},
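A hypothetical IPv6 client for the server above, not part of the repo (the loopback address and port mirror the server's constants; the message is arbitrary):

```python
from socket import socket, AF_INET6, SOCK_STREAM

s = socket(AF_INET6, SOCK_STREAM)
s.connect(('::1', 9898))             # IPv6 loopback, same port as the server
s.send(b'hello from the v6 client')
s.close()
```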
{
"alpha_fraction": 0.5796403288841248,
"alphanum_fraction": 0.5908799171447754,
"avg_line_length": 25.60683822631836,
"blob_id": "eb76869602620bb6070bee8018d09b617182a26d",
"content_id": "81a030b2b7af39834f173336c067406c3c7ac25a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3114,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 117,
"path": "/Practice_Test/net-client-session.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport socket\nimport random\nfrom random import randint\nfrom socket import AF_INET, socket, SOCK_STREAM\nimport re\n\n'''\n The objective of this task is to match the session key provided by\n a webserver running on port 5000.\n\n Create a connection to the server using the IP and port number (5000).\n Once connected, you will recieve a message back from the server. ie. \"SERVER>>> Connection successful\"\n You will then recieve a hex value from the server.\n You will need to \"decrypt\" that hex value by XOR'ing it with the appropriate key from the session_keys array\n The decrypted value is going to match one of keys from the server XOR'ed with a random hex number\n You will then send that \"decrypted\" value back to the server to see if it matches the hex value generated by the server\n'''\n\nserverName = \"\"\nserverPort = 5000\n\nsession_keys = [\"aa072fbbf5f408d81c78033dd560d4f6\",\n \"bb072fbbf5f408d81c78033dd560f6d4\",\n \"f5072fbbf5f408d81c78033dd5f6d460\",\n \"408df5072fbbf5f81c3dd5f6d4607803\",\n \"dd5f408df5072fbbfc36d46078035f81\",\n \"c36d408df5072fbbf46078035f81dd5f\",\n \"35f8c36df5072fbbf4607801dd5fd408\",\n \"2f07aaf408d81c78033dd560d4f6bbf5\",\n \"80332ff408d81c7dd560d4f6bbf507aa\",\n \"560d4f8033281c7dd6bbf507aaff408d\",\n ]\n\n# step 1: create socket\n\n# Setting buffer size for socket\nBUFSIZE = 1024\n\nprint(\"Attempting connection\")\nsock = socket(AF_INET, SOCK_STREAM)\naddr = (serverName, serverPort)\nsock.connect(addr)\n\ndef receive(sock):\n while True:\n try:\n msg = sock.recv(BUFSIZE)\n return msg\n except OSError:\n break\n\nmsg = receive(sock)\nif (msg.decode(\"utf-8\") == \"SERVER>>> Connection successful\"):\n encoded = receive(sock).decode(\"utf-8\")\n\nprint(encoded) # see the value of server msg\nprint(int(encoded,16))\n\n#setting empty string to store server response\nresponse = \"\"\n\ni=0\nwhile True:\n\n decoded = hex(int(session_keys[i], 16)^int(encoded,16))\n print(\"Trying key {}\".format(session_keys[i]))\n # print(type(decoded.encode()))\n # print(decoded)\n # print(int(decoded,16))\n\n sock.send(decoded.encode())\n ## Recv key match status\n response = receive(sock)\n if (\"Success! You found the key:\" in response.decode()):\n successMessage = response.decode()\n print(\"Success! You found the key!\")\n break\n\n if ('INVALID' in response.decode()):\n print(\"Invalid Key\\n\")\n \n ## Connection success message\n response = receive(sock)\n if ('Connection Successful' in response.decode()):\n print(\"Next Try..\")\n encoded = \"\"\n while (\"0x\" not in encoded):\n encoded = receive(sock).decode(\"utf-8\")\n i += 1\n if (i % 3 == 0):\n sock.close()\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect(addr)\n if (i == len(session_keys)):\n i=0\n\n\n'''\n Message recieved from server:\n Success = \"SUCCESS, you found the key\"\n Incorrect = \"INVALID KEY\"\n'''\n\n\n\n\n\n\n\n\n# ======= SAVE MESSAGE TO 'successMessage'. DO NOT CHANGE!!! ===========\nf = open('unittest.xml', 'w')\nsuccessMessage = re.findall(\"[/<].*\", successMessage)\nprint(\"\\n\".join(successMessage), file=f)\nf.close()\n# ===========================================================================\n\n"
},
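The "decryption" step in the client above relies on XOR being its own inverse: if the server computed challenge = key ^ nonce, then challenge ^ key recovers the nonce. A tiny self-contained illustration (the nonce value here is made up):

```python
# XOR is self-inverse: (k ^ n) ^ k == n
key = int("aa072fbbf5f408d81c78033dd560d4f6", 16)   # first key from the list above
nonce = 0x1234_5678          # stand-in for the server's random hex number
challenge = key ^ nonce      # what the server would send
assert challenge ^ key == nonce
print(hex(challenge ^ key))  # 0x12345678
```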
{
"alpha_fraction": 0.5910652875900269,
"alphanum_fraction": 0.5910652875900269,
"avg_line_length": 23.982759475708008,
"blob_id": "694a74c455a679279c66ec738c64a9181cc6a9c2",
"content_id": "9016e9808b2a720b06a8614242dec13a7298fd89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 58,
"path": "/LABS/Classes/employees.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Employee:\n # Initializing Mammal object attributes\n def __init__(self, name, number):\n self.__name = name\n self.__number = number\n\n def set_name(self, name):\n self.__name = name\n\n def set_number(self, number):\n self.__number = number\n\n def get_name(self):\n return self.__name\n\n def get_number(self):\n return self.__number\n\nclass ProductionWorker(Employee):\n # Initializing ProductionWorker subclass object attributes\n def __init__(self, name, number, shift, pay_rate):\n super().__init__(name, number)\n\n self.__shift = shift\n self.__pay_rate = pay_rate\n \n def set_shift(self, shift):\n self.__shift = shift\n\n def set_pay_rate(self, pay_rate):\n self.__pay_rate = pay_rate \n \n def get_shift(self):\n return self.__shift\n\n def get_pay_rate(self):\n return self.__pay_rate\n\n\nclass ShiftSupervisor(Employee):\n # Initializing ProductionWorker subclass object attributes\n def __init__(self, name, number, salary, prod_bonus):\n super().__init__(name, number)\n\n self.__salary = salary\n self.__prod_bonus = prod_bonus\n\n def set_salary(self, salary):\n self.__salary = salary\n\n def set_prod_bonus(self, prod_bonus):\n self.__prod_bonus = prod_bonus \n \n def get_salary(self):\n return self.__salary\n\n def get_prod_bonus(self):\n return self.__prod_bonus\n \n\n"
},
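A brief usage sketch for the classes above (the names and figures are made up; assumes the classes are in scope or imported from employees):

```python
worker = ProductionWorker('Ada Lovelace', 1001, shift=2, pay_rate=18.50)
print(worker.get_name(), worker.get_shift(), worker.get_pay_rate())

boss = ShiftSupervisor('Grace Hopper', 2001, salary=52000, prod_bonus=3000)
print(boss.get_name(), boss.get_salary(), boss.get_prod_bonus())
```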
{
"alpha_fraction": 0.6658565998077393,
"alphanum_fraction": 0.6707168817520142,
"avg_line_length": 21.88888931274414,
"blob_id": "da26cf585062e4b3e633f66ea8d900e9cb57a21e",
"content_id": "1d6b96839e421302858ff654a6e6e07af1a29557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 823,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 36,
"path": "/LABS/Multiprocessing/test_multiprocessing3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\npass in an argument\n\nunlike threading, arguments must be passed in with pickle\narguments have to be serialized, convert the python object to a format that can be deconstructed and reconstructed \n\n\n\"\"\"\n\nimport multiprocessing\nimport time\n\nstart = time.perf_counter()\n\n# now our function accepts a number of seconds\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s)...')\n time.sleep(seconds)\n print('Done Sleeping...')\n\nif __name__ == \"__main__\":\n # create list so we can join all the processes\n processes = []\n\n for _ in range(10):\n p = multiprocessing.Process(target=do_something, args=[1.5])\n p.start()\n processes.append(p)\n\n for process in processes:\n process.join()\n\n\n finish = time.perf_counter()\n\n print(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.6200589537620544,
"alphanum_fraction": 0.6246010065078735,
"avg_line_length": 34.88546371459961,
"blob_id": "87f81c3ccfd98d0ad97a3140a3c77dbfd1263f95",
"content_id": "3bb6574fdffbf86bbed8c23cf318812c69ff1219",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8164,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 227,
"path": "/LABS/Projects/hangman.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import random\nimport pickle\nfrom os import path\n\nprint('Let\\'s play Hangman! There is a word that you must \\nguess by guessing letters one at a time. If the\\n letter is present is will be populated. If not,\\n then your person will start to be hung. \\n 6 wrong questions and it\\'s GAME OVER!')\n\ndef main():\n usedAlpha = []\n death = 0\n word = getWord()\n answer = splitWord(word)\n guess = initGuess(word)\n state = False\n compAlpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n gameModes = ['c', 'i']\n mode = 'g'\n hang = loadHang()\n while mode.lower() not in gameModes:\n mode = input('Which mode would you prefer:\\n- (C)omputer Solo Mode\\n- (I)nteractive Mode\\n')\n if mode == 'i': \n name = input('Please provide your name to track your scores:\\n')\n scoreData = getProfile(name.lower())\n deathState(hang, death)\n while state == False:\n print('You have {} wrong guesses remaining.'.format(6 - death))\n death += userGuess(usedAlpha, answer, guess)\n deathState(hang, death)\n state = checkWin(death, answer, guess, state)\n updateScores(scoreData, name, death)\n elif mode == 'c':\n while state == False:\n print('Computer has {} wrong guesses remaining.'.format(6 - death))\n death += compGuess(usedAlpha, answer, guess, compAlpha)\n deathState(hang, death)\n state = checkWin(death, answer, guess, state)\n\n\ndef getWord():\n with open(\"wordbank.txt\", encoding = 'utf-8') as bank:\n words = bank.readlines()\n word = (words[random.randint(0,len(words)-1)]).lower().replace('\\n','')\n bank.close()\n return word.lower()\n\ndef splitWord(word):\n answer = []\n for i in word:\n answer.append(i)\n return answer\n\ndef initGuess(word):\n guess = []\n for i in word:\n guess.append('_')\n return guess\n\ndef userGuess(usedAlpha, answer, guess):\n hit = 0\n for i in guess:\n print(i ,end=' ')\n print('\\n')\n print('Letters used: ', usedAlpha)\n letter = input('Please guess a letter:\\n')\n while letter.isalpha() == False or letter.lower() in usedAlpha:\n letter = input('Please guess a letter:\\n')\n print('Letters used: ', usedAlpha)\n usedAlpha.append(letter.lower())\n if letter in answer:\n positions = getIndexPositions(answer, letter)\n for i in positions:\n #replaces the existing element with the marker\n guess[i] = letter.lower()\n print('Yes!')\n else:\n hit = 1\n print('Nope!')\n return hit\n\ndef checkWin(death, answer, guess, state):\n state = False\n if death == 6:\n print('GAME OVER! YOU DIED!')\n state = True\n elif answer == guess:\n print('GAME OVER! 
YOU WIN!')\n state = True\n else:\n state = False\n return state\n\ndef getIndexPositions(listOfElements, element):\n ''' Returns the indexes of all occurrences of give element in\n the list- listOfElements '''\n indexPosList = []\n indexPos = 0\n while True:\n try:\n # Search for item in list from indexPos to the end of list\n indexPos = listOfElements.index(element, indexPos)\n # Add the index position in list\n indexPosList.append(indexPos)\n indexPos += 1\n except ValueError as e:\n break\n return indexPosList\n\ndef compSelect(compAlpha,usedAlpha):\n compIndex = random.randint(0,len(compAlpha)-1)\n compChoice = compAlpha[compIndex]\n print('The computer chooses \\'{}\\'....'.format(compChoice))\n compAlpha.remove(compChoice)\n return compChoice\n\ndef compGuess(usedAlpha, answer, guess, compAlpha):\n hit = 0\n for i in guess:\n print(i ,end=' ')\n print('\\n')\n print('Letters used: ', usedAlpha)\n letter = compSelect(compAlpha, usedAlpha)\n usedAlpha.append(letter.lower())\n if letter in answer:\n positions = getIndexPositions(answer, letter)\n for i in positions:\n #replaces the existing element with the marker\n guess[i] = letter.lower()\n print('Yes!')\n else:\n hit = 1\n print('Nope!')\n return hit\n\ndef getProfile(name):\n scoreFile = 'scores.dat'\n scoreData = {}\n if path.exists(scoreFile):\n with open(scoreFile, 'rb') as scores:\n end_of_file = False\n while end_of_file == False:\n try:\n #unpickle next object\n scoreData = pickle.load(scores)\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n print('Your score history:\\n{}\\n'.format(scoreData[name]))\n else:\n scoreData = {name: {'win':0, 'loss':0}}\n with open(scoreFile, 'wb') as scores:\n pickle.dump(scoreData, scores) \n \n if name.lower() not in scoreData:\n scoreData.update({name: {'win':0, 'loss':0}}) \n \n return scoreData\n\ndef updateScores(scoreData, name, death):\n scoreFile = 'scores.dat'\n if death >= 6:\n scoreData[name]['loss'] += 1\n else:\n scoreData[name]['loss'] += 1\n with open(scoreFile, 'wb') as scores:\n pickle.dump(scoreData, scores)\n \ndef loadHang():\n filename = 'death.txt'\n with open(filename, 'r') as hang:\n image = hang.readlines()\n return image\n\ndef deathState(hang, death):\n start = death * 6\n stop = start + 6\n step = 1\n for i in range(start,stop,step):\n print(hang[i].rstrip('\\n'))\n print('\\n')\nmain()\n\n\n\"\"\"\nTask:\n Your task is to implement the Hangman game in Python.\n\nProgram Specifications:\n 1) Output a brief description of the game of hangman and how to play\n 2) Ask the user to enter the word or phrase that will be guessed (have a friend enter the phrase \n for you if you want to be surprised)\n 3) Output the appropriate number of dashes and spaces to represent the phrase (make sure it’s clear \n how many letters are in each word and how many words there are)\n 4) Continuously read guesses of a letter from the user and fill in the corresponding blanks if the \n letter is in the word, otherwise report that the user has made an incorrect guess.\n 5) Each turn you will display the phrase as dashes but with any already guessed letters filled in, \n as well as which letters have been incorrectly guessed so far and how many guesses the user has remaining.\n 6) Your program should allow the user to make a total of k=6 guesses.\n\nAssignment Notes:\nIf the letter has already been guessed, output a message to the player and ask for input again.\nIf the guess entered is not an alphabetic letter, output a message and ask for input again.\n\nIf the 
letter is present in the word to be guessed, fill in the blanks appropriately with this particular letter. \nIf the complete name has been guessed, the game is over - player wins the game. Output a message telling the \nplayer they have won and quit the game.\n\nIf the letter/digit is not present in the word to be guessed, give a message to the player indicating that the \nguess is incorrect and remaining number of chances is one less. If remaining number of chances is 0 (zero), \nthe game is over - player loses the game. Output a message that they have lost and what the correct word was. Quit the game.\n\nBonus:\n You can do one or both of the following:\n\n 1) Using a file:\n Instead of asking for user input for the word, make a word bank in a file named hangman_words.txt. \n Read in the contents of the file and choose a word at random.\n\n 2) Forever alone option:\n You enter the word (or it is randomly chosen from the word bank) and have the computer try to guess the letters.\n\n 3) Add some more functionality: \n - Persist user profiles with scores\n - Prompt for which user is playing\n - Ask if the user wants to play a set of games\n - Build a leader board\n \n Have fun, get creative, and demonstrate what you've come up with.\n\"\"\"\n"
},
{
"alpha_fraction": 0.6618704795837402,
"alphanum_fraction": 0.6868610382080078,
"avg_line_length": 30.452381134033203,
"blob_id": "a853437191c8a2a9ebde76221d568523d23c9691",
"content_id": "69a5066e6990e1ab41b07a762e7b9f81e050443f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2641,
"license_type": "no_license",
"max_line_length": 435,
"num_lines": 84,
"path": "/03_Flow_Control/08_recursion.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "<a href=\"https://github.com/CyberTrainingUSAF/07-Python-Programming/blob/master/00-Table-of-Contents.md\" rel=\"Return to TOC\"> Return to TOC </a>\n\n# Recursion\n\nRecursion is a function that calls itself. In other words a function will continue to call itself until a certain condition is met. Any problem that can be solved by recursion can be solved by using a loop. We mentioned before that it is almost always preferential to use a loop due to overhead. However, you may want to use recursion when you have a problem that needs to be broken into smaller similar problems and solve them first. \n\n### Base case and recursive case\n\n**Base Case**\n\nIf the problem can be solved now, then the function solves it and returns\n\n**Recursive Case**\n\nIf the problem cannot be solved now, then the function reduces it to smaller similar problems and calls itself to solve the smaller problem.\n\n#### Recursion to find factorial of a number\n\nThe best way to understand is to dive in. Lets take a look at solving for n!. As a quick reminder n! is defined as:\n\n```text\n n! = n x (n-1) x (n-2) x (n-3) x...x 3 x 2 x 1\n 4! = 4 x 3 x 2 x 1 = 24\n \nBase case:\n If n = 0 then n! = 1\nor If n = 0 then factorial(n) = 1\n\nRecursive case:\n If n > 0 then n! = 1 x 2 x 3 x ... n\nor If n > 0 then factorial(n) = n x factorial(n-1)\n```\nNow lets translate to Python:\n\n```python\ndef recursive_factorial(n):\n #base case\n if n == 0:\n return 1\n #recursive case\n else:\n return n * recursive_factorial(n-1)\n```\n\nBe sure that every time you write a recursive function that you can control the number of times it will be executed.\n\n```python\n# Endless recursion\ndef forever_recursion():\n annoying_message()\n\ndef annoying_message():\n print('Nudge Nudge, Wink Wink, Say No More Say No More')\n message()\n```\n\n\n\n#### Recursion for the Fibonacci Series\n\nA very common way to present recursion is by writing a program to calculate Fibonacci numbers. After the second number, every number in the series is the sum of the two previous numbers. The sequence is the following:\n0,1,1,2,3,5,8,13,21,34,55,89,144,233,...\n\n```text\nA recursive function can have multiple base cases. Both of them return a value without making a recursive call.\n\nBase case:\n If n = 0 then fibonacci(n) = 0\n If n = 1 then fibonacci(n) = 1\n\nRecursive case:\n If n > 1 then fibonacci(n) = fibonacci(n-1) + fibonacci(n-2)\n```\nNow lets translate to Python:\n\n```python\ndef fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)\n```"
},
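The naive fibonacci above recomputes the same subproblems exponentially many times. A common remedy, not covered in the lesson itself, is memoization via functools.lru_cache:

```python
from functools import lru_cache

@lru_cache(maxsize=None)    # cache every result, so each n is computed once
def fibonacci(n):
    if n < 2:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)

print(fibonacci(90))  # returns instantly; the uncached version would not finish
```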
{
"alpha_fraction": 0.5863747000694275,
"alphanum_fraction": 0.5944849848747253,
"avg_line_length": 24.66666603088379,
"blob_id": "3cd4e6314a8d27fa780f45e5e2ac14bb4e0eb6da",
"content_id": "6840c78cd4c5bb4dbb6b0364cd65155e56fb03c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1233,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 48,
"path": "/LABS/Socket/tsTserv.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom socket import *\nfrom time import ctime\nimport pickle\nimport os\n\ndef oscommand(data, client):\n if data.decode() == 'date':\n result = pickle.dumps(ctime())\n client.send(result)\n elif data.decode() == 'os':\n result = pickle.dumps(os.name)\n client.send(result)\n elif data.decode() == 'ls':\n result = pickle.dumps(os.listdir())\n client.send(result)\n elif data.decode().lower() == 'exit':\n return 'exit'\n else:\n client.send(b'I would love to have a conversation, but I am not equipped for that. Please try \\'os\\', \\'ls\\', or \\'date\\'')\n \n\nHOST = ''\nPORT = 21567\nBUFSIZ = 2048\nADDR = (HOST, PORT)\n\ntcpSerSock = socket(AF_INET, SOCK_STREAM)\ntcpSerSock.bind(ADDR)\ntcpSerSock.listen(5)\n\nwhile True:\n print('waiting for connection...')\n tcpCliSock, addr = tcpSerSock.accept()\n print('...connected from:', addr)\n\n while True:\n data = tcpCliSock.recv(BUFSIZ)\n print(type(data))\n if not data:\n break\n #tcpCliSock.send(('[%s] %s' % (ctime(), data)).encode())\n action = oscommand(data, tcpCliSock)\n if action == 'exit':\n exit()\n tcpCliSock.close()\ntcpSerSock.close()\n\n"
},
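A hypothetical client for the server above, not part of the repo, that sends one of the supported commands and unpickles the reply (host, port, and buffer size mirror the server's constants):

```python
import pickle
from socket import socket, AF_INET, SOCK_STREAM

sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('localhost', 21567))
sock.send(b'ls')                     # 'date', 'os', and 'ls' replies are pickled
print(pickle.loads(sock.recv(2048)))
sock.close()
```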
{
"alpha_fraction": 0.557692289352417,
"alphanum_fraction": 0.5769230723381042,
"avg_line_length": 15.076923370361328,
"blob_id": "a4ab9dc50dc73fa73b4151b40a667a110748d787",
"content_id": "f41e1f3b732a137f2b9f1eeaaad73a001be78611",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 13,
"path": "/LABS/Socket/clientU.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import *\n\ndef main():\n host = 'localhost'\n\n sock = socket(AF_INET, SOCK_DGRAM)\n addr = (host,9898)\n \n msg = b\"What happened?!?\\n\"\n sock.sendto(msg, addr)\n sock.close()\n\nmain()"
},
{
"alpha_fraction": 0.6378896832466125,
"alphanum_fraction": 0.6378896832466125,
"avg_line_length": 18,
"blob_id": "336903ab17bd8b2c47f6a1387a1c4b21c48c6b13",
"content_id": "3eca45592c2aa3e107421333c4c89ca3abd9313e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 22,
"path": "/LABS/Classes/coin_arg.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program passes a Coin object as\n# an argument to a function\nimport coin\n\n#main function\ndef main():\n # create a coin object\n my_coin = coin.Coin()\n\n # This might display 'Heads' or 'Tails'\n print(my_coin.get_sideup())\n \n # This will display 'Heads'\n flip(my_coin)\n\n # This might display 'Heads' or 'Tails'\n print(my_coin.get_sideup())\n\ndef flip(coin_obj):\n coin_obj.toss()\n\nmain()"
},
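The demo above imports a coin module that is not included in the snippet; a minimal sketch of the class it presumably defines (modeled on the standard textbook version, so treat the details as assumptions):

```python
# coin.py -- hypothetical module assumed by coin_arg.py above.
import random

class Coin:
    def __init__(self):
        self.__sideup = 'Heads'      # a new coin starts heads-up

    def toss(self):
        self.__sideup = random.choice(['Heads', 'Tails'])

    def get_sideup(self):
        return self.__sideup
```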
{
"alpha_fraction": 0.7034883499145508,
"alphanum_fraction": 0.710132896900177,
"avg_line_length": 33.41428756713867,
"blob_id": "3fb9b4fc30a1cb1305623d641f05d007a4b4997a",
"content_id": "34db837c0ad3bb6f7d2b8c1c61ba296f976b139f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2436,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 70,
"path": "/LinkedLists/linked_structures.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n********************LINKED STRUCTURES************************\nA linked structure is a concrete data type that implements many types \nof collections, including lists.\n\nAs the name implies, a linked structure consists of items that are \nlinked to other items. The two that we'll go over are the Singly/Doubly\nLinked structures\n\n- Head Link - the first item of a linked structure\n- Tail Link - an external link in a doubly linked structure to access the last item directly\n- Empty Link - the absence of a link, indicated by the slash in the diagram\n\n- The last item in either structure has no link to a next item\n\n- Node - The basic unit of representation in a linked structure\n - comprised of atwo items\n - A Data Item\n - A link to the next node in the structure\n\nIn python we set up nodes and linked structures by using references to object\n\n\"\"\"\n\n\"\"\"\n1. Using box and pointer notation, draw a picture of the nodes created by the \nfirst loop in the tester program. \n\nHEAD -> D1 |BOX| -> D2 |BOX| -> D3 |BOX| -> D4 |BOX| -> D5 |\\| \n\n2. What happens when a programmer attempts to access a node’s data fields when \nthe node variable refers to None? How do you guard against it?\nYou could __data upon initialization and create specific methods for accessing \nand mutating the data you need\n\n3. Write a code segment that transfers items from a full array to a singly linked \nstructure. The operation should preserve the ordering of the items.\n\n\"\"\"\n\n\"\"\"\nCore Exercises:\n\n 1. Finish out your doubly and circular linked list to add more functionality\n - prepend\n - insert\n - delete\n - print\n\n 2. Implement a swap_node method to singly and doubly.\n\n 3. Implement a reverse method to singly and doubly.\n\n 4. Modify delete to find the data you want to delete rather than an index.\n Modify delete to take in either an index or data.\n\n 5. Implement a count_ocurrences method.\n\n 6. Create a new file and modify your code to have DoublyLinkedList inherit from your \n SinglyLinkedList class.\n\nExtras Exercises:\n\n 7. Define a length function that returns the number of items in your linked structure.\n\n 8. Define a function makeDoubly that expects a singly linked structure as its argument. The \n function builds and returns a doubly linked structure that contains the items in the singly\n linked structure. \n\n\"\"\""
},
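A minimal sketch of the node structure the notes describe (a data item plus a link, with the empty link ending the chain):

```python
class Node:
    def __init__(self, data, next=None):
        self.data = data    # the data item
        self.next = next    # link to the next node (None = the empty link)

# Build D1 -> D2 -> D3 and traverse from the head link
head = Node('D1', Node('D2', Node('D3')))
probe = head
while probe is not None:    # the empty link terminates the traversal
    print(probe.data)
    probe = probe.next
```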
{
"alpha_fraction": 0.5997521877288818,
"alphanum_fraction": 0.6226765513420105,
"avg_line_length": 39.337501525878906,
"blob_id": "c132c83a657139f46ca9ec21eec29b711f4c8cde",
"content_id": "1b03b828aee6e2d4c93a705720c88c634be4bb86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3228,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 80,
"path": "/Algorithms/complexity _analysis.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# *****************COMPLEXITY ANALYSIS*****************\n# Complexity analysis - a method of determining the efficiency of \n# algorithms that allows you to rate them \n# independently of platform-dependent timings \n# or impractical instructional counts\n\n# Order of complexity - the difference in performance of your algorithms\n\n\n\n# linear - 'n' = our problem size (directly proportional growth) \n# quadratic - n^2 = a nested loop of our problem size (grows as a function \n# of the square of the problem size)\n# logarithmic - proportional to log base 2 of the problem size log 2 n = is \n# more efficient than n^2 or 2^n\n# A logrithm is the power to which a number must be raised to be equal \n# to another number (recursion)\n\n# polynomial time algorithm - grows at a rate of n raised to the k (in same \n# category as quadratic as far as order)\n\n# exponential - (worst performance) grows at a rate of 2^n\n\n# *****************Big-O Notation*****************\n# dominant - the amount of work in an algorithm that becomes so large that \n# you can ignore the amount of work represented by the other terms\n# (1/2)n^2-(1/2)n \n# n^2 is the dominant term\n# (1/2) - a constant of proportionality\n\n# Represented in Big-O:\n# O(n^2)\n\n# Constant O(1)\n# - Time taken is independent of the amount of data\n# - Stack push, pop and peek; Queue, dequeue and enqueue; insert a node \n# into a linked list\n\n# Linear O(n)\n# - Time take is directly proportional to the amount of data\n# - Linear search; Count items in a list; compare a pair of strings\n\n# Quadratic O(n^2)\n# - Time taken is proportional to the amount of data squared; twice as \n# much data takes 4x time to process; 3x takes 9x\n# - Bubble sort; selection sort; insertion sort; Traverse a 2D array\n\n# Polynomial O(n^k)\n# - Time taken is proportional to the amount of data raised to the power\n# of a constant \n# - \n\n# Logorithmic O(log n)\n# - Time taken is proportional to the logarithm of the amount of data,\n# good scalability\n# -Binary search a sorted list; Search a binary tree; Divide and Conquer\n# algorithm approaches\n\n# Linearithmic O(n log n)\n# - Time taken is proportional to the logarithm of the amount of data, \n# multiplied by the amount of data \n\n# Exponential O(k^n)\n# - Time taken is proportional to a constant raised to the power of the\n# amount of data, very poor scalability almost immediately\n# - If constant k is 10, then one extra item of data will slow it down\n# by 10 times \n\n# Logarithms - The inverse of exponentiation\n# Generally - \n# x^z = y log base x of y = z\n# 2^0 = 1 log base 2 of 1 = 0\n# 2^1 = 2 log base 2 of 2 = 1\n# 2^2 = 4 log base 2 of 4 = 2\n# 2^3 = 8 log base 2 of 8 = 3\n# 2^4 = 16 log base 2 of 16 = 4\n# 10^4 = 10000 log base 10 of 10000 = 4\n\n# notice that each # that we're calculating the log of is twice as \n# much as the previous #, but each log is only 1 bigger than the previous value\n\n"
},
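A rough empirical check of the growth rates the notes describe: double the problem size and compare operation counts for linear versus quadratic work (the counting loops here are only illustrative):

```python
def linear_ops(n):
    return sum(1 for _ in range(n))             # n operations

def quadratic_ops(n):
    return sum(1 for _ in range(n) for _ in range(n))  # n^2 operations

for n in (100, 200, 400):
    print(n, linear_ops(n), quadratic_ops(n))
# linear counts double with n; quadratic counts quadruple.
```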
{
"alpha_fraction": 0.6582597494125366,
"alphanum_fraction": 0.6620428562164307,
"avg_line_length": 32.08333206176758,
"blob_id": "3aa1818dea035fe68565111c1f3ae1b309efb3a9",
"content_id": "0e71f2dea8b3a20e7cf7061d63443fcadbf17e02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 793,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 24,
"path": "/LABS/Labs-6-1/lab6-1-speedDistance.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Speed and Distance given time\n#Define main function\ndef main():\n print('Your expected distance to cover is: {:.2f} miles'.format(calcDistance(getSpeed(),getTime())))\n\ndef getSpeed():\n speed = input('Provide the speed in MPH:\\n')\n #Input validation checking numeric and a positive number\n while float(speed) <= 0 and speed.replace('.','').isnumeric() == False:\n speed = input('Invalid input. Provide the speed:\\n')\n return float(speed)\n\ndef getTime():\n time = input('Provide the time in hours:\\n')\n #Input validation checking numeric and a positive number\n while float(time) <= 0 and time.replace('.','').isnumeric() == False:\n time = input('Invalid input. Provide the time:\\n')\n return float(time)\n\ndef calcDistance(s,t):\n return s * t\n \n\nmain()"
},
{
"alpha_fraction": 0.5743243098258972,
"alphanum_fraction": 0.5743243098258972,
"avg_line_length": 18.799999237060547,
"blob_id": "9e4ffe5968829b2670c8b42c813f5464ed30bb95",
"content_id": "4a82d05bbad0149218306299d0eb8e975bfc81ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 15,
"path": "/LABS/FILE-IO/displayFile.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n filename = input('Enter the filename: \\n')\n\n try:\n infile = open(filename, 'r')\n\n contents = infile.read()\n\n print(contents)\n\n infile.close\n except IOError:\n print('An error occurred trying to read the file')\n print(filename)\nmain()"
},
{
"alpha_fraction": 0.7057728171348572,
"alphanum_fraction": 0.7225325703620911,
"avg_line_length": 20.479999542236328,
"blob_id": "067c97d97c2eee1f0f3bdc6a8bd1865152de1bff",
"content_id": "6f5efcc899e6c88ece2be95898c38996019cb1e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 25,
"path": "/05_oop/lab5a.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Lab 5A\n\n### Instructions\n\nUsing your calculator you created from Lab4A, split up the functionality into modules and utilize packaging. Some things you could split up:\n\n* The user menu into it's own module on a higher level package\n* Operations into one module, lower level\n* Algorithms into one module, lower level\n* etc\n\n### Requirements\n\n* All requirements from Lab4A\n* Utilize clean and proper dir and module names\n\n---\n\n|[Next Topic](/05_oop/03a_user_classes.md)|\n|---|\n"
},
{
"alpha_fraction": 0.5856481194496155,
"alphanum_fraction": 0.6053240895271301,
"avg_line_length": 26.870967864990234,
"blob_id": "690bfc5ad4081aa138579e0e8be49e1b74513e7f",
"content_id": "5991028f65beb95b19e7b76092a0fadaa498e7f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 31,
"path": "/LABS/List-Tuple/list-tuple-numAnalysis.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining the maximum number of numbers\nCAP = 20\n\n#define main\ndef main():\n numArray = []\n for i in range(CAP):\n getInput(numArray)\n measureList(numArray)\n print(numArray)\ndef getInput(array):\n userInput = -5000\n while userInput == -5000:\n try:\n userInput = int(input('Please provide an integer value:\\n'))\n array.append(userInput)\n except:\n userInput = -5000\n print('That was not a number.')\n\ndef measureList(array):\n total = 0\n for i in array:\n total += i\n print('Total months of array in data: {}'.format(len(array)))\n print('Total array: {:.2f} inches'.format(total))\n print('Average monthly array: {:.2f}'.format(total/len(array)))\n print('The maximum array was {}'.format(max(array)))\n print('The minimum array was {}'.format(min(array)))\n\nmain()\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6703841090202332,
"avg_line_length": 28.925926208496094,
"blob_id": "79778c6a898eeb976389fc20e3532ba5f9a065bf",
"content_id": "46b670e0f6fcf8dbca124c00931e41b6e2f2abb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/LABS/Classes/account_test.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program demos the BankAccount class\nimport bankaccount\n\ndef main():\n # get the starting balance \n start_bal = float(input('Enter your starting balance:\\n'))\n\n # Instantiate bank account\n savings = bankaccount.BankAccount(start_bal)\n\n #Deposit the users paycheck\n pay = float(input('How much were you paid this week?\\n'))\n print('I will deposit this in your account')\n savings.deposit(pay)\n \n #Display the balance\n print('Your balance is now {:,.2f}.'.format(savings.get_balance()))\n\n #Withdraw cash\n cash = input('How much would you want to withdraw?\\n')\n print('I will with draw {:,.2f} from your account.'.format(cash))\n savings.withdraw(cash)\n \n #Display the balance\n print('Your balance is now {:,.2f}.'.format(savings.get_balance()))\n\nmain()"
},
{
"alpha_fraction": 0.707342267036438,
"alphanum_fraction": 0.7135470509529114,
"avg_line_length": 29.25,
"blob_id": "8538116009ea7b02c9bbe6fef26d0731e804bd00",
"content_id": "3ba7ebe295e88bb3ef9178b622ba0600eae94252",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 967,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 32,
"path": "/LABS/Threading/multithreading4.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nProve that these are actually coming in as they are completed,\n lets pass in a range of seconds\n\nStart 5 second thread first, but since we use as_completed() method it prints\n the results in the order they are completed\n\"\"\"\nimport concurrent.futures\nimport time\n\nstart = time.perf_counter()\n\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s)...')\n time.sleep(seconds)\n return f'Done Sleeping...{seconds}'\n\n# using a context manager\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n\n seconds_list = [5, 4, 3, 2, 1]\n # list comprehension to create multiple threads\n results = [executor.submit(do_something, sec) for sec in seconds_list]\n\n # to get the results we can use another function, as_completed() from future object that \n # gives us an iterator\n for f in concurrent.futures.as_completed(results):\n print(f.result())\n\nfinish = time.perf_counter()\n\nprint(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.5180360674858093,
"alphanum_fraction": 0.5260521173477173,
"avg_line_length": 21.200000762939453,
"blob_id": "95086b016690d12a7b7754288536a90d6670fb78",
"content_id": "7527039158f0fb7b998f1a54fea964e66948f0f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 45,
"path": "/LABS/Classes/car.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Car:\n def __init__(self):\n self.__year_model = ''\n self.__make = ''\n self.__speed = 0\n \n def set_year_model(self): \n self.__year_model = input('What is the year and model of your car? (i.e. 2015 Camaro)\\n')\n \n def set_make(self):\n self.__make = input('What is the make of your car?\\n')\n \n def set_age(self):\n self.__speed = input('What is the current speed? (In MPH)\\n')\n \n def accelerate(self):\n self.__speed += 5\n\n def brake(self):\n self.__speed -= 5\n\n def get_speed(self):\n return self.__speed\n\n def get_year_model(self):\n return self.__year_model\n\n def get_make(self):\n return self.__make\n \n\n\n\"\"\" def main ():\n my_car = Car()\n my_car.set_year_model()\n my_car.set_make()\n \n for i in range(5):\n my_car.accelerate()\n print(f'You are now traveling at {str(my_car.get_speed())} MPH in your {str(my_car.get_year_model())}.')\n\n\n \n\nmain() \"\"\""
},
{
"alpha_fraction": 0.6442952752113342,
"alphanum_fraction": 0.6661073565483093,
"avg_line_length": 21.11111068725586,
"blob_id": "d9f53cffe37cd12c73d87e78cee9fe62636983ba",
"content_id": "7fa086213bd7b5139c02cc9626459ed38f9e08a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 27,
"path": "/LABS/Iteration/reduce_func.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# reduce()\n# Receives two args function and iterable\n# returns a single value\n# good for rolling computation to sequential \n# pairs of values in a list\n\n# Factorial is like factorial 3 is 3 * 2 * 1 = 6\n\nimport functools\n\n# Define our function\ndef mult(x, y):\n print('x = ', x, 'y = ', y)\n return x*y\n\n# Apply reduce to our function\nfact = functools.reduce(mult, range(1,4))\nprint('Factorial of 3 is ', fact)\n\nproduct = 1\nmylist = [1, 2, 3, 4, ]\n\"\"\" for num in mylist:\n product = product * num \"\"\"\n\nfrom functools import reduce\nproduct = reduce((lambda x, y: x*y), mylist)\nprint(product)"
},
{
"alpha_fraction": 0.6397058963775635,
"alphanum_fraction": 0.6647923588752747,
"avg_line_length": 35.69841384887695,
"blob_id": "554c3af8426b6ef495b63fc14b3290370178869d",
"content_id": "c50eaffba5f976b450dc61a887fe62adf3844c4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2312,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 63,
"path": "/Practice_Test/guts_cards.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#This function takes a list of integers and compares each of the element\n# to find three of a kind. Returns True if the calue of each element is equal\ndef isthreeok(hand):\n # If the first card is equal to each of the others then it is three of a\n # kind\n if (hand[0] == hand[1] and hand[0] == hand[2]):\n return True\n else:\n return False\n\n#This function checks for a pair in a list\ndef containsPair(hand):\n # Check to see if any of the elements are equal in an expected 3 element list\n if (hand[0] == hand[1] or hand[0] == hand[2] or hand[1] == hand[2]):\n return True\n else:\n return False\n# This compares the value of the highest card in each of the two hands \ndef compareHigh(hand1, hand2):\n if (max(hand1) > max (hand2)):\n return hand1\n elif (max(hand1) < max (hand2)):\n return hand2\n else:\n\t return hand1\n\n#Function to compare two hands to return the winning hand\ndef findGutsWinner(hand1, hand2):\n # check to ensure each hand has a size of three\n if len(hand1)!=3 or len(hand2)!=3:\n\t#if either hand size != 3 then return the empty list\n return []\n #Checking if both hands are three of a kind\n if (isthreeok(hand1) and isthreeok(hand2)):\n\t# In this case, return the hand with the highest card\n return compareHigh(hand1, hand2)\n #The next two return the hand that is three of a kind if the other is not\n elif (isthreeok(hand1) and not isthreeok(hand2)):\n return hand1\n elif (not isthreeok(hand1) and isthreeok(hand2)):\n return hand2\n # Checking to see if both hands contain a pair\n elif (containsPair(hand1) and containsPair(hand2)):\n # If they both contain pairs return the hand with highest card\n\treturn compareHigh(hand1, hand2)\n # The next two return the hand that has the pair if the other does not\n elif (containsPair(hand1) and not containsPair(hand2)):\n return hand1\n elif (not containsPair(hand1) and containsPair(hand2)):\n return hand2\n #When the hands contain neither three of a kind or a pair, \n #return the hand with the high card\n else:\n return compareHigh(hand1, hand2)\n\n## Manual testing of code\n#def main():\n# hand1 = [2,4]\n# hand2 = [2,1,2]\n# print(findGutsWinner(hand1, hand2))\n# return\n#\n#main()\n"
},
{
"alpha_fraction": 0.7209533452987671,
"alphanum_fraction": 0.7318768501281738,
"avg_line_length": 26.216217041015625,
"blob_id": "76eff331384c13ca0e3b5e13045916095054a5fd",
"content_id": "055849525a256ad5b33bf11e5071c2d755b38bdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1007,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 37,
"path": "/LinkedLists/testnode.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Testing the node class\n\nfrom node import Node\n\nhead = None\n\n# Add five nodes to the beginning of the linked structure\nfor count in range(1, 6):\n head = Node(count, head)\n\n#print the contents of the structure\nwhile head != None:\n print(head.data)\n head = head.next\n\nhead2 = None\nmylist = 'Who Goes There'\nfor i in range(1,len(mylist)+1):\n head2 = Node(mylist[-i], head2)\n\nwhile head2 != None:\n print(head2.data)\n head2 = head2.next\n\"\"\"\n\n- One pointer, head, generates the linked structure. This pointer is \nmanipulated in such a way that the mot recently inserted item is always\n at the beginning of the structure\n- When the data are displayed, they appear in the reverse order of \ntheir insertion\n- Also, the head pointer is reset to the next node, until the head \npointer becomes None. Thus, at the end of this process, the nodes are \neffectively deleted from the linked structure. They are no longer \navailable to the program and are recylcled during the next garbage \ncollection\n\n\"\"\"\n"
},
{
"alpha_fraction": 0.6438356041908264,
"alphanum_fraction": 0.6484017968177795,
"avg_line_length": 25.625,
"blob_id": "5e32414a59327bcbb0d799b51cc1b1a3680c2007",
"content_id": "2e3426c23f91bd7a53530c637f8315ff9151df35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 8,
"path": "/LABS/Socket/json-info.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import json\nimport requests \nurl = 'https://analytics.usa.gov/data/live/realtime.json'\nr = requests.get(url)\ndata = r.json()\nusers = (data['data'][0])\nfor key, value in users.items():\n print(f'{key}: {value}')\n \n\n"
},
{
"alpha_fraction": 0.6361892819404602,
"alphanum_fraction": 0.6413043737411499,
"avg_line_length": 27.454545974731445,
"blob_id": "996cc1d7a92b00197329762397a1224f47f0a34e",
"content_id": "9618ac8d3bb5fcca3cca48ceb563900805cca1dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1564,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 55,
"path": "/LABS/Classes/coin.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Classes\n\n#A class is code that specifies the data attributes and methods for a particular type of object\nimport random\n#The coin Coin class simulates a coin that can be flipped\n\nclass Coin:\n #the __init__ method is present in every class and initializes the sideup data attribute with heads\n def __init__(self):\n self.__sideup = 'Heads'\n\n # The toss method generateds a random number\n # in the range 0-1. If the number\n # is 0, then sideup is set to heads, \n # otherwise, sideup is set to tails\n def toss(self):\n if random.randint(0, 1) == 0:\n self.__sideup = 'Heads'\n else:\n self.__sideup = 'Tails'\n\n # The get_sideup method returns the value\n # referenced by sideup\n def get_sideup(self):\n return self.__sideup \n\n# main function to operate with this class\ndef main():\n #Create an object from the coin class\n my_coin = Coin()\n type(my_coin)\n\n # sideup attribute is not private\n my_coin.__sideup = 'Tails'\n #If we don't want the user to set that variable \n # it needs to be private. Prepend the variable with __\n # It will not error, but it prevents the assignment\n\n #Display the side of the coin that is facing up\n print('This side is up: ', my_coin.get_sideup())\n\n #Toss the coin\n print('I am tossing the coin ....')\n for count in range(10):\n my_coin.toss()\n print(my_coin.get_sideup())\n\n\n #Display the side of the coin that is facing up\n print('This side is up: ', my_coin.get_sideup())\n\n\n#main()\n####\n#More Lecture notes"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6237623691558838,
"avg_line_length": 15.88888931274414,
"blob_id": "970507c3ba4f0bfd3509e14f5b74e2186b9f2d1c",
"content_id": "fd68468eaa887d183f7bf0157faa5cf56aee35df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 18,
"path": "/LABS/List-Tuple/list-tuple-lottery.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import random\n\n#define main\ndef main():\n lotto = []\n selectNum(lotto)\n showNum(lotto)\n\ndef selectNum(lotto):\n for i in range(7):\n lotto.append(random.randint(0,9))\n\ndef showNum(lotto):\n print('Your lottery numbers for this week are:\\n')\n for i in lotto:\n print(i)\n\nmain()"
},
{
"alpha_fraction": 0.641132652759552,
"alphanum_fraction": 0.6526080369949341,
"avg_line_length": 37.130680084228516,
"blob_id": "17eedf9a01658f0de596af22f4d130294710b482",
"content_id": "705d058fde7f89af35d322696a08bb46474ac822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6772,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 176,
"path": "/LABS/PerfExam/Exam_prompt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPython Basics Performance Exam\n\n This exam is open note, open book, and open internet. Feel free to use any resources\n you can (other than someone else) to solve the following problems. Direct collaboration with another\n individual will result in immediate failure and consequences to follow. If you are unsure about \n whether or not you can use a resource please ask me. If you are unsure about any of the prompts I can clarify. \n\n Comments are necessary. \n\n Each problem will weigh the same towards the final grade. 4 Problems at 25% each. \n\n Please send each problem as a .py file separately. Please direct message them to me (Daniel Curran) \n through slack. If there are supporting files for a problem then please send them with the .py file \n as a zipped folder. \n\n You will have 3 hours to complete this exam. If you complete this portion early and I have verified\n I have everything needed to grade your exam then you will be released.\n\n Happy Thanksgiving. \n\n1. \n (Remove text) Write a program that removes all the occurrences of a specified\n string from a text file named pointsOfAuthority.txt. Your program should prompt the user to enter \n a filename and a string to be removed.\n\n Points Of Authority - Linkin Park\n\n Forfeit the game\n Before somebody else\n Takes you out of the frame\n And puts your name to shame\n Cover up your face\n You can't run the race\n The pace is too fast\n You just won't last\n\n You love the way I look at you\n While taking pleasure in the awful things you put me through\n You take away if I give in\n My life, my pride is broken\n\n You like to think you're never wrong\n (You live what you've learned)\n You have to act like you're someone\n (You live what you've learned)\n You want someone to hurt like you\n (You live what you've learned)\n You want to share what you've been through\n (You live what you've learned)\n\n You love the things I say I'll do\n The way I'll hurt myself again just to get back at you\n You take away when I give in\n My life, my pride is broken\n\n You like to think you're never wrong\n (You live what you've learned)\n You have to act like you're someone\n (You live what you've learned)\n You want someone to hurt like you\n (You live what you've learned)\n You want to share what you've been through\n (You live what you've learned)\n\n Forfeit the game\n Before somebody else\n Takes you out of the frame\n And puts your name to shame\n Cover up your face\n You can't run the race\n The pace is too fast\n You just won't last\n\n Forfeit the game\n Before somebody else\n Takes you out of the frame\n And puts your name to shame\n Cover up your face\n You can't run the race\n The pace is too fast\n You just won't last\n\n You like to think you're never wrong\n (You live what you've learned)\n You have to act like you're someone\n (You live what you've learned)\n You want someone to hurt like you\n (You live what you've learned)\n You want to share what you've been through\n (You live what you've learned)\n #Create text file\n #Read in text file to list\n # Display text of file\n #Get user input for line to remove\n #Parse the list\n #track if the input had a match in the file\n # Inform user if line did not exists \n\n\n2.\n (Locate the largest element) Write the following function that returns the location\n of the largest element in a two-dimensional list:\n \n def locateLargest(a):\n The return value is a one-dimensional list that contains two elements. 
These\n two elements indicate the row and column indexes of the largest element in the\n two-dimensional list. Write a test program that prompts the user to enter a \n two-dimensional list and displays the location of the largest element in the list. \n \n Here is a sample run(You don't have to mimic this, this is just a guide):\n\n Enter the number of rows in the list: 3\n Enter a row: 23.5 35 2 10\n Enter a row: 4.5 3 45 3.5\n Enter a row: 35 44 5.5 11.6\n The location of the largest element is at (1,2)\n # This should just be a nested loop that will update the var 'largest' if its bigger than\n # Current largest \n\n3. \n(The Triangle class) Design a class named Triangle that extends the\nGeometricObject class defined below. The Triangle class contains:\n - Three float data fields named side1, side2, and side3 to denote the three\n sides of the triangle.\n - A constructor that creates a triangle with the specified side1, side2, and\n side3 with default values 1.0.\n - The accessor methods for all three data fields.\n - A method named getArea() that returns the area of this triangle.\n - A method named getPerimeter() that returns the perimeter of this triangle.\n - A method named __str__() that returns a string description for the triangle.\n\n\n class GeometricObject:\n def __init__(self, color = \"green\", filled = True):\n self.color = color\n self.filled = filled\n\n def getColor(self):\n return self.color\n\n def setColor(self, color):\n self.color = color\n\n def isFilled(self):\n return self.filled\n\n def setFilled(self, filled):\n self.filled = filled\n \n def toString(self):\n return \"color: \" + self.color + \" and filled: \" + str(self.filled)\n\n\n Write a test program that prompts the user to enter the three sides of the \n triangle, a color, and 1 or 0 to indicate whether the triangle is filled. \n The program should create a Triangle object with these sides and set the \n color and filled properties using the input. The program should display the \n triangle’s area, perimeter, color, and True or False to indicate whether the \n triangle is filled or not.\n # Do this last\n\n4. \n (The sqrt function) Write a program that prints the following table\n using your knowledge of loops and the sqrt function in the math module.\n Make sure your table is neat by using print formatting methods we've learned. \n\n Number Square Root\n 0 0.0000\n 1 1.0000\n 2 1.4142\n ...\n 18 4.2426\n 20 4.4721\n # could use list comprehension and lambda for this after importing math sqrt\n\"\"\""
},
{
"alpha_fraction": 0.5494557619094849,
"alphanum_fraction": 0.5721722841262817,
"avg_line_length": 27.958904266357422,
"blob_id": "bfb3e6492986e7634dc4107015db1aac4e6200ad",
"content_id": "10577f7301c5999ce0ea6ac95e5a8dcd0554418f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2113,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 73,
"path": "/LABS/PerfExam/geometricObj.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from math import sqrt\n\nclass GeometricObject:\n def __init__(self, color = \"green\", filled = True):\n self.color = color\n self.filled = filled\n\n def getColor(self):\n return self.color\n \n def setColor(self, color):\n self.color = color\n\n def isFilled(self):\n return self.filled\n\n def setFilled(self, filled):\n self.filled = filled\n\n def toString(self):\n return \"color: \" + self.color + \" and filled: \" + str(self.filled)\n\nclass Triangle(GeometricObject):\n \n def __init__(self, color = \"green\", filled = True, side1 = 1.0, side2 = 1.0, side3 = 1.0):\n super().__init__(color,filled)\n self.__side1 = side1\n self.__side2 = side2\n self.__side3 = side3\n\n # mutators\n def set_side1(self, side1):\n self.__side1 = side1\n\n def set_side2(self, side2):\n self.__side2 = side2\n \n def set_side3(self, side3):\n self.__side3 = side3 \n\n # Accessors\n def get_side1(self):\n return self.__side1\n\n def get_side2(self):\n return self.__side2\n\n def get_side3(self):\n return self.__side3\n \n # Triangle Math functions \n # Triangle Perimeter\n def getPerimeter(self):\n return self.get_side1()+self.get_side2()+self.get_side3()\n \n # Triangle Area\n def getArea(self):\n p = self.getPerimeter()/2\n area = sqrt(p*(p-self.__side1)*(p-self.__side2)*(p-self.__side3))\n return area\n\n ## __str__ method\n def __str__(self):\n # Converting filled integer value to the appropriate string\n if self.filled == 1:\n filled = 'True'\n else:\n filled = 'False'\n # Checking Triangle validity before returning triangle data to user. \n if self.getArea() == 0:\n return f'A triangle with sides of {self.__side1}, {self.__side2}, and {self.__side3} is not valid. Goodbye!'\n else:\n return f'\\nTriangle sides:\\n{self.__side1}, {self.__side2}, {self.__side3}\\n\\nPerimeter: {self.getPerimeter()}\\nArea: {self.getArea()}\\n\\nColor: {self.color}\\nFilled: {filled}'"
},
{
"alpha_fraction": 0.6235294342041016,
"alphanum_fraction": 0.6294117569923401,
"avg_line_length": 20.25,
"blob_id": "67bf23c923dcc53d44f252c69cc01b01c8d6ec11",
"content_id": "2af8c8c48c8d4b7c8fd424f7b6d4ea3ca985abac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 16,
"path": "/LABS/regex/regex-prac-code.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. Recognize the following strings: “bat,” “bit,” “but,” “hat,”\n“hit,” or “hut.”\n\"\"\"\nstring = \"\"\"\n1. Recognize the following strings: “bat,” “bit,” “but,” “hat,”\n“hit,” or “hut.”\n\"\"\"\n\nimport re\npattern = re.compile(r'[HB].t', re.IGNORECASE)\nmatches = pattern.findall(string)\n\nprint(type(matches))\nfor match in matches:\n print(match)\n"
},
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.6691176295280457,
"avg_line_length": 24.952381134033203,
"blob_id": "4cb43bcde748fc99ac612c4ef92b7878d2183d7b",
"content_id": "3502e6538fd7ae951f9e2d0cd1f61f025c8c497b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/LABS/Socket/example-chall-cli.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\nHOSTNAME = 'localhost' # on same host\nPORTNUMBER = 11267 # same port number\nBUFFER = 80 # size of the buffer\n\nDEALER = (HOSTNAME, PORTNUMBER)\nPLAYER = Socket(AF_INET, SOCK_STREAM)\nPLAYER.connect(DEALER)\n\nprint('player is ready to guess')\nwhile True:\n GUESS = input('Give number : ')\n PLAYER.send(GUESS.encode())\n ANSWER = PLAYER.recv(BUFFER).decode()\n print('>', ANSWER)\n if ANSWER == 'found the secret':\n break\n\nPLAYER.close()"
},
{
"alpha_fraction": 0.5827814340591431,
"alphanum_fraction": 0.6070640087127686,
"avg_line_length": 24.22222137451172,
"blob_id": "7d98bd2fd04350fe32eb01b62a271deadf7aa638",
"content_id": "c75f6a64f546361d6396e8a44bc528267272b3cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 18,
"path": "/Algorithms/counting.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This prints number of iterations for problem sizes that double\n# using a single loop\n\nproblemSize = 1000\n\nfor count in range(5):\n # Accumulator for the number of instructions \n number = 0\n # Start of Algorithm\n work = 1\n for j in range(problemSize):\n for k in range(problemSize):\n number += 1\n work += 1\n work -= 1\n #End of Algorithm\n print(f'{problemSize} - {number}')\n problemSize *= 2"
},
{
"alpha_fraction": 0.6429980397224426,
"alphanum_fraction": 0.6568047404289246,
"avg_line_length": 23.190475463867188,
"blob_id": "8816383abcad8e4e61a0020f92a496a89a66b497",
"content_id": "c4f6a5583c98a1fbf76bbce113d2c2a0bd853a5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/LABS/Socket/login-client.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\nHOSTNAME = 'localhost' # on same host\nPORTNUMBER = 11267 # same port number\nBUFFER = 80 # size of the buffer\n\nSVR = (HOSTNAME, PORTNUMBER)\nCLT = Socket(AF_INET, SOCK_STREAM)\nCLT.connect(SVR)\n\nprint('player is ready to guess')\nwhile True:\n GUESS = input('Password: ')\n CLT.send(GUESS.encode())\n ANSWER = CLT.recv(BUFFER).decode()\n print('>', ANSWER)\n if 'GOLD' in ANSWER:\n break\n\nCLT.close()"
},
{
"alpha_fraction": 0.49586373567581177,
"alphanum_fraction": 0.49975669384002686,
"avg_line_length": 28.797101974487305,
"blob_id": "cc5deada8b7b06889e0eede334c3c6ae42e0aad6",
"content_id": "7bce968141a707c29515d21dd9d3f8c85e8709e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2055,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 69,
"path": "/LABS/PythonBasicsExam/revlinked.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self, data, next = None, prev = None):\n self.data = data\n self.next = next\n self.prev = prev\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n\n # Two cases, empty list and list with items\n def append(self, data):\n newNode = Node(data)\n if self.head is None:\n newNode.prev = None\n self.head = newNode \n else:\n probe = self.head\n while probe.next != None:\n probe = probe.next\n newNode.prev = probe\n probe.next = newNode\n\n def print_list(self):\n probe = self.head\n while probe != None:\n print(probe.data)\n probe = probe.next\n\n # def insert_node(self, index, data):\n # probe = self.head\n # while probe != None:\n # if probe.next is None and probe.data == index:\n # self.prepend(data)\n # elif probe.next == index:\n # newNode = Node(data)\n # prev = probe.prev\n # prev.next = newNode\n # newNode.next = probe\n # newNode.prev = prev\n # probe = probe.next\n \n def reverse(self):\n probe = self.head\n while probe != None:\n if probe.prev == None:\n probe.prev = probe.next\n probe.next = None\n probe = probe.prev\n else:\n temp = probe.prev\n probe.prev = probe.next\n probe.next = temp\n if probe.prev == None:\n self.head = probe\n break\n else:\n probe = probe.prev\n\n # implement this function\n\ndoubly_linked_list = DoublyLinkedList()\ndoubly_linked_list.append(\"A\")\ndoubly_linked_list.append(\"b\")\ndoubly_linked_list.append([7,245,8,68,\"hello\"])\n#doubly_linked_list.insert_node(1, \"one\")\ndoubly_linked_list.print_list()\ndoubly_linked_list.reverse()\ndoubly_linked_list.print_list()"
},
{
"alpha_fraction": 0.5177664756774902,
"alphanum_fraction": 0.5177664756774902,
"avg_line_length": 23.5,
"blob_id": "7f42e3ab86260b7cf128a676b60a6fc7bb2365f1",
"content_id": "cf24897cc67be8045d39b3fba2ac7cd23eddfd00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 8,
"path": "/LABS/Labs-6-1/lab6-1-10.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "rows = input('Row height')\ncolumns = input('column width')\n\nfor i in range(int(rows)):\n for i in range(int(columns)):\n print('#', end='')\n print(' '*i, end='')\n print('#')\n\n"
},
{
"alpha_fraction": 0.7289377450942993,
"alphanum_fraction": 0.732600748538971,
"avg_line_length": 33.25,
"blob_id": "d7ed0009cbf618921988a81e0f7dee29e14913f1",
"content_id": "db0660fca59d04967950bc526ea095ffcfe634fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 8,
"path": "/LABS/Labs-3-1/lab3-1-7.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Miles per gallon calculator\n#Asking user for miles driven and gas used\nmilesDriven = input('Provide miles driven:\\n')\ngasUsed = input('Provide gallons of gas used:\\n')\n\nmpg = float(milesDriven)/float(gasUsed) \n\nprint('The miles used per gallon was {:.2f}'.format(mpg))"
},
{
"alpha_fraction": 0.5963141918182373,
"alphanum_fraction": 0.6037735939025879,
"avg_line_length": 27.860759735107422,
"blob_id": "09d56eca178d59ee13c3f259f680c9703cbdfc30",
"content_id": "3b93b74852bc4111aa330d527162315c5d0c1ed5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2283,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 79,
"path": "/LABS/Classes/employee.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 4. Employee Class\n Write a class named Employee that holds the following data about an employee in attributes: \n name, ID number, department, and job title.\n Once you have written the class, write a program that creates three Employee objects to\n hold the following data:\n\nName ID Number Department Job Title\nSusan Meyers 47899 Accounting Vice President\nMark Jones 39119 IT Programmer\nJoy Rogers 81774 Manufacturing Engineer\n\nThe program should store this data in the three objects and then display the data for each\nemployee on the screen. \"\"\"\n\nclass Employee:\n # Defining Attributes of employee class\n def __init__(self):\n self.__name = ''\n self.__id = ''\n self.__dept = ''\n self.__title = ''\n\n # Configuring setters\n def set_name(self):\n self.__name = input('Provide the employee name: \\n')\n\n def set_id(self):\n self.__id = input('Provide the employee ID number: \\n')\n\n def set_dept(self):\n self.__dept = input('Provide the employee department: \\n')\n\n def set_title(self):\n self.__title = input('Provide the employee job title: \\n')\n\n # Configuring Getters\n def get_name(self):\n return self.__name\n\n def get_id(self):\n return self.__id\n\n def get_dept(self):\n return self.__dept\n\n def get_title(self):\n return self.__title \n\n\n\"\"\" # Define main function\ndef main():\n # Set employee object storage list\n employees = []\n\n # Iterate through three employee info entries\n for i in range(3):\n emp = Employee()\n emp.set_name()\n emp.set_id()\n emp.set_dept()\n emp.set_title()\n # Print extra line for spacing (visual user assist)\n print()\n # Append each completion of an employee record to \n # the employees list\n employees.append(emp)\n\n # Header for displaying employee information\n print('Name:\\t\\tID:\\t\\tDepartment:\\t\\tTitle:')\n\n # Iterate through each record stored in employees list\n for emp in employees:\n # Print with same spacing as header\n print(f'{emp.get_name()}\\t\\t{emp.get_id()}\\t\\t{emp.get_dept()}\\t\\t{emp.get_title()}')\n\n print()\n\n#Call main\n#main() \"\"\""
},
{
"alpha_fraction": 0.5863267779350281,
"alphanum_fraction": 0.5921205282211304,
"avg_line_length": 26.870967864990234,
"blob_id": "0ca94bf311a7c05916ae958ec1454a79c88bd174",
"content_id": "cf8c1b3b2292d513351849a6376b3fe9f243e926",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 863,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 31,
"path": "/LABS/List-Tuple/list-tuples-ex-sales.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from functools import reduce\n\n#Defining constant of days of the week\nDAYS = ('MON','TUE','WED','THU','FRI','SAT','SUN')\n\n#Define Main function\ndef main():\n sales = []\n getSales(sales,DAYS)\n measureSales(sales)\n\ndef getSales(salesArray,DAYS):\n for day in DAYS:\n sale = -1.0\n while sale == -1.0:\n #Testing user input via try/except\n try:\n sale = float(input(f'Provide the sales for {day}:'))\n salesArray.append(sale)\n except:\n print('A valid sale number was not provided')\n\ndef measureSales(salesArray):\n total = reduce((lambda x, y: x + y), salesArray)\n # for sale in salesArray:\n # total += sale\n print(f'Total sales for the week is ${total}')\n print('Average sales for the week is ${:.2f}'.format(total/len(salesArray)))\n\n#Call main\nmain()"
},
{
"alpha_fraction": 0.6717850565910339,
"alphanum_fraction": 0.6996161341667175,
"avg_line_length": 22.704545974731445,
"blob_id": "686d8318ef5f6d106f1b259292fd737b2067ba38",
"content_id": "f810e79cb8ca46b475e28499c3cb714f51198f29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1042,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 44,
"path": "/LABS/TKinter/salutations_app.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# The app will take in a name and randomly generate a greeting\n\nimport tkinter as tk\nimport random\n\nwindow = tk.Tk()\nwindow.title('Salutations')\nwindow.geometry(\"400x400\")\n\n\n#Functions\ndef phraseGenerator():\n # Create a list of greetings for random selection\n phrases = ['Hello ', 'What\\'s up ', 'Howdy ','Aloha ']\n name = str(entry1.get())\n return random.choice(phrases) + name\ndef phraseDisplay():\n # Get greeting from phrase generator\n greeting = phraseGenerator()\n greeting_display = tk.Text(master=window, height=10, width=30)\n greeting_display.grid(column=0,row=3)\n\n greeting_display.insert(tk.END, greeting)\n return \n\n\n# Setting up a label\n\nlabel1 = tk.Label(text = 'How you doin' )\nlabel1.grid(column=0, row=0)\n\nlabel2 = tk.Label(text = \"what is your name?\")\nlabel2.grid(column=0, row=1)\n\n# Entry\n# No Args because we want user input\nentry1 = tk.Entry() \nentry1.grid(column=1, row=1)\n\n# Button\nbutton1 = tk.Button(text = 'Click Me', command=phraseDisplay)\nbutton1.grid(column=0, row=2)\n\nwindow.mainloop()"
},
{
"alpha_fraction": 0.5591882467269897,
"alphanum_fraction": 0.569334864616394,
"avg_line_length": 18.688888549804688,
"blob_id": "0bbcd1b5544ef196719a6e94916733272e8c768a",
"content_id": "67d17d501ca59d9726e8dd026a9ecf9446754c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 887,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 45,
"path": "/LABS/List-Tuple/list-tuple-matrix.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import random\ndef main():\n\n ROWS = 4\n COLS = 4\n matrix = []\n makeMatrix(matrix)\n printMatrix(matrix)\n\ndef makeMatrix(matrix):\n for r in range(ROWS):\n matrix.append([random.randint(0,1)])\n for c in range(COLS-1):\n matrix[r].append(random.randint(0,1))\n\n\ndef printMatrix(matrix):\n for row in matrix:\n for i in row:\n print(i, end='')\n print()\n\ndef sumRow(row):\n total = 0 \n for i in row:\n total += i\n return total\n\ndef sumCol(matrix, rows, cols):\n for i in range(cols):\n for j in range()\n\ndef largestRow(matrix):\n largest = -1\n for i in range(ROWS):\n if sumRow(i) > largest:\n largest = sumRow(i)\n largestIndex = i\n return largestIndex \n\n\n\n\nprint('The largest row index: {}'.format(maxRow))\nprint('The largest column index: {}, {}'.format(maxRow,maxCol))\n\n"
},
{
"alpha_fraction": 0.6859395503997803,
"alphanum_fraction": 0.6995182037353516,
"avg_line_length": 23.031578063964844,
"blob_id": "f845746141182ee2dc18e041393da34f5a789dc6",
"content_id": "718e9089df1e0da9c95b461260dbc5b126bb40ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2283,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 95,
"path": "/04_functions/03_lambda_functions.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Lambda Functions\n\nIt's best to start off with some examples:\n\n**Example 1:**\n\n```python\nmy_list = range(26)\n\nprint(my_list)\nprint(filter(lambda x: x % 2 == 0, my_list))\n```\n\n**Breaking down the first example:**\n```text\n\nlambda - the keyword to create a lambda\nx - the parameter. Multiple parameters ex: lambda x,y,z:\nx % 2 == 0: the execution code\nmy_list: the argument. Multiple args ex: lambda x,y,z: <code>, input1, input2, key\n\nMultiple arg/param lambda: \nlambda x,y,z: (x + y) - z, input1, input2, key\n```\n**Example 2:**\n\n```python\ng = lambda x,y: x>y\nprint g(1,2)\nprint g(2,1)\n```\n\nAs you can tell, lambdas appear to be shortened functions; specifically one lined functions. And while this is true to an extent... that is not their purpose.\n\nLambdas, in short, are anonymous functions. Functions without a name. They are generally passed as arguments to higher-order functions as well as a variety of other uses. Below are some of the features of a Lambda\n\n**Lambda Features:**\n\n* Lambda forms can take any number of arguments\n* Return only one value in the form of an expression\n* They cannot contain commands or multiple expressions\n* It cannot be a direct call to print because lambda requires an expression\n* Lambda functions have their own namespace\n\nDue to the fact that they have their own local namespace... Lambdas cannot access variables other than those in their parameters list and globals.\n\n### **Example:**\n\n```python\ndef factorial(n):return reduce(lambda x,y:x*y,[1]+range(1,n+1))\n```\n\n### **Breakdown of a Lambda**\n\n```python\n#Define a regular function\ndef reg_function(x):\n return x**2\n\n# Make it one line\ndef reg_function(x): return x**2\n\n# Turn it into a lambda\nnew_stuff = lambda x: x**2\n```\n\n## Common Lambda Uses\n\n#### map\\(\\)\n\n* Map applies a function to all the items in an input\\_list. That function can be a lambda. \n\n#### filter\\(\\)\n\n* Filter creates a list of elements for which a function returns true. \n\n#### reduce\\(\\)\n\n* Reduce accepts a function and a sequence and returns a single calculated value.\n\n#### [Examples](http://book.pythontips.com/en/latest/map_filter.html) \n\n---\n\n**Continue to Performance Labs:** 4A and 4B \n\n---\n\n|[Labs 4A & 4B](/04_functions/lab4a.md)|\n|---|\n"
},
{
"alpha_fraction": 0.574999988079071,
"alphanum_fraction": 0.5874999761581421,
"avg_line_length": 20.727272033691406,
"blob_id": "3c02cf70dec31ebebc481d91ec12d35a963001ae",
"content_id": "6d54ee3f22d3e221597633d23c5e62d3edc473c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 11,
"path": "/LABS/Lab-2-1/lab2d.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "print(\"Please input a string:\")\nx = str(input())\n\ncount = len(x) - 1\nrevStr = ''\n\nwhile count >= 0:\n revStr += x[count].upper()\n count -= 1\n\nprint(\"Your string was \" + x + \". your string reversed and capitalized is \" + revStr + \"!\")\n\n"
},
{
"alpha_fraction": 0.732758641242981,
"alphanum_fraction": 0.7471264600753784,
"avg_line_length": 37.77777862548828,
"blob_id": "14e585adddfdc6505698d31fdfcfde9dd6b0828c",
"content_id": "aa40610a1749645fb55cbea0e2589ec033514d7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 9,
"path": "/LABS/Labs-3-1/lab3-1-3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining CONSTANT for feet in acres\nFEET_IN_ACRES = 43560\n\n#gathering input and storing the feet into a variable\nfeet = int(input(\"Provide the total amount of square feet:\"))\n\n#Calculate and print using int type for feet and \n#float type for calculation of acres\nprint('The total acres from {:d} is {:f}'.format(feet, (float(feet)/FEET_IN_ACRES)))"
},
{
"alpha_fraction": 0.6430780291557312,
"alphanum_fraction": 0.6534253358840942,
"avg_line_length": 22.288087844848633,
"blob_id": "b4750e53b87826aa1f678fbb9921d5320373c9b8",
"content_id": "ec00eb53a9e79f0e236497c42a0c24e167cef81e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8408,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 361,
"path": "/LABS/regex/regex-lecture.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Regular Expressions\n\nimport re\n\ntext_to_match = \"\"\"lol lolololol\nDaniel abcd 1234 abcd \[email protected]\\t\[email protected]\nlol lolololol\n210-444-4444\n512*888*8888\n800-444-4444\n900-555-5555\n\nMr. Anderson\nMr Smith\nMs. Davis\nMrs. Robinson\nMr. T\n\ncat,bat,pat,mat\"\"\"\n\n# 'r' before a string indicates raw string literal which ignores escape chars\n#print(r'\\tTab')\n\n# match()\n# determines if the regex matches at the BEGINNING of the string\n# Returns 'None' if its not at BEGINNING, same as ^anchor\n\n\n# my_string = 'tkinter is the best GUI library out there'\n\n# Compile allows us to separate our pattern matches into a variable\n# pattern = re.compile(r'Daniel')\n# match = pattern.match(text_to_match)\n# Result is <re.Match object; span=(0, 6), match='Daniel'>\n# span is useful to slice the string \n# print(match)\n# that makes it easier to reuse our variable \n# my_match = re.compile(r'Tkinter')\n\n\n# Search() let's us search the entire string\n# Best to use over match unless you know it has to be at start of\n# string \n# pattern = re.compile(r'1234')\n# searched_item = pattern.search(text_to_match)\n# print(searched_item)\n\n\n# finditer()\n# finds all instances of literal string and stores as\n# callable iterator \n\n# pattern = re.compile(r'abcd')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n# # **********************META CHARS************************\n# # '.' will match any non-newline character\n# pattern = re.compile(r'.')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # find literal '.' in string\n# pattern = re.compile(r'\\.')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # find digits in string\n# pattern = re.compile(r'\\d')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # find non-digits in string\n# pattern = re.compile(r'\\D')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # find non-digits in string\n# # includes new lines, special characters, just no numbers \n# pattern = re.compile(r'\\D')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find any word char (a-z, A-Z, 0-9, _)\n# pattern = re.compile(r'\\w')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find any non-word char (a-z, A-Z, 0-9, _)\n# pattern = re.compile(r'\\W')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find any whitespace \n# pattern = re.compile(r'\\s')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find any non-whitespace \n# pattern = re.compile(r'\\S')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n\n#*******************ANCHORS***********************\n# Anchors don't match characters; they match invisible positions before\n# or after characters\n\n# \\b - finds word boundaries ( if there is a space in front of a character) \n# Find any boundary matches \n# pattern = re.compile(r'\\blol')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in 
matches:\n# print(match)\n\n# # Find any non-boundary matches \n# pattern = re.compile(r'\\Blol')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # ^ - finds matches at beginning of string\n# pattern = re.compile(r'^lol')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # $ - finds matches at end of string\n# pattern = re.compile(r'lol$')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# finds 3 digits in a row\n# pattern = re.compile(r'\\d\\d\\d')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # find phone number\n# pattern = re.compile(r'\\d\\d\\d.\\d\\d\\d.\\d\\d\\d\\d')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n\n# find using a character set [], used to search for a group of characters\n# pattern = re.compile(r'\\d\\d\\d[-*]\\d\\d\\d[-*]\\d\\d\\d\\d')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# pattern = re.compile(r'[89]00[-*]\\d\\d\\d[-*]\\d\\d\\d\\d')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find a range of values\n# # using pipe to match either set\n# pattern = re.compile(r'[a-d]|[A-D]')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find a range of values\n# # Another way to get same effect as using pipe to match either set\n# pattern = re.compile(r'[a-dA-D]')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Find a range of values\n# # Negative char set search\n# pattern = re.compile(r'[^1-7]')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Match cat, mat, pat, but not bat\n# pattern = re.compile(r'[^bB]at')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n#***********************************************************\n#QUANTIFIERS\n\n# pattern = re.compile(r'(\\d{3}.){3}')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # ? 
- find 0 or 1 match\n\n# pattern = re.compile(r'Mr\\.?')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # + finds 1 or more\n# pattern = re.compile(r'Mr\\.?\\s[A-Z]\\w+')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # * - for 0 or more\n# pattern = re.compile(r'Mr\\.?\\s[A-Z]\\w*')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # () - an anchor that allows us to match several different patterns\n# pattern = re.compile(r'M(r|s|rs)\\.?\\s[A-Z]\\w*')\n# matches = pattern.finditer(text_to_match)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\nemails = \"\"\"\[email protected]\[email protected]\[email protected]\n\"\"\"\n\n# # Matching for e-mails\n# pattern = re.compile(r'[a-zA-z]+@[a-zA-Z]+\\.com')\n# matches = pattern.finditer(emails)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# # Matching for e-mails\n# pattern = re.compile(r'[a-zA-z.0-9-_+]+@[a-zA-Z.0-9-_]+\\.[a-zA-Z0-9-.]+')\n# matches = pattern.finditer(emails)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\nurls = \"\"\"\nhttp://testsite.com\nhttps://www.google.com\nhttps://youtube.com\nhttps://www.nasa.gov\n\"\"\"\n# pattern = re.compile(r'https?://(www\\.)?\\w+\\.\\w+')\n# matches = pattern.finditer(urls)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n\n# # We can group all of these to grab groups\n# # groups 0 being all groups, 1 being first group, etc. \n# pattern = re.compile(r'https?://(www\\.)?(\\w+)(\\.\\w+)')\n# matches = pattern.finditer(urls)\n\n# print(type(matches))\n# for match in matches:\n# print(match.group(0,1,2,3))\n\n\n# Return domains\n# pattern = re.compile(r'https?://(www\\.)?(\\w+)(\\.\\w+)')\n# subbed_urls = pattern.sub(r'\\2\\3', urls)\n\n# print(subbed_urls)\n\n# findall() - finds all and returns as tuples if there is more than one group\npattern = re.compile(r'(Mr|Ms|Mrs)\\.?(\\s[A-Z]\\w*)')\nmatches = pattern.findall(text_to_match)\n\nprint(type(matches))\nfor match in matches:\n print(match)\n\n# Shows findall() without groups; returns list of strings\n# pattern = re.compile(r'[a-zA-z.0-9-_+]+@[a-zA-Z.0-9-_]+\\.[a-zA-Z0-9-.]+')\n# matches = pattern.findall(emails)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# flags\n\nstring = 'I cAan TyPeE GOod'\n\n# case-insensitive search; re.I works for re.IGNORECASE\npattern = re.compile(r'[ao]', re.IGNORECASE)\nmatches = pattern.findall(string)\n\nprint(type(matches))\nfor match in matches:\n print(match)\n\n"
},
{
"alpha_fraction": 0.6479949355125427,
"alphanum_fraction": 0.6607256531715393,
"avg_line_length": 31.081632614135742,
"blob_id": "b63cda8ec2a3febd0e615c78fddad3ccdf442a6f",
"content_id": "7a4bc8263ba66a63eed608be49b72d05b23585e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1571,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 49,
"path": "/LABS/Socket/Exam/prompt1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Write a Python program somewhat similar to \n# http://www.py4e.com/code3/json2.py. The program\n# will prompt for a URL, read the JSON data from \n# that URL using urllib and then parse and extract \n# the comment counts from the JSON data, compute the \n# sum of the numbers in the file and enter the sum below:\n# Here are two files for this assignment. One is a sample\n# file where we give you the sum for your testing and the \n# other is the actual data you need to process for the \n# assignment.\n\n# Sample data: http://py4e-data.dr-chuck.net/comments_42.json (Sum=2553)\n\n# Actual data: http://py4e-data.dr-chuck.net/comments_57128.json (Sum ends with 10)\n\nimport urllib.request as request\nimport json\n\ndef main():\n # Prompt user for url\n url = input('Please provide a json URL with the counts:\\n')\n\n # grab page with json data\n page = request.urlopen(url)\n\n # Read the body of the page (expected to contain json data)\n body = page.read()\n \n # decode json data\n j = json.loads(body.decode(\"utf-8\"))\n sumCount(j)\n # Statements used to examine objects during dev\n #print(j['comments'])\n #print(type(j))\n\ndef sumCount(j):\n total = 0\n try:\n for i in j['comments']:\n try:\n total += i['count']\n except KeyError:\n print('Program expects a nested key of \\'count\\' which does not exist.')\n except KeyError:\n print('Program expects a key of \\'comments\\' which does not exist') \n print(f'Sum of counts in the provided json file: {total}')\n return total\n\nmain()"
},
{
"alpha_fraction": 0.6707819104194641,
"alphanum_fraction": 0.6872428059577942,
"avg_line_length": 17.69230842590332,
"blob_id": "6adfe9398333fccb4ba7eb6bc5f62cd3350a6fb9",
"content_id": "3e9f8485ae9acdc2e0e45772429446db04501b9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 26,
"path": "/LABS/TKinter/button-label-test.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\nfrom tkinter import StringVar\n\ndef change():\n v.set(\"goodbye\")\n #label.pack()\n\nwindow = tk.Tk()\n\n# Set the title for your window\nwindow.title('Auto Shop')\n\n# Set size of window\nwindow.geometry(\"600x600\")\n\n# Adding a label\nv = StringVar()\ntitle = tk.Label(textvariable=v)\nv.set('hello')\n# grid() tells you where you want the label, (0,0) is default\ntitle.pack() \n\nclick = tk.Button(text = \"Change\", bg = \"blue\", command=change)\nclick.pack()\n\nwindow.mainloop()\n"
},
{
"alpha_fraction": 0.5360230803489685,
"alphanum_fraction": 0.6834774017333984,
"avg_line_length": 47.41860580444336,
"blob_id": "7b0e7553fcad4332deb296faa24134b4db31454a",
"content_id": "36f0b251a0f0d82ab65d294284b57d9a0380885a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2082,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 43,
"path": "/Practice_Test/dictdupes.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWrite a function duplicate_Values(aDictionary) that takes a dictionary of 20 values in the range 1- 99,\nand determines whether there are any duplicate values in the dictionary.\n\nThe function duplicate_Values(aDictionary) returns 1 if there are duplicate values and 0 otherwise\n\n\"\"\"\n\ndef duplicateValues(aDictionary):\n \"\"\"\n There are many ways to determine if there are duplicate values.\n 1- One can create an array of 100 bits (initialized to '0') and set the bit to '1' at the index of the number\n in the dictionary. If the bit at that index is already '1' that means we have duplicate values.\n\n 2- Another simple way is to sort the values of the dictionary then look if there are duplicate values\n\n \"\"\"\n # Get a list of values in a variable\n values = list(aDictionary.values())\n # Get a set of values from the same dictionary (unique values only)\n unique = set(aDictionary.values())\n # If there are no dupes then the set and list should have the same length\n # therefore, if these are unequal then duplicates are present\n if (len(unique) != len(values)):\n return 1\n else:\n #otherwise there are no duplicates\n return 0\n\n# # dictionary1 has duplicate values: 53 and 69\n# dictionary1 = {0:53, 1:76, 2:16, 3:17, 4:43, 5:30, 6:38, 7:26, 8:4, 9:53, 10:69, 11:80, 12:71, 13:12, 14:69, 15:7, 16:82, 17:84, 18:87, 19:15}\n# # dictionary2 has NO duplicate values\n# dictionary2 = {0:93, 1:76, 2:16, 3:17, 4:43, 5:30, 6:38, 7:26, 8:4, 9:53, 10:69, 11:80, 12:71, 13:12, 14:77, 15:7, 16:82, 17:84, 18:87, 19:15}\n\n# # dictionary3 has NO duplicate values\n# dictionary3 = {0:33, 1:49, 2:88, 3:9, 4:14, 5:89, 6:59, 7:32, 8:34, 9:41, 10:38, 11:35, 12:42, 13:15, 14:50, 15:20, 16:66, 17:77, 18:8, 19:4}\n\n# # dictionary4 has NO duplicate values: 59 and 20\n# dictionary4 = {0:59, 1:49, 2:88, 3:9, 4:14, 5:89, 6:59, 7:32, 8:34, 9:20, 10:38, 11:35, 12:42, 13:15, 14:50, 15:20, 16:66, 17:77, 18:8, 19:4}\n# print(duplicateValues(dictionary1))\n# print(duplicateValues(dictionary2))\n# print(duplicateValues(dictionary3))\n# print(duplicateValues(dictionary4))\n"
},
{
"alpha_fraction": 0.6513761281967163,
"alphanum_fraction": 0.7087156176567078,
"avg_line_length": 35.41666793823242,
"blob_id": "e3c481c9af378c53fff9e683f6a0906ff984fac9",
"content_id": "9b617af8791ef6926c4ee121c0e388442215cb5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 12,
"path": "/LABS/Labs-3-1/lab3-1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "## This is a program that prints simple, personal information\n## Setting up the variables that will be passed as strings to print\nname = 'Hans Grueber'\naddress = '2121 Avenue of the Stars, Los Angeles, CA 90067'\nphone = '(213) 974-3211'\nmos = '0301, GG-12'\n\n#Printing each variable in a statement with the various string formatting\nprint(\"Name: %s\" % name)\nprint(f\"Address: \", address)\nprint(\"Phone: {} \".format(phone))\nprint(\"MOS: %s\" % mos)"
},
{
"alpha_fraction": 0.6985118985176086,
"alphanum_fraction": 0.7148809432983398,
"avg_line_length": 37.102272033691406,
"blob_id": "605bdebae29ae2fb0c587bb11e84373fa8145601",
"content_id": "2603ac241dd036d9f07c91506c09e0ee076f9401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3360,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 88,
"path": "/Practice_Test/class_player.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" Demonstrates: 3.p OOP and Classes\"\"\"\n\nimport math\n\n\"\"\" Create a class Player which represents a character in a game. The player should have the following attributes:\n\t\tname: string representing Paladin's name\n\t\thit_points: float, number of hit points (starts at 100)\n\t\tx_pos: int x position on a grid (starts at 0)\n\t\ty_pos: int y position on a grid (starts at 0)\n\t\t\n\tPlayer methods are:\n\t\treport_pos: returns a tuple (x_pos, y_pos)\n\t\treduce_health: takes in <float> distance as argument and reduces player hit_points by half the distance.\n\t\tmove: takes in x_pos and y_pos as arguments, computes the distance by getting the square root of \n\t\t (x_pos*x_pos + y_pos*y_pos)\n\t\t calls reduce_health(), and returns hit points.\n\t\t\t- If move is called and it reduces hit_points below 0, return \"You are out of hit points!\"\n\t\t\"\"\"\n\t\t\n''' \n\n***REVIEWER COMMENTS/APPROVAL****\n\n\n\n\n\n'''\t\t\n'''\n*** NOTES FOR REVIEWER***\nThis prompt assumes that player movement is always a positive movement from current position based on how TestCase1 is written.\nThe provided formula makes the same assumption which removes the ability to move backwards.\nThis also assumes that there is no negative space, removing 3 quadrants of a true 2D plane\nIf multiple moves are to be considered on a true 2D plane and alowwing for backward movement then it is recommended that the \nformula be used:\n math.sqrt((math.fabs(self.x_pos-x_pos)**2 + math.fabs(self.y_pos-y_pos)**2)\n \nAside from all that, the packaged unit test is broken since TestCase3 moves Jokem by 100, 100 which results in taking the \nSqrt of 20000 which ~= 141 which should result in an \"out of hit points\" message. This makes sense for the second part of TestCase3.\nBased on the expected value in TestCase3 is the exact result if move was to (50,50). \n'''\n\n\n#based on the original code provided it appeared player was meant to be a subclass of object\nclass object:\n # Creating a general object class for the theorhetical game\n\tdef __init__(self, objType):\n # Initializing the object type\n\t\tself.__objType = objType\n\n# Defining the 'Player' subclass\nclass Player(object):\n # initializing the player object with provided name, default HP of 100, and position 0,0\n\tdef __init__(self, name, hit_points = 100, x_pos = 0, y_pos = 0 ):\n\t\tobject.__init__(self, 'Player')\n\t\tself.name = name\n\t\tself.hit_points = hit_points\n\t\tself.__x_pos = x_pos\n\t\tself.__y_pos = y_pos\n\t\n #defining report_position which returns coordinates as a tuple\n\tdef report_pos(self):\n\t\treturn self.__x_pos, self.__y_pos\n\t\n #defining the reduce_health method which reduces player object HP by distance arg and returns HP\n\tdef reduce_health(self, distance):\n\t\tself.hit_points -= distance\n\t\treturn self.hit_points\n\n # defining move method for player\n\tdef move(self, x_pos, y_pos):\n #Calculates the distance assuming start point is always 0,0 based on above prompt\n\t\tdistance = math.sqrt(x_pos**2 + y_pos**2)\n\t\t# Reduces player HP by distance value\n self.reduce_health(distance)\n # Checks to see if HP is depleted\n\t\tif self.hit_points < 0:\n # To return 'game over' message \n\t\t\treturn f'You are out of hit points!'\n\t\t#sets new position for player\n self.__x_pos += x_pos\n\t\tself.__y_pos += y_pos\n #returns player HP\n\t\treturn self.hit_points\n\n#jokem = Player(\"Jokem\")\n#print(jokem.move(100,100))\n#print(jokem.report_pos())\n\n\n\n\n\t\t\n"
},
{
"alpha_fraction": 0.6471389532089233,
"alphanum_fraction": 0.6480472087860107,
"avg_line_length": 35.71666717529297,
"blob_id": "f038ab2d621500166d2221b932f4c77c8840c2da",
"content_id": "44249c2a291bb85ff7fe8316819ebd8c20462ae2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2202,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 60,
"path": "/LABS/PerfExam/prompt1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This is a program to ask the user for a filename (text file)\n# and a string to remove to remove the line and write the output to\n# to a file called reduced .txt user w/ the content with the line removed \n\n\n\ndef getFileContent():\n # Setting an empty array for content for the while loop condition\n content = []\n \n # While the array (content) is empty, the user will be prompted\n while len(content) == 0:\n # The try/except for reading in the file\n try: \n filename = input('Provide the name of the text file you wish to edit:\\n')\n f = open(filename, 'r')\n # Store the lines of the file as an list of lines\n content = f.readlines()\n f.close()\n return content\n except FileNotFoundError:\n print(f'Your file, {filename} was not found.')\n\ndef removeLine(content):\n # Tracks if a count of matching lines found\n found = 0\n\n # makes a unique list of line as options for the user\n setOfLines = set(content)\n \n # Displays the unique lines\n for line in setOfLines:\n print(line)\n # Get user input for line to remove \n remove = input('Type the line you would like to remove. Available lines above:\\n')\n while remove+'\\n' not in setOfLines:\n for line in setOfLines:\n print(line)\n remove = input('Line does not exist!\\nType the line you would like to remove. Available lines above (case sensitive):\\n')\n # Creating empty list for the list of non-matching lines\n newLines = []\n # Loops through the lines to add non-matching ones to new list\n for line in content:\n if line != remove+'\\n' and line != remove:\n newLines.append(line)\n return newLines\n\ndef main():\n # getting the content of the file using the function\n content = getFileContent()\n # Passing content into the remove line function to return \n # all non-matching lines as the new content to write\n newLines = removeLine(content)\n # opening a new file called removed.txt to write the output\n # preserving the original file \n outFile = open('removed.txt','w')\n outFile.writelines(newLines)\n outFile.close()\n\nmain()"
},
{
"alpha_fraction": 0.7457399368286133,
"alphanum_fraction": 0.7457399368286133,
"avg_line_length": 23.47252655029297,
"blob_id": "4205f5ecaa23455649306612c8cfbb8130486a78",
"content_id": "a92444a0dec6d4b7e925ef1db700058541239be8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2234,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 91,
"path": "/07_Algorithms/01_Algorith_SSC.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Algorithmic, Searching, Sorting, and Complexity\n\n**Resource**\n\n## Learning Objectives\n\n* Determine the rate of growth of the work of an algorithm in terms of its problem size\n* Use big-O notation to describe the running time and memory usage of an algorithm\n* Recognize the common rates of growth of work, or complexity classes – constant, logarithmic, linear, quadratic, and exponential\n* Convert an algorithm to a faster version that reduces its complexity by an order of magnitude\n* Describe how the sequential search and binary search algorithms work\n* Describe how the selection sort and quicksort algorithms work\n\n## Measuring Algorithms\n\n* When choosing algorithms\n * You often have to settle for a space/time trade-off\n* An algorithm can be designed to gain faster run times \n * At the cost of using extra space (memory) or the other way around\n* Space/time trade-off is more relevant for miniature devices\n\n## Measuring Algorithm Efficiency\n\n* Use the computer’s clock to obtain an actual run time\n * Process called benchmarking or profiling\n* Starts by determining the time for several different data sets of the same size and then calculates the average time\n* Next, similar data are gathered for larger and larger data sets\n * After several tests enough data are available to predict how the algorithm will behave for a data set of any size\n* Code for a tester program is found on the following slide\n\n## Measuring Algorithm Runtime\nUnder construction\n\n\n## Counting Instructions\n\nUnder construction\n\n## Measuring Memory used by an Algorithm\n\nUnder construction\n\n## Complexity Analysis\n\nUnder construction\n\n## Orders of Complexity\n\n## Big-O-Notation\n\n## Constant Proportionality\n\n## Search Algorithms\n\n## Search for the Minimum\n \n * Sequential Search of a List\n \n * Case Performances\n \n * Binary Search\n\n## Comparing Data\n\n## Sorting Algorithms\n \n * Basic Sort\n \n ***Selection Sort**\n\n ***Bubble Sort**\n \n ***Insertion Sort**\n \n ***Case Performance Revisited**\n \n ***Faster Sorting**\n \n ***Quicksort**\n \n **Quicksort Overview**\n \n **Implementation of Quicksort**\n \n ***Merge Sort**\n\n**Exponetial Algorithm: Recursive Fibonacci**\n\n**Converting Fibonacci to Linear**\n\n**Summary**\n \n"
},
{
"alpha_fraction": 0.6724764108657837,
"alphanum_fraction": 0.6790123581886292,
"avg_line_length": 32.60975646972656,
"blob_id": "24b6e546e562258a9d4d39b97bce873029591f81",
"content_id": "f026c91ca9a5fe279ba5294b21a60b3286c8d236",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1377,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 41,
"path": "/LABS/Multiprocessing/test_multiprocessing4.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nunlike threading, arguments must be passed in with pickle\narguments have to be serialized, convert the python object to a format that can be deconstructed and reconstructed \n\n\n\"\"\"\n\nimport concurrent.futures\nimport time\n\nstart = time.perf_counter()\n\n# now our function accepts a number of seconds\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s)...')\n time.sleep(seconds)\n return 'Done Sleeping...'\n\nif __name__ == \"__main__\":\n\n # with concurrent.futures.ProcessPoolExecutor() as executor:\n # # submit schedules a function to be executed and returns a future object\n # # a future object encapsulates the execution of our function and allows us \n # # to check on it after its been scheduled, we can see if its running, done, or the result\n # f1 = executor.submit(do_something, 1)\n # f2 = executor.submit(do_something, 1)\n # print(f1.result())\n # print(f2.result())\n\n # using list comprehension\n with concurrent.futures.ProcessPoolExecutor() as executor:\n results = [executor.submit(do_something, 1) for _ in range(10)]\n\n # pass in our list of futures objects results so it can be used with as_completed\n for f in concurrent.futures.as_completed(results):\n print(f.result())\n\n\n finish = time.perf_counter()\n\n print(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.5718390941619873,
"alphanum_fraction": 0.579023003578186,
"avg_line_length": 32.69355010986328,
"blob_id": "f94340a010be3c737aa361f3d5fd8a5131dcba45",
"content_id": "8db5b20453b1e4518a336c84b400807b7eb27dbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2088,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 62,
"path": "/03_Flow_Control/04_if_elif_else_mc.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Decision Structure Questions \n\n1. A __________ structure can execute a set of statements only under certain circumstances. \n a. sequence \n b. circumstantial \n c. ***decision*** \n d. Boolean\n2. A __________ structure provides one alternative path of execution. \n a. sequence \n b. ***single alternative decision*** \n c. one path alternative \n d. single execution decision\n3. A(n) __________ expression has a value of either true or false. \n a. binary \n b. decision \n c. unconditional \n d. ***Boolean*** \n4. The symbols and are all operators>, <, == _____ . \n a. ***relational*** \n b. logical \n c. conditional \n d. ternary \n5. A(n) _________ structure tests a condition and then takes one path if the condition is true, or another path if the condition is false. \n a. if statement \n b. single alternative decision \n c. ***dual alternative decision*** \n d. sequence \n6. You use a(n) __________ statement to write a single alternative decision structure. \n a. test-jump \n b. ***if*** \n c. if-else \n d. if-call \n7. You use a(n) __________ statement to write a dual alternative decision structure.\n a. test-jump \n b. if \n c. ***if-else*** \n d. if-cal \n8. and, or, and not are __________ operators. \n a. relational \n b. ***logical*** \n c. conditional \n d. ternary \n9. A compound Boolean expression created with the __________ operator is true only if both of its subexpressions are true. \n a. ***and*** \n b. or \n c. not \n d. both \n10. A compound Boolean expression created with the _________ operator is true if either of its subexpressions is true. \n a. and \n b. ***or*** \n c. not \n d. either \n11. The ___________ operator takes a Boolean expression as its operand and reverses its logical value. \n a. and \n b. or \n c. ***not*** \n d. either\n12. A ___________ is a Boolean variable that signals when some condition exists in the program. \n a. ***flag***\n b. signal\n c. sentinel\n d. siren"
},
{
"alpha_fraction": 0.7733644843101501,
"alphanum_fraction": 0.7733644843101501,
"avg_line_length": 25.6875,
"blob_id": "39827f77458e71ebcc5ff058c381570ab16b668e",
"content_id": "e7b21f2be0b5fb78a16567f19fc1af2014c380b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 16,
"path": "/LABS/Classes/str_vs_repr.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import datetime\n\ntoday = datetime.datetime.now()\n# Presenting this information to the user\nprint(str(today))\n\n# Repr is more for developer mindset\nprint(repr(today))\n\n# An accessor method (or getter) is something that gets attributes of the objects\n# A mutator method (or setter) changes the attribute of an object instance\n\n# Cannot use a name for a class that matches a keyword\n# Get keywords\nimport keyword\nkeyword.kwlist()\n\n"
},
{
"alpha_fraction": 0.7844827771186829,
"alphanum_fraction": 0.795976996421814,
"avg_line_length": 30.727272033691406,
"blob_id": "b67f943b678593e90bd8e11be1b2fcf0ae7c6367",
"content_id": "04ae5d0c82afbdb7d7532848fba1fd01bfb6b559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 348,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 11,
"path": "/LABS/Labs-3-1/lab3-1-2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining Variables for profit calculator\nprojProfit = 0.23\n\n#Requesting user input for totalSales\ntotalSales = input('What is the profected total annual sales?')\n\n#Calculate profit\nprofit = float(totalSales)*projProfit\n\n#Printing profit result to screen with appropriate float formatting for money\nprint(\"Total projected profit is $%.2f\" % profit)"
},
{
"alpha_fraction": 0.6601036190986633,
"alphanum_fraction": 0.6704663038253784,
"avg_line_length": 25.83333396911621,
"blob_id": "aa54652295be8e7085685f7292cddd3f5cd5003c",
"content_id": "c5d177eb85b8895ce5f3a4a9f89ea6c9fa7c5ace",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 965,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 36,
"path": "/LABS/Socket/example-chall-svr.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\nHOSTNAME = '' # blank so any address can be used\nPORTNUMBER = 11267 # number for the port\nBUFFER = 80 # size of the buffer\n\nDEALER_ADDRESS = (HOSTNAME, PORTNUMBER)\nDEALER = Socket(AF_INET, SOCK_STREAM)\nDEALER.bind(DEALER_ADDRESS)\nDEALER.listen(1)\n\nprint('dealer waits for player to connect')\nPLAYER, PLAYER_ADDRESS = DEALER.accept()\nprint('dealer accepted connection request from ',\\\n PLAYER_ADDRESS)\n\nSECRET = randint(0, 9)\nprint('the secret is %d' % SECRET)\n\nwhile True:\n print('dealer waits for a guess')\n GUESS = PLAYER.recv(BUFFER).decode()\n print('dealer received ' + GUESS)\n if int(GUESS) < SECRET:\n REPLY = 'too low'\n elif int(GUESS) > SECRET:\n REPLY = 'too high'\n else:\n REPLY = 'found the secret'\n PLAYER.send(REPLY.encode())\n if REPLY == 'found the secret':\n break\n\nDEALER.close()"
},
{
"alpha_fraction": 0.6522276997566223,
"alphanum_fraction": 0.6732673048973083,
"avg_line_length": 27.75,
"blob_id": "c692ae695a4fe524d14bc817c2180870176e68e0",
"content_id": "ba352b4446d17e848baf1a3e2eb04bc8013dc657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 808,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 28,
"path": "/LABS/Iteration/filter.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Filter\n# Takes in two arguments, function and iterable,\n# and returns items in the list that are True\n\ndef isPrime(x):\n for n in range(2, x):\n if x % n ==0:\n return False\n return True\n \nfilterObject = filter(isPrime, range(100))\nprint('Prime numbers between 1-10: ', list(filterObject))\n\n# Filter applied with lambda\nfilterObject2 = filter(lambda x: x % 2 == 0, range(10))\nprint(list(filterObject2))\n\n# Filter on a random list w/out a function (None is not a function)\nrandomList = [1, 'a', 0, False, True, '0']\nfilteredList = filter(None, randomList)\n\nprint('The filtered elements are ')\nfor element in filteredList:\n print(element)\n\n# filter for words in a list that contain the letter 'e'\nwords = ['list', 'done', 'exit']\nprint(list(filter(lambda x: 'e' in x, words)))\n\n\n\n"
},
{
"alpha_fraction": 0.591304361820221,
"alphanum_fraction": 0.5974424481391907,
"avg_line_length": 27.764705657958984,
"blob_id": "e7337251fb00c26e83d5f1e539f1df3fa6eb35f3",
"content_id": "bccea3f805636fb9d4abeaffe616831d9b9d7c17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1955,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 68,
"path": "/queues/queue.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nQueues\n A linear collection that adds elements to one end and removes them \n from the other in a FIFO (first in first out protocol)\n Queues are implemented in many ways, some based on arrays, and some\n based on linked structures\n\n rear - inserttions are restricted to this end\n front - removals are restricted to this end\n\n Two funcamental operations, add and pop\n add - adds to rear\n pop - removes from front\n peek - seek the front item\n\n There are priority queues that schedule their elements using a rating scheme\n as well as FIFO. If two elements have the same rating then they are scheduled\n in FIFO order. Elements are ranked from smallest to largest according to some\n attribute like a number or char, generally smallest are removed first no \n matter when they are added to the queue. (Dijkstra's shortest path algorithm)\n\"\"\"\n\nclass Queue:\n def __init__(self):\n self.queue = []\n \n def push(self):\n item = input(\"What would you like to push?\\n\")\n self.queue.append(item)\n\n def pop(self):\n item = self.queue.pop(0)\n print(\"You popped \", item)\n \n def peek(self):\n if self.queue:\n print(self.queue[0])\n\n def view_q(self):\n for i in range(len(self.queue)):\n print(f'Item {i} is {self.queue[i]}')\n\nq = Queue()\n\nwhile True:\n print(\"----------------\")\n print(\"Queue Options\")\n print(\"1. View Queue\")\n print(\"2. See Next in Queue\")\n print(\"3. Push onto Queue\")\n print(\"4. Pop out of Queue\")\n print(\"----------------\")\n\n menu_choice = 0\n while menu_choice == 0:\n try:\n menu_choice = int(input(\"Choose your option:\\n\"))\n except ValueError:\n print('invalid option')\n\n if menu_choice == 1:\n q.view_q()\n if menu_choice == 2:\n q.peek()\n if menu_choice == 3:\n q.push()\n if menu_choice == 4:\n q.pop()"
},
{
"alpha_fraction": 0.592415452003479,
"alphanum_fraction": 0.5958319306373596,
"avg_line_length": 31.120878219604492,
"blob_id": "bb79dee97bdbe5efa21ed63a895260f0d104303a",
"content_id": "a0453992f25aab734a5d4878d220e6b974d95500",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2927,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 91,
"path": "/Projects/UNO/turns.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This is meant to be a doubly, cirularly linked list to simulate the \n# turns being taken.\n\n\n# Creating the node class for a player's turn which will hold a player \n# object as data\nclass PlayerTurn:\n def __init__(self, data, next = None, prev = None):\n # Instantiates a node with a default next of None\n self.data = data # player data object\n self.next = next # will point to next clockwise player\n self.prev = prev # will point to next counter-clockwise player\n \n \nclass TurnList:\n def __init__(self):\n self.head = None #creating head and tail for the list\n self.tail = None # May not really need tail once it is connected\n self.__direction = 1 # This will indicate the direction the token is moving\n self.__token = None # This will track current player node thus indicating turn\n self.__dealer = None # Marks if this player is currently the dealer\n self.__change = False # Tracks changes made and Unity's state of reflecting change\n\n # Add a player node to our linked list\n def append(self, data):\n # Instantiate a new node\n newPlayer = PlayerTurn(data)\n \n # Is there something in our linked list yet\n if self.head is None:\n newPlayer.prev = None\n self.head = newPlayer\n self.tail = self.head\n # There are node(s) in our linked list\n else:\n self.tail.next = newPlayer\n newPlayer.prev = self.tail\n self.tail = self.tail.next\n \n # Method to make the list circular once fully populated and place the token at the head\n def complete(self):\n self.head.prev = self.tail\n self.tail.next = self.head\n self.__dealer = self.head\n self.__token = self.__dealer.next\n\n def nextTurn(self):\n if (self.__direction == 1):\n self.__token = self.__token.next\n else:\n self.__token = self.__token.prev\n\n def reverse(self):\n if (self.__direction == 1):\n self.__direction = 0\n elif(self.__direction == 0):\n self.__direction = 1\n\n def skip(self):\n self.nextTurn()\n self.nextTurn()\n\n def toggleChange(self):\n if (self.__change == False):\n self.__change = True\n elif (self.__change == True):\n self.__change = False\n\n def printCurrent(self):\n print(self.__token.data)\n\n # This is meant to return the current player object\n def get_player(self):\n return self.__token.data\n\ndef main():\n names = [\"player1\", \"player2\", \"player3\", \"player4\"]\n turns = TurnList()\n for name in names:\n turns.append(name)\n turns.complete()\n print(turns.get_player())\n turns.nextTurn()\n print(turns.get_player())\n turns.skip()\n print(turns.get_player())\n turns.reverse()\n turns.nextTurn()\n print(turns.get_player())\n\nmain()\n\n\n\n\n"
},
{
"alpha_fraction": 0.6179900169372559,
"alphanum_fraction": 0.6315010190010071,
"avg_line_length": 33.64102554321289,
"blob_id": "f44c9065977ede4be4c2071d10fe05037d80357a",
"content_id": "d2b7d544fc94287d79c783c2e2038742b6f21684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5419,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 156,
"path": "/LABS/Classes/trivia.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"8. Trivia Game\n In this programming exercise you will create a simple trivia game for two players. The program will \n work like this:\n • Starting with player 1, each player gets a turn at answering 5 trivia questions. (There\n should be a total of 10 questions.) When a question is displayed, 4 possible answers are\n also displayed. Only one of the answers is correct, and if the player selects the correct\n answer, he or she earns a point.\n • After answers have been selected for all the questions, the program displays the number\n of points earned by each player and declares the player with the highest number of points\n the winner.\n To create this program, write a Question class to hold the data for a trivia question. The\n Question class should have attributes for the following data:\n • A trivia question\n • Possible answer 1\n • Possible answer 2\n • Possible answer 3\n • Possible answer 4\n • The number of the correct answer (1, 2, 3, or 4)\n The Question class also should have an appropriate __init__ method, accessors, and\n mutators.\n The program should have a list or a dictionary containing 10 Question objects, one for\n each trivia question. Make up your own trivia questions on the subject or subjects of your\n choice for the objects.\n\"\"\"\n\n# This style of import corrected an issue reading in the dat \n# file for an AttributeError in __main__ \nfrom questions import Question\nimport random\nimport pickle\n\n#Define main function\ndef main():\n # Set filename for question bank\n filename = 'qbank.dat'\n\n # Call the readData function to pickle in the data\n qbank = readData(filename)\n \n # setting the question bank index list to keep track of questions \n # asked\n qindex = []\n\n # Loops through all questions in question bank to create a list of indexes\n for i in range(len(qbank)):\n qindex.append(i)\n \n # Sets the turn at 1.\n # Used for tracking the turn and player's turn\n turn = 1\n\n # Initialize variables to track players scores\n p1score = 0\n p2score = 0\n\n # Loops through all questions in the question bank\n for i in range(len(qbank)):\n\n # Calls the askQuestion function and returns the result of \n # 1 or 0 (correct or incorrect) for addition to player score\n result = askQuestion(turn, qbank, qindex)\n\n # Determines which player to credit the result to\n if turn % 2 == 1:\n p1score += result\n else:\n p2score += result\n # Increments the turn\n turn += 1\n # Prints scores\n print(f'\\n\\nPlayer 1 score:\\n{p1score}\\n\\nPlayer 2 score:\\n{p2score}\\n')\n # Determines and prints winner\n if p1score > p2score:\n print('Player 1 Wins!!!')\n elif p2score > p1score:\n print('Player 2 Wins!!!')\n else:\n print('Everybody Wins!!!')\n\n# Define ask question function\ndef askQuestion(turn, qbank, qindex):\n # Checks whose turn it is\n if turn % 2 == 1:\n print('Player 1\\'s turn:')\n # initializes prompt variable\n prompt = -1\n # Random selection validation mechanism while keeping \n # track using the qindex\n while prompt not in qindex:\n # Generates random number within the length \n # of the qbank\n prompt = random.randint(0,len(qbank)-1)\n # Prints the question by calling the printQuestion function\n printQuestion(qbank, prompt)\n\n # Storing the result of checkAnswer in points (T/F)\n points = checkAnswer(qbank, prompt)\n\n # Removes the choice from the qindex so that \n # the choice (and the question) cannot be selected \n # again\n qindex.remove(prompt)\n\n # Based on the state of points it returns 1 or 0\n if points == True:\n return 1\n 
else:\n return 0\n\n # Same logic as the 'if', but for player two \n else:\n print('Player 2\\'s turn:')\n prompt = -1\n while prompt not in qindex:\n prompt = random.randint(0,len(qbank)-1)\n printQuestion(qbank, prompt)\n points = checkAnswer(qbank, prompt)\n qindex.remove(prompt)\n if points == True:\n return 1\n else:\n return 0\n\ndef printQuestion(bank, prompt):\n print(f'{bank[prompt].get_question()}')\n print(f'1. {bank[prompt].get_answer1()}')\n print(f'2. {bank[prompt].get_answer2()}')\n print(f'3. {bank[prompt].get_answer3()}')\n print(f'4. {bank[prompt].get_answer4()}')\n\ndef checkAnswer(bank, prompt):\n correct = False\n userAns = input('What is your answer? (Please type 1, 2, 3, or 4)\\n')\n while int(userAns) not in range(1,5):\n userAns = input('What is your answer? (Please type 1, 2, 3, or 4)\\n')\n if bank[prompt].get_correct() == userAns:\n correct = True\n return correct\n\ndef readData(filename):\n # Opening the file in read mode\n emp_file = open(filename, 'rb')\n # Setting EOF to false\n end_of_file = False\n #Setting while loop to get each object in binary file\n while not end_of_file:\n try:\n #unpickle next object\n dictionary = pickle.load(emp_file)\n return dictionary\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n emp_file.close()\n\nmain()"
},
{
"alpha_fraction": 0.6206675171852112,
"alphanum_fraction": 0.6322208046913147,
"avg_line_length": 32.1489372253418,
"blob_id": "90bd6ee221d8240047b5ec28c8399797dd545296",
"content_id": "fd6646c7da2bce96fb2c3afafece6a8ef481c631",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 47,
"path": "/LABS/Labs-3-4/lab3-4-6-currency.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining constant values for coin currency\nCURRENCY = {'DOLLAR': 1.00,\n 'QUARTER': 0.25,\n 'DIME': 0.10,\n 'NICKEL': 0.05,\n 'PENNY': 0.01,\n }\n\ndef main():\n #Get the total currency\n total = getCurrency()\n\n #Determine if total matches the value of a dollar\n matchDollar(total)\n\n#Prompt user for coin input\ndef getCurrency():\n #initalizing 'total' variable\n total = float(0.00)\n\n #Loop through the global dictionary for currency\n for i in CURRENCY:\n #Skipping 'DOLLAR' because it is currency but not typically a coin\n if i != 'DOLLAR':\n #Setting denomination to the key\n denom = i\n #Setting value to the key value in the dictonary\n value = CURRENCY[i]\n #Getting amount of coin input from user; could utilize some input validation\n coins = int(input('Provide the number of {} coins you have:\\n'.format(denom.lower())))\n #Calculate the monatary value of this group of coins\n coinsVal = float(coins) * float(value)\n #Adding the value of the coins to the running total\n total += coinsVal\n #returning the running total to main\n return total\n\n#Compare the running total to the value of a dollar\ndef matchDollar(total):\n if total == CURRENCY['DOLLAR']:\n print('Congratulations! You have an exact dollar')\n elif total > CURRENCY['DOLLAR']:\n print('You do not have an exact dollar. You have more.')\n else:\n print('You do not have an exact dollar. You have less.')\n\nmain()\n"
},
{
"alpha_fraction": 0.736947774887085,
"alphanum_fraction": 0.740963876247406,
"avg_line_length": 28.352941513061523,
"blob_id": "1d62245208f7b471d5b153e8484b6c22f333d813",
"content_id": "3f06066189ef3621f0f6d17f95bca47bd76d480e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 17,
"path": "/LABS/Projects/crypto.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n\nstring_input = input(\"Enter a string to encrypt:\")\n\nshift = int(input('Provide th enumber of places to shift input:'))\ninput_length = len(string_input)\nprint(string_input)\n#print(input_length)\n\nstring_output = \"\"\n\nfor i in range(input_length):\n character = string_input[i]\n charlocation = alphabet.find(character)\n newlocation = (charlocation + shift) % 52\n string_output += (alphabet[newlocation])\nprint(string_output)"
},
{
"alpha_fraction": 0.6371200084686279,
"alphanum_fraction": 0.6371200084686279,
"avg_line_length": 34.931034088134766,
"blob_id": "bab0a4b95d1487f5beaa7c40cbb0a6545cbb794b",
"content_id": "e0fb80885267aac738ac4ac86c53fad07e2fc656",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3125,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 87,
"path": "/Trees/trees.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nBinary tree is a tree data structure in which wach node has at most two\n children which are left child and right child\n\nTop node is the root\n\nComplete binary tree - Every level, except possibly the last, is \n completely filled and all nodes in the last level\n are as far left as possible\n\nFull binary tree - a full tree (referred to as a proper or plane binary tree)\n is a tree where every node except the leaves has two children\n\nTree Traversal - process of visiting (checking and/or updating) each node in a tree\n data structure, exactly once\n\nUnlike linked list, one-dimensional arrays, etc., which are traversed in linear order,\ntrees may be traversed in multiple ways.\n\ndepth-first order - pre-order, post-order, in-order\n\npre-order - Check if current node is empty\n display the data part of the root or current node\n traverse the left subtree by recursively calling the pre-order\n traverse the right subtree by recursively calling the pre-order\n\nin-order - Check if current node is empty\n traverse the left subtree by recursively calling the in-order\n display the data part of the root or current node\n traverse the right subtree by recursively calling the in-order\n\n\"\"\"\n\nclass TreeNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\nclass BinaryTree:\n def __init__(self, root):\n self.root = TreeNode(root)\n\n def print_tree(self, traversal_type):\n if traversal_type == 'preorder':\n return self.preorder_print(self.root, \"\")\n if traversal_type == 'inorder':\n return self.inorder_print(self.root, \"\")\n if traversal_type == 'postorder':\n return self.postorder_print(self.root, \"\")\n\n def preorder_print(self, start, traversal):\n # Root Left Right\n if start:\n traversal +=(str(start.value)+\"-\")\n traversal = self.preorder_print(start.left, traversal)\n traversal = self.preorder_print(start.right, traversal)\n return traversal\n\n def inorder_print(self, start, traversal):\n # Left Root Right\n if start:\n traversal = self.inorder_print(start.left, traversal)\n traversal +=(str(start.value)+\"-\") \n traversal = self.inorder_print(start.right, traversal)\n return traversal\n\n def postorder_print(self, start, traversal):\n # Left Root Right\n if start:\n traversal = self.postorder_print(start.left, traversal)\n traversal = self.postorder_print(start.right, traversal)\n traversal +=(str(start.value)+\"-\") \n \n return traversal\n\nbt = BinaryTree(\"D\")\nbt.root.left = TreeNode(\"B\")\nbt.root.right = TreeNode(\"F\")\nbt.root.left.left = TreeNode(\"A\")\nbt.root.left.right = TreeNode(\"C\")\nbt.root.right.left = TreeNode(\"E\")\nbt.root.right.right = TreeNode(\"G\")\n\nprint(bt.print_tree(\"preorder\"))\nprint(bt.print_tree(\"inorder\"))\nprint(bt.print_tree(\"postorder\"))"
},
{
"alpha_fraction": 0.5883849263191223,
"alphanum_fraction": 0.6201780438423157,
"avg_line_length": 22.8383846282959,
"blob_id": "6a2bef2c266a3615f626cc4a22a2c6c214c3d7d0",
"content_id": "3b6de5cb092a87f3a14eaec2e2ead1d70f120d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2359,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 99,
"path": "/LABS/Iteration/list_comp.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# List comprehension\n# Good way to create a new list by performing an\n# operation on each item in an existing list\n\n# Separating letters in a string\n# chars = []\n# for ch in 'Daniel':\n# chars.append(ch)\n# print(chars)\n\n# ######Output expression <input sequence>\n# print([ch for ch in 'Daniel'])\n\n# squares = [x*x for x in range(11)]\n\n# print(squares)\n\n\n# #list of tuples\n# list1 = [1, 2, 3]\n# list2 = ['a','b','c']\n\n# combined_list = [(x,y) for x in list1 for y in list2]\n# print(combined_list)\n\n# # list comp with optional predicate\n# evens = [x for x in range(21) if x % 2 == 0]\n# print(evens)\n\n# # list comp with multiple optional predicate (AND)\n# byFiveTwo = [x for x in range(21) if x % 2 == 0 if x % 5 == 0]\n# print(byFiveTwo)\n\n# # list comp with multiple optional predicate (AND)\n# byFiveTwo = [x for x in range(21) if x % 2 == 0 or x % 5 == 0]\n# print(byFiveTwo)\n\n# obj = ['Even' if i % 2 == 0 else \"Odd\" for i in range(30) if i % 5 == 0]\n# print(obj)\n\n# # flatten a list\n# matrix = [[1,2,3],[4,5,6],[7,8,9]]\n# flat_list = [num for row in matrix for num in row]\n# print(flat_list)\n\nnums = [1,2,3,4]\nstring = ['a','b','c','d']\n# I want a letter , number pair for each letter in str and each number in nums\n\nmylist = []\nfor letter in string:\n for num in nums:\n mylist.append((letter,num))\nprint(mylist)\n\n# the list comprehention way\nmylist2 = [(letter, num) for letter in string for number in nums]\nprint(mylist2)\n\n# # Set comprehension\n# nums = [1,2,1,1,9,3,4,5,2,7,6,7,8,9]\n# mySet = set()\n# for n in nums:\n# mySet.add(n)\n# print(mySet)\n\n# mySet2 = {n for n in nums}\n# print(mySet2)\n\n# # Dictionary comprehension\n# names = ['Bruce','Clark','Peter','Logan','Wade']\n# secrets = ['Batman', 'Superman', 'Spiderman', 'Wolverine', 'Deadpool']\n\n# #I want a dict{'name':'secret} for each name, secret in zip names, secret\n# my_dict = {}\n# for name, secret in zip(names, secrets):\n# my_dict[name] = secret\n# print(my_dict)\n\n# my_dict = {name:secret for name, secret in zip(names, secrets)}\n# print(my_dict)\n\n# # Generator Expression\n# # I want to yield 'n*n' for each 'n' in nums\n# nums = [1,2,3,4,5,6,7,8,9,10]\n\n# def gen_func(nums):\n# for n in nums:\n# yield n*n\n\n# my_gen =gen_func(nums)\n\n# print(next(my_gen))\n# print(next(my_gen))\n# print(next(my_gen))\n\n# my_gen2 = (n*n for n in nums)\n# for i in my_gen2:\n# print(i)"
},
{
"alpha_fraction": 0.7566834688186646,
"alphanum_fraction": 0.7601704597473145,
"avg_line_length": 50.63999938964844,
"blob_id": "ce078fc471db0aeaffacbbfa26e31da61dc68fd5",
"content_id": "3382cab2885ec5ff2efaeed3281c6745b288b448",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2599,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 50,
"path": "/LABS/Classes/inheritance.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Inheritance\n\n# Inheritance allows a ner class to extent an \n# existing class. The new class inherits the \n# members of the class it extends.\n\n# Super class or Base class\n# Sub Class or Derived Class\n\n# Polymorphism\n\n# Polymorphism allows subclasses to have methods with \n# the same names as methods in their superclasses. It \n# gives the ability for a program to call the correct \n# method depending on the type of object that is used \n# to call it.\n\n\"\"\"\n1. Employee and ProductionWorker Classes\n Write an Employee class that keeps data attributes for the following pieces of information:\n • Employee name\n • Employee number\n Next, write a class named ProductionWorker that is a subclass of the Employee class. The\n ProductionWorker class should keep data attributes for the following information:\n • Shift number (an integer, such as 1, 2, or 3)\n • Hourly pay rate\n The workday is divided into two shifts: day and night. The shift attribute will hold an integer value representing the shift that the employee works. The day shift is shift 1 and the\n night shift is shift 2. Write the appropriate accessor and mutator methods for each class.\n Once you have written the classes, write a program that creates an object of the\n ProductionWorker class and prompts the user to enter data for each of the object’s data\n attributes. Store the data in the object and then use the object’s accessor methods to retrieve\n it and display it on the screen.\n\n2. ShiftSupervisor Class\n In a particular factory, a shift supervisor is a salaried employee who supervises a shift. In\n addition to a salary, the shift supervisor earns a yearly bonus when his or her shift meets\n production goals. Write a ShiftSupervisor class that is a subclass of the Employee class\n you created in Programming Exercise 1. The ShiftSupervisor class should keep a data\n attribute for the annual salary and a data attribute for the annual production bonus that a\n shift supervisor has earned. Demonstrate the class by writing a program that uses a\n ShiftSupervisor object.\n\n3. Person and Customer Classes\n Write a class named Person with data attributes for a person’s name, address, and telephone\n number. Next, write a class named Customer that is a subclass of the Person class.\n The Customer class should have a data attribute for a customer number and a Boolean data\n attribute indicating whether the customer wishes to be on a mailing list. Demonstrate an\n instance of the Customer class in a simple program.\n \n\"\"\""
},
{
"alpha_fraction": 0.6266968250274658,
"alphanum_fraction": 0.6425339579582214,
"avg_line_length": 23.55555534362793,
"blob_id": "3e52bda4af910dccd9710c8d2bfd86388fe4fe07",
"content_id": "bc15b909937bad1932a8adf1ef687cc35f1fd450",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 18,
"path": "/LABS/recursion/rec-lines.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n3. Recursive Lines\n Write a recursive function that accepts an integer argument, n. The function should display\n n lines of asterisks on the screen, with the first line showing 1 asterisk, the second line\n showing 2 asterisks, up to the nth line which shows n asterisks.\n\"\"\"\n\ndef main():\n recLines(7)\n\n\ndef recLines(n):\n if n == 0:\n return 0\n else:\n print('*' + recLines(n-1)*'*')\n return n \nmain()\n"
},
{
"alpha_fraction": 0.5548780560493469,
"alphanum_fraction": 0.6036585569381714,
"avg_line_length": 11.615385055541992,
"blob_id": "7979946f94a02ec9ea11fe665d3753d87eb0e732",
"content_id": "38b42cb0e00aa3cc6bdeafcedd4cfa52aefcc59e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 13,
"path": "/02_Data_Types/lab2g.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n# Lab 2G\n\n* Follow the instructions on lab2g.py\n\n---\n\n|[Next Topic](/02_Data_Types/06_tuples.md)|\n|---|\n"
},
{
"alpha_fraction": 0.6279237866401672,
"alphanum_fraction": 0.6279237866401672,
"avg_line_length": 30.664121627807617,
"blob_id": "bd04ce18c6482c1aa3c752c72a8ba0fa61f0f01c",
"content_id": "fa6f2806356773b67c0593ec68dbc4967d16550c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4147,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 131,
"path": "/LABS/Classes/employeemgmt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from os import path\nimport employee as e\nimport pickle\n\ndef main():\n # Calls the function to unpickle file data for usage \n # in the dictionary\n filename = 'employee.dat'\n if path.exists(filename):\n emp_dict = readData(filename)\n else:\n print('\\nNo employee database found. Starting fresh....\\n')\n emp_dict = {}\n \n # Presents menu and returns the selection\n selection = menu()\n while selection != 'x':\n # Takes the selection and executes the operation on the dictionary\n execute(selection, emp_dict)\n\n # Writes data to disk\n writeData(emp_dict, filename)\n\n # Reads data back in to refresh the dictionary\n emp_dict = readData(filename)\n\n # Next selection\n selection = menu()\n\ndef menu():\n #Setting options list\n options = ['l','m','a','d','x']\n #Printing program menu\n print('PROGRAM MENU')\n print('Employee Lookup (press L)')\n print('Add an employee entry (press A)')\n print('Modify an employee entry (press M)')\n print('Delete an employee entry (press D)')\n print('EXIT (press X)') \n print('\\n\\n') \n # Getting user input for menu option\n selection = input('What would you like to do?') \n # Input validation for menu selection\n while selection.lower() not in options:\n selection = input('Invalid selection. What would you like to do?\\n')\n print('\\n')\n return selection\n\ndef readData(filename):\n # Opening the file in read mode\n emp_file = open(filename, 'rb')\n # Setting EOF to false\n end_of_file = False\n #Setting while loop to get each object in binary file\n while not end_of_file:\n try:\n #unpickle next object\n dictionary = pickle.load(emp_file)\n return dictionary\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n emp_file.close()\n\ndef writeData(dictionary, filename):\n #Opens the file on disk for writing\n emp_file = open(filename, 'wb')\n #Dump data to file\n pickle.dump(dictionary, emp_file)\n #close file\n emp_file.close()\n\ndef addEntry(dictionary):\n emp = e.Employee()\n emp.set_name()\n emp.set_id()\n emp.set_dept()\n emp.set_title()\n dictionary.update({emp.get_id(): emp})\n \ndef modEntry(dictionary):\n # Print keys as options\n print('Names\\n-----------')\n for i in dictionary:\n print(f'\\n{dictionary[i].get_id()}:\\t{dictionary[i].get_name()}\\n')\n # Gets user input for entry they wish to change\n query = input('Provide the ID from above to change:\\n')\n # Prompts for email entry to modify\n emp = dictionary[query]\n emp.set_name()\n emp.set_dept()\n emp.set_title()\n dictionary.update({query: emp})\n \ndef lookupEntry(dictionary):\n # Gets user input for entry they wish to lookup\n query = input('Provide the ID to lookup:\\n')\n # Prints the email for the query or lets them know it's not found\n result = dictionary.get(query, 'Employee ID not found')\n if result != 'Employee ID not found':\n print(f'\\nEmployee Information:\\n{dictionary[query].get_id()}\\n{dictionary[query].get_name()}\\n{dictionary[query].get_dept()}\\n{dictionary[query].get_title()}\\n\\n')\n else:\n print('****Employee ID not found****')\n print()\n \ndef removeEntry(dictionary):\n # Print keys as options for removal\n print('ID\\\\Names\\n-----------')\n for i in dictionary:\n print(f'{dictionary[i].get_id()}\\n{dictionary[i].get_name()}\\n\\n')\n # Gets user input for entry they wish to lookup\n query = input('Provide the ID to remove:\\n')\n # Deletes entry from dictionary\n del dictionary[query]\n # Verifies to the user that the entry was removed\n print(dictionary.get(query, '\\n****Information successfully 
removed.****\\n'))\n print()\n\ndef execute(selection, dictionary):\n if selection.lower() == 'a':\n addEntry(dictionary)\n elif selection.lower() == 'm':\n modEntry(dictionary)\n elif selection.lower() == 'l':\n lookupEntry(dictionary)\n elif selection.lower() == 'd':\n removeEntry(dictionary)\n\n\n# Calling main\nmain()"
},
{
"alpha_fraction": 0.6830357313156128,
"alphanum_fraction": 0.6955357193946838,
"avg_line_length": 31.97058868408203,
"blob_id": "41622c55f95ac1aff6244a89e5086bcffe68ecf1",
"content_id": "09d6ebf9a41f6fe62dc4ce5eb22eae99eec268af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1120,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 34,
"path": "/LABS/Labs-3-1/lab3-1-4.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "## Subtotal of items purchased\n#define initialized variable for total\ntotal = 0\nsalesTax = 0.06\n\n#Ask the user for the number of items\nitemCount = input('How many items did you purchase?\\n')\n\n#Initialize item price list as input from user\nitems = [0] * int(itemCount)\n\n#init while counter for loop through the number of items specified\ncount = 1\n\n#Begin while loop starting from count to itemCount\nwhile int(count) <= int(itemCount):\n item = input(\"Input price of item {} \\n\".format(count))\n #Input validation for numeric\n #Using .replace to get rid of decimal to validate input is numeric\n while item.replace('.','').isnumeric() == False:\n item = input(\"Input was not a number. Input price of item {}\\n\".format(count))\n #Add item price to items array\n items[count-1] = item\n #incrememnt counter\n count += 1\n\n#loop through array to create subtotal\nsubtotal = 0.00\nfor i in items:\n subtotal += float(i)\n \nprint('Subtotal: ${:.2f}\\n'.format(subtotal))\nprint('Sales Tax: ${:.2f}\\n'.format(float(subtotal*salesTax)))\nprint('Total: ${:.2f}\\n'.format(float(subtotal +(subtotal*salesTax))))"
},
{
"alpha_fraction": 0.644035816192627,
"alphanum_fraction": 0.6473361849784851,
"avg_line_length": 29.753623962402344,
"blob_id": "f73d837420f39bf7cd83841a811343b024949427",
"content_id": "abb0ceb4769870b4e4be3cda1954d8a8379992b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2121,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 69,
"path": "/LABS/Socket/full-chat-clt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread\nimport argparse\n\n# Setting Global constant for buffer size\nBUFSIZ = 1024\n\n# Defining Main\ndef main(): end\n # Creating a thread for receiving chat data; pointing it to the \n # receive function.\n receive_thread = Thread(target=receive)\n # Starting the thread\n receive_thread.start()\n\n # Creating a thread for sending\n send_thread = Thread(target=send)\n # Starting the thread for sending\n send_thread.start()\n\ndef receive():\n #Handles receiving of messages.\n while True:\n try:\n # reads what the sicket has received and prints to client screen\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n print(msg)\n except OSError: # Possibly client has left the chat.\n break\n\n\ndef send(): \n while True:\n #Handles sending of messages\n msg = input('\\n>')\n # Sends the input as bytes with UTF-8 encoding over the socket\n client_socket.send(bytes(msg, \"utf8\"))\n # Checks the input to determine if the user has decided to quit\n if msg == \"{quit}\":\n # If the user has quit, close connex and exit program\n client_socket.close()\n exit()\n\n# Creating the client connection and returning socket to main\ndef client_conn(host, port):\n # Setting the address tuple w/ in-line args\n ADDR = (host, port)\n\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect(ADDR)\n return sock\n\n\n\nif __name__ == '__main__': \n # This series of statements allows for in-line arguments\n parser = argparse.ArgumentParser (description='TCP Chat Client Example') \n # Client parameter\n parser.add_argument('--port', action=\"store\", dest=\"port\", type=int, required=True) \n # Host parameter\n parser.add_argument('--host', action=\"store\", dest=\"host\", type=str, required=True)\n given_args = parser.parse_args() \n port = given_args.port \n host = given_args.host\n\n # Calling client_conn to create the client socket\n client_socket = client_conn(host, port)\n # call Main\n main()"
},
{
"alpha_fraction": 0.5673916339874268,
"alphanum_fraction": 0.5763956308364868,
"avg_line_length": 30.94247817993164,
"blob_id": "42914180702800d2712ca2f923a6e40134f47b60",
"content_id": "7fcfdad69ac1d79535612389bf7087fda6d991ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7219,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 226,
"path": "/LinkedLists/node.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This class will represent a singly linked node\n\nclass Node:\n def __init__(self, data, next = None):\n # Instantiates a node with a default next of None\n self.data = data\n self.next = next\n\n# This is meant to instantiate nodes and we'll add methods to this\n# class to add functionality\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n # printing our linked list\n def printLinked(self):\n probe = self.head\n while probe != None:\n print(probe.data)\n probe = probe.next\n\n # To add something to our linked list\n def append(self, data):\n # Instantiate a new node\n newNode = Node(data)\n # Is there something in our linked list yet\n if self.head is None:\n self.head = newNode\n # There are node(s) in our linked list\n else:\n # Initialize our probe pointer\n probe = self.head\n # Is probe at the end of list?\n while probe.next != None:\n probe = probe.next\n probe.next = newNode\n\n # Add to beginning (prepend)\n def prepend(self, data):\n #instantiate a new node\n newNode = Node(data)\n # Anything in our linked list\n newNode.next = self.head\n self.head = newNode\n\n # Inserting within a linked list\n def insert(self, index, data):\n # If empty or index is less than 1\n if self.head is None or index <= 0:\n self.head = Node(data, self.head)\n else:\n probe = self.head\n while index > 0 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position\n probe.next = Node(data, probe.next)\n \n # Inserting before index\n def insertBefore(self, index, data):\n # If empty or index is less than 1\n if self.head is None or index <= 0:\n self.head = Node(data, self.head)\n else:\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position\n probe.next = Node(data, probe.next)\n \n # Inserting after index\n def insertAfter(self, index, data):\n # If empty or index is less than 1\n if self.head is None or index <= 0:\n self.head = Node(data, self.head)\n else:\n probe = self.head\n while index > -1 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position\n probe.next = Node(data, probe.next)\n \n def delete(self, index):\n if index <= 0 or self.head.next is None:\n removedItem = self.head.data\n self.head = self.head.next\n else:\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n removedItem = probe.next.data\n probe.next = probe.next.next\n return removedItem\n\n # Swapping the data at specified indicies with first node being 0\n # Too High of an index results in the last element being involved\n # in the swap.\n def swapNode(self, index1, index2):\n # Setting and moving a probe for the 1st item\n probe1 = self.head\n while index1 > 0 and probe1.next != None:\n probe1 = probe1.next\n index1 -= 1\n # Setting and moving the probe for the second item\n probe2 = self.head\n while index2 > 0 and probe1.next != None:\n probe1 = probe1.next\n index2 -= 1\n # swapping the data elements of each node\n tempData = probe1.data\n probe1.data = probe2.data\n probe2.data = tempData\n\n # This actually reverses the elements by swaping the data\n # this preserves all the pointers\n #\n def reverse(self):\n # Sets an empty list to store the elements\n templist = []\n probe = self.head\n # Moving through the linked list and adding the data elements\n # in existing order to the tempList\n while 
probe.next != None:\n templist.append(probe.data)\n probe = probe.next\n templist.append(probe.data)\n\n # After resetting the probe\n probe = self.head\n # Sets the index for loop termination of all elements in tempList\n index = len(templist)\n # Walking backwards through tempList and forward through the\n # linked list to reset each .data to the element stored in\n # tempList\n while index > -1 and probe != None:\n probe.data = templist[index-1]\n probe = probe.next\n index -=1\n\n\n\n def getIndex(self, index):\n # If empty or index is less than 1\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position\n print(probe.data)\n\n def copy(self):\n newLL = LinkedList()\n probe = self.head\n while probe is not None:\n newLL.append(probe.data)\n probe = probe.next\n return newLL\n\n def __len__(self):\n probe = self.head\n count = 1\n while probe.next != None:\n probe = probe.next\n count += 1\n return count\n\n\"\"\"\nCircular Linked List - Special case of singly linked list\n Insertion and removal of the first node are special cases of\n the insert ith and remove ith operations on a singly linked\n list. These are special b/c the head pointer must be reset.\n You can use circular liked lists with a dummy header node.\n Contains a link from the last node back to the first node in\n the structure.\n\"\"\"\n\nclass CircLinked:\n def __init__(self):\n self.head = None\n\n def append(self, data):\n if not self.head:\n self.head = Node(data)\n self.head.next = self.head\n else:\n newNode = Node(data)\n probe = self.head\n while probe.next != self.head:\n probe = probe.next\n probe.next = newNode\n newNode.next = self.head\n\n\n\nlinked = LinkedList()\nlinked.append(\"A\")\nlinked.append(\"B\")\nlinked.append(\"C\")\nlinked.append(\"D\")\nlinked.append(\"E\")\nlinked.append(\"F\")\nlinked.insertBefore(3,\"Before\")\nlinked.insertAfter(3,\"After\")\nlinked.printLinked()\n#linked.prepend(\"I Should be at the beginning\")\n#linked.insert(2, \"This I inserted\")\n#linked.insert(67, \"I inserted this too, with a high index\")\n# #linked.getIndex(1)\n# linked.printLinked()\n# print('\\n')\n# #print(len(linked))\n# #linked.delete(1)\n# #linked.getIndex(1)\n# #linked.printLinked()\n# #print(len(linked))\n# #linked.swapNode(0,67)\n# linked.reverse()\n# linked.printLinked()\n#testing copy and integrity of shallow copy\n# linked2 = linked.copy()\n# linked.delete(2)\n# linked.printLinked()\n# linked2.printLinked()\n"
},
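The reverse() above swaps data through a temporary Python list, which costs O(n) extra space. A pointer-based reversal relinks the nodes themselves and needs only O(1) extra space. A minimal standalone sketch, assuming a Node shaped like the one in the file (reverse_in_place is an illustrative name, not from the repo):

# Sketch: in-place reversal by relinking nodes -- O(1) extra space
# versus the O(n) temporary list used by reverse() above.

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def reverse_in_place(head):
    previous = None
    while head is not None:
        following = head.next    # remember the rest of the list
        head.next = previous     # point this node backwards
        previous = head          # drag the trailing pointer forward
        head = following         # advance to the next original node
    return previous              # the old tail is the new head

# Usage: build A -> B -> C, reverse, and walk the result
head = reverse_in_place(Node("A", Node("B", Node("C"))))
probe = head
while probe is not None:
    print(probe.data)            # prints C, B, A
    probe = probe.next

Relinking also keeps each node object intact, so outside references to a particular node still see the same data after the reversal.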
{
"alpha_fraction": 0.5799068808555603,
"alphanum_fraction": 0.606283962726593,
"avg_line_length": 39.92063522338867,
"blob_id": "946037993d9259c4efa360531185d58a0ae5db1a",
"content_id": "cd525bd972f3c0a58d95f9428c66f6790a450455",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2578,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 63,
"path": "/Practice_Test/change_for_snacks.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\nWrite the function computeChange that receives a dictionary with the following possible keys:\n\n'H' - half dollar\n'Q' - quarter\n'D' - dime\n'N' - nickel\n'P' - penny\n\nEach key will have an associated integer value representing a quantity. The dictionary will represent a gathering\nof change (coins). Your function will compute the total amount of change and determine whether or not there is\nenough money to purchase items at a fast-food restaurant. If you have $2.50 or more you can buy BOTH fries\nand a soda. If you have at least $1.50 but less than $2.50 can buy FRIES. If you have at least $1.00 but less than $1.50\nyou can get a SODA. If you have less than $1.00 you get NOTHING.\n\nFor example, you may receive a dictionary such as {'Q':3, 'D':7, 'P':14} which would compute to $1.59. (3 quarters,\n7 dimes, 14 pennies)\n\nIf any quantity in the dictionary is less than zero, the function will return a single value of zero (0), otherwise\nthe function should return two values: \n 1. Total money in the form of a Float (dollar representation with two decimal places) \n\t\t\t\t\t\t\t\t\t e.g. 1.59\n 2. A string representing what could be purchased (if anything). Use\n one of the following strings:\n 'NOTHING'\n 'SODA'\n 'FRIES'\n 'BOTH'\n\nreturn the total money as the first value, followed by the string representing the purchase as the second value\n\n\n'''\n\n\ndef computeChange(change):\n # Setting values for each possible currency denomination\n values = {'H':0.5, 'Q':0.25, 'D':0.10, 'N':0.05, 'P':0.01}\n \n #setting accumulator\n total = 0.00\n\n #Iterating through each of the keys in the change dictionary\n for denom in change.keys():\n if change[denom] < 0:\n return 0\n else:\n total += change[denom] * values[denom]\n # Checking the total to return the possible purchased items\n if total < 1:\n # All returns require that total is rounded to two decimal places because\n # floating point math is slightly inaccurate in the way they are stored.\n return round(total,2), f'NOTHING'\n elif total < 1.5:\n return round(total,2), f'SODA'\n elif total < 2.5:\n return round(total,2), f'FRIES'\n else:\n return round(total,2), f'BOTH' \n\n return 0, ''\n\nprint(computeChange({'Q':3, 'D':7, 'P':14}))\n"
},
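computeChange accumulates binary floats and papers over the representation error with round(); counting in integer cents sidesteps the problem until the final conversion. A sketch of that alternative (the names CENTS and compute_change_cents are illustrative, not part of the exercise):

# Sketch: the same decision table computed in integer cents, so no
# floating-point rounding is needed until the final dollar conversion.

CENTS = {'H': 50, 'Q': 25, 'D': 10, 'N': 5, 'P': 1}

def compute_change_cents(change):
    if any(qty < 0 for qty in change.values()):
        return 0
    total_cents = sum(qty * CENTS[denom] for denom, qty in change.items())
    if total_cents < 100:
        item = 'NOTHING'
    elif total_cents < 150:
        item = 'SODA'
    elif total_cents < 250:
        item = 'FRIES'
    else:
        item = 'BOTH'
    return total_cents / 100, item

print(compute_change_cents({'Q': 3, 'D': 7, 'P': 14}))   # (1.59, 'SODA')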
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.5299999713897705,
"avg_line_length": 21.33333396911621,
"blob_id": "d523135c33de3bb77907c6b00c62c79f899860c6",
"content_id": "97688894f9f5be1618b06742ab26cbd4595d4d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 9,
"path": "/LABS/FILE-IO/file-io-head.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n head = 5\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\numbers.txt'\n f = open(filename, 'r')\n a = f.readlines()\n for i in range(head):\n print(a[i], end='')\n \nmain()"
},
{
"alpha_fraction": 0.6659109592437744,
"alphanum_fraction": 0.6737427711486816,
"avg_line_length": 33.16901397705078,
"blob_id": "b55ebd53ebb77477db456d291cb792140abdd0a4",
"content_id": "b6b802e169d7dabb7d06f1d864f6228cacdeba07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4852,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 142,
"path": "/Algorithms/fastersorting.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFaster Sorting\n\nUp until now we've leared about sorting methods with a O(n^2) complexity. Even\nw/ mods they are only marginally better.\n\nLet's now discuss some algs w/ a complexity of O(log n) or )(n log n)\nThe secret here is that we use our divide and conquer strategy\n\nEach Alg finds a way of breaking the list into smaller lists. Then these sublists \nare sorted recursively. The # of subdivisions is log(n) and the amount of work \nneeded to rearrange the data on each subdivision is n, thus making our complexity \nO(n log n)\n\"\"\"\n\"\"\"\n**********************QUICK SORT**********************\n- The strategy here is theat we start with a PIVOT. Pivot can be anywhere but lets \njust start by setting our pivot to the midpoint.\n\n- Partition items in the list so that all items less than the pivot move left of \nthe pivot and the rest move right. The final position of the pivot after the list \nis sorted could be at the end of the list or the beginning of the list.\n\n- Divide and Conquer. Reapply the process recursively to the sublists formed by \nsplitting the list at the pivot. one sublist consists of all the items to the \nleft of the pivot(now all the smaller items), and the other sublist has all items \nto the right of the pivot (larger items)\n\n- Process terminates each time it encounters a sublist with fewer than two items\n\n\"\"\"\n\n\ndef quickSort(myList):\n quickSortHelper(myList, 0, len(myList)-1)\n \n\n# recursive function to hide extra arguments for the endpoints of a subset\ndef quickSortHelper(myList, left, right):\n if left < right:\n pivotLocation = partition(myList, left, right)\n # REcursively calls helper for the left of partition\n quickSortHelper(myList, left, pivotLocation - 1)\n # REcursively calls helper for the left of partition\n quickSortHelper(myList, pivotLocation + 1, right)\n\n\ndef partition(myList, left, right):\n middle = (left + right)//2\n pivot = myList[middle]\n myList[middle] = myList[right]\n myList[right] = pivot\n #set boundary point to first position\n boundary = left\n # Move items less than pivot to the left\n for index in range(left, right):\n if myList[index] < pivot:\n swap(myList, index, boundary)\n boundary +=1\n swap(myList, right, boundary)\n return boundary\n\ndef swap(myList, i , j):\n # Exchanges the positions of two items in a list\n temp = myList[i]\n myList[i] = myList[j]\n myList[j] = temp\n\nimport random\ndef main(size = 20, sort = quickSort):\n myList = []\n for count in range(size):\n myList.append(random.randint(1,size + 1))\n print(myList)\n sort(myList)\n print(myList)\n\nmain()\n\n\"\"\"\n********************MERGE SORT************************\n\n- Divide and conquer strategy\n- Computer the middle position of a list and recursively sort its \nleft and right sublists. 
\n- Merge the two sorted sublists back into a single sorted list\n- Stop the process when sublists can no longer be subdivided\n\n\"\"\"\n\n# This is the merge sort function\nfrom array import array\n\ndef mergeSort(myList):\n # CopyBuff is a temporary space needed during the merge\n copyBuffer = array(len(myList))\n mergeSortHelper(myList, copyBuffer, 0, len(myList) - 1)\n\n\n# myList is being sorted\n# copyBuffer = temp space needed during merge\n# low, high = bounds of sublist\n# middle = midpoint of sublist\ndef mergeSortHelper(myList, copyBuffer, low, high):\n if low < high:\n middle = (low + high) //2\n mergeSortHelper(myList, copyBuffer, low, middle)\n mergeSortHelper(myList, copyBuffer, middle +1, high)\n merge(myList, copyBuffer, low, middle, high)\n\n# Init i1 and i2 to first items in each sublist\n# The merge function combines two sorted sublists into a larger sorted list\n# The first sublist lies between the low and middle and the second between \n# middle +1 and high\n# - set up index pointers to the first items in each sublist (low and\n# middle + 1)\n# - Starting w/ the st item in each sublist, repeatedly compare items. \n# Copy the smaller\n# item from its sublist to the copy buffer and advance to the next \n# item in sublist\n# - copy the portion of copybuffer between low and high back to the \n# corresponding positions in myList \ndef merge(myList, copyBuffer, low, middle, high):\n i1 = low\n i2 = middle +1\n for i in range(low, high +1):\n if i1 > middle:\n # First sublist\n copyBuffer[i] = myList[i2]\n i2 += 1\n elif i2 > high:\n # second sublist exhausted\n copyBuffer[i] = myList[i2]\n elif myList[i1] < myList[i2]:\n copyBuffer[i] = myList[i1]\n i1 += 1\n else:\n copyBuffer[i] = myList[i2]\n i2 += 1\n #Copy sorted items back into proper position in the list\n for i in range(low, high +1):\n myList[i] = copyBuffer[i]\n"
},
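With the copy buffer fixed above, mergeSort can be driven the same way main() drives quickSort. A small assumed sanity harness (not part of the original file) that checks the result against Python's built-in sorted():

# Sketch: quick check that mergeSort (defined above) agrees with sorted().
import random

data = [random.randint(1, 100) for _ in range(25)]
expected = sorted(data)
mergeSort(data)
assert data == expected, "mergeSort disagrees with sorted()"
print("mergeSort OK:", data)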
{
"alpha_fraction": 0.49330955743789673,
"alphanum_fraction": 0.510258674621582,
"avg_line_length": 23.369565963745117,
"blob_id": "ff4c37a6cf9abea0190177405abf3fe8b5392602",
"content_id": "6503f7fe39922e25e6b19656139d0009d7f37b9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 46,
"path": "/LABS/PythonBasicsExam/sorting.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# The selectionSort function\ndef selectionSort(my_list):\n i = 0\n # do n-1 searches for the smallest\n while i < len(my_list) -1:\n minIndex = i\n j = i + 1\n # start a search\n while j < len(my_list):\n if my_list[j] < my_list[minIndex]:\n minIndex = j\n j += 1\n # Exchange if needed\n if minIndex != i:\n swap(my_list, minIndex, i)\n i += 1\n\ndef reverseSort(my_list):\n i = 0\n # do n-1 searches for the smallest\n while i < len(my_list) -1:\n maxIndex = i\n j = i + 1\n # start a search\n while j < len(my_list):\n if my_list[j] > my_list[maxIndex]:\n maxIndex = j\n j += 1\n # Exchange if needed\n if maxIndex != i:\n swap(my_list, maxIndex, i)\n i += 1\n\n# The swap function\ndef swap(my_list, i , j):\n # exchanges the positions of i and j \n temp = my_list[i]\n my_list[i] = my_list[j]\n my_list[j] = temp\n\nmy_list = [1,4,5,6,2,3,9]\nprint(my_list)\nreverseSort(my_list)\nprint(my_list)\nselectionSort(my_list)\nprint(my_list)\n"
},
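selectionSort and reverseSort above differ only in the direction of one comparison, so a single parameterized function can replace both. A sketch with illustrative names:

# Sketch: one selection sort with a descending flag replaces the two
# near-identical functions above.

def selection_sort(my_list, descending=False):
    for i in range(len(my_list) - 1):
        best = i
        for j in range(i + 1, len(my_list)):
            if descending:
                better = my_list[j] > my_list[best]   # looking for the largest
            else:
                better = my_list[j] < my_list[best]   # looking for the smallest
            if better:
                best = j
        if best != i:
            # tuple assignment swaps without a temp variable
            my_list[i], my_list[best] = my_list[best], my_list[i]

nums = [1, 4, 5, 6, 2, 3, 9]
selection_sort(nums, descending=True)
print(nums)   # [9, 6, 5, 4, 3, 2, 1]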
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6726190447807312,
"avg_line_length": 32.79999923706055,
"blob_id": "fffec5244b96fa9a96099ddc5743b2c957e99a10",
"content_id": "fa903a079cd6ad3496012ca44ab753fa110cdb4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 5,
"path": "/LABS/FILE-IO/withOpen.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "filename = 'C:\\\\Users\\\\student\\\\Documents\\\\names.txt'\nwith open(filename,'r') as file:\n size_to_read = 3\n fcontents = file.read(size_to_read)\n print(fcontents)"
},
{
"alpha_fraction": 0.6035879850387573,
"alphanum_fraction": 0.6171875,
"avg_line_length": 26.436508178710938,
"blob_id": "249de0423e05460dc2209f817e39c56f2c889c66",
"content_id": "e9e276197f79e1416dbad395ee2c7d57df5b83ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3456,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 126,
"path": "/04_functions/recursion.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# # This is an example of recursion, where the function (annoying) calls itself until python kills it\n# def forever(times):\n# annoying(times)\n\n# def annoying(times):\n# if times > 0:\n# print('Test message')\n# annoying(times - 1)\n\n# def main():\n# forever(500)\n\n# main()\n\n# # Must have something to kill your recursion\n# # Depth of recursion is the number of times it calls itself\n# def recursive_factorial(n):\n# # Start with base case (If the problem can be solved now,\n# # then the function solves it and returns)\n# if n == 0:\n# return 1\n# #Recursive case (If the problem cannot be solved now, \n# # then the function reduces it to smaller similar problems\n# # and calls itself to solve the smaller problem)\n# else:\n# return n * recursive_factorial(n-1)\n\n# print(recursive_factorial(4))\n\n\n# Range sum function\n# def main():\n# # Create a list of numbers \n# numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n# # Get the sim of the items at indicies 2 - 5\n# my_sum = range_sum(numbers, 2, 5)\n\n# # Display the sum\n# print('The sum of items 2 - 5 is ', my_sum)\n\n# The range_sum function returns the sum of a specified \n# range of items in the num_list. The start parameter\n# specified the index of the starting item. The end \n# parameter specifies the index of the ending item\n# def range_sum(num_list, start, end):\n# #base case\n# if start > end:\n# return 0\n# # recursive case\n# else:\n# return num_list[start] + range_sum(num_list, start + 1, end) \n\n# main()\n\n# Fibonacci returns the nth number in the fibonacci series\n# def fibonacci(n):\n# if n == 0:\n# return 0\n# elif n == 1:\n# return 1\n# else:\n# return fibonacci(n-1) + fibonacci(n-2)\n\n# for i in range(0,50):\n# print(fibonacci(i))\n\n\n# GCD greatest common divisor(denominator)\n\n# if x can evenly be divided by y then gcd(x, y) = y\n# Otherwise, gcd(x,y) = gcd(y, remainder of x/y)\n\n# def main():\n# # Get two numbers\n# num1 = int(input('Enter an integer: \\n'))\n# num2 = int(input('Enter another integer: \\n'))\n\n# # Display the GCD\n# print('The greatest common divisor of\\nthe two numbers is: ', gcd(num1,num2))\n\n# def gcd(x,y):\n# #base case\n# if x % y == 0:\n# return y\n# else:\n# return gcd(y, x % y)\n \n# # The GCD returns the greates common divisor of two numbers\n# main()\n\n## Towers of Hanoi\n# RULES\n # Only one disc may be moved at a time\n # A disc cannot be placed on top of another \n # disk\n # All discs must be stored on a peg while it's being moved \n # You must move all discs from the third peg to the first peg \n\n# This program simulates the Towers of Hanoi Game\ndef main():\n # set up some initial values\n num_discs = 3\n from_peg = 1\n to_peg = 3\n temp_peg = 2\n\n moveDiscs(num_discs, from_peg, to_peg, temp_peg)\n print('All the discs are moved')\n\n\n# The moveDiscs function displays a disc move in\n# the Towers of Hanoi game\n# The parameters are:\n# num: the number of discs to move\n# from_peg: the peg to move from \n# to_peg: the peg to move to\n# temp_peg the temporary peg\ndef moveDiscs(num, from_peg, to_peg, temp_peg):\n if num > 0:\n moveDiscs(num -1, from_peg, temp_peg, to_peg)\n print('Move a disc from peg', from_peg, 'to_peg ', to_peg)\n moveDiscs(num -1, temp_peg, to_peg, from_peg) \n\n# Call main\nmain()"
},
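The commented-out fibonacci above is exponential, so its loop over range(0, 50) would effectively never finish: each call recomputes the same subproblems over and over. Memoizing the function means each value is computed once. A minimal sketch using functools.lru_cache:

# Sketch: memoized Fibonacci -- caching turns the exponential
# recursion above into a linear number of distinct calls.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci(n):
    if n < 2:        # base cases: fib(0) = 0, fib(1) = 1
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)

for i in range(50):
    print(fibonacci(i))   # finishes instantly, unlike the uncached version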
{
"alpha_fraction": 0.6460905075073242,
"alphanum_fraction": 0.6858710646629333,
"avg_line_length": 20.47058868408203,
"blob_id": "7d75e77cf4161e0b287690707a29a69173722e6d",
"content_id": "df265b74d4e3c5c073265aff840486cf64f1afc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 34,
"path": "/LABS/Iteration/map_func.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# map() function\n# calls a specified function and applies it to each item of an iterable\n\ndef square(x):\n return x*x\n\nnumbers = [1, 2, 3, 4, 5]\n\n# Uses the map function to square each item in the list of numbers\n#sqrList = map(square, numbers)\n#print(next(sqrList))\n#print(next(sqrList))\n#print(next(sqrList))\n#print(next(sqrList))\n#print(next(sqrList))\n#print(next(sqrList))\n\n\"\"\" sqrList2 = map(lambda x: x*x, numbers)\nprint(next(sqrList2))\nprint(next(sqrList2))\nprint(next(sqrList2))\nprint(next(sqrList2))\nprint(next(sqrList2))\nprint(next(sqrList2)) \"\"\"\n\n# Use of map is map(<function>, arg1, arg2)\ntens = [10, 20, 30, 40, 50]\nindx = [1, 2, 3, 4, 5]\npowers = list(map(pow, tens, indx))\nprint(powers)\n\n\nmydict= {}\ndir(mydict)"
},
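map() has two close companions that follow the same function-over-an-iterable style: filter() selects items and functools.reduce() folds them into one value. A short assumed continuation of the examples above:

# Sketch: filter and reduce alongside map, on the same kind of data.
from functools import reduce

numbers = [1, 2, 3, 4, 5]
evens = list(filter(lambda x: x % 2 == 0, numbers))   # [2, 4]
total = reduce(lambda acc, x: acc + x, numbers, 0)    # 15
squares = [x * x for x in numbers]                    # comprehension form of map(square, numbers)
print(evens, total, squares)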
{
"alpha_fraction": 0.5866968631744385,
"alphanum_fraction": 0.5986420512199402,
"avg_line_length": 42.69780349731445,
"blob_id": "3b56f6aed3e8a93ffaf7f199c0f5fd670e525245",
"content_id": "4d5a1c16326fcb06f5b218a8fa8c44c3477fe577",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7953,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 182,
"path": "/Practice_Test/net-server-sess.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"\nCreate a shared session keys Client Server application according to the following specifications:\n\n* Both the Server and the Client share a list of 10 session keys, where each key is 128 bits\n\n* After a successful connection setup between the server and the client, the server randomly picks\n one session key, and masks this key using XOR operation with a random value R of 128 bits\n\n* The server sends the masked value to the client,\n which will try to figure out which session key has been selected by the server.\n The Client will do the following:\n - Randomly select a key from the session key list\n - Apply XOR operation with the masked value received from the server to obtain a random value R'\n - Send R' to the server\n - The server compares its random value R with R' received from the client.\n # if there is a match, then the client has picked the same session key,\n and the server will send \"SUCCESS\" message to the client,\n which will then display the session key and close the connection with the server\n # If there is no match, the server will send \"INVALID KEY: x more trail(s)\",\n where x is the number of remaining trails that the client can do.\n In total, the client has 3 trails to guess the session key selected by the server.\n If the client consumed the 3 trails without getting the session key, the sever will send \"UNAUTHORIZED CLIENT\",\n and terminate the connection with the client.\n\n* Note the following:\n - The server is running forever\n - The server displays the IP address of the client connecting to it\n - After a successful connection setup between the server and the client,\n the server send a message \"SERVER>>> Connection successful\" to the client,\n which will be displayed on the client side\n - The server messages: \"SUCCESS\", \"INVALID KEY: x more trail(s)\" , and \"UNAUTHORIZED CLIENT\"\n are displayed on the client side\n - \"Connection terminated\" message is displayed on both the client and the server sides\n when the client-server connection is terminated\n - You can run the client program many times, this will ***simulate*** that different clients are connecting to the server.\n One run for the client program may end up with \"UNAUTHORIZED CLIENT\" while another run may end up with \"SUCCESS\"\n\n\"\"\"\nimport socket, random, string, struct, sys, threading, logging\nfrom random import randint\nfrom threading import Thread\n\n# ------------------------set logging configs -----------\n# https://docs.python.org/2/library/logging.html?highlight=logging#module-logging \n# https://realpython.com/python-logging/#the-logging-module\nlogging.basicConfig(\n filename = 'serverOuput.log', \n filemode = 'w', \n format = '%(process)d ---- %(levelname)s ---- %(message)s',\n level = logging.INFO # will log anything greater than the level specified here\n )\n\n\nfailureXML = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<testsuite errors=\"0\" failures=\"1\" name=\"Networking-Socket\" tests=\"1\" time=\"0.000\">\n<testcase classname=\"Networking-Socket\" name=\"find_key_requests\" time=\"0.000\">\n<failure message=\"Incorrect\"></failure>\n</testcase>\n</testsuite>\"\"\"\n\nsuccessXML = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<testsuite errors=\"0\" failures=\"0\" name=\"Networking-Socket\" tests=\"1\" time=\"0.000\">\n<testcase classname=\"Networking-Socket\" name=\"find_key_requests\" time=\"0.000\"/>\n</testsuite>\"\"\"\n\nserverHOST = \"0.0.0.0\"\nserverPort = 5000\nsession_keys = 
[\"aa072fbbf5f408d81c78033dd560d4f6\",\n \"bb072fbbf5f408d81c78033dd560f6d4\",\n \"f5072fbbf5f408d81c78033dd5f6d460\",\n \"408df5072fbbf5f81c3dd5f6d4607803\",\n \"dd5f408df5072fbbfc36d46078035f81\",\n \"c36d408df5072fbbf46078035f81dd5f\",\n \"35f8c36df5072fbbf4607801dd5fd408\",\n \"2f07aaf408d81c78033dd560d4f6bbf5\",\n \"80332ff408d81c7dd560d4f6bbf507aa\",\n \"560d4f8033281c7dd6bbf507aaff408d\",\n ]\n\ng_evt = threading.Event()\n\n\ndef signaled():\n return g_evt.isSet()\n\n\ndef doWork(sock):\n while not signaled():\n\n try:\n connection, address = sock\n connection.settimeout(5)\n print(\"Connection received from: {}\".format(address[0]))\n logging.info(\"Connection received from: {}\".format(address[0]))\n\n # =========================================================================\n # no longer setting key using the client's IP, hardcoding the key here\n # ---------------------------------------------------------------------\n # array = address[0].split('.')\n # key_index = list(str(int(array[3])))\n # key_index = key_index[-1]\n key_index = 5\n # =========================================================================\n\n # step 5: send and receive data via connection\n connection.send(\"SERVER>>> Connection successful\".encode())\n serverRandomValue = ''.join([random.choice(\"abcdef\" + string.digits) for n in range(32)])\n xValue = hex(int(session_keys[int(key_index)], 16) ^ int(serverRandomValue, 16)).rstrip(\"L\")\n connection.send(xValue.encode())\n print(\"Server sent {}\".format(xValue))\n logging.info(\"Server sent {}\".format(xValue))\n # print >> sys.stderr, \"session_key is: \", session_keys[int(key_index)]\n # print >> sys.stderr, \"xValue is: \", xValue\n\n clientRandomValue = connection.recv(1024) # 1024 is the buffer size\n print(\"Server recieved {}\".format(clientRandomValue))\n logging.info(\"Server recieved {}\".format(clientRandomValue))\n clientRandomValue = int(clientRandomValue, 16)\n serverRandomValue = int(serverRandomValue, 16)\n if(clientRandomValue == serverRandomValue):\n connection.send(\"Success! You found the key: \\n{0}\\n\".format(successXML).encode())\n print(\"Success! You found the key\")\n logging.info(\"Success! 
You found the key\")\n #print >> sys.stderr, \"SUCCESS on ip number: \", address[0]\n break\n else:\n ## ENCODE ORIGINALLY MISSING CREATING BYTES OBJECT ISSUE\n connection.send(\"INVALID KEY\\n{0}\\n\".format(failureXML).encode()) \n print(\"FAILURE, invalid key received\")\n logging.info(\"FAILURE, invalid key received\")\n # print >> sys.stderr, \"clientRandomValue: \", clientRandomValue\n # print >> sys.stderr, \"serverRandomValue: \", serverRandomValue\n print(\"\\n\\n\")\n except Exception as e:\n connection.send(\"ERROR FROM SERVER:\\n{0}\".format(e).encode())\n print(\"Exception:\")\n print(e)\n logging.error(e)\n break\n\n\ndef main():\n # step 1: create socket\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n workers = []\n\n try:\n # step 2: bind the socket to address\n serverSocket.bind((serverHOST, serverPort))\n # step 3: wait for connection request\n serverSocket.listen(10)\n print(\"Server waiting for connection on port 5000...\")\n logging.info(\"Server waiting for connection on port 5000...\")\n #print \"Server waiting for connection on port 5000...\"\n while True:\n # step 4: establish connection for request\n sock = serverSocket.accept()\n t = Thread(target=doWork, args=(sock,))\n t.daemon = True\n t.start()\n workers.append(t)\n\n except socket.error as e:\n print(\"Call to bind failed\")\n logging.error(\"Call to bind failed\")\n print(str(e))\n logging.error(str(e))\n\n except KeyboardInterrupt:\n print(\"Caught Ctrl-C, signaling worker threads to exit\")\n logging.warning(\"Caught Ctrl-C, signaling worker threads to exit\")\n g_evt.set()\n for t in workers:\n t.join()\n finally:\n serverSocket.close()\n\n\nif __name__=='__main__':\n main()\n"
},
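This file implements only the server half of the protocol in its docstring. Below is a minimal client sketch consistent with that description; the run_client name and single-guess flow are assumptions, and it simplistically treats each server send() as arriving in its own recv():

# Sketch (assumed, not the reference client): connect, read the masked
# value, XOR it with a candidate session key, and send R' back.
import random
import socket

# The client must share the server's 10-key list; two entries shown here.
session_keys = [
    "aa072fbbf5f408d81c78033dd560d4f6",
    "c36d408df5072fbbf46078035f81dd5f",   # index 5 in the server's list above
]

def run_client(host="127.0.0.1", port=5000, keys=session_keys):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        print(s.recv(1024).decode())             # "SERVER>>> Connection successful"
        masked = int(s.recv(1024).decode(), 16)  # masked key from the server
        guess = random.choice(keys)              # pick a candidate session key
        r_prime = masked ^ int(guess, 16)        # unmask: R' = masked XOR key
        s.send(hex(r_prime).encode())
        print(s.recv(1024).decode())             # SUCCESS / INVALID KEY message

# run_client()   # uncomment with the server above running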
{
"alpha_fraction": 0.5697036385536194,
"alphanum_fraction": 0.571459949016571,
"avg_line_length": 29.563758850097656,
"blob_id": "f67f9bb8f987b374341868dadf7a10dd21122435",
"content_id": "6b707c465e2238ff8bd1920df5f8e60235544fec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4555,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 149,
"path": "/Practice_Test/binary_search_tree.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\n\nA Binary Search Tree (BST) is a tree in which all the nodes follow the below-mentioned properties:\n The left sub-tree of a node has a value less than its parent node's value.\n The right sub-tree of a node has a value greater than to its parent node's value.\n In this scenario, no duplicate values are allowed.\n \nTask #1\n\nWrite the fucntion buildBST that receives a list of integers. The function will build The\nBST by:\n 1. Iterating the list\n 2. Using the provided Node class and creating Nodes for each value\n 3. Inserting each Node into the BST.\nIf a value in the list is already in the BST, ignore the value and continue processing the rest of the list.\n\nWhen complete, the function will return a reference to the root node of the BST.\n\nTask #2\n\nWrite the function findLevel that receives the root of a BST (root) and a integer \nvalue (findVal). The function will search the BST and find and return level of the\nBST where the value resides. If the value is not found, the function returns None.\n\n'''\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.level = 0\n self.right = None\n self.left = None\n\n # Defining a recursive function the will perform the heavy lifting\n # of the search tree \n def insert(self, data):\n # This is checking to see if the data is already present in the\n # tree and returns False if it is there \n if self.val == data:\n return False\n # Checks the data arg against the current node value\n elif self.val > data:\n # If a left node exists, recursively calls insert passing data\n if self.left:\n return self.left.insert(data)\n # Otherwise a node is added\n else:\n self.left = Node(data)\n self.left.level = self.level + 1\n return True\n # If data is greater than current value\n else:\n # If a right node exists, it passes data recursively to insert\n if self.right:\n return self.right.insert(data)\n # Otherwise it creates and adds a new right node\n else:\n self.right = Node(data)\n self.right.level = self.level + 1\n return True\n\n def find(self, data):\n if (self.val == data):\n return self\n # Checks the data arg against the current node value\n elif self.val > data:\n #checks if left node exists\n if self.left:\n # if so, recursively calls the node find function on Node\n return self.left.find(data)\n else:\n #Otherwise this is the bottom of the tree and data is not present\n return False\n # search to the right\n else:\n if self.right:\n return self.right.find(data)\n else:\n return False\n\n def preorder(self):\n if self:\n print(str(self.val))\n if self.left:\n self.left.preorder()\n if self.right:\n self.right.preorder()\n\n def postorder(self):\n if self:\n if self.left:\n self.left.postorder()\n if self.right:\n self.right.postorder()\n print(str(self.val))\n\n def inorder(self):\n if self:\n if self.left:\n self.left.inorder()\n print(str(self.val))\n if self.right:\n self.right.inorder()\n \n\nclass Tree:\n def __init__(self):\n self.root = None\n \n def insert(self, data):\n if self.root:\n return self.root.insert(data)\n else:\n self.root = Node(data)\n return True\n def find(self, data):\n # If root exists then it calls Node find function on that node\n if self.root:\n return self.root.find(data)\n # Otherwise it returns False b/c the data is not in the tree\n else:\n return False\n\n def preorder(self):\n print(\"Preorder\")\n self.root.preorder()\n \n def postorder(self):\n print(\"Postorder\")\n self.root.postorder()\n\n def inorder(self):\n print(\"Inorder\")\n self.root.inorder()\n\n\ndef buildBST(nums):\n 
bst = Tree()\n for num in nums:\n bst.insert(num)\n return bst.root\n\n\n\ndef findLevel(root, findVal):\n targnode = root.find(findVal)\n if targnode:\n return targnode.level\n return None\n\n"
},
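A quick assumed driver for the two tasks, exercising the buildBST and findLevel functions above:

# Sketch: build a small BST and query node levels.
root = buildBST([8, 3, 10, 1, 6, 14, 3])   # the duplicate 3 is ignored
print(findLevel(root, 8))     # 0 (the root)
print(findLevel(root, 10))    # 1
print(findLevel(root, 6))     # 2
print(findLevel(root, 99))    # None (not in the tree)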
{
"alpha_fraction": 0.5672844648361206,
"alphanum_fraction": 0.5737308859825134,
"avg_line_length": 36.60606002807617,
"blob_id": "6ed508655be4ba9942be195b59639f862c493c9b",
"content_id": "b888148f717583a5e1f204f34ab85a3cd9c631dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1241,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 33,
"path": "/Practice_Test/buildMatrix.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def buildMatrix (rows, cols):\n # set midpoint variable for the odd square matrix situation\n midpoint = 0\n \n # Identify if parameters define a square matrix\n if (rows == cols):\n # Checking to see if the dimensions are odd\n if (rows%2 != 0):\n #Setting the midpoint variable to be an integer value of \n #dimension divided by two which will put it right on the midpoint\n #since counting starts at 0\n midpoint = int(rows/2)\n \n #Create empy matrix to which we can append data\n matrix = []\n # Iterating through the range rows\n for r in range(rows):\n #createing an empty row list to which we append column elements\n row = []\n # Iterating through the size of columns\n for c in range(cols):\n #Checking the square midpoint condition\n if (r == c and r == midpoint and midpoint != 0):\n #If it checks out, then place a '1' in the element\n row.append(1)\n #Otherwise ...\n else:\n # Append the appropriate value\n row.append(r*c)\n #append the row list to the matrix list\n matrix.append(row)\n \n return matrix\n"
},
{
"alpha_fraction": 0.6923821568489075,
"alphanum_fraction": 0.7075532674789429,
"avg_line_length": 31.27083396911621,
"blob_id": "9b9807435e6c750da75a7c22ae2f14331fccf250",
"content_id": "abbc94c13eba646a5aa79648a795c325dd630718",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3098,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 96,
"path": "/Practice_Test/count_time.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\nWrite the function find_culprits that receives the name of a file.\nThe function will open and read the file.\nThe file contains a list of user ids and number of minutes their\nlogin session has been idle. The file can contain multiple session entries\nof the same user id. The format of the file is: userid, mins\n\nExample:\n\njschmo, 22\nhaaron, 12\nhaaron, 7\njschmo, 17\n\nThe function should find the (up to) top five users with the most total idle time and \nsave them in order (highest to lowest) in a list of lists. Each list item in the list will contain a userid\nand number of mins as an integer in that order.\n\nExample [[\"jschmo\",39],[\"haaron\",19]]\n\nWrite no more than the top five users to the list handliing the case where there may not be five total\nusers.\n\nIf the file cannot be opened, return the string \"BAD_FILE\"\n\nFor this exercise all data in the file has valid format/content.\n\nIf the file process successfully, return the list of lists of users\n\n'''\n\n# Define find_culprits function receiving fileName as an arg for a \n# file name\ndef find_culprits(fileName):\n\t# Try to read file; return BAD_FILE msg on error\n\ttry:\n\t\tf = open(fileName, 'r')\n\t\t# Read the lines of the file to lines list \n\t\tlines = f.readlines()\n\texcept FileNotFoundError:\n\t\treturn f'BAD_FILE'\n\n\t# Creating a temporary list to store each line as a nested list\t\n\tusersList = []\n\t# Creating a list for just users\n\tusers= []\n\t# Iterate through the lines from file\n\tfor line in lines:\n\t\t# set temp user var to first element in line\n\t\tuser = line.split(\", \")[0]\n\t\t# set temp time var to second element in line\n\t\ttime = int(line.split(\", \")[1])\n\t\t# append current user to users list\n\t\tusers.append(user) \n\t\t# append user/time combination to usersList as nested list\n\t\tusersList.append([user, time])\n\t#convert users list into a set to make it unique\n\tusers = set(users)\n\t# Creating a totalList variable to store users pair with total time\n\ttotalList = []\n\t# iterate through the unique users list\n\tfor user in users:\n\t\t# Set totalTime Accumulator\n\t\ttotalTime = 0\n\t\t#iterate through each entry in usersList\n\t\tfor entry in usersList:\n\t\t\t# if user matched the first line element ...\n\t\t\tif entry[0] == user:\n\t\t\t\t# add the second line element to the total\n\t\t\t\ttotalTime += entry[1]\n\t\t# Append the user, totalTime pair to the totalTime list \n\t\ttotalList.append([user, totalTime])\n\t\n\t# call the list sort function using 'key' to sort by \n\t# subelement 1 (time) first, then subelement 0 (name) 2nd\n\t# in reverse order\n\ttotalList.sort(key = lambda x :(x[1], x[0]), reverse=True)\n\t# Setting an empty top 5 list \n\ttop5List = []\n\t# Iterating through first 5 elements in totalList\n\tfor i in range(5):\n\t\ttry:\n\t\t\t# Attempt to append list element i\n\t\t\ttop5List.append(totalList[i])\n\t\texcept IndexError:\n\t\t\t# but if it is out of range (less than 5 total users)...\n\t\t\tbreak\n\t#return the top 5 list\n\treturn top5List\n\n# good = [['bbergner', 46], ['rboone', 44], ['ecruz', 44], ['dfarr', 42], ['jwarren', 38]]\n\t\t\n# small = [['ecruz', 44], ['rboone', 34], ['dgilles', 26], ['jwarren', 14]]\n\n# for i in find_culprits('smallList.txt'):\n# \tprint(i)\n"
},
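The hand-rolled aggregation above (a set of users plus nested loops) can be collapsed with collections.Counter. A sketch assuming the same "userid, mins" file format; note that most_common breaks ties by insertion order rather than by the username tiebreak used above:

# Sketch: the same top-five aggregation with collections.Counter.
from collections import Counter

def find_culprits_counter(fileName):
    totals = Counter()
    try:
        with open(fileName, 'r') as f:
            for line in f:
                user, mins = line.split(', ')
                totals[user] += int(mins)   # int() tolerates the trailing newline
    except FileNotFoundError:
        return 'BAD_FILE'
    # most_common(5) already sorts by total, descending, capped at five
    return [[user, mins] for user, mins in totals.most_common(5)]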
{
"alpha_fraction": 0.6420764923095703,
"alphanum_fraction": 0.6775956153869629,
"avg_line_length": 29.5,
"blob_id": "8e82707b3e44593b014955f4a966aba0a8dfb0e8",
"content_id": "05cafe8b7795d2c6132e5eef488e194a90ad34e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 12,
"path": "/Networking/mac4.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport subprocess\n\ninterface = \"eth0\"\nnew_mac = \"00:11:44:55:66:66\"\n\nprint(\"Changing Mac Changer address: \" + interface + \" to \" + new_mac)\n\nsubprocess.call(\"ifconfig \" + interface + \" down\", shell=True)\nsubprocess.call(\"ifconfig \" + interface + \" hw ether \" + new_mac, shell=True)\nsubprocess.call(\"ifconfig \" + interface + \" up\", shell=True)\n"
},
{
"alpha_fraction": 0.6882715821266174,
"alphanum_fraction": 0.6882715821266174,
"avg_line_length": 22.14285659790039,
"blob_id": "d94ce16753636191c2f89af2691aa32b8f3a62fb",
"content_id": "720929b8222db3a48ff8a3c847961d2877223993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 14,
"path": "/LABS/Socket/general.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Importing os to make operating system calls using Python\nimport os\n\n# Function to create a new directory\ndef create_dir(directory):\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n# Function to write data to a file\ndef write_file(path, data):\n f = open(path, \"w\")\n f.write(data)\n f.close()\n"
},
{
"alpha_fraction": 0.6381322741508484,
"alphanum_fraction": 0.6536964774131775,
"avg_line_length": 20.45833396911621,
"blob_id": "8dd73c4e1b9101f4eaf40af5cfca9881135d6e7d",
"content_id": "37389ac5d0b0091a72cfa66d39d57b8538e90205",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 24,
"path": "/LABS/Multiprocessing/test_multiprocessing.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import multiprocessing\nimport time\n\nstart = time.perf_counter()\n\ndef do_something():\n print('Sleeping 1 second...')\n time.sleep(1)\n print('Done Sleeping...')\n\nif __name__ == \"__main__\":\n p1 = multiprocessing.Process(target=do_something)\n p2 = multiprocessing.Process(target=do_something)\n\n p1.start()\n p2.start()\n\n #Ensures threads complete before moving on to the finish time\n p1.join()\n p2.join()\n\n finish = time.perf_counter()\n\n print(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.5681818127632141,
"alphanum_fraction": 0.5934343338012695,
"avg_line_length": 16.217391967773438,
"blob_id": "9179d5249fbb10485a0f29df239329d455fb24e5",
"content_id": "f206cdb31a89047c19c4e103bc41de147c0f2ee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 23,
"path": "/LABS/Socket/serverU.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This is the code for the server side\nfrom socket import *\nsize = 512\nhost = ''\nport = 9898\n\n# Create server socket\ns = socket(AF_INET, SOCK_DGRAM)\ns.bind((host, port))\n\nc,a = s.recvfrom(size)\ndata = c \n# print(c)\n\nif data:\n f = open(\"storageUDP.dat\", '+w')\n print(\"connection from: \", a[0])\n f.write(a[0])\n f.write(\":\")\n f.write(data.decode(\"utf-8\"))\n f.close()\n \ns.close()\n"
},
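serverU.py only listens; the matching one-shot sender is not shown in this listing. A minimal assumed UDP client for the same port:

# Sketch: one-shot UDP client for the server above.
from socket import *

s = socket(AF_INET, SOCK_DGRAM)
# UDP is connectionless, so each datagram carries its destination
s.sendto(b"hello over UDP\n", ('localhost', 9898))
s.close()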
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6799468994140625,
"avg_line_length": 25,
"blob_id": "eb2c64349d8c1234cec906d7ac8ac6a03925dd51",
"content_id": "98779a04c970e89c7d452ea42e045e6742730d6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 753,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 29,
"path": "/LABS/Labs-3-4/lab3-4-10-bmi.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining Coefficient for the weight equation\nCOEFF = float(703)\n\n#Defining main function\ndef main():\n bmi = calcBmi(float(input('Please provide the weight in pounds:')),\n float(input('Please provide the height in inches:')))\n \n overOrUnder(bmi)\n\n#Function to take in mass(weight) and height to calculate BMI\ndef calcBmi(mass,height):\n return mass * COEFF/height**2\n\n#Determine if numbers are overweight, underweight, or in acceptable range\ndef overOrUnder(ratio):\n #overweight range\n if ratio > 25:\n print(\"Overweight!\")\n #underweight range\n elif ratio < 18.5:\n print(\"Underweight!\")\n #Solid BMI range\n else:\n print(\"{:.2f} BMI is in the acceptable range.\".format(ratio))\n\n\n#calling main\nmain()"
},
{
"alpha_fraction": 0.555891215801239,
"alphanum_fraction": 0.5709969997406006,
"avg_line_length": 17.44444465637207,
"blob_id": "10b57132ea992ba21247d5aa5ceecdaabc54e750",
"content_id": "b782b47cea41a1611f10da5ec71675d28c949fad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/LABS/Socket/client6.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import *\n\ndef main():\n host = 'localhost'\n\n sock = socket(AF_INET6, SOCK_STREAM)\n addr = (host,9898)\n sock.connect(addr)\n\n try:\n msg = b\"This was a terrible test!\\n\"\n sock.sendall(msg)\n except socket.errno as e:\n print(\"Socket error \", e)\n finally:\n sock.close()\n\nmain()"
},
{
"alpha_fraction": 0.6570680737495422,
"alphanum_fraction": 0.6570680737495422,
"avg_line_length": 32.260868072509766,
"blob_id": "8c45e9cd396772f4e9a55dec86dd046fbc4c55b6",
"content_id": "cb3be8fab0a8a7ce0168aaa5701f3b27820129b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 23,
"path": "/LABS/Socket/exists.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import requests\nimport argparse\n\ndef main(url):\n try:\n r = requests.get(url)\n print(r.content)\n except requests.exceptions.ConnectionError as e:\n print(f'Error: {e.strerror}. This site ({url}) probably doesn\\'t exist.')\n \n\n\nif __name__ == '__main__': \n # This series of statements allows for in-line arguments\n parser = argparse.ArgumentParser (description='TCP Socket Client Example') \n parser.add_argument('--url', action=\"store\", dest=\"url\", type=str, required=True) \n # This was testing how to add additional, optional arguments\n # parser.add_argument('--tag', action=\"store\", dest=\"tag\", type=str, required=True)\n given_args = parser.parse_args() \n url = given_args.url \n #tag = given_args.tag\n\nmain(url)"
},
{
"alpha_fraction": 0.57594233751297,
"alphanum_fraction": 0.5787139534950256,
"avg_line_length": 31.23214340209961,
"blob_id": "d6d66be74090e9be4c7db008423b6ca5516c0864",
"content_id": "3d9c652a1dde112fd46d08833bc3ab644bce96fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1804,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 56,
"path": "/LABS/PerfExam/prompt2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n # Asking user for the number of rows and columns\n rows = getDimension('rows')\n cols = getDimension('columns')\n # Create and return a 3x4 matrix\n matrix = createMatrix(rows,cols)\n # Displays matrix for user to confirm the grid\n for row in matrix:\n print(row)\n\n # Find largest and coordinates\n # and print the info\n largest = findLargest(matrix)\n print(f'Largest number: {largest[0]}')\n print(f'Coordinates: {largest[1]}')\n\ndef createMatrix(rows,cols):\n matrix = []\n for row in range(rows):\n # Creating empty list for the row for each column element\n # to be written to\n contents = []\n for col in range(cols):\n valid = False\n while valid == False:\n try:\n # Appending successful try to contents for row element\n contents.append(float(input('Please provide a number:')))\n valid = True\n except ValueError:\n print('Error: This is a non-number input!')\n # Writing the completed row\n matrix.append(contents)\n return matrix\n\ndef findLargest(matrix):\n largeNum = 0\n largeCoord = []\n for row in range(len(matrix)):\n for col in range(len(matrix[row])):\n if matrix[row][col] > largeNum:\n largeNum = matrix[row][col]\n largeCoord = [row, col]\n return largeNum,largeCoord\n\ndef getDimension(dimension):\n valid = False\n while valid == False:\n try:\n # getting input for dimension as dim\n dim = int(input(f'Please provide a number {dimension} for your matrix:'))\n valid = True\n except ValueError:\n print('Error: This is a non-integer input!')\n return dim\nmain()"
},
{
"alpha_fraction": 0.5443723201751709,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 27.90625,
"blob_id": "f49bfb1511fa1841be60d7cd6757942cfe0250a5",
"content_id": "6a51e3b5dae86ef05882ba8cb36ac963094f6a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 924,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 32,
"path": "/LABS/Group-Project/displayMenu.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import time\n\ndef displayMenu():\n\n valid_choice = ['os', 'sys']\n\n print(\"Welcome to the Python Library Help System, please\\nchoose one of the following libraries (q to quit):\")\n \n #keep user in while loop until they make a valid selection\n while True:\n try:\n \n #ask user for input at beginning of loop\n user_choice = input(\"os, sys\\n>\").lower()\n \n if user_choice in valid_choice:\n print(\"Taking you to the\", user_choice, \"help page!\")\n \n #using time.sleep gives user time to read validation message\n time.sleep(1)\n displaySelection(user_choice)\n elif user_choice == 'q':\n break\n else:\n continue\n except ValueError:\n print(\"You entered something weird.\")\n\ndef displaySelection(uc):\n pass\n\ndisplayMenu()"
},
{
"alpha_fraction": 0.6467065811157227,
"alphanum_fraction": 0.6766467094421387,
"avg_line_length": 32.599998474121094,
"blob_id": "2eb3a0505c2cd957b7be840fd58d6208b3e5e143",
"content_id": "dfc2edb95f1674faeda33db2bca00226ce019aeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 5,
"path": "/LABS/Lab-2-1/lab2c.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "tax = 0.025\nprint(\"Enter the price of the item:\")\nprice = input()\ntotal = float(price) + round(float(price)*tax),2)\nprint(\"The total price with tax is $\" + str(total))"
},
{
"alpha_fraction": 0.6462775468826294,
"alphanum_fraction": 0.6522194743156433,
"avg_line_length": 31.89655113220215,
"blob_id": "120e273c072f5d79cb6beddfe10a6782808435a3",
"content_id": "ba25b9dee779a43362fe1dd275c6362f3bc0890b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2861,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 87,
"path": "/LABS/DICTIONARIES-SETS/dict-set-compare.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#This is meant to take two files and compare them\ndef main():\n #Stores the lines of the file specified by the user\n source = readFile('Provide the name of the first file')\n source2 = readFile('Provide the name of the second file')\n \n #This calls the function to extract all the words from a file\n words = getWords(source)\n words2 = getWords(source2)\n \n #This stores the return of the function which casts the words list as a set, making all words unique.\n unique = getUnique(words)\n unique2 = getUnique(words2)\n \n #Printing the sets\n printSet('Here is the list of words for the first file',unique)\n print('\\n\\n')\n printSet('Here is the list of words for the second file',unique2)\n \n #Printing various set comparisons\n compareSets(unique,unique2)\n\n#this simple takes an array and casts/returns it as a set\ndef readFile(prompt):\n #Getting filename from input for filename\n filename = input('{}: \\n'.format(prompt))\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n f.close()\n return contents\n\n#Gets the individual words and normalizes them (lowercase, no trailing or preceding punctuation)\ndef getWords(original):\n #Iterate through each line\n newlist = []\n for i in original:\n #Split the lines by spaces (a typical delimeter in English between words)\n line = i.split(' ')\n #Add the words in the line to the list.\n newlist += line\n #Clean up each word in the list, getting rid of . \\n \"\" and ?\n cleanlist = []\n for i in newlist:\n i = i.replace('\\n','').replace('.','').replace('!','').replace('?','').replace('\"','').replace(',','').replace('\\'','')\n #ensures than all words are lower case to ensure set is properly unique\n i = i.lower()\n cleanlist.append(i)\n return cleanlist\n\n#Casts any list to a set and returns result to main\ndef getUnique(array):\n uniqueItems = set(array)\n return uniqueItems\n\n#Printing sets\ndef printSet(prompt,theSet):\n print('{}:\\n------------'.format(prompt))\n array = list(theSet)\n for i in sorted(array):\n print(i)\n\n#Function for comparing sets (intersection, difference, xor )\ndef compareSets(set1,set2):\n print('The words common to both files are:')\n displaySet = set1.intersection(set2)\n for i in displaySet:\n print(i)\n print()\n print('Uncommon words only in the first file:')\n displaySet = set1.difference(set2)\n for i in displaySet:\n print(i)\n print()\n print('Uncommon words only in the second file:')\n displaySet = set2.difference(set1)\n for i in displaySet:\n print(i)\n print()\n print('Uncommon in either file:')\n displaySet = set1.symmetric_difference(set2)\n for i in displaySet:\n print(i)\n print()\n\nmain()"
},
{
"alpha_fraction": 0.6469957232475281,
"alphanum_fraction": 0.6555793881416321,
"avg_line_length": 27.272727966308594,
"blob_id": "24381d72938c659d9e1c258fccd1d8f79af90af9",
"content_id": "f7bb6c8659497ea1e1bc1598f4aaf9c8b8fc52f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 932,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 33,
"path": "/LABS/Socket/login-svr.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\nHOSTNAME = '' # blank so any address can be used\nPORTNUMBER = 11267 # number for the port\nBUFFER = 80 # size of the buffer\n\nSVR_ADDRESS = (HOSTNAME, PORTNUMBER)\nSVR = Socket(AF_INET, SOCK_STREAM)\nSVR.bind(SVR_ADDRESS)\nSVR.listen(1)\n\nprint('Waiting for client to connect')\nCLT, CLT_ADDRESS = SVR.accept()\nprint('Connection from ',\\\n CLT_ADDRESS)\n\nSECRET = ['password', 'whodey', 'winning', 'jamaica']\n# print('the secret is %d' % SECRET)\n\nwhile True:\n print('Please provide your password:')\n GUESS = CLT.recv(BUFFER).decode()\n # print('dealer received ' + GUESS)\n if GUESS not in SECRET:\n REPLY = 'Access Denied'\n else:\n REPLY = 'Access Granted. You have access to the GOLD!!!'\n CLT.send(REPLY.encode())\n if REPLY == 'Access Granted. You have access to the GOLD!!!':\n break\n\nSVR.close()"
},
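Only the server side appears here. A minimal assumed client loop for the same port and message flow might look like:

# Sketch: client for the password server above (assumed counterpart).
from socket import socket as Socket
from socket import AF_INET, SOCK_STREAM

BUFFER = 80
clt = Socket(AF_INET, SOCK_STREAM)
clt.connect(('localhost', 11267))
while True:
    guess = input('Password: ')
    clt.send(guess.encode())
    reply = clt.recv(BUFFER).decode()
    print(reply)
    if reply.startswith('Access Granted'):
        break
clt.close()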
{
"alpha_fraction": 0.669222891330719,
"alphanum_fraction": 0.6789366006851196,
"avg_line_length": 32.15254211425781,
"blob_id": "aa63f7e209041378cba79cc26ae48439648d25fb",
"content_id": "b030da2aa04c31bbeb33b6e0b0a9b1214e82c562",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1962,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 59,
"path": "/Algorithms/practice3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. The list method reverse reverses the elements in the list. Define a function named reverse that \nreverses the elements in its list argument (without using the method reverse). Try to make this \nfunction as efficient as possible, and state its computational complexity using big-O notation. \n\n2. Python’s pow function returns the result of raising a number to a given power. Define a function \nexpo that performs this task and state its computational com-plexity using big-O notation. The first \nargument of this function is the number, and the second argument is the exponent (nonnegative numbers only). \nYou can use either a loop or a recursive function in your implementation, but do not use Python’s ** operator \nor pow function. \n\"\"\"\n# # Swap function used to exchange positions of two elements\n# def swap(myList, i , j):\n# # Exchanges the positions of two items in a list\n# temp = myList[i]\n# myList[i] = myList[j]\n# myList[j] = temp\n\n# # Reverse function, attacks the list from both ends swapping elements \n# # until the start/end boundaries pass each other\n# def reverse(myList):\n# # Boundary - first element\n# start = 0\n# # Boundary - last element\n# end = len(myList) - 1\n# while start < end:\n# swap(myList, start, end)\n# # Move the boundaries closer to the middle\n# start += 1\n# end -= 1\n\n# # Big O = O(n)\n# import random\n\n# myList = []\n# for i in range(10):\n# myList.append(random.randint(0,100))\n\n# print(myList)\n# reverse(myList)\n# print(myList)\n\n\n# This next function will satisfy prompt 2\ndef expo(base, exponent):\n # setting product (accumulator) to base for base number\n product = base\n if exponent == 0:\n return 1\n else:\n # looping exponent # of times (subtracting 1 for since base was already started)\n for i in range(exponent-1):\n product *= base\n return product\n\ndef main():\n print(expo(2, 5))\n\nmain()\n"
},
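The expo requested in prompt 2 costs O(n) multiplications for exponent n; exponentiation by repeated squaring cuts that to O(log n), which is also the complexity class the built-in pow achieves. A sketch (fast_expo is an illustrative name):

# Sketch: exponentiation by squaring -- O(log n) multiplications
# versus the O(n) loop in expo above.
def fast_expo(base, exponent):
    result = 1
    while exponent > 0:
        if exponent % 2 == 1:   # odd exponent: fold one factor into result
            result *= base
        base *= base            # square the base
        exponent //= 2          # halve the exponent
    return result

print(fast_expo(2, 5))   # 32, matching expo(2, 5)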
{
"alpha_fraction": 0.5631067752838135,
"alphanum_fraction": 0.5631067752838135,
"avg_line_length": 24.875,
"blob_id": "0e146ec5fa716443f81a21e5a9efc047a10006d3",
"content_id": "c64a17e197fd8e5cfbf17dea3ac0935e00bb6494",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 8,
"path": "/LABS/FILE-IO/file-io-countItm.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n filename = input('Provide the name of a file:\\n')\n f = open(filename, 'r')\n a = f.readlines()\n f.close()\n print('Your file {} has {} names.'.format(filename, len(a)))\n \nmain()"
},
{
"alpha_fraction": 0.6274999976158142,
"alphanum_fraction": 0.6424999833106995,
"avg_line_length": 25.733333587646484,
"blob_id": "65c59a117b897d250c6b0bf926f62d885973256a",
"content_id": "44f400e0193f1d46c0f0482cd629911219c017ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 15,
"path": "/LABS/recursion/rec-Power.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 7. Recursive Power Method\n Design a function that uses recursion to raise a number to a power. The function should\n accept two arguments: the number to be raised and the exponent. Assume that the exponent is a \n nonnegative integer. \"\"\"\n\ndef main():\n print(getPower(7,2))\n\ndef getPower(x,y):\n if x == 0:\n return 1\n else:\n return getPower(x-1,y) * y\n \nmain()"
},
{
"alpha_fraction": 0.6409395933151245,
"alphanum_fraction": 0.6610738039016724,
"avg_line_length": 26,
"blob_id": "8479a7ecb96d95faa873ea01462badc8cd77e881",
"content_id": "952580e5a8f38cd9b80434ba4a33c51545bcbe7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/LABS/Socket/nmap.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Importing os\nimport os\n# Function to get Nmap Port Scan\ndef get_nmap ( options, ip ):\n\tcommand = \"nmap \" + options + \" \" + ip\n\tprocess = os.popen( command )\n\tresults = str( process.read() )\n\t# Returning the final result\n\treturn results\nprint(get_nmap('-F','127.0.0.1'))\nprint(\"Nmap Scan done!\")\n\n"
},
{
"alpha_fraction": 0.46052631735801697,
"alphanum_fraction": 0.5592105388641357,
"avg_line_length": 12.818181991577148,
"blob_id": "2c641992bcf2aef7311b62a80a2d4fce29de16e7",
"content_id": "94dc33f7b67e3272396b420a48b51055106f4a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 33,
"path": "/02_Data_Types/lab2a.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Lab 2A\n\nUsing the Python interpreter: find the type of the following variables. Feel free to experiment with other variables and such.\n\n**Type of:**\n\n* 10\n* 10.5\n* \"10\"\n* \"Hello!\"\n* \"\"\n* ''\n* True\n* 0\n* type\n* object\n* b\"10101101\" **Try in Py2 and Py3** \n* 0b10101101\n* \\[1,2,3\\]\n* \\(1,2,3\\)\n* {1,2,3}\n* {'one':1}\n* 5j\n\n---\n\n|[Next Topic](/02_Data_Types/02_numbers.md)|\n|---|\n"
},
{
"alpha_fraction": 0.5492063760757446,
"alphanum_fraction": 0.6126984357833862,
"avg_line_length": 27.454545974731445,
"blob_id": "f2833f436f718781c65a3900e53303629774b7cf",
"content_id": "ef91c812c63dc1d3672a6130476bea5ae1dbd6bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 11,
"path": "/LABS/recursion/testSum.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import unittest\nimport recSum \n\nclass TestSum(unittest.TestCase):\n def test_sum(self):\n self.assertEqual(recSum.getSum([1,2,3,4],len([1,2,3,4])-1), 10)\n def test_type(self):\n self.assertIsInstance(recSum.getSum([1,2,3,4],len([1,2,3,4])-1), int)\n\nif __name__ == \"__main__\":\n unittest.main() "
},
{
"alpha_fraction": 0.4935064911842346,
"alphanum_fraction": 0.5021644830703735,
"avg_line_length": 22.200000762939453,
"blob_id": "134bbd83baa13509a61b3e6268a55b616cd33066",
"content_id": "e74510195c5658f3eff2d462ef4e4c5e55847a23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 231,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 10,
"path": "/LABS/FILE-IO/file-io-lineNum.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n filename = input('Provide the name of a file:\\n')\n f = open(filename, 'r')\n a = f.readlines()\n lineNum = 1\n for i in a:\n print('{}. {}'.format(lineNum, i, end=''))\n lineNum += 1\n \nmain()"
},
{
"alpha_fraction": 0.7030651569366455,
"alphanum_fraction": 0.7290640473365784,
"avg_line_length": 47.092105865478516,
"blob_id": "c39b2fb39b2b0fa2e5f0668c5eed791720ee7f12",
"content_id": "2c6a61ca8802b5e22c1b7c6a25606cdb634ad895",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3704,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 76,
"path": "/LABS/DICTIONARIES-SETS/dict-set-practice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWrite a program that creates a dictionary containing course numbers and the room numbers of the rooms where the \ncourses meet. The dictionary should have the following keyvalue pairs:\n\nCourse Number (key) Room Number (value)\nCS101 3004\nCS102 4501\nCS103 6755\nNT110 1244\nCM241 1411\n\nThe program should also create a dictionary containing course numbers and the names of\nthe instructors that teach each course. The dictionary should have the following key-value\npairs:\n\nCourse Number (key) Instructor (value)\nCS101 Haynes\nCS102 Alvarado\nCS103 Rich\nNT110 Burke\nCM241 Lee\n\nThe program should also create a dictionary containing course numbers and the meeting\ntimes of each course. The dictionary should have the following key-value pairs:\n\nCourse Number (key) Meeting Time (value)\nCS101 8:00 a.m.\nCS102 9:00 a.m.\nCS103 10:00 a.m.\nNT110 11:00 a.m.\nCM241 1:00 p.m.\n\nThe program should let the user enter a course number, and then it should display the\ncourse’s room number, instructor, and meeting time.\n\n\n\n2. Capital Quiz\nWrite a program that creates a dictionary containing the U.S. states as keys and their capitals as values. \n(Use the Internet to get a list of the states and their capitals.) The program\nshould then randomly quiz the user by displaying the name of a state and asking the user\nto enter that state’s capital. The program should keep a count of the number of correct and\nincorrect responses. (As an alternative to the U.S. states, the program can use the names of\ncountries and their capitals.)\n\n3. File Encryption and Decryption\nWrite a program that uses a dictionary to assign “codes” to each letter of the alphabet. For\nexample:\ncodes = { 'A' : '%', 'a' : '9', 'B' : '@', 'b' : '#', etc...}\nUsing this example, the letter A would be assigned the symbol %, the letter a would be\nassigned the number 9, the letter B would be assigned the symbol @, and so forth.\nThe program should open a specified text file, read its contents, and then use the dictionary to \nwrite an encrypted version of the file’s contents to a second file. Each character in\nthe second file should contain the code for the corresponding character in the first file.\n\n4. Unique Words\nWrite a program that opens a specified text file and then displays a list of all the unique\nwords found in the file.\nHint: Store each word as an element of a set\n\n5. Word Frequency\nWrite a program that reads the contents of a text file. The program should create a dictionary in which the keys are the individual words found in the file and the values are the\nnumber of times each word appears. For example, if the word “the” appears 128 times,\nthe dictionary would contain an element with 'the' as the key and 128 as the value.\nThe program should either display the frequency of each word or create a second file\ncontaining a list of each word and its frequency\n\n6. File Analysis\nWrite a program that reads the contents of two text files and compares them in the following ways:\n• It should display a list of all the unique words contained in both files.\n• It should display a list of the words that appear in both files.\n• It should display a list of the words that appear in the first file but not the second.\n• It should display a list of the words that appear in the second file but not the first.\n• It should display a list of the words that appear in either the first or second file but not both.\nHint: Use set operations to perform these analyses.\n\"\"\""
},
{
"alpha_fraction": 0.5869120359420776,
"alphanum_fraction": 0.6216768622398376,
"avg_line_length": 26.16666603088379,
"blob_id": "9f47d3d869cd62ea9d30416ab48b4a38ff09e6be",
"content_id": "c998bbe1fae3e2834932a441a4d0db49de3356ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 18,
"path": "/LABS/recursion/rec-largest.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"4. Largest List Item\n Design a function that accepts a list as an argument, and returns the largest value in the list.\n The function should use recursion to find the largest item.\n\"\"\"\n\ndef main():\n myList = [10,6,1,9,20,30,29,3]\n print(findLargest(myList,len(myList)-1))\n\ndef findLargest(array,x):\n if x == -1:\n return 0\n else:\n largest = findLargest(array,x-1)\n if array[x] > largest:\n largest = array[x]\n return largest\nmain()\n"
},
{
"alpha_fraction": 0.642642617225647,
"alphanum_fraction": 0.642642617225647,
"avg_line_length": 28.382352828979492,
"blob_id": "88e7767f6e7350652feef011407c93dfb9a59b6e",
"content_id": "32cf7e03f7923f26d1402876edeafbaa36dddbd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 34,
"path": "/LABS/Socket/main.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from general import *\nfrom domain_name import *\nfrom ip_address import *\nfrom nmap import *\nfrom robots_txt import *\nfrom whois import *\n\n\nROOT_DIR = 'companies'\n\ncreate_dir ( ROOT_DIR )\n\ndef gather_info( name, url ):\n\n robots_txt = get_robots_txt ( url )\n domain_name = get_domain_name ( url )\n whois = get_whois ( domain_name )\n ip_address = get_ip_address ( domain_name )\n nmap = get_nmap (\"-F\", ip_address )\n\n create_report( name, url, domain_name, nmap, robots_txt, whois )\n\ndef create_report( name, url, domain_name, nmap, robots_txt, whois ):\n project_dir = ROOT_DIR + \"/\" + name\n create_dir( project_dir )\n write_file( project_dir + \"/full_url.txt\", url )\n write_file( project_dir + \"/domain_name.txt\", domain_name )\n write_file( project_dir + \"/nmap.txt\", nmap )\n write_file( project_dir + \"/robots.txt\", robots_txt )\n write_file( project_dir + \"/whois.txt\", whois )\n \ngather_info( \"google\", \"https://www.google.com/\" )\n\nprint(\"Scan Completed!!\")\n"
},
{
"alpha_fraction": 0.595061719417572,
"alphanum_fraction": 0.595061719417572,
"avg_line_length": 20.91891860961914,
"blob_id": "0147bb5f79a0fc8b8893e9fdcef38292e6c4be4a",
"content_id": "93024b0cba1357429d35c8bdd33b1421af8dbdaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 37,
"path": "/LABS/Pickle/pickle-unpickle-intro.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Demo object unpickling\nimport pickle\n\n#main function\ndef main():\n #Indicate end of the file\n end_of_file = False\n\n #open a file for binary writing\n input_file = open('information.dat', 'rb')\n \n #read to end of file\n while not end_of_file:\n try:\n #unpickle next object\n person = pickle.load(input_file)\n\n #Display data\n display_data(person)\n\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n \n #close the file\n input_file.close()\n\n#This function displays the person data in the dict\n# that is passed as an arg\ndef display_data(person):\n print('Name: ', person['name'])\n print('Age: ', person['age'])\n print('Weight: ', person['weight'])\n print()\n\n#Call main\nmain()"
},
{
"alpha_fraction": 0.7326968908309937,
"alphanum_fraction": 0.7446300983428955,
"avg_line_length": 35.434783935546875,
"blob_id": "f2a18a35acb2ca5a6cd2f10e5bce39c9679bf2bc",
"content_id": "40c8b430221ca2164a5ec581d8a01464940edfeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 23,
"path": "/07_Algorithms/README.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "<a href=\"https://github.com/CyberTrainingUSAF/07-Python-Programming/blob/master/00-Table-of-Contents.md\" rel=\"Return to TOC\"> Return to TOC </a>\n\n# Algorithms\n\n* **Topics:**\n * **Algorithmic Searching, Sorting, and Complexity**\n * **Stacks**\n * **Queues**\n * **Trees**\n\n## By the end of this lesson you should be able to:\n\n\n* Describe how the sequential search and binary search algorithms work\n* Describe how the selection sort and quicksort algorithms work\n* Describe the features and behavior of a stack\n* Design and implement a backtracking algorithm that uses a stack\n* Describe the features of a queue and the operations on it\n* Describe the features and applications of a tree\n\n---\n\n<a href=\"https://github.com/CyberTrainingUSAF/07-Python-Programming/blob/master/07_Algorithms/01_Algorith_SSC.md\" > Continue to Next Topic </a>\n"
},
{
"alpha_fraction": 0.5631579160690308,
"alphanum_fraction": 0.5757894515991211,
"avg_line_length": 21.64285659790039,
"blob_id": "d4fe3d0ffdf97fdaead64e61cedb80d097a5c28f",
"content_id": "a25e35100ee30f7ceb99aa6da2176c8943e94e79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 42,
"path": "/LABS/DICTIONARIES-SETS/dict-set-capitol.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import random\n\ncapitols = {'Virginia':'Richmond',\n 'Texas':'Austin',\n 'New York':'Albany',\n 'Maryland':'Baltimore'}\n\n\ndef main():\n correct = 0\n incorrect = 0\n count = 0\n while count < 3:\n states = getStates(capitols)\n state = quizChoice(states)\n if getAnswer(capitols,state) == 1:\n correct += 1\n else:\n incorrect += 1\n count += 1\n print('You got {} correct!'.format(correct))\n print('You got {} incorrect!'.format(incorrect))\n\ndef getStates(dictionary):\n array = list(dictionary.keys())\n return array\n\ndef quizChoice(states):\n state = states[(random.randint(0,len(states)-1))]\n print('What is the capitol of {}?'.format(state))\n return state\n\ndef getAnswer(states,state):\n score = 0\n answer = capitols[state]\n userInput = input()\n if userInput.lower() == answer.lower():\n score += 1\n return score\n\n\nmain()"
},
{
"alpha_fraction": 0.578012228012085,
"alphanum_fraction": 0.5946696400642395,
"avg_line_length": 22.102563858032227,
"blob_id": "ada06ef27a3cc0e7f05085c9516aedda9b360705",
"content_id": "28d6d8fe1bedb2130570bd92c930b455d0589991",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 78,
"path": "/LABS/Classes/questions.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import pickle\n\nclass Question:\n def __init__(self):\n self.__question = ''\n self.__answer1 = ''\n self.__answer2 = ''\n self.__answer3 = ''\n self.__answer4 = ''\n self.__correct = ''\n\n def set_question(self, statement):\n self.__question = statement\n\n def set_answer1(self, statement):\n self.__answer1 = statement\n\n def set_answer2(self, statement):\n self.__answer2 = statement\n \n def set_answer3(self, statement):\n self.__answer3 = statement\n \n def set_answer4(self, statement):\n self.__answer4 = statement\n \n def set_correct(self, statement):\n self.__correct = statement\n\n def get_question(self):\n print(f'{self.__question}')\n\n def get_answer1(self):\n return self.__answer1\n\n def get_answer2(self):\n return self.__answer2\n\n def get_answer3(self):\n return self.__answer3\n\n def get_answer4(self):\n return self.__answer4\n\n def get_correct(self):\n return self.__correct\n\n\"\"\" def writeData(data, filename):\n #Opens the file on disk for writing\n q_file = open(filename, 'wb')\n #Dump data to file\n pickle.dump(data, q_file)\n #close file\n q_file.close() \"\"\"\n\n\"\"\" qbank = []\n\nfilename = 'qbank.dat'\n\nfor i in range(10):\n q = Question()\n statement = input('Give question:\\n')\n q.set_question(statement)\n statement = input('Give answer1:\\n')\n q.set_answer1(statement)\n statement = input('Give answer2:\\n')\n q.set_answer2(statement)\n statement = input('Give answer3:\\n')\n q.set_answer3(statement)\n statement = input('Give answer4:\\n')\n q.set_answer4(statement)\n statement = input('Give correct answer:\\n')\n q.set_correct(statement)\n qbank.append(q)\n\nwriteData(qbank, filename)\n\n \"\"\""
},
{
"alpha_fraction": 0.732839822769165,
"alphanum_fraction": 0.7355316281318665,
"avg_line_length": 40.30555725097656,
"blob_id": "5af65e7dcd1034729138b8ded5e1c2b3d402ed74",
"content_id": "acb14983f7de65c8c575bcac533bafec8c87d570",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1486,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 36,
"path": "/LABS/FILE-IO/file-io-randomRead.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Importing random library\nimport random\n\n#Defining main function\ndef main():\n #Getting filename from input for filename\n filename = input('Provide the new file name:\\n')\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n #Selecting a random number from within the range of the count of numbers read into the array\n numSelected = random.randint(1,len(contents))\n #Create the array to store the numbers randomly selected from filename\n randFromFile = []\n #Function for selecting a random number (numselected) from contents to add to randFromFile\n getRandFromFile(numSelected,contents,randFromFile)\n #Displays the data from array randFromFile\n measureRand(randFromFile, numSelected, filename)\n\n#Function loops num times to select a random number from contents to add to numArray \ndef getRandFromFile(num,contents,numArray):\n for i in range(num):\n numArray.append(contents[random.randint(1,num)])\n\n#Function to measure the randFromFile array; count, sum, and average of elements\ndef measureRand(randFromFile,numSelected,filename):\n total=0\n for num in randFromFile:\n total += int(num)\n print('{} numbers selected at random from {}.'.format(numSelected,filename))\n print('The total of the selected numbers was {}.'.format(total))\n print('The average of the selected numbers was {:.2f}'.format(int(total)/len(randFromFile)))\n\n#Call main function\nmain()"
},
{
"alpha_fraction": 0.5783582329750061,
"alphanum_fraction": 0.5970149040222168,
"avg_line_length": 15.8125,
"blob_id": "01a2367f251531d40f17843ccb08a42139844e62",
"content_id": "3b8a541b96c69e21405e1eb2e5e52cb2ec58bc3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 16,
"path": "/LABS/recursion/rec-print.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. Recursive Printing\n Design a recursive function that accepts an integer argument, n, and prints the numbers 1\n up through n.\n\"\"\"\n\ndef recPrint(n):\n if n > 0:\n (recPrint(n-1))\n print(n)\n return n\n\ndef main():\n recPrint(5)\n\nmain()"
},
{
"alpha_fraction": 0.6738627552986145,
"alphanum_fraction": 0.6784887909889221,
"avg_line_length": 36.05714416503906,
"blob_id": "5a4c8885a0adbd3eda865468677ee51ad8221ceb",
"content_id": "92fd4ce78fe8ae8b8f640a1a7b80b4b359d8d704",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1297,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 35,
"path": "/LABS/Labs-3-4/lab3a-madlib.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "## This is a function to walk through\n## an list of words (passed as arg) to\n## focus word selection for madlib.\ndef selectWord (x):\n print(\"Provide a word from this list:\")\n #Shows the user the options available\n print(x)\n #Get user input\n userInput = input()\n #Input validation for input matching a list item\n while userInput not in x:\n print(\"Invalid selection, please retry\")\n print(x)\n userInput = input()\n return userInput\n\nnouns = ['dog','leader','house','car','phone','desk']\nverbs = ['catches','runs','celebrates','forgets','wins','tests']\npreps = ['ahead','behind','above','below','under','over']\nadjectives = ['joyful','underwhelming','breezy','hot','fast','slow']\ncolors = ['gold','purple','green','red','silver','yellow']\n\nprint(\"Hello! Please enter the following: \")\n#getting input for each of the terms to be used in the sentences\nnoun = selectWord(nouns)\nnoun2 = selectWord(nouns)\nverb = selectWord(verbs)\nadjective = selectWord(adjectives)\nadjective2 = selectWord(adjectives)\nprep = selectWord(preps)\ncolor = selectWord(colors)\n\n#taking input and putting them into sentences\nprint(\"The {} {} {} {} {} the {} {}!\".format(adjective,color,noun,verb,prep,adjective2,noun2))\nprint(\"A {} is a {} {} {}!\".format(noun,adjective2,color,noun2))\n"
},
{
"alpha_fraction": 0.6455696225166321,
"alphanum_fraction": 0.6645569801330566,
"avg_line_length": 17.52941131591797,
"blob_id": "1d1cfb1cb76910dbc85c0cfc6671c41a2ddf5806",
"content_id": "f72ce4759aaa8b6ed3df9729f6b7e3aa3297170c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 17,
"path": "/LABS/Iteration/generator.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Generators\n\ndef myGenerator():\n print('First item')\n yield 10\n # These cannot contain 'return' unless you wan to stop the generatator at a specific point \n\n print('Second item')\n yield 20\n\n print('Third item')\n yield 30\n\ngen = myGenerator()\nprint(next(gen))\nprint(next(gen))\nprint(next(gen))\n\n"
},
{
"alpha_fraction": 0.6834319233894348,
"alphanum_fraction": 0.7011834383010864,
"avg_line_length": 29.636363983154297,
"blob_id": "4f390d0eed7af74bc9ea62e24d6c3d8cb845873d",
"content_id": "1249ef8f4a4cbd762731de4f83c08244842be63d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 338,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 11,
"path": "/LABS/Labs-3-1/lab3-1-5.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Declaring variable for speed calculation\nspeed = 0\ntime = [5, 8, 10]\ndistance = 0\n\n#Getting user input for speed\nspeed = input('How fast in MPH are you traveling?\\n')\n\n#for loop to go through the time intervals\nfor i in time:\n print('If traveling at {} for {} hours, you will have traveled {} miles.\\n'.format(speed,i,int(speed)*i))\n\n"
},
{
"alpha_fraction": 0.5471428632736206,
"alphanum_fraction": 0.550000011920929,
"avg_line_length": 24.035715103149414,
"blob_id": "428c6df790afe14a6ee255820053af026ea16673",
"content_id": "097340d1638b1f86c011fd329655f1124f87f331",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 28,
"path": "/LABS/FILE-IO/file-io-sumNum.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n f = getFilename()\n if f != '':\n a = f.readlines()\n f.close()\n getTotal(a)\n\ndef getFilename():\n try:\n filename = input('Provide the name of a file:\\n')\n return open(filename, 'r')\n except IOError:\n print('The file {} is not found.'.format(filename))\n return ''\n\ndef getTotal(a):\n total = 0\n for i in a:\n try:\n total += int(i)\n except:\n print('This line does not contain a number.')\n print('The total of the numbers is {}.'.format(total))\n #This statement will satisfy the number average as well\n print('The average of the numbers is {:.2f}.'.format(total/len(a)))\n\n \nmain()"
},
{
"alpha_fraction": 0.6165577173233032,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 31.85714340209961,
"blob_id": "89dafbcc8a498a1db94735ba515464ac4908efbd",
"content_id": "058c79f126b5d4736048a6667324178996faf284",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 14,
"path": "/LABS/recursion/rec-sumTerm.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 6. Sum of Numbers\n Design a function that accepts an integer argument and returns the sum of all the integers from 1 up \n to the number passed as an argument. For example, if 50 is passed as an argument, the function will \n return the sum of 1, 2, 3, 4, . . . 50. Use recursion to calculate the sum. \"\"\"\n\ndef main():\n print(getSumTerm(5))\n\ndef getSumTerm(x):\n if x == -1:\n return 0\n else:\n return getSumTerm(x-1) + x\nmain()"
},
{
"alpha_fraction": 0.6515286564826965,
"alphanum_fraction": 0.674977719783783,
"avg_line_length": 23.413043975830078,
"blob_id": "33209168304a2f25583f64df3098459e9550f3b8",
"content_id": "a7f7119fce649b883ef3da8261342ae5c3a6840f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3369,
"license_type": "no_license",
"max_line_length": 232,
"num_lines": 138,
"path": "/04_functions/04_list_comprehension.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## List Comprehension\n\nPython supports something called \"list comprehension\". In short, this allows us to write minimal, easy and readable lists in a way like mathematicans do. Essentially you put everything in a one liner. \n\n\n\n### Steps\n\nTaking a look at the example above, let's break it down into something more understandable.\n\n* First, the _for e in a\\_list_ is examined. \n * **a\\_list** is just some var list that is visable within our scope\n * We are going to iterate through this list...\n* While itterating through the list, we are going to check the **optional predicate**, _if type\\(e\\) == types.IntType_ in this case\n * So per iteration, we check that condition. If it's true, we execute the output expression...\n * As per the name, it's optional. If there is no optional predicate, the output expression simply runs. \n* The **output expression**, e\\*\\*2 in this case, is ran if the optional predicate exists and is True. \n * This happens per iteration of a\\_list as well. \n\n**Normal List**\n\n```python\na_list = [1,2,3,4,5]\n\ndef square_list(a_list):\n a = []\n for item in a_list:\n a.append(item*item)\n return a\n\nprint(square_list(a_list))\n\n# Output\n[1, 4, 9, 16, 25]\n```\n\n**Normal List with Refactoring** \n\n### \\(In this example the a\\_list global variable was overwritten. This can be avoided by reassigning the results to a new variable.\\)\n\n```python\na_list = [1,2,3,4,5]\n\ndef square_list(a_list):\n for i in range(len(a_list)):\n a_list[i] *= a_list[i]\n\nsquare_list(a_list)\nprint(a_list)\n\n# Output\n[1, 4, 9, 16, 25]\n```\n\n**List Comprehension Without Conditional**\n\n```python\na_list = [1,2,3,4,5]\n\ndef square_list(a_list):\n return [x*x for x in a_list]\n\nprint(square_list(a_list))\n\n#Output \n[1, 4, 9, 16, 25]\n```\n\n**List Comprehension With Conditional**\n\n```python\na_list = [1,2,3,4,5]\n\ndef square_list(a_list):\n return [x*x for x in a_list if x % 2 == 0]\n\nprint(square_list(a_list))\n\n# Output\n[4, 16]\n```\n\n**Set Comprehension With Conditional** \n\nInstead of using brackets \\[ \\] as you would for lists you use curly braces \\{ \\} like you would for a set.\n\n```python\na_list = [1,2,3,4,5]\n\ndef square_list(a_list):\n return {x*x for x in a_list if x % 2 == 0}\n\nprint(square_list(a_list))\n\n# Output\nset([16, 4])\n```\nIf you are looking to get rid of duplicates within a list then using Set Comprehension is good for this. \n\n```python\n# This list has duplicates\nb_list = [1,2,2,3,4,4,5]\n\ndef square_list(b_list):\n return [x*x for x in b_list]\n\nsquared_list = square_list(b_list))\nprint(squared_list)\n# We can convert this list to a set by using the set() method\nset(squared_list)\n\n\n# Why do the extra steps when we can just use set comprehension\ndef square_set(b_list)\n return {x*x for x in b_list}\n\nsquared_set = square_set(b_list))\nprint(squared_set)\n```\n**Dictionary Comprehension** \n\nDictionary comprehension is a great way to take one dictionary and transform, or conditionally include items into a new dictionary. Just remember not to make these too complex. Their main purpose is to make your code more readable. \n\n```python\nmy_dict = {'a': 1, 'b': 2, 'c': 3, 'd': 4, }\n\nsquared_dict = {k:v*v for (k,v) in my_dict.items()}\nprint(squared_dict)\n```\n---\n\n|[Next Topic](/04_functions/05_closures_iterators_generators.md)|\n|---|\n"
},
{
"alpha_fraction": 0.45753899216651917,
"alphanum_fraction": 0.5285961627960205,
"avg_line_length": 27.875,
"blob_id": "3dbca8546a4068aee3f086974e2b87bf99795e70",
"content_id": "17f0ef11785be5e7af643c9b5caf23b45e37ef9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 40,
"path": "/LABS/DICTIONARIES-SETS/dict-set-classSched.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "classRooms = { 'CS101':'3004',\n 'CS102':'4501',\n 'CS103':'6755',\n 'NT110':'1244',\n 'CM241':'1411' }\n\nclassInst = { 'CS101':'Haynes',\n 'CS102':'Alvarado',\n 'CS103':'Rich',\n 'NT110':'Burke',\n 'CM241':'Lee' }\n\nclassTimes = { 'CS101':'8:00 am',\n 'CS102':'9:00 am',\n 'CS103':'10:00 am',\n 'NT110':'11:00 am',\n 'CM241':'1:00 pm' }\n\ndef main():\n course = getClassNumber(classTimes)\n classInfo(course,classRooms,classInst,classTimes)\n\ndef getClassNumber(times):\n displayClass(times)\n course = ''\n while course not in times:\n course = input('Please select a class number from the above catalog:\\n')\n return course \n\ndef displayClass(dictionary):\n for key in dictionary:\n print(key)\n\ndef classInfo(course,room,inst,time):\n print('Information for {}: '.format(course))\n print('Room:\\t\\t {}'.format(classRooms[course]))\n print('Instructor:\\t {}'.format(classInst[course]))\n print('Time:\\t\\t {}'.format(classTimes[course]))\n \nmain()"
},
{
"alpha_fraction": 0.5314960479736328,
"alphanum_fraction": 0.5324802994728088,
"avg_line_length": 23.80487823486328,
"blob_id": "ca09f574f9bac261f6f86935f455573d859772b9",
"content_id": "0d7162027374b00d00458f31c0692fbe6bbe85b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 41,
"path": "/LABS/Classes/pet.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Pet:\n def __init__(self):\n self.__name = ''\n self.__animal_type = ''\n self.__age = 0\n \n def set_name(self): \n self.__name = input('What is the name of your pet?\\n')\n \n def set_animal_type(self):\n self.__animal_type = input('What type of animal is your pet?\\n')\n \n def set_age(self):\n self.__age = input('What is the age of your pet? (In human years)\\n')\n \n def get_name(self):\n #print(f'Your pet\\'s name is {self.__name}')\n return self.__name\n\n def get_type(self):\n #print(f'Your pet\\'s is of the {self.__animal_type} species')\n return self.__animal_type\n\n def get_age(self):\n #print(f'Your pet\\'s is {str(self.__age)} years old.')\n return self.__age\n\n\n\n\"\"\" def main ():\n my_pet = Pet()\n my_pet.set_name()\n my_pet.set_animal_type()\n my_pet.set_age()\n\n print('Your Pet\\'s Bio:')\n print(my_pet.get_name())\n print(my_pet.get_type())\n print(my_pet.get_age())\n\nmain() \"\"\""
},
{
"alpha_fraction": 0.6304896473884583,
"alphanum_fraction": 0.6314992308616638,
"avg_line_length": 30.967741012573242,
"blob_id": "6963e9b7069b8bcd986331c1fe8a244eca289398",
"content_id": "5e94f28e69c406ab3ff65dc380a51f6f6325490d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1981,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 62,
"path": "/LABS/DICTIONARIES-SETS/dict-set-wordcount.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#This program is to grab all the unique words in a file.\ndef main():\n \n #this is the dictionary to store a word count for each unique word\n wordcount = {}\n #Stores the lines of the file specified by the user\n source = readFile()\n #This calls the function to extract all the words from a file\n words = getWords(source)\n \n #This stores the return of the function which casts the words list as a set, making all words unique.\n unique = getUnique(words)\n \n countWords(wordcount,words,unique)\n print('Here is the count for each word in the file:')\n for i in wordcount:\n print('{}: {}'.format(i, wordcount[i]))\n\n#this simple takes an array and casts/returns it as a set\ndef getWords(original):\n #Iterate through each line\n newlist = []\n for i in original:\n #Split the lines by spaces (a typical delimeter in English between words)\n line = i.split(' ')\n #Add the words in the line to the list.\n newlist += line\n #Clean up each word in the list, getting rid of . \\n \"\" and ?\n cleanlist = []\n for i in newlist:\n i = i.replace('\\n','').replace('.','').replace('!','').replace('?','').replace('\"','')\n #ensures than all words are lower case to ensure set is properly unique\n i = i.lower()\n cleanlist.append(i)\n return cleanlist\n\n#Casts any list to a set and returns result to main\ndef getUnique(array):\n uniqueItems = set(array)\n return uniqueItems\n\ndef readFile():\n #Getting filename from input for filename\n filename = input('Provide the new file name:\\n')\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n f.close()\n return contents\n\ndef countWords(wordCount,words,unique):\n for uni in unique:\n count = 0\n for word in words:\n if uni.lower() == word.lower():\n count += 1\n wordCount.update({uni: int(count)})\n\n\n\nmain()"
},
{
"alpha_fraction": 0.5512030124664307,
"alphanum_fraction": 0.5601511001586914,
"avg_line_length": 30.816455841064453,
"blob_id": "ecc7e5e6b0acd3d3b5cae4b08e9c956c70f865d7",
"content_id": "00312ed06da075598a0f84c87d4b254e0293abd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5029,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 158,
"path": "/LinkedLists/double_node.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Doubly Linked Lists - Very similar to Single linked lists these have \n# prev pointer and a tail node\n# Move left, to previous node, from a given node \n# Move immediately to the last node\n\nclass DoubleNode:\n def __init__(self, data, next = None, prev = None):\n # Instantiates a node with a default next of None\n self.data = data\n self.next = next\n self.prev = prev\n\nclass DLList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n # printing our linked list\n def printLinked(self):\n probe = self.head\n while probe != None:\n print(probe.data)\n probe = probe.next\n\n # printing our linked list reversed starting from tail\n def printReverse(self):\n probe = self.tail\n while probe != None:\n print(probe.data)\n probe = probe.prev\n\n # To add something to our linked list\n def append(self, data):\n # Instantiate a new node\n newNode = DoubleNode(data)\n # Is there something in our linked list yet\n if self.head is None:\n newNode.prev = None\n self.head = newNode\n self.tail = self.head\n # There are node(s) in our linked list\n else:\n self.tail.next = newNode\n newNode.prev = self.tail\n self.tail = self.tail.next\n \n \n # Add to beginning (prepend)\n def prepend(self, data):\n #instantiate a new node\n newNode = DoubleNode(data)\n # Anything in our linked list\n newNode.next = self.head\n self.head = newNode\n\n def delete(self, index):\n if index <= 0 or self.head.next is None:\n removedItem = self.head.data\n self.head = self.head.next\n else:\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n removedItem = probe.next.data\n probe.next = probe.next.next\n return removedItem\n\n def insert(self, index, data):\n # If empty or index is less than 1\n if self.head is None or index <= 0:\n self.head = DoubleNode(data, self.head)\n else:\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position \n # Setting the new node as the Next for current position\n # New node points to current position as prev and current next as next \n probe.next = DoubleNode(data, probe.next, probe)\n # Advance to new node\n probe = probe.next\n # Reset the previous of next node to point to probe (inserted node)\n probe.next.prev = probe\n \n # Swapping the data at specified indicies with first node being 0\n # Too High of an index results in the last element being involved\n # in the swap. 
\n def swapNode(self, index1, index2):\n # Setting and moving a probe for the 1st item\n probe1 = self.head\n while index1 > 0 and probe1.next != None:\n probe1 = probe1.next\n index1 -= 1\n # storing data for probe1 in tempData\n tempData = probe1.data\n # Setting and moving the probe for the second item\n probe2 = self.head\n while index2 > 0 and probe2.next != None:\n probe2 = probe2.next\n index2 -= 1\n # swapping the data elements of each node\n probe1.data = probe2.data\n probe2.data = tempData\n\n def reverse(self):\n probe = self.tail\n while probe != None:\n temp = probe.next\n probe.next = probe.prev\n probe.prev = temp\n probe = probe.next\n temp = self.tail\n self.tail = self.head\n self.head = temp\n # temp = head.data\n # head.data = tail.data\n # tail.data = temp\n # head = head.next\n # tail = tail.prev\n # if head.prev != None and head.prev == tail.next or head == tail:\n # break\n\n def getIndex(self, index):\n # If empty or index is less than 1\n probe = self.head\n while index > 1 and probe.next != None:\n probe = probe.next\n index -= 1\n # Insert new node after the node at position index -1 or last position \n print(probe.data)\n\n def __len__(self):\n probe = self.head\n count = 1\n while probe.next != None:\n probe = probe.next\n count += 1\n return count\n\ndublink = DLList()\ndublink.append(\"A\")\ndublink.append(\"B\")\ndublink.append(\"C\")\ndublink.append(\"D\")\ndublink.append(\"E\")\ndublink.append(\"F\")\n#dublink.prepend([9,8,7,6])\n#dublink.insert(2, \"WhoDey\")\ndublink.printLinked()\n#print(len(dublink))\nprint()\n#dublink.printReverse()\n#dublink.delete(2)\nprint()\ndublink.reverse()\ndublink.printLinked()\n\n\n"
},
{
"alpha_fraction": 0.5560859441757202,
"alphanum_fraction": 0.6181384325027466,
"avg_line_length": 25.1875,
"blob_id": "901bdd4b87c403e3738a4164401901f8670ce184",
"content_id": "b0ce0ab5f224ad18228b8b7ef7bb17777a8dbece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 16,
"path": "/LABS/Iteration/lambda.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Is a temporary function that you use. Used \n# once and cannot be called again. One-liner\n# anonymous functions.\nsquare = lambda x : x*x \nprint(square(5))\n\nsumnum = lambda x, y, z : x + y + z\nprint(sumnum(5, 10, 15))\n\nfrom math import sqrt\nquadPos = lambda a, b, c : (((-1*b)+sqrt((b**2)-(4*a*c)))/2*a)\n\nprint(quadPos(21,44,13))\n\nquadNeg = lambda a, b, c : (((-1*b)-sqrt((b**2)-(4*a*c)))/2*a)\nprint(quadNeg(21,44,13))\n"
},
{
"alpha_fraction": 0.7198581695556641,
"alphanum_fraction": 0.73758864402771,
"avg_line_length": 24.636363983154297,
"blob_id": "809c2536d21575405f69f6993f9f2c26ab9dbe27",
"content_id": "58bb9114223aadaafae8b19e47349dda30ddd621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 11,
"path": "/LABS/Labs-3-1/lab3-1-9.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#defining constant coefficient\nCOEFF = float(9/5)\n\n#getting user input for Celcius temp\nc = float(input('Provide the Celcius temperature to convert to Fahrenheit:\\n'))\n\n#Calculating Fahrenheit\nf = COEFF*c + 32\n\n#printing result\nprint('{} Celcius is {:.2f} Fahrenheit.'.format(c,f))\n"
},
{
"alpha_fraction": 0.6098003387451172,
"alphanum_fraction": 0.6352087259292603,
"avg_line_length": 26.549999237060547,
"blob_id": "837b697934570e23eaeb6bdbf965df81518c8d70",
"content_id": "b87e76d149bba3b51525ff0d7d6e4c636010dd7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 20,
"path": "/LABS/recursion/rec-mult.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n2. Recursive Multiplication\n Design a recursive function that accepts two arguments into the parameters x and y. The\n function should return the value of x times y. Remember, multiplication can be performed\n as repeated addition as follows:\n 7 X 4 = 4 + 4 + 4 + 4 + 4 + 4 + 4\n (To keep the function simple, assume that x and y will always hold positive nonzero\n integers.\n\"\"\"\n\ndef main():\n print(alt_mult(7,4))\n\ndef alt_mult(x, y):\n if x == 1:\n return y\n else:\n return y + alt_mult(x-1, y)\n \nmain()\n"
},
{
"alpha_fraction": 0.38353413343429565,
"alphanum_fraction": 0.40361446142196655,
"avg_line_length": 25.210525512695312,
"blob_id": "468907ae63d4634b76d8f7df514a623a3889b32e",
"content_id": "85aaa2e131dc2c6ab067b9877c640bd584afe2c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 19,
"path": "/Practice_Test/convertAndShift.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def convertAndShift(bStr):\n if len(bStr) != 16:\n return f\"INVALID_LENGTH\"\n else:\n if (string.isdigit() == False):\n return f\"INVALID_VALUE\"\n try:\n num = int(bStr,2)\n if (num%2 == 0):\n num = num << 2\n else:\n num = num >> 1\n if (num > 200):\n num = ~num\n else:\n pass\n except ValueError:\n return f\"INVALID_VALUE\"\n return num\n"
},
{
"alpha_fraction": 0.5375253558158875,
"alphanum_fraction": 0.5496957302093506,
"avg_line_length": 18.68000030517578,
"blob_id": "9c96e4c3087bd084d12c2d91748613410e556767",
"content_id": "5c07ca69c0846cce29d373f69f8b95eefc9b55e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 25,
"path": "/LABS/FILE-IO/file-io-golf.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n filename = input('Provide the new file name:\\n')\n f = open(filename, 'w')\n name = getName()\n \n\ndef getName():\n name = input('Provide the player\\'s name:\\n')\n return name\n\ndef getScore(player):\n score = -55\n while score <= -50\n try:\n score = int(input('Provide score for {}'.format(player)))\n except:\n print('Incorrect input.')\n score =-55\n\ndef writeScore(file,name):\n score = getScore(name)\n \n\n\nmain()\n\n"
},
{
"alpha_fraction": 0.5614618062973022,
"alphanum_fraction": 0.5681062936782837,
"avg_line_length": 20.571428298950195,
"blob_id": "27a6b85487b9404b4ca927994ace3d67034e6f64",
"content_id": "0facb6be82f1852cf016da94d32a37550612aa76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 14,
"path": "/LABS/Projects/readSales.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\sales2.txt'\n sales_file = open(filename, 'r')\n\n line = sales_file.readline()\n\n while line != '':\n amount = float(line)\n print('{:.2f}'.format(amount))\n line = sales_file.readline()\n\n sales_file.close()\n\nmain()"
},
{
"alpha_fraction": 0.6536681056022644,
"alphanum_fraction": 0.6538461446762085,
"avg_line_length": 34.74522399902344,
"blob_id": "3888d78296a2ddbc02eb9eceebb9ee68652b9c3c",
"content_id": "fc67297735e8d99054ff71c70b7c34096eddaef7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5622,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 157,
"path": "/LABS/Pickle/pickle-email.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\n8. Name and Email Addresses\nWrite a program that keeps names and email addresses in a dictionary as key-value pairs.\nThe program should display a menu that lets the user look up a person’s email address, add\na new name and email address, change an existing email address, and delete an existing\nname and email address. The program should pickle the dictionary and save it to a file\nwhen the user exits the program. Each time the program starts, it should retrieve the dictionary from the file and unpickle it.\n\n\"\"\"\nimport pickle \nfrom os import path\n\n#define main\ndef main():\n #output_file = open('email.dat', 'wb')\n \n #List of valid menu options\n menuOptions = ['l','m','a','d','x']\n #setting filename variable for check\n filename = 'email.dat'\n #Checking for existence of file; if it's missing it will create prompting for first entry\n if path.exists(filename) == False:\n freshStart(filename)\n \n #Init selection for while loop\n selection = 'b'\n while selection.lower() != 'x':\n selection = menu(menuOptions)\n #lowers the input of to match against selections\n if selection.lower() == 'l':\n lookupEntry(filename)\n elif selection.lower() == 'm':\n modEntry(filename)\n elif selection.lower() == 'a':\n addEntry(filename)\n elif selection.lower() == 'd':\n removeEntry(filename)\n elif selection.lower() == 'x':\n #Print exit message\n print('Good bye!')\n else:\n #Print error message if input doesn't match option\n print('Incorrect Selection')\n\n#This function is meant to address the first use of the program where the email file doesn't exist\ndef freshStart(filename):\n #Opens filename in binary write mode\n email_file = open(filename, 'wb+')\n #Create empty dictionary\n email_dict = {}\n #Let's the user know why they are being prompted for initial entries\n print('The file {} is not detected. Starting fresh; please provide the first entry: '.format(filename))\n #Prompts for initial entry\n email_dict.update({(input('Name: ')).lower(): (input('Email: ')).lower()})\n #writes dictionary to binary file\n pickle.dump(email_dict, email_file)\n #close file\n email_file.close()\n\ndef menu(options):\n #Printing program menu\n print('PROGRAM MENU')\n print('E-mail Lookup (press L)')\n print('Add an entry (press A)')\n print('Modify an entry (press M)')\n print('Delete an entry (press D)')\n print('EXIT (press X)') \n print('\\n\\n') \n # Getting user input for menu option\n selection = input('What would you like to do?') \n # Input validation for menu selection\n while selection.lower() not in options:\n selection = input('Invalid selection. 
What would you like to do?\\n')\n print('\\n')\n return selection\n \n\n#Function to add entry to existing binary data file\ndef addEntry(filename):\n # Calling the readBinary function to read in the file as a dictionary\n email_dict = readBinary(filename)\n #Prompts for entry\n email_dict.update({(input('Name: ')).lower(): (input('Email: ')).lower()})\n #Opens the file on disk for writing\n email_file = open(filename, 'wb')\n #Dump data to file\n pickle.dump(email_dict, email_file)\n #close file\n email_file.close()\n\ndef modEntry(filename):\n # Calling the readBinary function to read in the file as a dictionary\n email_dict = readBinary(filename)\n # Print keys as options\n print('Names\\n-----------')\n for i in email_dict:\n print(i)\n # Gets user input for entry they wish to change\n query = input('Provide the name from above to change:\\n')\n # Prompts for email entry to modify\n email_dict.update({query.lower(): (input('Email: ')).lower()})\n # Opens the file on disk for writing\n email_file = open(filename, 'wb')\n # Dump data to file\n pickle.dump(email_dict, email_file)\n # close file\n email_file.close()\n\ndef lookupEntry(filename):\n # Calling the readBinary function to read in the file as a dictionary\n email_dict = readBinary(filename)\n # Gets user input for entry they wish to lookup\n query = input('Provide the name to lookup:\\n')\n # Prints the email for the query or lets them know it's not found\n print(email_dict.get(query.lower(), 'Name not found'))\n print()\n\ndef removeEntry(filename):\n # Calling the readBinary function to read in the file as a dictionary\n email_dict = readBinary(filename)\n # Print keys as options for removal\n print('Names\\n-----------')\n for i in email_dict:\n print(i)\n # Gets user input for entry they wish to lookup\n query = input('Provide the name to remove:\\n')\n # Deletes entry from dictionary\n del email_dict[query]\n # Verifies to the user that the entry was removed\n print(email_dict.get(query.lower(), 'Information successfully removed.\\n'))\n print()\n # Opens the file on disk for writing\n email_file = open(filename, 'wb')\n # Dump data to file\n pickle.dump(email_dict, email_file)\n # close file\n email_file.close()\n\ndef readBinary(filename):\n # Opening the file in read mode\n email_file = open(filename, 'rb')\n # Setting EOF to false\n end_of_file = False\n #Setting while loop to get each object in binary file\n while not end_of_file:\n try:\n #unpickle next object\n dictionary = pickle.load(email_file)\n return dictionary\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n email_file.close()\n\n\nmain()\n "
},
{
"alpha_fraction": 0.5479195713996887,
"alphanum_fraction": 0.5582047700881958,
"avg_line_length": 31.923076629638672,
"blob_id": "bd196aa083f35ff6f5cc058715b7285365a1c861",
"content_id": "e2084f825cba57e54c882e6fae0b63061c878d8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2139,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 65,
"path": "/LABS/Socket/rps-svr.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom socket import socket as Socket\nfrom socket import AF_INET, SOCK_STREAM\n\ndef playGame(PLAYER, BUFFER):\n choices = ['Rock', 'Paper', 'Scissors']\n\n while True:\n CHOICE = randint(0, 2)\n print(f'Computer has chosen: {CHOICE}')\n print('Computer waits for user\\'s choice...')\n GUESS = PLAYER.recv(BUFFER).decode()\n print(f'User\\'s choice: {choices[int(GUESS)]}')\n user_wins = 'User\\'s ' + choices[int(GUESS)] + ' beats Computer\\'s ' + choices[int(CHOICE)]\n comp_wins = 'Computer\\'s ' + choices[int(CHOICE)] + ' beats User\\'s ' + choices[int(GUESS)]\n # 'User wins' scenarios\n if int(GUESS) == 0 and int(CHOICE) == 2:\n REPLY = user_wins \n PLAYER.send(REPLY.encode())\n break\n elif int(GUESS) == 1 and int(CHOICE) == 0:\n REPLY = user_wins\n PLAYER.send(REPLY.encode())\n break\n elif int(GUESS) == 2 and int(CHOICE) == 1:\n REPLY = user_wins\n PLAYER.send(REPLY.encode())\n break\n # 'Computer wins' scenarios\n elif int(CHOICE) == 0 and int(GUESS) == 2:\n REPLY = comp_wins\n PLAYER.send(REPLY.encode())\n break\n elif int(CHOICE) == 1 and int(GUESS) == 0:\n REPLY = comp_wins\n PLAYER.send(REPLY.encode())\n break\n elif int(CHOICE) == 2 and int(GUESS) == 1:\n REPLY = comp_wins\n PLAYER.send(REPLY.encode())\n break\n else:\n REPLY = 'TIE'\n PLAYER.send(REPLY.encode())\n\ndef main():\n HOSTNAME = '' # blank so any address can be used\n PORTNUMBER = 11267 # number for the port\n BUFFER = 80 # size of the buffer\n\n DEALER_ADDRESS = (HOSTNAME, PORTNUMBER)\n DEALER = Socket(AF_INET, SOCK_STREAM)\n DEALER.bind(DEALER_ADDRESS)\n DEALER.listen(1)\n\n print('dealer waits for player to connect')\n PLAYER, PLAYER_ADDRESS = DEALER.accept()\n print('dealer accepted connection request from ',\\\n PLAYER_ADDRESS)\n\n playGame(PLAYER, BUFFER)\n\n DEALER.close()\n\nmain()"
},
{
"alpha_fraction": 0.5303030014038086,
"alphanum_fraction": 0.5303030014038086,
"avg_line_length": 19.3125,
"blob_id": "def28a5eed58e3b53dc22c76b6224025e4e1cf9c",
"content_id": "f8fe39535f984c08036ab01d05e3360732438bc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 16,
"path": "/LABS/TKinter/autoservice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class AutoService:\n def __init__(self, desc, cost):\n self.__desc = desc\n self.__cost = cost\n\n def setDesc(self,desc):\n self.__desc = desc\n\n def setCost(self,cost):\n self.__cost = cost \n\n def getDesc(self):\n return self.__desc \n\n def getCost(self):\n return self.__cost "
},
{
"alpha_fraction": 0.6455309391021729,
"alphanum_fraction": 0.6524063944816589,
"avg_line_length": 29.465116500854492,
"blob_id": "808811951c539c67504735063f407442135aa7d9",
"content_id": "4524ee13e0e6b3dfafdaa76c3e8492632deb2f01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1309,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 43,
"path": "/LABS/Threading/multithreading.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRunning things concurrently is known as multithreading\nRunning things in parallel is known as multiprocessing\n\nI/O bound tasks - Waiting for input and output to be completed,\n reading and writing from file system,\n network operations\n These all benefit more from threading\n You get the illusion of running code at the same time,\n however other code starts running while other code is waiting\n\ncpu bound tasks - Good for number crunching\n using CPU\n data crunching\n These benefit more from multiprocessing and running in parallel\n Using multiprocessing might be slower if you have overhead from creating and\n destroying files\n\n\"\"\"\nimport threading\nimport time\nstart = time.perf_counter()\n\ndef do_something():\n print('Sleeping 1 second...')\n time.sleep(1)\n print('Done Sleeping...')\n\n# create 2 threads\nt1 = threading.Thread(target=do_something)\nt2 = threading.Thread(target=do_something)\n\n# start the thread\nt1.start()\nt2.start()\n\n# make sure the threads complete before moving on to calculate finish time\nt1.join()\nt2.join()\n\nfinish = time.perf_counter()\n\nprint(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.6874838471412659,
"alphanum_fraction": 0.7039999961853027,
"avg_line_length": 47.45000076293945,
"blob_id": "4311eff968623fca0893977b1fe8d1fbbcdd9cd2",
"content_id": "02fcfcb15b71cb1281c187716ff682d6422eeadf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3933,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 80,
"path": "/LABS/TKinter/tkinter_intro.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# GUI - Allows user to interact with OS using grapics\n# In python we can use tkinter module to create simple GUIs\n# There are other modules available but this ships with python\n# There are WIDGETS\n# ***********************\n# Button - button that can cause an action when clicked\n# Canvas - A rectangular area that can be used to display graphics\n# Check button = a button that toggles 'on'/'off'\n# Entry - An area that takes a single line of input from user\n# Frame - A container that can hold other widgets\n# Label - An area that displays one line of text or an image\n# Listbox - A list from which the user may select an item\n# Menu - A list of choices that are displayed when a user clicks \n# the menu button widget\n# Menubutton - A menu that is displayed on the screen and may be \n# clicked bu the user\n# Radiobutton - A widget that can be either selected or deselected\n# Scale - Widget that allows the use to select a value by moving\n# a slider \n# Scrollbar - Can be used w/ some other types of widgets to provide\n# scrolling capability\n# Text - A widget that allows the user to enter multiple lines of input\n# TopLevel - a container, like a frame, but displayed in its own window \n# effbot.org/tkinterbook/text.html\n\n\"\"\"\n\n1. Name and Address\n Write a GUI program that displays your name and address when a button is clicked. \n When the user clicks the Show Info button, the program should display your name and\n address. Mess with the display to make it look neat.\n\n2. Latin Translator\n Look at the following list of Latin words and their meanings.\n Latin English\n sinister left\n dexter right\n medium center\n Write a GUI program that translates the Latin words to English. The window should have\n three buttons, one for each Latin word. When the user clicks a button, the program displays \n the English translation in a label.\n\n3. Miles Per Gallon Calculator\n Write a GUI program that calculates a car’s gas mileage. The program’s window should\n have Entry widgets that let the user enter the number of gallons of gas the car holds, and\n the number of miles it can be driven on a full tank. When a Calculate MPG button is\n clicked, the program should display the number of miles that the car may be driven per gallon of \n gas. Use the following formula to calculate miles-per-gallon:\n\n MPG = miles/gallons\n\n4. Celsius to Fahrenheit\n Write a GUI program that converts Celsius temperatures to Fahrenheit temperatures. The user\n should be able to enter a Celsius temperature, click a button, and then see the equivalent\n Fahrenheit temperature. Use the following formula to make the conversion:\n\n F = (9/5)C + 32\n\n F is the Fahrenheit temperature and C is the Celsius temperature.\n\n5. Property Tax\n A county collects property taxes on the assessment value of property, which is 60 percent\n of the property’s actual value. If an acre of land is valued at $10,000, its assessment value\n is $6,000. The property tax is then $0.64 for each $100 of the assessment value. The tax\n for the acre assessed at $6,000 will be $38.40. Write a GUI program that displays the\n assessment value and property tax when a user enters the actual value of a property.\n\n6. 
Joe’s Automotive\n Joe’s Automotive performs the following routine maintenance services:\n • Oil change — $26.00\n • Lube job — $18.00\n • Radiator flush — $30.00\n • Transmission flush — $80.00\n • Inspection — $15.00\n • Muffler replacement — $100.00\n • Tire rotation — $20.00\n Write a GUI program with check buttons that allow the user to select any or all of these\n services. When the user clicks a button the total charges should be displayed.\n\n\"\"\""
},
{
"alpha_fraction": 0.5862300992012024,
"alphanum_fraction": 0.6142632961273193,
"avg_line_length": 28.733333587646484,
"blob_id": "6eaee508953a5302dc47c14671b2e50fe68b733f",
"content_id": "d810b6b5851c53174a3d486954b2ccf63c756d77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4525,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 150,
"path": "/LABS/regex/regex-practice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "string = \"\"\"\n1. Recognize the following strings: “bat,” “bit,” “but,” “hat,”\n“hit,” or “hut.”\n\n2. Match any pair of words separated by a single space, that is,\nfirst and last names.\n\n3. Match any word and single letter separated by a comma and\nsingle space, as in last name, first initial.\n\n4. Match the set of all valid Python identifiers.\n\n5. Match a street address according to your local format (keep\nyour regex general enough to match any number of street\nwords, including the type designation). For example, American\nstreet addresses use the format: 1180 Bordeaux Drive. Make\nyour regex flexible enough to support multi-word street\nnames such as: 3120 De la Cruz Boulevard.\n\n6. Match simple Web domain names that begin with “www.”\nand end with a “.com” suffix; for example, www.yahoo.com.\nExtra Credit: If your regex also supports other high-level\ndomain names, such as .edu, .net, etc. (for example,\nwww.foothill.edu).\n\n7. Match the set of all valid e-mail addresses (start with a loose\nregex, and then try to tighten it as much as you can, yet\nmaintain correct functionality). Try to break what we did in class \nand improve it.\n\n8. Match the set of all valid Web site addresses (URLs) (start\nwith a loose regex, and then try to tighten it as much as you\ncan, yet maintain correct functionality). Try to break what we did in \nclass and improve it.\n\n9. type(). The type() built-in function returns a type object,\nwhich is displayed as the following Pythonic-looking string:\n\n # >>> type(0)\n # <type 'int'>\n # >>> type(.34)\n # <type 'float'>\n # >>> type(dir)\n<type 'builtin_function_or_method'>\n\nCreate a regex that would extract the actual type name from\nthe string. Your function should take a string like this <type\n'int'> and return int. (Ditto for all other types, such as\n‘float’, ‘builtin_function_or_method’, etc.) Note: You\nare implementing the value that is stored in the __name__\nattribute for classes and some built-in types.\n\n10. Processing Dates. In Section 1.2, we gave you the regex pattern\nthat matched the single or double-digit string representations of\nthe months January to September (0?[1-9]). Create the regex\nthat represents the remaining three months in the standard\ncalendar.\n\n04-21-81\n10-31-2009\n31-31-3131\n\nMeighan, V\n\n\"\"\"\n\n# Ex. 1******************************************\nimport re\n# pattern = re.compile(r'[HB].t', re.IGNORECASE)\n# matches = pattern.findall(string)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# Ex. 2******************************************\n# pattern = re.compile(r'[A-Z]\\w+\\s{1}[A-Z]\\w+')\n# matches = pattern.findall(string)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# Ex. 3******************************************\n# pattern = re.compile(r'[A-Z]\\w+,\\s{1}[A-Z]\\.?\\s')\n# matches = pattern.findall(string)\n\n# print(type(matches))\n# for match in matches:\n# print(match)\n\n# Ex. 4******************************************\nimport re\nimport keyword as kw\nids = \"\"\"\n!gfg\n123\n_abc12\nabc%\nclass\n\n\"\"\"\n\npattern = re.compile(r'\\b[A-Za-z_^!][A-Za-z0-9_]*')\nmatches = pattern.findall(ids)\n\nfor match in matches:\n if kw.iskeyword(match) == False:\n print(f'{match}: Valid Identifier')\n\n# Ex. 5 ******************************************\n# pattern = re.compile(r'\\d+\\s[A-Za-z]*\\s{1}[A-Za-z]*\\s?\\w*\\s?\\w*')\n# matches = pattern.findall(string)\n\n# for match in matches:\n# print(f'{match}')\n\n# Ex. 
6 ******************************************\n# pattern = re.compile(r'(w{3}\\.)?([a-zA-Z0-9]*)(\\.[a-zA-Z]{2,3})')\n# matches = pattern.findall(string)\n\n# for match in matches:\n# print(f'{match}')\n\n# Ex. 9 ******************************************\n# def rawType(object):\n# #import re\n# tempStr = str(type(object))\n# pattern = pattern = re.compile(r'\\'\\w*\\'')\n# matches = pattern.findall(tempStr)\n# return matches\n\n# testInt = 0\n# testString = 'who?'\n# testFloat = 0.01\n# testFunc = print\n# varList = [testInt, testString, testFloat, testFunc]\n\n# for i in varList:\n# #print(type(i))\n# print(rawType(i)[0])\n\n# Ex. 10 ******************************************\n# pattern = re.compile(r'[0-1]?[0-9][-/\\s][0-3]?[0-9][-/\\s][1-2]?[0-9]?[0-9]{2}')\n# pattern = re.compile(r'^([1-9]| 1[0-2])([-./\\s])([1-9]|[1-2][0-9])([-./\\s])([1-2]{2}[0-9]{2})')\npattern = re.compile(r'\\b[0-1]?[0-9][-/\\s][0-3]?[0-9][-/\\s][1-2]?[0-9]?[0-9]{2}') \nmatches = pattern.findall(string)\n\nfor match in matches:\n print(f'{match}')"
},
{
"alpha_fraction": 0.6996699571609497,
"alphanum_fraction": 0.7238723635673523,
"avg_line_length": 22.947368621826172,
"blob_id": "0c337e5a078d70d0075c22aff6d3674b3bb1ac61",
"content_id": "f0bf9b49bf606e960de397872d598f4875f96700",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 38,
"path": "/LABS/TKinter/tkinter_window.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\n\n# Instantiate tkinter object\nwindow = tk.Tk()\n\n# Set the title for your window\nwindow.title('First TKinter App')\n\n# Set size of window\nwindow.geometry(\"400x400\")\n\n# Adding a label\ntitle = tk.Label(text = \"Hello World\")\n\n# grid() tells you where you want the label, (0,0) is default\ntitle.grid()\n\n# pack() this allows the module to force objects into the existing window\n#title.pack()\n\n# Adding a button\nbutton1 = tk.Button(text = \"Click Me\", bg = \"red\")\nbutton1.grid(column=0, row=1)\n\n# Adding entry\n\nentry_field1 = tk.Entry()\nentry_field1.grid(column=0, row=2)\n\n# Add text field\ntext_field = tk.Text(master = window, height = 10, width = 30)\ntext_field.grid(column=0, row=3)\n\n# This opens window and keeps it open until closed\n# Everything you do must be between this and instantiation\n# This is always at the bottom\n# Continueously puts window in a loop to keep open \nwindow.mainloop()"
},
{
"alpha_fraction": 0.58152174949646,
"alphanum_fraction": 0.58152174949646,
"avg_line_length": 22,
"blob_id": "e1c4ccf250548e05916c6a43a43184ab1fab9953",
"content_id": "dc0c859850ee8f949382791bac671ad1ff0eee7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 8,
"path": "/LABS/FILE-IO/file-io-fileDisplay.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Read data from a file\ndef main():\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\numbers.txt'\n f = open(filename, 'r')\n for i in f.readlines():\n print(i, end='')\n\nmain()\n"
},
{
"alpha_fraction": 0.6725274920463562,
"alphanum_fraction": 0.6798534989356995,
"avg_line_length": 36.94444274902344,
"blob_id": "e2cddf63d67dccd474579506732dfbf61c2d509c",
"content_id": "c72df65f17c86cbf7db202fb4cfc0b0391d7a7eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1365,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 36,
"path": "/LABS/Labs-4-1/lab4-1-CarMaintenance.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Define Constant for number of months\nNUM_MONTHS=12\n\n#Main function\ndef main():\n category = ['loan payment','insurance','gas','oil','tires','maintenance']\n expenses = []\n getExpenses(category,expenses)\n displayCosts(category,expenses)\n#Iterates through the expense items \ndef getExpenses(category,expenses):\n for i in category:\n #Appends the cost of the category to 'expenses'\n # by passing i as an argument for getCost which then fills in the question\n expenses.append(getCost(i))\n\n#Defining a function to take in a prompt as a question \n# and validate input as positive and numeric\ndef getCost(prompt):\n cost = input('Please enter the montly costs for {:s}:\\n'.format(prompt))\n #Input validation checking numeric and a positive number\n while float(cost) <= 0 and cost.replace('.','').isnumeric() == False:\n cost = input('Invalid input. Please enter the montly costs for {:s}:\\n'.format(prompt))\n return float(cost)\n\n#Define function for displayCosts\ndef displayCosts(category, expenses):\n total = 0.0\n for i in range(len(category)):\n print('Expense for {:s}: ${:.2f}'.format(category[i],expenses[i]))\n total += float(expenses[i])\n print('-'*30)\n print('Monthly total expenses: ${:.2f}'.format(float(total)))\n print('Annual Total: ${:.2f}'.format(float(total)*NUM_MONTHS))\n\nmain()"
},
{
"alpha_fraction": 0.5871559381484985,
"alphanum_fraction": 0.607798159122467,
"avg_line_length": 20.850000381469727,
"blob_id": "75783a73e47e3dffa8dc5371f7832f33b09b3428",
"content_id": "7b9d20a4bf28f14dc19ce7ceb039d4efe183d3c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 20,
"path": "/Algorithms/timing1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This prints the running times for problem sizes that double\n# using a single loop\n# \n\nimport time\n\nproblemSize = 1000\n\nfor count in range(5):\n start = time.time()\n # Start of Algorithm\n work = 1\n for j in range(problemSize):\n for k in range(problemSize):\n work += 1\n work -= 1\n #End of Algorithm\n elapsed = time.time() - start\n print(f'{problemSize} - {elapsed}')\n problemSize *= 2"
},
{
"alpha_fraction": 0.4297385513782501,
"alphanum_fraction": 0.4812091588973999,
"avg_line_length": 20.75,
"blob_id": "a68fcdb1e09775b8b5b77b8d55e5e85a0ef7a4c3",
"content_id": "07618fcaf3e22359f8afc8f501faa363f9fcd1eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 56,
"path": "/Algorithms/practice1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# 1. Write a tester program that counts and displays the number of iterations \n# of the following loop: \n\ndef work():\n size = 100000\n while size > 1:\n #start = time.time()\n size = size // 2\n #elapsed = time.time() - start\n #print(f'{size}')\n print('##################') \n\nimport time\nimport timeit\n\nproblemSize = [1000, 2000, 4000, 10000, 100000]\n\n# for size in problemSize:\n# num = 0\n#work()\ntotal_time = timeit.timeit(work, number=1000)\nprint(total_time)\n # while size > 0:\n # start = time.time()\n # size = size // 2\n # num += 1\n # elapsed = time.time() - start\n # print(f'{size} - {elapsed} - {num}')\n # print('##################')\n\nfor i in range(50):\n print(f'{i} - {2**i} - {i**4}')\n\n\n# Comparing time complexity between 2^i and i^4 to determine when \n# i^4 becomes more efficient\nx = False\ni = 2\nwhile x == False:\n print(f'{i} - {2**i} - {i**4}')\n if 2**i > i**4:\n x = True\n print(f'{i} - {2**i} - {i**4}')\n i += 1\n\n\nz = False\ni = 2\nwhile z == False:\n x = i**2\n y = 0.5*(i**2) + (0.5)*i\n print(f'{i} - {x} - {y}')\n if x < y:\n z = True\n print(f'{i} - {x} - {y}')\n i += 1\n\n \n"
},
{
"alpha_fraction": 0.5732899308204651,
"alphanum_fraction": 0.638436496257782,
"avg_line_length": 27,
"blob_id": "024c7e170cd7ac8d85e72f4e5f4e12d31ef235ef",
"content_id": "364f8f3aa0e8c8652da69e4bc36af1d621d1d72a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 11,
"path": "/LABS/Iteration/zip_func.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# zip() function\n# a good way to take two sequences of data and \n# pair elements together\n\n# set up two lists\nprices = [72.51, 9.27, 153.74, 30.23, 53.00]\nnames = ['CAT', 'GE', 'MSFT', 'AA', 'IBM']\n\n# use for loop and zip() to pair together\nfor name, price in zip(names, prices):\n print(name, '=',price)"
},
{
"alpha_fraction": 0.6112469434738159,
"alphanum_fraction": 0.6246943473815918,
"avg_line_length": 27.241378784179688,
"blob_id": "a4eb1b6d68bc9568f7261304f5bdc685c5e10fab",
"content_id": "ae089f2712f084e03bdf1585969095cee3f4ce37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 29,
"path": "/LABS/Labs-3-4/lab3-4-5-colorMixer.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining CONSTANT array of primary colors\nPRI_COLORS = ['red','blue','yellow']\nMIXES = {'redblue': 'purple',\n 'bluered': 'purple',\n 'blueyellow': 'green',\n 'yellowblue': 'green',\n 'yellowred': 'orange',\n 'redyellow': 'orange'\n }\ndef main():\n color1 = getColor()\n color2 = getColor()\n mixColor(color1,color2)\n\ndef getColor():\n print(PRI_COLORS)\n color = input('Please select a primary color from above list:\\n')\n while color.lower() not in PRI_COLORS:\n color = input('Invalid selection. Please select a primary color from above list:\\n')\n return color\n\ndef mixColor(c1,c2):\n if c1.lower() == c2.lower():\n print('Your resultant color is {}'.format(c1))\n else:\n newColor = c1+c2\n print('Your resultant color is {}'.format(MIXES[newColor]))\n\nmain()"
},
{
"alpha_fraction": 0.5699658989906311,
"alphanum_fraction": 0.6006826162338257,
"avg_line_length": 21.461538314819336,
"blob_id": "1ef0b7f00080b3f3e1654d85c71f31cad0250a7d",
"content_id": "648a2723f8106e740f547a21160bb32885c6fd29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/LABS/Group-Project/helpClassTest.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import helpClass\n\narray = []\n\nfor i in range(2):\n desc = input('Description:\\n')\n ex1 = input('Example 1:\\n')\n ex2 = input('Example 2:\\n')\n test = helpClass.HelpObject(desc, ex1, ex2)\n array.append(test)\n\nfor i in array:\n print(f'{i.getDesc()}\\n{i.getEx1()}\\n{i.getEx2()}')\n\n"
},
{
"alpha_fraction": 0.7032257914543152,
"alphanum_fraction": 0.7133640646934509,
"avg_line_length": 28.351350784301758,
"blob_id": "3517ee966bf5d52656ea62ed10acd9ca7d2dfbf3",
"content_id": "8ef95faee3b4111a45272ca266e517ce8721c769",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 37,
"path": "/LABS/Threading/multithreading3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUsing multithread module is the older way\nPython 3.2 introduced called thread pull executor\n\nNo longer need threading module\nWe use concurrent.futures module instead\n\"\"\"\nimport concurrent.futures\nimport time\n\nstart = time.perf_counter()\n\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s)...')\n time.sleep(seconds)\n return 'Done Sleeping...'\n\n# using a context manager\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n # submit schedules a function to be executed one at a time and returns a future object\n # f1 = executor.submit(do_something, 1)\n # f2 = executor.submit(do_something, 1)\n\n # print(f1.result())\n # print(f2.result())\n\n # list comprehension to create multiple threads\n results = [executor.submit(do_something, 1) for _ in range(10)]\n\n # to get the results we can use another function, as_completed() from future object that \n # gives us an iterator\n for f in concurrent.futures.as_completed(results):\n print(f.result())\n\nfinish = time.perf_counter()\n\nprint(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.6004842519760132,
"alphanum_fraction": 0.6464890837669373,
"avg_line_length": 28.571428298950195,
"blob_id": "45a3974eb460c671c22beaae9ac15bf05f5b6869",
"content_id": "bf374d4e9ee77c9fbebde9e5a0b5caaa5c6532f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 14,
"path": "/LABS/recursion/recSum.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 5. Recursive List Sum\n Design a function that accepts a list of numbers as an argument. The function should recursively \n calculate the sum of all the numbers in the list and return that value. \"\"\"\n\ndef main():\n myList = [1000,6,1,9,20,30,29,3]\n print(getSum(myList,len(myList)-1))\n\ndef getSum(array,x):\n if x == -1:\n return 0\n else:\n return getSum(array,x-1) + array[x]\nmain()"
},
{
"alpha_fraction": 0.6911566257476807,
"alphanum_fraction": 0.6997842788696289,
"avg_line_length": 55.20454406738281,
"blob_id": "db35a8bf99283b66eb0e957643be5edcb1b65f4b",
"content_id": "c2ce15d747eb57fb7db5438b11aba4160385a76b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7542,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 132,
"path": "/LABS/Classes/Class-Practice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. Pet Class\n Write a class named Pet, which should have the following data attributes:\n • __name (for the name of a pet)\n • __animal_type (for the type of animal that a pet is. Example values are ‘Dog’, ‘Cat’,\n and ‘Bird’)\n • __age (for the pet’s age)\n The Pet class should have an __init__ method that creates these attributes. It should also\n have the following methods:\n • set_name\n This method assigns a value to the __name field.\n • set_animal_type\n This method assigns a value to the __animal_type field.\n • set_age\n This method assigns a value to the __age field.\n • get_name\n This method returns the value of the name field.\n • get_type\n This method returns the value of the type field.\n • get_age\n This method returns the value of the age field.\n Once you have written the class, write a program that creates an object of the class and\n prompts the user to enter the name, type, and age of his or her pet. This data should be\n stored as the object’s attributes. Use the object’s accessor methods to retrieve the pet’s name,\n type, and age and display this data on the screen.\n\n2. Car Class\n Write a class named Car that has the following data attributes:\n • __year_model (for the car’s year model)\n • __make (for the make of the car)\n • __speed (for the car’s current speed)\n The Car class should have an __init__ method that accept the car’s year model and make\n as arguments. These values should be assigned to the object’s __year_model and __make\n data attributes. It should also assign 0 to the __speed data attribute.\n The class should also have the following methods:\n • accelerate\n The accelerate method should add 5 to the speed data attribute each time it is\n called.\n • brake\n The brake method should subtract 5 from the speed data attribute each time it is called.\n • get_speed\n The get_speed method should return the current speed.\n Next, design a program that creates a Car object, and then calls the accelerate method\n five times. After each call to the accelerate method, get the current speed of the car and\n display it. Then call the brake method five times. After each call to the brake method, get\n the current speed of the car and display it.\n\n3. Personal Information Class\n Design a class that holds the following personal data: name, address, age, and phone number. Write \n appropriate accessor and mutator methods. Also, write a program that creates\n three instances of the class. One instance should hold your information, and the other two\n should hold your friends’ or family members’ information.\n\n4. Employee Class\n Write a class named Employee that holds the following data about an employee in attributes: \n name, ID number, department, and job title.\n Once you have written the class, write a program that creates three Employee objects to\n hold the following data:\n\nName ID Number Department Job Title\nSusan Meyers 47899 Accounting Vice President\nMark Jones 39119 IT Programmer\nJoy Rogers 81774 Manufacturing Engineer\n\nThe program should store this data in the three objects and then display the data for each\nemployee on the screen.\n\n5. RetailItem Class\nWrite a class named RetailItem that holds data about an item in a retail store. 
The class\nshould store the following data in attributes: item description, units in inventory, and price.\nOnce you have written the class, write a program that creates three RetailItem objects\nand stores the following data in them:\n\n Description Units in Inventory Price\nItem #1 Jacket 12 59.95\nItem #2 Designer Jeans 40 34.95\nItem #3 Shirt 20 24.95\n\n6. Employee Management System\n This exercise assumes that you have created the Employee class for Programming Exercise 4.\n Create a program that stores Employee objects in a dictionary. Use the employee ID number\n as the key. The program should present a menu that lets the user perform the following actions:\n • Look up an employee in the dictionary\n • Add a new employee to the dictionary\n • Change an existing employee’s name, department, and job title in the dictionary\n • Delete an employee from the dictionary\n • Quit the program\n When the program ends, it should pickle the dictionary and save it to a file. Each time the\n program starts, it should try to load the pickled dictionary from the file. If the file does not\n exist, the program should start with an empty dictionary.\n\n7. Cash Register\n This exercise assumes that you have created the RetailItem class for Programming\n Exercise 5. Create a CashRegister class that can be used with the RetailItem class. The\n CashRegister class should be able to internally keep a list of RetailItem objects. The\n class should have the following methods:\n • A method named purchase_item that accepts a RetailItem object as an argument.\n Each time the purchase_item method is called, the RetailItem object that is passed as\n an argument should be added to the list.\n • A method named get_total that returns the total price of all the RetailItem objects\n stored in the CashRegister object’s internal list.\n • A method named show_items that displays data about the RetailItem objects stored\n in the CashRegister object’s internal list.\n • A method named clear that should clear the CashRegister object’s internal list.\n Demonstrate the CashRegister class in a program that allows the user to select several\n items for purchase. When the user is ready to check out, the program should display a list\n of all the items he or she has selected for purchase, as well as the total price.\n\n8. Trivia Game\n In this programming exercise you will create a simple trivia game for two players. The program will \n work like this:\n • Starting with player 1, each player gets a turn at answering 5 trivia questions. (There\n should be a total of 10 questions.) When a question is displayed, 4 possible answers are\n also displayed. Only one of the answers is correct, and if the player selects the correct\n answer, he or she earns a point.\n • After answers have been selected for all the questions, the program displays the number\n of points earned by each player and declares the player with the highest number of points\n the winner.\n To create this program, write a Question class to hold the data for a trivia question. The\n Question class should have attributes for the following data:\n • A trivia question\n • Possible answer 1\n • Possible answer 2\n • Possible answer 3\n • Possible answer 4\n • The number of the correct answer (1, 2, 3, or 4)\n The Question class also should have an appropriate __init__ method, accessors, and\n mutators.\n The program should have a list or a dictionary containing 10 Question objects, one for\n each trivia question. 
Make up your own trivia questions on the subject or subjects of your\n choice for the objects.\n\"\"\""
},
{
"alpha_fraction": 0.6010209321975708,
"alphanum_fraction": 0.6380978226661682,
"avg_line_length": 30,
"blob_id": "f4a802b1bf05a840e0e862707a3c87366ae216fa",
"content_id": "11d0d25e06169591dc53ff2e1ccab022314601b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3756,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 120,
"path": "/LABS/TKinter/Automotive.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 6. Joe’s Automotive\n Joe’s Automotive performs the following routine maintenance services:\n • Oil change — $26.00\n • Lube job — $18.00\n • Radiator flush — $30.00\n • Transmission flush — $80.00\n • Inspection — $15.00\n • Muffler replacement — $100.00\n • Tire rotation — $20.00\n Write a GUI program with check buttons that allow the user to select any or all of these\n services. When the user clicks a button the total charges should be displayed.\n\"\"\"\nimport tkinter as tk\nfrom autoservice import AutoService\nfrom tkinter import ttk \n\n# Calc total from checkboxes in 'selected' state\ndef calcTotal(services, svc0, svc1, svc2, svc3, svc4, svc5, svc6 ):\n total = 0\n if 'selected' in svc0.state():\n total += services[0].getCost()\n else:\n total += 0\n if 'selected' in svc1.state():\n total += services[1].getCost()\n else:\n total += 0\n if 'selected' in svc2.state():\n total += services[2].getCost()\n else:\n total += 0\n if 'selected' in svc3.state():\n total += services[3].getCost()\n else:\n total += 0\n if 'selected' in svc4.state():\n total += services[4].getCost()\n else:\n total += 0\n if 'selected' in svc5.state():\n total += services[5].getCost()\n else:\n total += 0\n if 'selected' in svc6.state():\n total += services[6].getCost()\n else:\n total += 0\n return total\n\ndef dispTotal():\n total = calcTotal(services, svc0, svc1, svc2, svc3, svc4, svc5, svc6)\n totalString = 'Service Total: $'+str(total)\n stringVar.set(totalString)\n #label.pack()\n\n# Setting services into list\nservices = []\nservices.append(AutoService('Oil change',26.00))\nservices.append(AutoService('Lub Job',18.00))\nservices.append(AutoService('Radiator Flush', 30.00))\nservices.append(AutoService('Transmission Flush', 80.00))\nservices.append(AutoService('Inspection', 15.00))\nservices.append(AutoService('Muffler Replacement', 100.00))\nservices.append(AutoService('Tire Rotation', 20.00))\n\nwindow = tk.Tk()\n\n# Set the title for your window\nwindow.title('Auto Shop')\n\n# Set size of window\nwindow.geometry(\"600x600\")\n\n# Adding a label\ntitle = tk.Label(text = \"Welcome to the Auto Shop\")\n\n# grid() tells you where you want the label, (0,0) is default\ntitle.pack() \n\n# Setting menu\n\n# class svcCheck(ttk.Checkbutton):\n# def __init__(self, master, service, cost):\n# c = ttk.Checkbutton(master, text=service+'\\t\\t$'+str(cost))\n# c.pack()\n \n\n\nsvc0 = ttk.Checkbutton(window, text=services[0].getDesc()+'\\t\\t$'+str(services[0].getCost())) \nsvc1 = ttk.Checkbutton(window, text=services[1].getDesc()+'\\t\\t$'+str(services[1].getCost())) \nsvc2 = ttk.Checkbutton(window, text=services[2].getDesc()+'\\t\\t$'+str(services[2].getCost())) \nsvc3 = ttk.Checkbutton(window, text=services[3].getDesc()+'\\t\\t$'+str(services[3].getCost())) \nsvc4 = ttk.Checkbutton(window, text=services[4].getDesc()+'\\t\\t$'+str(services[4].getCost())) \nsvc5 = ttk.Checkbutton(window, text=services[5].getDesc()+'\\t\\t$'+str(services[5].getCost())) \nsvc6 = ttk.Checkbutton(window, text=services[6].getDesc()+'\\t\\t$'+str(services[6].getCost())) \nsvc0.pack() \nsvc1.pack()\nsvc2.pack()\nsvc3.pack()\nsvc4.pack()\nsvc5.pack()\nsvc6.pack()\ntotal = calcTotal(services, svc0, svc1, svc2, svc3, svc4, svc5, svc6)\ntotalString = 'Service Total: $'+str(total)\nstringVar = tk.StringVar()\nstringVar.set(totalString)\nscreenTotal = tk.Label(window, textvariable=stringVar).pack()\n\n\nclickCalc = tk.Button(text = \"Calculate Total\", bg = \"blue\", 
command=dispTotal)\nclickCalc.pack()\n\n\n\n\n\n\n\n\nwindow.mainloop()\n\n\n"
},
{
"alpha_fraction": 0.6996047496795654,
"alphanum_fraction": 0.7022398114204407,
"avg_line_length": 27.11111068725586,
"blob_id": "fc72847c048c9250f3bafbeb44c00cc35641222b",
"content_id": "6b17b48d61986ba7ab5883fb58501d2eda518cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 759,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 27,
"path": "/LABS/Iteration/enumerate.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# enumerate()\n\n# Iterates over different types of iterables \n# and returns both the index and the value of\n# each item.\n\n# Enumerate over some names \n# ',start = X' will determine the index where\n# the enumeration starts\nnames = ['Daniel', 'Joe','jim','Travis']\nprint(list(enumerate(names, start = 4)))\nfor name in enumerate(names, start = 6):\n print(name)\n\n# This allows us to control what is printing\n# Can shorten the ' start = ' command with just\n# the number\nfor count, item in enumerate(names):\n print(count, item)\n\nmy_string = 'Enumerating is Powerful'\nfor idx, ch in enumerate(my_string):\n print(f'Index is {idx} and character is {ch}')\n\n# dictionary comprehension w/ enumerate\nmy_dict = {k: v for k, v in enumerate(names)}\nprint(my_dict) "
},
{
"alpha_fraction": 0.6347305178642273,
"alphanum_fraction": 0.6706587076187134,
"avg_line_length": 15.399999618530273,
"blob_id": "8dab0314f3df061a43d2815264994950dd69abf4",
"content_id": "c25ae14bb259b8f8cdcbee7b9198ad566743a036",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 10,
"path": "/Networking/netscan2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport scapy.all as scapy\n\ndef scan(ip):\n\tarp_request = scapy.ARP(pdst=ip)\n\tprint(arp_request.summary())\n\t\n\nscan(\"10.0.2.2\") # python3 issue \n\n\n"
},
{
"alpha_fraction": 0.5971860289573669,
"alphanum_fraction": 0.6117769479751587,
"avg_line_length": 29.967741012573242,
"blob_id": "1cb4fe653d036983a218e5efd8fe2d13a4d237a0",
"content_id": "dbc1de1c53514b04d2ce2cdc600cf717bb2de859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1921,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 62,
"path": "/LABS/Classes/retailItem.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 5. RetailItem Class\nWrite a class named RetailItem that holds data about an item in a retail store. The class\nshould store the following data in attributes: item description, units in inventory, and price.\nOnce you have written the class, write a program that creates three RetailItem objects\nand stores the following data in them:\n\n Description Units in Inventory Price\nItem #1 Jacket 12 59.95\nItem #2 Designer Jeans 40 34.95\nItem #3 Shirt 20 24.95 \"\"\"\n\nclass RetailItem:\n # defining default attributes\n def __init__(self):\n self.__desc = ''\n self.__unitCount = 0\n self.__price = 0.0\n\n # Define setters\n def set_desc(self, desc):\n self.__desc = desc\n\n def set_unitCount(self, unitCount):\n self.__unitCount = unitCount\n\n def set_price(self, price):\n self.__price = price\n\n # Define getters\n def get_desc(self):\n return self.__desc\n\n def get_unitCount(self):\n return float(self.__unitCount)\n \n def get_price(self):\n return float(self.__price)\n\n\"\"\" # Define main\ndef main():\n # Initialize object storage list\n inventory = []\n\n # Iterate for input of data for 3 retail items\n for i in range(3):\n item = RetailItem()\n item.set_desc()\n item.set_unitCount()\n item.set_price()\n inventory.append(item)\n # Print visual assistance line\n print()\n \n # Print header for item listing\n print('\\tDescription:\\tUnits in Inventory:\\tPrice:')\n # Iterate through the count of items such that the iterator \n # variable of item can be used for counter for item\n # number labeling\n for item in range(len(inventory)):\n print(f'Item#{item+1}\\t{inventory[item].get_desc()}\\t\\t{inventory[item].get_unitCount()}\\t\\t{inventory[item].get_price()}')\n\nmain() \"\"\""
},
{
"alpha_fraction": 0.6278464794158936,
"alphanum_fraction": 0.6297982931137085,
"avg_line_length": 26.96363639831543,
"blob_id": "3145b934013f96b98e88bc623d36494240568a13",
"content_id": "5377a0c7da5e66e11474a57c5bff9565f019584b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1537,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 55,
"path": "/LABS/Classes/storeInventory.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import retailItem\nimport pickle\n\nfilename = 'inventory.dat'\n\ndef main():\n # Initialize object storage list\n inventory = []\n\n # Iterate for input of data for 3 retail items\n for i in range(3):\n item = retailItem.RetailItem()\n item.set_desc()\n item.set_unitCount()\n item.set_price()\n inventory.append(item)\n # Print visual assistance line\n print()\n \n # Print header for item listing\n print('\\tDescription:\\tUnits in Inventory:\\tPrice:')\n # Iterate through the count of items such that the iterator \n # variable of item can be used for counter for item\n # number labeling\n for item in range(len(inventory)):\n print(f'Item#{item+1}\\t{inventory[item].get_desc()}\\t\\t{inventory[item].get_unitCount()}\\t\\t{inventory[item].get_price()}')\n\n writeData(inventory, filename)\n\n\ndef readData(filename):\n # Opening the file in read mode\n emp_file = open(filename, 'rb')\n # Setting EOF to false\n end_of_file = False\n #Setting while loop to get each object in binary file\n while not end_of_file:\n try:\n #unpickle next object\n dictionary = pickle.load(emp_file)\n return dictionary\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n emp_file.close()\n\ndef writeData(data, filename):\n #Opens the file on disk for writing\n q_file = open(filename, 'wb')\n #Dump data to file\n pickle.dump(data, q_file)\n #close file\n q_file.close()\n\nmain()"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6476545929908752,
"avg_line_length": 32.51785659790039,
"blob_id": "021493547b85466ea78945f0ff428e05e4cc33e3",
"content_id": "434c4a92b7b092d897cfc7ecc33b813eda122a9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1876,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 56,
"path": "/LABS/DICTIONARIES-SETS/dict-set-encrypted.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Configuring the encryption dictionary\ncodebook = {}\nalphanum ='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\nencoded = '098765432!@$#%^&*()_+=-][}{;\\'\":\\\\`~}]abcdefghijklmnopqrstuvwxyz'\n# Modified to use zip function to replace this code\n# for i in range(len(alphanum)):\n# key = alphanum[i]\n# value = encoded[i]\n# codebook.update({key: value})\nfor alphanum, encoded in zip(alphanum,encoded):\n codebook.update({alphanum: encoded})\n\n#Define main function\ndef main():\n #Calls the readFile function to read the content of user specified file into the variable\n original = readFile()\n #Runs the substitution function on the input text using the codebook dictionary\n substitution(original,codebook)\n\n#Substitution function\ndef substitution(contents,codebook):\n #creates and opens encrypted.txt file in write mode\n newfilename = 'encrypted.txt'\n f = open(newfilename, 'w')\n #Empty list for strings to be written once encoded\n lines = []\n #Loop through lines..\n for i in contents:\n #setting an empty string to place substitution characters\n line = ''\n # ...to loop through each line character\n for j in i:\n #Checks character presence as a codebook key\n if j in codebook:\n #adds the value to the string\n line += codebook[j]\n else:\n #writes same character if not in codebook\n line += j\n #writes the encrypted line to the lines array \n lines += line\n #Writes the list of lines to the file\n f.writelines(lines)\n f.close()\n \ndef readFile():\n #Getting filename from input for filename\n filename = input('Provide the new file name:\\n')\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n f.close()\n return contents\n\nmain()"
},
{
"alpha_fraction": 0.6051108837127686,
"alphanum_fraction": 0.6181291937828064,
"avg_line_length": 33.01639175415039,
"blob_id": "4eb21c4bff75b4958a43b161e0e2019122ac32bf",
"content_id": "a190a1406d723a901f064fe35f404b1b9a510adb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2074,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 61,
"path": "/LABS/Classes/emp-class-test.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import employees\n\n# Define main\ndef main():\n #worker = createWorker()\n #displayWorker(worker)\n supervisor = createSup()\n displaySup(supervisor)\n\n# Define function to create a worker instance\ndef createWorker():\n name = input('What is the employee\\'s name?\\n')\n num = 0\n while num < 1:\n num = int(input('What is the employee\\'s ID number?\\n'))\n shift = 0\n while (shift < 1 and num > 2):\n shift = int(input('What is the employee\\'s shift? (1: Day, 2: Night):\\n'))\n pay_rate = 0.0\n while pay_rate < 1:\n pay_rate = float(input('What is the employee\\'s pay rate (ex: 52.50)?\\n')) \n worker = employees.ProductionWorker(name, num, shift, pay_rate)\n return worker\n\n# Define function to display the worker object data\ndef displayWorker(worker):\n print('\\nEmployee Info:\\n--------------\\n')\n print('Name:\\t', worker.get_name())\n print('Employee ID:\\t', worker.get_number())\n shiftText = ''\n if worker.get_shift() == 1:\n shiftText = 'Day Shift'\n else:\n shiftText = 'Night Shift'\n print('Shift: ', shiftText)\n print('Pay Rate: {:.2f}'.format(worker.get_pay_rate()))\n\n# Define function to create a supervisor \ndef createSup():\n name = input('What is the supervisor\\'s name?\\n')\n num = 0\n while num < 1:\n num = int(input('What is the supervisor\\'s ID number?\\n'))\n salary = -1.0\n while salary < 0.0:\n salary = float(input('What is the supervisor\\'s salary?\\n'))\n prod_bonus = 0.0\n while prod_bonus < 1:\n prod_bonus = float(input('What is the supervisor\\'s production bonus?\\n')) \n worker = employees.ShiftSupervisor(name, num, salary, prod_bonus)\n return worker\n\n# Define function to display the supervisor object data\ndef displaySup(worker):\n print('\\nEmployee Info:\\n--------------\\n')\n print('Name:\\t\\t\\t', worker.get_name())\n print('Employee ID:\\t\\t', worker.get_number())\n print('Salary:\\t\\t\\t${:.2f}'.format(worker.get_salary()))\n print('Production Bonus:\\t${:.2f}'.format(worker.get_prod_bonus()))\n print()\nmain()"
},
{
"alpha_fraction": 0.6146327257156372,
"alphanum_fraction": 0.6892772316932678,
"avg_line_length": 47.20000076293945,
"blob_id": "2275359ce6273a7564a7e4a47d509ac4754f7614",
"content_id": "212caffea38411bb5305b724cc34749946f5d477",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3376,
"license_type": "no_license",
"max_line_length": 298,
"num_lines": 70,
"path": "/00-Table-of-Contents.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|<a href=\"https://github.com/CyberTrainingUSAF/01-Course-Introduction-and-setup/blob/master/README.md\" rel=\"Return to Course Introduction\"> Return to IDF Course introduction </a>| **or** |<a href=\"https://github.com/CyberTrainingUSAF/11-Cyber-A-La-Carte\" > Return to Cyber A-La-Carte </a>|\n|---| :---: |---|\n\n---\n\n## Python - Table of contents\n\n* [Introduction](README.md)\n* [Python Features](01_python_features/README.md)\n * [Introduction to Python](01_python_features/01_python_intro.md)\n * [PyDocs & PEP8](01_python_features/02_pydocs_pep8.md)\n * [Objects](01_python_features/03_objects.md)\n * [Lab 1A](01_python_features/lab1a.md)\n * [Py2 vs Py3 Differences](01_python_features/04_py2_py3.md)\n * [Running Python](01_python_features/05_running_python.md)\n* [Data Types](02_Data_Types/README.md)\n * [Variables](02_Data_Types/01_variables.md)\n * [Lab 2A](02_Data_Types/lab2a.md)\n * [Numbers](02_Data_Types/02_numbers.md)\n * [Lab 2B & Lab2C](02_Data_Types/lab2b_c.md)\n * [Strings](02_Data_Types/03_strings.md)\n * [Lab 2D & Lab2E](02_Data_Types/lab2d_e.md)\n * [Lists](02_Data_Types/04_lists.md)\n * [Lab 2F](02_Data_Types/lab2f.md)\n * [Bytes and Bytearray](02_Data_Types/05_byte_array.md)\n * [Lab 2G](02_Data_Types/lab2g.md)\n * [Tuples, range & buffer](02_Data_Types/06_tuples.md)\n * [Dictionaries & Sets](02_Data_Types/07_mapping.md)\n * [Lab 2H](02_Data_Types/lab2h.md)\n* [Control Flow](03_Flow_Control/README.md)\n * [Operators](03_Flow_Control/01_operators.md)\n * [I/O Print](03_Flow_Control/02_io_print.md)\n * [Lab 3A](03_Flow_Control/lab3a.md)\n * [I/O: Files](03_Flow_Control/03_io_files.md)\n * [Lab 3B](03_Flow_Control/lab3b.md)\n * [If, Elif, Else](03_Flow_Control/04_if_elif_else.md)\n * [Lab 3C](03_Flow_Control/lab3c.md)\n * [While Loops](03_Flow_Control/05_while_loops.md)\n * [Lab 3D](03_Flow_Control/lab3d.md)\n * [For Loops](03_Flow_Control/06_for_loops.md)\n * [Lab 3E](03_Flow_Control/lab3e.md)\n * [Break and Continue](03_Flow_Control/07_break_continue.md)\n * [Lab 3F](03_Flow_Control/lab3f.md)\n * [Recursion](03_Flow_Control/08_recursion.md)\n* [Functions](04_functions/README.md)\n * [Scope](04_functions/01_scope.md)\n * [User Functions](04_functions/02_user_functions.md)\n * [Lambda Functions](04_functions/03_lambda_functions.md)\n * [Lab 4A & 4B](04_functions/lab4a.md)\n * [List Comprehension](04_functions/04_list_comprehension.md)\n * [Closures, Iterators & Generators](04_functions/05_closures_iterators_generators.md)\n* [Object Oriented](05_oop/README.md)\n * [Modules](05_oop/01_modules.md)\n * [Packages](05_oop/02_packages.md)\n * [Lab5A](05_oop/lab5a.md)\n * [User Classes Pt1](05_oop/03a_user_classes.md)\n * [Lab5B](05_oop/lab5b.md)\n * [User Classes Pt2](05_oop/03b_user_classes_pt2.md)\n * [Lab5C](05_oop/lab5c.md)\n * [Exceptions](05_oop/04_exceptions.md)\n * [OOP Principles](05_oop/05_oop_principles.md)\n * [OOP Terminology Review \\[Bonus Lab\\]](05_oop/06_oop_terminology.md)\n * [Lab5D](05_oop/lab5d.md)\n* [Advanced](06_advanced/README.md)\n * [CTypes and Structures](06_advanced/01_ctypes.md)\n * [Regular Expressions](06_advanced/02_regular_expressions.md)\n * [Additional Libraries and Modules](06_advanced/03_additional_libaries_modules.md)\n * [Multithreading](06_advanced/04_multithreading.md)\n * [UnitTesting](06_advanced/05_unit_testing.md)\n * [Metaclasses](06_advanced/06_metaclasses.md)\n\n\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6367647051811218,
"avg_line_length": 20.28125,
"blob_id": "9e806cdaf622fad3cc1b35a8023e118ba8b1d6f4",
"content_id": "9f75f419ff06202a01d7701ad872e0f3feeb606a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 32,
"path": "/LABS/Multiprocessing/test_multiprocessing2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nlets try it 10 times now. Our computer doesn't have 10 cores, but its set up to where it can switch\noff between cores when it isn't busy\n\n\"\"\"\n\nimport multiprocessing\nimport time\n\nstart = time.perf_counter()\n\ndef do_something():\n print('Sleeping 1 second...')\n time.sleep(1)\n print('Done Sleeping...')\n\nif __name__ == \"__main__\":\n # create list so we can join all the processes\n processes = []\n\n for _ in range(10):\n p = multiprocessing.Process(target=do_something)\n p.start()\n processes.append(p)\n\n for process in processes:\n process.join()\n\n\n finish = time.perf_counter()\n\n print(f'Finished in {finish-start} second(s)')"
},
{
"alpha_fraction": 0.6673751473426819,
"alphanum_fraction": 0.6794190406799316,
"avg_line_length": 37.671234130859375,
"blob_id": "0419f49baefc3a95a3076ac29744d2007888e736",
"content_id": "8485cbcfebd7cf8579e03361ff96e672cc37fff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2823,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 73,
"path": "/Practice_Test/computer_list.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\nWrite the function create_computer_objects that will read data from a file to \ncreate one or more computer objects and store one or more of these objects in a list.\n \nEach valid object will be stored in the list in the order the data is read from the file.\nThe Computer class used to create the objects is defined in the supportClass.py file. \n\nObjects should be created with cost as a float, and ramGB/storageGB as integer\n\nThe function receives one parameter which is the name of the file. \nEach line in the file will be in the format of: brand, model, cost, ram, storage\n\nExample: Lenovo, Super Duper, 795.95, 16, 512\n \nOnly the following computers should have objects created and stored in the list:\n- all brands except Asus, and\n- cost greater than $500.00 and less than $1000, and\n- 8 or more GB of ram\n\nIf a file cannot be opened, the function will return the string \"FILE CORRUPTED\"\nIf any of the cost, ram, or storage are invalid numbers, or any line in the file contains less\nor more than the five required items, the function will return the string \"INVALID DATA\"\n\nAfter the file is processed successfully, the function will return the list of objects.\n\n'''\n\n# Importing the class for computer\nfrom supportClass import * \n\n# defining the function to create computer objects from file\ndef create_computer_objects(fileName):\n # Try opening file with exception handling if file unable to be found or read\n try:\n f = open(fileName, 'r')\n a = f.readlines()\n f.close()\n except FileNotFoundError:\n return f\"FILE CORRUPTED\"\n\n # creating empty list to store computer objects\n compList = []\n\n # Iterating through each line read from file\n for i in a:\n # splitting list on \", \" which is expected format of the text file\n lineList = i.split(\", \")\n\n # checking for valid count of line elements based on split\n if len(lineList) != 5:\n # Inform user data is invalid\n return f\"INVALID DATA\"\n \n # Storing each line element based on expected order\n brand = lineList[0]\n model = lineList[1]\n # Trying to convert cost, ram, storage into appropriate data types \n try:\n cost = float(lineList[2])\n ramGB = int(lineList[3])\n storageGB = int(lineList[4])\n #if any of the conversions fail, inform user of invalid data\n except ValueError:\n return f\"INVALID DATA\"\n #create the computer object\n comp = Computer(brand, model, cost, ramGB, storageGB)\n \n # Add computer objects to list that meet prompt requirements\n if (comp.brand not in ['Asus','asus'] and (comp.cost > 500 and comp.cost < 1000) and comp.ramGB >= 8):\n compList.append(comp)\n\n #return the list \n return compList\n"
},
{
"alpha_fraction": 0.6472843289375305,
"alphanum_fraction": 0.6511182188987732,
"avg_line_length": 33.043479919433594,
"blob_id": "8a577b39f4184b977aa58a5271ae391e7e5ba8b0",
"content_id": "d46843d96e9bc7384e34f58ae8732197dca4beb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 46,
"path": "/LABS/Labs-4-1/lab4-1-rainFall.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Constant array for Months\nMONTHS = ('JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC')\n\n#Average rainfall\n#Define main function\ndef main():\n #Array for rainfall numbers\n rainFall = []\n years = getYears()\n getRainfall(years, rainFall, MONTHS)\n measureRain(rainFall)\n\n\ndef getYears():\n #Initial prompt for input\n years = input('How many years of rainfall data to input?\\n')\n #Input validation checking numeric and a positive number\n while int(years) <= 0 and years.isnumeric() == False:\n years = input('Invalid input. How man years of rainfall data to input?\\n')\n return int(years)\n\ndef getRain(month,year):\n #Initial prompt for input\n rain = input('How much rain for {} in year {}?\\n'.format(month,year))\n #Input validation checking numeric and a positive number\n while float(rain) < 0 and year.replace('.','').isnumeric() == False:\n rain = input('Invalid input. How much rain for {} in year {}?\\n')\n return float(rain)\n\ndef getRainfall(years, rainFall, months):\n for i in range(years):\n for j in months:\n rainFall.append(getRain(j,i+1))\n\ndef measureRain(rainFall):\n total = 0\n for i in rainFall:\n total += i\n print('Total months of rainfall in data: {}'.format(len(rainFall)))\n print('Total rainfall: {:.2f} inches'.format(total))\n print('Average monthly rainfall: {:.2f}'.format(total/len(rainFall)))\n print('The maximum rainfall was {}'.format(max(rainFall)))\n print('The minimum rainfall was {}'.format(min(rainFall)))\n \n\nmain()"
},
{
"alpha_fraction": 0.6527870893478394,
"alphanum_fraction": 0.6527870893478394,
"avg_line_length": 50.344825744628906,
"blob_id": "e4df9bdb44bd9ab9f3f0a8241d4c7dd6d2df676f",
"content_id": "e7aa6b15e7a91096c57f048acf6ab5e7322bcac5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2978,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 58,
"path": "/Algorithms/collections.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Collection - a group of zero or more items that can be reated as a \n# conceptual unit. Things like lists, strings, tuples, dictionaries\n# \n# Other collection types - stacks queues priority queues binary \n# search trees, heaps, graphs, and bags\n# \n# Typically dynamic rather than static with the exception of tuples and strings (immutable)\n# \n# linear collection - like people in a line, they are ordered by position.\n# Each item except the first has a unique predecessor\n# \n# Examples: grocery lists, atm line, stacks of dinner plates \n# \n# Heirarchical collection - ordered in a structure resembling an upside down tree.\n# Each data item except the one at the top (the root) has\n# just one predecessor called its parent.but potenitally \n# has many successors\n# \n# Examples: File directory system, organizational trees, and a table of contents \n# \n# Graph collection - also called graph, each item can have many predecessors and many \n# successors. These are also called neighbors \n# \n# Examples: include airline routes, electrical wiring diagrams, and WWW\n# \n# unordered collections - these are not in any particular order, and its not possible to \n# meaninfully speak of an item's predecessor or successor\n# Examples: bag of marbles \n#*****************************OPERATION TYPES**************************************\n# Determine the size - like using python's len() to obtain the number of items in the\n# collection\n\n# Test for item membership - Use Python's in operator to search for a given target\n# item in the collection. Returns True if the item is found\n# and False otherwise \n# \n# Traverse collection - Use python's for loop to visit each item in the collections. The \n# order which items are visited depends upon the type of collection\n# \n# Obtain a string representation - Use Python's str()\n# \n# Test for equality - Use Python's == operator to determin if collections are equal. Two\n# collections are equal if they are of the same type and contain the \n# same items. The order in which pairs of items are compared depends \n# on the type of collection.\n# \n# Concatenate collections - Use Python's + operator to obtain a new collection of the same\n# type as the operands, and containing the items in each.\n# \n# Convert to another type of collection - Create a new collection w/ same items as source\n# \n# Insert an item - Add the item to collection, possibly at a given position\n# \n# Remove an item - Remove the item from the collection, possibly at a given position\n# \n# Replace an utem - Combine removal and insertion into one operation\n# \n# Access or retrieve and item - obtain an item, possibly at a given position "
},
{
"alpha_fraction": 0.5371549725532532,
"alphanum_fraction": 0.5371549725532532,
"avg_line_length": 20.454545974731445,
"blob_id": "a718c0d5c682a4eefb3a218c81025b0fa7dbf290",
"content_id": "c935eafc951b5379597444837bb5f784ca7254a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 471,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 22,
"path": "/LABS/Classes/coursepersons.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Person:\n def __init__(self, name, title, number):\n self.__name = name\n self.__title = title\n self.__number = number\n\n # Settors\n def set_name(self, name):\n self.__name = name\n\n def set_address(self, address):\n self.__address = address\n \n def set_number(self, number):\n self.__number = number\n\n # Getters\n def get_name(self):\n self.__name\n\n def get_address(self):\n self.__address"
},
{
"alpha_fraction": 0.5557714700698853,
"alphanum_fraction": 0.5623785257339478,
"avg_line_length": 23.990291595458984,
"blob_id": "8a789cdcdfcbac1f64f80ea553ea4994af7b8441",
"content_id": "fb2d294ef3f33ff33aa3cebe077687fa30daeb32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2573,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 103,
"path": "/Stacks/stacks.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" \nStacks are linear collections in which access is completely \n restricted to just one called top\nLIFO - last in first out protocol, (dishes stacked on table)\n push - put item on stack\n pop - remove top item from stack\n peek - examine object on top\n\n -The stack type is not built into python so we use a list to \n emulate an array-based stack\n -Going to use the list methos and pop\n -It's possible to utilize other list methods such as insert, \n replace, and remove but that defeats the purpose of the stack\n\"\"\"\n\nclass Stack:\n def __init__(self):\n self.my_stack = []\n \n def push(self, item):\n self.my_stack.append(item)\n\n def getStack(self):\n return self.my_stack\n \n def pop(self):\n return self.my_stack.pop()\n\n def peek(self):\n if not self.is_empty():\n return self.my_stack[-1]\n\n def is_empty(self):\n return self.my_stack == []\n\n\ns = Stack()\n# s.push(\"first\")\n# print(s.getStack())\n# s.push(\"Next one\")\n# print()\n# print(s.getStack())\n# s.push(\"top\")\n# print()\n# print(s.getStack())\n# print()\n# print(s.pop())\n# print()\n# print(s.getStack())\n# print(s.peek())\n# print(s.pop())\n# print(s.pop())\n# print(s.is_empty())\n\n\"\"\"\nUse a stack to check whether or not a string has balanced usage of parentheses\n\nExample:\n Balances: (), ()(), ((({})))\n Unbalanced: ((), {{{)}], [][]]], ([)]\n\"\"\"\ndef is_match(p1, p2):\n if p1 == \"(\" and p2 == \")\":\n return True\n elif p1 == \"{\" and p2 == \"}\":\n return True\n elif p1 == \"[\" and p2 == \"]\":\n return True\n else:\n return False\n\ndef is_paren_bal(paren_string):\n s = Stack()\n is_balanced = True\n # Keep track of where we are\n index = 0\n #loop through the string \n while index < len(paren_string) and is_balanced:\n paren = paren_string[index]\n #is it an open paren>\n if paren in \"({[\":\n s.push(paren)\n # it's a closed paren\n elif paren in \"})]\":\n top = s.pop()\n # The popped item and the current item we're on\n if not is_match(top, paren):\n #not a match\n is_balanced = False\n # else:\n # print(\"Stop Playin'\")\n # increment to evaluate the rest of the string\n index += 1\n\n if s.is_empty() and is_balanced:\n return True\n else:\n return False\n\nstring1 = '(({s}))'\nstring2 = '{{{)}]'\nprint(f'String {string1} is balanced: {is_paren_bal(string1)}')\nprint(f'String {string2} is balanced: {is_paren_bal(string2)}')"
},
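A quick aside on the balanced-parentheses routine above: it only behaves correctly once the empty-stack case is guarded, since a string such as ')(' would otherwise pop from an empty stack. A minimal sketch of the same idea using a bare Python list (the PAIRS mapping and test strings here are illustrative, not from the repo):

```python
# Sketch: balanced-bracket check with a plain list as the stack.
# Non-bracket characters are ignored, matching the original's behavior.
PAIRS = {')': '(', ']': '[', '}': '{'}

def is_balanced(text):
    stack = []
    for ch in text:
        if ch in "([{":
            stack.append(ch)
        elif ch in PAIRS:
            # A closer with no opener waiting, or the wrong opener, fails
            if not stack or stack.pop() != PAIRS[ch]:
                return False
    # Any openers left over were never closed
    return not stack

assert is_balanced("((({})))")
assert not is_balanced(")(")   # closer arrives before any opener
assert not is_balanced("((")   # openers never closed
```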
{
"alpha_fraction": 0.6592857241630554,
"alphanum_fraction": 0.6928571462631226,
"avg_line_length": 41.45454406738281,
"blob_id": "9274734c6e50e164d86eb699a38088f1c6259aa1",
"content_id": "787f81c23420f529a876dff8f8939af198246f7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1400,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 33,
"path": "/LABS/Labs-3-4/lab3-4-8-Discount.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting up main function\ndef main():\n #Calling Discount function passing result of the numPurchase function as an argument\n calcDiscount(int(numPurchase()))\n\n#Defining function to capture and validate the user's input of the number of packages purchased\ndef numPurchase():\n #Getting initial input\n packages = input('How many packages did you buy:\\n')\n #Validating input to be numeric and greater than 0\n while int(packages) < 0 or packages.isnumeric() == False:\n packages = input('Invalid input. How many packages did you buy:\\n')\n #Returning result to main\n return packages\n\n#Function to determine the level of discount\ndef calcDiscount(num):\n #No discount if less than 10 packages purchased\n if num < 10:\n print('There is no discount for {} purchases'.format(num))\n #20 percent discount 10-19 packages purchased\n elif num >= 10 and num <= 19:\n print('There is a 20 percent discount for {} purchases'.format(num))\n #30 percent discount 20-49 packages purchased\n elif num >= 20 and num <= 49:\n print('There is a 30 percent discount for {} purchases'.format(num))\n #40 percent discount 50-99 packages purchased\n elif num >= 50 and num <= 99:\n print('There is a 40 percent discount for {} purchases'.format(num))\n elif num >= 100:\n print('There is a 50 percent discount for {} purchases'.format(num))\n\nmain()"
},
{
"alpha_fraction": 0.5191457867622375,
"alphanum_fraction": 0.5493372678756714,
"avg_line_length": 22.431034088134766,
"blob_id": "7ca7ccada3a39c304e9e46e194ff1211b5ae5a76",
"content_id": "f2c6e838c51013c7554245efa79b272f79db90ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1358,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 58,
"path": "/LABS/FILE-IO/file-io.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def main():\n '''#WRITE TO A FILE\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\names.txt' \n f = open(filename, 'w')\n\n f.write('name 1\\n')\n f.write('name 2\\n')\n f.write('name 3\\n')\n\n f.close()\n '''\n\n '''\n #READ FROM A FILE\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\names.txt' \n f = open(filename, 'r')\n f_contents = f.read()\n f.close()\n print(f_contents)\n '''\n\n \"\"\" #READ A LINE FROM FILE\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\names.txt' \n f = open(filename, 'r')\n #Readline reads the line then moves the pointer in the file to next line\n line1 = f.readline()\n line2 = f.readline()\n line3 = f.readline()\n\n #Strips characters\n line1 = line1.rstrip('\\n')\n line2 = line2.rstrip('\\n')\n line3 = line3.rstrip('\\n')\n \n \n f.close()\n \n print(line1, end='')\n print(line2, end='')\n print(line3, end='') \"\"\"\n\n \"\"\" #APPEND FILE\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\names.txt' \n f = open(filename, 'a')\n f.write('NewName 1\\n')\n f.close() \"\"\"\n\"\"\" \n #WRITING TO SALES\n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\sales.txt' \n f = open(filename, 'w')\n f.write('1000.0\\n2000.0\\n3000.0\\n4000.0\\n5000.0\\n')\n f.close() \"\"\"\n\n \n filename = 'C:\\\\Users\\\\student\\\\Documents\\\\sales.txt' \n f = open(filename, 'w')\n f.close()\nmain()"
},
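The examples above pair every open() with a manual f.close(); forgetting the close can leave buffered writes unflushed. A small sketch of the same write/read/append patterns using `with`, which closes the file even if an error occurs (the relative 'names.txt' path here is hypothetical, not the lab's C:\ path):

```python
# Sketch: the same file patterns via context managers.
with open('names.txt', 'w') as f:      # write mode truncates/creates
    f.write('name 1\n')
    f.write('name 2\n')

with open('names.txt', 'a') as f:      # append mode adds to the end
    f.write('name 3\n')

with open('names.txt', 'r') as f:      # read mode; iterate line by line
    for line in f:
        print(line.rstrip('\n'))
```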
{
"alpha_fraction": 0.6391883492469788,
"alphanum_fraction": 0.701331615447998,
"avg_line_length": 16.909090042114258,
"blob_id": "7b2f37b83036df7d2fee1e8258309b66cdaaff6b",
"content_id": "8bed586b4693eda142467a385a844ca0af03c073",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1577,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 88,
"path": "/lab-set-lecture.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Set contains a collection of unique values\n#and works like a mathematical set\n\n#All the elements in a set must be unique, no two elements can have the same value\n\n#Sets are unordered\n\n#Elements stored in a set can be of different data types\nmySet= set(['a','b','c'])\nprint(mySet)\n\nmySet2 = set('abc')\nprint(mySet2)\n\nmySet3 = set('aabbcc')\nprint(mySet3)\n#All of the above appear the same when printed\n\n#set can only take on arg\n#mySet4 = set('one','two','three') is invalid\n\nmySet4 = set('one,two,three')\nprint(mySet4)\n\nnewSet = set()\nnewSet.add(1)\nnewSet.add(2)\nnewSet.add(3)\nprint('newSet', newSet)\n\nnewSet.update([4,5,6])\n\nnewSet2 = ([7,8,9])\nnewSet.update(newSet2)\n\nnewSet.remove(1)\n\n#using for loop to iterate\nnewSet3 = set('abc')\nfor val in newSet3:\n print(val)\n\nnumbers_set([1, 2, 3])\nif 1 in numbers_set:\n print('The value {} is in the set'.format(val))\n\nif 99 not in numbers_set:\n print('The value {} is not in the set'.format(val))\n\n#unions\nset1= set([1,2,3,4])\nset2= set([3,4,5,6])\nset3= set1.union(set2)\nprint(set1)\nprint(set2)\nprint(set3)\n\nset5= set1|set2\n\n#Find intersection\nset4= set1.intersection(set2)\nprint(set4)\nset6= set1&set2\n\ncharSet = ('abc')\ncharSetUpper = ('ABC')\n\n#difference \nset7 = set1.difference(set2)\nset8 = set2.difference(set1)\nprint(set1)\nprint(set2)\n\nset9 = set1-set2\nprint(set9)\n\n#Finding symmetric differences of sets\nset10 = set1.symmetric_difference(set2)\nprint(set10)\n\nset11 = set1 ^ set2\nprint(set11)\n\n#Finding subset\nset12 = set([1,2,3,4,5,6])\nset13 = set([1,2,3])\nprint(set13.issubset(set12))\nprint(set12.issuperset(set13))\n\n"
},
{
"alpha_fraction": 0.5708661675453186,
"alphanum_fraction": 0.6318897604942322,
"avg_line_length": 28.941177368164062,
"blob_id": "8f97e98be7378e4c06d7930858a58f1f25171ef1",
"content_id": "7d673a310efc8020a73749f1e4d2c24b4bdbb52b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 17,
"path": "/LABS/Classes/car_demo.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program demonstrates the Car class\n\nimport vehicles\n\ndef main ():\n # Create an object from Car class\n # The car is a 2007 Audi with 12,500 miles, price at $21,500.00, and has 4 doors\n used_car = vehicles.Car('Audi', 'A3', 12500, 21500.00, 4)\n\n # Display car's data\n print('Make: ', used_car.get_make())\n print('Model: ', used_car.get_model())\n print('Mileage: ', used_car.get_mileage())\n print('Price: ', used_car.get_price())\n print('Doors: ', used_car.get_doors())\n\nmain()"
},
{
"alpha_fraction": 0.6017488241195679,
"alphanum_fraction": 0.6049284338951111,
"avg_line_length": 27.613636016845703,
"blob_id": "6238652a165172518b633e8bedcef9a54fe597fb",
"content_id": "338de4455c8b55327a26cf4e77bac2a1b0962c57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 44,
"path": "/LABS/Socket/tsTclnt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom socket import *\nimport pickle\nimport argparse\n\ndef client_conn(HOST, PORT):\n BUFSIZ = 2048\n ADDR = (HOST, PORT)\n\n tcpCliSock = socket(AF_INET, SOCK_STREAM)\n try:\n tcpCliSock.connect(ADDR) \n except ConnectionRefusedError as e:\n print(f\"Bad Address or Port: {e}\")\n return\n\n while True:\n data = input('> ')\n if not data:\n break\n tcpCliSock.send(data.encode())\n data = tcpCliSock.recv(BUFSIZ)\n if not data:\n break\n result = pickle.loads(data)\n if isinstance(result, list):\n for i in result:\n print(i)\n else:\n print(result)\n\n tcpCliSock.close()\n\nif __name__ == '__main__': \n # This series of statements allows for in-line arguments\n parser = argparse.ArgumentParser (description='TCP Socket Client Example') \n parser.add_argument('--port', action=\"store\", dest=\"port\", type=int, required=True) \n # This was testing how to add additional, optional arguments\n parser.add_argument('--host', action=\"store\", dest=\"host\", type=str, required=True)\n given_args = parser.parse_args() \n port = given_args.port \n host = given_args.host\n\n client_conn(host, port)"
},
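The client above expects a peer that recv()s its text and send()s back a pickled object (a list exercises the isinstance branch). A minimal counterpart sketch for local testing; the port number, the one-connection-at-a-time loop, and the split-into-words reply are assumptions, not the course's actual server:

```python
#!/usr/bin/env python
# Sketch: TCP server whose replies the client's pickle.loads() can decode.
from socket import *
import pickle

HOST, PORT, BUFSIZ = '', 21567, 2048    # port chosen arbitrarily

tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind((HOST, PORT))
tcpSerSock.listen(5)

while True:
    cliSock, addr = tcpSerSock.accept()
    while True:
        data = cliSock.recv(BUFSIZ)
        if not data:
            break
        # Reply with a pickled list so the client prints item by item
        cliSock.send(pickle.dumps(data.decode().split()))
    cliSock.close()
```

With this running, the client would be started as `python tsTclnt.py --host 127.0.0.1 --port 21567`.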
{
"alpha_fraction": 0.6491379141807556,
"alphanum_fraction": 0.6793103218078613,
"avg_line_length": 35.28125,
"blob_id": "4a23b8eca75e3ab4d85135818bf5c496292894d4",
"content_id": "e1117d1465d3882a9e4d1c58cb9d027815819c97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 32,
"path": "/LABS/Labs-3-4/lab3-4-9-shipCharge.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting up main function\ndef main():\n #Calling Charge function passing result of the getWeight function as an argument\n calcCharge(float(getWeight()))\n\n#Defining function to capture and validate the user's input of the number of packages purchased\ndef getWeight():\n #Getting initial input\n packageWeight = input('What is the weight of your package:\\n')\n #Validating input to be numeric and greater than 0.00\n while float(packageWeight) < 0 or packageWeight.replace('.','').isnumeric() == False:\n packageWeight = input('Invalid input. What is the weight of your package:\\n')\n #Returning result to main\n return packageWeight\n\n#Function to determine shipping charge\ndef calcCharge(num):\n #Charge for less than 2 pounds\n if num <= 2.00:\n print('The shipping charge is $1.10')\n #Charge for over 2 pounds upt0 6\n elif num > 2 and num <= 6:\n print('The shipping charge is $2.20')\n #Charge for over 6 pounds upto 10\n elif num > 6 and num <= 10:\n print('The shipping charge is $3.70')\n #Charge for more than 10 pounds\n elif num > 10:\n print('The shipping charge is $3.80')\n \n\nmain()"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.6957026720046997,
"avg_line_length": 32.7843132019043,
"blob_id": "bca28646381cf81029933d1f4cc1b769b2aa48b6",
"content_id": "207a8f55be5783ac021324452bfb705952ac6a82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1722,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 51,
"path": "/LABS/Socket/Exam/prompt2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" Scraping Numbers from HTML using python modules. In this assignment you\nwill write a Python program similar to \nhttp://www.py4e.com/code3/urllink2.py. The program will read the HTML\nfrom the data files below, and parse the data, extracting numbers and\ncompute the sum of the numbers in the file. \"\"\"\n\n\"\"\" We provide two files for this assignment. One is a sample file where \nwe give you the sum for your testing and the other is the actual data \nyou need to process for the assignment.\n\nSample data: http://py4e-data.dr-chuck.net/comments_42.html (Sum=2553)\n\nActual data: http://py4e-data.dr-chuck.net/comments_57125.html \n(Sum ends with 54)\n\nYou do not need to save these files to your folder since your program will \nread the data directly from the URL. \"\"\"\n\nimport requests\nimport bs4\nfrom bs4 import BeautifulSoup as bs\n\ndef main():\n # Prompt user for url\n url = input('Please provide a URL with a table:\\n')\n\n # Grab webpage data\n r = requests.get(url)\n # Create the soup (parsed html)\n soup = bs(r.content, 'html.parser')\n\n # Extract all the comments with the number data\n comments = soup.find_all('span',{\"class\": \"comments\"})\n # Print a statement reflecting the URL input\n print(f'The total of the \\'comments\\' column in the table at \\'{url}\\' is:')\n # Call the commentTotal function to do the math\n print(f'{commentTotal(comments)}') \n\ndef commentTotal(comments):\n # Init acculmulator to 0\n total = 0\n # Loop through the list \n for comment in comments:\n # Check if the content is numeric \n if comment.text.isnumeric() :\n # If it is, add it to the accumulator\n total += int(comment.text)\n return total\n\n# Call main()\nmain()"
},
{
"alpha_fraction": 0.47863247990608215,
"alphanum_fraction": 0.5156695246696472,
"avg_line_length": 17.526315689086914,
"blob_id": "26ed93117c173b2116b1b31af0ddaf8b8a5e0387",
"content_id": "6047ef6381946a43ede0bcce1bfaaff491c9a597",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 19,
"path": "/LABS/Socket/daytimedummy.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from socket import *\ndef main():\n host = '129.6.15.26'\n\n sock = socket(AF_INET, SOCK_STREAM)\n addr = (host,13)\n \n msg = b\"What happened?!?\\n\"\n sock.connect(addr)\n sock.send(msg)\n while True:\n data = sock.recv(512)\n if data:\n print(data.decode())\n else:\n break\n sock.close()\n\nmain()"
},
{
"alpha_fraction": 0.5476602911949158,
"alphanum_fraction": 0.5502599477767944,
"avg_line_length": 23.04166603088379,
"blob_id": "632c570a2fb45edab68d83e58f129a2e82306480",
"content_id": "a00d21e453ac4e399ec54dc804fb3db18701df34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 48,
"path": "/LABS/Classes/personnel.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class PersonInfo:\n def __init__(self):\n self.__name = ''\n self.__address = ''\n self.__age = 0\n self.__phoneNum = ''\n \n def set_name(self):\n self.__name = input('Provide the person\\'s name:\\n')\n \n def set_address(self):\n self.__address = input('Provide the person\\'s address:\\n')\n \n def set_age(self):\n self.__age = input('Provide the person\\'s age:\\n')\n \n def set_phoneNum(self):\n self.__phoneNum = input('Provide the person\\'s phone number:\\n')\n\n def get_name(self):\n return self.__name\n \n def get_address(self):\n return self.__address\n \n def get_age(self):\n return self.__age\n \n def get_phoneNum(self):\n return self.__phoneNum\n\n#def main():\npersonnel = []\nfor i in range(1,3):\n person = PersonInfo()\n person.set_name()\n person.set_address()\n person.set_age()\n person.set_phoneNum()\n personnel.append(person)\nfor i in personnel:\n print(f'Name: {i.get_name()} ')\n print(f'Address: {i.get_address()} ')\n print(f'Age: {i.get_age()} ')\n print(f'Phone: {i.get_phoneNum()} ')\n print()\n\n#main()\n"
},
{
"alpha_fraction": 0.6754098534584045,
"alphanum_fraction": 0.6950819492340088,
"avg_line_length": 17.875,
"blob_id": "877270d24157398ffc38c407746585cfdf6de443",
"content_id": "a310b5c84909d78ea191cd3f4d258868ad9ee354",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 16,
"path": "/Networking/netscan3.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport scapy.all as scapy\n\ndef scan(ip):\n\tarp_request = scapy.ARP(pdst=ip)\n\tarp_request.show()\n\tbroadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\t\n\tbroadcast.show()\n\tarp_request_broadcast = broadcast/arp_request\n\tarp_request_broadcast.show()\n\n\n\t\n\nscan(\"10.0.2.2\") # python3 issue \n\n\n"
},
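netscan3.py builds the ARP request and Ethernet broadcast but only show()s them. The usual next step is scapy's srp(), which sends at layer 2 and collects replies; a hedged sketch follows (the /24 range, the timeout value, and the root-privilege caveat are assumptions about typical use, not part of the original lab):

```python
#!/usr/bin/env python
# Sketch: actually sending the ARP broadcast and printing who answered.
# Needs root/admin privileges to put frames on the wire.
import scapy.all as scapy

def scan(ip):
    arp_request = scapy.ARP(pdst=ip)
    broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    # srp() returns (answered, unanswered) lists of (sent, received) pairs
    answered, _ = scapy.srp(broadcast/arp_request, timeout=1, verbose=False)
    for _, reply in answered:
        print(reply.psrc, reply.hwsrc)   # IP and MAC of each responder

scan("10.0.2.0/24")
```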
{
"alpha_fraction": 0.7105997204780579,
"alphanum_fraction": 0.7189679145812988,
"avg_line_length": 35.26582336425781,
"blob_id": "0e3e56762ba5009e2716432ea70d89219eb13bbb",
"content_id": "90be63903550b84f25c861d253a0e7f0725314c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2894,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 79,
"path": "/LABS/Group-Project/grp-prj.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPython Libraries Group Project\n\n The point of this project is to get used to using libraries, gain a deep understanding of the os and sys \n libraries, collaborating with git, and getting used to referencing documentation. One of Python's \n strengths is its many standard libraries that come built into the language. While diving deeper \n into Python you will realize that there are a plethora of libraries, standard or third party, that \n you will eventually have to start using. \n\nRead through os and sys Python Libraries located below:\n\n https://docs.python.org/3/library/os.html \n https://docs.python.org/3/library/sys.html \n\n\nMake an app that takes in user input to get definitions and an example of methods.\nCan be GUI or command line. Definitions and examples can either be displayed\non the command line or written to a file for the user to look at later, or both. \n\nSplit into groups of 4.\n\nEach team member will upload different parts to github by creating a repository and using branching.\nBefore you start splitting up work, sit together and decide how you're going to \nbuild out your app. Use pseudocode to come up with function names and classes.\nYou can split up work by assigning each member to different classes or different \nfunctionality. If you realize you need more functions or more classes then modify \nyour design document. \n\nCommenting code is necessary, your team members, and eventually the rest of the class\nwill need to know how your code works and why you decided to design your program the \nway you did.\n\nUtilize everything we have learned so far\n\nIf you finish early then add extra functionality or add another library of your choice. \nRemember to branch off of master tokeep a working version of your project. Merge branches \nif you are happy with your extra functionality.\n\nRecommended extra libraries to look into:\n Pyxel - pixel games\n Pygame - 2d games\n csv - working with csv files\n NumPy - working with arrays of data\n Pandas - data analysis library\n Matplotlib - plotting, graphs\n Twisted - networking\n\nYou will have until 12:30 PM Tuesday 11/26/2019 to complete this. We will demonstrate our projects\nthat afternoon. \n\n\"\"\"\n\n################DESIGN###########################\n# Need a class\n# -Attributes\n# --Description\n# --Example1\n# --Example2\n# -Methods\n# --DisplayDescription\n# --DisplayExample1\n# --DisplayExample2\n \n###############PSEUDOCODE######################\n# MAIN()\n# If dat file exists\n# read in dat file\n# Else: \n# Read in a text file of the method data\n# import to class\n# Write the data w/ pickle to a dat file\n# DisplayMenu\n# Opt1 - os.XXX\n# Opt2 - sys.XXX\n# return classObj\n# DisplaySelection(classObj)\n# show classObj.description\n# show classObj.example1\n# show classObj.example2 \n\n"
},
{
"alpha_fraction": 0.6610997915267944,
"alphanum_fraction": 0.6932790279388428,
"avg_line_length": 26.288888931274414,
"blob_id": "477c30bb61c9036be46a0ac9f516d8a47709388b",
"content_id": "ee82832cba6493da0a83a01c87c20f5125982cec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2465,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 90,
"path": "/lab-dict-lecture.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Dictionaries\n\n#Creating a dictionary\nphonebook = {'David': '555-6644', 'Chris':'555-1234', 'Katie': '555-2222', 'JoAnn': '555-1122'}\n\n#Adding an element\nphonebook['David'] = '555-5555'\n\n#Delete a key/value pair\ndel phonebook['David']\n\n#Length of dictionary\nlength = len(phonebook)\n\n#Using update\nphonebook.update({'David': '555-6644', 'Chris':'555-1234'})\n\nphonebook2 = {'Jim':'555-9900'}\nphonebook.update(phonebook2)\n\n#A neater way to define a dictionary. Data types can be mixed as well (within keys and values)\ntest_scores = { 'Kayla': [88,92,55],\n 'Luis': [95, 74, 91],\n 'Sophie': [70,75,78] \n }\nkayla_scores = test_scores['Kayla']\n#Getting a specific element of the list value\nkayla_scores[1]\n\n#Empty dictionary \nempty_dict = {}\nempty_dict[1] = 'This is a value'\n\n#Iterating through a dictionary\nfor key in phonebook:\n print(key)\n\nfor key in phonebook:\n print(key, phonebook[key])\n\n#makes the dict empty\n#phonebook.clear()\n\n#get method\n#dictionary.get(key, default)\nvalue = phonebook.get('Katie', 'Entry not found.')\n\nvalue2 = phonebook.get('David', 'Entry not found.')\n\n#items method\nprint(phonebook.items())\n\n#Key method\nfor key in phonebook.keys():\n print(key)\n\nfor value in phonebook.values():\n print(value)\n\n#Pop method - takes value out of dictionary (removes) the key-value pair, and returns the value ONLY\nphonebook.pop('Chris', \"Entry not found\")\n\n#pop item method - takes the latest key-value pair out of the dictionary, and returns the key-value pair\nkey, value = phonebook.popitem()\n\n#Iterate through nested dictionary\n# Multi Dimensional Dictionary\n\n# Intitialize instrument dictionary\ninstruments = {'drums': {'color':'black', 'sound':'boom'},\n 'guitar': {'color':'blue', 'sound':'wahhh'}}\nprint('Original', instruments)\n\n# Add something to the nested dictionary\ninstruments.update({'violin':{'color':'brown', 'sound':'whineeee'}})\nprint('After updating', instruments)\n\n# More ways to add instruments\ninstruments['bass'] = {'color':'purple', 'sound':'slappadabass'}\nprint('AFter adding bass', instruments)\n\n# Access items/nested items within our dictionary\nprint('guitar', instruments['guitar'])\nprint('guitar sound', instruments['guitar']['sound'])\n\n# Iterate through nested dictionary\nfor instrument, properties in instruments.items():\n print('Instrument: ', instrument)\n for property in properties:\n print(property + \":\", properties[property])"
},
{
"alpha_fraction": 0.6224138140678406,
"alphanum_fraction": 0.6310344934463501,
"avg_line_length": 27.317073822021484,
"blob_id": "2667b36d113e3e8e7840ee14da5df91967dcc842",
"content_id": "856f9c75b707bce29b31e5437455f1f1d57829b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 41,
"path": "/LABS/PythonBasicsExam/recFive.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" \nHave a user input a list of at least 5 integers. Write a function to find the \nGCD (greatest common divisor) of two randomly selected numbers from the list by \nusing recursion. Output the answer to the terminal.\n\nThe greatest common divisor of two or more integers, which are not all zero, is \nthe largest positive integer that divides each of the integers. For example, the \ngcd of 8 and 12 is 4.\n\"\"\"\nimport random\n\ndef get_intList():\n userlist = []\n while len(userlist) < 5:\n userInput = input('Provide an integer: ')\n try:\n int(userInput)\n userlist.append(int(userInput))\n except ValueError:\n print('Input Error. Provide an integer: ')\n \n return userlist\n\ndef get_gcd(a, b):\n if b == 0:\n return a\n else:\n return get_gcd(b, a%b)\n\ndef main():\n userlist = get_intList()\n randa = random.randint(0, len(userlist))\n randb = random.randint(0, len(userlist))\n while randa == randb:\n randb = userlist[random.randint(0, len(userlist))]\n a = userlist[randa]\n b = userlist[randb]\n \n print(f'The GCD of {a} and {b} is: {get_gcd(a,b)}')\n\nmain()"
},
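get_gcd() in recFive.py is Euclid's algorithm: each call replaces (a, b) with (b, a % b) until the remainder reaches zero. A worked trace for the docstring's own example of 8 and 12:

```python
# Trace of get_gcd(8, 12):
#   get_gcd(8, 12) -> get_gcd(12, 8 % 12) = get_gcd(12, 8)
#   get_gcd(12, 8) -> get_gcd(8, 12 % 8)  = get_gcd(8, 4)
#   get_gcd(8, 4)  -> get_gcd(4, 8 % 4)   = get_gcd(4, 0)
#   get_gcd(4, 0)  -> b == 0, so return 4
def get_gcd(a, b):
    return a if b == 0 else get_gcd(b, a % b)

assert get_gcd(8, 12) == 4
```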
{
"alpha_fraction": 0.671875,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 29.931034088134766,
"blob_id": "bb73413868fc50349d1c102beb66968d04319089",
"content_id": "268cebbffe7b2f4ed5c6b3ff07ad28e0c787b26c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 29,
"path": "/LABS/FILE-IO/file-io-randomNum.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Importing random library\nimport random\n\n#defining main function\ndef main():\n #Getting filename from input for filename\n filename = input('Provide the new file name:\\n')\n f = open(filename, 'w')\n writeRandNum(getCount(),f)\n\n#Input validation for user input for the number of random numbers to write to the file\ndef getCount():\n #initializing numCount for while condition\n numCount = 0\n while numCount == 0:\n #Testing user input via try/except\n try:\n numCount = int(input('Provide the number of random numbers to put in file:'))\n except:\n print('A number was not provided')\n #Once through the successful try/except input is received return it to main \n return numCount\n\n#Takes the user input and the filename \ndef writeRandNum(num,file):\n for i in range(num):\n file.write('{}\\n'.format(random.randint(1,100)))\n\nmain()"
},
{
"alpha_fraction": 0.6163122653961182,
"alphanum_fraction": 0.6163122653961182,
"avg_line_length": 28.457944869995117,
"blob_id": "ed179525f7d7736b1e103527a8756890f7c66d2e",
"content_id": "38aacd3d84411d060af83a608954f7b2c9732380",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3151,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 107,
"path": "/LABS/Classes/vehicles.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# The Automobile class holds general \n# data about and automobile object\n\nclass Automobile:\n # The __init__ method accepts arguments for\n # the make, model, mileage, and price. It \n # Initializes the data attributes with \n # these values.\n\n # Set initial values for object\n def __init__(self, make, model, mileage, price):\n self.__make = make\n self.__model = model\n self.__mileage = mileage\n self.__price = price\n\n # Setting Settors\n def set_make(self, make):\n self.__make = make\n\n def set_model(self, model):\n self.__model = model\n \n def set_mileage(self, mileage):\n self.__mileage = mileage\n\n def set_price(self, price):\n self.__price = price\n\n # Setting Gettors\n def get_make(self):\n return self.__make \n\n def get_model(self):\n return self.__model\n \n def get_mileage(self):\n return self.__mileage\n\n def get_price(self):\n return self.__price\n \n# The car class represents a car. It is a subclass of the \n# automobile class\nclass Car(Automobile):\n \n # The init method accepts args for the car's make model mileage price and doors\n def __init__(self, make, model, mileage, price, doors):\n # Call the superclass's __init__ method and pass \n # the required args. Note that we also have to \n # pass self as an arg\n super().__init__(make, model, mileage, price)\n\n # Init the __doors\n self.__doors = doors\n\n # The set_doors method is the mutator for the __doors attribute\n def set_doors(self, doors):\n self.__doors = doors\n\n # Gettor for doors\n def get_doors(self):\n return self.__doors\n\n# The ctruck class represents a truck. It is a subclass of the \n# automobile class\nclass Trucks(Automobile):\n \n # The init method accepts args for the truck's make model\n # mileage price and drive type\n def __init__(self, make, model, mileage, price, drive_type):\n # Call the superclass's __init__ method and pass \n # the required args. \n super().__init__(make, model, mileage, price)\n\n # Init the __drive_type attribute\n self.__drive_type = drive_type\n\n # The set_doors method is the mutator for the __doors attribute\n def set_drive_type(self, drive_type):\n self.__drive_type = drive_type\n\n # Gettor for doors\n def get_drive_type(self):\n return self.__drive_type\n\n# The SUV class represents a truck. It is a subclass of the \n# automobile class\nclass SUV(Automobile):\n \n # The init method accepts args for the SUV's make model\n # mileage price and drive type\n def __init__(self, make, model, mileage, price, pass_cap):\n # Call the superclass's __init__ method and pass \n # the required args. \n super().__init__(make, model, mileage, price)\n\n # Init the __pass_cap attribute\n self.__pass_cap = pass_cap\n\n # The set_doors method is the mutator for the __doors attribute\n def set_pass_cap(self, pass_cap):\n self.__pass_cap = pass_cap\n\n # Gettor for doors\n def get_pass_cap(self):\n return self.__pass_cap"
},
{
"alpha_fraction": 0.6572437882423401,
"alphanum_fraction": 0.6572437882423401,
"avg_line_length": 26.764705657958984,
"blob_id": "fc8c7a4c7fea6ccbacaed7dd8766b9c0f75a5b28",
"content_id": "ebbab117882385a74a71af22d370cf4f79fa1fba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1415,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 51,
"path": "/LABS/Pickle/pickle_intro.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#serializing objects is the process of converting an object to a ....\n#stream of bytes that can be saved to a file for later retreival. \n#This is called pickling in python.\n#Good for transferring data, sending RPCs, sending things through a network; not human readable\n# the 'b' in fileo-io modes is binary format which is used for pickling\n\n\"\"\" \noutput_file = open('mydata.dat','wb')\npickle.dump(object, file) \n\"\"\"\n#This program demos object pickling\nimport pickle\n\n#main function\ndef main():\n #Controls loop repetition\n again = 'y'\n\n #open a file for binary writing\n output_file = open('information.dat', 'wb')\n\n #Get data until user quits\n while again.lower() == 'y':\n #Get data on person and save it\n save_data(output_file)\n\n #Check for more user data\n again = input('Enter more data? (y/n): ')\n\n #close the file\n output_file.close()\n\n#The save_data function gets data about a poerson\n# stores it in a directory, and then pickles the\n# dictionary to the specified file\ndef save_data(file):\n #Create empty dict\n person = {}\n\n #Get data for a person\n #person['name'] = input('Name: ')\n #person['age'] = input('Age: ')\n #person['weight'] = input('Weight: ')\n\n person.update({'name': input('Name: '), 'age', input('Age: '), 'weight', float(input('weight: ')) })\n \n #Pickle the dictionary\n pickle.dump(person, file)\n\n#Call main\nmain()"
},
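pickle_intro.py only writes; reading the data back reverses the process with pickle.load() until EOFError, the same pattern storeFront.py's readData() relies on. A minimal sketch:

```python
# Sketch: unpickling every dict written by pickle_intro.py.
import pickle

input_file = open('information.dat', 'rb')
try:
    while True:
        person = pickle.load(input_file)   # one dict per dump() call
        print(person['name'], person['age'], person['weight'])
except EOFError:
    pass                                   # no more pickled objects
finally:
    input_file.close()
```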
{
"alpha_fraction": 0.6843137145042419,
"alphanum_fraction": 0.685620903968811,
"avg_line_length": 36.317073822021484,
"blob_id": "029499e631f06e8d24c1e9041b11eb9c0856cac4",
"content_id": "64f945971ab4be7c07945e1ea1f596a0f5210025",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1530,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 41,
"path": "/LABS/Labs-6-1/lab6-1-bugCounter.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from functools import reduce\n\n#Initializing constant array for days of the week\nDAYS = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n\ndef main():\n #Setting empty array for bug collection numbers\n bugsCollected = []\n #Calls the collect bugs function to capture bug count for each day of the week\n collectBugs(bugsCollected)\n print('You have collected {} bugs this week!'.format(totalBugs(bugsCollected)))\n\ndef collectBugs(collection):\n for i in DAYS:\n #Each iteration calls the getBugs function for input collection \n # and validation to append what is returned to the array passed in \n collection.append(getBugs(i))\n\n#Defining a function to take in a day passed by \n# for loop in collectBugs() as a question \n# and validate input as positive and numeric\ndef getBugs(day):\n bugs = input('How many bugs did you collect on {:s}:\\n'.format(day))\n #Input validation checking numeric and a positive number\n while int(bugs) <= 0 and bugs.replace('.','').isnumeric() == False:\n bugs = input('Invalid input. How many bugs did you collect on {:s}:\\n'.format(day))\n return int(bugs)\n\n#Calculates the total bugs collected \ndef totalBugs(bugs):\n # Replacing original function code with lambda function\n total = reduce((lambda x, y: x + y), bugs)\n # #Initialize total \n # total = 0\n # #Loop through the bugs(array) to add to the total\n # for i in bugs:\n # #Adding the value of i to total\n # total += i\n return total\n\nmain()\n"
},
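The reduce((lambda x, y: x + y), bugs) call folds the list pairwise, which is exactly what the commented-out accumulator loop did; for plain addition the built-in sum() is the idiomatic equivalent:

```python
from functools import reduce

bugs = [3, 7, 0, 12, 5, 9, 2]   # hypothetical week of counts
assert reduce(lambda x, y: x + y, bugs) == sum(bugs) == 38
```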
{
"alpha_fraction": 0.7166990041732788,
"alphanum_fraction": 0.7418997287750244,
"avg_line_length": 41.494117736816406,
"blob_id": "a1a56a42ddc8381d7117e408c80f07d012657cb0",
"content_id": "9e99ebbfe315dec87fc88908d8c8455ff40dfe75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3649,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 85,
"path": "/Algorithms/practice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. Write a tester program that counts and displays the number of iterations \n of the following loop: \n \n while problemSize > 0:\n problemSize = problemSize // 2\n\n\n2. Run the program you created in Exercise 1 using problem sizes of \n 1000, 2000, 4000, 10,000, and 100,000. As the problem size doubles \n or increases by a factor of 10, what happens to the number of iterations?\n\n\n3. The difference between the results of two calls of the function time.time() \n is an elapsed time. Because the operating system might use the CPU for part \n of this time, the elapsed time might not reflect the actual time that a \n Python code segment uses the CPU. Browse the Python documentation for an \n alternative way of recording the processing time, and describe how this \n would be done.\n\n\"\"\"\n\n\"\"\"\n1. Assume that each of the following expressions indicates the number of \noperations performed by an algorithm for a problem size of n. Point out \nthe dominant term of each algorithm and use big-O notation to classify it. \n\n a. 2^n - 4n^2 + 5n O(2^n)\n b. 3n^2 + 6 O(n^2)\n c. n^3 + n^2 - n O(n^3)\n\n2. For problem size n, algorithms A and B perform n^2 and (1/2)n^2 + (1/2)n \ninstructions, respectively. Which algorithm does more work? Are there particular \nproblem sizes for which one algorithm performs significantly better than the \nother? Are there particular problem sizes for which both algorithms perform \napproximately the same amount of work?\n\nThey are the same b/c the dominant portion is the same with n^2. There is no \nperformance advantage of one over the other.\n\n3. At what point does an n^4 algorithm begin to perform better than a 2^n algorithm?\n\nwhen n = 17\n\"\"\"\n\n\"\"\"\n1. Suppose that a list contains the values 20, 44, 48, 55, 62, 66, 74, 88, 93, 99 \nat index positions 0 through 9. Trace the values of the variables left, right, and \nmidpoint in a binary search of this list for the target value 90. Repeat for the \ntarget value 44.\n\n\n2(challenge). The method that’s usually used to look up an entry in a phone book is not \nexactly the same as a binary search because, when using a phone book, you don’t always go \nto the midpoint of the sublist being searched. Instead, you estimate the position of the \ntarget based on the alphabetical position of the first letter of the person’s last name. \nFor example, when you are looking up a number for “Smith,” you look toward the middle of \nthe second half of the phone book first, instead of in the middle of the entire book. \nSuggest a modification of the binary search algorithm that emulates this strategy for a \nlist of names. Is its computational complexity any better than that of the standard \nbinary search? \n\n\"\"\"\n\"\"\"\n1. Which configuration of data in a list causes the smallest number of exchanges \nin a selection sort? Which configuration of data causes the largest number of \nexchanges?\nAscending vs descending order\nSorted vs unsorted\n\n 2. Explain the role that the number of data exchanges plays in the analysis of \n selection sort and bubble sort. What role, if any, does the size of the data \n objects play?\n\n Both of the strategies have a big-O of O(n^2) there for the size of n has a quadratic \n effect on the exchanges\n\n 3. Explain why the modified bubble sort still exhibits O(n^2) behavior on average.\n Because the modification checks to see if it had to swap anything allowing it to \n end earlier. 
However, on average it will still have to do at least half the iterations\n of the outer loop\n\n 4. Explain why insertion sort works well on partially sorted lists. \n \n\"\"\""
},
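The "n = 17" answer in practice.py can be checked by brute force; n^4 and 2^n meet at n = 16 (both 65,536), and 2^n dominates from n = 17 on:

```python
# Check the crossover of n**4 versus 2**n claimed above.
for n in range(1, 21):
    rel = '<' if n**4 < 2**n else ('=' if n**4 == 2**n else '>')
    print(f'n={n:2d}: n^4 {rel} 2^n')
# n^4 exceeds 2^n for 2 <= n <= 15, the two tie at n = 16,
# and 2^n wins from n = 17 on (and trivially at n = 1).
```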
{
"alpha_fraction": 0.7216312289237976,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 23.434782028198242,
"blob_id": "32b83905e79e906bb4a4252030d155b735180ed6",
"content_id": "ea19da6dbfaf1c7b78748f452ed84a3ad0042bb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 564,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 23,
"path": "/LABS/Socket/modules.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Used to execute existing OS commands\nimport os\n\n# Used to execute existing OS commands\nimport sys\n\n# Used to encode text\nimport io\n\n#Creating a data variable to execute a command and store the returned data as a tuple\nfilename = ''\noptions= '-l'\ncommand = 'ls ' + options + \" \" + filename\ndata = os.popen(command)\n\noutput = str(data.read())\n#formatted = io.TextIOWrapper(output, encoding = 'utf-8')\n\nprint(output)\n\n# Leverage sys library to get the size of the output in bytes\nsize = sys.getsizeof(str(output))\nprint(f'The size of the output is {size} bytes')\n\n\n"
},
{
"alpha_fraction": 0.5549362897872925,
"alphanum_fraction": 0.5617038011550903,
"avg_line_length": 35.15107727050781,
"blob_id": "1f005b94934283b60262ba73c7dff3bb48ccb297",
"content_id": "c61285874cd51bec55abd93d4e3172fab51c5b58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5070,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 139,
"path": "/LABS/PythonBasicsExam/Prompt.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPython Basics Performance Exam\n\n This exam is open note, open book, and open internet. Feel free to use any resources\n you can (other than someone else) to solve the following problems. Direct collaboration with another\n individual will result in immediate failure and consequences to follow. If you are unsure about \n whether or not you can use a resource please ask me. If you are unsure about any of the prompts I can \n clarify. \n\n Comments are necessary. \n\n Each problem will weigh the same towards the final grade. 4 Problems at 25% each. \n\n Please send each problem as a .py file separately. Please direct message them to me (Daniel Curran) \n through slack. If there are supporting files for a problem then please send them with the .py file \n as a zipped folder. \n\n You will have 4 hours to complete this exam. If you complete this portion early and I have verified\n I have everything needed to grade your exam then you will be released.\n\n Happy Coding.\n\n\n1. Recursion\n Have a user input a list of at least 5 integers. Write a function to find the \n GCD (greatest common divisor) of two randomly selected numbers from the list by \n using recursion. Output the answer to the terminal.\n\n The greatest common divisor of two or more integers, which are not all zero, is \n the largest positive integer that divides each of the integers. For example, the \n gcd of 8 and 12 is 4.\n\n2. Sorting\n We discussed how to do a selection sort in class, the function is posted below.\n Modify the selectionSort function so it sorts in reverse order and call it\n reverseSort(). Don't use the list method reverse...\n\n#******************************************************\n # The selectionSort function\n def selectionSort(my_list):\n i = 0\n # do n-1 searches for the smallest\n while i < len(my_list) -1:\n minIndex = i\n j = i + 1\n # start a search\n while j < len(my_list):\n if my_list[j] < my_list[minIndex]:\n minIndex = j\n j += 1\n # Exchange if needed\n if minIndex != i:\n swap(my_list, minIndex, i)\n i += 1\n\n # The swap function\n def swap(my_list, i , j):\n # exchanges the positions of i and j \n temp = my_list[i]\n my_list[i] = my_list[j]\n my_list[j] = temp\n\n my_list = [1,4,5,6,2,3,9]\n print(my_list)\n selectionSort(my_list)\n print(my_list)\n#******************************************************\n\n###################DO THIS LAST ONE####################\n3. Singly Linked Lists\n Implement an insert_before() and insert_after() function for singly linked lists.\n insert_before takes in an index as an argument and inserts the node before the given\n index. (Its possible we already did this in class...)\n insert_after takes in an index as an argument and inserts the node after the given\n index.\n\n4. Doubly Linked Lists\n Implement a reverse function by using the doubly linked list below. Do this \n without using the tail node. 
\n\n###################DO THIS LAST ONE####################\n\n#******************************************************\n class Node:\n def __init__(self, data, next = None, prev = None):\n self.data = data\n self.next = next\n self.prev = prev\n\n class DoublyLinkedList:\n def __init__(self):\n self.head = None\n\n # Two cases, empty list and list with items\n def append(self, data):\n newNode = Node(data)\n if self.head is None:\n newNode.prev = None\n self.head = newNode \n else:\n probe = self.head\n while probe.next != None:\n probe = probe.next\n newNode.prev = probe\n probe.next = newNode\n\n def print_list(self):\n probe = self.head\n while probe != None:\n print(probe.data)\n probe = probe.next\n\n def insert_node(self, index, data):\n probe = self.head\n while probe != None:\n if probe.next is None and probe.data == index:\n self.prepend(data)\n elif probe.next == index:\n newNode = Node(data)\n prev = probe.prev\n prev.next = newNode\n newNode.next = probe\n newNode.prev = prev\n probe = probe.next\n\n def reverse(self):\n # implement this function\n\n doubly_linked_list = DoublyLinkedList()\n doubly_linked_list.append(\"A\")\n doubly_linked_list.append(\"b\")\n doubly_linked_list.append([7,245,8,68,\"hello\"])\n doubly_linked_list.insert_node(1, \"one\")\n doubly_linked_list.print_list()\n doubly_linked_list.reverse()\n doubly_linked_list.print_list()\n#******************************************************\n\n\"\"\""
},
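For problem 2 of the exam, flipping the inner comparison is the only change selectionSort needs: track the largest remaining element instead of the smallest. A hedged sketch of one possible answer (not the official solution):

```python
# Sketch: selection sort in descending order -- the comparison is flipped
# so maxIndex tracks the largest remaining element each pass.
def reverseSort(my_list):
    i = 0
    while i < len(my_list) - 1:
        maxIndex = i
        j = i + 1
        while j < len(my_list):
            if my_list[j] > my_list[maxIndex]:   # flipped from '<'
                maxIndex = j
            j += 1
        if maxIndex != i:
            my_list[maxIndex], my_list[i] = my_list[i], my_list[maxIndex]
        i += 1

my_list = [1, 4, 5, 6, 2, 3, 9]
reverseSort(my_list)
print(my_list)   # [9, 6, 5, 4, 3, 2, 1]
```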
{
"alpha_fraction": 0.713850200176239,
"alphanum_fraction": 0.729529619216919,
"avg_line_length": 48.934783935546875,
"blob_id": "4e4210e92c98c6cc9b7e878c659f36dc49807d28",
"content_id": "ac208a85b0d98f5dfdf4a4e040563674844c7627",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2320,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 46,
"path": "/04_functions/recursion-practice.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n1. Recursive Printing\n Design a recursive function that accepts an integer argument, n, and prints the numbers 1\n up through n.\n\n2. Recursive Multiplication\n Design a recursive function that accepts two arguments into the parameters x and y. The\n function should return the value of x times y. Remember, multiplication can be performed\n as repeated addition as follows:\n 7 X 4 = 4 + 4 + 4 + 4 + 4 + 4 + 4\n (To keep the function simple, assume that x and y will always hold positive nonzero\n integers.)\n\n3. Recursive Lines\n Write a recursive function that accepts an integer argument, n. The function should display\n n lines of asterisks on the screen, with the first line showing 1 asterisk, the second line\n showing 2 asterisks, up to the nth line which shows n asterisks.\n\n4. Largest List Item\n Design a function that accepts a list as an argument, and returns the largest value in the list.\n The function should use recursion to find the largest item.\n\n5. Recursive List Sum\n Design a function that accepts a list of numbers as an argument. The function should recursively \n calculate the sum of all the numbers in the list and return that value.\n\n6. Sum of Numbers\n Design a function that accepts an integer argument and returns the sum of all the integers from 1 up \n to the number passed as an argument. For example, if 50 is passed as an argument, the function will \n return the sum of 1, 2, 3, 4, . . . 50. Use recursion to calculate the sum.\n\n7. Recursive Power Method\n Design a function that uses recursion to raise a number to a power. The function should\n accept two arguments: the number to be raised and the exponent. Assume that the exponent is a \n nonnegative integer.\n\n8. Ackermann’s Function\n Ackermann’s Function is a recursive mathematical algorithm that can be used to test how\n well a system optimizes its performance of recursion. Design a function ackermann(m, n),\n which solves Ackermann’s function. Use the following logic in your function:\n If m = 0 then return n + 1\n If n = 0 then return ackermann(m - 1, 1)\n Otherwise, return ackermann(m - 1, ackermann(m, n - 1))\n Once you’ve designed your function, test it by calling it with small values for m and n.\n\n\"\"\""
},
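A hedged sketch of exercises 7 and 8 from the list above (one way to write them, under the prompts' own assumptions of a nonnegative integer exponent and small m, n):

```python
# Exercise 7: recursive power, assuming a nonnegative integer exponent.
def power(base, exp):
    if exp == 0:
        return 1                     # base case: anything**0 == 1
    return base * power(base, exp - 1)

# Exercise 8: Ackermann's function, straight from the prompt's logic.
def ackermann(m, n):
    if m == 0:
        return n + 1
    if n == 0:
        return ackermann(m - 1, 1)
    return ackermann(m - 1, ackermann(m, n - 1))

assert power(2, 10) == 1024
assert ackermann(2, 3) == 9          # stays small only for small m, n
```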
{
"alpha_fraction": 0.5345454812049866,
"alphanum_fraction": 0.5345454812049866,
"avg_line_length": 20.63157844543457,
"blob_id": "b76a29f160cee0dd16e223917b033854fc502360",
"content_id": "150a3e038bff0e6d89d8b842d0486c68f08630b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 825,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 38,
"path": "/LABS/Classes/persons.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "class Person:\n def __init__(self, name, address, number):\n self.__name = name\n self.__address = address\n self.__number = number\n\n # Settors\n def set_name(self, name):\n self.__name = name\n\n def set_address(self, address):\n self.__address = address\n \n def set_number(self, number):\n self.__number = number\n\n # Getters\n def get_name(self):\n self.__name\n\n def get_address(self):\n self.__address\n \n def get_number(self):\n self.__number\n\n\nclass Customer(Person):\n def __init__(self, name, address, number, opt_in):\n super().__init__(name, address, number)\n\n self.__opt_in = opt_in\n \n def set_opt_in(self, opt_in):\n self.__opt_in = opt_in\n\n def get_opt_in(self):\n return self.__opt_in \n\n\n"
},
{
"alpha_fraction": 0.6268922686576843,
"alphanum_fraction": 0.6304541230201721,
"avg_line_length": 33.06060791015625,
"blob_id": "fffcd7b5c788e1eaded10378b8fe224f2f61aec8",
"content_id": "690f1c9330b61324c001ee6dd8948650214ff4fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1123,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 33,
"path": "/LABS/Classes/customers.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import persons\n\ndef main():\n customer = createCustomer()\n printCustomer(customer)\n \ndef createCustomer(): \n name = input('Provide a customer name:\\n')\n address = input('Provide customer address:\\n')\n number = input('Provide a 10-digit phone number (no dashes, spaces, or other punctuation):\\n')\n while number.isnumeric() == False:\n number = input('Invalid input. Provide a 10-digit phone number (no dashes, spaces, or other punctuation):\\n')\n opt_in = input('Are they subscribed to the email program? (Y/N)\\n')\n while opt_in.upper() not in ['Y','N']:\n opt_in = input('Invalid option. Are they subscribed to the email program? (Y/N)\\n')\n if opt_in == 'Y':\n opt_in = True\n else:\n opt_in = False\n \n cust = persons.Customer(name, address, number, opt_in)\n return cust\n\ndef printCustomer(customer):\n print('Name:\\t', customer.get_name())\n print('Address:\\t', customer.get_address())\n print('Phone:\\t', customer.get_number())\n if customer.get_opt_in == True:\n print('Subscribed:\\tYes')\n else: \n print('Subscribed:\\tNo')\n\nmain()"
},
{
"alpha_fraction": 0.6974398493766785,
"alphanum_fraction": 0.7075251936912537,
"avg_line_length": 24.780000686645508,
"blob_id": "10e39f414edd0d4b7d8ee26f570747b95dd6f1c3",
"content_id": "94e215e84debd92ef51de848b30fc50b2db27945",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1289,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 50,
"path": "/Practice_Test/parseArgsSys.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n'''\n\nWrite a python script that parses one or more command line items. The items may be\nconvertable to either an int or float. If not, assume it is a string.\n\nFor example, you should be able to run your file from the command line like:\n\ntestfile.py 1 3 4.7 hello 5.4 2 hi\n\nIn the above example, there are seven arguments to parse and process.\n\nThe script should create a variable to store a running total of an integer.\n\nThe running total will be increased accordingly as you parse each\nargument from the command line:\n 1. If the argument is convertable to an int, add the int to the total\n 2. Otherwise, if the argument is converatable to a float, round the float\n to an int and add it to total.\n 3. If the argument is convertable to neither an int or float, obtain the\n length of the string and add the length to the total.\n \nAfter all command line arguments have been parsed, write the single total to a file\ncalled output.txt\n\n\n\n'''\n\nimport sys\nsys.argv.pop(0)\n#print(len(sys.argv))\n\n#print(sys.argv)\nfilename = \"output.txt\"\n\ntotal = 0\nfor i in sys.argv:\n try:\n i = float(i)\n i = int(round(i))\n total += i\n except ValueError:\n total += len(i)\n \n#print(total)\n\nf = open(filename, \"w\")\nf.writelines(str(total))\nf.close()\n"
},
{
"alpha_fraction": 0.5575641989707947,
"alphanum_fraction": 0.6065651774406433,
"avg_line_length": 31.33846092224121,
"blob_id": "ecb3881ebe80119e986de9da86b922b6ceaa596b",
"content_id": "bd897fcb7e03225158b4ebbc8584c58b339c7477",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2102,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 65,
"path": "/Algorithms/practice2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import timeit\n\ndef wrapper(func, *args, **kwargs):\n def wrapped():\n return func(*args, **kwargs)\n return wrapped\n\n# This is meant to find the values of 44 and 90 in a sorted list\n\ndef binSearch(target, sortedList):\n left = 0\n right = len(sortedList) - 1\n while left <= right:\n midpoint = (left + right) // 2\n if target == sortedList[midpoint]:\n return midpoint\n elif target < sortedList[midpoint]:\n right = midpoint -1\n else:\n left = midpoint + 1\n # Trace values print statement\n #print(f'Left = {left}, Right = {right}, Midpoint = {midpoint}')\n return -1\n\n# myList = [20, 44, 48, 55, 62, 66, 74, 88, 93, 99]\n# print(binSearch(44, myList))\n# print(binSearch(90, myList))\n\n# Returns index of x in arr if present, else -1 \ndef binarySearch (arr, l, r, x): \n \n # Check base case \n if r >= l: \n \n mid = l + (r - l)//2\n \n # If element is present at the middle itself \n if arr[mid] == x: \n return mid \n \n # If element is smaller than mid, then it can only \n # be present in left subarray \n elif arr[mid] > x: \n return binarySearch(arr, l, mid-1, x) \n \n # Else the element can only be present in right subarray \n else: \n return binarySearch(arr, mid+1, r, x) \n \n else: \n # Element is not present in the array \n return -1 \n\nmyList = [20, 44, 48, 55, 62, 66, 74, 88, 93, 99]\n#print(binarySearch(myList, 0, len(myList)-1, 44))\n#print(binarySearch(myList, 0, len(myList)-1, 90))\nwrapped = wrapper(binSearch, 44, myList)\nprint(f'Non-Recursive search for 44: {timeit.timeit(wrapped, number=1000)}')\nwrapped = wrapper(binSearch, 90, myList)\nprint(f'Non-Recursive search for 90: {timeit.timeit(wrapped, number=1000)}')\n\nwrapped = wrapper(binarySearch, myList, 0, len(myList)-1, 44)\nprint(f'Recursive search for 44: {timeit.timeit(wrapped, number=1000)}')\nwrapped = wrapper(binarySearch, myList, 0, len(myList)-1, 90)\nprint(f'Recursive search for 90: {timeit.timeit(wrapped, number=1000)}')\n"
},
{
"alpha_fraction": 0.5808922648429871,
"alphanum_fraction": 0.5993691086769104,
"avg_line_length": 37.938594818115234,
"blob_id": "72691f914c209874864ed3b5082911e9848cddef",
"content_id": "f23a02133a29ed5532c022b22ea9feb2508d44cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4438,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 114,
"path": "/LABS/List-Tuple/list-tuple-tictactoe.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#define board\nboard = [['1','2','3'],['4','5','6'],['7','8','9']]\n#define main function\ndef main():\n #Prints welcome\n print('Let\\'s play Tic-Tac-Toe')\n #Initializes move counter for determining the turns and marker\n moveNum = 1 \n #An array of possible moves used to track move history\n moves = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n #A while loop to go through all possible moves unless there is a winner first\n while moveNum <= len(moves):\n #Calls method to print board's current state\n printBoard(board, moveNum)\n #Calls function to get player move taking into account move history and current board state\n getMove(board,moveNum,moves)\n #Calls function to check win conditions\n winner = checkBoard(board)\n #checks the winner array to determine who has one if there is a winner\n if winner[0] == True:\n print('Player \\'X\\' is the winner')\n break\n elif winner[1] == True:\n print('Player \\'O\\' is the winner')\n break\n moveNum += 1\n #If no winner has been determined by the time the while loop ends the game assumes a tie \n print('This game was a tie')\n \n\n#The board printing function\ndef printBoard(board, moveNum):\n #Uses moveNum to determine whose turn it is for the prompt\n if moveNum % 2 == 1:\n print('X\\'s turn to move:')\n else:\n print('O\\'s turn to move:')\n #prints each row in the matrix\n for row in board:\n print(row)\n\n#Gets move input with input validation\ndef getMove(board,moveNum,moves):\n #Sets the marker based on moveNum\n if moveNum % 2 == 1:\n marker = 'X'\n else:\n marker = 'O'\n\n #Initializes 'place' for while loop\n place = 'z'\n #Checking to see if the input is numeric for loop sentinel value\n while place.isnumeric() == False:\n #Take in input for 'place'\n place = input('Where would you want to place your \\'{}\\'?\\n'.format(marker))\n #Tries to force it to an integer which is necessary after input\n try:\n #Nested while loop to ensure that the input exists in the valid available moves\n while int(place) not in moves:\n place = input('Invalid input of {}. Where would you want to place your \\'{}\\'?\\n'.format(place, marker))\n except ValueError:\n #An error that shows them their input is invalid with available options\n print('Input must be a number in ', moves)\n place = input('Where would you want to place your \\'{}\\'?\\n'.format(marker))\n \n #A for loop that checks each row...\n for row in board:\n #... 
for the presence of the user's input to get it's index\n if place in row:\n moveIndex = row.index(place)\n #replaces the existing element with the marker\n row[moveIndex] = str(marker)\n #removes the selected move from the list of possibilities\n moves.remove(int(place))\n\n#the function to check win conditions\ndef checkBoard(board):\n #Defining each winning line on the 3x3 board\n topRow = [board[0][0],board[0][1],board[0][2]]\n midRow = [board[1][0],board[1][1],board[1][2]]\n botRow = [board[2][0],board[2][1],board[2][2]]\n leftCol = [board[0][0],board[1][0],board[2][0]]\n midCol = [board[0][1],board[1][1],board[2][1]]\n rgtCol = [board[2][0],board[2][1],board[2][2]]\n tlbrDiag = [board[0][0],board[1][1],board[2][2]]\n bltrDiag = [board[2][0],board[1][1],board[0][2]]\n #A list of lists making up the win conditions\n winCons = [topRow, midRow, botRow, leftCol, midCol, rgtCol, tlbrDiag, bltrDiag]\n #Booleans for X wins or O wins \n xWin = False\n oWin = False\n #Loops through each win condition \n for line in winCons:\n #Setting the counters for 'x' or 'o'\n xCount = 0\n oCount = 0\n #loops through each element in the win condition\n for j in line:\n #checks for 'X' and increments if it exists\n if j == 'X':\n xCount += 1\n #checks for 'O' and increments if it exists\n elif j == 'O':\n oCount += 1\n #If either of these counts reaches three then the win condition is met and the boolean is set to true\n if xCount == 3:\n xWin = True\n elif oCount == 3:\n oWin = True\n #return win condition state to main\n return xWin, oWin\n \n\nmain()"
},
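The checkBoard routine above enumerates all eight winning lines of the 3x3 board by hand, which is easy to get wrong. A minimal alternative sketch (my own illustration, not part of the repo) derives the lines programmatically; it assumes `board` is a 3x3 list of lists of one-character strings, as in the file above:

```python
# Sketch: derive the eight winning lines instead of listing them by hand.
def check_board(board):
    lines = [row[:] for row in board]                              # three rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]   # three columns
    lines.append([board[i][i] for i in range(3)])                  # main diagonal
    lines.append([board[2 - i][i] for i in range(3)])              # anti-diagonal
    x_win = any(line == ['X'] * 3 for line in lines)
    o_win = any(line == ['O'] * 3 for line in lines)
    return x_win, o_win
```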
{
"alpha_fraction": 0.6410142183303833,
"alphanum_fraction": 0.6450178027153015,
"avg_line_length": 34.140625,
"blob_id": "748b5c0a9aef0797fd04392dddb717d6bc1860ba",
"content_id": "31f98bb1bc9bab0a924d09c9adbb80042fdda2af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2248,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 64,
"path": "/LABS/Classes/storeFront.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# Still need to add clear register function\n\nimport retailItem\nimport cashregister\nimport pickle\n\ndef main():\n inv_file = 'inventory.dat'\n inventory = readData(inv_file)\n register = cashregister.CashRegister()\n\n keepGoing = ''\n while keepGoing.upper() != 'N':\n selection = -1\n printInv(inventory)\n while int(selection) not in range(1,len(inventory)+1):\n selection = input('Make a selection by choosing the item number:')\n addCart(selection, inventory, register)\n keepGoing = input('Add another item? (Y/N)')\n options = ['Y','N']\n while keepGoing.upper() not in options:\n keepGoing = input('Add another item? (Y/N)')\n register.set_total()\n print('Total purchase:\\n${:.2f}'.format(register.get_total()))\n \n\n \n\ndef printInv(inventory):\n print('\\tDescription:\\tUnits in Inventory:\\t\\tPrice:')\n for item in range(len(inventory)):\n print(f'Item#{item+1}\\t{inventory[item].get_desc()}\\t\\t{inventory[item].get_unitCount()}\\t\\t{inventory[item].get_price()}')\n\ndef addCart(selection, inventory, register):\n item = retailItem.RetailItem()\n desc = inventory[int(selection)-1].get_desc()\n price = inventory[int(selection)-1].get_price()\n unitCount = inventory[int(selection)-1].get_unitCount()\n item.set_desc(desc)\n item.set_price(price)\n purchase_count = input('How many would you like to purchase?\\n')\n while purchase_count.isnumeric == False or int(purchase_count) > int(unitCount) :\n purchase_count = input('We do not have that many in stock. How many would you like to purchase?\\n')\n item.set_unitCount(purchase_count)\n inventory[int(selection)-1].set_unitCount(int(unitCount) - int(purchase_count))\n register.purchase_item(item)\n\ndef readData(filename):\n # Opening the file in read mode\n data = open(filename, 'rb')\n # Setting EOF to false\n end_of_file = False\n #Setting while loop to get each object in binary file\n while not end_of_file:\n try:\n #unpickle next object\n output = pickle.load(data)\n return output\n except EOFError:\n #Set flag to indicate EOF reached\n end_of_file = True\n data.close()\n\nmain()"
},
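storeFront.py expects an 'inventory.dat' produced elsewhere; the writer script is not shown in this listing. A minimal sketch of how such a file could be created (the item names, counts, and prices here are made up, and the RetailItem setters are assumed from their use in storeFront.py above):

```python
import pickle
import retailItem

# Build a small inventory list of RetailItem objects
items = []
for desc, count, price in [('Jacket', 12, 59.95), ('Shirt', 20, 24.95)]:
    item = retailItem.RetailItem()
    item.set_desc(desc)
    item.set_unitCount(count)
    item.set_price(price)
    items.append(item)

# The whole list is pickled as a single object, matching what readData() loads
with open('inventory.dat', 'wb') as f:
    pickle.dump(items, f)
```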
{
"alpha_fraction": 0.6429980397224426,
"alphanum_fraction": 0.6577908992767334,
"avg_line_length": 34,
"blob_id": "76d01545fa2beaa8cb91962caeef8204cbe4abf8",
"content_id": "ba6749e2e0ee1e03057386350d994bc56cb0a66e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1014,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 29,
"path": "/LABS/Labs-3-4/lab3-4-1-romanNumerals.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Set Roman numeral array\nNUMERALS = [\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\",\"X\"]\n\ndef main():\n #Store user input for the number they want to convert\n selection = getNumber()\n #Displays the number based on user input and it's reference to the dict\n print('Your number in roman numerals is {}'.format(getNumeral(selection)))\n\n#Takes the number they input and subtracts 1 to fix 'off by one'\ndef getNumeral(num):\n return NUMERALS[num-1]\n\n#Function with input validation to ensure it is in range\ndef getNumber():\n #Initializes selection\n select = 0 \n #Ensures number provided is between 1 and 10\n while int(select) > 10 or int(select) < 1:\n select = input('Please provide a number between 1 and 10:\\n')\n #determines which side of the range they are on for an adjustment\n if int(select) > 10:\n print('Number too high!')\n elif int(select) < 1:\n print('Number too low!')\n #returns the selection to main\n return int(select)\n\nmain()"
},
{
"alpha_fraction": 0.5707635283470154,
"alphanum_fraction": 0.5772811770439148,
"avg_line_length": 30.58823585510254,
"blob_id": "da47f7d4fdfbcaf9a97e318006ba03d9205aee2d",
"content_id": "db14fe2bbe204da9d94247d06e14adf3ffc04903",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 34,
"path": "/LABS/List-Tuple/list-tupl-license.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting list of correct answers\nanswers = ['b','d','a','a','c','a','b','a','c','d','b','c','d','a','d','c','c','b','d','a']\n\ndef main():\n #Getting filename from input for filename\n filename = input('Provide the file name of the test:\\n')\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n incorrect = []\n total = 0\n total = int(gradeTest(contents,answers,incorrect))\n scoreTest(total,incorrect)\n \ndef gradeTest(test,answers,incorrect):\n total = 0\n for i in range(len(answers)):\n if test[i].lower().rstrip('\\n') == answers[i].lower():\n total += 1\n else:\n incorrect.append(i+1)\n return total\n\ndef scoreTest(total,incorrect):\n if total/len(answers) >= 0.75:\n print('You passed!')\n print('You got {} of {} correct.'.format(total,len(answers)))\n else:\n print('You failed!')\n print('You got {} of {} correct.'.format(total,len(answers)))\n print('Questions missed: ', incorrect)\n \nmain()\n"
},
{
"alpha_fraction": 0.6463414430618286,
"alphanum_fraction": 0.6646341681480408,
"avg_line_length": 21.409090042114258,
"blob_id": "41097297d8e3a19ec7041f075ff7ba19cc34440c",
"content_id": "766fc752bd8438c06d551246f13b7295be88dbb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 22,
"path": "/LABS/Labs-3-4/lab3-4-3-weight.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining Coefficient for the weight equation\nCOEFF = float(9.8)\n\n#Defining main function\ndef main():\n weight = calcWeight(float(input('Please provide the mass of the object:')))\n heavyOrLight(weight)\n\ndef calcWeight(mass):\n return mass * COEFF\n\ndef heavyOrLight(newtons):\n if newtons > 1000:\n print(\"Too heavy!\")\n elif newtons < 10:\n print(\"Too light.\")\n else:\n print(\"{:.2f} newtons is in the acceptable range.\".format(newtons))\n\n\n#calling main\nmain()"
},
{
"alpha_fraction": 0.6891334056854248,
"alphanum_fraction": 0.7028886079788208,
"avg_line_length": 44.4375,
"blob_id": "7c75d535aeac7ac97939bcc129e26b73b9cc6b2e",
"content_id": "e7f3f076a77cd40b5af1f741c1240b80de1a40a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 727,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 16,
"path": "/LABS/Labs-3-1/lab3-1-6.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting variable for state and county tax\nstateTax = 0.04\ncountyTax = 0.02\n\n#User input for amount of purchase\npurchase = input(\"Input amount of purchase: \\n\")\n#Input validation for numeric\n#Using .replace to get rid of decimal to validate input is numeric\n\nwhile purchase.replace('.','').isnumeric() == False:\n purchase = input(\"Input was not valid. Input amount of purchase:\\n\")\n\nprint('State Tax: ${:.2f}'.format(float(purchase) * stateTax))\nprint('County Tax: ${:.2f}'.format(float(purchase) * countyTax))\nprint('Total Tax: ${:.2f}'.format((float(purchase) * stateTax) + (float(purchase) * countyTax) ))\nprint('State Tax: ${:.2f}'.format((float(purchase) * stateTax) + (float(purchase) * countyTax) + float(purchase)))\n"
},
{
"alpha_fraction": 0.5780885815620422,
"alphanum_fraction": 0.6146076321601868,
"avg_line_length": 32.02564239501953,
"blob_id": "aa32c536e8c96a05a62ba4224c732e696aeeda27",
"content_id": "cec9b35f91459a19badc1c6184a15766789aa37c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1287,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 39,
"path": "/LABS/Classes/car-truck-suv-demo.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program creates a Car and SUV objects\n\nimport vehicles\n\ndef main ():\n # Create an object from Car class\n # The car is a 2007 Audi with 12,500 miles, price at $21,500.00, and has 4 doors\n new_car = vehicles.Car('Bugatti', 'Veyron', 0, 3000000.00, 2)\n\n # Display car's data\n print('Make: ', new_car.get_make())\n print('Model: ', new_car.get_model())\n print('Mileage: ', new_car.get_mileage())\n print('Price: ', new_car.get_price())\n print('Doors: ', new_car.get_doors())\n print()\n\n # Create an object from Truck class\n new_truck = vehicles.Trucks('Dodge', 'Power Wagon', 0, 57000.00, '4WD')\n\n # Display car's data\n print('Make: ', new_truck.get_make())\n print('Model: ', new_truck.get_model())\n print('Mileage: ', new_truck.get_mileage())\n print('Price: ', new_truck.get_price())\n print('Drive Type: ', new_truck.get_drive_type())\n print()\n\n # Create an object from SUV class\n new_suv = vehicles.SUV('Jeep', 'Grand Cherokee SRT8', 0, 57000.00, 7)\n\n # Display car's data\n print('Make: ', new_suv.get_make())\n print('Model: ', new_suv.get_model())\n print('Mileage: ', new_suv.get_mileage())\n print('Price: ', new_suv.get_price())\n print('Passenger Capacity: ', new_suv.get_pass_cap())\n print()\nmain()"
},
{
"alpha_fraction": 0.5772727131843567,
"alphanum_fraction": 0.6204545497894287,
"avg_line_length": 29.068965911865234,
"blob_id": "c2391ff92d790f0318fb37b9274654c42d56e770",
"content_id": "97dbc95c3d81f4e822260bf706ff737ec23fecf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 29,
"path": "/LABS/PerfExam/prompt4.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\n4. \n (The sqrt function) Write a program that prints the following table\n using your knowledge of loops and the sqrt function in the math module.\n Make sure your table is neat by using print formatting methods we've learned. \n\n Number Square Root\n 0 0.0000\n 1 1.0000\n 2 1.4142\n ...\n 18 4.2426\n 20 4.4721\n # could use list comprehension and lambda for this after importing math sqrt\n\"\"\"\n# This program is mean to perform the prompt above\n\n# Importing the sqrt() function from math\nfrom math import sqrt\ndef main():\n # Printing table header\n print('Number\\tSquare Root')\n # Looping through numbers 0-20\n for i in range(21):\n # Printing the number with a \\t for spacing readability and \n # the actual sqrt result\n print(f'{i}\\t{sqrt(i)}')\n\nmain()\n "
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6375661492347717,
"avg_line_length": 20.05555534362793,
"blob_id": "b92d1fb3123b2ab2509c8feb626ae70eb0dfe047",
"content_id": "83fd32fa549ec5053335551a9b7d0b8d4f2e3578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 18,
"path": "/LABS/Iteration/iter.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "myList = [1, 2, 3, 4]\n\nl1 = [1, 2, 3, 4]\nit = iter(l1)\nprint(it.__next__())\nprint(it.__next__())\nprint(it.__next__())\nprint(it.__next__())\n# This will produce a 'StopIteration' error\nprint(it.__next__())\n\n# An alternative to the statement above\nprint(next(it))\nprint(next(it))\nprint(next(it))\nprint(next(it))\n\n# If something is not iterable it will say 'object is not iterable'"
},
{
"alpha_fraction": 0.4953271150588989,
"alphanum_fraction": 0.5327102541923523,
"avg_line_length": 10.777777671813965,
"blob_id": "7c1112a0f859c27fa8d8473d2750c6fe1e7a2030",
"content_id": "aca3ccb07a0a22e08c6ac6a7bef198fbf7682fe3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 9,
"path": "/LABS/Projects/test.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def incDeath(x):\n x = x + 1\n \n\ndeath = 0\n\nfor i in range(0,6):\n incDeath(death)\n print(death)\n\n"
},
{
"alpha_fraction": 0.7169172763824463,
"alphanum_fraction": 0.7169172763824463,
"avg_line_length": 44.879310607910156,
"blob_id": "37c5c209079b920c816c6a06442630b6ee933fe2",
"content_id": "c15a209c7ed44c2c0c2c005f9711e05a510cce40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2698,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 58,
"path": "/LABS/Classes/cellphone.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWireless Solutions, Inc. is a business that sells cell phones and wireless service. \nYou are a programmer in the company’s IT department, and your team is designing a program to manage\nall of the cell phones that are in inventory. You have been asked to design a class that represents \na cell phone. The data that should be kept as attributes in the class are as follows:\n\n • The name of the phone’s manufacturer will be assigned to the __manufact attribute.\n • The phone’s model number will be assigned to the __model attribute.\n • The phone’s retail price will be assigned to the __retail_price attribute.\n\nThe class will also have the following methods:\n • An __init__ method that accepts arguments for the manufacturer, model number,\n and retail price.\n • A set_manufact method that accepts an argument for the manufacturer. This\n method will allow us to change the value of the __manufact attribute after the object has been created, if necessary.\n • A set_model method that accepts an argument for the model. This method will allow\n us to change the value of the __model attribute after the object has been created, if\n necessary.\n • A set_retail_price method that accepts an argument for the retail price. This\n method will allow us to change the value of the __retail_price attribute after the\n object has been created, if necessary.\n • A get_manufact method that returns the phone’s manufacturer.\n • A get_model method that returns the phone’s model number.\n • A get_retail_price method that returns the phone’s retail price.\n\"\"\"\n\n# The CellPhone class pulls data about the cell phone\n\nclass CellPhone:\n # The __init__ method initializes the attributes\n def __init__(self,manufact, model, price):\n self.__manufact = manufact\n self.__model = model\n self.__retail_price = price\n\n #The set_manufact method accepts an argument for the phone's manufacturer\n def set_manufact(self, manufact):\n self.__manufact = manufact\n\n # The set_model method accepts an argument for the phone's model number\n def set_model(self, model):\n self.__model = model\n\n # The set_retail_price method accepts and arguement for retail price\n def set_retail_price(self, price):\n self.__retail_price = price \n\n # The get_manufact method returns the phones manufacturer\n def get_manufact(self):\n return self.__manufact\n\n # The get_model method returns the phones model number\n def get_model(self):\n return self.__model\n\n # The get_price method returns the phones price\n def get_retail_price(self):\n return self.__retail_price"
},
{
"alpha_fraction": 0.6157845258712769,
"alphanum_fraction": 0.6327977180480957,
"avg_line_length": 38.943397521972656,
"blob_id": "9b8d07f247db20aae9b0f0ebeeb4ee0caa79d1db",
"content_id": "517e115b07dbf4d99c2dd2fabfa202434499ed14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2116,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 53,
"path": "/LABS/Labs-3-4/lab3-4-4-magicDate.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Defining main\ndef main():\n #Requesting user input for date\n year = getYear()\n month = getMonth()\n day = getDay(month,year)\n \n #Calling the isMagic function to check the date\n isMagic(month,day,year)\n\n#defining function to get the month while incorporating some input validation\ndef getMonth():\n month = int(input('Please provide the month (in numerical form):\\n'))\n while month < 1 or month > 12:\n month = int(input('Invalid response. Please provide the month (in numerical form, 1-12):\\n'))\n return month\n\n#defining function to get the month while incorporating some input validation\ndef getYear():\n year = (input('Please provide the 2-digit year:\\n'))\n while int(year) < 0 or int(year) > 99:\n year = (input('Invalid response. Please provide the 2-digit year:\\n'))\n return year\n\n#defining the validation for days including the month of feb and the leap year\ndef getDay(m,y):\n day = int(input('Please provide the day of the month:\\n'))\n if m == 2 and int(y)%4 == 0:\n while day < 1 or day > 29:\n day = int(input('Invalid respone for the month of February in a leap year. Please provide the day of the month:\\n'))\n elif m == 2:\n while day < 1 or day > 28:\n day = int(input('Invalid respone for the month of February. Please provide the day of the month:\\n'))\n elif (m < 8 and m % 2 == 0) or (m > 7 and m % 2 == 1):\n while day < 1 or day > 30:\n day = int(input('Invalid respone for the months of FEB, APR, JUN, SEP, and NOV. Please provide the day of the month:\\n'))\n else:\n while day < 1 or day > 31:\n day = int(input('Invalid respone; days cannot be less than 1 or greater than 31. Please provide the day of the month:\\n'))\n return day\n\n#defining magic check function\ndef isMagic(m,d,y):\n #condition formula is the magic date calculation\n if m * d == y:\n #Displaying Magic message!\n print('{}/{}/{} is magic!'.format(m,d,y))\n else:\n #Displaying not magic message.\n print('{}/{}/{} is NOT magic. :-/'.format(m,d,y))\n\n#Calling Main\nmain()"
},
{
"alpha_fraction": 0.6458333134651184,
"alphanum_fraction": 0.648065447807312,
"avg_line_length": 27.617021560668945,
"blob_id": "a515bad7583184ce053029bc21a56d01f60a411e",
"content_id": "a2eb8458f1dfd8e3ee3abfc68c69b6299480a6fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1344,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 47,
"path": "/LABS/Classes/cellphone_list.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program creates five CellPhone objects and\n# stores them in a list\n\nimport cellphone as c\n\ndef main():\n # Get a list of CellPhone\n phones = make_list()\n\n # Display the data in the list\n print('Here is the data you entered:')\n display_list(phones)\n\n# The make_list function gets data from the user \n# for five phones. The function returns a list \n# of CellPhone Objects containing the data\ndef make_list():\n #Create empty list\n phone_list = []\n\n #Add five CellPhone objects to the list\n print('Enter data for five phones.')\n for count in range(1,6):\n #Get the phone data\n print('Phone number ' +str(count) +':')\n man = input('Enter the manufacturer:\\n')\n mod = input('Enter the model number:\\n')\n retail = float(input('Enter the retail price:\\n'))\n print()\n\n phone = c.CellPhone(man,mod,retail)\n\n phone_list.append(phone)\n \n return phone_list\n\n# The display_list function gets data from the list \n# passed as an argument. The function returns a list \n# of CellPhone Objects containing the data\ndef display_list(phone_list):\n for phone in phone_list:\n print('Manufacturer: ', phone.get_manufact())\n print('Model Number: ', phone.get_model())\n print(f'Retail Price: ${phone.get_retail_price():,.2f}')\n print()\n\nmain()"
},
{
"alpha_fraction": 0.6821829676628113,
"alphanum_fraction": 0.6950240731239319,
"avg_line_length": 31.842105865478516,
"blob_id": "1a8ef3091ec298c53d13ede8ea65e4ebed382ee8",
"content_id": "3e9a8c0f660604c06f357b2662e336eb1037355b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 623,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 19,
"path": "/LABS/Labs-4-1/lab4-1-kmToMi.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting the equation coefficient for KM conversion to miles\nCOEFF = 0.6214\n\ndef main():\n km = getKm()\n print('The number of miles in {:.2f} kilometers is {:.2f}'.format(km,kmToMi(km)))\n\n#defining function to get the miles while incorporating some input validation\ndef getKm():\n miles = int(input('Please provide the # of kilometers to convert:\\n'))\n while miles < 1 or str(miles).isnumeric() == False:\n miles = int(input('Invalid input. Please provide the # of kilometers to convert:\\n'))\n return miles\n\n#defining function to make the calculation.\ndef kmToMi(km):\n return float(km) * COEFF\n\nmain()"
},
{
"alpha_fraction": 0.6576576828956604,
"alphanum_fraction": 0.7117117047309875,
"avg_line_length": 17.66666603088379,
"blob_id": "92dc17e54f4949d3be9a2f804a3841ac09f96765",
"content_id": "99d3e86fd244e4b00ea25b2fcaed91f5b63694fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 6,
"path": "/LABS/Socket/firstchars.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "import requests\n\nurl = 'http://www.py4inf.com/code/romeo-full.txt'\n\nr = requests.get(url)\nprint(r.text[0:3000])"
},
{
"alpha_fraction": 0.6010100841522217,
"alphanum_fraction": 0.6237373948097229,
"avg_line_length": 24.580644607543945,
"blob_id": "09801381b8fb451e7f56c6156819c8daba5d5be2",
"content_id": "a25b5652dc5adcd47e27af9a972b4abc9a72616f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 31,
"path": "/LABS/Classes/coin_instances.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program imports the simulation module and\n# creates three instances of the Coin class\n\nimport coin\n\ndef main ():\n #Create three coins from the Coin class\n coin1 = coin.Coin()\n coin2 = coin.Coin()\n coin3 = coin.Coin()\n\n #Show the state of the coins\n print('I have three coins with these sides up:')\n print('Coin 1: ', coin1.get_sideup())\n print('Coin 2: ', coin2.get_sideup())\n print('Coin 3: ', coin3.get_sideup())\n\n #Toss the coins\n print('I am tossing the coins...')\n print()\n coin1.toss()\n coin1.toss()\n coin1.toss()\n\n #Show the state of the coins\n print('I have three coins with these sides up:')\n print('Coin 1: ', coin1.get_sideup())\n print('Coin 2: ', coin2.get_sideup())\n print('Coin 3: ', coin3.get_sideup())\n\nmain()"
},
{
"alpha_fraction": 0.6367501020431519,
"alphanum_fraction": 0.6452527046203613,
"avg_line_length": 35.5,
"blob_id": "9fad3aeeec457ecfd33780291976a24172786b56",
"content_id": "756db9abc7de6084c128840b5752a4fb7f32192b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2117,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 58,
"path": "/Practice_Test/findparks.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\nWrite a function find_parks that takes a list of dictionaries and a string that represents \na state name. The state name may or may not be capitalized but should match regardless.\n\nThe list contains a collection of dictionaries that represent state parks. The dictionaries each\ncontain a state name, park name, and cost of camping at the park. The dictionary's keys are\n\"state\", \"park\", and \"cost\". \n\nExample: [{\"state\":\"Texas\",\"park\":\"Guadalupe\",\"cost\": 12.50},\n {\"state\":\"Michigan\",\"park\":\"Sterling\",\"cost\": 8.50},\n {\"state\":\"Texas\",\"park\":\"Pedernales\",\"cost\": 13.50}]\n\nThe state name may or may not be capitalized but should match \nregardless when compared to the state name being searched.\nThe function should iterate through the list of dictionaries and find all parks that reside\nin the state name passed to the function and has a cost of less than 15 dollars. \nIf a match is found, store the park name as a string in a python set. \n\nif the list or state name passed to the function is empty or None, return \"INVALID_DATA\"\n\nThe minimum cost for a state park is $8. Any park listed with cost lower than 8, the function should\nreturn \"INVALID_DATA\"\n\nif any key errors are encountered with the dictionary, the function should return the string \"KEY_ERROR\"\n\nThe function will return the set when the list is successfully iterated. Return an empty set \nif no parks are found.\n'''\n\n\ndef find_parks(parkList, state):\n output = []\n if state==\"\" or state == None or parkList == []:\n return \"INVALID_DATA\"\n else:\n state = state.lower()\n try:\n for park in parkList:\n try:\n if park[\"cost\"] < 8:\n return \"INVALID_DATA\"\n elif park[\"state\"].lower() == state:\n if park[\"cost\"] < 15:\n output.append(park[\"park\"])\n else:\n pass\n except KeyError:\n return \"KEY_ERROR\"\n except TypeError:\n return \"INVALID_DATA\"\n\n \n \n\n return set(output)\n\n# stateParks = []\n# print(find_parks(None, \"texas\"))\n"
},
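A short usage sketch for find_parks, reusing the sample data from its own docstring:

```python
# Usage sketch for find_parks, with the docstring's example list
parks = [{"state": "Texas", "park": "Guadalupe", "cost": 12.50},
         {"state": "Michigan", "park": "Sterling", "cost": 8.50},
         {"state": "Texas", "park": "Pedernales", "cost": 13.50}]

print(find_parks(parks, "TEXAS"))   # {'Guadalupe', 'Pedernales'} - match is case-insensitive
print(find_parks(parks, None))      # INVALID_DATA
print(find_parks([], "Texas"))      # INVALID_DATA
```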
{
"alpha_fraction": 0.6788321137428284,
"alphanum_fraction": 0.6788321137428284,
"avg_line_length": 24.407407760620117,
"blob_id": "74ad64a99aaecaa565b49fe991ed6c64e1e0c0e8",
"content_id": "a52c8ef27453328b2fa03fb58f7c1edf20253de9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 27,
"path": "/LABS/Classes/account_test2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This program demos the BankAccount class\nimport bankaccount\n\ndef main():\n # get the starting balance \n start_bal = float(input('Enter your starting balance:\\n'))\n\n # Instantiate bank account\n savings = bankaccount.BankAccount(start_bal)\n\n #Deposit the users paycheck\n pay = float(input('How much were you paid this week?\\n'))\n print('I will deposit this in your account')\n savings.deposit(pay)\n \n #Display the balance\n print(savings)\n\n #Withdraw cash\n cash = input('How much would you want to withdraw?\\n')\n print('I will with draw this from your account.')\n savings.withdraw(cash)\n \n #Display the balance\n print(savings)\n\nmain()"
},
{
"alpha_fraction": 0.6605405211448669,
"alphanum_fraction": 0.6605405211448669,
"avg_line_length": 56.875,
"blob_id": "1f36173426a757f845df72dbab8c71ff1b557bdc",
"content_id": "a54de4c268b2d559b1f419a6af0e6148516ddaf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 16,
"path": "/Algorithms/algorithms.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# **************** ALGORITHMS **********************\n# Algorithm - Describes a computational process that halts with a solution\n# to a problem.\n\n# There are many criteria for assessing the quality of an algorithm.\n# The most essential criterion is correctness, it actually solves \n# the problem it's meant to solve. Readabilitity and ease of \n# maintenance are also important qualities. Run-time performance \n# is one of the most important qualities. \n\n# Benchmarking - measure the time cost of an algorithm to obtain an actual run time\n\n# Counting - Another way to measure time complexity (efficiency) of an algorithm \n# with different problem sizes. Kee in mind you are counting the instructions\n# in the high-level code in which the algorithm is written, not instructions \n# in the executable machine language code"
},
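The benchmarking idea noted above can be tried directly with Python's standard timeit module; a minimal sketch (my own example, not from the lab files) that times the same statement at two problem sizes:

```python
import timeit

# Time a simple operation at two problem sizes to see how cost grows
setup_small = "data = list(range(1_000))"
setup_large = "data = list(range(100_000))"
stmt = "sum(data)"

print(timeit.timeit(stmt, setup=setup_small, number=1_000))
print(timeit.timeit(stmt, setup=setup_large, number=1_000))
```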
{
"alpha_fraction": 0.6823338866233826,
"alphanum_fraction": 0.7001620531082153,
"avg_line_length": 43.07143020629883,
"blob_id": "638fa45799bb6c7ce2687b37c8df46d362f2fb70",
"content_id": "d20b17910771022788f7770fc22129197d659e04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 14,
"path": "/LABS/Labs-3-1/lab3-1-8.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting variable for tip and sales tax\nsalesTax = 0.07\ntip = 0.15\n\n#User input for amount of meal purchase\npurchase = input(\"Input amount of meal purchase: \\n\")\n#Input validation for numeric\n#Using .replace to get rid of decimal to validate input is numeric\nwhile purchase.replace('.','').isnumeric() == False:\n purchase = input(\"Input was not valid. Input amount of purchase:\\n\")\n\nprint('Sales Tax: ${:.2f}'.format(float(purchase) * salesTax))\nprint('Tip at 15%: ${:.2f}'.format(float(purchase) * tip))\nprint('State Tax: ${:.2f}'.format((float(purchase) * salesTax) + (float(purchase) * tip) + float(purchase)))\n"
},
{
"alpha_fraction": 0.7006802558898926,
"alphanum_fraction": 0.7006802558898926,
"avg_line_length": 30.5,
"blob_id": "fff81237a9d73b613e1b0203acc6df16f8cc8ca5",
"content_id": "1db6a56fc9c15b2475e441c8cd3e6ee83117fd88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 441,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 14,
"path": "/LABS/Labs-3-4/lab3-4-2-rectangles.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\n#Start main\ndef main():\n #Take user input for dimensions\n length = float(input('What is the length of your rectangle:\\n'))\n width = float(input('What is the width of your rectangle:\\n'))\n #Printing result using the calcArea function\n print('The area of your rectangle is {}'.format(calcArea(length,width)))\n\n#Define the calcArea function\ndef calcArea(leng, wid):\n #Calculating the area\n return float(leng * wid)\n\nmain()"
},
{
"alpha_fraction": 0.5961002707481384,
"alphanum_fraction": 0.6053853034973145,
"avg_line_length": 30.676469802856445,
"blob_id": "2c9924e097c743d5745631d4192561f58d56f90c",
"content_id": "9cbad05ae6908bf065aef0e91d7b175782e3ec11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1077,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 34,
"path": "/LABS/Group-Project/parseTextfile.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# This function is meant to read in the help text file to parse it into\n# helpObjects that are returned as a list\n\ndef parseHelp(filename):\n # Opening and reading the lines of the file\n f = open(filename, 'r')\n # This reads in each line as it's own list item\n contents = f.read()\n f.close()\n\n import re\n import helpClass\n # Repacking the file content as a single string to control how\n # Content is broken up later for proper parsing\n \n helpObjects = []\n pattern = re.compile(r'Python ([\\w]+) library.[\\*]+\\n\\nCopied from: [\\w\\:\\/\\.]+\\n\\n(Description\\: [\\n \\(\\)\\,\\.\\'\\:\\/\\w-]+)\\n(Example 1 \\- [\\w\\n \\(\\),\\.\\'\\:\\/]+)\\n(Example 2 \\- [\\w\\n \\(\\),\\.\\'\\:\\/]+)\\n')\n matches = pattern.finditer(contents) \n\n for match in matches:\n lib = match.group(1)\n desc = match.group(2)\n ex1 = match.group(3)\n ex2 = match.group(4)\n helpTemp = helpClass.HelpObject(desc, ex1, ex2, lib)\n helpObjects.append(helpTemp)\n return helpObjects\n\n\n\n\nhelpData = parseHelp(filename)\nfor i in helpData:\n print(i.getDesc())\n"
},
{
"alpha_fraction": 0.618229866027832,
"alphanum_fraction": 0.6406869292259216,
"avg_line_length": 36.900001525878906,
"blob_id": "502ae4d88dfd00166482bc686913997567469c36",
"content_id": "78c057221f1844604074769b8e2cf2dbfab28e65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 20,
"path": "/LABS/recursion/rec-Ackermann.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" 8. Ackermann’s Function\n Ackermann’s Function is a recursive mathematical algorithm that can be used to test how\n well a system optimizes its performance of recursion. Design a function ackermann(m, n),\n which solves Ackermann’s function. Use the following logic in your function:\n If m = 0 then return n + 1\n If n = 0 then return ackermann(m - 1, 1)\n Otherwise, return ackermann(m - 1, ackermann(m, n - 1))\n Once you’ve designed your function, test it by calling it with small values for m and n. \"\"\"\n\ndef main():\n print(ackermann(3,6))\n\ndef ackermann(m, n):\n if m == 0:\n return n + 1\n elif n == 0:\n return ackermann(m - 1, 1)\n else:\n return ackermann(m - 1, ackermann(m, n - 1))\nmain()"
},
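Because the plain ackermann(m, n) above recomputes the same subproblems many times, a memoized variant answers the same small inputs far faster; this is an optional illustration of mine, not part of the lab:

```python
from functools import lru_cache

# Caching results avoids recomputing identical (m, n) calls
@lru_cache(maxsize=None)
def ackermann_memo(m, n):
    if m == 0:
        return n + 1
    elif n == 0:
        return ackermann_memo(m - 1, 1)
    else:
        return ackermann_memo(m - 1, ackermann_memo(m, n - 1))

print(ackermann_memo(3, 6))  # 509, matching the plain version
```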
{
"alpha_fraction": 0.7137562036514282,
"alphanum_fraction": 0.719110369682312,
"avg_line_length": 32.72222137451172,
"blob_id": "6a8f8fdee6b1f0eb1e3cc144b35a4d51ab388526",
"content_id": "cf37a828ebc228b4d00f2725e936c0f9af6b1a63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2428,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 72,
"path": "/Practice_Test/manipulateFile.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "'''\nWrite the function manipulateFile that receives four parameters:\n fileName - the name of a file that you will read/write/append to/from\n findWord - a word to search for in the file\n insertWord - a word that will be inserted in the file\n appendWord - a word that will be appended to the end of the file\n\nThe function should read the file in fileName and determine if the string in findWord exists in the file. If it does\nexist, insert a space and the string in insertWord immediately after the findWord.\n\nFor example, the file may contain the following text:\n\nThe quick brown fox jumped \n\nIf findWord contained \"quick\" and the insertWord contained \"sly\", once processed the line in the file would have:\n\nThe quick sly brown fox jumped\n\nThe string in appendWord will simply be appended to the end of the file preceded by a space. For example, the end of the\nfile may contain:\n\nover the lazy dog's back\n\nif appendWord contains \"fence\", once processed the line in the file would have:\n\nover the lazy dog's back fence\n\nif the file in fileName does not exist, the function should return the string: \"FILE_ERROR\"\nif the string in findWord does not exist in the file, the function should return the string: \"WORD_NOT_FOUND\"\nand make no changes to the file.\notherwise, the function should return \"SUCCESS\"\n\nNOTE: during your development, should the data files under test get corrupted, pristine copies are located in the\n'original files' folder. Simply make a copy and bring over into your working directory.\n\n'''\n\n\ndef manipulateFile(fileName, findWord, insertWord, appendWord):\n try:\n f = open(fileName, 'r')\n except FileNotFoundError:\n return f\"FILE_ERROR\"\n lines = f.readlines()\n f.close()\n #print(lines)\n newlines = []\n found = False\n for i in lines:\n if (findWord in i):\n i = i.replace(findWord,findWord+\" \"+insertWord,1)\n found = True\n newlines.append(i) \n if (found == False):\n return f\"WORD_NOT_FOUND\"\n #print(newlines)\n if (newlines[-1][-1] == \"\\n\"):\n newlines[-1] = newlines[-1].replace(newlines[-1][-1], \" \"+appendWord)\n else:\n newlines[-1] = newlines[-1].replace(newlines[-1][-1], newlines[-1][-1]+\" \"+appendWord)\n #print(newlines)\n\n # Present for testing to protect existing files\n #outfile = \"test.txt\"\n f = open(fileName, \"w\")\n for i in newlines:\n f.writelines(i)\n f.close()\n\n return \"SUCCESS\"\n\n#print(manipulateFile(\"names.txt\", \"Jake\", \"Jamie\", \"Bevis\"))\n"
},
{
"alpha_fraction": 0.5961538553237915,
"alphanum_fraction": 0.6634615659713745,
"avg_line_length": 11.875,
"blob_id": "c3e24cb21e9d6bbbb347aa86e049605a93ad9379",
"content_id": "a8734fa3f4aa4961c2edc3a3657859400a265125",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 8,
"path": "/Networking/netscan1.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport scapy.all as scapy\n\ndef scan(ip):\n\tscapy.arping(ip)\n\nscan(\"10.0.2.0/24\")\n\n"
},
{
"alpha_fraction": 0.6159601211547852,
"alphanum_fraction": 0.6159601211547852,
"avg_line_length": 22.647058486938477,
"blob_id": "6cb9f9dc43d65bdb31d3c60c5aa814688a6b0605",
"content_id": "af45fac0b253f4392df5faef174a7202cedc5dd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 17,
"path": "/LABS/Projects/testing.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "def readFile():\n #Getting filename from input for filename\n filename = input('Provide the new file name:\\n')\n #Reads the file of filename \n f = open(filename, 'r')\n #Recording file contents in array\n contents = f.readlines()\n f.close()\n return contents\n\nwords = []\nsource = readFile()\n\nfor i in source:\n line = i.split(' ')\n for j in line:\n words += j.rstrip('\\n')"
},
{
"alpha_fraction": 0.6807095408439636,
"alphanum_fraction": 0.7095343470573425,
"avg_line_length": 27.25,
"blob_id": "db827280c005f46d504422b290f74421b11d25c8",
"content_id": "a41fcbf4e99c9745deb962eb01ef75dfb91104db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 16,
"path": "/LABS/Labs-6-1/lab6-1-calsBurned.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Setting constant of treadmill Calories per minute \nCALS_PER_MIN = 3.9\n\n#Define main function\ndef main():\n intervals = [10,15,20,25,30]\n calsBurned(intervals)\n\n#Funtion to calculate calories burned for each interval\ndef calsBurned(time):\n for i in time:\n #Displaying the interval measured and the calculated calories burned\n print('Calories burned in {:d} minutes: {:.2f}'.format(i, float(i*CALS_PER_MIN)))\n\n#Calling main\nmain()"
},
{
"alpha_fraction": 0.7098039388656616,
"alphanum_fraction": 0.7163398861885071,
"avg_line_length": 35.42856979370117,
"blob_id": "bda69e8ecf47e42acbfd1a2e288a988580c4639e",
"content_id": "6a83694724ae34df804ea1ed7d99ac583f759510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 21,
"path": "/LABS/Labs-4-1/lab4-1-minInsurance.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "#Miminum percentage of replacement cost\nFLOOR_PERC = 0.80\n\n#Define the main function\ndef main():\n calcMin(getCost())\n\n#User input for the replacement building cost with input validation to be returned to main \ndef getCost():\n #Initial prompt for input\n cost = input('What is the replacement cost of your building?\\n')\n #Input validation checking numeric and a positive number\n while float(cost) <= 0 and cost.replace('.','').isnumeric() == False:\n cost = input('Invalid input. What is the replacement cost of your building?\\n')\n return cost\n\n#Function to print the minimum insurance based on replacement cost in a statement\ndef calcMin(cost):\n print('Your building must be insured for ${:.2f}'.format(float(cost)*FLOOR_PERC))\n\nmain()\n"
},
{
"alpha_fraction": 0.5905152559280396,
"alphanum_fraction": 0.6128590703010559,
"avg_line_length": 29.47222137451172,
"blob_id": "5e6af5bde82755caa0c2059e90a8830eb1efaa2b",
"content_id": "ea2a4d548281d149d18fcee9d83ceb14828d32d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2193,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 72,
"path": "/Algorithms/search_algorithms.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\" \n# Search Algorithms\n\n# This function mimics python's min()\n# Use this function to strudy the complexity of this Algorithm\n# by returning the index of the minimum item \n# This Algorithm assumes that the list is not empty and that the \n# items are not in order.\n# This Algorithm starts by treating the first position as that of \n# the minimum item. It then searches to the right for a smaller \n# number, and if found resets the position. When it reaches the \n# end of the list it returns the position of the minimum item.\n\n\"\"\"\n# def indexOfMin(myList):\n# minIndex = 0\n# currentIndex = 1\n# while currentIndex < len(myList):\n# if myList[currentIndex] < myList[minIndex]:\n# minIndex = currentIndex\n# currentIndex += 1\n# return minIndex\n\n# theList = [2,6,5,1,3,4,9]\n# print(indexOfMin(theList))\n\n\"\"\" \n# ***********************Sequential search******************************\n\n# This function returns the position of the target item if found, \n# or -1 otherwise\n \"\"\"\n# def seqSearch(target, myList):\n# position = 0\n# while position < len(myList):\n# if target == myList[position]:\n# return position\n# position += 1\n# return -1\n# theList = [2,6,5,1,3,4,9]\n# print(seqSearch(2, theList))\n\"\"\" \n# *************************BINARY SEARCH**********************************\n# Requires a sorted list to start\n# Used to search an ordered list for a pariticular value\n# Divide and Conquer approach\n# Target is compared with the middle value, then half the list is discared\n# repeatedly until the target is found \n \"\"\"\ndef binSearch(target, sortedList):\n left = 0\n right = len(sortedList) - 1\n while left <= right:\n midpoint = (left +right) // 2\n if target == sortedList[midpoint]:\n return midpoint\n elif target < sortedList[midpoint]:\n right = midpoint -1\n else:\n left = midpoint + 1\n return -1\n\nmyList = [1,2,3,4,5,6,7,8,9]\nprint(binSearch(10, myList))\n\n\"\"\" \nFor a list of size n we perform the reduction n/2/2/2/2.... until we get to 1\n\nLet k be the number of times we divide by 2 then we get n/2^k =1\n\nn = 2^k ---> k = log base 2 of n \n\"\"\""
},
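The log-base-2 claim at the end of search_algorithms.py can be checked empirically by counting loop iterations; this instrumented copy of binSearch is my own sketch, not part of the lab file:

```python
# Count the comparisons binary search makes for an absent target
def bin_search_counted(target, sorted_list):
    left, right, steps = 0, len(sorted_list) - 1, 0
    while left <= right:
        steps += 1
        midpoint = (left + right) // 2
        if target == sorted_list[midpoint]:
            return midpoint, steps
        elif target < sorted_list[midpoint]:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return -1, steps

for n in (16, 1024, 1_000_000):
    # Steps grow like log2(n): roughly 5, 11, and 20 here
    print(n, bin_search_counted(-1, list(range(n)))[1])
```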
{
"alpha_fraction": 0.5912793278694153,
"alphanum_fraction": 0.5994250178337097,
"avg_line_length": 19.460784912109375,
"blob_id": "1baf0eb293dc6cb317370b2242a491f8764b69ef",
"content_id": "8b4a121e92d14075e15940e1f0bf2e89d78dac67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2087,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 102,
"path": "/05_oop/04_exceptions.md",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "|[Table of Contents](/00-Table-of-Contents.md)|\n|---|\n\n---\n\n## Exceptions\n\nPython exceptions allow you to attempt code that may result in an error and execute additional functionality on said error.\n\n* **Try/except/finally/else**\n * Try statement begins an exception handling block\n* **Raise**\n * Triggers exceptions\n* **Except**\n * Handles the exception\n* **Multiple Exceptions**\n * Handles multiple exceptions\n\n**Example**\n\n```python\ntry:\n <statements>\nexcept: <name>:\n <statements>\nexcept: <name> as <data>:\n <statements>\nelse:\n <statements>\n\ntry:\n <statements>\nfinally:\n <statements>\n```\n\n**Example 2:**\n\n```python\n\"\"\"\nChecks a certain range of numbers to see if they can divide into a user specified num\n\"\"\"\n# Program main, runs at start of program\ndef launch():\n num = input('What number would you like to check?')\n amount = input('How many numbers do you want to check?')\n\n if isInt(num) == False or isInt(amount) == False:\n print(\"You must enter an integer\")\n launch() \n elif int(amount) < 0 or int(num) < 0:\n print(\"You must enter a number greater than 0\")\n launch() \n else:\n divisible_by(int(num), int(amount))\n\n# Checks num divisible numbers up to amount or itself\ndef divisible_by(num, amount):\n i = 1.0\n while (num / i >= 1 and amount > 0):\n if num % i == 0:\n print('{} is divisible by {}'.format(int(num), int(i)))\n amount -= 1\n i += 1\n\n# EXCEPTION HANDLING\ndef isInt(x):\n try:\n int(x) ###\n return True\n except ValueError:\n return False\n\nlaunch()\n```\n\n```python\n \nclass CustomError(Exception):\n def _init_(self, msg):\n self.msg = msg\n \n def _str_(self):\n return \"your error is {}\".format(self.msg)\n \n def doStuff(danger):\n if danger == True:\n raise CustomError(\"Whoa don't do that!\")\n print(\"Success\") #What happens here?\n \ntry:\n doStuff(True)\n \nexcept CustomError as stuff:\n print(stuff)\n \n```\n\n---\n\n|[Next Topic](/05_oop/05_oop_principles.md)|\n|---|\n"
},
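The try/except/else/finally template at the top of that page is schematic; a small runnable version (my own sketch, using a hypothetical parse_int helper) shows the order the clauses fire in:

```python
def parse_int(text):
    try:
        value = int(text)          # may raise ValueError
    except ValueError as err:
        print('Could not parse:', err)
        return None
    else:
        print('Parsed successfully')   # runs only when no exception occurred
        return value
    finally:
        print('This always runs')      # runs on both paths, even after return

parse_int('42')    # Parsed successfully / This always runs
parse_int('oops')  # Could not parse ... / This always runs
```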
{
"alpha_fraction": 0.5724946856498718,
"alphanum_fraction": 0.6162046790122986,
"avg_line_length": 28.25,
"blob_id": "24537e5b02d1fee640ee629dfaabce9d4dbcd912",
"content_id": "39f7820e6ae1e310ccff7563de122cda4dbd2253",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 32,
"path": "/LABS/UnitTest/test_calc.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "# any file we want to use for testing\n# needs to have test_ in front of file name\n\nimport unittest\nimport calc\n\n# inherit from the unittest.TestCase()\nclass TestCalc(unittest.TestCase):\n def test_add(self):\n self.assertEqual(calc.add(8, 9), 17)\n self.assertEqual(calc.add(-1, 1), 0)\n self.assertEqual(calc.add(-1, -1), -2)\n\n def test_subtract(self):\n self.assertEqual(calc.subtract(10, 5), 5)\n self.assertEqual(calc.subtract(-1, 1), -2)\n self.assertEqual(calc.subtract(-1, -1), 0)\n\n def test_multiply(self):\n self.assertEqual(calc.multiply(10, 5), 50)\n self.assertEqual(calc.multiply(-1, 1), -1)\n self.assertEqual(calc.multiply(-1, -1), 1)\n\n def test_divide(self):\n self.assertEqual(calc.divide(10, 5), 2)\n self.assertEqual(calc.divide(-1, 1), -1)\n self.assertEqual(calc.divide(-1, -1), 1)\n\n\n\nif __name__ == \"__main__\":\n unittest.main() "
},
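The calc module exercised by TestCalc is not included in this listing; a minimal implementation consistent with its assertions (assumed, not the repo's original file) would be:

```python
# calc.py (assumed): four arithmetic helpers matching TestCalc's assertions
def add(x, y):
    return x + y

def subtract(x, y):
    return x - y

def multiply(x, y):
    return x * y

def divide(x, y):
    if y == 0:
        raise ValueError('Cannot divide by zero')
    return x / y
```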
{
"alpha_fraction": 0.6046814322471619,
"alphanum_fraction": 0.6215865015983582,
"avg_line_length": 32.456520080566406,
"blob_id": "20ff8fe8d421a537799976d4584c41b0e047f6ed",
"content_id": "d243df3e2b6e7018b03caff13d84291ea349c860",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1538,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 46,
"path": "/LABS/Labs-6-1/lab6-1-onBudget.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "from functools import reduce\n\n#Define main\ndef main():\n budget = getBudget()\n expenses = []\n getExpenses(expenses)\n overOrUnder(budget,totalExp(expenses))\n\n\ndef getBudget():\n budget = input('What is your budget? (Ex: 3206.10):\\n')\n #Input validation checking numeric and a positive number\n while float(budget) <= 0 and budget.replace('.','').isnumeric() == False:\n budget = input('Invalid input. What is your budget? (Ex: 3206.10):\\n')\n return float(budget)\n\ndef getExpenses(expenseList):\n done = False\n while done == False:\n expense = input('Input an expense (206.10). Type \"exit\" when no more expenses are to be entered:\\n')\n try:\n #Input validation checking numeric and a positive number\n while float(expense) <= 0 and expense.replace('.','').isnumeric() == False:\n expense = input('Input an expense (206.10). Type \"exit\" when no more expenses are to be entered:\\n')\n expenseList.append(float(expense))\n except:\n done = True \n \n\ndef totalExp(expenseList):\n # Replace total calculation with reduce and lambda function\n total = reduce((lambda x, y: x + y),expenseList)\n # for i in expenseList:\n # total += float(i)\n return total\n\ndef overOrUnder(cap,spent):\n if spent > cap:\n print('You are overbudget by ${:.2f}!'.format(spent-cap))\n elif cap > spent:\n print('You are ${:.2f} under budget!'.format(cap-spent))\n else:\n print('You are right on budget!')\n\nmain()"
},
{
"alpha_fraction": 0.6316872239112854,
"alphanum_fraction": 0.6399176716804504,
"avg_line_length": 23.350000381469727,
"blob_id": "48891af93c72643bb4ff3624f8187016ca42a037",
"content_id": "efd8c736f48da2ccd5d960eeb5cf225c3f6678d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 20,
"path": "/LABS/Lab-2-1/lab2e.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "print(\"Please type a sentence:\")\nx = input()\n\nwordCount = len(x.split(\" \"))\nprint(\"Sentence word count: \" + str(wordCount))\n\ncharCount = len(x)\nprint(\"Sentence character count: \" + str(charCount))\n\nupperCount = 0\nlowerCount = 0\nfor i in x:\n if i.isupper() == True:\n upperCount += 1\n else:\n if i.islower() == True:\n lowerCount += 1\n\nprint(\"Uppercase characters in sentence: \" + str(upperCount))\nprint(\"Lowercase characters in sentence: \" + str(lowerCount))"
},
{
"alpha_fraction": 0.6643233895301819,
"alphanum_fraction": 0.6783831119537354,
"avg_line_length": 18,
"blob_id": "9608a3857524bea6b4356013b8ff78fa0aa1e1e4",
"content_id": "bcd11a361c2475c00106feda94feb0d75ca1e547",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 30,
"path": "/LABS/Threading/multithreading2.py",
"repo_name": "meighanv/05-Python-Programming",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRun 10 threads\n\nWe can also now pass in an argument for seconds\n\"\"\"\nimport threading\nimport time\n\nstart = time.perf_counter()\n\ndef do_something(seconds):\n print(f'Sleeping {seconds} second(s)...')\n time.sleep(seconds)\n print('Done Sleeping...')\n\n# initialize list of threads\nthreads = []\n\n# set up loop to start 10 threads\nfor _ in range(10):\n t = threading.Thread(target=do_something, args=[1.5])\n t.start()\n threads.append(t)\n\nfor thread in threads:\n thread.join()\n\nfinish = time.perf_counter()\n\nprint(f'Finished in {finish-start} second(s)')"
}
] | 236 |
edwinbalani/grov | https://github.com/edwinbalani/grov | a611445fb028c7e54947fecee8bcbcc824707409 | 9caa71658441244376a9faefcbddc799078f2dca | 7850078cdf6bc9844190dfaeb6ff3a81e63d175b | refs/heads/master | 2021-01-20T12:12:25.463974 | 2017-03-11T15:29:33 | 2017-03-11T15:29:33 | 84,005,255 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6194868683815002,
"alphanum_fraction": 0.645142674446106,
"avg_line_length": 39.79999923706055,
"blob_id": "b14b77fd679858dd4d0244d70db53087742c6d44",
"content_id": "653f8a42df16bafab8cc4e54b79ba9ab1463d089",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3469,
"license_type": "permissive",
"max_line_length": 150,
"num_lines": 85,
"path": "/manual_control.py",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDemonstrates using custom hillshading in a 3D surface plot.\n\"\"\"\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cbook\nfrom matplotlib import cm\nfrom matplotlib.colors import LightSource\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.animation as animation\nimport time\nimport copy\n\ndef command_interpreter(standing_point, command):\n command_words = command.split()\n prop_stand_point = copy.copy(standing_point)\n if command_words[0].lower() == \"east\":\n prop_stand_point[0] += int(command_words[1])\n if command_words[0].lower() == \"west\":\n prop_stand_point[0] -= int(command_words[1])\n if command_words[0].lower() == \"north\":\n prop_stand_point[1] += int(command_words[1])\n if command_words[0].lower() == \"south\":\n prop_stand_point[1] -= int(command_words[1])\n return prop_stand_point\n\n\nfilename = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False)\nwith np.load(filename) as dem:\n z = dem['elevation']\n nrows, ncols = z.shape\n x = np.linspace(dem['xmin'], dem['xmax'], ncols)\n y = np.linspace(dem['ymin'], dem['ymax'], nrows)\n x, y = np.meshgrid(x, y)\n\nregion = np.s_[5:50, 5:50]\nx, y, z = x[region], y[region], z[region]\nbarriers = [[25, 25], []]\n\nfig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\nax.set_xlabel(\"South-North\")\nax.set_ylabel(\"East-West\")\n\nls = LightSource(270, 45)\n# To use a custom hillshading mode, override the built-in shading and pass\n# in the rgb colors of the shaded surface calculated from \"shade\".\nrgb = ls.shade(z, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')\nsurf = ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=rgb,\n linewidth=0, antialiased=False, shade=False)\n\norigin = [20, 20]\nstanding_point = origin\ngoal = [26, 28]\nobstacles = [[20, 22]]\n\ntrajectory = ax.plot([x[origin[0]][origin[1]]], [y[origin[0]][origin[1]]],\n [z[origin[0]][origin[1]]], markerfacecolor='m',\n markeredgecolor='w', marker='o', markersize=5, alpha=0.6)\ntrajectory = ax.plot([x[goal[0]][goal[1]]], [y[goal[0]][goal[1]]], [z[goal[0]][goal[1]]], markerfacecolor='g',\n markeredgecolor='w', marker='o', markersize=5, alpha=0.6)\nplt.show(block=False)\nplt.pause(5)\n\nwhile standing_point != goal:\n command = input(\"**Please type your command with direction and distance, e.g. east 2**\\n\")\n proposed_standing_point = command_interpreter(standing_point, command)\n if proposed_standing_point in obstacles:\n print(\"Ah oh, there is an obstacle at that location.\")\n print(\"Please avoid that point marked in black.\")\n trajectory = ax.plot([x[proposed_standing_point[0]][proposed_standing_point[1]]], [y[proposed_standing_point[0]][proposed_standing_point[1]]],\n [z[proposed_standing_point[0]][proposed_standing_point[1]]], markerfacecolor='k',\n markeredgecolor='k', marker='o', markersize=5, alpha=0.6)\n else:\n standing_point = proposed_standing_point\n trajectory = ax.plot([x[standing_point[0]][standing_point[1]]], [y[standing_point[0]][standing_point[1]]],\n [z[standing_point[0]][standing_point[1]]], markerfacecolor='r', markeredgecolor='r',\n marker='o', markersize=5, alpha=0.6)\n plt.draw()\n\n plt.show(block=False)\n plt.pause(2)\n\nprint(\"You have reached the goal!\")\nplt.show(block = False)\nplt.pause(10)\n\n"
},
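command_interpreter returns a new proposed position and leaves the current one untouched (thanks to copy.copy on the point list); a quick behavior sketch with made-up grid positions:

```python
# Behavior sketch for command_interpreter (grid indices, not physical units)
point = [20, 20]
print(command_interpreter(point, "east 2"))   # [22, 20]
print(command_interpreter(point, "South 3"))  # [20, 17] - direction is case-insensitive
print(point)                                  # [20, 20] - original point is unchanged
```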
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 24.5,
"blob_id": "dc17fe059c2aaacca647f860b3a6ee912899df91",
"content_id": "39182109ba4fcf658b703b8ba9919ab680560aaa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 2,
"path": "/README.md",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "# grov\nPart IA General Engineering project: Ganymede Rover\n"
},
{
"alpha_fraction": 0.6972476840019226,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 32.53845977783203,
"blob_id": "e1954eb62b551ce5662f27bcd717326be395448a",
"content_id": "e973af24e3fcc3ae3054842d30ec2a5534759d29",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 436,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 13,
"path": "/TODO.md",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "## Todo list\n\n### Albert\n- [ ] Control system for probe to reach its goal (manual control or A* search)\n- [ ] Demo code: manual control and automatic tour\n\n### Edwin\n- [ ] Decide and document heightmap format\n- [ ] Generate heightmap data\n- [ ] System to determine whether probe can climb a slope\n- [ ] How to tackle slopes (wheel design etc.)\n- [ ] Demo: climbing mountain; zig-zag route\n- [ ] Demonstration-ready wheel designs (CAD?)\n"
},
{
"alpha_fraction": 0.584055483341217,
"alphanum_fraction": 0.6091854572296143,
"avg_line_length": 31.97142791748047,
"blob_id": "4d382c6be4eb7bf42eaa4d3f5f67ea011e5d8fba",
"content_id": "81a91b64cecb5cea3a0231ca373f46f560493c28",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4616,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 140,
"path": "/analysis.py",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "# Copyright 2017 Edwin Bahrami Balani and Qiaochu Jiang\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module contains functions for grid data analysis.\"\"\"\n\nimport numpy as np\n\n\ndef window(grid, indices, window_size=5):\n \"\"\"\n Return a square window from a 2-D grid.\n Note: edge values are padded using `numpy.pad(mode='edge')`\n :param grid: 2-D grid of values\n :param indices: (x, y) tuple of indices\n :param window_size: length of the window, must be odd\n :return:\n \"\"\"\n if window_size % 2 != 1:\n raise ValueError('window_size must be odd')\n if len(indices) != 2:\n raise ValueError('indices (x, y) must be specified')\n x, y = indices\n\n # Preemptive padding on all edges prevents having to check if our window falls beyond the end\n # However, we must then adjust our indices due to the grid expansion\n grid = np.pad(grid, window_size, 'edge')\n x += window_size\n y += window_size\n\n xmin = x - window_size//2\n ymin = y - window_size//2\n xmax = x + window_size//2 + 1\n ymax = y + window_size//2 + 1\n grid = grid[xmin:xmax, ymin:ymax]\n return grid\n\n\ndef gradient(grid, indices, window_size=5, dx=1, dy=1):\n \"\"\"\n Finds the gradient at a point in a 2-D grid using a polynomial fit\n :param grid: 2-D grid of z-values\n :param indices: (x, y) tuple of indices where gradient will be found\n :param window_size: size of the square window used for the polynomial fit (default 5)\n :param dx: spacing between adjacent x-points (default 1)\n :param dy: spacing betwween adjacent y-points (default 1)\n :return: a 3-D gradient vector\n \"\"\"\n X, Y, coeff = fit(grid, indices, window_size)\n\n # Coefficients in order 1, x, y, x^2, x^2.y, x^2.y^2, y^2, x.y^2, x.y\n # a, b, c, d, e, f, g, h, i\n\n # I am not proud of this:\n # TODO rewrite this section for smarter/iterative processing\n # (This also has the benefit of allowing for a general nth-order polynomial fit:\n # see https://gistpreview.github.io/?f9990a6c0eec76c0c8176b050121e694)\n\n x, y = X[window_size//2, window_size//2], Y[window_size//2, window_size//2]\n return np.array([__wrt_x(x, y, coeff) / dx, __wrt_y(x, y, coeff) / dy])\n\n\ndef fit(grid, indices, window_size=5):\n \"\"\"\n Calculates a second-order 2-D fit function at a point on a grid\n :param grid: 2-D grid of z-values\n :param indices: (x, y) tuple of indices where gradient will be found\n :param window_size: size of the square window used for the polynomial fit (default 5)\n :return:\n \"\"\"\n grid = window(grid, indices, window_size=window_size)\n X, Y = np.mgrid[0:window_size, 0:window_size]\n Xf = X.flatten()\n Yf = Y.flatten()\n A = np.array([np.ones(X.size), Xf, Yf, Xf ** 2, Xf ** 2 * Yf, Xf ** 2 * Yf ** 2, Yf ** 2, Xf * Yf ** 2, Xf * Yf]).T\n B = grid.flatten()\n return X, Y, np.linalg.lstsq(A, B)\n\n\ndef __z(x, y, c):\n a = c[0]\n b = c[1]\n c = c[2]\n d = c[3]\n e = c[4]\n f = c[5]\n g = c[6]\n h = c[7]\n i = c[8]\n return a + b*x + c*y + d*x**2 + e*x**2*y + f*x**2*y**2 + g*y**2 + h*x*y**2 + i*x*y\n\n\ndef __wrt_x(x, y, c):\n b = 
c[1]\n c = c[2]\n d = c[3]\n e = c[4]\n f = c[5]\n h = c[7]\n i = c[8]\n return b + 2*d*x + 2*e*x*y + 2*f*x*y**2 + h*y**2 + i*y\n\n\ndef __wrt_y(x, y, c):\n c = c[2]\n e = c[4]\n f = c[5]\n g = c[6]\n h = c[7]\n i = c[8]\n return c + e*x**2 + 2*f*x**2*y + 2*g*y + 2*h*x*y + i*x\n\n\ndef direc_deriv(grad: np.ndarray, direc: np.ndarray, norm=True):\n \"\"\"\n Calculates the directional derivative of a function\n :param grad: Gradient vector (2-D)\n :param dir: Direction\n :param: norm: Whether to normalise the direction vector (default True)\n :return:\n \"\"\"\n if not grad.size == 2:\n raise ValueError(\"Gradient vector must have 2 components\")\n return grad * (np.linalg.norm(direc) if norm else direc)\n\n\ndef slope(grad: np.ndarray):\n \"\"\"\n Calculates the slope of a function in the direction of its gradient\n :param grad: Gradient vector (2-D)\n :return:\n \"\"\"\n return direc_deriv(grad, grad, norm=True)\n"
},
{
"alpha_fraction": 0.6925403475761414,
"alphanum_fraction": 0.7076612710952759,
"avg_line_length": 37.153846740722656,
"blob_id": "0614c4fa2183b7db716518afb1a714394139c57f",
"content_id": "6164f3be495c400b6c05d1dfe9b98665713ff5af",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 26,
"path": "/util.py",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "# Copyright 2017 Edwin Bahrami Balani and Qiaochu Jiang\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for testing purposes\"\"\"\n\nimport numpy as np\n\ndef rgrid(size=10, integers=True, low=None, high=None):\n \"\"\"Return a grid of random values\"\"\"\n if low is None:\n low = 1 if integers else 0\n if high is None:\n high = 21 if integers else 1\n\n if integers:\n return np.random.randint(low, high, (size, size))\n else:\n return np.random.randn(low, high, (size, size))\n"
},
{
"alpha_fraction": 0.5046455264091492,
"alphanum_fraction": 0.5616037249565125,
"avg_line_length": 51.67021179199219,
"blob_id": "2d0e1f7042427b123a0cba4f6779b1ea8747b9fc",
"content_id": "8899a492cede5d24d982377e215c06a866aa7f64",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9902,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 188,
"path": "/autopilot.py",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDemonstrates using custom hillshading in a 3D surface plot.\n\"\"\"\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cbook\nfrom matplotlib import cm\nfrom matplotlib.colors import LightSource\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.animation as animation\nimport time\nfrom math import sqrt\nimport copy\nimport math\n\ndef gradient_calculation(z, point1, point2):\n return (z[point2[0]][point2[1]] - z[point1[0]][point1[1]]) / (math.sqrt((point1[0]-point2[0]) ** 2 + (point1[1]-point2[1]) ** 2))\n\ndef gradient_difficulty(gradient):\n return 1 + 1 / (1 + math.exp(-gradient))\n\n\ndef command_interpreter(standing_point, command):\n command_words = command.split()\n if (command_words[0].lower() == \"east\"):\n standing_point[0] += int(command_words[1])\n if (command_words[0].lower() == \"west\"):\n standing_point[0] -= int(command_words[1])\n if (command_words[0].lower() == \"north\"):\n standing_point[1] += int(command_words[1])\n if (command_words[0].lower() == \"south\"):\n standing_point[1] -= int(command_words[1])\n print(standing_point)\n return standing_point\n\n\ndef priority_enqueue(q, key, distance, heur, parent):\n l = len(q) - 1\n if l >= 0:\n while (q[l][1] + q[l][2] > distance + heur and l >= 0):\n l -= 1\n q.insert(l + 1, (key, distance, heur, parent))\n else:\n q.append((key, distance, heur, parent))\n\n\ndef heuristic(standing_point, goal):\n return sqrt((goal[0] - standing_point[0]) ** 2 + (goal[1] - standing_point[1]) ** 2)\n\n\ndef a_star_search(origin, goal, heuristic, coordinates, anomaly, marked, z):\n original_marked = copy.copy(marked)\n pq = list()\n trace = dict()\n pq.append((origin, 0, heuristic(origin, goal), origin))\n while len(pq) > 0:\n boundary = pq.pop(0)\n if boundary[0] == goal:\n return trace, boundary[1], boundary[3]\n if boundary[0] in anomaly:\n trajectory = ax.plot([x[boundary[0][0]][boundary[0][1]]], [y[boundary[0][0]][boundary[0][1]]],\n [z[boundary[0][0]][boundary[0][1]]], markerfacecolor='k',\n markeredgecolor='k', marker='o', markersize=5, alpha=0.6)\n print(\"There is an obastacle at\", (boundary[0]))\n print(\"Start over, avoiding\", boundary[0])\n marked = original_marked\n marked[boundary[0]] = True\n return a_star_search(origin, goal, heuristic, coordinates, anomaly, marked, z)\n if (boundary[0][0] + 1 in coordinates[0]) and (boundary[0][1] + 1 in coordinates[1]) and (\n marked[(boundary[0][0] + 1, boundary[0][1] + 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] + 1, boundary[0][1] + 1))\n marked[(boundary[0][0] + 1, boundary[0][1] + 1)] = True\n priority_enqueue(pq, (boundary[0][0] + 1, boundary[0][1] + 1), boundary[1] + 1.414 * gradient_difficulty(grad),\n heuristic((boundary[0][0] + 1, boundary[0][1] + 1), goal), boundary[0])\n trace[(boundary[0][0] + 1, boundary[0][1] + 1)] = boundary[0]\n if (boundary[0][0] + 1 in coordinates[0]) and (boundary[0][1] - 1 in coordinates[1]) and (\n marked[(boundary[0][0] + 1, boundary[0][1] - 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] + 1, boundary[0][1] - 1))\n marked[(boundary[0][0] + 1, boundary[0][1] - 1)] = True\n priority_enqueue(pq, (boundary[0][0] + 1, boundary[0][1] - 1), boundary[1] + 1.414 * gradient_difficulty(grad),\n heuristic((boundary[0][0] + 1, boundary[0][1] - 1), goal), boundary[0])\n trace[(boundary[0][0] + 1, boundary[0][1] - 1)] = boundary[0]\n if (boundary[0][0] - 1 in coordinates[0]) and (boundary[0][1] + 1 in coordinates[1]) and (\n marked[(boundary[0][0] 
- 1, boundary[0][1] + 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] - 1, boundary[0][1] + 1))\n marked[(boundary[0][0] - 1, boundary[0][1] + 1)] = True\n priority_enqueue(pq, (boundary[0][0] - 1, boundary[0][1] + 1), boundary[1] + 1.414 * gradient_difficulty(grad),\n heuristic((boundary[0][0] - 1, boundary[0][1] + 1), goal), boundary[0])\n trace[(boundary[0][0] - 1, boundary[0][1] + 1)] = boundary[0]\n if (boundary[0][0] - 1 in coordinates[0]) and (boundary[0][1] - 1 in coordinates[1]) and (\n marked[(boundary[0][0] - 1, boundary[0][1] - 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] - 1, boundary[0][1] - 1))\n marked[(boundary[0][0] - 1, boundary[0][1] - 1)] = True\n priority_enqueue(pq, (boundary[0][0] - 1, boundary[0][1] - 1), boundary[1] + 1.414 * gradient_difficulty(grad),\n heuristic((boundary[0][0] - 1, boundary[0][1] - 1), goal), boundary[0])\n trace[(boundary[0][0] - 1, boundary[0][1] - 1)] = boundary[0]\n if (boundary[0][0] in coordinates[0]) and (boundary[0][1] - 1 in coordinates[1]) and (\n marked[(boundary[0][0], boundary[0][1] - 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0], boundary[0][1] - 1))\n marked[(boundary[0][0], boundary[0][1] - 1)] = True\n priority_enqueue(pq, (boundary[0][0], boundary[0][1] - 1), boundary[1] + 1 * gradient_difficulty(grad),\n heuristic((boundary[0][0], boundary[0][1] - 1), goal), boundary[0])\n trace[(boundary[0][0], boundary[0][1] - 1)] = boundary[0]\n if (boundary[0][0] in coordinates[0]) and (boundary[0][1] + 1 in coordinates[1]) and (\n marked[(boundary[0][0], boundary[0][1] + 1)] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0], boundary[0][1] + 1))\n marked[(boundary[0][0], boundary[0][1] + 1)] = True\n priority_enqueue(pq, (boundary[0][0], boundary[0][1] + 1), boundary[1] + 1 * gradient_difficulty(grad),\n heuristic((boundary[0][0], boundary[0][1] + 1), goal), boundary[0])\n trace[(boundary[0][0], boundary[0][1] + 1)] = boundary[0]\n if (boundary[0][0] - 1 in coordinates[0]) and (boundary[0][1] in coordinates[1]) and (\n marked[(boundary[0][0] - 1, boundary[0][1])] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] - 1, boundary[0][1]))\n marked[(boundary[0][0] - 1, boundary[0][1])] = True\n priority_enqueue(pq, (boundary[0][0] - 1, boundary[0][1]), boundary[1] + 1 * gradient_difficulty(grad),\n heuristic((boundary[0][0] - 1, boundary[0][1]), goal), boundary[0])\n trace[(boundary[0][0] - 1, boundary[0][1])] = boundary[0]\n if (boundary[0][0] + 1 in coordinates[0]) and (boundary[0][1] in coordinates[1]) and (\n marked[(boundary[0][0] + 1, boundary[0][1])] == False):\n grad = gradient_calculation(z, boundary[0], (boundary[0][0] + 1, boundary[0][1]))\n marked[(boundary[0][0] + 1, boundary[0][1])] = True\n priority_enqueue(pq, (boundary[0][0] + 1, boundary[0][1]), boundary[1] + 1 * gradient_difficulty(grad),\n heuristic((boundary[0][0] + 1, boundary[0][1]), goal), boundary[0])\n trace[(boundary[0][0] + 1, boundary[0][1])] = boundary[0]\n print(\"There is no way we can reach the goal.\")\n\nfilename = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False)\nwith np.load(filename) as dem:\n z = dem['elevation']\n nrows, ncols = z.shape\n x = np.linspace(dem['xmin'], dem['xmax'], ncols)\n y = np.linspace(dem['ymin'], dem['ymax'], nrows)\n x, y = np.meshgrid(x, y)\n\ncoordinates = (range(0, 45), range(0, 45))\nregion = np.s_[5:50, 5:50]\nx, y, z = x[region], y[region], z[region]\n\nfig, ax = 
plt.subplots(subplot_kw=dict(projection='3d'))\nax.set_xlabel(\"South-North\")\nax.set_ylabel(\"West-East\")\n\nls = LightSource(270, 45)\n# To use a custom hillshading mode, override the built-in shading and pass\n# in the rgb colors of the shaded surface calculated from \"shade\".\nrgb = ls.shade(z, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')\nsurf = ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=rgb,\n linewidth=0, antialiased=False, shade=False)\n\norigin = (5, 5)\ngoal = (40, 40)\nobstacles = [(7, 9), (8, 9), (9, 9), (10, 9), (11, 9), (12, 9),\n (20, 16), (20, 17), (20, 18), (20, 19), (20, 20)]\n\nfor i in range(15, 25):\n for j in range(15, 25):\n obstacles.append((i, j))\nfor i in range(30, 40):\n for j in range(30, 40):\n obstacles.append((i, j))\n#obstacles = []\n\ntrajectory = ax.plot([x[origin[0]][origin[1]]], [y[origin[0]][origin[1]]],\n [z[origin[0]][origin[1]]], markerfacecolor='m',\n markeredgecolor='w', marker='o', markersize=5, alpha=0.6)\ntrajectory = ax.plot([x[goal[0]][goal[1]]], [y[goal[0]][goal[1]]],\n [z[goal[0]][goal[1]]], markerfacecolor='g',\n markeredgecolor='w', marker='o', markersize=5, alpha=0.6)\nplt.show(block=False)\nplt.pause(5)\n\nmarked = dict()\nfor i in coordinates[0]:\n for j in coordinates[1]:\n marked[(i, j)] = False\n\ntrace, dist, parent = a_star_search(origin, goal, heuristic, coordinates, obstacles, marked, z)\nwhile parent != origin:\n print(parent[0], parent[1])\n trajectory = ax.plot([x[parent[0]][parent[1]]], [y[parent[0]][parent[1]]],\n [z[parent[0]][parent[1]]], markerfacecolor='r',\n markeredgecolor='r', marker='o', markersize=5, alpha=0.6)\n parent = trace[parent]\nplt.draw()\n\nprint(\"You have reached the goal!\")\nprint(\"The final distance walked is\", dist)\nplt.show()\n"
},
{
"alpha_fraction": 0.6745055913925171,
"alphanum_fraction": 0.6867985129356384,
"avg_line_length": 34.980770111083984,
"blob_id": "3aff64cb176ecb3147f57f20cf082645edfeeeb0",
"content_id": "75506956b273cc5fbc7f555ed5a9857d0c57f12c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1871,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 52,
"path": "/friction.py",
"repo_name": "edwinbalani/grov",
"src_encoding": "UTF-8",
"text": "# Copyright 2017 Edwin Bahrami Balani and Qiaochu Jiang\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions related to probe traction\"\"\"\n\nimport numpy as np\nimport analysis\n\ndef fix_angle_range(a: float):\n \"\"\"Fix radian angles to range [0, pi]\"\"\"\n while a < 0:\n a += 2*np.pi\n while a > 2*np.pi:\n a -= 2*np.pi\n if a > np.pi:\n a = 2*np.pi - a\n return a\n\n\ndef safe_slope(grad: np.ndarray, mu: float):\n \"\"\"\n Return a True/False value determining whether the current slope is safe, given a coefficient of friction.\n :param grad: Gradient vector representing steepest slope at a point\n :param mu: Coefficient of friction\n :return:\n \"\"\"\n if not grad.size == 2:\n raise ValueError(\"Gradient vector must have two components\")\n grad = np.linalg.norm(grad)\n phi = np.arctan2(mu, 1) # Angle of limiting friction\n slope_angle = fix_angle_range(np.arctan2(grad[1], grad[0]))\n return slope_angle <= phi\n\n\ndef safe_point(grid, indices, mu, window_size=5):\n \"\"\"\n Determine whether a point is safe for the probe to climb.\n :param grid: 2-D grid of Z values\n :param indices: indices of point\n :param mu: coefficient of friction at point\n :param window_size: window size for gradient fit calculation (default 5)\n :return:\n \"\"\"\n return safe_slope(analysis.gradient(grid, indices, window_size=window_size), mu)\n"
}
] | 7 |
dethmix/test | https://github.com/dethmix/test | 2309d6b1c7990a26263a979777684072df22c0c1 | 81ea2ee9ff2c92d465b92926a1cac8487f2cd6da | dcf5ff8f3c267d677c5fb42f3a1a30f991a784a7 | refs/heads/master | 2021-01-10T11:58:49.578388 | 2016-02-10T09:37:09 | 2016-02-10T09:37:09 | 51,426,652 | 0 | 0 | null | 2016-02-10T07:22:42 | 2016-02-10T09:21:29 | 2016-02-10T09:37:09 | Python | [
{
"alpha_fraction": 0.6717557311058044,
"alphanum_fraction": 0.7137404680252075,
"avg_line_length": 15.375,
"blob_id": "64c299f954bc73c948b7530477f2265e3a1b7898",
"content_id": "c5745c15176c79c4f9ba6ca47a373b5cc1e94fca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 16,
"path": "/README.md",
"repo_name": "dethmix/test",
"src_encoding": "UTF-8",
"text": "# test\nTest repo\n\nI had to redone this page due to reverting pull request back from the previous branch. So, again:\n\n* test\n* test1\n * test2\n * test3\n\n1. test\n2. test2\n3. test3\n4. test4\n\n[panda link](http://perec.info/wp-content/uploads/perecinfo-panda-1.jpg)\n"
},
{
"alpha_fraction": 0.5483871102333069,
"alphanum_fraction": 0.5483871102333069,
"avg_line_length": 11.399999618530273,
"blob_id": "8d93a82f9d5a6dec16bbacc678128ee312f8963f",
"content_id": "4ae818fccb168a9e9beac1c37b0c816d40b0f2bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 10,
"path": "/first.py",
"repo_name": "dethmix/test",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\ndef foo(bar):\n if not bar:\n return True\n print 'Function works!'\n \n\"\"\"\nHere we start\n\"\"\"\n"
}
] | 2 |
SheynD/Dawg_Picker | https://github.com/SheynD/Dawg_Picker | 3b4ee9dae3abe8357f4091e2ffbc08ed877b921c | fe842cc688f892d628e1c9ea246ff4bb92a97f5e | e5ab54d81ee22baaa33e848e43341c98fcdd9961 | refs/heads/master | 2021-04-03T09:18:11.499723 | 2018-04-03T01:00:22 | 2018-04-03T01:00:22 | 124,965,827 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6524926424026489,
"alphanum_fraction": 0.6561583280563354,
"avg_line_length": 34.894737243652344,
"blob_id": "25a707bc4bb5622015b68d69f3cffdf86b447f02",
"content_id": "85b429bfd7cee460c87504bc3aab6ade776af170",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1364,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 38,
"path": "/dog_scraper.py",
"repo_name": "SheynD/Dawg_Picker",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScrape dog stats/info from dogtime.com/dog-breeds\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\n\nif __name__ == \"__main__\":\n\n\twith open(\"characteristic_stats.csv\", \"w+\") as out:\n\t\tout.write(\"breed,characteristic,rating\\n\")\n\n\t\turl = \"http://www.dogtime.com/dog-breeds\"\n\t\tr = requests.get(url)\n\t\tpage = BeautifulSoup(r.text, \"lxml\")\n\n\t\tmain_box = page.find(\"div\", {\"class\": \"group with-image-mobile-only\"})\n\t\tall_breeds = main_box.findAll(\"div\", recursive=False) # recursive=False will only find matching tags on the top level\n\n\t\tfor letter_group in all_breeds:\n\t\t\tbreeds = letter_group.findAll(\"div\", recursive=False)\n\n\t\t\tfor breed in breeds:\n\t\t\t\tbreed_url = breed.find(\"a\").get(\"href\")\n\t\t\t\tr = requests.get(breed_url)\n\t\t\t\tpage = BeautifulSoup(r.text, \"lxml\")\n\n\t\t\t\tbreed = breed_url.split(\"/\")[-1] # http://dogtime.com/dog-breeds/saint-bernard\n\t\t\t\tmain_box = page.find(\"div\", {\"class\": \"inside-box\"})\n\t\t\t\tcharacteristics = main_box.findAll(\"div\", recursive=False)\n\n\t\t\t\tfor characteristic in characteristics:\n\t\t\t\t\tname = characteristic.find(\"span\", {\"class\": \"characteristic item-trigger-title\"}).get_text()\n\t\t\t\t\tstars = characteristic.select(\"span[class*='star star-']\")[0]\n\t\t\t\t\trating = stars.get(\"class\")[1].split(\"-\")[1]\n\n\t\t\t\t\tout.write(\"%s,%s,%i\\n\" % (breed.strip(), name.strip(), int(rating)))\n\t\t\t\t\n\t\t\t\tprint(\"Pulling data for %s... \" % breed.strip())\n"
},
{
"alpha_fraction": 0.7804877758026123,
"alphanum_fraction": 0.7804877758026123,
"avg_line_length": 19.5,
"blob_id": "ef6414a5602092989a4442bd35cddb4ca20db370",
"content_id": "f48e973c8a2ed8c12ec70057f0c5228dee2dd46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/README.md",
"repo_name": "SheynD/Dawg_Picker",
"src_encoding": "UTF-8",
"text": "# Dawg_Picker\nChoosing man's best friend\n"
}
] | 2 |
RGunning/Pokemon_Go_API | https://github.com/RGunning/Pokemon_Go_API | 941b5fdf9dca044d79a764665d829dfe45804976 | 0200af20265a27db70410c910969be40cf615fe7 | ab46b00076b2d20b7c523a7513e24c9ba3a063cc | refs/heads/master | 2023-04-10T11:03:21.669964 | 2016-07-29T18:05:32 | 2016-07-29T18:05:32 | 64,484,524 | 0 | 0 | null | 2016-07-29T13:56:27 | 2016-07-29T13:56:28 | 2023-03-24T22:26:57 | Python | [
{
"alpha_fraction": 0.6144638657569885,
"alphanum_fraction": 0.6149625778198242,
"avg_line_length": 32.98305130004883,
"blob_id": "5f15d8b423424d5432b87e30f0368098f3899550",
"content_id": "afcfd62120e7743efc2e0cab7fd5343d495c7556",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2005,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 59,
"path": "/main.py",
"repo_name": "RGunning/Pokemon_Go_API",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport platform\nfrom getpass import getpass\n\nimport dirty\nimport login\n\n\ndef get_acces_token(usr, pws, type):\n access_token = None\n ltype = None\n if 'goo' in type:\n print '[!] Using google as login..'\n google_data = None\n if platform.system() == 'Windows':\n google_data = login.login_google(usr, pws)\n if google_data is not None:\n access_token = google_data['id_token']\n else:\n access_token = login.login_google_v2(usr, pws)\n if access_token is not None:\n ltype = 'google'\n else:\n print '[!] I am a poketrainer..'\n access_token = login.login_pokemon(usr, pws)\n ltype = 'ptc'\n dirty.accessToken = access_token\n dirty.globalltype = ltype\n return access_token, ltype\n\n\ndef main():\n if platform.system() == 'Windows':\n os.system(\"title Pokemon GO API Python\")\n os.system(\"cls\")\n else:\n # Catches \"Lunux\" and \"Darwin\" (OSX), among others\n os.system(\"clear\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--username\", help=\"Login\", default=None)\n parser.add_argument(\"-p\", \"--password\", help=\"Password\", default=None)\n parser.add_argument(\"-t\", \"--type\", help=\"Google/PTC\", required=True)\n parser.add_argument(\"-l\", \"--location\", help=\"Location\", required=True)\n # parser.add_argument(\"-d\", \"--distance\", help=\"Distance\", required=True)\n dirty.argsStored = parser.parse_args()\n if not dirty.argsStored.username:\n dirty.argsStored.username = getpass(\"Username: \")\n if not dirty.argsStored.password:\n dirty.argsStored.password = getpass(\"Password: \")\n if 'ptc' in dirty.argsStored.type.lower() or 'goo' in dirty.argsStored.type.lower():\n # config.distance=dirty.argsStored.distance\n dirty.start()\n else:\n print '[!] used type \"%s\" only Google or PTC valid' % (dirty.argsStored.type.lower())\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5365579128265381,
"alphanum_fraction": 0.5426321625709534,
"avg_line_length": 31.67424201965332,
"blob_id": "a174b95a6034a3b5daa8e9f4d1b203720139e7a6",
"content_id": "b285dc1562722cb37481db1f98df83e9b187de89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4445,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 132,
"path": "/dirty.py",
"repo_name": "RGunning/Pokemon_Go_API",
"src_encoding": "UTF-8",
"text": "import time\r\nfrom multiprocessing import Process\r\n\r\nimport api\r\nimport config\r\nimport location\r\nimport logic\r\nimport main\r\nimport pokemon_pb2\r\n\r\nmulti = False\r\n\r\nargsStored = []\r\nstartTime = time.time()\r\naccessToken = None\r\ngloballtype = None\r\n\r\n\r\ndef start():\r\n global argsStored\r\n while True:\r\n if accessToken is None or globalltype is None:\r\n refresh_access()\r\n location.set_location(argsStored.location)\r\n print '[+] Token:', accessToken[:40] + '...'\r\n prot1 = logic.gen_first_data(accessToken, globalltype)\r\n local_ses = api.get_rpc_server(accessToken, prot1)\r\n new_rcp_point = 'https://%s/rpc' % (local_ses.rpc_server,)\r\n work_stop(local_ses, new_rcp_point)\r\n\r\n\r\ndef refresh_access():\r\n global accessToken, globalltype\r\n accessToken, globalltype = main.get_acces_token(argsStored.username, argsStored.password, argsStored.type.lower())\r\n if accessToken is None:\r\n print '[-] access Token bad'\r\n raise RuntimeError\r\n\r\n\r\ndef walk_random():\r\n COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE = location.get_location_coords()\r\n COORDS_LATITUDE = location.l2f(COORDS_LATITUDE)\r\n COORDS_LONGITUDE = location.l2f(COORDS_LONGITUDE)\r\n COORDS_ALTITUDE = location.l2f(COORDS_ALTITUDE)\r\n COORDS_LATITUDE = COORDS_LATITUDE + config.steps\r\n COORDS_LONGITUDE = COORDS_LONGITUDE + config.steps\r\n location.set_location_coords(COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)\r\n\r\n\r\ndef split_list(a_list):\r\n half = len(a_list) / 2\r\n return a_list[:half], a_list[half:]\r\n\r\n\r\ndef work_half_list(part, ses, new_rcp_point):\r\n for t in part:\r\n if config.debug:\r\n print '[!] farming pokestop..'\r\n work_with_stops(t, ses, new_rcp_point)\r\n\r\n\r\ndef work_stop(local_ses, new_rcp_point):\r\n while True:\r\n proto_all = logic.all_stops(local_ses)\r\n all_stops = api.use_api(new_rcp_point, proto_all)\r\n maps = pokemon_pb2.maps()\r\n maps.ParseFromString(all_stops)\r\n data_list = location.get_near(maps)\r\n data_list = sorted(data_list, key=lambda x: x[1])\r\n if len(data_list) > 0:\r\n print '[+] found: %s Pokestops near' % (len(data_list))\r\n if local_ses is not None and data_list is not None:\r\n print '[+] starting show'\r\n if multi:\r\n a, b = split_list(data_list)\r\n p = Process(target=work_half_list, args=(a, local_ses.ses, new_rcp_point))\r\n o = Process(target=work_half_list, args=(a, local_ses.ses, new_rcp_point))\r\n p.start()\r\n o.start()\r\n p.join()\r\n o.join()\r\n print '[!] farming done..'\r\n else:\r\n for t in data_list:\r\n if config.debug:\r\n print '[!] farming pokestop..'\r\n if not work_with_stops(t, local_ses.ses, new_rcp_point):\r\n break\r\n else:\r\n walk_random()\r\n\r\n\r\ndef work_with_stops(current_stop, ses, new_rcp_point):\r\n Kinder = logic.gen_stop_data(ses, current_stop)\r\n tmp_api = api.use_api(new_rcp_point, Kinder)\r\n try:\r\n if tmp_api is not None:\r\n map = pokemon_pb2.map()\r\n map.ParseFromString(tmp_api)\r\n st = map.sess[0].status\r\n config.earned_xp += map.sess[0].amt\r\n if st == 4:\r\n print \"[!] +%s (%s)\" % (map.sess[0].amt, config.earned_xp)\r\n elif st == 3:\r\n print \"[!] used\"\r\n elif st == 2:\r\n print \"[!] charging\"\r\n elif st == 1:\r\n print \"[!] 
walking..\"\r\n expPerHour()\r\n time.sleep(14)\r\n work_with_stops(current_stop, ses, new_rcp_point)\r\n else:\r\n print \"[?]:\", st\r\n else:\r\n print '[-] tmp_api empty'\r\n return True\r\n except:\r\n print '[-] error work_with_stops - Trying to restart process'\r\n return False\r\n\r\n\r\ndef expPerHour():\r\n diff = time.time() - startTime\r\n minutesRun = diff / 60.\r\n hoursRun = minutesRun / 60.\r\n earned = float(config.earned_xp)\r\n if hoursRun > 0:\r\n expHour = int(earned / hoursRun)\r\n else:\r\n expHour = \"n/a\"\r\n print \"[!] Gained: %s (%s exp/h)\" % (config.earned_xp, expHour)\r\n"
},
{
"alpha_fraction": 0.5929778814315796,
"alphanum_fraction": 0.6254876255989075,
"avg_line_length": 30.723403930664062,
"blob_id": "14f1c130f0542880b210018801b021f0d34bf354",
"content_id": "273e2088ac2a3a1b9223acb7b40e760c7e78b849",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1538,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 47,
"path": "/config.py",
"repo_name": "RGunning/Pokemon_Go_API",
"src_encoding": "UTF-8",
"text": "import requests\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\n\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\r\n\r\n# urls\r\napi_url = 'https://pgorelease.nianticlabs.com/plfe/rpc'\r\nlogin_url = 'https://sso.pokemon.com/sso/oauth2.0/authorize?client_id=mobile-app_pokemon-go&redirect_uri=https%3A%2F%2Fwww.nianticlabs.com%2Fpokemongo%2Ferror'\r\nlogin_oauth = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'\r\n# urls end\r\n\r\n# values\r\nuse_proxy = False\r\ndebug = False\r\n# distance=0\r\nsteps = 0.000095\r\ngoogle = True\r\npub = None\r\nearned_xp = 0\r\nuse_powerball = False\r\n# values end\r\n\r\n# session\r\nproxies = {\r\n 'http': 'http://127.0.0.1:8888',\r\n 'https': 'http://127.0.0.1:8888',\r\n}\r\ns = requests.session()\r\nif use_proxy:\r\n s.proxies.update(proxies)\r\n s.verify = False\r\ns.headers.update({'User-Agent': 'Niantic App'})\r\n# session end\r\n###########################################################################################\r\n# public\r\nAPI_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'\r\nLOGIN_URL = 'https://sso.pokemon.com/sso/login?service=https%3A%2F%2Fsso.pokemon.com%2Fsso%2Foauth2.0%2FcallbackAuthorize'\r\nLOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'\r\nPTC_CLIENT_SECRET = 'w8ScCUXJQc6kXKw8FiOhd8Fixzht18Dq3PEVkUCP5ZPxtgyWsbTvWHFLm2wNY0JR'\r\n\r\nSESSION = requests.session()\r\nSESSION.headers.update({'User-Agent': 'Niantic App'})\r\nif use_proxy:\r\n SESSION.proxies.update(proxies)\r\n SESSION.verify = False\r\nDEBUG = True\r\n###########################################################################################\r\n"
},
{
"alpha_fraction": 0.5278587341308594,
"alphanum_fraction": 0.5671181082725525,
"avg_line_length": 26.876237869262695,
"blob_id": "8f7712c01652b9afdf29cb18abe8b0d140272568",
"content_id": "0cd6c8c890fd1ab312982f70bbcbcc8c33d72eaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5833,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 202,
"path": "/location.py",
"repo_name": "RGunning/Pokemon_Go_API",
"src_encoding": "UTF-8",
"text": "import math\r\nimport struct\r\nfrom math import radians, cos, sin, asin, sqrt\r\n\r\nfrom geopy.distance import vincenty\r\nfrom geopy.geocoders import GoogleV3\r\n\r\nimport config\r\n\r\nCOORDS_LATITUDE = 0\r\nCOORDS_LONGITUDE = 0\r\nCOORDS_ALTITUDE = 0\r\nFLOAT_LAT = 0\r\nFLOAT_LONG = 0\r\n\r\n\r\ndef get_location_coords():\r\n return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)\r\n\r\n\r\ndef get_lat():\r\n return COORDS_LATITUDE\r\n\r\n\r\ndef get_lot():\r\n return COORDS_LONGITUDE\r\n\r\n\r\ndef set_lat(new):\r\n global COORDS_LATITUDE\r\n COORDS_LATITUDE = f2i(new)\r\n\r\n\r\ndef set_lot(new):\r\n global COORDS_LONGITUDE\r\n COORDS_LONGITUDE = f2i(new)\r\n\r\n\r\ndef set_location(location_name):\r\n geolocator = GoogleV3()\r\n loc = geolocator.geocode(location_name)\r\n\r\n print('[!] Your given location: {}'.format(loc.address.encode('utf-8')))\r\n set_location_coords(loc.latitude, loc.longitude, loc.altitude)\r\n\r\n\r\ndef set_location_coords(lat, long, alt):\r\n if config.debug:\r\n print('[!] lat/long/alt: {} {} {}'.format(lat, long, alt))\r\n global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE\r\n global FLOAT_LAT, FLOAT_LONG\r\n FLOAT_LAT = lat\r\n FLOAT_LONG = long\r\n COORDS_LATITUDE = f2i(lat)\r\n COORDS_LONGITUDE = f2i(long)\r\n COORDS_ALTITUDE = f2i(alt)\r\n\r\n\r\ndef encode(cellid):\r\n output = []\r\n encoder._VarintEncoder()(output.append, cellid)\r\n return ''.join(output)\r\n\r\n\r\ndef getNeighbors():\r\n origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)).parent(15)\r\n walk = [origin.id()]\r\n # 10 before and 10 after\r\n next = origin.next()\r\n prev = origin.prev()\r\n for i in range(10):\r\n walk.append(prev.id())\r\n walk.append(next.id())\r\n next = next.next()\r\n prev = prev.prev()\r\n return walk\r\n\r\n\r\ndef i2f(int):\r\n return struct.unpack('<Q', struct.pack('<d', int))[0]\r\n\r\n\r\ndef f2h(float):\r\n return hex(struct.unpack('<Q', struct.pack('<d', float))[0])\r\n\r\n\r\ndef f2i(float):\r\n return struct.unpack('<Q', struct.pack('<d', float))[0]\r\n\r\n\r\ndef l2f(float):\r\n return struct.unpack('d', struct.pack('Q', int(bin(float), 0)))[0]\r\n\r\n\r\ndef h2f(hex):\r\n return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]\r\n\r\n\r\ndef get_near(map):\r\n ms = []\r\n ms.append(('start', get_lat(), get_lot(), get_distance(get_lat(), get_lot(), COORDS_LATITUDE, COORDS_LONGITUDE)))\r\n for cell in [map]:\r\n for block in cell.b:\r\n for obj in block.c:\r\n for stop in obj.s:\r\n # if distance(stop.lat,stop.lon,COORDS_LATITUDE,COORDS_LONGITUDE):\r\n ms.append((stop.name, stop.lat, stop.lon,\r\n get_distance(stop.lat, stop.lon, COORDS_LATITUDE, COORDS_LONGITUDE)))\r\n return ms\r\n\r\n\r\ndef get_near_p(map):\r\n ms = []\r\n ms.append(('start', get_lat(), get_lot(), 'start', 'start',\r\n get_distance(get_lat(), get_lot(), COORDS_LATITUDE, COORDS_LONGITUDE)))\r\n for cell in [map]:\r\n for block in cell.b:\r\n for obj in block.c:\r\n for stop in obj.p:\r\n # if distance(stop.lat,stop.lon,COORDS_LATITUDE,COORDS_LONGITUDE):\r\n ms.append((stop.t.type, stop.lat, stop.lon, stop.name, stop.hash,\r\n get_distance(stop.lat, stop.lon, COORDS_LATITUDE, COORDS_LONGITUDE)))\r\n # for stop in obj.s:\r\n #\tif stop.p.type:\r\n #\t\tms.append((stop.p.type,stop.lat,stop.lon,stop.name,stop.p.u2,get_distance(stop.lat,stop.lon,COORDS_LATITUDE,COORDS_LONGITUDE)))\r\n return ms\r\n\r\n\r\ndef move_to(lat1, lot1, lat2, lot2):\r\n if (lat1 > lat2):\r\n while (lat1 < lat2):\r\n lat1 = lat1 - 0.000095\r\n else:\r\n while 
(lat1 < lat2):\r\n lat1 = lat1 + 0.000095\r\n if (lot1 > lot2):\r\n while (lot1 > lot2):\r\n lot1 = lot1 - 0.000095\r\n else:\r\n while (lot2 > lot1):\r\n lot1 = lot1 + 0.000095\r\n return lat1, lot1, lat2, lot2\r\n\r\n\r\ndef distance(lat1, lon1, lat2, lon2):\r\n lat1 = l2f(lat1)\r\n lon1 = l2f(lon1)\r\n lat2 = l2f(lat2)\r\n lon2 = l2f(lon2)\r\n radius = 6371 # km *1000 m\r\n dlat = math.radians(lat2 - lat1)\r\n dlon = math.radians(lon2 - lon1)\r\n a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(\r\n dlon / 2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\r\n d = radius * c * 1000\r\n return d < config.distance\r\n\r\n\r\ndef get_distance(lat1, lon1, lat2, lon2):\r\n lat1 = l2f(lat1)\r\n lon1 = l2f(lon1)\r\n lat2 = l2f(lat2)\r\n lon2 = l2f(lon2)\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\r\n c = 2 * asin(sqrt(a))\r\n meter = 6367000 * c\r\n return meter\r\n\r\n\r\ndef haversine(lon1, lat1, lon2, lat2):\r\n lat1 = l2f(lat1)\r\n lon1 = l2f(lon1)\r\n lat2 = l2f(lat2)\r\n lon2 = l2f(lon2)\r\n \"\"\"\r\n Calculate the great circle distance between two points\r\n on the earth (specified in decimal degrees)\r\n \"\"\"\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\r\n c = 2 * asin(sqrt(a))\r\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\r\n return c * r * 1000\r\n\r\n\r\ndef is_near(locx, locy, myx, myy):\r\n tmp1 = (l2f(locx), l2f(locy))\r\n tmp2 = (l2f(myx), l2f(myy))\r\n res = vincenty(tmp1, tmp2).meters\r\n return res < config.distance\r\n"
}
] | 4 |
Grisselle-Lab/python-challenge | https://github.com/Grisselle-Lab/python-challenge | 9644161ec97ef25ce7dbc3114d0cc12a7b134c75 | 21c8fdbd7ff5c891114da22a0bc527ffa3342326 | 3ffd6eba8f7c536c3e2aaed7c5e574c413fc124c | refs/heads/master | 2022-12-23T09:34:33.372486 | 2020-09-29T06:12:59 | 2020-09-29T06:12:59 | 299,520,768 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 14.5,
"blob_id": "f8b724b1b90a266399b1b2b9e485e7882e6b2ef4",
"content_id": "1fe19f8f1446f7b92cc4a4461f9af878dfd6982e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Grisselle-Lab/python-challenge",
"src_encoding": "UTF-8",
"text": "# python challenge\n Homework 3\n"
},
{
"alpha_fraction": 0.6980982422828674,
"alphanum_fraction": 0.7300581336021423,
"avg_line_length": 51.58333206176758,
"blob_id": "ff690c6fa077494feefc131fcb36fae09f4a51b4",
"content_id": "45313d0b6b2a493fa0b40bee00e86966b10d5fdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3786,
"license_type": "no_license",
"max_line_length": 287,
"num_lines": 72,
"path": "/PyBank/main.py.py",
"repo_name": "Grisselle-Lab/python-challenge",
"src_encoding": "UTF-8",
"text": "# This task is to create a Python script that analyzes the records to calculate each of the following:\n# 1-The total number of months included in the dataset\n# 2-The net total amount of \"Profit/Losses\" over the entire period\n# 3-The average of the changes in \"Profit/Losses\" over the entire period\n# 4-The greatest increase in profits (date and amount) over the entire period\n# 5-The greatest decrease in losses (date and amount) over the entire period\n\n#First I set my environment, importing the file that contains the information that the code will use.\n#Imported the csv, and by preinspecting the file in excel we can identify and set the months and profits as lists.\nimport csv\nimport sys\ndate = []\nprofloss = []\n\n#set file budget_data.csv to be open/read by rows per column header (months and profit)\nwith open(\"budget_data.csv\") as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n #set = split(row,',')\n date.append(row[0])\n profloss.append(row[1])\n\n#Since I am going to do to calculations and need to return an index value I will use [].pop then categorize the data and convert the numerical into float (decimal).\ndate.pop(0)\nprofloss.pop(0)\nfor i in range(0,len(profloss)):\n profloss[i] = float(profloss[i])\n\n#Once everything properly identify, I can start the code for some findings, calculations and reports.\n#The net total number and sum of \"Profit/Losses\" over the entire period\ntotal = len(profloss)\ntotal_profloss = sum(profloss)\n\n# The average of the changes in \"Profit/Losses\" over the entire period\n# Reference 1\navg_profloss = total_profloss/(float(total))\n\n#The greatest increase in profits (date and amount) over the entire period.\n# Reference 5\nincrease = max(profloss)\nmax = [i for i,j in enumerate(profloss) if j == increase]\n\n# The greatest decrease in losses (date and amount) over the entire period\n# Reference 5\ndecrease = min(profloss)\nmin = [i for i,j in enumerate(profloss) if j == decrease]\n\n#Report output using \"pout\" for print out for Total Months, Total Profit, Average Change, Greates Increase Profict,\n#and Greates Decrease in Profit.\n#Reference 2\ndef pout():\n print(\"Financial Analysis\")\n print(\"---------------------------------------------\")\n print(\"Total Months: {0}\".format(total))\n print(\"Total Profit: ${0:.0f}\".format(total_profloss))\n print(\"Average Change: ${0:.2f}\".format(avg_profloss))\n print(\"Greatest Increase in Profit: {0} (${1:.0f})\".format(date[max[0]],increase))\n print(\"Greatest Decrease in Profit: {0} (${1:.0f})\".format(date[min[0]],decrease))\n\n#Final output prints and exports summary of the analysis to terminal in .txt format\n#Reference 4\npout()\nwith open(\"Summary_Budget_Data.txt\",'w+') as flush:\n sys.stdout = flush\n pout()\n\n# References Sample \n# 1-Gallagher. J, (2020 July 15).Python Average: A Step-by-Step Guide. URL: https://careerkarma.com/blog/python-average/\n# 2-Marcyes. J,(2020 May 3). Effective use for \"PrintOut\"in Python.PYPI.ORG. URL:https://pypi.org/project/pout/#:~:text=A%20collection%20of%20handy%20functions,variables%20and%20debugging%20Python%20code.&text=Pout%20tries%20to%20print%20out,when%20you're%20done%20debugging. \n# 3-Ramalho. L (2005).Fluent Python: Clear, Concise, and Effective Programming. Dictionaries and Sets:Set of Operations. 1st ed. ch3,ch4.O'Reilly Media Inc\n# 4-Saha. R, (2020 April 10). File flush() method in Python.GeekforGeeks.org. 
URL: https://www.geeksforgeeks.org/file-flush-method-in-python/#:~:text=The%20flush()%20method%20in,using%20the%20flush()%20method.&text=This%20method%20does%20not%20require,it%20does%20not%20return%20anything.\n# 5-How to find the max and min of a list in Python. (n.d.) Kite. Retrieved September 24, 2020 from: https://www.kite.com/python/answers/how-to-find-the-max-and-min-of-a-list-in-python\n"
},
{
"alpha_fraction": 0.6831875443458557,
"alphanum_fraction": 0.71331387758255,
"avg_line_length": 43.2365608215332,
"blob_id": "107e236812ed56633974cc9bb7fe2dd76545438e",
"content_id": "ec1d39b25389a90f532313f10e3ada53f90c727d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4116,
"license_type": "no_license",
"max_line_length": 287,
"num_lines": 93,
"path": "/PyPoll/main.py.py",
"repo_name": "Grisselle-Lab/python-challenge",
"src_encoding": "UTF-8",
"text": "\n# This task is to create a Python script that analyzes the votes and calculates each of the following:\n\n# 1-The total number of votes cast\n\n# 2-A complete list of candidates who received votes\n\n# 3-The percentage of votes each candidate won\n\n# 4-The total number of votes each candidate won\n\n# 5-The winner of the election based on popular vote.\n\n#First I set my environment, importing the file that contains the information that the code will use.\n#Imported the csv, and by preinspecting the file in excel we can identify and set the the voteid as voterslist, votelist and country. I change the terms lightly to help me trace the calculations and made a personal signature.\n\nimport csv\nimport sys\nvotelist = []\nvoterslist = []\ncountylist = []\n\n# list of candidates\ncandidates = []\ncandidatespercent = float\ncandidatesvotelist = int\n\n#set file election_data.csv to be open/read by rows per column header (voterslist,votelist and countrylist)\nwith open(\"election_data.csv\") as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n voterslist.append(row[0])\n countylist.append(row[1])\n votelist.append(row[2])\n # populate candidate list if new candidate\n if row[2] not in candidates:\n candidates.append(row[2])\n\n#Since I am going to do to calculations and need to return an index value I will use [].pop then categorize the data and convert the numerical into float (decimal).\n#Reference 2\nvoterslist.pop(0)\ncountylist.pop(0)\nvotelist.pop(0)\ncandidates.remove(\"Candidate\")\n\n# Once everything properly identify, I can start the code for some findings, calculations and reports.\n# The total number of votes cast\ncount = len(voterslist)\ncandidatesvotelist = []\nfor x in candidates:\n candidatesvotelist.append(0)\n\n# The complete list of candidates who received votes \nfor x in votelist:\n i = candidates.index(x)\n candidatesvotelist[i] = candidatesvotelist[i] + 1\n\n# The percentage of votes each candidate won\n# Reference 3\ncandidatespercent = []\nfor i in range(0,len(candidatesvotelist)):\n candidatespercent.append(round(float(candidatesvotelist[i])/float(count)*100))\n\n# The winner of the election based on the popular vote\nmaxvotelist = max(candidatesvotelist)\nwindex = [i for i,y in enumerate(candidatesvotelist) if y == maxvotelist]\nwinner = candidates[windex[0]]\n\n# Report output using \"pout\" for print out for Election Results.\n# Rerefence 2\ndef pout():\n print(\"Election Results\")\n print(\"----------------------------------\")\n print(\"Total Votes : {0}\".format(count))\n print(\"------------------------------\")\n for i in range(0,len(candidatesvotelist)):\n print(\"{0}: {1}% ({2})\".format(candidates[i],candidatespercent[i],candidatesvotelist[i]))\n print(\"----------------------------\")\n print(\"Winner : {0} \".format(winner))\n print(\"------------------------------\")\n\n#Final output prints and exports summary of the analysis to terminal in .txt format\n#Reference 2 and 4\npout()\nwith open(\"Summary_Election_Data.txt\",'w+') as flush:\n sys.stdout = flush\n pout()\n\n# References Sample \n# 1-Gallagher. J, (2020 July 15).Python Average: A Step-by-Step Guide. URL: https://careerkarma.com/blog/python-average/\n# 2-Marcyes. J,(2020 May 3). Effective use for \"PrintOut\"in Python.PYPI.ORG. URL:https://pypi.org/project/pout/#:~:text=A%20collection%20of%20handy%20functions,variables%20and%20debugging%20Python%20code.&text=Pout%20tries%20to%20print%20out,when%20you're%20done%20debugging. \n# 3-Ramalho. 
L (2005).Fluent Python: Clear, Concise, and Effective Programming. Dictionaries and Sets:Set of Operations. 1st ed. ch3,ch4.O'Reilly Media Inc\n# 4-Saha. R, (2020 April 10). File flush() method in Python.GeekforGeeks.org. URL: https://www.geeksforgeeks.org/file-flush-method-in-python/#:~:text=The%20flush()%20method%20in,using%20the%20flush()%20method.&text=This%20method%20does%20not%20require,it%20does%20not%20return%20anything.\n# 5-How to find the max and min of a list in Python. (n.d.) Kite. Retrieved September 24, 2020 from: https://www.kite.com/python/answers/how-to-find-the-max-and-min-of-a-list-in-python\n\n"
}
] | 3 |
tingwen0125/L-store-database-management-system | https://github.com/tingwen0125/L-store-database-management-system | bda8aa0b2dce7ffa1c83484adcda5bbf0d530d87 | 60f3bf468d32140802c6f5ef09701c8bdfc7dfc1 | cd8fae7e0e6caf0c1643801b910f9a923d2c2a84 | refs/heads/main | 2023-03-07T11:58:00.582018 | 2021-02-04T06:04:01 | 2021-02-04T06:04:01 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6695082187652588,
"alphanum_fraction": 0.6695082187652588,
"avg_line_length": 30.446807861328125,
"blob_id": "cea17862428f2af3976be9c50b87e2616a23828d",
"content_id": "380ccd01faa866e700b85b0cad4b1fffeef7ee6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1525,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 47,
"path": "/template/index.py",
"repo_name": "tingwen0125/L-store-database-management-system",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nA data strucutre holding indices for various columns of a table. \r\nKey column should be indexd by default, other columns can be indexed through this object.\r\nIndices are usually B-Trees, but other data structures can be used as well.\r\n\r\nThe Index class provides a data structure that allows fast processing of queries (e.g.,\r\nselect or update) by indexing columns of tables over their values. Given a certain\r\nvalue for a column, the index should efficiently locate all records having that value. The\r\nkey column of all tables is usually indexed by default for performance reasons.\r\nSupporting indexing is optional for this milestone. The API for this class exposes the\r\ntwo functions create_index and drop_index (optional for this milestone).\r\n\"\"\"\r\n\r\nclass Index:\r\n\r\n def __init__(self, table):\r\n # One index for each table. All are empty initially.\r\n self.indices = [None] * table.num_columns\r\n pass\r\n\r\n \"\"\"\r\n # returns the location of all records with the given value on column \"column\"\r\n \"\"\"\r\n\r\n def locate(self, column, value):\r\n pass\r\n\r\n \"\"\"\r\n # Returns the RIDs of all records with values in column \"column\" between \"begin\" and \"end\"\r\n \"\"\"\r\n\r\n def locate_range(self, begin, end, column):\r\n pass\r\n\r\n \"\"\"\r\n # optional: Create index on specific column\r\n \"\"\"\r\n\r\n def create_index(self, column_number):\r\n pass\r\n\r\n \"\"\"\r\n # optional: Drop index of specific column\r\n \"\"\"\r\n\r\n def drop_index(self, column_number):\r\n pass\r\n"
},
{
"alpha_fraction": 0.6344113349914551,
"alphanum_fraction": 0.6432315111160278,
"avg_line_length": 39.73958206176758,
"blob_id": "ebabf3cce3e926e0b0e7de0dc30b1960c79d3cc0",
"content_id": "812579437f8029033ff317c86fc2f78d55e80841",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7823,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 192,
"path": "/template/query.py",
"repo_name": "tingwen0125/L-store-database-management-system",
"src_encoding": "UTF-8",
"text": "from template.table import Table, Record\nfrom template.index import Index\nfrom template.page import Page, BasePage, PageRange\nimport datetime\n'''\nThe Query class provides standard SQL operations such as insert, select,\nupdate, delete and sum. The select function returns the specified set of columns\nfrom the record with the given key (if available). The insert function will insert a new\nrecord in the table. All columns should be passed a non-NULL value when inserting. The\nupdate function updates values for the specified set of columns. The delete function\nwill delete the record with the specified key from the table. The sum function will sum\nover the values of the selected column for a range of records specified by their key\nvalues. We query tables by direct function calls rather than parsing SQL queries.\n'''\n\nclass Query:\n \"\"\"\n # Creates a Query object that can perform different queries on the specified table \n Queries that fail must return False\n Queries that succeed should return the result or True\n Any query that crashes (due to exceptions) should return False\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n pass\n\n \"\"\"\n # internal Method\n # Read a record with specified key\n # Returns True upon succesful deletion\n # Return False if record doesn't exist or is locked due to 2PL\n \"\"\"\n def delete(self, key):\n\n pass\n\n \"\"\"\n # Insert a record with specified columns\n # Return True upon succesful insertion\n # Returns False if insert fails for whatever reason\n \"\"\"\n def insert(self, *columns):\n '''record example:[0, 0, 20210131111207, 0, 906659671, 93, 0, 0, 0]'''\n # Check if key is duplicated\n if (columns[self.table.key] in self.table.keyToBaseRID):\n return False\n total_col = []\n schema_encoding = int('0' * self.table.num_columns, 2)\n time = datetime.datetime.now()\n int_time = int(time.strftime(\"%Y%m%d%H%M%S\"))\n curPageRange = self.table.pageRanges[-1]\n curBasePage = curPageRange.basePageList[-1]\n\n # open a new page range or new base page\n if curPageRange.has_capacity() == False:\n self.table.pageRanges.append(PageRange(self.table.num_columns))\n curPageRange = self.table.pageRanges[-1]\n curBasePage = curPageRange.basePageList[-1]\n elif curBasePage.has_capacity() == False:\n curPageRange.basePageList.append(BasePage(self.table.num_columns))\n curBasePage = curPageRange.basePageList[-1]\n \n total_col.extend([0, self.table.baseRID, int_time, schema_encoding])\n total_col += columns\n for i in range(len(total_col)):\n curBasePage.basePage[i].write(total_col[i])\n #test\n #start = (curBasePage.basePage[i].num_records - 1) * 8\n #end = curBasePage.basePage[i].num_records * 8\n #int_val=int.from_bytes(curBasePage.basePage[i].data[start:end],'big')\n #print(int_val)\n \n self.table.keyToBaseRID[total_col[self.table.key + 4]] = self.table.baseRID\n self.table.baseRID += 1\n return True\n \n \"\"\"\n # Read a record with specified key\n # :param key: the key value to select records based on\n # :param query_columns: what columns to return. 
array of 1 or 0 values.\n # Returns a list of Record objects upon success\n # Returns False if record locked by TPL\n # Assume that select will never be called on a key that doesn't exist\n \"\"\"\n def select(self, key, column, query_columns):\n listSelect = []\n recordSelect = []\n\n #locate record position\n if key in self.table.keyToBaseRID.keys():\n baseRID = self.table.keyToBaseRID[key]\n baseRecord = self.table.baseRIDToRecord(baseRID)\n\n for i in range(len(query_columns)):\n if query_columns[i] == 1:\n val = baseRecord[i+4]\n recordSelect.append(val)\n else:\n recordSelect.append(None)\n listSelect.append(Record(baseRID, key, recordSelect))\n return listSelect\n\n \"\"\"\n # Update a record with specified key and columns\n # Returns True if update is succesful\n # Returns False if no records exist with given key or if the target record cannot be accessed due to 2PL locking\n \"\"\"\n\n '''\n def getUpdateRID(self,key): \n return self.table.keyToBaseRID[key]\n\n def getUpdatePageR(self,rid):\n return self.table.getPageR(rid)\n '''\n\n def update(self, key, *columns):\n baseRID = self.table.keyToBaseRID[key]\n location = self.table.baseRIDToLocation(baseRID)\n pageRange_index = location[0]\n baseRecord = self.table.baseRIDToRecord(baseRID)\n #print(\"Before update:\", baseRecord)\n\n #check if the tail page in that page range still have space\n if self.table.pageRanges[pageRange_index].tailPageList[-1].has_capacity() == False: #if no capacity, add a new tail page\n self.table.pageRanges[pageRange_index].tailPageList.append(BasePage(self.table.num_columns)) \n updateEncoding = \"\" #updated schema encoding\n for i in range(len(columns)):\n if columns[i] == None:\n updateEncoding += \"0\"\n else:\n updateEncoding += \"1\"\n \n updateEncoding = int(updateEncoding, 2)\n time = datetime.datetime.now()\n int_time = int(time.strftime(\"%Y%m%d%H%M%S\"))\n\n baseRecordIndirect = baseRecord[0]\n tailIndirect = 0\n # Current tailRecord is not the first update to a baseRecord then get the last tail record RID\n if (baseRecordIndirect != 0):\n tailIndirect = baseRecordIndirect\n\n # Update baseRecord indirect column\n self.table.writeByte(self.table.tailRID, location, 0)\n \n tailrecord = [self.table.tailRID, tailIndirect, int_time,updateEncoding]+list(columns)\n currTailPage = self.table.pageRanges[pageRange_index].tailPageList[-1]\n for i in range(len(tailrecord)):\n currTailPage.basePage[i].write(tailrecord[i])\n self.table.tailRID += 1\n\n #baseRecord = self.table.baseRIDToRecord(baseRID)\n #print(\"After update:\", baseRecord)\n return True\n \n \"\"\"\n :param start_range: int # Start of the key range to aggregate \n :param end_range: int # End of the key range to aggregate \n :param aggregate_columns: int # Index of desired column to aggregate\n # this function is only called on the primary key.\n # Returns the summation of the given range upon success\n # Returns False if no record exists in the given range\n \"\"\"\n def sum(self, start_range, end_range, aggregate_column_index):\n startRID = start_range + 1\n endRID = end_range + 1\n sum = 0\n if (startRID > self.table.baseRID):\n return False\n for i in range(startRID, endRID):\n baseRecord = self.table.baseRIDToRecord(i)\n sum += baseRecord[aggregate_column_index+4]\n return sum\n\n \"\"\"\n incremenets one column of the record\n this implementation should work if your select and update queries already work\n :param key: the primary of key of the record to increment\n :param column: the column to increment\n # Returns True is 
increment is successful\n # Returns False if no record matches key or if target record is locked by 2PL.\n \"\"\"\n def increment(self, key, column):\n r = self.select(key, self.table.key, [1] * self.table.num_columns)[0]\n if r is not False:\n updated_columns = [None] * self.table.num_columns\n updated_columns[column] = r[column] + 1\n u = self.update(key, *updated_columns)\n return u\n return False\n\n"
},
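The Query record above documents its operations only in docstrings; here is a minimal usage sketch, assuming the repo's template package (db.py and table.py, reproduced later in this dump, plus the unseen page.py) is importable — every key and value below is made up for illustration:

```python
# Hedged usage sketch for the Query class above; assumes the repo's
# template package is on sys.path. Keys/values are illustrative only.
from template.db import Database
from template.query import Query

db = Database()
table = db.create_table("Grades", num_columns=5, key=0)  # key lives in column 0
q = Query(table)

q.insert(906659671, 93, 0, 0, 0)                 # True on success, False on duplicate key
recs = q.select(906659671, 0, [1, 1, 1, 1, 1])   # 1/0 mask chooses columns to return
print(recs[0].columns)                           # -> [906659671, 93, 0, 0, 0]

q.update(906659671, None, 95, None, None, None)  # None means "leave this column unchanged"
```

Note that in this milestone select reads only the base record (baseRIDToRecord never follows the indirection chain into tail pages), so an update is appended as a tail record but does not change what select returns until merging is implemented.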
{
"alpha_fraction": 0.6271899342536926,
"alphanum_fraction": 0.6278907060623169,
"avg_line_length": 27.52083396911621,
"blob_id": "043b3902f5db69777e44390c6958b4c0966f2c0a",
"content_id": "45c97a174161383920a9219eed059cd9e6e7c63d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1427,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 48,
"path": "/template/db.py",
"repo_name": "tingwen0125/L-store-database-management-system",
"src_encoding": "UTF-8",
"text": "from template.table import Table\r\n\r\n'''\r\nThe Database class is a general interface to the database and handles high-level\r\noperations such as starting and shutting down the database instance and loading the\r\ndatabase from stored disk files. This class also handles the creation and deletion of\r\ntables via the create and drop function.The create function will create a new\r\ntable in the database. The Table constructor takes as input the name of the table,\r\nnumber of columns and the index of the key column. The drop function drops the\r\nspecified table.\r\n'''\r\n\r\nclass Database():\r\n\r\n def __init__(self):\r\n self.tables = {}\r\n pass\r\n\r\n # Not required for milestone1\r\n def open(self, path):\r\n pass\r\n\r\n def close(self):\r\n pass\r\n\r\n \"\"\"\r\n # Creates a new table\r\n :param name: string #Table name\r\n :param num_columns: int #Number of Columns: all columns are integer\r\n :param key: int #Index of table key in columns\r\n \"\"\"\r\n def create_table(self, name, num_columns, key):\r\n table = Table(name, num_columns, key)\r\n self.tables[name]=table\r\n return table\r\n\r\n \"\"\"\r\n # Deletes the specified table\r\n \"\"\"\r\n def drop_table(self, name):\r\n return self.tables.pop(name)\r\n \r\n\r\n \"\"\"\r\n # Returns table with the passed name\r\n \"\"\"\r\n def get_table(self, name):\r\n return self.tables[name]\r\n \r\n"
},
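Both get_table and drop_table index self.tables directly, so an unknown name raises KeyError rather than returning False. A small lifecycle sketch; the .get() fallback is my addition, not part of the repo:

```python
from template.db import Database

db = Database()
grades = db.create_table("Grades", num_columns=5, key=0)
assert db.get_table("Grades") is grades

missing = db.tables.get("Missing")  # defensive lookup: None instead of KeyError
assert missing is None

db.drop_table("Grades")             # dict.pop() hands back the removed Table
```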
{
"alpha_fraction": 0.7362831830978394,
"alphanum_fraction": 0.7539823055267334,
"avg_line_length": 35.79999923706055,
"blob_id": "e6c5b815bfea5e311aff6addd3d30ef93246c733",
"content_id": "cf273ba5f22a86ac7a28cdca8a07fc8ad2ebab6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 565,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 15,
"path": "/template/config.py",
"repo_name": "tingwen0125/L-store-database-management-system",
"src_encoding": "UTF-8",
"text": "# Global Setting for the Database\r\n# PageSize, StartRID, etc..\r\n\r\n'''\r\nThe config.py file is meant to act as centralized storage for all the configuration options\r\nand the constant values used in the code. It is good practice to organize such\r\ninformation into a Singleton object accessible from every file in the project. This class\r\nwill find more use when implementing persistence in the next milestone.\r\n'''\r\n\r\nPAGE_SIZE = 4096\r\nINT_SIZE = 8\r\nPAGE_RANGE_SIZE = 65536\r\nMAX_NUM_RECORD = PAGE_SIZE / INT_SIZE\r\nBASE_PAGE_PER_PAGE_RANGE = PAGE_RANGE_SIZE / PAGE_SIZE"
},
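These constants imply the geometry that table.py hard-codes as 512, 16 and 8192. A worked check of that arithmetic — note the repo defines the two derived constants with true division /, so they evaluate to floats (512.0 and 16.0); the floor division below keeps them integral:

```python
PAGE_SIZE = 4096         # bytes in one physical page
INT_SIZE = 8             # every column value is a 64-bit integer
PAGE_RANGE_SIZE = 65536

records_per_page = PAGE_SIZE // INT_SIZE             # 4096 // 8     == 512
base_pages_per_range = PAGE_RANGE_SIZE // PAGE_SIZE  # 65536 // 4096 == 16
records_per_range = records_per_page * base_pages_per_range

assert records_per_range == 8192  # the divisor used in baseRIDToLocation
```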
{
"alpha_fraction": 0.6161125302314758,
"alphanum_fraction": 0.6401534676551819,
"avg_line_length": 37.46464538574219,
"blob_id": "d7575eab2646f9983facc92440ad7f36e89ac671",
"content_id": "723969130e11d0e9dbe63f9c86c5a51b52db645c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3910,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 99,
"path": "/template/table.py",
"repo_name": "tingwen0125/L-store-database-management-system",
"src_encoding": "UTF-8",
"text": "from template.page import PageRange\r\nfrom template.index import Index\r\nfrom template.config import *\r\nfrom time import time\r\n\r\n\r\nINDIRECTION_COLUMN = 0\r\nRID_COLUMN = 1\r\nTIMESTAMP_COLUMN = 2\r\nSCHEMA_ENCODING_COLUMN = 3\r\n\r\n'''\r\nThe Table class provides the core of our relational storage functionality. All columns are\r\n64-bit integers in this implementation. Users mainly interact with tables through queries.\r\nTables provide a logical view over the actual physically stored data and mostly manage\r\nthe storage and retrieval of data. Each table is responsible for managing its pages and\r\nrequires an internal page directory that given a RID it returns the actual physical location\r\nof the record. The table class should also manage the periodical merge of its\r\ncorresponding page ranges.\r\n'''\r\n#16 base pages in one page range\r\nclass Record:\r\n\r\n def __init__(self, rid, key, columns):\r\n self.rid = rid\r\n self.key = key\r\n self.columns = columns\r\n\r\n def getColumns(self):\r\n return self.columns\r\n\r\nclass Table:\r\n\r\n \"\"\"\r\n :param name: string #Table name\r\n :param num_columns: int #Number of Columns: all columns are integer\r\n :param key: int #Index of table key in columns\r\n \"\"\"\r\n def __init__(self, name, num_columns, key):\r\n self.name = name\r\n self.key = key\r\n self.num_columns = num_columns\r\n self.page_directory = {}\r\n self.index = Index(self)\r\n self.pageRanges = [PageRange(self.num_columns)]\r\n self.keyToBaseRID = {} \r\n self.baseRID = 1\r\n self.tailRID = 1\r\n pass\r\n\r\n # Given a baseRID return a baseRecord\r\n # The way to access a value using a location: \r\n # e.g. value = int.from_bytes(pageRanges[pageRange_index].basePageList\r\n # [basePageList_index].basePage[columnNum].data[offset_index*8:(offset_index+1)*8], 'big')\r\n\r\n def baseRIDToRecord(self, baseRID):\r\n pageRange_index = (baseRID-1) // 8192 #512*16\r\n basePageList_index = (baseRID-1-512 * 16 * pageRange_index) // 512\r\n offset_index = baseRID-512 * (16*pageRange_index+basePageList_index)-1\r\n baseRecord = []\r\n for i in range(4+self.num_columns):\r\n baseRecord.append(int.from_bytes(self.pageRanges[pageRange_index].basePageList[basePageList_index].basePage[i].data \\\r\n [offset_index*8:(offset_index+1)*8], 'big'))\r\n return baseRecord\r\n \r\n def baseRIDToLocation(self, baseRID):\r\n pageRange_index = (baseRID-1) // 8192 #512*16\r\n basePageList_index = (baseRID-1-512 * 16 * pageRange_index) // 512\r\n offset_index = baseRID-512 * (16*pageRange_index+basePageList_index)-1\r\n location = [pageRange_index, basePageList_index, offset_index]\r\n return location\r\n '''\r\n def tailRIDToLocation(self, tailRID):\r\n pageRange_index = (tailRID-1) // 8192 \r\n tailPageList_index = (tailRID-1-512 * 16 * pageRange_index) // 512\r\n offset_index = (tailRID-512 * (16*pageRange_index+tailPageList_index)-1)\r\n location = [pageRange_index, tailPageList_index, offset_index]\r\n '''\r\n def writeByte(self, value, location, columnNum):\r\n pageRange_index = location[0]\r\n basePageList_index = location[1]\r\n offset_index = location[2]\r\n self.pageRanges[pageRange_index].basePageList[basePageList_index] \\\r\n .basePage[columnNum].data[offset_index*8:(offset_index+1)*8] = \\\r\n value.to_bytes(8, 'big')\r\n return True\r\n\r\n def printRecord(self, rid):\r\n pass\r\n\r\n '''\r\n def getPageR(self,rid): #given rid return the page range the rid record is at\r\n print(\"PageR\", rid//MAX_NUM_RECORD//BASE_PAGE_PER_PAGE_RANGE)\r\n 
print(type(rid//MAX_NUM_RECORD//BASE_PAGE_PER_PAGE_RANGE))\r\n return int(rid//MAX_NUM_RECORD//BASE_PAGE_PER_PAGE_RANGE)\r\n '''\r\n\r\n def __merge(self):\r\n pass\r\n \r\n"
}
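baseRIDToLocation above unpacks one RID into three indices using that 512-records-per-page, 16-base-pages-per-range geometry. A standalone restatement of the same arithmetic with a few spot checks:

```python
# Worked example of the RID -> physical location mapping from Table above.
# RIDs start at 1; each page range holds 16 base pages * 512 records = 8192.
def base_rid_to_location(base_rid):
    page_range = (base_rid - 1) // 8192
    base_page = (base_rid - 1 - 512 * 16 * page_range) // 512
    offset = base_rid - 512 * (16 * page_range + base_page) - 1
    return page_range, base_page, offset

assert base_rid_to_location(1) == (0, 0, 0)     # very first record
assert base_rid_to_location(513) == (0, 1, 0)   # first record of the 2nd base page
assert base_rid_to_location(8193) == (1, 0, 0)  # first record of the 2nd page range
```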
] | 5 |
sathyainfotech/CRUD-SQLite-Tkinter | https://github.com/sathyainfotech/CRUD-SQLite-Tkinter | 1f59fa30cc5c7514a32a2ba7571ae06fb820484b | 9af24f73588ac7f77fbd8270fc9e80509ff87c9d | ffa45eea14875650e63822685329f69466cce13a | refs/heads/main | 2023-07-03T08:24:26.465433 | 2021-07-11T15:35:51 | 2021-07-11T15:35:51 | 384,819,317 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6289117932319641,
"alphanum_fraction": 0.6807995438575745,
"avg_line_length": 30.75,
"blob_id": "c2297c124c97946f060acc6698dc4dcedac0609f",
"content_id": "289fe588974cd3b92a24abdaa0ae0d97b320cbd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4953,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 156,
"path": "/Sqlite.py",
"repo_name": "sathyainfotech/CRUD-SQLite-Tkinter",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom database import *\n\ndb=Database(\"SqliteDatabase.db\")\n\nwindow=Tk()\nwindow.title(\"SQLite\")\nwindow.geometry(\"1920x1080\")\n\nname=StringVar()\nage=StringVar()\ngender=StringVar()\naddress=StringVar()\ncontact=StringVar()\nmail=StringVar()\n\nframe1=Frame(window,padx=20,pady=20,bg=\"#636e72\")\nframe1.pack(side=TOP,fill=X)\n\nlblTitle=Label(frame1,bg=\"#636e72\",text=\"REGISTRATION\",font=(\"times\",16,\"bold\"),fg=\"white\",pady=10)\nlblTitle.grid(columnspan=2)\n\nlblName=Label(frame1,text=\"Name\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblName.grid(row=1,column=0)\n\ntxtName=Entry(frame1,textvariable=name,font=(\"times\",16),width=43)\ntxtName.grid(row=1,column=1)\n\nlblAge=Label(frame1,text=\"Age\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblAge.grid(row=2,column=0)\n\ntxtAge=Entry(frame1,font=(\"times\",16),textvariable=age,width=43)\ntxtAge.grid(row=2,column=1)\n\nlblgen=Label(frame1,text=\"Gender\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblgen.grid(row=3,column=0)\n\ncb=ttk.Combobox(frame1,width=41,textvariable=gender,state=\"readonly\",font=(\"times\",16))\ncb['values']=(\"Male\",\"Female\",\"Others\")\ncb.grid(row=3,column=1)\n\nlblAdd=Label(frame1,text=\"Address\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblAdd.grid(row=4,column=0)\n\ntxtAdd=Entry(frame1,font=(\"times\",16),width=43,textvariable=address)\ntxtAdd.grid(row=4,column=1)\n\nlblCon=Label(frame1,text=\"Contact\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblCon.grid(row=5,column=0)\n\ntxtCon=Entry(frame1,font=(\"times\",16),textvariable=contact,width=43)\ntxtCon.grid(row=5,column=1)\n\nlblMail=Label(frame1,text=\"Mail\",bg=\"#636e72\",fg=\"white\",font=(\"times\",16,\"bold\"),pady=10)\nlblMail.grid(row=6,column=0)\n\ntxtMail=Entry(frame1,font=(\"times\",16),textvariable=mail,width=43)\ntxtMail.grid(row=6,column=1)\n\nbtn_frame=Frame(frame1,bg=\"#2d3436\")\nbtn_frame.grid(row=7,column=1,columnspan=4)\n\ndef fetchData():\n table.delete(*table.get_children())\n count=0\n for row in db.fetch_record():\n count+=1\n table.insert(\"\",END,values=(count,row[0],row[1],row[2],row[3],row[4],row[5],row[6]))\n\ndef addData():\n if txtName.get()==\"\" or txtAge.get()==\"\" or txtAdd.get()==\"\" or txtCon.get()==\"\" or txtMail.get()==\"\":\n messagebox.showinfo(\"Message\",\"Please Fill All Records\")\n else:\n db.insert(txtName.get(),txtAge.get(),cb.get(),txtAdd.get(),txtCon.get(),txtMail.get())\n fetchData()\n clearData()\n messagebox.showinfo(\"Message\",\"Record Insert Successfully\")\n\ndef getrecord(event):\n srow = table.focus()\n data = table.item(srow)\n global row\n row = data['values']\n name.set(row[2])\n age.set(row[3])\n gender.set(row[4])\n contact.set(row[6])\n mail.set(row[7])\n address.set(row[5])\n\ndef updateData():\n if txtName.get() == \"\" or txtAge.get() == \"\" or txtAdd.get() == \"\" or cb.get() == \"\" or txtCon.get() == \"\" or txtMail.get() == \"\":\n messagebox.showinfo(\"Message\", \"Please Fill All Records\")\n else:\n db.update_record(txtName.get(), txtAge.get(), cb.get(), txtAdd.get(), txtCon.get(), txtMail.get(), (row[1]))\n fetchData()\n clearData()\n messagebox.showinfo(\"Message\", \"Record Update Successfully\")\n\ndef deleteData():\n db.remove_record(row[1])\n fetchData()\n clearData()\n messagebox.showinfo(\"Message\", \"Record Delete Successfully\")\n\ndef 
clearData():\n name.set(\"\")\n age.set(\"\")\n gender.set(\"\")\n contact.set(\"\")\n mail.set(\"\")\n address.set(\"\")\n\nbtnSub=Button(btn_frame,text=\"Insert\",bg=\"#01a3a4\",fg=\"white\",width=6,padx=20,pady=5,font=(\"times\",16,\"bold\"),command=addData)\nbtnSub.grid(row=0,column=0)\n\nbtnUp=Button(btn_frame,text=\"Update\",bg=\"#F79F1F\",fg=\"white\",width=6,padx=20,pady=5,font=(\"times\",16,\"bold\"),command=updateData)\nbtnUp.grid(row=0,column=1)\n\nbtnDel=Button(btn_frame,text=\"Delete\",bg=\"#ee5253\",fg=\"white\",width=6,padx=20,pady=5,font=(\"times\",16,\"bold\"),command=deleteData)\nbtnDel.grid(row=0,column=2)\n\nbtnClr=Button(btn_frame,text=\"Clear\",bg=\"#1289A7\",fg=\"white\",width=6,padx=20,pady=5,font=(\"times\",16,\"bold\"),command=clearData)\nbtnClr.grid(row=0,column=3)\n\nmyFrame=Frame(window)\nmyFrame.place(x=0,y=425,width=1920,height=500)\n\nstyle=ttk.Style()\nstyle.configure(\"Treeview\",font=(\"times\",15),rowheight=35)\nstyle.configure(\"Treeview.Heading\",font=(\"times\",16,\"bold\"))\n\ntable=ttk.Treeview(myFrame,columns=(0,1,2,3,4,5,6,7))\n\ntable.column(\"0\",anchor=CENTER)\ntable.column(\"1\",stretch=NO,width=0)\ntable.column(\"3\",anchor=CENTER)\ntable.column(\"6\",anchor=CENTER)\n\ntable.heading(\"0\",text=\"S.NO\")\ntable.heading(\"1\",text=\"ID\")\ntable.heading(\"2\",text=\"NAME\")\ntable.heading(\"3\",text=\"AGE\")\ntable.heading(\"4\",text=\"GENDER\")\ntable.heading(\"5\",text=\"ADDRESS\")\ntable.heading(\"6\",text=\"CONTACT\")\ntable.heading(\"7\",text=\"MAIL\")\ntable[\"show\"]='headings'\ntable.bind(\"<ButtonRelease-1>\",getrecord)\ntable.pack(fill=X)\n\nfetchData()\n\nwindow.mainloop()\n"
},
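getrecord() above hinges on ttk.Treeview's focus()/item() pair to pull the clicked row back into the entry variables. A stripped-down sketch of just that selection pattern; widget names and row values are illustrative, and it needs a normal Tk display to run:

```python
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=(0, 1), show="headings")
tree.heading("0", text="ID")
tree.heading("1", text="NAME")
tree.insert("", tk.END, values=(1, "Alice"))
tree.pack()

def on_release(event):
    iid = tree.focus()                   # iid of the focused row, "" if none
    if iid:
        print(tree.item(iid)["values"])  # e.g. [1, 'Alice']

tree.bind("<ButtonRelease-1>", on_release)
root.mainloop()
```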
{
"alpha_fraction": 0.49088358879089355,
"alphanum_fraction": 0.4922861158847809,
"avg_line_length": 32.92856979370117,
"blob_id": "777ac271a4e498ddc4bbc2cb50bca7139e1e702c",
"content_id": "d260e50190c513a934a5cc206d3ac98759a697d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1426,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 42,
"path": "/database.py",
"repo_name": "sathyainfotech/CRUD-SQLite-Tkinter",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\nclass Database:\n def __init__(self,db):\n self.con = sqlite3.connect(db)\n self.c=self.con.cursor()\n self.c.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS datas(\n pid INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n age TEXT NOT NULL,\n gender TEXT NOT NULL,\n address TEXT NOT NULL,\n contact TEXT NOT NULL,\n mail TEXT NOT NULL \n )\n \"\"\")\n self.con.commit()\n\n def insert(self,name,age,gender,address,contact,mail):\n sql=\"\"\"\n insert into datas values(NULL,?,?,?,?,?,?)\n \"\"\"\n self.c.execute(sql,(name,age,gender,address,contact,mail))\n self.con.commit()\n\n def fetch_record(self):\n self.c.execute(\"SELECT * FROM datas\")\n data = self.c.fetchall()\n return data\n\n def update_record(self,name,age,gender,address,contact,mail,pid):\n sql=\"\"\"\n update datas set name=?,age=?,gender=?,address=?,contact=?,mail=? where pid=?\n \"\"\"\n self.c.execute(sql,(name,age,gender,address,contact,mail,pid))\n self.con.commit()\n\n def remove_record(self,pid):\n sql=\"delete from datas where pid=?\"\n self.c.execute(sql,(pid,))\n self.con.commit()\n\n"
}
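A round-trip sketch for the Database helper above, using SQLite's ":memory:" path so nothing touches disk; it assumes the module is saved as database.py next to the caller, and all field values are examples:

```python
from database import Database

db = Database(":memory:")
db.insert("Asha", "29", "Female", "12 High St", "5550001", "asha@example.com")

rows = db.fetch_record()
pid = rows[0][0]  # pid is the auto-assigned INTEGER PRIMARY KEY

db.update_record("Asha", "30", "Female", "12 High St", "5550001",
                 "asha@example.com", pid)
db.remove_record(pid)
```

Note the ? placeholders in every statement: values are bound by the driver rather than interpolated into the SQL string, which is what keeps the Tkinter front end safe from injection via the entry fields.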
] | 2 |
djfooks/loveletter | https://github.com/djfooks/loveletter | 0d742847341ff41752d2867b165429f2de3809d0 | 030d91d32393da511828c868c33d65da397d434b | b77c8eb3a9c820c5f0fde3cf7f7aca5737fad02d | refs/heads/master | 2023-02-08T05:32:00.242262 | 2020-07-06T09:16:24 | 2020-07-06T09:16:24 | 270,452,023 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5474189519882202,
"alphanum_fraction": 0.5512776374816895,
"avg_line_length": 38.870086669921875,
"blob_id": "b332a96f907d4bdb5558c3514f6e74fce72fe46e",
"content_id": "62e30a12bb8206d8ee818bc1e8b480ad1fa9e74c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23324,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 585,
"path": "/update.py",
"repo_name": "djfooks/loveletter",
"src_encoding": "UTF-8",
"text": "import json\nimport boto3\nimport hashlib\nimport random\nimport string\n\ndef send_to_connection(connection_id, data, event):\n gatewayapi = boto3.client(\"apigatewaymanagementapi\",\n endpoint_url = \"https://\" + event[\"requestContext\"][\"domainName\"] +\n \"/\" + event[\"requestContext\"][\"stage\"])\n try:\n gatewayapi.post_to_connection(ConnectionId=connection_id,\n Data=json.dumps(data).encode(\"utf-8\"))\n except gatewayapi.exceptions.GoneException:\n print(\"ERROR: Connection id \" + connection_id + \" has gone\")\n\ndef handle_websocket(event, context):\n connection_id = event[\"requestContext\"].get(\"connectionId\")\n print(\"connection_id \" + connection_id)\n\n dynamodb = boto3.resource(\"dynamodb\")\n table = dynamodb.Table(\"loveletter-sessions\")\n\n body = json.loads(event[\"body\"])\n\n param_room = body[\"room\"]\n param_cmd = body[\"cmd\"] if \"cmd\" in body else None\n\n try:\n room_data = table.get_item(Key={\"room\": param_room})[\"Item\"]\n except KeyError:\n return send_to_connection(connection_id, {\"ERROR\": \"ROOM_NOT_FOUND\"}, event)\n\n if \"turn\" in room_data:\n room_data[\"turn\"] = int(room_data[\"turn\"])\n if \"deck\" in room_data:\n room_data[\"deck\"] = json.loads(room_data[\"deck\"])\n\n def get_num_players():\n for i in range(4):\n if \"player\" + str(i) not in room_data:\n return i\n return 4\n\n num_players = get_num_players()\n player_states = [None] * num_players\n players_data = []\n player_index = -1\n for i in range(num_players):\n player_id = \"player\" + str(i)\n player = json.loads(room_data[player_id])\n players_data.append(player)\n\n player_state_id = \"playerstate\" + str(i)\n if player_state_id in room_data:\n player_states[i] = json.loads(room_data[player_state_id])\n\n if player[\"connectionId\"] == connection_id:\n player_index = i\n\n if player_index == -1:\n return send_to_connection(connection_id, {\"ERROR\": \"NOT_IN_ROOM\"}, event)\n\n card_types = []\n card_types += [\"GUARD\"] * 5\n card_types += [\"PRIEST\"] * 2\n card_types += [\"BARON\"] * 2\n card_types += [\"HANDMAID\"] * 2\n card_types += [\"PRINCE\"] * 2\n card_types += [\"KING\"] * 1\n card_types += [\"COUNTESS\"] * 1\n card_types += [\"PRINCESS\"] * 1\n\n card_value_map = {\n \"GUARD\": 1,\n \"PRIEST\": 2,\n \"BARON\": 3,\n \"HANDMAID\": 4,\n \"PRINCE\": 5,\n \"KING\": 6,\n \"COUNTESS\": 7,\n \"PRINCESS\": 8\n }\n\n rounds_to_win_map = { 2: 7, 3: 5, 4: 4, 5: 4, 6: 4, 7: 4, 8: 4 }\n rounds_to_win = rounds_to_win_map[num_players]\n\n def send_to_all(data):\n for i in range(num_players):\n send_to_connection(players_data[i][\"connectionId\"], data, event)\n\n def get_public_state():\n response = {\n \"cmd\": \"STATE\",\n \"gamestate\": room_data[\"gamestate\"],\n \"players\": [x[\"name\"] for x in players_data]\n }\n if room_data[\"gamestate\"] == \"PLAYING\":\n response[\"round\"] = int(room_data[\"round\"])\n response[\"turn\"] = int(room_data[\"turn\"])\n response[\"playerStates\"] = [{\"state\": x[\"state\"], \"wins\": x[\"wins\"], \"played\": x[\"played\"]} for x in player_states]\n response[\"interaction\"] = dict(room_data[\"interaction\"])\n return response\n\n def add_private_state(player_index, response):\n response[\"playerId\"] = player_index\n if room_data[\"gamestate\"] == \"PLAYING\":\n hand = [player_states[player_index][\"hand\"]]\n if \"pickup\" in player_states[player_index]:\n hand.append(player_states[player_index][\"pickup\"])\n response[\"hand\"] = hand\n response[\"humanHand\"] = [card_types[x] for x in hand]\n if \"interaction\" in 
player_states[player_index]:\n for (k,v) in player_states[player_index][\"interaction\"].items():\n response[\"interaction\"][k] = v\n\n def update_room():\n set_string = \"SET deck = :deck, turn = :turn, round = :round, gamestate = :gamestate, interaction = :interaction, callback = :callback\"\n lookup = {\n \":deck\": json.dumps(room_data[\"deck\"]),\n \":turn\": room_data[\"turn\"],\n \":round\": room_data[\"round\"],\n \":gamestate\": \"PLAYING\",\n \":interaction\": json.dumps(room_data[\"interaction\"]),\n \":callback\": json.dumps(room_data[\"callback\"])\n }\n\n for i in range(num_players):\n set_string += \", playerstate\" + str(i) + \" = :playerstate\" + str(i)\n lookup[\":playerstate\" + str(i)] = json.dumps(player_states[i])\n\n table.update_item(\n Key={\"room\": param_room },\n UpdateExpression=set_string,\n ExpressionAttributeValues=lookup)\n\n def start_game(is_new_round=False):\n if num_players <= 1:\n return send_to_connection(connection_id, {\"ERROR\": \"NOT_ENOUGH_PLAYERS\"}, event)\n\n cards = [i for i in range(16)]\n random.shuffle(cards)\n\n #test\n def move_card(index, cardStr):\n found_index = index + [card_types[x] for x in cards[index:]].index(cardStr)\n tmp = cards[found_index]\n cards[found_index] = cards[index]\n cards[index] = tmp\n\n move_card(0, \"COUNTESS\")\n move_card(1, \"KING\")\n move_card(2, \"PRINCE\")\n move_card(3, \"PRINCESS\")\n\n hands = cards[:num_players]\n del cards[:num_players]\n\n turn = room_data[\"turn\"] if is_new_round else 0 #random.randint(0, num_players - 1)\n\n room_data[\"gamestate\"] = \"PLAYING\"\n room_data[\"deck\"] = cards\n room_data[\"turn\"] = turn\n room_data[\"round\"] = (room_data[\"round\"] + 1) if is_new_round else 0\n room_data[\"interaction\"] = {}\n room_data[\"callback\"] = {}\n\n for i in range(num_players):\n player_states[i] = {\"state\": \"ALIVE\", \"wins\": player_states[i][\"wins\"] if is_new_round else 0, \"hand\": hands[i], \"played\": []}\n\n first_pickup = cards[0]\n del cards[0]\n player_states[turn][\"pickup\"] = first_pickup\n update_room()\n\n response = get_public_state()\n response[\"cmd\"] = \"NEXT_ROUND\" if is_new_round else \"START_GAME\"\n send_to_all(response)\n\n for i in range(num_players):\n pickup = player_states[i][\"hand\"]\n response = { \"cmd\": \"START_CARD\", \"playerId\": i, \"pickup\": pickup, \"humanHand\": [card_types[pickup]] }\n send_to_connection(players_data[i][\"connectionId\"], response, event)\n\n response = {\"cmd\": \"YOUR_TURN\", \"pickup\": first_pickup}\n send_to_connection(players_data[turn][\"connectionId\"], response, event)\n\n def next_turn():\n potential_winner = None\n alive_count = 0\n for i in range(num_players):\n if player_states[i][\"state\"] != \"DEAD\":\n potential_winner = i\n alive_count += 1\n if alive_count == 1:\n return round_completed([potential_winner])\n\n new_turn = -1\n for step in range(num_players - 1):\n new_turn = (room_data[\"turn\"] + 1 + step) % num_players\n if player_states[new_turn][\"state\"] == \"DEAD\":\n new_turn = -1\n else:\n break\n\n room_data[\"turn\"] = new_turn\n room_data[\"interaction\"] = {}\n player_states[room_data[\"turn\"]][\"interaction\"] = {}\n if \"target\" in room_data[\"interaction\"]:\n player_states[room_data[\"interaction\"][\"target\"]][\"interaction\"] = {}\n\n if len(room_data[\"deck\"]) == 1:\n return round_completed(None)\n\n send_to_all({\"cmd\": \"NEXT_TURN\", \"turn\": new_turn})\n player_states[new_turn][\"state\"] = \"ALIVE\" # clear SAFE state\n\n pickup = room_data[\"deck\"][0]\n del 
room_data[\"deck\"][0]\n player_states[new_turn][\"pickup\"] = pickup\n response = {\"cmd\": \"YOUR_TURN\", \"pickup\": pickup}\n add_private_state(new_turn, response)\n send_to_connection(players_data[new_turn][\"connectionId\"], response, event)\n\n def round_completed(round_winners):\n final_cards = []\n if round_winners == None:\n round_winners = []\n highest_card = -1\n for i in range(num_players):\n card_value = card_value_map[card_types[player_states[i][\"hand\"]]]\n if player_states[i][\"state\"] != \"DEAD\" and card_value >= highest_card:\n if card_value > highest_card:\n round_winners = [i]\n highest_card = card_value\n else:\n round_winners.append(i)\n\n if len(round_winners) != 1:\n tied = round_winners\n round_winners = []\n highest_total = 0\n for x in tied:\n total = 0\n for card in player_states[i][\"played\"]:\n total += card_value_map[card_types[card]]\n if total > highest_total:\n highest_total = total\n round_winners = [x]\n elif total == highest_total:\n round_winners.append(x)\n\n final_cards = [state[\"hand\"] for state in player_states]\n\n game_winners = []\n game_winner = -1\n highest_round_wins = 0\n for x in round_winners:\n player_states[x][\"wins\"] += 1\n wins = player_states[x][\"wins\"]\n if wins >= rounds_to_win:\n if wins > highest_round_wins:\n game_winners = [x]\n highest_round_wins = wins\n elif wins == highest_round_wins:\n game_winners.append(x)\n\n room_data[\"turn\"] = round_winners[random.randint(0, len(round_winners) - 1)]\n room_data[\"interaction\"] = {\"state\": \"ROUND_COMPLETE\", \"roundWinners\": round_winners, \"finalCards\": final_cards, \"hiddenCard\": room_data[\"deck\"][0]}\n if len(game_winners) == 1:\n room_data[\"interaction\"][\"gameWinner\"] = game_winners[0]\n\n for i in range(num_players):\n msg = get_public_state()\n msg[\"cmd\"] = \"ROUND_COMPLETE\"\n add_private_state(i, msg)\n send_to_connection(players_data[i][\"connectionId\"], msg, event)\n\n update_room()\n\n def discard_card(player_index, card, played):\n player_states[player_index][\"played\"].append(card)\n send_to_all({\"cmd\": \"DISCARD\", \"playerId\": player_index, \"card\": card})\n if card_types[card] == \"PRINCESS\":\n player_states[player_index][\"state\"] = \"DEAD\"\n\n if played:\n if card == player_states[room_data[\"turn\"]][\"hand\"]:\n player_states[room_data[\"turn\"]][\"hand\"] = player_states[room_data[\"turn\"]][\"pickup\"]\n del player_states[room_data[\"turn\"]][\"pickup\"]\n\n elif player_states[player_index][\"state\"] != \"DEAD\":\n pickup = room_data[\"deck\"][0]\n del room_data[\"deck\"][0]\n player_states[player_index][\"hand\"] = pickup\n msg = {\"cmd\": \"PICKUP\", \"pickup\": pickup, \"humanCard\": card_types[pickup]}\n send_to_connection(players_data[player_index][\"connectionId\"], msg, event)\n\n\n def valid_target(target, allow_self):\n target = int(target)\n if not allow_self and target == player_index:\n return False\n return player_states[target][\"state\"] == \"ALIVE\"\n\n def play_card():\n hand = player_states[player_index][\"hand\"]\n pickup = player_states[player_index][\"pickup\"]\n played_card = hand if param_cmd == \"PLAY_HAND\" else pickup\n other_card = pickup if param_cmd == \"PLAY_HAND\" else hand\n played_card_str = card_types[played_card]\n other_card_str = card_types[other_card]\n\n if other_card_str == \"COUNTESS\":\n if played_card_str == \"KING\" or played_card_str == \"PRINCE\":\n return send_to_connection(connection_id, {\"ERROR\": \"MUST_PLAY_COUNTESS\"}, event)\n\n interaction = {\"playerId\": 
room_data[\"turn\"], \"card\": played_card, \"humanCard\": played_card_str}\n\n active_player_state = player_states[room_data[\"turn\"]]\n active_player_state[\"interaction\"] = {\"otherCard\": other_card}\n active_interaction = active_player_state[\"interaction\"]\n\n any_valid_target = False\n for i in range(num_players):\n if i != room_data[\"turn\"] and player_states[i][\"state\"] == \"ALIVE\":\n any_valid_target = True\n break\n\n if not any_valid_target and ( \\\n played_card_str == \"GUARD\" or \\\n played_card_str == \"PRIEST\" or \\\n played_card_str == \"BARON\" or \\\n played_card_str == \"KING\"):\n discard_card(room_data[\"turn\"], played_card, True)\n interaction[\"state\"] = \"CONTINUE\"\n\n elif played_card_str == \"GUARD\":\n if \"target\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_TARGET\"}, event)\n if \"guess\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_GUESS\"}, event)\n param_target = int(body[\"target\"])\n param_guess = body[\"guess\"][:10]\n\n if not valid_target(param_target, False):\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_TARGET\"}, event)\n\n if param_guess == \"GUARD\":\n return send_to_connection(connection_id, {\"ERROR\": \"CANT_GUESS_GUARD\"}, event)\n\n interaction[\"target\"] = param_target\n interaction[\"guess\"] = param_guess[:10]\n target_state = player_states[param_target]\n\n target_card = target_state[\"hand\"]\n\n correct_guess = card_types[target_card] == param_guess\n interaction[\"result\"] = \"CORRECT_GUESS\" if correct_guess else \"INCORRECT_GUESS\"\n if correct_guess:\n room_data[\"callback\"] = {\"REVEAL\": {\"kill\": param_target}}\n\n discard_card(room_data[\"turn\"], played_card, True)\n interaction[\"state\"] = \"REVEAL\"\n\n elif played_card_str == \"PRIEST\":\n # Player is allowed to see another player's hand.\n if \"target\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_TARGET\"}, event)\n param_target = int(body[\"target\"])\n\n if not valid_target(param_target, False):\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_TARGET\"}, event)\n\n interaction[\"target\"] = param_target\n target_state = player_states[param_target]\n\n revealed_card = target_state[\"hand\"]\n active_interaction[\"revealedCard\"] = revealed_card\n\n discard_card(room_data[\"turn\"], played_card, True)\n interaction[\"state\"] = \"REVEAL\"\n\n elif played_card_str == \"BARON\":\n # Player will choose another player and privately compare hands. 
The player with the lower-strength hand is eliminated from the round.\n if \"target\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_TARGET\"}, event)\n param_target = int(body[\"target\"])\n\n if not valid_target(param_target, False):\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_TARGET\"}, event)\n\n interaction[\"target\"] = param_target\n target_state = player_states[param_target]\n\n revealed_card = target_state[\"hand\"]\n\n turn_value = card_value_map[card_types[other_card]]\n reveal_value = card_value_map[card_types[revealed_card]]\n discard_card(room_data[\"turn\"], played_card, True)\n kill_id = None\n if turn_value == reveal_value:\n interaction[\"result\"] = \"TIE\"\n else:\n kill_id = param_target if turn_value > reveal_value else room_data[\"turn\"]\n discarded = player_states[kill_id][\"hand\"]\n interaction[\"result\"] = \"LOSE\"\n interaction[\"loser\"] = kill_id\n interaction[\"discard\"] = discarded\n room_data[\"callback\"] = {\"CONTINUE\": {\"kill\": param_target}}\n\n target_state[\"interaction\"] = {\"revealedCard\": other_card}\n active_interaction[\"revealedCard\"] = revealed_card\n interaction[\"state\"] = \"REVEAL\"\n\n elif played_card_str == \"PRINCE\":\n # Player can choose any player (including themselves) to discard their hand and draw a new one.\n if \"target\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_TARGET\"}, event)\n param_target = int(body[\"target\"])\n\n if not valid_target(param_target, True):\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_TARGET\"}, event)\n\n target_state = player_states[param_target]\n interaction[\"target\"] = param_target\n discard_card(room_data[\"turn\"], played_card, True)\n interaction[\"revealedCard\"] = target_state[\"hand\"]\n if param_target != room_data[\"turn\"]:\n room_data[\"callback\"] = {\"REVEAL\": {\"discard\": param_target}}\n interaction[\"state\"] = \"REVEAL\"\n else:\n discard_card(room_data[\"turn\"], other_card, False)\n interaction[\"state\"] = \"CONTINUE\"\n\n elif played_card_str == \"KING\":\n # Player trades hands with any other player.\n if \"target\" not in body:\n return send_to_connection(connection_id, {\"ERROR\": \"NO_TARGET\"}, event)\n param_target = int(body[\"target\"])\n\n if not valid_target(param_target, False):\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_TARGET\"}, event)\n\n interaction[\"target\"] = param_target\n target_state = player_states[param_target]\n\n # discard the KING first\n discard_card(room_data[\"turn\"], played_card, True)\n room_data[\"callback\"] = {\"REVEAL\": {\"swap\": True}}\n\n active_interaction[\"swappedFor\"] = target_state[\"hand\"]\n target_state[\"interaction\"] = {\"swappedFor\": other_card, \"prevCard\": target_state[\"hand\"]}\n interaction[\"state\"] = \"REVEAL\"\n\n elif played_card_str == \"HANDMAID\" or \\\n played_card_str == \"COUNTESS\" or \\\n played_card_str == \"PRINCESS\":\n if played_card_str == \"HANDMAID\":\n player_states[room_data[\"turn\"]][\"state\"] = \"SAFE\"\n discard_card(room_data[\"turn\"], played_card, True)\n interaction[\"state\"] = \"CONTINUE\"\n\n else:\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_CARD_STR\"}, event)\n\n room_data[\"interaction\"] = interaction\n for i in range(num_players):\n msg = get_public_state()\n msg[\"cmd\"] = \"PLAYED\"\n add_private_state(i, msg)\n send_to_connection(players_data[i][\"connectionId\"], msg, event)\n update_room()\n\n def handle_callback(callback):\n 
if \"kill\" in callback:\n player_states[callback[\"kill\"]][\"state\"] = \"DEAD\"\n discard_card(callback[\"kill\"], player_states[callback[\"kill\"]][\"hand\"], False)\n\n if \"discard\" in callback:\n discard_card(callback[\"discard\"], player_states[callback[\"discard\"]][\"hand\"], False)\n\n if \"swap\" in callback:\n active_player = player_states[room_data[\"turn\"]]\n target_player = player_states[room_data[\"interaction\"][\"target\"]]\n tmp = active_player[\"hand\"]\n active_player[\"hand\"] = target_player[\"hand\"]\n target_player[\"hand\"] = tmp\n\n def on_reveal():\n if \"REVEAL\" in room_data[\"callback\"]:\n handle_callback(room_data[\"callback\"][\"REVEAL\"])\n\n room_data[\"interaction\"][\"state\"] = \"CONTINUE\"\n\n for i in range(num_players):\n msg = get_public_state()\n msg[\"cmd\"] = \"REVEALED\"\n add_private_state(i, msg)\n send_to_connection(players_data[i][\"connectionId\"], msg, event)\n update_room()\n\n def on_continue():\n if \"CONTINUE\" in room_data[\"callback\"]:\n handle_callback(room_data[\"callback\"][\"CONTINUE\"])\n\n for i in range(num_players):\n msg = get_public_state()\n msg[\"cmd\"] = \"END_TURN\"\n add_private_state(i, msg)\n send_to_connection(players_data[i][\"connectionId\"], msg, event)\n next_turn()\n update_room()\n\n # TODO set False!\n debugging = True\n ## ^^^\n\n if debugging and param_cmd == \"RESTART\":\n return start_game()\n\n if \"interaction\" in room_data:\n room_data[\"interaction\"] = json.loads(room_data[\"interaction\"])\n\n if \"callback\" in room_data:\n room_data[\"callback\"] = json.loads(room_data[\"callback\"])\n\n # TODO remove debugging only\n if debugging and param_cmd == \"FORCE_ROUND_END\":\n room_data[\"deck\"] = room_data[\"deck\"][0:2]\n update_room()\n return\n ## ^^^\n\n if param_cmd == \"GET\":\n response = get_public_state()\n add_private_state(player_index, response)\n return send_to_connection(connection_id, response, event)\n\n if \"state\" in room_data[\"interaction\"]:\n if room_data[\"interaction\"][\"state\"] == \"REVEAL\":\n if param_cmd == \"REVEAL\":\n if player_index == room_data[\"interaction\"][\"target\"]:\n on_reveal()\n return\n return send_to_connection(connection_id, {\"ERROR\": \"WAITING_FOR_INTERACTION\"}, event)\n\n if room_data[\"interaction\"][\"state\"] == \"CONTINUE\":\n if param_cmd == \"CONTINUE\":\n if player_index == room_data[\"turn\"] or player_states[room_data[\"turn\"]][\"state\"] == \"DEAD\":\n on_continue()\n return\n return send_to_connection(connection_id, {\"ERROR\": \"WAITING_FOR_INTERACTION\"}, event)\n\n if room_data[\"interaction\"][\"state\"] == \"ROUND_COMPLETE\":\n if param_cmd == \"ROUND_COMPLETE\":\n if player_index == room_data[\"turn\"]:\n start_game(True)\n return\n return send_to_connection(connection_id, {\"ERROR\": \"WAITING_FOR_INTERACTION\"}, event)\n\n if room_data[\"gamestate\"] == \"LOGIN\":\n if param_cmd == \"START\":\n if \"connectionId\" not in players_data[0]:\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_ROOM\"}, event)\n if connection_id == players_data[0][\"connectionId\"]:\n return start_game()\n\n elif room_data[\"gamestate\"] == \"PLAYING\":\n if player_index == room_data[\"turn\"]:\n if param_cmd == \"PLAY_HAND\" or param_cmd == \"PLAY_PICKUP\":\n return play_card()\n else:\n return send_to_connection(connection_id, {\"ERROR\": \"NOT_YOUR_TURN\"}, event)\n\n return send_to_connection(connection_id, {\"ERROR\": \"INVALID_CMD\"}, event)\n\ndef lambda_handler(event, context):\n handle_websocket(event, context)\n return 
{\n \"statusCode\": 200,\n \"headers\": {},\n \"body\": \"\",\n \"isBase64Encoded\": False\n }\n"
},
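The handler above is driven entirely by small JSON payloads; these examples are derived from the cmd branches in handle_websocket (the room code "ABCD" is a made-up placeholder — real codes come from the loveletter-create endpoint):

```python
import json

get_state  = {"room": "ABCD", "cmd": "GET"}
play_guard = {"room": "ABCD", "cmd": "PLAY_HAND", "target": 2, "guess": "PRIEST"}
end_turn   = {"room": "ABCD", "cmd": "CONTINUE"}

for payload in (get_state, play_guard, end_turn):
    print(json.dumps(payload))  # sent over the WebSocket by the client
```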
{
"alpha_fraction": 0.5387276411056519,
"alphanum_fraction": 0.5436053276062012,
"avg_line_length": 27.11964988708496,
"blob_id": "c41afdb3558c5da02d96b5a7297f524c8c9ef763",
"content_id": "af431caeda1fd937ce8d64f9f4bbb62beb55eb2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 28907,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 1028,
"path": "/app.js",
"repo_name": "djfooks/loveletter",
"src_encoding": "UTF-8",
"text": "var cardTypes = [];\n\nvar cardDetailsMap = {\n \"GUARD\": { \"value\": 1, \"name\": \"Guard\", \"numInDeck\": 5,\n \"shortAction\": \"Guess another players card (you cannot guess \\\"Guard\\\").\",\n \"action\": \"Pick another player and guess their card type (you cannot guess \\\"Guard\\\"). If correct, the other player is eliminated.\" },\n \"PRIEST\": { \"value\": 2, \"name\": \"Priest\", \"numInDeck\": 2,\n \"shortAction\": \"Pick another player to privately see their hand.\",\n \"action\": \"Pick another player to privately see their hand.\" },\n \"BARON\": { \"value\": 3, \"name\": \"Baron\", \"numInDeck\": 2,\n \"shortAction\": \"Pick another player and compare hands. Lowest value is eliminated.\",\n \"action\": \"Pick another player and privately compare hands. The player with the lower-strength hand is eliminated.\" },\n \"HANDMAID\": { \"value\": 4, \"name\": \"Handmaid\", \"numInDeck\": 2,\n \"shortAction\": \"You cannot be targeted until your next turn.\",\n \"action\": \"You cannot be targeted until your next turn.\" },\n \"PRINCE\": { \"value\": 5, \"name\": \"Prince\", \"numInDeck\": 2,\n \"shortAction\": \"Pick any player to discard their hand and draw a new one.\",\n \"action\": \"Pick any player (including youself) to discard their hand and draw a new one. If they discard the Princess they are eliminated.\" },\n \"KING\": { \"value\": 6, \"name\": \"King\", \"numInDeck\": 1,\n \"shortAction\": \"Pick another player and trade hands with them.\",\n \"action\": \"Pick another player and trade hands with them.\" },\n \"COUNTESS\": { \"value\": 7, \"name\": \"Countess\", \"numInDeck\": 1,\n \"shortAction\": \"Must be played if your other card is a King or Prince.\",\n \"action\": \"If your other card is a King or Prince card, this card must be played.\" },\n \"PRINCESS\": { \"value\": 8, \"name\": \"Princess\", \"numInDeck\": 1,\n \"shortAction\": \"If you play this card for any reason, you are eliminated.\",\n \"action\": \"If you play this card for any reason, you are eliminated from the round.\" },\n};\n\nvar orderedCards = [\"GUARD\", \"PRIEST\", \"BARON\", \"HANDMAID\", \"PRINCE\", \"KING\", \"COUNTESS\", \"PRINCESS\"];\n\nvar tokensToWinMap = [ -1, -1, 7, 5, 4, 4, 4, 4, 4 ];\n\nfunction init()\n{\n function addCardTypes(cardStr)\n {\n var i;\n var count = cardDetailsMap[cardStr].numInDeck;\n for (i = 0; i < count; i += 1)\n {\n cardTypes.push(cardStr);\n }\n }\n\n var i;\n for (i = 0; i < orderedCards.length; i += 1)\n {\n addCardTypes(orderedCards[i]);\n cardDetailsMap[orderedCards[i]].cardType = orderedCards[i];\n }\n}\ninit();\n\nfunction getCardName(card)\n{\n return cardDetailsMap[cardTypes[card]].name + \" (\" + cardDetailsMap[cardTypes[card]].value + \")\";\n}\n\nvar App = function ()\n{\n window.onerror = this.onError.bind(this);\n this.debugText = \"\";\n\n var i;\n\n this.responseText = document.getElementById(\"responseText\");\n this.responseText.value = \"\";\n\n this.roomSpan = document.getElementById(\"roomSpan\");\n this.playersText = document.getElementById(\"playersText\");\n\n this.msgText = document.getElementById(\"msgText\");\n this.msgReadButton = document.getElementById(\"msgReadButton\");\n\n this.startButton = document.getElementById(\"startButton\");\n\n this.toggleHelpButton = document.getElementById(\"toggleHelpButton\");\n this.gameDiv = document.getElementById(\"gameDiv\");\n this.cardsHelpDiv = document.getElementById(\"cardsHelpDiv\");\n this.mainDiv = document.getElementById(\"mainDiv\");\n this.joinGameDiv = 
document.getElementById(\"joinGameDiv\");\n this.disconnectDiv = document.getElementById(\"disconnectDiv\");\n this.debugDiv = document.getElementById(\"debugDiv\");\n this.cardsDiv = document.getElementById(\"cardsDiv\");\n this.pickPlayerDiv = document.getElementById(\"pickPlayerDiv\");\n this.guessCardDiv = document.getElementById(\"guessCardDiv\");\n this.msgDiv = document.getElementById(\"msgDiv\");\n this.playingDivs = [ this.cardsDiv, this.pickPlayerDiv, this.guessCardDiv ];\n\n this.handText = [];\n this.handText.push(document.getElementById(\"card0Text\"));\n this.handText.push(document.getElementById(\"card1Text\"));\n\n this.playButtons = [];\n this.playButtons.push(document.getElementById(\"card0Button\"));\n this.playButtons.push(document.getElementById(\"card1Button\"));\n\n this.pickupDiv = document.getElementById(\"pickupDiv\");\n\n this.playerButtons = [];\n for (i = 0; i < 4; i += 1)\n {\n this.playerButtons.push(document.getElementById(\"pickPlayer\" + i + \"Button\"));\n }\n\n this.guessButtons = [];\n for (i = 1; i < 8; i += 1) // ignore GUARD\n {\n this.guessButtons.push(document.getElementById(\"guess\" + i + \"Button\"));\n this.guessButtons[i - 1].innerHTML = cardDetailsMap[orderedCards[i]].name;\n }\n\n this.cardHelpTexts = [];\n for (i = 0; i < 8; i += 1)\n {\n this.cardHelpTexts.push(document.getElementById(\"cardHelp\" + i + \"Text\"));\n }\n\n this.localStorage = window.localStorage;\n\n this.key = this.localStorage.getItem('key');\n if (!this.key)\n {\n this.key = Math.floor(Math.random() * 9999999);\n this.localStorage.setItem('key', this.key);\n }\n\n this.roomCodeInput = document.getElementById(\"roomCodeInput\");\n var roomCode = this.localStorage.getItem('room');\n if (roomCode)\n {\n this.roomCodeInput.value = roomCode;\n }\n\n var name = this.localStorage.getItem('name');\n this.nameInput = document.getElementById(\"nameInput\");\n if (name)\n {\n this.nameInput.value = name;\n }\n\n this.websocket = null;\n\n this.resetGame();\n\n if (name && roomCode)\n {\n this.connect();\n }\n else\n {\n this.setVisualState(\"JOIN_ROOM\");\n }\n\n this.updateCardHelp();\n this.showingCardHelp = false;\n};\n\nApp.prototype.toggleView = function ()\n{\n this.showingCardHelp = !this.showingCardHelp;\n if (this.showingCardHelp)\n {\n this.cardsHelpDiv.style.display = \"block\";\n this.mainDiv.style.display = \"none\";\n this.toggleHelpButton.innerHTML = \"Back\";\n }\n else\n {\n this.cardsHelpDiv.style.display = \"none\";\n this.mainDiv.style.display = \"block\";\n this.toggleHelpButton.innerHTML = \"Cards Descriptions\";\n }\n};\n\nApp.prototype.getCardPlayedCount = function (cardStr)\n{\n var i;\n var j;\n var total = 0;\n for (i = 0; i < this.playerStates.length; i += 1)\n {\n var played = this.playerStates[i].played;\n for (j = 0; j < played.length; j += 1)\n {\n if (cardTypes[played[j]] == cardStr)\n total += 1;\n }\n }\n return total;\n};\n\nApp.prototype.updateCardHelp = function ()\n{\n var i;\n var j;\n var padding = 50;\n for (i = 0; i < 8; i += 1)\n {\n var details = cardDetailsMap[orderedCards[i]];\n var played = this.getCardPlayedCount(details.cardType);\n var topLineText = details.name + \" (\" + details.value + \")\";\n for (; topLineText.length < padding;)\n {\n topLineText += \" \";\n }\n\n for (j = 0; j < details.numInDeck; j += 1)\n {\n topLineText += (j < played) ? 
\"I\" : \"-\";\n }\n topLineText += \"\\n\";\n this.cardHelpTexts[i].value = topLineText + details.shortAction;\n }\n}\n\nApp.prototype.resetGame = function ()\n{\n this.hand = [];\n this.players= [];\n this.playerStates = [];\n this.playerId = -1;\n this.turnId = -1;\n this.resetTurnState();\n};\n\nApp.prototype.resetTurnState = function ()\n{\n this.playingCardId = -1;\n this.playingCard = -1;\n this.playingCardStr = '';\n this.pickedPlayer = -1;\n this.interaction = {};\n this.msgReadCmd = '';\n}\n\nApp.prototype.show = function (div)\n{\n var i;\n for (i = 0; i < this.playingDivs.length; i += 1)\n {\n var element = this.playingDivs[i];\n var show = element == div;\n if (div.length && div.indexOf(element) !== -1)\n show = true;\n\n if (show)\n {\n element.style.display = \"block\";\n }\n else\n {\n element.style.display = \"none\";\n }\n }\n};\n\nApp.prototype.createRoom = function ()\n{\n var that = this;\n var xhr = new XMLHttpRequest();\n function reqListener ()\n {\n try\n {\n var data = JSON.parse(xhr.response);\n that.roomCode = data.room;\n that.roomCodeInput.value = that.roomCode;\n }\n catch (e)\n {\n }\n }\n\n xhr.onload = reqListener;\n xhr.open(\"GET\", 'https://bzlzgmgcuh.execute-api.eu-west-2.amazonaws.com/default/loveletter-create');\n xhr.setRequestHeader(\"Content-Type\", \"text/plain\");\n xhr.send();\n};\n\nApp.prototype.getRoomCode = function ()\n{\n return this.roomCodeInput.value.toUpperCase();\n};\n\nApp.prototype.connect = function ()\n{\n this.localStorage.setItem('name', this.nameInput.value);\n this.localStorage.setItem('room', this.getRoomCode());\n this.websocket = new WebSocket(\"wss://u72xrovjcj.execute-api.eu-west-2.amazonaws.com/test?room=\" + this.getRoomCode() + \"&key=\" + this.key + \"&name=\" + this.nameInput.value);\n\n var that = this;\n this.websocket.onopen = function (event) {\n that.onopen();\n };\n this.websocket.onmessage = function (event) {\n that.onmessage(event.data);\n };\n};\n\nApp.prototype.setVisualState = function (state)\n{\n this.joinGameDiv.style.display = (state === \"JOIN_ROOM\") ? \"block\" : \"none\";\n this.gameDiv.style.display = (state === \"CONNECTED\") ? 
\"block\" : \"none\";\n};\n\nApp.prototype.disconnect = function ()\n{\n this.websocket.close();\n this.responseText.value = \"Websocket disconnected\";\n\n this.localStorage.removeItem('room');\n this.resetGame();\n this.setVisualState(\"JOIN_ROOM\");\n};\n\nApp.prototype.onopen = function ()\n{\n this.roomSpan.innerHTML = this.getRoomCode();\n this.responseText.value = \"Websocket connected...\";\n this.send({ \"cmd\": \"GET\" });\n this.setVisualState(\"CONNECTED\");\n};\n\nApp.prototype.start = function ()\n{\n this.send({ \"cmd\": \"START\" });\n this.startButton.style.display = \"none\";\n};\n\nApp.prototype.restart = function ()\n{\n this.send({ \"cmd\": \"RESTART\" });\n this.startButton.style.display = \"none\";\n};\n\nApp.prototype.forceRoundEnd = function ()\n{\n this.send({ \"cmd\": \"FORCE_ROUND_END\" });\n};\n\nApp.prototype.onmessage = function (strData)\n{\n this.responseText.value += \"\\n\" + strData;\n var data = JSON.parse(strData);\n switch(data.cmd) {\n case \"START_CARD\":\n {\n this.playerId = data.playerId;\n this.addCard(data.pickup);\n }\n break;\n case \"PICKUP\":\n case \"YOUR_TURN\":\n {\n // clear your SAFE state\n this.playerStates[this.playerId][\"state\"] = \"ALIVE\";\n this.addCard(data.pickup);\n this.updatePlayersText();\n this.updatePlayButtons();\n }\n break;\n case \"JOINED\":\n {\n this.players[data.index] = data.name;\n this.updatePlayersText();\n }\n break;\n case \"START_GAME\":\n {\n this.setMsg(\"\");\n this.responseText.value = strData;\n this.gotFullState(data);\n }\n break;\n case \"STATE\":\n case \"PLAYED\":\n case \"ROUND_COMPLETE\":\n case \"NEXT_ROUND\":\n {\n this.gotFullState(data);\n }\n break;\n case \"END_TURN\":\n {\n this.endTurn();\n this.gotFullState(data);\n }\n break;\n case \"REVEALED\":\n {\n this.interaction.state = \"CONTINUE\";\n this.reveal();\n this.updateInteraction();\n }\n break;\n case \"NEXT_TURN\":\n {\n this.nextTurn(data);\n }\n break;\n case \"DISCARD\":\n {\n this.discard(data);\n }\n break;\n case \"ROUND_COMPLETE\":\n {\n this.turnId = data.turn;\n this.playerStates = data.playerStates;\n this.updateInteraction();\n }\n break;\n }\n};\n\nApp.prototype.gotFullState = function (data)\n{\n if (data.playerId !== undefined)\n this.playerId = data.playerId;\n this.players = data.players;\n if (data.gamestate == \"LOGIN\")\n {\n this.show([]);\n if (this.playerId == 0)\n this.startButton.style.display = \"block\";\n }\n else if (data.gamestate == \"PLAYING\")\n {\n this.hand = data.hand || [];\n this.turnId = data.turn;\n this.playerStates = data.playerStates;\n this.updateHandText();\n\n this.interaction = data.interaction;\n this.updateInteraction();\n }\n this.updatePlayersText();\n this.updatePlayButtons();\n this.updateCardHelp();\n}\n\nApp.prototype.discard = function (data)\n{\n var playerState = this.playerStates[data.playerId];\n if (data.playerId == this.playerId)\n {\n var i;\n for (i = 0; i < this.hand.length; i += 1)\n {\n if (data.card == this.hand[i])\n {\n playerState.played.push(data.card);\n this.hand.splice(i, 1);\n this.updateHandText();\n }\n }\n }\n else\n {\n playerState.played.push(data.card);\n }\n this.updateCardHelp();\n};\n\nApp.prototype.nextTurn = function (data)\n{\n this.turnId = data.turn;\n this.show(this.cardsDiv);\n this.interaction = {};\n this.updateInteraction();\n this.updatePlayersText();\n};\n\nApp.prototype.send = function (jsonData)\n{\n jsonData[\"room\"] = this.getRoomCode();\n this.websocket.send(JSON.stringify(jsonData));\n};\n\nApp.prototype.addCard = function 
(card)\n{\n var i;\n for (i = 0; i < this.hand.length; i += 1)\n {\n if (card == this.hand[i])\n {\n return;\n }\n }\n this.hand.push(card);\n this.updateHandText();\n};\n\nApp.prototype.updateHandText = function ()\n{\n var card;\n var details;\n var i;\n if (!this.hand)\n {\n for (i = 0; i < 2; i += 1)\n {\n this.handText[i].value = \"\";\n }\n return;\n }\n\n for (i = 0; i < this.hand.length; i += 1)\n {\n card = this.hand[i];\n details = cardDetailsMap[cardTypes[card]];\n this.handText[i].value = getCardName(card) + \"\\n\" + details.shortAction;\n }\n\n if (this.hand.length == 2)\n {\n this.pickupDiv.style.display = \"block\";\n }\n else\n {\n this.pickupDiv.style.display = \"none\";\n }\n};\n\nApp.prototype.roundComplete = function ()\n{\n this.show([]);\n var i;\n var j;\n var roundWinners = this.interaction.roundWinners;\n this.updatePlayersText();\n var msgText = \"\";\n if (roundWinners.length == 1)\n {\n msgText += this.players[roundWinners[0]] + \" wins the round!\\n\";\n }\n else\n {\n for (i = 0; i < roundWinners.length; i += 1)\n msgText += (i == 0 ? \"\" : \", \") + this.players[roundWinners[i]]\n msgText += \" tied for the win!\\n\";\n }\n\n var longestName = 12;\n for (i = 0; i < this.players.length; i += 1)\n {\n longestName = Math.max(longestName, this.players[i].length);\n }\n\n for (i = 0; i < this.playerStates.length; i += 1)\n {\n msgText += this.players[i];\n for (j = this.players[i].length; j < longestName + 4; j += 1)\n {\n msgText += \" \";\n }\n if (this.playerStates[i].state !== \"DEAD\")\n {\n if (this.interaction.finalCards.length > 0)\n {\n msgText += getCardName(this.interaction.finalCards[i]);\n }\n else\n {\n msgText += \"ROUND WINNER!\";\n }\n }\n else\n {\n msgText += \"ELIMINATED\";\n }\n msgText += \"\\n\";\n }\n if (this.interaction.hiddenCard !== null)\n msgText += \"Hidden card was \" + getCardName(this.interaction.hiddenCard);\n\n this.setMsg(msgText, (this.turnId == this.playerId && this.interaction.gameWinner === undefined) ? \"START NEW ROUND\" : null)\n};\n\nApp.prototype.updatePlayersText = function ()\n{\n var roundWinners = this.interaction.roundWinners;\n if (!roundWinners)\n roundWinners = [];\n\n var i;\n var j;\n var longestName = 12;\n var wins;\n var winner = -1;\n var highestRoundsWon = 0;\n var gotPlayerStates = this.playerStates && this.playerStates.length == this.players.length;\n for (i = 0; i < this.players.length; i += 1)\n {\n longestName = Math.max(longestName, this.players[i].length);\n if (gotPlayerStates)\n {\n wins = this.playerStates[i].wins;\n if (wins >= tokensToWinMap[this.players.length])\n {\n if (wins > highestRoundsWon)\n {\n winner = i;\n highestRoundsWon = wins;\n }\n else if (wins == highestRoundsWon)\n {\n winner = -1;\n }\n }\n }\n }\n\n var playersMsg = \"\";\n var first = true;\n for (i = 0; i < this.players.length; i += 1)\n {\n if (!first)\n {\n playersMsg += \"\\n\";\n }\n playersMsg += this.players[i];\n for (j = this.players[i].length; j < longestName + 4; j += 1)\n {\n playersMsg += \" \";\n }\n if (gotPlayerStates)\n {\n if (winner === i)\n {\n playersMsg += \"GAME WINNER! \";\n }\n else if (roundWinners.indexOf(i) !== -1)\n {\n playersMsg += \"ROUND WINNER! \";\n }\n else if (winner === -1 && this.turnId == i)\n {\n playersMsg += \"TURN... 
\";\n }\n else if (this.playerStates[i].state === \"DEAD\")\n {\n playersMsg += \"ELIMINATED \";\n }\n else if (this.playerStates[i].state === \"SAFE\")\n {\n playersMsg += \"SAFE \";\n }\n else\n {\n playersMsg += \" \";\n }\n\n for (j = 0; j < this.playerStates[i].wins; j += 1)\n {\n playersMsg += \"I\";\n }\n }\n first = false;\n }\n\n this.playersText.value = playersMsg;\n this.updatePlayersButtons();\n};\n\nApp.prototype.updatePlayersButtons = function ()\n{\n var i;\n if (!this.anyValidTargets() && (this.playingCardStr != \"PRINCE\"))\n {\n this.playerButtons[0].innerHTML = \"No Valid Target\";\n this.playerButtons[0].style.display = \"block\";\n for (i = 1; i < 4; i += 1)\n {\n this.playerButtons[i].style.display = \"none\";\n }\n return;\n }\n\n for (i = 0; i < 4; i += 1)\n {\n var show = i < this.players.length;\n\n if (i == this.playerId && this.playingCardStr != \"PRINCE\")\n show = false;\n\n if (show && this.playerStates[i] && this.playerStates[i][\"state\"] != \"ALIVE\")\n show = false;\n\n if (show)\n {\n this.playerButtons[i].innerHTML = this.players[i];\n this.playerButtons[i].style.display = \"block\";\n }\n else\n {\n this.playerButtons[i].style.display = \"none\";\n }\n }\n};\n\nApp.prototype.updatePlayButtons = function (forceDisable)\n{\n var showButtons = !forceDisable && this.turnId == this.playerId && !this.interaction.state;\n this.playButtons[0].style.display = showButtons ? \"inline\" : \"none\";\n this.playButtons[1].style.display = showButtons ? \"inline\" : \"none\";\n};\n\nApp.prototype.playCard = function (cardId)\n{\n var card = this.hand[cardId];\n if (card === undefined)\n return;\n var otherCard = this.hand[cardId == 0 ? 1 : 0];\n var cardStr = cardTypes[card];\n\n this.playingCardId = cardId;\n this.playingCard = card;\n this.playingCardStr = cardStr;\n\n if (cardTypes[otherCard] == \"COUNTESS\" && (cardStr == \"KING\" || cardStr == \"PRINCE\"))\n {\n var turnName = this.players[this.turnId];\n this.setMsg(\"TURN: \" + turnName + \"...\\nMUST PLAY COUNTESS WITH A KING OR PRINCE!\");\n return;\n }\n\n switch(cardStr) {\n case \"GUARD\":\n case \"PRIEST\":\n case \"BARON\":\n case \"PRINCE\":\n case \"KING\":\n {\n this.updatePlayersButtons();\n this.show(this.pickPlayerDiv);\n return;\n }\n break;\n }\n\n this.sendPlayCard(cardId);\n};\n\nApp.prototype.pickPlayer = function (pickedId)\n{\n if (pickedId == -1) // back\n {\n this.resetTurnState();\n this.show(this.cardsDiv);\n return;\n }\n\n if (this.playingCardStr == \"GUARD\" && this.anyValidTargets())\n {\n this.pickedPlayer = pickedId;\n this.show(this.guessCardDiv);\n }\n else\n {\n this.sendPlayCard(this.playingCardId, pickedId);\n }\n};\n\nApp.prototype.guess = function (guessCardIndex)\n{\n if (guessCardIndex == -1) // back\n {\n this.updatePlayersButtons();\n this.show(this.pickPlayerDiv);\n return;\n }\n\n var cardStr = orderedCards[guessCardIndex];\n this.sendPlayCard(this.playingCardId, this.pickedPlayer, cardStr);\n this.updatePlayButtons(true);\n};\n\nApp.prototype.sendPlayCard = function (cardId, target, guess)\n{\n var cmd;\n if (cardId == 0)\n cmd = \"PLAY_HAND\";\n else if (cardId == 1)\n cmd = \"PLAY_PICKUP\";\n var msg = { \"room\": this.getRoomCode(), \"cmd\": cmd };\n if (target != undefined)\n msg[\"target\"] = target;\n if (guess != undefined)\n msg[\"guess\"] = guess;\n this.send(msg);\n this.resetTurnState();\n\n this.hand.splice(cardId, 1);\n this.updateHandText();\n this.show(this.cardsDiv);\n};\n\nApp.prototype.getOtherCard = function ()\n{\n var i;\n for (i = 0; i < 2; i += 
1)\n {\n if (this.hand[i] !== this.interaction.card)\n return this.hand[i];\n }\n return this.hand[0];\n};\n\nApp.prototype.anyValidTargets = function ()\n{\n var i;\n for (i = 0; i < this.playerStates.length; i += 1)\n {\n if (i != this.turnId && this.playerStates[i].state == \"ALIVE\")\n {\n return true;\n }\n }\n return false;\n};\n\nApp.prototype.reveal = function ()\n{\n var i;\n var j;\n var myTurn = this.playerId == this.turnId;\n var target = this.interaction.target;\n var isTarget = this.playerId == target;\n var cardStr = cardTypes[this.interaction.card];\n if (cardStr == \"KING\" && this.interaction.swappedFor !== undefined)\n {\n this.hand[0] = this.interaction.swappedFor;\n this.updateHandText();\n }\n};\n\nApp.prototype.endTurn = function ()\n{\n};\n\nApp.prototype.updateInteraction = function ()\n{\n if (this.interaction.state == \"ROUND_COMPLETE\")\n {\n this.roundComplete();\n return;\n }\n this.show(this.cardsDiv);\n\n var myTurn = this.playerId == this.turnId;\n var turnName = this.players[this.turnId];\n var target = this.interaction.target;\n if (this.interaction.card === undefined)\n {\n this.setMsg(\"TURN: \" + turnName + (myTurn ? \"...\" : \"\"));\n return;\n }\n\n var isTarget = this.playerId == target;\n var guess = this.interaction.guess;\n var cardStr = cardTypes[this.interaction.card];\n var result = this.interaction.result;\n\n var targetName = this.players[target];\n\n var stateIsReveal = this.interaction.state == \"REVEAL\";\n var stateIsContinue = this.interaction.state == \"CONTINUE\";\n\n var buttonText;\n var showButton = (myTurn && stateIsContinue) || (isTarget && stateIsReveal);\n if (myTurn)\n buttonText = \"END TURN...\"\n\n var msgText = \"TURN: \" + turnName + (stateIsContinue ? \"...\" : \"\") + \"\\n\";\n msgText += \"PLAYED: \" + getCardName(this.interaction.card) + \"\\n\";\n\n if (!this.anyValidTargets() && (cardStr == \"GUARD\" ||\n cardStr == \"PRIEST\" ||\n cardStr == \"BARON\" ||\n cardStr == \"KING\"))\n {\n msgText += \"NO VALID TARGET\";\n this.setMsg(msgText, showButton ? buttonText : null);\n return;\n }\n\n if (target !== undefined)\n {\n msgText += \"TARGET: \" + targetName + (stateIsReveal ? \"...\" : \"\") + \"\\n\";\n }\n\n switch(cardStr) {\n case \"GUARD\":\n {\n // Player designates another player and names a type of card.\n // If that player's hand matches the type of card specified, that player is eliminated from the round.\n // However, Guard cannot be named as the type of card.\n var guessName = cardDetailsMap[guess].name;\n var guessCorrect = result == \"CORRECT_GUESS\";\n msgText += \"GUESS: \" + guessName + \"\\n\";\n if (stateIsContinue)\n msgText += \"RESULT: \" + \"Guess \" + (guessCorrect ? \"CORRECT\" : \"INCORRECT\") + \"\\n\";\n if (stateIsReveal && isTarget)\n buttonText = \"RESPOND \" + (guessCorrect ? 
\"(Oh no they guessed correctly!)\" : \"(Ha they guessed wrong!)\");\n }\n break;\n\n case \"PRIEST\":\n {\n // Player is allowed to see another player's hand.\n if (isTarget)\n buttonText = \"REVEAL CARD\";\n\n if (myTurn && stateIsContinue)\n {\n msgText += \"REVEALED CARD: \" + getCardName(this.interaction.revealedCard) + \"\\n\";\n }\n }\n break;\n\n case \"BARON\":\n {\n // Player will choose another player and privately compare hands.\n // The player with the lower-strength hand is eliminated from the round.\n if (isTarget)\n buttonText = \"COMPARE CARDS\";\n\n if (stateIsContinue)\n {\n if (myTurn || isTarget)\n {\n msgText += \"YOUR CARD: \" + getCardName(this.getOtherCard()) + \"\\n\";\n msgText += \"TARGET CARD: \" + getCardName(this.interaction.revealedCard) + \"\\n\";\n }\n if (this.interaction.result == \"TIE\")\n {\n msgText += \"RESULT: TIE\\n\";\n }\n else if (myTurn || isTarget)\n {\n var isLoser = this.playerId == this.interaction.loser;\n msgText += \"RESULT: \" + (isLoser ? \"You lose\" : \"You win\");\n }\n else\n {\n msgText += \"LOSER: \" + this.players[this.interaction.loser] + \"\\n\";\n msgText += \"LOSING CARD: \" + getCardName(this.interaction.discard);\n }\n }\n }\n break;\n\n case \"PRINCE\":\n {\n // Player can choose any player (including themselves) to discard their hand and draw a new one.\n // If the discarded card is the Princess, the discarding player is eliminated.\n if (stateIsContinue)\n {\n msgText += \"DISCARDED CARD: \" + getCardName(this.interaction.revealedCard);\n }\n else if (isTarget)\n {\n buttonText = \"DISCARD CARD\";\n }\n }\n break;\n\n case \"KING\":\n {\n // Player trades hands with any other player.\n if (isTarget)\n buttonText = \"SWAP CARDS\";\n\n if (myTurn || isTarget)\n {\n if (stateIsContinue)\n {\n msgText += \"OLD CARD: \" + getCardName(myTurn ? this.interaction.otherCard : this.interaction.prevCard);\n }\n else\n {\n msgText += \"YOUR CARD: \" + getCardName(myTurn ? this.interaction.otherCard : this.interaction.prevCard);\n }\n }\n }\n break;\n }\n\n this.setMsg(msgText, showButton ? buttonText : null);\n};\n\nApp.prototype.setMsg = function (msgText, buttonText)\n{\n this.msgText.value = msgText;\n if (buttonText)\n {\n this.msgReadButton.innerHTML = buttonText;\n this.msgReadButton.style.display = \"inline\";\n }\n else\n {\n this.msgReadButton.style.display = \"none\";\n }\n this.show(this.cardsDiv);\n};\n\nApp.prototype.msgRead = function ()\n{\n this.send({\"cmd\": this.interaction.state});\n this.msgReadCmd = '';\n this.msgReadButton.style.display = \"none\";\n};\n\nApp.prototype.debugInfo = function debugInfo(str)\n{\n this.debugMsg += str + \"<br>\";\n document.getElementById(\"debugText\").innerHTML = this.debugMsg;\n};\n\nApp.prototype.onError = function onError(message, source, lineno, colno, error)\n{\n this.debugInfo(\"Error: \" + source + \":\" + lineno + \" \" + message);\n};\n\nvar app = new App();\n"
}
] | 2 |
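The JavaScript client above ends with `sendPlayCard`, which serializes a turn into a small JSON command message. A minimal Python sketch of that payload shape, for illustration only: the field names (`room`, `cmd`, `target`, `guess`) and the `PLAY_HAND`/`PLAY_PICKUP` values come from the client code, while the `build_play_message` helper itself is a hypothetical name.

```python
import json

def build_play_message(room_code, card_id, target=None, guess=None):
    """Hypothetical helper mirroring the JS client's sendPlayCard payload."""
    # Card index 0 is the held card, index 1 the freshly drawn card.
    msg = {"room": room_code,
           "cmd": "PLAY_HAND" if card_id == 0 else "PLAY_PICKUP"}
    if target is not None:  # Guard/Priest/Baron/Prince/King name a target player
        msg["target"] = target
    if guess is not None:   # only the Guard also names a guessed card type
        msg["guess"] = guess
    return json.dumps(msg)

# Playing the held card as a Guard against player 2, guessing BARON:
print(build_play_message("ABCD", 0, target=2, guess="BARON"))
```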
christippett/timepro-timesheet | https://github.com/christippett/timepro-timesheet | 8c98de7300fa417289e3669fea5aab78cad895ae | 868af3999695ed2b17ec91ea1e81913a47667ddd | 848df8abae00c58e93e186f4e4e92dcb723026dc | refs/heads/master | 2021-07-13T02:52:54.211861 | 2020-05-23T16:22:39 | 2020-05-23T16:22:39 | 143,497,903 | 2 | 1 | MIT | 2018-08-04T04:35:07 | 2018-11-01T03:53:53 | 2018-11-23T05:47:47 | Python | [
{
"alpha_fraction": 0.5654118657112122,
"alphanum_fraction": 0.5691390037536621,
"avg_line_length": 33.619354248046875,
"blob_id": "c2a87e4012023968fcd4af55f6b009d92fa79a16",
"content_id": "a78020afa4821efafa2bcfcc668c52131bb820ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5366,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 155,
"path": "/src/timepro_timesheet/cli.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "import argparse\nimport json\nimport sys\nfrom datetime import date\n\nfrom dateutil.parser import parse as dateparser\nfrom dateutil.relativedelta import relativedelta, MO, FR\n\nfrom .api import TimesheetAPI\nfrom .timesheet import Timesheet\n\nTODAY = date.today()\n\n\nclass TimesheetCLI:\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Programmatically get your timesheet from Intertec TimePro (timesheets.com.au.)\"\n )\n parser.add_argument(\"command\", help=\"Action to run\")\n # parse only common arguments, the rest will be parsed per subcommand\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print(\"Invalid command\")\n parser.print_help()\n exit(1)\n # use dispatch pattern to invoke method with same name\n getattr(self, args.command)(sys.argv[2:])\n\n def _create_parser(self, description):\n parser = argparse.ArgumentParser(description=description)\n login_parameters = parser.add_argument_group(\"login parameters\")\n login_parameters.add_argument(\n \"-c\",\n \"--customer\",\n dest=\"customer\",\n required=True,\n help=\"Employer's TimePro Customer ID\",\n )\n login_parameters.add_argument(\n \"-u\",\n \"--user\",\n dest=\"username\",\n required=True,\n help=\"Username to log into TimePro\",\n )\n login_parameters.add_argument(\n \"-p\",\n \"--password\",\n dest=\"password\",\n required=True,\n help=\"Password to log into TimePro\",\n )\n return parser\n\n def get(self, arg_options):\n parser = self._create_parser(\n description=\"Get timesheet data from Intertec TimePro\"\n )\n get_parameters = parser.add_argument_group(\"filter options\")\n get_parameters.add_argument(\n \"--start\",\n dest=\"start_date\",\n metavar=\"START_DATE\",\n help=\"Start date of timesheet period\",\n )\n get_parameters.add_argument(\n \"--end\",\n dest=\"end_date\",\n metavar=\"END_DATE\",\n help=\"End date of timesheet period\",\n )\n get_parameters.add_argument(\n \"--current-week\",\n dest=\"current_week\",\n action=\"store_true\",\n help=\"Get current week's timesheet\",\n )\n get_parameters.add_argument(\n \"--current-month\",\n dest=\"current_month\",\n action=\"store_true\",\n help=\"Get current month's timesheet\",\n )\n get_parameters.add_argument(\n \"--last-week\",\n dest=\"last_week\",\n action=\"store_true\",\n help=\"Get last week's timesheet\",\n )\n get_parameters.add_argument(\n \"--last-month\",\n dest=\"last_month\",\n action=\"store_true\",\n help=\"Get last month's timesheet\",\n )\n\n # If Saturday or Sunday, treat \"last week\" as the week just been\n week_offset = 1 if TODAY.weekday() >= 5 else 0\n\n args = parser.parse_args(arg_options)\n if args.start_date and args.end_date:\n start_date = dateparser(args.start_date)\n end_date = dateparser(args.end_date)\n elif args.current_month:\n start_date = TODAY + relativedelta(day=1)\n end_date = TODAY + relativedelta(day=31)\n elif args.last_month:\n start_date = TODAY + relativedelta(day=1, months=-1)\n end_date = TODAY + relativedelta(day=31, months=-1)\n elif args.current_week:\n start_date = TODAY + relativedelta(weekday=MO(-1), weeks=week_offset)\n end_date = start_date + relativedelta(weekday=FR)\n elif args.last_week:\n start_date = TODAY + relativedelta(weekday=MO(-1), weeks=week_offset - 1)\n end_date = start_date + relativedelta(weekday=FR)\n else:\n # default to get this week's timesheet (excl. 
previous month)\n start_date = max(\n [TODAY + relativedelta(day=1), TODAY + relativedelta(weekday=MO(-1))]\n )\n end_date = TODAY + relativedelta(weekday=FR)\n date_kwargs = dict(start_date=start_date, end_date=end_date)\n api = TimesheetAPI()\n api.login(\n customer_id=args.customer, username=args.username, password=args.password\n )\n timesheet = api.get_timesheet(**date_kwargs)\n print(timesheet.json())\n\n def post(self, arg_options):\n parser = self._create_parser(\n description=\"Submit timesheet data to Intertec TimePro\"\n )\n post_parameters = parser.add_argument_group(\"input options\")\n # post input file and allow piping from stdin\n post_parameters.add_argument(\n \"-f\", \"--file\", type=argparse.FileType(\"r\"), default=sys.stdin\n )\n args = parser.parse_args(arg_options)\n data = json.loads(args.file.read())\n timesheet = Timesheet(data=data)\n api = TimesheetAPI()\n api.login(\n customer_id=args.customer, username=args.username, password=args.password\n )\n timesheet = api.post_timesheet(timesheet)\n\n\ndef main():\n TimesheetCLI()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
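The date arithmetic in `cli.py` above leans on `dateutil.relativedelta` weekday anchors, which can be opaque. A small sketch of what the defaults evaluate to, assuming a fixed `today` standing in for the module-level `TODAY` constant:

```python
from datetime import date
from dateutil.relativedelta import relativedelta, MO, FR

today = date(2018, 8, 9)  # a Thursday, standing in for TODAY

monday = today + relativedelta(weekday=MO(-1))   # most recent Monday -> 2018-08-06
friday = monday + relativedelta(weekday=FR)      # Friday of that week -> 2018-08-10

# On Saturday/Sunday the CLI bumps week_offset so that "last week" means
# the week that has just ended rather than the one before it.
week_offset = 1 if today.weekday() >= 5 else 0
last_monday = today + relativedelta(weekday=MO(-1), weeks=week_offset - 1)
print(monday, friday, last_monday)               # 2018-08-06 2018-08-10 2018-07-30
```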
{
"alpha_fraction": 0.6098020672798157,
"alphanum_fraction": 0.6154571175575256,
"avg_line_length": 26.921052932739258,
"blob_id": "dee35e89fefb881e3fc6d8b3d2a2f4eaaecddb74",
"content_id": "40bc890b75d5d3317ca785079350ef308cb9f612",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1061,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 38,
"path": "/src/timepro_timesheet/utils.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta, date, datetime\n\nfrom dateutil.parser import parse as dateparser\n\n\ndef generate_date_series(start_date, end_date):\n \"\"\"\n Generate series of dates from start to end date\n \"\"\"\n days_diff = (end_date - start_date).days\n return [start_date + timedelta(days=x) for x in range(0, days_diff + 1)]\n\n\ndef convert_keys_to_dates(data):\n converted_data = {}\n for k, d in data.items():\n key = k\n if not isinstance(key, date) and not isinstance(key, datetime):\n key = dateparser(key)\n converted_data[key] = d\n return converted_data\n\n\ndef convert_time_string_and_minutes_to_hours(time_string):\n colon_count = time_string.count(\":\")\n\n if colon_count < 1:\n return float(time_string)\n elif colon_count > 1:\n raise ValueError(\n \"expected time_string to be in the format hh:mm or hh.h; got {}\".format(\n repr(time_string)\n )\n )\n\n hours, minutes = [float(x) for x in time_string.split(\":\")]\n\n return hours + (minutes / 60)\n"
},
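A quick check of the `utils.py` helpers above; note that `generate_date_series` is inclusive of both endpoints, which is what the timesheet code relies on when filling gaps between the start and end dates. This sketch assumes the package is importable, as in the test suite:

```python
from datetime import date
from timepro_timesheet.utils import generate_date_series

series = generate_date_series(date(2018, 8, 6), date(2018, 8, 10))
assert len(series) == 5                 # Mon..Fri, both endpoints included
assert series[0] == date(2018, 8, 6)
assert series[-1] == date(2018, 8, 10)
```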
{
"alpha_fraction": 0.6412859559059143,
"alphanum_fraction": 0.6937394142150879,
"avg_line_length": 35.9375,
"blob_id": "5162c3260909b1a0e34acc07e84a44545229c12c",
"content_id": "b140b5e93603c975c28824f6f7fbd74fbbd6a856",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 591,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 16,
"path": "/tests/test_timesheet.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "from timepro_timesheet.utils import convert_time_string_and_minutes_to_hours\n\n\ndef test_convert_time_string_and_minutes_to_hours():\n assert convert_time_string_and_minutes_to_hours(\"13\") == 13.0\n assert convert_time_string_and_minutes_to_hours(\"13:00\") == 13.0\n assert convert_time_string_and_minutes_to_hours(\"13.5\") == 13.5\n assert convert_time_string_and_minutes_to_hours(\"13:30\") == 13.5\n\n exception = None\n try:\n convert_time_string_and_minutes_to_hours(\"13:30:30\")\n except Exception as e:\n exception = e\n\n assert isinstance(exception, ValueError)\n"
},
{
"alpha_fraction": 0.7557603716850281,
"alphanum_fraction": 0.7557603716850281,
"avg_line_length": 35.16666793823242,
"blob_id": "52982befe0683256c8de4efc92660c8d1dcec498",
"content_id": "ad5efa4f70ff56feb363b2d6bbafea507b28e0e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 6,
"path": "/src/timepro_timesheet/version.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "from pkg_resources import get_distribution, DistributionNotFound\n\ntry:\n __version__ = get_distribution(\"timepro-timesheet\").version\nexcept DistributionNotFound:\n __version__ = \"unknown\" # package not installed\n"
},
{
"alpha_fraction": 0.670749306678772,
"alphanum_fraction": 0.6844380497932434,
"avg_line_length": 29.173913955688477,
"blob_id": "3d6c7a793ad56b9b9589aaffb70c9243995b0005",
"content_id": "78373cc36910e758235a7318dd80db5d3fd24180",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2776,
"license_type": "permissive",
"max_line_length": 231,
"num_lines": 92,
"path": "/README.md",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "Intertec TimePro Utils\n=============================================================\n\n[](https://pypi.python.org/pypi/timepro-timesheet)\n[](https://travis-ci.org/christippett/timepro-timesheet)\n[](https://coveralls.io/github/christippett/timepro-timesheet?branch=master)\n[](https://pypi.python.org/pypi/timepro-timesheet)\n[](https://github.com/christippett/timepro-timesheet)\n\nDescription\n===========\n\nProgrammatically get and submit timesheet data to Intertec TimePro (timesheets.com.au)\n\n**NB:** My company no longer uses timesheets.com.au as its timesheeting application, therefore its difficult to impossible for me to guarantee the future compatibility and maintenance of this application. \n\n\nInstallation\n============\n\nInstall with `pip`:\n\n``` bash\npip install timepro-timesheet\n```\n\nUsage\n=====\n\nCommand line\n------------\n\n**GET data**\n\nOnce installed, you can use the CLI to get your timesheet data as JSON.\n\n``` bash\n$ timepro get -c CUST -u john.doe -p password123\n {\n \"2018-08-04\": [\n {\n \"customer_code\": \"EXAMPLE\",\n \"customer_description\": \"Example Company Pty Ltd\",\n \"project_code\": \"EX-123\",\n \"project_psid\": \"EX-123{:}1\",\n \"project_description\": \"EXAMPLE - EX-123 - SOW000 - Important Business Stuff - PO 123\",\n \"task_id\": null,\n \"task_description\": null,\n \"hours\": 8\n }\n ]\n }\n```\n\nYou can filter the timesheet period by specifying dates for `--start` and `--end`, or by using the `--this-week`, `--this-month`, `--last-week` or `--last-month` flags. By default, the current week's timesheet entries are returned.\n\n**POST data**\n\nData can be submitted by reading from a JSON file.\n\n``` bash\n$ timepro post -c CUST -u john.doe -p password123 -f timesheet_entries.json\n```\n\nor\n\n``` bash\n$ cat timesheet_entries.json | timepro post -c CUST -u john.doe -p password123\n```\n\nPython\n------\n\n``` python\nfrom timepro_timesheet.api import TimesheetAPI\n\n# Log into timesheets.com.au via the TimesheetAPI class\napi = TimesheetAPI()\napi.login(customer_id='CUST', username='john.doe', password='password123')\n\n# Get timesheet (defaults to current month)\ntimesheet = api.get_timesheet()\n\n# Get timesheet for a given date\ntimesheet = api.get_timesheet(start_date=date(2018, 6, 1), end_date=date(2018, 6, 25))\n\n# Output timesheet\ntimesheet.json()\ntimesheet.row_entries()\ntimesheet.date_entries()\n\n```\n"
},
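The README above shows `timepro post` reading a JSON file but leaves the payload shape implicit; per `cli.py` and `Timesheet.extract_form_data_from_dict`, it is the same date-keyed structure the GET command emits. A minimal sketch (the project, task, and description values are placeholders):

```python
import json

# Dates map to lists of entries; the keys mirror the GET output above.
entries = {
    "2018-08-06": [
        {
            "customer_code": "EXAMPLE",
            "project_psid": "EX-123{:}1",
            "task_id": None,
            "description": "Sprint work",
            "hours": 8,
        }
    ]
}

with open("timesheet_entries.json", "w") as f:
    json.dump(entries, f)

# Then: timepro post -c CUST -u john.doe -p password123 -f timesheet_entries.json
```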
{
"alpha_fraction": 0.5256457924842834,
"alphanum_fraction": 0.5272835493087769,
"avg_line_length": 40.07950973510742,
"blob_id": "c5e28dfb4ed87056d1b82cf633d5c9b3e9b79e76",
"content_id": "5e3d7a743c89a14313cfa783e40e1e6ffadbd589",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13433,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 327,
"path": "/src/timepro_timesheet/timesheet.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "import itertools\nimport json\nimport re\n\nfrom dateutil.parser import parse as dateparser\n\nfrom .utils import (\n generate_date_series,\n convert_keys_to_dates,\n convert_time_string_and_minutes_to_hours,\n)\n\n\nclass Timesheet:\n FORM_XPATH_INPUT_ROWS = '//input[@name=\"InputRows\"]'\n FORM_XPATH_START_DATE = '//input[@name=\"StartDate\"]'\n FORM_XPATH_END_DATE = '//input[@name=\"EndDate\"]'\n FORM_XPATH_CUSTOMERS = '//*[contains(@name, \"CustomerCode_\")]'\n FORM_XPATH_PROJECTS = '//*[contains(@name, \"Project_\")]'\n FORM_XPATH_TASKS = '//*[contains(@name, \"Task_\")]'\n FORM_XPATH_TIMES = '//*[contains(@name, \"FinishTime_\")]'\n FORM_XPATH_DESCRIPTIONS = '//*[contains(@name, \"Description_\")]'\n TIMESHEET_FIELD_PATTERN = (\n r\"^(?P<entry_type>\\w+)_(?P<row_id>\\d+)_(?P<column_id>\\d+)$\"\n )\n\n def __init__(\n self,\n html=None,\n data=None,\n customer_options=None,\n project_options=None,\n task_options=None,\n ):\n self._customer_options = customer_options or []\n self._project_options = project_options or []\n self._task_options = task_options or []\n self._form_data = {}\n self._html = html\n if html:\n self._form_data = self.extract_form_data_from_html(html)\n if data:\n data = convert_keys_to_dates(data)\n self._form_data = self.extract_form_data_from_dict(data)\n\n def lookup_customer(self, customer):\n customers = [\n c for c in self._customer_options if c[\"customer_code\"] == customer\n ]\n return customers[0] if customers else {}\n\n def lookup_project(self, project):\n search_key = \"project_psid\" if \"{:}\" in project else \"project_code\"\n projects = []\n for p in self._project_options.copy():\n p.pop(\n \"task_count\", None\n ) # exclude task_count when returning project details\n if p[search_key] == project:\n projects.append(p)\n return projects[0] if projects else {}\n\n def lookup_task(self, task):\n tasks = [t for t in self._task_options if t[\"task_id\"] == task]\n return tasks[0] if tasks else {}\n\n def row_entries(self):\n \"\"\"\n Construct dictionary of timesheet entries, with row numbers as keys.\n \"\"\"\n entries = {}\n for k, v in self._form_data.items():\n m = re.match(self.TIMESHEET_FIELD_PATTERN, k)\n if not m:\n continue\n entry_type, row_id, column_id = m.groups()\n row_id, column_id = int(row_id), int(column_id)\n entry = entries.get(row_id, {})\n if entry_type == \"Customer\":\n entry[\"customer\"] = v\n elif entry_type == \"Project\":\n entry[\"project\"] = v\n elif entry_type == \"Task\":\n entry[\"task\"] = v\n elif entry_type == \"Description\":\n # TODO: Add descriptions to OrderedDict instead,\n # zip with hours to ensure a complete list\n descriptions = entry.get(\"descriptions\", [])\n descriptions.append((column_id, v))\n entry[\"descriptions\"] = descriptions\n elif entry_type == \"FinishTime\":\n times = entry.get(\"times\", [])\n hours = convert_time_string_and_minutes_to_hours(v) if v != \"\" else 0\n times.append((column_id, hours))\n entry[\"times\"] = times\n entries[row_id] = entry\n # Process times into ordered (based on `column_id`) list of hours\n for k in entries.copy().keys():\n customer = entries[k].get(\"customer\", \"\")\n project = entries[k].get(\"project\", \"\")\n times = entries[k].get(\"times\", [])\n descriptions = entries[k].get(\"descriptions\", [])\n if times:\n sorted_times = sorted(times, key=lambda t: t[0])\n times = [t[1] for t in sorted_times]\n if descriptions:\n sorted_descriptions = sorted(descriptions, key=lambda t: t[0])\n descriptions = [t[1] for t in sorted_descriptions]\n # Remove 
rows with no data\n if (customer == \"\" and project == \"\") or sum(times) == 0:\n entries.pop(k)\n continue\n entries[k][\"times\"] = times\n entries[k][\"descriptions\"] = descriptions\n return entries\n\n def count_entries(self):\n \"\"\"\n Count number of timesheet entries. This should reconcile with the\n `InputRows` field from the form data.\n \"\"\"\n return len(self.row_entries().keys())\n\n def form_data(self):\n \"\"\"\n Output timesheet data in a format that can be POST'd to the\n timesheets.com.au servers.\n \"\"\"\n data = self._form_data.copy()\n for k in data.copy().keys():\n m = re.match(self.TIMESHEET_FIELD_PATTERN, k)\n if not m:\n continue\n entry_type, row_id, column_id = m.groups()\n if entry_type == \"FinishTime\":\n # Some form elements not present in read-only timesheet,\n # we'll add these fields manually for completeness\n description_key = \"Description_{}_{}\".format(row_id, column_id)\n if description_key not in data:\n data[description_key] = \"\"\n pbatch_key = \"PBatch_{}_{}\".format(row_id, column_id)\n if pbatch_key not in data:\n data[pbatch_key] = \"\"\n sbatch_key = \"SBatch_{}_{}\".format(row_id, column_id)\n if sbatch_key not in data:\n data[sbatch_key] = \"\"\n return data\n\n def extract_form_data_from_dict(self, data):\n # Get unique customer/project/task/description entries, these will become our rows\n unique_entries = set()\n for _, entries in data.items():\n for e in entries:\n customer = e.get(\"customer_code\")\n project = e.get(\"project_psid\")\n task = e.get(\"task_id\") or \"\"\n unique_entries.add(\"{}|{}|{}\".format(customer, project, task))\n\n # Use lambda to create default entry to avoid later referencing same object\n default_entry = lambda: dict(\n customer=\"\", project=\"\", task=\"\", times=[], descriptions=[]\n )\n row_entries = dict((e, default_entry()) for e in unique_entries)\n\n # Generate range of dates from start to end date (to account for any missing dates in between)\n start_date = min(data.keys())\n end_date = max(data.keys())\n timesheet_dates = generate_date_series(start_date, end_date)\n\n # Populate row entry, sum hours across multiple days into single row value\n for dt in timesheet_dates:\n date_entries = data.get(dt, []) # list of entries for the given date\n for key, entry in row_entries.items():\n # Sum all hours for a single date for the same customer/project/task\n hours = []\n descriptions = []\n for e in date_entries:\n entry_key = \"{}|{}|{}\".format(\n e.get(\"customer_code\"),\n e.get(\"project_psid\"),\n e.get(\"task_id\") or \"\",\n )\n if entry_key == key:\n hours.append(e.get(\"hours\", 0))\n descriptions.append(e.get(\"description\", \"\"))\n entry[\"times\"].append(sum(hours))\n entry[\"descriptions\"].append(\"; \".join(descriptions))\n entry[\"customer\"], entry[\"project\"], entry[\"task\"] = key.split(\n \"|\"\n ) # populate row info\n\n # Replace key with row number\n row_entries = dict((i, v[1]) for i, v in enumerate(row_entries.items()))\n\n form_data = {\n \"StartDate\": start_date.strftime(\"%d-%b-%Y\"),\n \"EndDate\": end_date.strftime(\"%d-%b-%Y\"),\n }\n for row_id, entry in row_entries.items():\n f = \"{}_{}_{}\" #\n form_data.update(\n {\n f.format(\"CustomerCode\", row_id, 0): entry.get(\"customer\") or \"\",\n f.format(\"Project\", row_id, 0): entry.get(\"project\") or \"\",\n f.format(\"Task\", row_id, 0): entry.get(\"task\") or \"\",\n }\n )\n for column_id in range(0, len(entry[\"times\"])):\n hours = entry.get(\"times\")[column_id]\n description = 
entry.get(\"descriptions\")[column_id]\n form_data.update(\n {\n f.format(\"FinishTime\", row_id, column_id): hours\n if hours > 0\n else \"\",\n f.format(\"Description\", row_id, column_id): description,\n }\n )\n return form_data\n\n def extract_form_data_from_html(self, html):\n \"\"\"\n Extract timesheet form data from HTML\n \"\"\"\n form_input_rows = html.xpath(self.FORM_XPATH_INPUT_ROWS, first=True)\n input_rows = (\n int(form_input_rows.attrs.get(\"value\")) - 1 if form_input_rows else None\n )\n data_elements = itertools.chain(\n html.xpath(self.FORM_XPATH_START_DATE)[:1],\n html.xpath(self.FORM_XPATH_END_DATE)[:1],\n html.xpath(self.FORM_XPATH_TIMES),\n html.xpath(self.FORM_XPATH_CUSTOMERS),\n html.xpath(self.FORM_XPATH_PROJECTS),\n html.xpath(self.FORM_XPATH_TASKS),\n html.xpath(self.FORM_XPATH_DESCRIPTIONS),\n )\n form_data = {}\n\n # Construct data dictionary\n for el in data_elements:\n name = el.attrs.get(\"name\")\n # form elements can be a select element (drop down) if timesheet is not read-only\n if el.element.tag == \"select\":\n option = el.xpath(\"//option[@selected]\", first=True)\n value = option.attrs.get(\"value\") if option else \"\"\n else:\n value = el.attrs.get(\"value\")\n form_data[name] = value\n\n # Customer form elements aren't present in read-only timesheet, we need to lookup `customer_code` from project\n for k, v in form_data.copy().items():\n m = re.match(self.TIMESHEET_FIELD_PATTERN, k)\n if not m:\n continue\n entry_type, row_id, column_id = m.groups()\n # Read-only timesheet can contain extra empty rows that do not need to be included\n if input_rows and int(row_id) > input_rows:\n form_data.pop(k)\n continue\n if entry_type == \"Project\":\n customer_key = \"Customer_{}_{}\".format(row_id, column_id)\n if customer_key not in form_data:\n customer = self.lookup_project(v)\n form_data[customer_key] = (\n customer[\"customer_code\"] if customer else \"\"\n )\n return form_data\n\n def date_entries(self):\n \"\"\"\n Construct dictionary of timesheet entries, with dates (`column_id` indexes) as keys.\n \"\"\"\n form_data = self._form_data\n dates = {}\n for k, v in form_data.items():\n m = re.match(self.TIMESHEET_FIELD_PATTERN, k)\n if not m:\n continue\n entry_type, row_id, column_id = m.groups()\n\n # Only loop through FinishTime entries to assemble date entries\n if entry_type != \"FinishTime\" or v == \"0\" or not v:\n continue\n row_id, column_id = int(row_id), int(column_id)\n date_entries = dates.get(column_id, [])\n\n # Lookup row\n row_entry = self.row_entries().get(row_id)\n\n entry = {\n \"hours\": convert_time_string_and_minutes_to_hours(v) if v != \"\" else 0\n }\n\n # Check description list is populated (missing/empty when reading historical timesheets)\n descriptions = row_entry.get(\"descriptions\")\n if descriptions:\n entry.update({\"description\": descriptions[column_id]})\n\n # Lookup customer/project/task details\n customer = self.lookup_customer(row_entry.get(\"customer\"))\n project = self.lookup_project(row_entry.get(\"project\"))\n task = self.lookup_task(row_entry.get(\"task\"))\n entry.update(customer)\n entry.update(project)\n entry.update(task)\n\n # Add entry under date\n date_entries.append(entry)\n dates[column_id] = date_entries\n\n # Generate range of dates from start to end date (to account for any missing dates in between)\n start_date = dateparser(form_data[\"StartDate\"])\n end_date = dateparser(form_data[\"EndDate\"])\n timesheet_dates = generate_date_series(start_date, end_date)\n\n # Match dates in timesheet 
period with ordinal index from `dates`\n d = {}\n for i, dt in enumerate(timesheet_dates):\n d[dt] = dates.get(i, [])\n return d\n\n def json(self):\n date_entries = self.date_entries()\n return json.dumps(\n dict((k.strftime(\"%Y-%m-%d\"), v) for k, v in date_entries.items()), indent=2\n )\n"
},
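The form encoding that `Timesheet` round-trips is positional: every field name is `<EntryType>_<row>_<column>`, where a row is one customer/project/task line and a column is one day of the period. A short sketch of how `TIMESHEET_FIELD_PATTERN` (copied from the class above) decomposes a field name:

```python
import re

TIMESHEET_FIELD_PATTERN = r"^(?P<entry_type>\w+)_(?P<row_id>\d+)_(?P<column_id>\d+)$"

m = re.match(TIMESHEET_FIELD_PATTERN, "FinishTime_2_4")
entry_type, row_id, column_id = m.groups()
# Hours entered for timesheet row 2 on the fifth day of the period
# (column indexes are zero-based).
assert (entry_type, int(row_id), int(column_id)) == ("FinishTime", 2, 4)
```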
{
"alpha_fraction": 0.6466666460037231,
"alphanum_fraction": 0.653333306312561,
"avg_line_length": 34.29411697387695,
"blob_id": "03a4b8f8037be6898cc38ab7a2bd70cf12c69275",
"content_id": "5ae644d25bd7e0d558b2f26eecdc1547e483868f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1200,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 34,
"path": "/setup.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\n\nLONG_DESCRIPTION = open(\"README.md\").read()\n\nINSTALL_REQUIRES = [\"requests\", \"requests-html\", \"python-dateutil\"]\n\nsetup(\n name=\"timepro-timesheet\",\n use_scm_version=True,\n setup_requires=[\"setuptools_scm\"],\n description=\"Utility for programmatically getting and submitting data to Intertec TimePro (timesheets.com.au)\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n url=\"http://github.com/christippett/timepro-timesheet\",\n author=\"Chris Tippett\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n entry_points={\"console_scripts\": [\"timepro=timepro_timesheet.cli:main\"]},\n install_requires=INSTALL_REQUIRES,\n classifiers=[\n \"Environment :: Web Environment\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n zip_safe=False,\n)\n"
},
{
"alpha_fraction": 0.5484562516212463,
"alphanum_fraction": 0.5510291457176208,
"avg_line_length": 34.33333206176758,
"blob_id": "1112fe9bb514a7734625791d9c94787714c91d01",
"content_id": "180413a747e622ee50b678012697550bb1fa153c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6996,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 198,
"path": "/src/timepro_timesheet/api.py",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "import re\nfrom datetime import date\n\nfrom dateutil.relativedelta import relativedelta, MO, FR\nfrom requests_html import HTMLSession\n\nfrom .timesheet import Timesheet\n\nTODAY = date.today()\n\n\nclass LoginError(Exception):\n pass\n\n\nclass WebsiteError(Exception):\n pass\n\n\nclass TimesheetAPI:\n LOGIN_URL = \"https://www.timesheets.com.au/tplogin/default.asp\"\n VIEW_TIMESHEET_URL = \"https://www.timesheets.com.au/tp60/ViewTimeSheet.asp\"\n INPUT_TIME_URL = \"https://www.timesheets.com.au/tp60/InputTime.asp\"\n ERROR_TABLE_XPATH = '//a[@name=\"ErrorTable\"]/following-sibling::table'\n\n LoginError = LoginError\n WebsiteError = WebsiteError\n\n def __init__(self):\n self.session = HTMLSession()\n self.user_context_id = None\n self.staff_id = None\n self.logged_in = False\n\n def _parse_html_login_errors(self, error_table):\n error_tds = error_table.xpath(\n '//img[@src=\"images/invalid.png\"]/ancestor::tr[1]/td[2]'\n )\n return [e.text for e in error_tds]\n\n def _parse_html_options(self, html, option_name, selected=False):\n if selected:\n options = html.xpath(\n f'//select[@name=\"{option_name}\"]//option[@selected]'\n ) or html.xpath(f'//input[@name=\"{option_name}\"]')\n else:\n options = html.xpath(\n f'//select[@name=\"{option_name}\"]//option[not(@value=\"\")]'\n )\n options = [(o.attrs.get(\"value\"), o.text) for o in options]\n if selected:\n return options[0] if options else None\n return options\n\n def _parse_html_customer_options(self, html):\n options = self._parse_html_options(html, option_name=\"CustomerCode_0_0\")\n customers = []\n for code, description in options:\n customers.append(\n {\"customer_code\": code, \"customer_description\": description}\n )\n return customers\n\n def _parse_html_project_options(self, html):\n pattern = (\n r\"AddProjectEntry\\(\"\n \"'(?P<customer_code>[^']*?)',\"\n \"'(?P<project_code>[^']*?)',\"\n \"'(?P<project_psid>[^']*?)',\"\n \"'(?P<project_description>[^']*?)',\"\n \"(?P<task_count>[^']*?)\"\n \"\\)\\s\"\n )\n projects = re.finditer(pattern, html.html)\n return [p.groupdict() for p in projects]\n\n def _parse_html_task_options(self, html):\n pattern = (\n r\"AddTaskEntry\\(\"\n \"'(?P<project_code>[^']*?)',\"\n \"'(?P<task_id>[^']*?)',\"\n \"'(?P<task_description>[^']*?)'\"\n \"\\)\"\n )\n tasks = re.finditer(pattern, html.html)\n return [t.groupdict() for t in tasks]\n\n def login(self, username, password, customer_id):\n data = {\n \"CurrentClientTime\": \"\",\n \"compact\": \"off\",\n \"ForceInterface\": \"S\",\n \"systemid\": customer_id,\n \"username\": username,\n \"password\": password,\n }\n r = self.session.post(self.LOGIN_URL, data=data)\n\n # Detect errors\n error_table = r.html.xpath(self.ERROR_TABLE_XPATH, first=True)\n if error_table:\n errors = self._parse_html_login_errors(error_table)\n raise LoginError(\" \".join(errors))\n\n # Detect rejected logon\n rejected_login_input = r.html.find('input[name=\"RejectedLogon\"]')\n if rejected_login_input:\n raise LoginError(\"Invalid login credentials.\")\n\n # Find UserContextID (required for future session requests)\n user_context_input = r.html.find('input[name=\"UserContextID\"]', first=True)\n if user_context_input:\n self.user_context_id = user_context_input.attrs.get(\"value\")\n else:\n raise LoginError(\"UserContextID not found in login response.\")\n\n # Load ViewTimesheet page to get StaffID\n r = self.session.post(\n self.VIEW_TIMESHEET_URL, data={\"UserContextID\": self.user_context_id}\n )\n staff_id_input = 
r.html.find('input[name=\"StaffID\"]', first=True)\n if staff_id_input:\n self.staff_id = staff_id_input.attrs.get(\"value\")\n else:\n raise LoginError(\"StaffID not found in login response.\")\n self.logged_in = True\n\n def get_timecodes(self):\n if not self.logged_in:\n raise LoginError(\"Not logged in.\")\n next_month_end = TODAY + relativedelta(months=+1, day=31)\n filter_day = next_month_end.strftime(\"%d-%b-%Y\")\n data = {\n \"UserContextID\": self.user_context_id,\n \"StaffID\": self.staff_id,\n \"Mode\": \"Day\",\n \"StartDate\": filter_day,\n \"EndDate\": filter_day,\n }\n r = self.session.post(self.INPUT_TIME_URL, data=data)\n customers = self._parse_html_customer_options(r.html)\n projects = self._parse_html_project_options(r.html)\n tasks = self._parse_html_task_options(r.html)\n return customers, projects, tasks\n\n def get_timesheet(self, start_date=None, end_date=None):\n if start_date is None and end_date is None:\n # default to get this week's timesheet (excl. previous month)\n start_date = max(\n [TODAY + relativedelta(day=1), TODAY + relativedelta(weekday=MO(-1))]\n )\n end_date = TODAY + relativedelta(weekday=FR)\n r = self.session.post(\n self.INPUT_TIME_URL,\n data={\n \"UserContextID\": self.user_context_id,\n \"StaffID\": self.staff_id,\n \"Mode\": \"Week\",\n \"StartDate\": start_date.strftime(\"%d-%b-%Y\"),\n \"EndDate\": end_date.strftime(\"%d-%b-%Y\"),\n },\n )\n customer_options, project_options, task_options = self.get_timecodes()\n return Timesheet(\n html=r.html,\n customer_options=customer_options,\n project_options=project_options,\n task_options=task_options,\n )\n\n def post_timesheet(self, timesheet):\n form_data = timesheet.form_data()\n row_count = timesheet.count_entries()\n form_data.update(\n {\n \"UserContextID\": self.user_context_id,\n \"StaffID\": self.staff_id,\n \"InputRows\": row_count,\n \"Save\": \"%A0%A0Save%A0%A0\",\n \"DataForm\": \"TimeEntry {}\".format(self.staff_id), # Important!\n # 'OptionsDisplayed': 'N',\n # 'OverrideAction': '',\n # 'DeletesPending': ''\n }\n )\n r = self.session.post(\n self.INPUT_TIME_URL,\n data=form_data,\n headers={\"Referer\": self.INPUT_TIME_URL},\n )\n\n # Detect errors\n error_table = r.html.xpath(self.ERROR_TABLE_XPATH, first=True)\n if error_table:\n errors = self._parse_html_login_errors(error_table)\n raise WebsiteError(\" \".join(errors))\n\n return r\n"
},
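`TimesheetAPI` above signals failure through exceptions: `login` raises `LoginError` when credentials are rejected or when `UserContextID`/`StaffID` cannot be found, and `post_timesheet` raises `WebsiteError` when the site returns its error table. A brief usage sketch with placeholder credentials:

```python
from timepro_timesheet.api import TimesheetAPI, LoginError

api = TimesheetAPI()
try:
    api.login(customer_id="CUST", username="john.doe", password="password123")
except LoginError as e:
    raise SystemExit("Login failed: {}".format(e))

# Defaults to the current week, excluding days from the previous month.
timesheet = api.get_timesheet()
print(timesheet.json())

# api.post_timesheet(timesheet) would raise WebsiteError if the
# server-side error table appears in the response.
```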
{
"alpha_fraction": 0.8351648449897766,
"alphanum_fraction": 0.8351648449897766,
"avg_line_length": 9.11111068725586,
"blob_id": "46b6780755501556b7111967fa503ed37246fb2d",
"content_id": "e1b71b421ef0536711efc9857e76035cf821784f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "requests\nrequests-html\npython-dateutil\n\n# Test dependencies\npytest\ncoverage\npytest-cov\ntox\n"
},
{
"alpha_fraction": 0.6010638475418091,
"alphanum_fraction": 0.6436170339584351,
"avg_line_length": 14.666666984558105,
"blob_id": "744ae87a37618ea5aaab4d51829513d117e6390b",
"content_id": "94e4c76675a56c1d4441733a7a3df9a2e7e6ec4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 188,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 12,
"path": "/tox.ini",
"repo_name": "christippett/timepro-timesheet",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = py{27,35,36,37}\n\n[testenv]\npassenv = TRAVIS TRAVIS_*\ndeps =\n pytest\n pytest-cov\n coverage\ncommands =\n coverage run --branch -m pytest -v\n coverage report -m\n"
}
] | 10 |
gfek/VTapi2 | https://github.com/gfek/VTapi2 | 9a64595e1d7fb82876610d50bb04da3a993931f1 | 53bab08213c92ae03a4e0de52b24cc1aaf4638b3 | ecb061fae446091c0491c43933c966304445a23f | refs/heads/master | 2021-01-19T16:51:49.452455 | 2015-02-23T08:03:04 | 2015-02-23T08:03:04 | 31,198,519 | 2 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.586980938911438,
"alphanum_fraction": 0.590347945690155,
"avg_line_length": 26.8125,
"blob_id": "e5fabf41ce01d08e592daa9f1309f0ec74f58369",
"content_id": "a355906e3b72bc4504f602961bf263f0329c1816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 891,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 32,
"path": "/README.md",
"repo_name": "gfek/VTapi2",
"src_encoding": "UTF-8",
"text": "# Virus Total Public API v2\n\nPyhton script using the official VT API documentation page. https://www.virustotal.com/en/documentation/public-api/\n\n### Dependencies\n\n* requests\n* colorama\n\n### Help\n\nusage: Public API v2 VirusTotal [-h] [--domain DOMAIN] [--ip IP] [--hash HASH] [--url URL] [--scanurl SCANURL] [--uploadfile UPLOADFILE] [--version]\n \nVirusTotal Search (Public API v2).\n\noptional arguments:\n\n -h, --help show this help message and exit\n \n --domain DOMAIN Get report by a domain.\n \n --ip IP Get report by an ip address.\n \n --hash HASH Get report by a hash.\n \n --url URL Get report by a URL.\n \n --scanurl SCANURL Scan a URL.\n \n --uploadfile UPLOADFILE Upload a file to Virus Total.\n \n --version show program's version number and exit\n\n"
},
{
"alpha_fraction": 0.679073691368103,
"alphanum_fraction": 0.6877930164337158,
"avg_line_length": 37.366905212402344,
"blob_id": "af289ce29f0556c3d810b729993d1fca558077af",
"content_id": "6b6e393b4ef760e4aeecfb504b80670ad24e87a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10666,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 278,
"path": "/VTapi2.py",
"repo_name": "gfek/VTapi2",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\nimport colorama\nfrom colorama import Fore, Back, Style\nimport argparse\nimport sys\nimport re\nimport os\n\nclass PublicV2VirusTotal:\n\tdef __init__(self):\n\t\tself.apikey = \"f76bdbc3755b5bafd4a18436bebf6a47d0aae6d2b4284f118077aa0dbdbd76a4\"\n\t\tself.domainurl=\"https://www.virustotal.com/vtapi/v2/domain/report\"\n\t\tself.ipurl=\"https://www.virustotal.com/vtapi/v2/ip-address/report\"\n\t\tself.hashurl=\"https://www.virustotal.com/vtapi/v2/file/report\"\n\t\tself.url=\"https://www.virustotal.com/vtapi/v2/url/report\"\n\t\tself.scanurl=\"https://www.virustotal.com/vtapi/v2/url/scan\"\n\t\tself.uploadfile=\"https://www.virustotal.com/vtapi/v2/file/scan\"\n\n\tdef DomainReport(self,domain):\n\t\tparameters = {'domain': domain, 'apikey': self.apikey}\n\t\tresponse = requests.get(self.domainurl, params=parameters)\n\t\tresponse_dict = response.json()\n\t\treturn response_dict\n\n\tdef IPReport(self,IP):\n\t\tparameters = {'ip': IP, 'apikey': self.apikey}\n\t\tresponse = requests.get(self.ipurl, params=parameters)\n\t\tresponse_dict = response.json()\n\t\treturn response_dict\n\n\tdef HashReport(self,md5_sha_hash):\n\t\tparameters = {'resource': md5_sha_hash, 'apikey': self.apikey}\n\t\tresponse = requests.get(self.hashurl, params=parameters)\n\t\tresponse_dict = response.json()\n\t\treturn response_dict\n\n\tdef UrlReport(self,url):\n\t\tparameters = {'resource': url, 'apikey': self.apikey}\n\t\tresponse = requests.get(self.url, params=parameters)\n\t\tresponse_dict = response.json()\n\t\treturn response_dict\n\n\tdef ScanReport(self,scanurl):\n\t\tparameters = {'url': scanurl, 'apikey': self.apikey}\n\t\tresponse = requests.post(self.scanurl, params=parameters)\n\t\tresponse_dict = response.json()\n\t\treturn response_dict\n\n\tdef UploadFile(self,file):\n\t\ttry:\n\t\t\tf = open(file, 'rb')\n\t\texcept IOError as e:\n\t\t\tprint Fore.RED+\"Unable to open file (File does not exist or no read permissions)\"+Style.RESET_ALL\n\t\t\tsys.exit(-1)\n\t\tparameters = {'apikey': self.apikey}\n\t\tdata = requests.post(self.uploadfile, data=parameters, files={'file':f})\n\t\tresponse_dict = data.json()\n\t\treturn response_dict\n\t\t\nparser = argparse.ArgumentParser(prog=\"Public API v2 VirusTotal\",description='VirusTotal Search (Public API v2).')\n\nparser.add_argument(\"--domain\",help=\"Get report by a domain.\")\nparser.add_argument(\"--ip\", help=\"Get report by an ip address.\")\nparser.add_argument(\"--hash\", help=\"Get report by a hash.\")\nparser.add_argument(\"--url\", help=\"Get report by a URL.\")\nparser.add_argument(\"--scanurl\", help=\"Scan a URL.\")\nparser.add_argument(\"--uploadfile\", help=\"Upload a file to Virus Total.\")\nparser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s 1.0\")\n\nargs = parser.parse_args()\n\ncolorama.init()\n\nvt=PublicV2VirusTotal()\n\nif args.uploadfile:\n\tuploadfile=vt.UploadFile(args.uploadfile)\n\tif uploadfile['response_code']==0:\n\t\tprint Fore.RED+\"?????.\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\n\tprint Fore.RED+\"-= Upload File Information =-\"+Style.RESET_ALL\n\tprint Fore.BLUE+'\\tMessage:'+Style.RESET_ALL,uploadfile['verbose_msg']\n\tprint Fore.BLUE+'\\tLink:'+Style.RESET_ALL,uploadfile['permalink']\n\tprint Fore.BLUE+'\\tScan ID:'+Style.RESET_ALL,uploadfile['scan_id']\n\tprint Fore.BLUE+'\\tSHA1:'+Style.RESET_ALL,uploadfile['sha1']\n\tprint Fore.BLUE+'\\tSHA256:'+Style.RESET_ALL,uploadfile['sha256']\n\tprint Fore.BLUE+'\\tMD5:'+Style.RESET_ALL,uploadfile['md5']\n\nif 
args.domain:\n\tdReport=vt.DomainReport(args.domain)\n\tif dReport['response_code']==0:\n\t\tprint Fore.RED+\"Domain not found in dataset.\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\n\tif 'whois' in dReport:\n\t\tprint Fore.RED+\"-= WHOIS Lookup =-\"+Style.RESET_ALL\n\t\tprint dReport['whois'], \"\\n\"\n\n\tif 'subdomains' in dReport:\n\t\tprint Fore.RED+\"-= Observed Subdomains =-\"+Style.RESET_ALL\n\t\tfor domain in dReport['subdomains']:\n\t\t\tprint \"\\t\",domain\n\t\tprint \"\\n\"\n\n\tif 'resolutions' in dReport:\n\t\tprint Fore.RED+\"-= Resolution =-\"+Style.RESET_ALL\n\t\tfor resolution in dReport['resolutions']:\n\t\t\tprint \"\\t\",resolution['last_resolved'], resolution['ip_address']\n\t\tprint \"\\n\"\n\n\tif 'Alexa domain info' in dReport:\n\t\tprint Fore.RED+\"-= Alexa Ranking Information =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",dReport['Alexa domain info']\n\t\tprint \"\\n\"\n\n\tif 'BitDefender domain info' in dReport:\n\t\tprint Fore.RED+\"-= BitDefender domain Information =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",dReport['BitDefender domain info']\n\t\tprint \"\\n\"\t\n\n\tif 'BitDefender category' in dReport:\n\t\tprint Fore.RED+\"-= BitDefender Category =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",dReport['BitDefender category']\n\t\tprint \"\\n\"\n\n\tif 'TrendMicro category' in dReport:\n\t\tprint Fore.RED+\"-= TrendMicro Category =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",dReport['TrendMicro category']\n\t\tprint \"\\n\"\n\n\tif 'Webutation domain info' in dReport:\n\t\tprint Fore.RED+\"-= Webtutation Domain Information =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",\"Safety Score:\",dReport['Webutation domain info']['Safety score'],\"Adult Content:\",dReport['Webutation domain info']['Adult content'],\"Verdict:\",dReport['Webutation domain info']['Verdict']\n\t\tprint \"\\n\"\n\n\tif 'WOT domain info' in dReport:\n\t\tprint Fore.RED+\"-= WOT Domain Information =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",\"Vendor reliability:\",dReport['WOT domain info']['Vendor reliability'],\"Child safety:\",dReport['WOT domain info']['Child safety'],\"Trustworthiness:\",dReport['WOT domain info']['Trustworthiness'],\"Privacy:\",dReport['WOT domain info']['Privacy']\n\t\tprint \"\\n\"\n\n\tif 'detected_communicating_samples' in dReport:\n\t\tprint Fore.RED+\"-= Latest detected files that communicate with this domain =-\"+Style.RESET_ALL\n\t\tfor x in dReport['detected_communicating_samples']:\n\t\t\tprint \"\\t\",x['date'], x['positives'],\"/\",x['total'],x['sha256']\n\t\tprint \"\\n\"\n\t\n\tif 'detected_urls' in dReport:\n\t\tprint Fore.RED+\"-= Latest detected URLs =-\"+Style.RESET_ALL\n\t\tfor durls in dReport['detected_urls']:\n\t\t\tprint \"\\t\",durls['url'], durls['positives'],\"/\",durls['total'],durls['scan_date']\t\t\t\n\t\tprint \"\\n\"\n\t\n\tif 'detected_referrer_samples' in dReport:\n\t\tprint Fore.RED+\"-= Latest detected files that embed this domain in their strings =-\"+Style.RESET_ALL\n\t\tfor samples in dReport['detected_referrer_samples']:\n\t\t\tprint \"\\t\",samples['positives'],\"/\",samples['total'],samples['sha256']\t\t\n\t\tprint \"\\n\"\n\n\tif 'detected_downloaded_samples' in dReport:\n\t\tprint Fore.RED+\"-= Latest detected files that were downloaded from this domain =-\"+Style.RESET_ALL\n\t\tfor latest in dReport['detected_downloaded_samples']:\n\t\t\tprint \"\\t\",latest['date'],latest['positives'],\"/\",latest['total'],latest['sha256']\n\nif args.ip:\n\tipres=re.findall(r\"^(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})$\", args.ip)\n\tif 
ipres:\n\t\tipReport=vt.IPReport(args.ip)\n\t\tif ipReport['response_code']==0:\n\t\t\tprint Fore.RED+\"IP address not found in dataset.\"+Style.RESET_ALL\n\t\t\tsys.exit(-1)\n\t\n\t\tif 'country' in ipReport:\n\t\t\tprint Fore.RED+\"-= Geolocation =-\"+Style.RESET_ALL\n\t\t\tprint \"\\t\",\"Country:\", ipReport['country'],\"Autonomous System:\",ipReport['asn'],\"Owner:\",ipReport['as_owner']\n\t\t\tprint \"\\n\"\n\n\t\tif 'resolutions' in ipReport:\n\t\t\tprint Fore.RED+\"-= Resolution-Passive DNS Replication =-\"+Style.RESET_ALL\n\t\t\tfor resolution in ipReport['resolutions']:\n\t\t\t\tprint \"\\t\",resolution['last_resolved'], resolution['hostname']\n\t\t\tprint \"\\n\"\n\n\t\tif 'detected_communicating_samples' in ipReport:\n\t\t\tprint Fore.RED+\"-= Latest detected files that communicate with this domain =-\"+Style.RESET_ALL\n\t\t\tfor x in ipReport['detected_communicating_samples']:\n\t\t\t\tprint \"\\t\",x['date'], x['positives'],'/',x['total'],x['sha256']\n\t\t\tprint \"\\n\"\n\n\t\tif 'detected_urls' in ipReport:\n\t\t\tprint Fore.RED+\"-= Latest detected URLs =-\"+Style.RESET_ALL\n\t\t\tfor detection in ipReport['detected_urls']:\n\t\t\t\tprint \"\\t\",\"URL:\",detection['url'], \"Detection ratio:\",detection['positives'],'/',detection['total'],\"Scanned Date:\",detection['scan_date']\n\t\t\tprint \"\\n\"\n\n\t\tif 'detected_referrer_samples' in ipReport:\n\t\t\tprint Fore.RED+\"-= Latest detected files that embed this domain in their strings =-\"+Style.RESET_ALL\n\t\t\tfor samples in ipReport['detected_referrer_samples']:\n\t\t\t\tprint \"\\t\",samples['positives'],'/',samples['total'],samples['sha256']\t\t\n\t\t\tprint \"\\n\"\n\n\t\tif 'detected_downloaded_samples' in ipReport:\n\t\t\tprint Fore.RED+\"-= Latest detected files that were downloaded from this domain =-\"+Style.RESET_ALL\n\t\t\tfor latest in ipReport['detected_downloaded_samples']:\n\t\t\t\tprint \"\\t\",latest['date'],latest['positives'],\"/\",latest['total'],latest['sha256']\n\t\t\tprint \"\\n\"\n\n\telse:\n\t\tprint Fore.RED+\"Invalid IP\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\t\nif args.hash:\n\tresult=re.findall(\"^[a-f\\d]{32}$|^[A-F\\d]{32}$|^[a-f\\d]{64}$|^[A-F\\d]{64}$\", args.hash)\n\tif result:\n\t\treport=vt.HashReport(args.hash)\n\t\tif report['response_code']==0:\n\t\t\tprint Fore.RED+\"Hash not found in dataset.\"+Style.RESET_ALL\n\t\t\tsys.exit(-1)\n\n\t\tprint Fore.RED+\"-= Information =-\"+Style.RESET_ALL\n\t\tprint Fore.BLUE+\"\\tLink:\"+Style.RESET_ALL,report['permalink']\n\t\tprint Fore.BLUE+\"\\tScanID:\"+Style.RESET_ALL,report['scan_id']\n\t\tprint Fore.BLUE+\"\\tSHA1:\"+Style.RESET_ALL,report['sha1']\n\t\tprint Fore.BLUE+\"\\tSHA256:\"+Style.RESET_ALL,report['sha256']\n\t\tprint Fore.BLUE+\"\\tMD5:\"+Style.RESET_ALL,report['md5']\n\n\t\tprint \"\\n\"\n\n\t\tprint Fore.RED+\"-= Scanned Date =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",report['scan_date']\n\t\tprint \"\\n\"\n\t\tprint Fore.RED+\"-= Detected Ratio =-\"+Style.RESET_ALL\n\t\tprint \"\\t\",report['positives'],'/',report['total']\n\t\tprint \"\\n\"\n\t\tprint Fore.RED+\"-= Virus Total Analysis =-\"+Style.RESET_ALL\n\t\tfor x in report['scans']:\n\t\t\tprint \"\\t\", x,\"\\t\" if len(x) < 7 else '',\"\\t\" if len(x) < 14 else '',\"\\t\",report['scans'][x]['detected'], \"\\t\",report['scans'][x]['result']\n\telse:\n\t\tprint Fore.RED+\"Not a valid MD5/SHA256 hash.\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\nif args.url:\n\turlreport=vt.UrlReport(args.url)\n\tif urlreport['response_code']==0:\n\t\tprint Fore.RED+\"URL not found in 
dataset\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\n\tprint Fore.RED+\"-= Detected Ratio =-\"+Style.RESET_ALL\n\tprint \"\\t\",urlreport['positives'],'/',urlreport['total']\n\tprint \"\\n\"\n\n\tprint Fore.RED+\"-= Analysis Date =-\"+Style.RESET_ALL\n\tprint \"\\tScanned on:\",urlreport['scan_date']\n\tprint \"\\n\"\n\n\tprint Fore.RED+\"-= Virus Total Analysis =-\"+Style.RESET_ALL\n\tfor x in urlreport['scans']:\n\t\tprint \"\\t\", x,\"\\t\" if len(x) < 7 else '',\"\\t\" if len(x) < 14 else '','\\t',urlreport['scans'][x]['detected'], \"\\t\",urlreport['scans'][x]['result']\n\nif args.scanurl:\n\tscanurl=vt.ScanReport(args.scanurl)\n\n\tif scanurl['response_code']==-1:\n\t\tprint Fore.RED+\"Invalid URL, the scan request was not queued.\"+Style.RESET_ALL\n\t\tsys.exit(-1)\n\n\tprint Fore.RED+\"-= Link =-\"+Style.RESET_ALL\n\tprint \"\\t\", scanurl['permalink']\n\tprint \"\\n\"\n\tprint Fore.RED+\"-= Scan Date =-\"+Style.RESET_ALL\n\tprint \"\\t\", scanurl['scan_date']\n\tprint \"\\n\"\n\tprint Fore.RED+\"-= Scan ID =-\"+Style.RESET_ALL\n\tprint \"\\t\", scanurl['scan_id']\n\tprint \"\\n\"\n\tprint Fore.RED+\"-= Message =-\"+Style.RESET_ALL\n\tprint \"\\t\", scanurl['verbose_msg']\n"
}
] | 2 |
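The `PublicV2VirusTotal` class in the script above wraps a handful of VT Public API v2 endpoints; the underlying request pattern is easy to reuse programmatically. A minimal sketch of the domain-report call, assuming a valid API key (the one below is a placeholder, not the key hard-coded in the script):

```python
import requests

API_KEY = "YOUR-VT-PUBLIC-API-KEY"  # placeholder

response = requests.get(
    "https://www.virustotal.com/vtapi/v2/domain/report",
    params={"domain": "example.com", "apikey": API_KEY},
)
report = response.json()

# As in VTapi2.py, response_code == 0 means the resource is not in the dataset.
if report.get("response_code") == 0:
    print("Domain not found in dataset.")
else:
    for resolution in report.get("resolutions", []):
        print(resolution["last_resolved"], resolution["ip_address"])
```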
felixnavarro/BRAINSTools | https://github.com/felixnavarro/BRAINSTools | 54121cd236b252f3d5fce8a1ad5261db1ffe786e | 8177f7d0ad334d09588ee1964be57bdf8cf4711f | 472f54934b06ffe038f7261ab662f3eacfcb3bf2 | refs/heads/master | 2018-12-28T09:46:36.860618 | 2014-08-29T20:21:46 | 2014-08-29T20:21:46 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7000864148139954,
"alphanum_fraction": 0.7026793360710144,
"avg_line_length": 20.03636360168457,
"blob_id": "0f502463eb8fcffa9acb93574c5c6fa8fdf1ef4d",
"content_id": "43314240478fc02a499d87f5fc1ec1f097f7ebd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 55,
"path": "/CMake/FindITKUtil.cmake",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#\n# encapsulate calling find_package(ITK)\n\nmacro(FindITKUtil)\n if(Slicer_BUILD_BRAINSTOOLS) ## Slicer has it's own internal MGHIO that conflicts with ITK\n set(FindITK_MGHIO \"\")\n else()\n set(FindITK_MGHIO MGHIO )\n endif()\n\n # ITK_FOUND needs to be reset, or it won't redo\n # setting up the include directories\n set(ITK_FOUND OFF)\n find_package(ITK COMPONENTS\n # Everything needs ITKCommon\n ITKCommon\n # Common depends on thes modules\n ITKVNLInstantiation\n ITKKWSys\n ITKDoubleConversion\n ITKVNLInstantiation\n ITKVNL\n # IO Components\n ITKIOImageBase\n ITKIOBMP\n ITKIOBioRad\n ITKIOGDCM\n ITKIOGIPL\n ITKIOJPEG\n ITKIOLSM\n ITKIOMeta\n ITKIONIFTI\n ITKIONRRD\n ITKIOPNG\n ITKIOStimulate\n ITKIOTIFF\n ITKIOTransformInsightLegacy\n ITKIOVTK\n ITKIOSpatialObjects\n ITKIOTransformBase\n ITKIOHDF5\n ITKIOTransformMatlab\n ITKIOTransformHDF5\n ITKIOGE\n ${FindITK_MGHIO}\n # other modules specific to the current directory\n ${ARGN}\n REQUIRED)\n\n if(Slicer_BUILD_BRAINSTOOLS)\n set(ITK_NO_IO_FACTORY_REGISTER_MANAGER 1)\n endif()\n include(${ITK_USE_FILE})\n\nendmacro()\n"
},
{
"alpha_fraction": 0.8313252925872803,
"alphanum_fraction": 0.8313252925872803,
"avg_line_length": 19.75,
"blob_id": "976acf20842fed3adb68fa252efa205697816a63",
"content_id": "0f7ea1ad3d44d091920d7235caee3c44f497d594",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 4,
"path": "/AutoWorkup/__init__.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "# import utilities\n# import workflows\nfrom AutoWorkup import setup\nimport SEMTools\n"
},
{
"alpha_fraction": 0.8563535809516907,
"alphanum_fraction": 0.8563535809516907,
"avg_line_length": 59,
"blob_id": "cac6ae39d2738de7fb520ae51656c360c588a137",
"content_id": "7a816816e56388a2ae820b87ebd94d4a10b7c4ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 3,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceStat/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "StandardBRAINSBuildMacro(NAME BRAINSSurfaceStat\n ADDITIONAL_SRCS vtkFSSurfaceReader vtkFSSurfaceScalarReader\n TARGET_LIBRARIES BRAINSCommonLib ${VTK_LIBRARIES} ${ITK_LIBRARIES})\n\n"
},
{
"alpha_fraction": 0.6376021504402161,
"alphanum_fraction": 0.6376021504402161,
"avg_line_length": 25.14285659790039,
"blob_id": "cbad9621a2ab77db5d31fe464ced26d2f7fd8a3e",
"content_id": "c7a706a6768388df7377f227fee143ea67c4bbce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 14,
"path": "/BRAINSCreateLabelMapFromProbabilityMaps/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "\n#-----------------------------------------------------------------------------\n# Dependencies.\n#\n\n#\n# ITK\n#\nFindITKUtil(ITKImageCompare ITKTestKernel)\n\nStandardBRAINSBuildMacro( NAME BRAINSCreateLabelMapFromProbabilityMaps TARGET_LIBRARIES BRAINSCommonLib ${ITK_LIBRARIES})\n\nif(BUILD_TESTING AND NOT Slicer_BUILD_BRAINSTOOLS)\n add_subdirectory(TestSuite)\nendif()\n"
},
{
"alpha_fraction": 0.5944095849990845,
"alphanum_fraction": 0.6023958921432495,
"avg_line_length": 32.075469970703125,
"blob_id": "be4f9a609c843113eb0a7eaa6b2cc1e4d491b245",
"content_id": "62103b7567003d77a6dabc3c0b507cd7332ad58a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1753,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 53,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceGeneration/vtkITKWin32Header.h",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "/*=========================================================================\n *\n * Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering\n * The University of Iowa\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *=========================================================================*/\n/*=========================================================================\n\n Copyright Brigham and Women's Hospital (BWH) All Rights Reserved.\n\n See COPYRIGHT.txt\n or http://www.slicer.org/copyright/copyright.txt for details.\n\n Program: vtkITK\n Module: $HeadURL$\n Date: $Date$\n Version: $Revision$\n\n==========================================================================*/\n\n/// vtkITKWin32Header - manage Windows system differences\n///\n/// The vtkITKWin32Header captures some system differences between Unix\n/// and Windows operating systems.\n\n#ifndef __vtkITKWin32Header_h\n#define __vtkITKWin32Header_h\n\n#include <vtkITKConfigure.h>\n\n#if defined(WIN32) && !defined(VTKITK_STATIC)\n#if defined(vtkITK_EXPORTS)\n#define VTK_ITK_EXPORT __declspec( dllexport )\n#else\n#define VTK_ITK_EXPORT __declspec( dllimport )\n#endif\n#else\n#define VTK_ITK_EXPORT\n#endif\n\n#endif\n"
},
{
"alpha_fraction": 0.8290598392486572,
"alphanum_fraction": 0.8290598392486572,
"avg_line_length": 57,
"blob_id": "458addacb75a8a8c870c941a6df119d37fb187d0",
"content_id": "c0d3dc08c8c31373966f97f84d894f42b68dc6aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 2,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceGeneration/CompareSurfaces/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "StandardBRAINSBuildMacro(NAME CompareSurfaces TARGET_LIBRARIES BRAINSCommonLib ${VTK_LIBRARIES}\n ${ITK_LIBRARIES})\n\n"
},
{
"alpha_fraction": 0.544135332107544,
"alphanum_fraction": 0.5533869862556458,
"avg_line_length": 62.124000549316406,
"blob_id": "b8a888967db92b2a74e4a9da33ac9ce2e858f2d4",
"content_id": "707e73d080897ef0593dc0f11cce14255f2e76ef",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15781,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 250,
"path": "/AutoWorkup/workflows/baseline.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#################################################################################\n## Program: BRAINS (Brain Research: Analysis of Images, Networks, and Systems)\n## Language: Python\n##\n## Author: Hans J. Johnson, David Welch\n##\n## This software is distributed WITHOUT ANY WARRANTY; without even\n## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n## PURPOSE. See the above copyright notices for more information.\n##\n#################################################################################\n\nimport os\nimport sys\nimport string\n#\"\"\"Import necessary modules from nipype.\"\"\"\n# from nipype.utils.config import config\n# config.set('logging', 'log_to_file', 'false')\n# config.set_log_dir(os.getcwd())\n#--config.set('logging', 'workflow_level', 'DEBUG')\n#--config.set('logging', 'interface_level', 'DEBUG')\n#--config.set('execution','remove_unnecessary_outputs','false')\n\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.io as nio\n\nfrom nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, Directory\nfrom nipype.interfaces.base import traits, isdefined, BaseInterface\nfrom nipype.interfaces.utility import Split, Rename, IdentityInterface, Function\n\nfrom nipype.utils.misc import package_check\n# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version\npackage_check('numpy', '1.3', 'tutorial1')\npackage_check('scipy', '0.7', 'tutorial1')\npackage_check('networkx', '1.0', 'tutorial1')\npackage_check('IPython', '0.10', 'tutorial1')\n\nfrom utilities.distributed import modify_qsub_args\nfrom PipeLineFunctionHelpers import convertToList, FixWMPartitioning, AccumulateLikeTissuePosteriors\nfrom PipeLineFunctionHelpers import UnwrapPosteriorImagesFromDictionaryFunction as flattenDict\n\nfrom WorkupT1T2LandmarkInitialization import CreateLandmarkInitializeWorkflow\nfrom WorkupT1T2TissueClassify import CreateTissueClassifyWorkflow\n\nfrom utilities.misc import *\ntry:\n from SEMTools import *\nexcept ImportError:\n from AutoWorkup.SEMTools import *\n\ndef get_list_element(nestedList, index):\n return nestedList[index]\n\ndef getAllT1sLength(allT1s):\n return len(allT1s)\n\ndef generate_single_session_template_WF(projectid, subjectid, sessionid, master_config, phase, interpMode, pipeline_name):\n \"\"\"\n Run autoworkup on a single session\n\n This is the main function to call when processing a data set with T1 & T2\n data. ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images\n are the lists of images to be used in the auto-workup. atlas_fname_wpath is\n the path and filename of the atlas to use.\n \"\"\"\n\n if not 'auxlmk' in master_config['components'] or not 'tissue_classify' in master_config['components']:\n print \"Baseline DataSink requires 'AUXLMK' and/or 'TISSUE_CLASSIFY'!!!\"\n raise NotImplementedError\n # master_config['components'].append('auxlmk')\n # master_config['components'].append('tissue_classify')\n assert phase in ['atlas-based-reference', 'subject-based-reference'], \"Unknown phase! 
Valid entries: 'atlas-based-reference', 'subject-based-reference'\"\n\n baw201 = pe.Workflow(name=pipeline_name)\n\n\n inputsSpec = pe.Node(interface=IdentityInterface(fields=['atlasLandmarkFilename', 'atlasWeightFilename',\n 'LLSModel', 'inputTemplateModel', 'template_t1',\n 'atlasDefinition', 'T1s', 'T2s', 'PDs', 'FLs', 'OTHERs']),\n run_without_submitting=True, name='inputspec')\n\n outputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average', 't2_average', 'pd_average', 'fl_average',\n 'posteriorImages', 'outputLabels', 'outputHeadLabels',\n 'tc_atlas2session_tx',\n 'tc_atlas2sessionInverse_tx',\n 'BCD_ACPC_T1_CROPPED',\n 'outputLandmarksInACPCAlignedSpace',\n 'outputLandmarksInInputSpace',\n 'output_tx', 'LMIatlasToSubject_tx',\n 'writeBranded2DImage',\n 'UpdatedPosteriorsList' # Longitudinal\n ]),\n run_without_submitting=True, name='outputspec')\n print \"\"\"\n denoise image filter\n \"\"\"\n print \"\"\"\n Merge all T1 and T2 List\n \"\"\"\n makeDenoiseInImageList = pe.Node(Function(function=MakeOutFileList,\n input_names=['T1List', 'T2List', 'PDList', 'FLList',\n 'OtherList','postfix','PrimaryT1'],\n output_names=['inImageList','outImageList','imageTypeList']),\n run_without_submitting=True, name=\"99_makeDenoiseInImageList\")\n baw201.connect(inputsSpec, 'T1s', makeDenoiseInImageList, 'T1List')\n baw201.connect(inputsSpec, 'T2s', makeDenoiseInImageList, 'T2List')\n baw201.connect(inputsSpec, 'PDs', makeDenoiseInImageList, 'PDList')\n makeDenoiseInImageList.inputs.FLList= []# an emptyList HACK\n makeDenoiseInImageList.inputs.PrimaryT1= None # an emptyList HACK\n makeDenoiseInImageList.inputs.postfix = \"_UNM_denoised.nii.gz\"\n # HACK tissueClassifyWF.connect( inputsSpec, 'FLList', makeDenoiseInImageList, 'FLList' )\n baw201.connect(inputsSpec, 'OTHERs', makeDenoiseInImageList, 'OtherList')\n\n print \"\"\"\n Denoise:\n \"\"\"\n DenoiseInputImgs = pe.MapNode( interface=UnbiasedNonLocalMeans(),\n name='denoiseInputImgs',\n iterfield=['inputVolume',\n 'outputVolume'])\n DenoiseInputImgs.inputs.rc= [1,1,1]\n DenoiseInputImgs.inputs.rs= [4,4,4]\n DenoiseInputImgs.plugin_args = modify_qsub_args(master_config['queue'], '200M', 1, 2, hard=False)\n baw201.connect([ (makeDenoiseInImageList, DenoiseInputImgs, [('inImageList', 'inputVolume')]),\n (makeDenoiseInImageList, DenoiseInputImgs, [('outImageList','outputVolume')])\n ])\n\n makeDenoiseOutImageList = pe.Node(Function(function=GenerateSeparateImageTypeList,\n input_names=['inFileList','inTypeList'],\n output_names=['T1List', 'T2List', 'PDList', 'FLList', 'OtherList']),\n run_without_submitting=True, name=\"99_makeDenoiseOutImageList\")\n baw201.connect(DenoiseInputImgs, 'outputVolume', makeDenoiseOutImageList, 'inFileList')\n baw201.connect(makeDenoiseInImageList, 'imageTypeList', makeDenoiseOutImageList, 'inTypeList')\n\n DoReverseMapping = False # Set to true for debugging outputs\n if 'auxlmk' in master_config['components']:\n DoReverseMapping = True\n myLocalLMIWF = CreateLandmarkInitializeWorkflow(\"LandmarkInitialize\", interpMode, DoReverseMapping)\n\n baw201.connect([(makeDenoiseOutImageList, myLocalLMIWF,\n [(('T1List', get_list_element,0), 'inputspec.inputVolume' )]),\n (inputsSpec, myLocalLMIWF,\n [('atlasLandmarkFilename', 'inputspec.atlasLandmarkFilename'),\n ('atlasWeightFilename', 'inputspec.atlasWeightFilename'),\n ('LLSModel', 'inputspec.LLSModel'),\n ('inputTemplateModel', 'inputspec.inputTemplateModel'),\n ('template_t1', 'inputspec.atlasVolume')]),\n (myLocalLMIWF, outputsSpec,\n 
[('outputspec.outputResampledCroppedVolume','BCD_ACPC_T1_CROPPED'),\n ('outputspec.outputLandmarksInACPCAlignedSpace',\n 'outputLandmarksInACPCAlignedSpace'),\n ('outputspec.outputLandmarksInInputSpace',\n 'outputLandmarksInInputSpace'),\n ('outputspec.outputTransform', 'output_tx'),\n ('outputspec.atlasToSubjectTransform','LMIatlasToSubject_tx'),\n ('outputspec.writeBranded2DImage', 'writeBranded2DImage')])\n ])\n\n if 'tissue_classify' in master_config['components']:\n myLocalTCWF = CreateTissueClassifyWorkflow(\"TissueClassify\", master_config['queue'], master_config['long_q'], interpMode)\n baw201.connect([(makeDenoiseOutImageList,myLocalTCWF, [('T1List','inputspec.T1List')]),\n (makeDenoiseOutImageList,myLocalTCWF, [('T2List','inputspec.T2List')]),\n (inputsSpec, myLocalTCWF, [('atlasDefinition', 'inputspec.atlasDefinition'),\n (('T1s', getAllT1sLength), 'inputspec.T1_count'),\n ('PDs', 'inputspec.PDList'),\n ('FLs', 'inputspec.FLList'),\n ('OTHERs', 'inputspec.OtherList')]),\n (myLocalLMIWF, myLocalTCWF, [('outputspec.outputResampledCroppedVolume', 'inputspec.PrimaryT1'),\n ('outputspec.atlasToSubjectTransform',\n 'inputspec.atlasToSubjectInitialTransform')]),\n (myLocalTCWF, outputsSpec, [('outputspec.t1_average', 't1_average'),\n ('outputspec.t2_average', 't2_average'),\n ('outputspec.pd_average', 'pd_average'),\n ('outputspec.fl_average', 'fl_average'),\n ('outputspec.posteriorImages', 'posteriorImages'),\n ('outputspec.outputLabels', 'outputLabels'),\n ('outputspec.outputHeadLabels', 'outputHeadLabels'),\n ('outputspec.atlasToSubjectTransform', 'tc_atlas2session_tx'),\n ('outputspec.atlasToSubjectInverseTransform',\n 'tc_atlas2sessionInverse_tx')]),\n ])\n\n dsName = \"{0}_ds_{1}\".format(phase, sessionid)\n DataSink = pe.Node(name=dsName, interface=nio.DataSink())\n DataSink.overwrite = master_config['ds_overwrite']\n DataSink.inputs.container = '{0}/{1}/{2}'.format(projectid, subjectid, sessionid)\n DataSink.inputs.base_directory = master_config['resultdir']\n\n baw201.connect([(outputsSpec, DataSink, # TODO: change to myLocalTCWF -> DataSink\n [(('t1_average', convertToList), 'TissueClassify.@t1'),\n (('t2_average', convertToList), 'TissueClassify.@t2'),\n (('pd_average', convertToList), 'TissueClassify.@pd'),\n (('fl_average', convertToList), 'TissueClassify.@fl')]),\n ])\n baw201.connect([(outputsSpec, DataSink, # TODO: change to myLocalLMIWF -> DataSink\n [('outputLandmarksInACPCAlignedSpace', 'ACPCAlign.@outputLandmarks_ACPC'),\n ('writeBranded2DImage', 'ACPCAlign.@writeBranded2DImage'),\n ('BCD_ACPC_T1_CROPPED', 'ACPCAlign.@BCD_ACPC_T1_CROPPED'),\n ('outputLandmarksInInputSpace', 'ACPCAlign.@outputLandmarks_Input'),\n ('output_tx', 'ACPCAlign.@output_tx'),\n ('LMIatlasToSubject_tx', 'ACPCAlign.@LMIatlasToSubject_tx'),]\n )\n ]\n )\n\n currentFixWMPartitioningName = \"_\".join(['FixWMPartitioning', str(subjectid), str(sessionid)])\n FixWMNode = pe.Node(interface=Function(function=FixWMPartitioning,\n input_names=['brainMask', 'PosteriorsList'],\n output_names=['UpdatedPosteriorsList', 'MatchingFGCodeList',\n 'MatchingLabelList', 'nonAirRegionMask']),\n name=currentFixWMPartitioningName)\n\n baw201.connect([(myLocalTCWF, FixWMNode, [('outputspec.outputLabels', 'brainMask'),\n (('outputspec.posteriorImages', flattenDict), 'PosteriorsList')]),\n (FixWMNode, outputsSpec, [('UpdatedPosteriorsList', 'UpdatedPosteriorsList')]),\n ])\n\n currentBRAINSCreateLabelMapName = 'BRAINSCreateLabelMapFromProbabilityMaps_' + str(subjectid) + \"_\" + str(sessionid)\n BRAINSCreateLabelMapNode 
= pe.Node(interface=BRAINSCreateLabelMapFromProbabilityMaps(),\n name=currentBRAINSCreateLabelMapName)\n ## TODO: Fix the file names\n BRAINSCreateLabelMapNode.inputs.dirtyLabelVolume = 'fixed_headlabels_seg.nii.gz'\n BRAINSCreateLabelMapNode.inputs.cleanLabelVolume = 'fixed_brainlabels_seg.nii.gz'\n\n baw201.connect([(FixWMNode, BRAINSCreateLabelMapNode, [('UpdatedPosteriorsList','inputProbabilityVolume'),\n ('MatchingFGCodeList', 'foregroundPriors'),\n ('MatchingLabelList', 'priorLabelCodes'),\n ('nonAirRegionMask', 'nonAirRegionMask')]),\n (BRAINSCreateLabelMapNode, DataSink, [('cleanLabelVolume', 'TissueClassify.@outputLabels'),\n ('dirtyLabelVolume',\n 'TissueClassify.@outputHeadLabels')]),\n (myLocalTCWF, DataSink, [('outputspec.atlasToSubjectTransform',\n 'TissueClassify.@atlas2session_tx'),\n ('outputspec.atlasToSubjectInverseTransform',\n 'TissueClassify.@atlas2sessionInverse_tx')]),\n (FixWMNode, DataSink, [('UpdatedPosteriorsList', 'TissueClassify.@posteriors')]),\n ])\n\n currentAccumulateLikeTissuePosteriorsName = 'AccumulateLikeTissuePosteriors_' + str(subjectid) + \"_\" + str(sessionid)\n AccumulateLikeTissuePosteriorsNode = pe.Node(interface=Function(function=AccumulateLikeTissuePosteriors,\n input_names=['posteriorImages'],\n output_names=['AccumulatePriorsList',\n 'AccumulatePriorsNames']),\n name=currentAccumulateLikeTissuePosteriorsName)\n\n baw201.connect([(FixWMNode, AccumulateLikeTissuePosteriorsNode, [('UpdatedPosteriorsList', 'posteriorImages')]),\n (AccumulateLikeTissuePosteriorsNode, DataSink, [('AccumulatePriorsList',\n 'ACCUMULATED_POSTERIORS.@AccumulateLikeTissuePosteriorsOutputDir')])])\n return baw201\n"
},
{
"alpha_fraction": 0.5959494709968567,
"alphanum_fraction": 0.6067453622817993,
"avg_line_length": 51.407894134521484,
"blob_id": "98939cdb12b9c40dcf0a0e3aa1b6585f6c15a74c",
"content_id": "f15ce088b1a65b9155ec083844e4a0eded96ef05",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11949,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 228,
"path": "/AutoWorkup/workflows/atlasNode.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "def MakeAtlasNode(atlasDirectory, name, atlasParts=['BRAINSABCSupport','BRAINSCutSupport','BCDSupport','ExtraSupport']):\n \"\"\" Make an atlas node that contains the elements requested in the atlasParts section\n This will allow more fine grained data grabbers to be used, thereby allowing enhanced\n compartmentalization of algorithmic components.\n \"\"\"\n import nipype.interfaces.io as nio # Data i/o\n import nipype.pipeline.engine as pe # pypeline engine\n import os\n\n # Generate by running a file system list \"ls -1 $AtlasDir *.nii.gz *.xml *.fcsv *.wgts\"\n # atlas_file_names=atlas_file_list.split(' ')\n atlas_file_names = list()\n if 'BRAINSABCSupport' in atlasParts:\n atlas_file_names.extend( [\n \"ExtendedAtlasDefinition.xml\"\n ] )\n if 'TemplateBuildSupport' in atlasParts:\n atlas_file_names.extend( [\n \"ExtendedAtlasDefinition.xml.in\"\n ] )\n if 'BRAINSCutSupport' in atlasParts:\n atlas_file_names.extend( [\n \"hncma-atlas.nii.gz\",\n \"template_t1.nii.gz\",\n \"probabilityMaps/l_accumben_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_accumben_ProbabilityMap.nii.gz\",\n \"probabilityMaps/l_caudate_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_caudate_ProbabilityMap.nii.gz\",\n \"probabilityMaps/l_globus_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_globus_ProbabilityMap.nii.gz\",\n \"probabilityMaps/l_hippocampus_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_hippocampus_ProbabilityMap.nii.gz\",\n \"probabilityMaps/l_putamen_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_putamen_ProbabilityMap.nii.gz\",\n \"probabilityMaps/l_thalamus_ProbabilityMap.nii.gz\",\n \"probabilityMaps/r_thalamus_ProbabilityMap.nii.gz\",\n \"spatialImages/phi.nii.gz\",\n \"spatialImages/rho.nii.gz\",\n \"spatialImages/theta.nii.gz\",\n \"modelFiles/trainModelFile.txtD0060NT0060.gz\"\n ] )\n if 'BCDSupport' in atlasParts:\n atlas_file_names.extend( [\n \"template_t1.nii.gz\",\n \"20111119_BCD/LLSModel_50Lmks.hdf5\",\n \"20111119_BCD/T1_50Lmks.mdl\",\n \"20111119_BCD/template_landmarks_50Lmks.fcsv\",\n \"20111119_BCD/template_weights_50Lmks.wts\"\n ] )\n if 'ExtraSupport' in atlasParts:\n atlas_file_names.extend( [\n \"tempNOTVBBOX.nii.gz\",\n \"template_ABC_labels.nii.gz\",\n \"avg_t1.nii.gz\",\n \"avg_t2.nii.gz\",\n \"hncma-atlas.nii.gz\",\n \"hncma-atlas-lut-mod2.ctbl\",\n \"template_rightHemisphere.nii.gz\",\n \"template_WMPM2_labels.nii.gz\",\n \"template_WMPM2_labels.txt\",\n \"template_brain.nii.gz\",\n \"template_cerebellum.nii.gz\",\n \"template_class.nii.gz\",\n \"template_headregion.nii.gz\",\n \"template_leftHemisphere.nii.gz\",\n \"template_nac_labels.nii.gz\",\n \"template_nac_labels.txt\",\n \"template_t1.nii.gz\",\n \"template_t2.nii.gz\",\n \"template_t1_clipped.nii.gz\",\n \"template_t2_clipped.nii.gz\",\n \"template_ventricles.nii.gz\"\n ] )\n atlas_file_names = list(set(atlas_file_names)) # Make a unique listing\n ## Remove filename extensions for images, but replace . 
with _ for other file types\n atlas_file_keys = [os.path.basename(fn).replace('.nii.gz', '').replace('.', '_') for fn in atlas_file_names]\n atlas_outputs_filename_match = dict(zip(atlas_file_keys, atlas_file_names))\n\n node = pe.Node(interface=nio.DataGrabber(force_output=False, outfields=atlas_file_keys),\n run_without_submitting=True,\n name=name)\n node.inputs.base_directory = atlasDirectory\n node.inputs.sort_filelist = False\n node.inputs.template = '*'\n ## Prefix every filename with atlasDirectory\n atlas_search_paths = ['{0}'.format(fn) for fn in atlas_file_names]\n node.inputs.field_template = dict(zip(atlas_file_keys, atlas_search_paths))\n ## Give 'atlasDirectory' as the substitution argument\n atlas_template_args_match = [ [[]] for i in atlas_file_keys] # build a list of proper length with repeated entries\n node.inputs.template_args = dict(zip(atlas_file_keys, atlas_template_args_match))\n # print \"+\" * 100\n # print node.inputs\n # print \"-\" * 100\n return node\n\ndef CreateAtlasXMLAndCleanedDeformedAverages(t1_image, deformed_list, AtlasTemplate, outDefinition):\n import os\n import sys\n import SimpleITK as sitk\n\n patternDict = {\n 'AVG_WM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_WM.nii.gz',\n 'AVG_SURFGM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_SURFGM.nii.gz',\n 'AVG_BASAL.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_BASAL.nii.gz',\n 'AVG_GLOBUS.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_GLOBUS.nii.gz',\n 'AVG_THALAMUS.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_THALAMUS.nii.gz',\n 'AVG_HIPPOCAMPUS.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_HIPPOCAMPUS.nii.gz',\n 'AVG_CRBLGM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_CRBLGM.nii.gz',\n 'AVG_CRBLWM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_CRBLWM.nii.gz',\n 'AVG_CSF.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_CSF.nii.gz',\n 'AVG_VB.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_VB.nii.gz',\n 'AVG_NOTCSF.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_NOTCSF.nii.gz',\n 'AVG_NOTGM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_NOTGM.nii.gz',\n 'AVG_NOTWM.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_NOTWM.nii.gz',\n 'AVG_NOTVB.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_NOTVB.nii.gz',\n 'AVG_AIR.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/GENERATED_AIR.nii.gz',\n 'AVG_BRAINMASK.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/template_brain.nii.gz',\n 'T1_RESHAPED.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/template_t1.nii.gz',\n 'AVG_T2.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/template_t2.nii.gz',\n 'AVG_PD.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/template_t2.nii.gz',\n 'AVG_FL.nii.gz': '@ATLAS_INSTALL_DIRECTORY@/template_t2.nii.gz'\n }\n templateFile = open(AtlasTemplate, 'r')\n xmlAtlasFileContents = templateFile.read() # read entire file into memory\n templateFile.close()\n\n ## Now clean up the posteriors based on anatomical knowlege.\n ## sometimes the posteriors are not relevant for priors\n ## due to anomolies around the edges.\n #print(\"\\n\\n\\nALL_FILES: {0}\\n\\n\\n\".format(deformed_list))\n load_images_list = dict()\n for full_pathname in deformed_list:\n base_name = os.path.basename(full_pathname)\n if base_name in patternDict.keys():\n load_images_list[base_name] = sitk.ReadImage(full_pathname)\n else:\n print(\"MISSING FILE FROM patternDict: {0}\".format(base_name))\n ## Make binary dilated mask\n binmask = sitk.BinaryThreshold(load_images_list['AVG_BRAINMASK.nii.gz'], 1, 1000000)\n brainmask_dilatedBy5 = sitk.DilateObjectMorphology(binmask, 5)\n brainmask_dilatedBy5 = sitk.Cast(brainmask_dilatedBy5, 
sitk.sitkFloat32) # Convert to Float32 for multiply\n\n inv_brainmask_erodedBy5 = 1 - sitk.ErodeObjectMorphology(binmask, 5)\n inv_brainmask_erodedBy5 = sitk.Cast(inv_brainmask_erodedBy5, sitk.sitkFloat32) # Convert to Float32 for multiply\n\n ## Now clip the interior brain mask with brainmask_dilatedBy5\n interiorPriors = [\n 'AVG_WM.nii.gz',\n 'AVG_SURFGM.nii.gz',\n 'AVG_BASAL.nii.gz',\n 'AVG_CRBLGM.nii.gz',\n 'AVG_CRBLWM.nii.gz',\n 'AVG_CSF.nii.gz',\n 'AVG_VB.nii.gz',\n 'AVG_GLOBUS.nii.gz',\n 'AVG_THALAMUS.nii.gz',\n 'AVG_HIPPOCAMPUS.nii.gz',\n ]\n exteriorPriors = [\n 'AVG_NOTWM.nii.gz',\n 'AVG_NOTGM.nii.gz',\n 'AVG_NOTCSF.nii.gz',\n 'AVG_NOTVB.nii.gz',\n 'AVG_AIR.nii.gz'\n ]\n clean_deformed_list = deformed_list\n T2File = None\n PDFile = None\n for index in range(0, len(deformed_list)):\n full_pathname = deformed_list[index]\n base_name = os.path.basename(full_pathname)\n if base_name == 'AVG_BRAINMASK.nii.gz':\n ### Make Brain Mask Binary\n clipped_name = 'CLIPPED_' + base_name\n patternDict[clipped_name] = patternDict[base_name]\n sitk.WriteImage(binmask, clipped_name)\n clean_deformed_list[index] = os.path.realpath(clipped_name)\n elif base_name == 'AVG_T2.nii.gz':\n T2File = full_pathname\n elif base_name == 'AVG_PD.nii.gz':\n PDFile = full_pathname\n elif base_name in interiorPriors:\n ### Make clipped posteriors for brain regions\n curr = sitk.Cast(sitk.ReadImage(full_pathname), sitk.sitkFloat32)\n curr = curr * brainmask_dilatedBy5\n clipped_name = 'CLIPPED_' + base_name\n patternDict[clipped_name] = patternDict[base_name]\n sitk.WriteImage(curr, clipped_name)\n clean_deformed_list[index] = os.path.realpath(clipped_name)\n #print \"HACK: \", clean_deformed_list[index]\n curr = None\n elif base_name in exteriorPriors:\n ### Make clipped posteriors for brain regions\n curr = sitk.Cast(sitk.ReadImage(full_pathname), sitk.sitkFloat32)\n curr = curr * inv_brainmask_erodedBy5\n clipped_name = 'CLIPPED_' + base_name\n patternDict[clipped_name] = patternDict[base_name]\n sitk.WriteImage(curr, clipped_name)\n clean_deformed_list[index] = os.path.realpath(clipped_name)\n #print \"HACK: \", clean_deformed_list[index]\n curr = None\n else:\n import sys\n print \"ERROR: basename {0} not in list!! 
\\n{1}\".format(base_name,['AVG_BRAINMASK.nii.gz','AVG_T2.nii.gz','AVG_PD.nii.gz',interiorPriors,exteriorPriors])\n sys.exit(-1)\n\n binmask = None\n brainmask_dilatedBy5 = None\n inv_brainmask_erodedBy5 = None\n\n for full_pathname in clean_deformed_list:\n base_name = os.path.basename(full_pathname)\n if base_name in patternDict.keys():\n xmlAtlasFileContents = xmlAtlasFileContents.replace(patternDict[base_name], base_name)\n ## If there is no T2, then use the PD image\n if T2File is not None:\n xmlAtlasFileContents = xmlAtlasFileContents.replace('@ATLAS_INSTALL_DIRECTORY@/template_t2.nii.gz', os.path.basename(T2File))\n elif PDFile is not None:\n xmlAtlasFileContents = xmlAtlasFileContents.replace('@ATLAS_INSTALL_DIRECTORY@/template_t2.nii.gz', os.path.basename(PDFile))\n xmlAtlasFileContents = xmlAtlasFileContents.replace('@ATLAS_INSTALL_DIRECTORY@/template_t1.nii.gz', 'AVG_T1.nii.gz')\n ## NOTE: HEAD REGION CAN JUST BE T1 image.\n xmlAtlasFileContents = xmlAtlasFileContents.replace('@ATLAS_INSTALL_DIRECTORY@/template_headregion.nii.gz', os.path.basename(t1_image) )\n ## NOTE: BRAIN REGION CAN JUST BE the label images.\n outAtlasFullPath = os.path.realpath(outDefinition)\n newFile = open(outAtlasFullPath, 'w')\n newFile.write(xmlAtlasFileContents) # write the file with the text substitution\n newFile.close()\n return outAtlasFullPath, clean_deformed_list\n"
},
{
"alpha_fraction": 0.5541031360626221,
"alphanum_fraction": 0.5584604144096375,
"avg_line_length": 34.30769348144531,
"blob_id": "719f9d5c2b22d33faca55b76d2c543c541dcac73",
"content_id": "ba401917624c3bdea833a20a28b387b0fea3729a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1377,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 39,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceGeneration/vtkITK.h",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "/*=========================================================================\n *\n * Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering\n * The University of Iowa\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *=========================================================================*/\n/*=========================================================================\n\n Copyright Brigham and Women's Hospital (BWH) All Rights Reserved.\n\n See COPYRIGHT.txt\n or http://www.slicer.org/copyright/copyright.txt for details.\n\n Program: vtkITK\n Module: $HeadURL$\n Date: $Date$\n Version: $Revision$\n\n==========================================================================*/\n\n#ifndef __vtkITK_h\n#define __vtkITK_h\n\n#include \"vtkITKWin32Header.h\"\n#include \"vtkITKNumericTraits.h\"\n\n#endif\n"
},
{
"alpha_fraction": 0.5778546929359436,
"alphanum_fraction": 0.5778546929359436,
"avg_line_length": 18.200000762939453,
"blob_id": "70a5a922b0582d6151fae505528da6790e8c8e58",
"content_id": "19f052e89b1fd830cf2f6f9923d1d241f0240b65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/BRAINSStripRotation/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "\nproject(BRAINSStripRotation)\n\n#-----------------------------------------------------------------------------\n# Dependencies.\n#\n\n#\n# ITK\n#\nFindITKUtil(ITKTransform\n ITKImageCompare)\n\nset(prog BRAINSStripRotation)\n\nStandardBRAINSBuildMacro( NAME ${prog} TARGET_LIBRARIES BRAINSCommonLib )\n"
},
{
"alpha_fraction": 0.5757380127906799,
"alphanum_fraction": 0.5844286680221558,
"avg_line_length": 53.38671875,
"blob_id": "c6bc53251cd266c111a3adafa7a516539691ffd2",
"content_id": "dbf6bac2343d8f7049a83071438b718122c76410",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13923,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 256,
"path": "/AutoWorkup/singleSession.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\"\"\"\nsingleSession.py\n=========\nThis program is used to generate the subject- and session-specific workflows for BRAINSTool processing\n\nUsage:\n singleSession.py [--rewrite-datasinks] [--wfrun PLUGIN] [--use-sentinal] --workphase WORKPHASE --pe ENV --ExperimentConfig FILE SESSIONS...\n singleSession.py -v | --version\n singleSession.py -h | --help\n\nArguments:\n SESSIONS List of sessions to process. Specifying 'all' processes every session in\n the database (specified in the --ExperimentConfig FILE)\n\nOptions:\n -h, --help Show this help and exit\n -v, --version Print the version and exit\n --rewrite-datasinks Turn on the Nipype option to overwrite all files in the 'results' directory\n --use-sentinal Use the t1_average file as a marker to determine if session needs to be run\n --pe ENV The processing environment to use from configuration file\n --wfrun PLUGIN The name of the workflow plugin option (default: 'local')\n --workphase WORKPHASE The type of processing to be done [atlas-based-reference|subject-based-reference]\n --ExperimentConfig FILE The configuration file\n\n\nExamples:\n $ singleSession.py --pe OSX --ExperimentConfig my_baw.config all\n $ singleSession.py --use-sentinal --wfrun SGEGraph --pe OSX --ExperimentConfig my_baw.config 00001 00002\n $ singleSession.py --rewrite-datasinks --pe OSX --ExperimentConfig my_baw.config 00003\n\n\"\"\"\n\nimport os\n\n\ndef create_singleSession(dataDict, master_config, interpMode, pipeline_name):\n \"\"\"\n create singleSession workflow on a single session\n\n This is the main function to call when processing a data set with T1 & T2\n data. ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images\n are the lists of images to be used in the auto-workup. 
atlas_fname_wpath is\n the path and filename of the atlas to use.\n \"\"\"\n assert 'tissue_classify' in master_config['components'] or \\\n 'auxlmk' in master_config['components'] or \\\n 'segmentation' in master_config['components']\n\n from nipype import config, logging\n config.update_config(master_config) # Set universal pipeline options\n logging.update_logging(config)\n\n import nipype.pipeline.engine as pe\n import nipype.interfaces.io as nio\n from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, Directory, traits, isdefined, BaseInterface\n from nipype.interfaces.utility import Split, Rename, IdentityInterface, Function\n\n from workflows.baseline import generate_single_session_template_WF\n from PipeLineFunctionHelpers import convertToList\n from utilities.misc import GenerateSubjectOutputPattern as outputPattern\n from utilities.misc import GenerateWFName\n from workflows.utils import run_workflow, print_workflow\n from workflows.atlasNode import MakeAtlasNode\n\n project = dataDict['project']\n subject = dataDict['subject']\n session = dataDict['session']\n\n pname = \"{0}_{1}_{2}\".format(master_config['workflow_phase'], subject, session)\n sessionWorkflow = generate_single_session_template_WF(project, subject, session, master_config,\n phase=master_config['workflow_phase'],\n interpMode=interpMode,\n pipeline_name=pipeline_name)\n sessionWorkflow.base_dir = master_config['cachedir']\n\n SSinputsSpecPtr = sessionWorkflow.get_node('inputspec')\n SSinputsSpecPtr.inputs.T1s = dataDict['T1s']\n SSinputsSpecPtr.inputs.T2s = dataDict['T2s']\n SSinputsSpecPtr.inputs.PDs = dataDict['PDs']\n SSinputsSpecPtr.inputs.FLs = dataDict['FLs']\n SSinputsSpecPtr.inputs.OTHERs = dataDict['OTs']\n atlasBCDNode = MakeAtlasNode(master_config['atlascache'], 'BBCDAtlas_{0}'.format(session), ['BCDSupport'])\n sessionWorkflow.connect([(atlasBCDNode, SSinputsSpecPtr, [('template_t1', 'template_t1'),\n ('template_landmarks_50Lmks_fcsv',\n 'atlasLandmarkFilename'),\n ('template_weights_50Lmks_wts', 'atlasWeightFilename'),\n ('LLSModel_50Lmks_hdf5', 'LLSModel'),\n ('T1_50Lmks_mdl', 'inputTemplateModel')]),\n ])\n if master_config['workflow_phase'] == 'atlas-based-reference':\n # TODO: input atlas csv\n atlasABCNode = MakeAtlasNode(master_config['atlascache'], 'BABCAtlas_{0}'.format(session), ['BRAINSABCSupport'])\n\n sessionWorkflow.connect([(atlasABCNode, SSinputsSpecPtr,\n [('ExtendedAtlasDefinition_xml', 'atlasDefinition')]\n ),\n ])\n elif master_config['workflow_phase'] == 'subject-based-reference':\n print master_config['previousresult']\n template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],\n outfields=['outAtlasXMLFullPath']),\n name='Template_DG')\n template_DG.inputs.base_directory = master_config['previousresult']\n template_DG.inputs.subject = subject\n template_DG.inputs.template = '%s/Atlas/AtlasDefinition_%s.xml'\n template_DG.inputs.template_args['outAtlasXMLFullPath'] = [['subject', 'subject']]\n template_DG.inputs.sort_filelist = True\n template_DG.inputs.raise_on_empty = True\n\n sessionWorkflow.connect([(template_DG, SSinputsSpecPtr,\n [('outAtlasXMLFullPath', 'atlasDefinition')\n ]),\n ])\n else:\n assert 0 == 1, \"Invalid workflow type specified for singleSession\"\n\n if 'segmentation' in master_config['components']:\n from workflows.segmentation import segmentation\n from workflows.WorkupT1T2BRAINSCut import GenerateWFName\n try:\n bCutInputName = \".\".join([GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])\n 
except:\n print project, subject, session\n raise\n sname = 'segmentation'\n onlyT1 = not(len(dataDict['T2s']) > 0)\n atlasBCUTNode = MakeAtlasNode(master_config['atlascache'],\n 'BBCUTAtlas_{0}'.format(session), ['BRAINSCutSupport'])\n segWF = segmentation(project, subject, session, master_config, onlyT1, pipeline_name=sname)\n sessionWorkflow.connect([(atlasBCUTNode, segWF,\n [('hncma-atlas', 'inputspec.hncma-atlas'),\n ('template_t1', 'inputspec.template_t1'),\n ('template_t1', bCutInputName + '.template_t1'),\n ('rho', bCutInputName + '.rho'),\n ('phi', bCutInputName + '.phi'),\n ('theta', bCutInputName + '.theta'),\n ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),\n ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),\n ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),\n ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),\n ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),\n ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),\n ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),\n ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),\n ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),\n ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),\n ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),\n ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),\n ('trainModelFile_txtD0060NT0060_gz', bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])\n outputSpec = sessionWorkflow.get_node('outputspec')\n sessionWorkflow.connect([(outputSpec, segWF, [('t1_average', 'inputspec.t1_average'),\n ('LMIatlasToSubject_tx', 'inputspec.LMIatlasToSubject_tx'),\n ('outputLabels', 'inputspec.inputLabels'),\n ('posteriorImages', 'inputspec.posteriorImages'),\n ('tc_atlas2sessionInverse_tx',\n 'inputspec.TissueClassifyatlasToSubjectInverseTransform'),\n ('UpdatedPosteriorsList', 'inputspec.UpdatedPosteriorsList'),\n ('outputHeadLabels', 'inputspec.inputHeadLabels')])\n ])\n if not onlyT1:\n sessionWorkflow.connect([(outputSpec, segWF, [('t2_average', 'inputspec.t2_average')])])\n\n return sessionWorkflow\n\n\ndef createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal=False):\n from baw_exp import OpenSubjectDatabase\n from utilities.misc import add_dict\n from workflows.utils import run_workflow, print_workflow\n master_config = {}\n for configDict in [environment, experiment, pipeline, cluster]:\n master_config = add_dict(master_config, configDict)\n database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])\n database.open_connection()\n try:\n all_sessions = database.getAllSessions()\n if not set(sessions) <= set(all_sessions) and 'all' not in sessions:\n missing = set(sessions) - set(all_sessions)\n assert len(missing) == 0, \"Requested sessions are missing from the database: {0}\".format(missing)\n elif 'all' in sessions:\n sessions = set(all_sessions)\n else:\n sessions = set(sessions)\n print \"!=\" * 40\n print(\"Doing sessions {0}\".format(sessions))\n print \"!=\" * 40\n for session in sessions:\n _dict = {}\n _dict['session'] = session\n _dict['project'] = database.getProjFromSession(session)\n _dict['subject'] = database.getSubjFromSession(session)\n _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])\n 
_dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])\n _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])\n _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])\n _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])\n sentinal_file_basedir = os.path.join(\n master_config['resultdir'],\n _dict['project'],\n _dict['subject'],\n _dict['session']\n )\n ## Use t1 average sentinal file if specified.\n if 'tissue_classify' in master_config['components']:\n sentinal_file = os.path.join(\n sentinal_file_basedir,\n \"TissueClassify\",\n \"t1_average_BRAINSABC.nii.gz\"\n )\n ## Use different sentinal file if segmentation specified.\n if 'segmentation' in master_config['components']:\n sentinal_file = os.path.join(\n sentinal_file_basedir,\n \"CleanedDenoisedRFSegmentations\",\n \"allLabels_seg.nii.gz\"\n )\n\n if useSentinal and os.path.exists(sentinal_file):\n print(\"SKIPPING: {0} exists\".format(sentinal_file))\n else:\n workflow = create_singleSession(_dict, master_config, 'Linear',\n 'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))\n print(\"Starting session {0}\".format(session))\n # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely\n run_workflow(workflow, plugin=master_config['plugin_name'], plugin_args=master_config['plugin_args'])\n except:\n raise\n finally:\n try:\n database.close_connection()\n except:\n pass\n\n\ndef _main(environment, experiment, pipeline, cluster, **kwds):\n from utilities.configFileParser import nipype_options\n from utilities.misc import add_dict\n\n print \"Copying Atlas directory and determining appropriate Nipype options...\"\n pipeline = nipype_options(kwds, pipeline, cluster, experiment, environment) # Generate Nipype options\n print \"Getting session(s) from database...\"\n createAndRun(kwds['SESSIONS'], environment, experiment, pipeline, cluster, useSentinal=kwds['--use-sentinal'])\n return 0\n\nif __name__ == '__main__':\n import sys\n from docopt import docopt\n from AutoWorkup import setup\n\n argv = docopt(__doc__, version='1.1')\n print argv\n print '=' * 100\n configs = setup(argv)\n exit = _main(*configs, **argv)\n sys.exit(exit)\n"
},
{
"alpha_fraction": 0.6453201770782471,
"alphanum_fraction": 0.6472906470298767,
"avg_line_length": 33.40678024291992,
"blob_id": "2dce1545311cebfa59b8a52e807e049a61b7731b",
"content_id": "3e72ea72d991aa0f8e3216d0761c3f033bb13a24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2030,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 59,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceGeneration/vtkITKArchetypeImageSeriesScalarReader.h",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "/*=========================================================================\n *\n * Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering\n * The University of Iowa\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *=========================================================================*/\n/*=========================================================================\n\n Copyright Brigham and Women's Hospital (BWH) All Rights Reserved.\n\n See COPYRIGHT.txt\n or http://www.slicer.org/copyright/copyright.txt for details.\n\n Program: vtkITK\n Module: $HeadURL$\n Date: $Date$\n Version: $Revision$\n\n==========================================================================*/\n\n#ifndef __vtkITKArchetypeImageSeriesScalarReader_h\n#define __vtkITKArchetypeImageSeriesScalarReader_h\n\n#include \"vtkITKArchetypeImageSeriesReader.h\"\n\n#include \"itkImageFileReader.h\"\n\nclass VTK_ITK_EXPORT vtkITKArchetypeImageSeriesScalarReader : public vtkITKArchetypeImageSeriesReader\n{\npublic:\n static vtkITKArchetypeImageSeriesScalarReader * New();\n\n vtkTypeMacro(vtkITKArchetypeImageSeriesScalarReader, vtkITKArchetypeImageSeriesReader);\n void PrintSelf(ostream& os, vtkIndent indent);\n\nprotected:\n vtkITKArchetypeImageSeriesScalarReader();\n ~vtkITKArchetypeImageSeriesScalarReader();\n\n void ExecuteData(vtkDataObject *data);\n\n static void ReadProgressCallback(itk::ProcessObject* obj, const itk::ProgressEvent &, void* data);\n\n /// private:\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.6037623286247253,
"alphanum_fraction": 0.6195879578590393,
"avg_line_length": 43.6533317565918,
"blob_id": "43ff6e82802c13726110a8243dccfc837e249fbb",
"content_id": "fdb48ae8da4cb6cd2bd221375adb1a35bb803007",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3349,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 75,
"path": "/AutoWorkup/utilities/distributed.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "def load_cluster(modules=[]):\n if len(modules) > 0:\n module_list = []\n for module in modules:\n module_list.append(\"module load {name}\".format(name=module))\n assert len(modules) == len(module_list)\n return '\\n'.join(module_list)\n return ''\n\n\ndef source_virtualenv(virtualenv_dir=''):\n if virtualenv_dir is None:\n return ''\n assert virtualenv_dir != ''\n return \"source {0}\".format(virtualenv_dir)\n\n\ndef prepend_env(environment={}):\n import os\n export_list = []\n for key, value in environment.items():\n export_list.append(\"export {key}={value}{sep}${key}\".format(key=key, value=value, sep=os.pathsep)) # Append to variable\n return '\\n'.join(export_list)\n\n\ndef create_global_sge_script(cluster, environment):\n \"\"\"\n This is a wrapper script for running commands on an SGE cluster\n so that all the python modules and commands are pathed properly\n\n >>> import os\n >>> nomodules = open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'TestSuite', 'node.sh.template.nomodules'), 'r')\n >>> create_global_sge_script({'modules':[]}, {'virtualenv_dir':'/path/to/virtualenv_dir', 'env': os.environ}).split('\\n')[0]\n True\n >>> create_global_sge_script({'modules':[]}, {'virtualenv_dir':'/path/to/virtualenv_dir', 'env': os.environ}).split('\\n')[0] == '#!/bin/bash FAIL'\n\n \"\"\"\n import os\n from string import Template\n import sys\n\n sub_dict = dict(LOAD_MODULES=load_cluster(cluster['modules']),\n VIRTUALENV_DIR=source_virtualenv(environment['virtualenv_dir']),\n EXPORT_ENV=prepend_env(environment['env']))\n with open(os.path.join(os.path.dirname(__file__), 'node.sh.template')) as fid:\n tpl = fid.read()\n retval = Template(tpl).substitute(sub_dict)\n return retval\n\n\ndef modify_qsub_args(queue, memory, minThreads=1, maxThreads=None, stdout='/dev/null', stderr='/dev/null', hard=True):\n \"\"\"\n Outputs qsub_args string for Nipype nodes\n\n >>> modify_qsub_args('test', 200, 5)\n -S /bin/bash -cwd -pe smp 5 -l mem_free=200 -o /dev/null -e /dev/null test FAIL\n >>> modify_qsub_args('test', 200, 5, hard=False)\n -S /bin/bash -cwd -pe smp 5- -l mem_free=200 -o /dev/null -e /dev/null test FAIL\n >>> modify_qsub_args('test', 800, 5, 7)\n -S /bin/bash -cwd -pe smp 5-7 -l mem_free=800 -o /dev/null -e /dev/null test FAIL\n >>> modify_qsub_args('test', 800, 5, 7, hard=False)\n -S /bin/bash -cwd -pe smp 5-7 -l mem_free=800 -o /dev/null -e /dev/null test FAIL\n >>> modify_qsub_args('test', 1000, 5, 7, stdout='/my/path', stderr='/my/error')\n -S /bin/bash -cwd -pe smp 5-7 -l mem_free=1000 -o /my/path -e /my/error test FAIL\n\n \"\"\"\n if maxThreads is None:\n if hard:\n format_str = '-S /bin/bash -cwd -pe smp {mint} -l mem_free={mem} -o {stdout} -e {stderr} {queue}'\n else:\n format_str = '-S /bin/bash -cwd -pe smp {mint}- -l mem_free={mem} -o {stdout} -e {stderr} {queue}'\n return format_str.format(mint=minThreads, mem=memory, stdout=stdout, stderr=stderr, queue=queue)\n else:\n format_str = '-S /bin/bash -cwd -pe smp {mint}-{maxt} -l mem_free={mem} -o {stdout} -e {stderr} {queue}'\n return format_str.format(mint=minThreads, maxt=maxThreads, mem=memory, stdout=stdout, stderr=stderr, queue=queue)\n"
},
{
"alpha_fraction": 0.8208954930305481,
"alphanum_fraction": 0.8208954930305481,
"avg_line_length": 21.33333396911621,
"blob_id": "c06ae9364a280c7c87e4966ec5adb8ff2ad407a8",
"content_id": "ff6e9098d37e7ac20b366b427ee309067e523694",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/AutoWorkup/TestSuite/__init__.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "# import utilities\n# import workflows\nfrom AutoWorkup import setup\n"
},
{
"alpha_fraction": 0.5735408067703247,
"alphanum_fraction": 0.5820134878158569,
"avg_line_length": 60.213558197021484,
"blob_id": "def6864ac6e387572d6729128095182089f45a89",
"content_id": "8b87a537d9daca6d824e4709357a8e9870f2d22d",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18058,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 295,
"path": "/AutoWorkup/template.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\"\"\"\ntemplate.py\n=========\nThis program is used to generate the subject- and session-specific workflows for BRAINSTool processing\n\nUsage:\n template.py [--rewrite-datasinks] [--wfrun PLUGIN] [--dotfilename PFILE] --workphase WORKPHASE --pe ENV --ExperimentConfig FILE SUBJECTS...\n template.py -v | --version\n template.py -h | --help\n\nArguments:\n SUBJECTS List of subject IDs to process\n\nOptions:\n -h, --help Show this help and exit\n -v, --version Print the version and exit\n --dotfilename=PFILE Turn on printing pipeline to file PFILE\n --rewrite-datasinks Turn on the Nipype option to overwrite all files in the 'results' directory\n --pe=ENV The processing environment to use from configuration file\n --wfrun=PLUGIN The name of the workflow plugin option (default: 'local')\n --workphase WORKPHASE The type of processing to be done only VALID is ['subject-template-generation']\n --ExperimentConfig=FILE The configuration file\n\nExamples:\n $ template.py --pe OSX --ExperimentConfig my_baw.config all\n $ template.py --wfrun helium_all.q --pe OSX --ExperimentConfig my_baw.config 1058 1059\n $ template.py --rewrite-datasinks --pe OSX --ExperimentConfig my_baw.config 2001\n\n\"\"\"\nimport os\nimport sys\nimport traceback\n\nfrom baw_exp import OpenSubjectDatabase\n\ndef get_subjects_sessions_dictionary(subjects, cache, prefix, dbfile, shuffle=False):\n import random\n _temp = OpenSubjectDatabase(cache, ['all'], prefix, dbfile)\n if \"all\" in subjects:\n subjects = _temp.getAllSubjects()\n if shuffle:\n random.shuffle(subjects) # randomly shuffle to get max\n subject_sessions_dictionary = dict()\n for subject in subjects:\n subject_sessions_dictionary[subject]=_temp.getSessionsFromSubject(subject)\n return subjects,subject_sessions_dictionary\n\ndef MergeByExtendListElements(t1s, t2s, pds, fls, labels, posteriors):\n \"\"\"\n *** NOTE: All input lists MUST have the same number of elements (even if they are null) ***\n\n output = [{'T1': os.path.join(mydatadir, '01_T1_half.nii.gz'),\n 'INV_T1': os.path.join(mydatadir, '01_T1_inv_half.nii.gz'),\n 'LABEL_MAP': os.path.join(mydatadir, '01_T1_inv_half.nii.gz')\n },\n {'T1': os.path.join(mydatadir, '02_T1_half.nii.gz'),\n 'INV_T1': os.path.join(mydatadir, '02_T1_inv_half.nii.gz'),\n 'LABEL_MAP': os.path.join(mydatadir, '02_T1_inv_half.nii.gz')\n },\n {'T1': os.path.join(mydatadir, '03_T1_half.nii.gz'),\n 'INV_T1': os.path.join(mydatadir, '03_T1_inv_half.nii.gz'),\n 'LABEL_MAP': os.path.join(mydatadir, '03_T1_inv_half.nii.gz')\n }\n ]\n labels = ['brain_label_seg.nii.gz', 'brain_label_seg.nii.gz']\n pds = [None, None]\n t1s = ['t1_average_BRAINSABC.nii.gz', 't1_average_BRAINSABC.nii.gz']\n t2s = ['t2_average_BRAINSABC.nii.gz', 't2_average_BRAINSABC.nii.gz']\n\n \"\"\"\n # print \"t1s\", t1s\n # print \"t2s\", t2s\n # print \"pds\", pds\n # print \"fls\", fls\n # print \"labels\", labels\n # print \"$$$$$$$$$$$$$$$$$$$$$$$\"\n # print \"posteriors\", posteriors\n ListOfImagesDictionaries = [dict() for i in t1s] # Initial list with empty dictionaries\n ## HACK: Need to make it so that AVG_AIR.nii.gz has a background value of 1\n registrationImageTypes = ['T1'] # ['T1','T2'] someday.\n DefaultContinuousInterpolationType = 'Linear' # or 'LanczosWindowedSinc' ('Linear' for speed)\n interpolationMapping = {'T1': DefaultContinuousInterpolationType,\n 'T2': DefaultContinuousInterpolationType,\n 'PD': DefaultContinuousInterpolationType,\n 'FL': DefaultContinuousInterpolationType,\n 'BRAINMASK': 'MultiLabel'\n }\n for 
list_index in range(len(t1s)):\n if t1s[list_index] is not None:\n ListOfImagesDictionaries[list_index]['T1'] = t1s[list_index]\n if isinstance(t2s, list) and t2s[list_index] is not None:\n ListOfImagesDictionaries[list_index]['T2'] = t2s[list_index]\n if isinstance(pds, list) and pds[list_index] is not None:\n ListOfImagesDictionaries[list_index]['PD'] = pds[list_index]\n if isinstance(fls, list) and fls[list_index] is not None:\n ListOfImagesDictionaries[list_index]['FL'] = fls[list_index]\n if labels[list_index] is not None:\n ListOfImagesDictionaries[list_index]['BRAINMASK'] = labels[list_index]\n print ListOfImagesDictionaries[list_index]\n for key, value in posteriors.items():\n # print \"key:\", key, \" -> value:\", value\n ListOfImagesDictionaries[list_index][key] = value[list_index]\n\n # print \"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\"\n # print \"ListOfImagesDictionaries\", ListOfImagesDictionaries\n # print \"registrationImageTypes\", registrationImageTypes\n # print \"interpolationMapping\", interpolationMapping\n return ListOfImagesDictionaries, registrationImageTypes, interpolationMapping\n\n\ndef xml_filename(subject):\n return 'AtlasDefinition_{0}.xml'.format(subject)\n\ndef getSessionsFromSubjectDictionary(subject_session_dictionary,subject):\n return subject_session_dictionary[subject]\n\n\ndef _template_runner(argv, environment, experiment, pipeline, cluster):\n print \"Getting subjects from database...\"\n # subjects = argv[\"--subjects\"].split(',')\n subjects, subjects_sessions_dictionary = get_subjects_sessions_dictionary(argv['SUBJECTS'], experiment['cachedir'], environment['prefix'], experiment['dbfile']) # Build database before parallel section\n print \"Copying Atlas directory and determining appropriate Nipype options...\"\n pipeline = nipype_options(argv, pipeline, cluster, experiment, environment) # Generate Nipype options\n print \"Dispatching jobs to the system...\"\n\n ######\n ###### Now start workflow construction\n ######\n # Set universal pipeline options\n nipype_config.update_config(pipeline)\n\n template = pe.Workflow(name='SubjectAtlas_Template')\n template.base_dir = pipeline['logging']['log_directory']\n\n subjectIterator = pe.Node(interface=IdentityInterface(fields=['subject']), run_without_submitting=True, name='99_subjectIterator')\n subjectIterator.iterables = ('subject', subjects)\n\n sessionsExtractorNode = pe.Node(Function(function=getSessionsFromSubjectDictionary,\n input_names=['subject_session_dictionary','subject'],\n output_names=['sessions']),\n run_without_submitting=True, name=\"99_sessionsExtractor\")\n sessionsExtractorNode.inputs.subject_session_dictionary = subjects_sessions_dictionary\n\n baselineDG = pe.MapNode(nio.DataGrabber(infields=['subject','session'],\n outfields=['t1_average', 't2_average', 'pd_average',\n 'fl_average', 'brainMaskLabels',\n 'posteriorImages']),\n iterfield=['session'], name='Baseline_DG')\n\n baselineDG.inputs.base_directory = experiment['previousresult']\n baselineDG.inputs.sort_filelist = True\n baselineDG.inputs.raise_on_empty = False\n baselineDG.inputs.template = '*'\n posterior_files = ['AIR', 'BASAL', 'CRBLGM', 'CRBLWM', 'CSF', 'GLOBUS', 'HIPPOCAMPUS',\n 'NOTCSF', 'NOTGM', 'NOTVB', 'NOTWM', 'SURFGM', 'THALAMUS', 'VB', 'WM']\n baselineDG.inputs.field_template = {'t1_average':'*/%s/%s/TissueClassify/t1_average_BRAINSABC.nii.gz',\n 't2_average':'*/%s/%s/TissueClassify/t2_average_BRAINSABC.nii.gz',\n 'pd_average':'*/%s/%s/TissueClassify/pd_average_BRAINSABC.nii.gz',\n 
'fl_average':'*/%s/%s/TissueClassify/fl_average_BRAINSABC.nii.gz',\n 'brainMaskLabels':'*/%s/%s/TissueClassify/fixed_brainlabels_seg.nii.gz',\n 'posteriorImages':'*/%s/%s/TissueClassify/POSTERIOR_%s.nii.gz'\n }\n baselineDG.inputs.template_args = {'t1_average':[['subject','session']],\n 't2_average':[['subject','session']],\n 'pd_average':[['subject','session']],\n 'fl_average':[['subject','session']],\n 'brainMaskLabels':[['subject','session']],\n 'posteriorImages':[['subject','session', posterior_files]]\n }\n\n MergeByExtendListElementsNode = pe.Node(Function(function=MergeByExtendListElements,\n input_names=['t1s', 't2s',\n 'pds', 'fls',\n 'labels', 'posteriors'],\n output_names=['ListOfImagesDictionaries', 'registrationImageTypes',\n 'interpolationMapping']),\n run_without_submitting=True, name=\"99_MergeByExtendListElements\")\n\n template.connect([(subjectIterator, baselineDG, [('subject', 'subject')]),\n (subjectIterator, sessionsExtractorNode, [('subject','subject')]),\n (sessionsExtractorNode, baselineDG, [('sessions', 'session')]),\n (baselineDG, MergeByExtendListElementsNode, [('t1_average', 't1s'),\n ('t2_average', 't2s'),\n ('pd_average', 'pds'),\n ('fl_average', 'fls'),\n ('brainMaskLabels', 'labels'),\n (('posteriorImages', ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists), 'posteriors')])\n ])\n\n myInitAvgWF = pe.Node(interface=ants.AverageImages(), name='Atlas_antsSimpleAverage') # was 'Phase1_antsSimpleAverage'\n myInitAvgWF.inputs.dimension = 3\n myInitAvgWF.inputs.normalize = True\n template.connect(baselineDG, 't1_average', myInitAvgWF, \"images\")\n ####################################################################################################\n # TEMPLATE_BUILD_RUN_MODE = 'MULTI_IMAGE'\n # if numSessions == 1:\n # TEMPLATE_BUILD_RUN_MODE = 'SINGLE_IMAGE'\n ####################################################################################################\n buildTemplateIteration1 = registrationWF('iteration01')\n # buildTemplateIteration2 = buildTemplateIteration1.clone(name='buildTemplateIteration2')\n buildTemplateIteration2 = registrationWF('Iteration02')\n\n CreateAtlasXMLAndCleanedDeformedAveragesNode = pe.Node(interface=Function(function=CreateAtlasXMLAndCleanedDeformedAverages,\n input_names=['t1_image', 'deformed_list', 'AtlasTemplate', 'outDefinition'],\n output_names=['outAtlasFullPath', 'clean_deformed_list']),\n # This is a lot of work, so submit it run_without_submitting=True,\n run_without_submitting=True, # HACK: THIS NODE REALLY SHOULD RUN ON THE CLUSTER!\n name='99_CreateAtlasXMLAndCleanedDeformedAverages')\n\n if pipeline['plugin_name'].startswith('SGE'): # for some nodes, the qsub call needs to be modified on the cluster\n\n CreateAtlasXMLAndCleanedDeformedAveragesNode.plugin_args = {'template': pipeline['plugin_args']['template'],\n 'qsub_args': modify_qsub_args(cluster['queue'], '1000M', 1, 1),\n 'overwrite': True}\n for bt in [buildTemplateIteration1, buildTemplateIteration2]:\n ##################################################\n # *** Hans, is this TODO already addressed? 
*** #\n # ----> # TODO: Change these parameters <---- #\n ##################################################\n BeginANTS = bt.get_node(\"BeginANTS\")\n BeginANTS.plugin_args = {'template': pipeline['plugin_args']['template'], 'overwrite': True,\n 'qsub_args': modify_qsub_args(cluster['queue'], '9000M', 4, hard=False)}\n wimtdeformed = bt.get_node(\"wimtdeformed\")\n wimtdeformed.plugin_args = {'template': pipeline['plugin_args']['template'], 'overwrite': True,\n 'qsub_args': modify_qsub_args(cluster['queue'], '2000M', 1, 2)}\n AvgAffineTransform = bt.get_node(\"AvgAffineTransform\")\n AvgAffineTransform.plugin_args = {'template': pipeline['plugin_args']['template'], 'overwrite': True,\n 'qsub_args': modify_qsub_args(cluster['queue'], '2000M', 1)}\n wimtPassivedeformed = bt.get_node(\"wimtPassivedeformed\")\n wimtPassivedeformed.plugin_args = {'template': pipeline['plugin_args']['template'], 'overwrite': True,\n 'qsub_args': modify_qsub_args(cluster['queue'], '2000M', 1, 2)}\n\n # Running off previous baseline experiment\n NACCommonAtlas = MakeAtlasNode(experiment['atlascache'], 'NACCommonAtlas_{0}'.format('subject'), 'TemplateBuildSupport') ## HACK : replace 'subject' with subject id once this is a loop rather than an iterable.\n template.connect([(myInitAvgWF, buildTemplateIteration1, [('output_average_image', 'inputspec.fixed_image')]),\n (MergeByExtendListElementsNode, buildTemplateIteration1, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),\n ('registrationImageTypes', 'inputspec.registrationImageTypes'),\n ('interpolationMapping','inputspec.interpolationMapping')]),\n (buildTemplateIteration1, buildTemplateIteration2, [('outputspec.template', 'inputspec.fixed_image')]),\n (MergeByExtendListElementsNode, buildTemplateIteration2, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),\n ('registrationImageTypes','inputspec.registrationImageTypes'),\n ('interpolationMapping', 'inputspec.interpolationMapping')]),\n (subjectIterator, CreateAtlasXMLAndCleanedDeformedAveragesNode, [(('subject', xml_filename), 'outDefinition')]),\n (NACCommonAtlas, CreateAtlasXMLAndCleanedDeformedAveragesNode, [('ExtendedAtlasDefinition_xml_in', 'AtlasTemplate')]),\n (buildTemplateIteration2, CreateAtlasXMLAndCleanedDeformedAveragesNode, [('outputspec.template', 't1_image'),\n ('outputspec.passive_deformed_templates', 'deformed_list')]),\n ])\n\n # Create DataSinks\n SubjectAtlas_DataSink = pe.Node(nio.DataSink(), name=\"Subject_DS\")\n SubjectAtlas_DataSink.overwrite = pipeline['ds_overwrite']\n SubjectAtlas_DataSink.inputs.base_directory = experiment['resultdir']\n\n template.connect([(subjectIterator, SubjectAtlas_DataSink, [('subject', 'container')]),\n (CreateAtlasXMLAndCleanedDeformedAveragesNode, SubjectAtlas_DataSink, [('outAtlasFullPath', 'Atlas.@definitions')]),\n (CreateAtlasXMLAndCleanedDeformedAveragesNode, SubjectAtlas_DataSink, [('clean_deformed_list', 'Atlas.@passive_deformed_templates')]),\n\n (subjectIterator, SubjectAtlas_DataSink, [(('subject', outputPattern), 'regexp_substitutions')]),\n (buildTemplateIteration2, SubjectAtlas_DataSink, [('outputspec.template', 'Atlas.@template')]),\n ])\n\n dotfilename = argv['--dotfilename']\n if dotfilename is not None:\n print(\"WARNING: Printing workflow, but not running pipeline\")\n print_workflow(template, plugin=pipeline['plugin_name'], dotfilename=dotfilename)\n else:\n run_workflow(template, plugin=pipeline['plugin_name'], plugin_args=pipeline['plugin_args'])\n\nif __name__ == '__main__':\n import sys\n from 
AutoWorkup import setup\n\n    from docopt import docopt\n\n    argv = docopt(__doc__, version='1.1')\n    print(argv)\n    print('=' * 100)\n    configs = setup(argv)\n    from nipype import config as nipype_config\n    import nipype.pipeline.engine as pe\n    import nipype.interfaces.io as nio\n    from nipype.interfaces.utility import IdentityInterface, Function\n    import nipype.interfaces.ants as ants\n\n    from PipeLineFunctionHelpers import ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists\n    from workflows.atlasNode import MakeAtlasNode, CreateAtlasXMLAndCleanedDeformedAverages\n    from utilities.misc import GenerateSubjectOutputPattern as outputPattern\n    from utilities.distributed import modify_qsub_args\n    from workflows.utils import run_workflow, print_workflow\n    from BAWantsRegistrationBuildTemplate import BAWantsRegistrationTemplateBuildSingleIterationWF as registrationWF\n    from utilities.configFileParser import nipype_options\n\n    exit_code = _template_runner(argv, *configs)\n    sys.exit(exit_code)\n"
},
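The template-builder entry above drives everything through nipype: plain Python functions are wrapped as `Function` nodes and wired into a `Workflow` (the `MergeByExtendListElements` and `CreateAtlasXMLAndCleanedDeformedAverages` nodes follow this pattern). Below is a minimal, self-contained sketch of that pattern, assuming only that nipype is installed; the function, node, and workflow names are invented for illustration and are not part of BRAINSTools.

```python
# Minimal sketch of the nipype Function-node pattern used by the
# template-builder script above. All names here are illustrative.
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function, IdentityInterface

def double_value(x):
    # Function-node bodies must be self-contained; any imports they
    # need have to happen inside the function itself.
    return 2 * x

source = pe.Node(IdentityInterface(fields=['x']), name='source')
source.inputs.x = 21

doubler = pe.Node(Function(function=double_value,
                           input_names=['x'],
                           output_names=['out']),
                  name='doubler')

wf = pe.Workflow(name='function_node_demo')
wf.base_dir = '/tmp'  # scratch directory for node outputs
wf.connect(source, 'x', doubler, 'x')
wf.run()  # serial run; cluster runs pass plugin='SGE' etc. as above
```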
{
"alpha_fraction": 0.8062015771865845,
"alphanum_fraction": 0.8062015771865845,
"avg_line_length": 24.799999237060547,
"blob_id": "9ab1e83b303a5f3caa1129c924224880368f21a8",
"content_id": "64fc3f6bdd7fbd7a64b185742f7d5a2a95e07377",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 5,
"path": "/BRAINSSurfaceTools/BRAINSSurfaceRegister/QuadEdgeMeshSimilarity/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "StandardBRAINSBuildMacro(NAME QuadEdgeMeshSimilarity\n TARGET_LIBRARIES\n BRAINSCommonLib\n ${ITK_LIBRARIES}\n ${VTK_LIBRARIES})\n"
},
{
"alpha_fraction": 0.8084291219711304,
"alphanum_fraction": 0.8084291219711304,
"avg_line_length": 25,
"blob_id": "284127cd8db822e5b6009f3ec9fa58db8308b65c",
"content_id": "11c6b6d2142b4586937c3e554c2aaecdf8a5b1ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 10,
"path": "/BRAINSMultiSTAPLE/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "FindITKUtil(ITKImageCompare\n ITKLabelVoting\n ITKTestKernel\n )\nStandardBRAINSBuildMacro( NAME BRAINSMultiSTAPLE\n TARGET_LIBRARIES BRAINSCommonLib ${ITK_LIBRARIES} )\n\nif(BUILD_TESTING AND NOT Slicer_BUILD_BRAINSTOOLS)\n add_subdirectory(TestSuite)\nendif()\n\n"
},
{
"alpha_fraction": 0.7122806906700134,
"alphanum_fraction": 0.7128654718399048,
"avg_line_length": 21.786666870117188,
"blob_id": "b65e8d2b331bb2e1089f98b4cb8aab2970cac4fd",
"content_id": "9453fe45e2568ed7ba7f49252099addb06fe257e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1710,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 75,
"path": "/ICCDEF/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "\n##- project(iccdefRegistrationNew)\n\n#-----------------------------------------------------------------------------\n# Dependencies.\n#\n\n#\n# ITK\n#\nFindITKUtil(\n ITKImageCompare\n ITKFFT\n ITKConnectedComponents\n ITKMathematicalMorphology\n ITKBinaryMathematicalMorphology\n ITKRegionGrowing\n ITKPDEDeformableRegistration\n ITKRegionGrowing\n ITKSmoothing\n ITKDeprecated\n ITKRegistrationCommon\n ITKThresholding\n ITKImageFeature\n ITKV3Compatibility\n ITKSmoothing\n ITKLevelSets\n ITKReview\n # ITKDeprecated\n # ITKDisplacementField\n # ITKDistanceMap\n # ITKFFT\n # ITKIOImageBase\n # ITKIOMeta\n # ITKIOSpatialObjects\n # ITKIOTransformBase\n # ITKImageFeature\n # ITKImageFilterBase\n # ITKImageFunction\n # ITKImageGrid\n # ITKImageIntensity\n # ITKImageStatistics\n # ITKPDEDeformableRegistration\n # ITKRegistrationCommon\n # ITKReview\n # ITKSpatialObjects\n # ITKTransform\n)\n\n#-------------------------------------------------------------------------------\n# OUTPUT DIRECTORIES\n#\n##- include_directories(${COMMON_BRAINSCOMMONLIB_SOURCE_DIR})\n##- link_directories(${COMMON_BRAINSCOMMONLIB_BINARY_DIR})\n\nif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)\n set(CMAKE_INSTALL_PREFIX \"/opt/iccdefRegistration_New\" CACHE PATH \"Install path prefix,\n prepended onto install directories.\" FORCE)\nendif()\n\nset(ALL_PROGS_LIST\n AverageBrainGenerator\n CreateMask\n iccdefRegistration_New\n )\n\nset(ICCDEFLibraries BRAINSCommonLib ${ITK_LIBRARIES})\nDebugImageViewerLibAdditions(ICCDEFLibraries)\n\nforeach(prog ${ALL_PROGS_LIST})\n StandardBRAINSBuildMacro(NAME ${prog} TARGET_LIBRARIES ${ICCDEFLibraries})\nendforeach()\n\nif(BUILD_TESTING AND NOT Slicer_BUILD_BRAINSTOOLS)\n add_subdirectory(TestSuite)\nendif()\n"
},
{
"alpha_fraction": 0.4731707274913788,
"alphanum_fraction": 0.4731707274913788,
"avg_line_length": 19.5,
"blob_id": "b1d0a9308b49f0e4eb954aa3b9c73d33deef6a1a",
"content_id": "a29189d44a0f61dc20109d135de72338459c9ff0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 10,
"path": "/BRAINSDWICleanup/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#-----------------------------------------------------------------------------\n# Dependencies.\n#\n\n#\n# ITK\n#\nFindITKUtil()\n\nStandardBRAINSBuildMacro(NAME BRAINSDWICleanup TARGET_LIBRARIES ${ITK_LIBRARIES})\n"
},
{
"alpha_fraction": 0.7318611741065979,
"alphanum_fraction": 0.7318611741065979,
"avg_line_length": 20.133333206176758,
"blob_id": "23126f177583268532115edb59dae6861a6eaf8c",
"content_id": "e6c3935f7152ae5b5f4e7283e79e9713f17a4aea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/BRAINSSurfaceTools/CMakeLists.txt",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#-----------------------------------------------------------------------------\n# Dependencies.\n#\n\n#\n# ITK\n#\nFindITKUtil(\n ITKDeprecated\n ITKQuadEdgeMesh\n ITKOptimizers\n ITKQuadEdgeMeshFiltering\n)\nFindVTKUtil(vtkIOLegacy\n vtkIOXML\n vtkCommonDataModel\n vtkFiltersCore\n vtkFiltersGeneral\n vtkFiltersGeometry\n vtkImagingCore\n vtkFiltersExtraction\n )\n\ninclude_directories(${CMAKE_CURRENT_LIST_DIR}/BRAINSSurfaceCommon)\n\nadd_subdirectory(BRAINSAssignSurfaceFeatures)\nadd_subdirectory(BRAINSSurfaceFlattening)\nadd_subdirectory(BRAINSSurfaceGeneration)\nadd_subdirectory(BRAINSSurfaceRegister)\n#add_subdirectory(BRAINSSurfaceStat)\n"
},
{
"alpha_fraction": 0.521292507648468,
"alphanum_fraction": 0.5303680300712585,
"avg_line_length": 63.69032287597656,
"blob_id": "f2ba18b616e29324af8534540f9cc34e626b5176",
"content_id": "968dc4644ad5f889ef5f0461109434d6cf8d13ec",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10027,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 155,
"path": "/AutoWorkup/workflows/singleSubject.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "def RunSubjectWorkflow(args):\n \"\"\"\n .-----------.\n --- | Session 1 | ---> /project/subjectA/session1/phase/\n / *-----------*\n .-----------. /\n | Subject A | <\n *-----------* \\\n \\ .-----------.\n --- | Session 2 | ---> /project/subjectA/session2/phase/\n *-----------*\n **** Replaces WorkflowT1T2.py ****\n \"\"\"\n start_time, subject, master_config = args\n assert 'tissue_classify' in master_config['components'] or 'auxlmk' in master_config['components'] or 'segmentation' in master_config['components'], \"Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!\"\n import time\n\n from nipype import config, logging\n config.update_config(master_config) # Set universal pipeline options\n # DEBUG\n # config.enable_debug_mode()\n # config.set('execution', 'stop_on_first_rerun', 'true')\n # END DEBUG\n logging.update_logging(config)\n\n import nipype.pipeline.engine as pe\n import nipype.interfaces.base as nbase\n import nipype.interfaces.io as nio\n from nipype.interfaces.utility import IdentityInterface, Function\n import traits\n\n from baw_exp import OpenSubjectDatabase\n from SessionDB import SessionDB\n from PipeLineFunctionHelpers import convertToList\n from atlasNode import MakeAtlasNode\n from utilities.misc import GenerateSubjectOutputPattern as outputPattern\n from utilities.misc import GenerateWFName\n from utils import run_workflow, print_workflow\n\n # while time.time() < start_time:\n # time.sleep(start_time - time.time() + 1)\n # print \"Delaying start for {subject}\".format(subject=subject)\n # print(\"===================== SUBJECT: {0} ===========================\".format(subject))\n\n subjectWorkflow = pe.Workflow(name=\"BAW_StandardWorkup_subject_{0}\".format(subject))\n subjectWorkflow.base_dir = config.get('logging', 'log_directory')\n # subjectWorkflow.config['plugin_name'] = 'Linear' # Hardcodeded in WorkupT1T2.py - why?\n # DEBUG\n # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'\n # END DEBUG\n\n\n sessionWorkflow = dict()\n inputsSpec = dict()\n # To avoid a \"sqlite3.ProgrammingError: Base Cursor.__init__ not called\" error\n # using multiprocessing.map_async(), instantiate database here\n database = OpenSubjectDatabase(master_config['cachedir'], [subject], master_config['prefix'], master_config['dbfile'])\n # print database.getAllSessions()\n database.open_connection()\n\n sessions = database.getSessionsFromSubject(subject)\n print \"These are the sessions: \", sessions\n # TODO: atlas input csv read\n atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')\n from singleSession import create_singleSession as create_wkfl\n\n for session in sessions: # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()\n project = database.getProjFromSession(session)\n pname = \"{0}_singleSession\".format(session) # Long node names make graphs a pain to read/print\n # pname = GenerateWFName(project, subject, session, 'singleSession')\n print \"Building session pipeline for {0}\".format(session)\n inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),\n interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))\n inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])\n inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])\n inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])\n inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, 
['FL-15', 'FL-30'])\n        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])\n\n        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,\n                                               interpMode='Linear', pipeline_name=pname)\n\n        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),\n                                                                                  ('T2s', 'inputspec.T2s'),\n                                                                                  ('PDs', 'inputspec.PDs'),\n                                                                                  ('FLs', 'inputspec.FLs'),\n                                                                                  ('OTs', 'inputspec.OTHERs'),\n                                                                                  ]),\n                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',\n                                                                         'inputspec.atlasLandmarkFilename'),\n                                                                        ('template_weights_50Lmks_wts',\n                                                                         'inputspec.atlasWeightFilename'),\n                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),\n                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),\n                                 ])\n        if 'segmentation' in master_config['components']:\n            from WorkupT1T2BRAINSCut import GenerateWFName\n            try:\n                bCutInputName = \".\".join(['segmentation', GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])\n            except:\n                print project, subject, session\n                raise\n            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session],\n                                      [('hncma-atlas', 'segmentation.inputspec.hncma-atlas'),\n                                       ('template_t1', 'segmentation.inputspec.template_t1'),\n                                       ('template_t1', bCutInputName + '.template_t1'),\n                                       ('rho', bCutInputName + '.rho'),\n                                       ('phi', bCutInputName + '.phi'),\n                                       ('theta', bCutInputName + '.theta'),\n                                       ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),\n                                       ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),\n                                       ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),\n                                       ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),\n                                       ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),\n                                       ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),\n                                       ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),\n                                       ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),\n                                       ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),\n                                       ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),\n                                       ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),\n                                       ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),\n                                       ('trainModelFile_txtD0060NT0060_gz',\n                                        bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])\n        if True: # FIXME: current_phase == 'baseline':\n            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),\n                                                                            ('ExtendedAtlasDefinition_xml',\n                                                                             'inputspec.atlasDefinition')]),\n                                     ])\n        else:\n            template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],\n                                                            outfields=['template_t1', 'outAtlasFullPath']),\n                                  name='Template_DG')\n            template_DG.inputs.base_directory = master_config['previousresult']\n            template_DG.inputs.subject = subject\n            template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'\n            template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]\n            template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}\n            template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]\n            template_DG.inputs.sort_filelist = True\n            template_DG.inputs.raise_on_empty = True\n\n            subjectWorkflow.connect([(template_DG, sessionWorkflow[session], [('outAtlasFullPath', 'inputspec.atlasDefinition'),\n                                                                              ('template_t1', 'inputspec.template_t1')]),\n                                     ])\n        # HACK: only run first subject\n        break\n        # END HACK\n    if not True: # debugging switch: flip to print the workflow graph instead of running it\n        return print_workflow(subjectWorkflow,\n                              plugin=master_config['plugin_name'], dotfilename='subjectWorkflow') #, graph2use='flat')\n    try:\n        return subjectWorkflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])\n    except Exception:\n        return 1\n    #return run_workflow(subjectWorkflow, plugin=master_config['plugin_name'], plugin_args=master_config['plugin_args'])\n"
},
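`RunSubjectWorkflow` above builds one `IdentityInterface` inputspec node per session and keeps them in a dict so the session sub-workflows can be wired up in a loop. A minimal sketch of that pattern follows; the session ids and file paths are invented, and in the real code the lists come from the `SessionDB` queries.

```python
# Sketch of the per-session inputspec pattern from RunSubjectWorkflow above.
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

sessions = ['sess01', 'sess02']
t1s_by_session = {'sess01': ['/tmp/sess01_T1.nii.gz'],   # stand-in for the
                  'sess02': ['/tmp/sess02_T1.nii.gz']}   # SessionDB lookup

inputsSpec = dict()
for session in sessions:
    node = pe.Node(IdentityInterface(fields=['T1s', 'T2s']),
                   name='inputspec_{0}'.format(session))
    node.inputs.T1s = t1s_by_session[session]
    node.inputs.T2s = []  # empty when the session has no T2 scans
    inputsSpec[session] = node

print(inputsSpec['sess01'].inputs.T1s)
```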
{
"alpha_fraction": 0.6325390338897705,
"alphanum_fraction": 0.6420286893844604,
"avg_line_length": 58.275001525878906,
"blob_id": "b1afa6539e6e97ffceb56fa794116ecbc7104175",
"content_id": "bf200cde01cf80a04878c856d13f7ccf60862c3b",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9484,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 160,
"path": "/AutoWorkup/workflows/WorkupT1T2TissueClassify.py",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory\nfrom nipype.interfaces.base import traits, isdefined, BaseInterface\nfrom nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface\nimport nipype.interfaces.io as nio # Data i/o\nimport nipype.pipeline.engine as pe # pypeline engine\n\nfrom BRAINSABCext import *\nfrom utilities.misc import *\n\n\"\"\"\n from WorkupT1T2TissueClassify import CreateTissueClassifyWorkflow\n myLocalTCWF= CreateTissueClassifyWorkflow(\"TissueClassify\")\n tissueClassifyWF.connect( [ (uidSource, myLocalTCWF, [(('uid', getT1s, subjectDatabaseFile ), 'T1List')] ), ])\n tissueClassifyWF.connect( [ (uidSource, myLocalTCWF, [(('uid', getT2s, subjectDatabaseFile ), 'T2List')] ), ])\n tissueClassifyWF.connect( [ (uidSource, myLocalTCWF, [(('uid', getT1sLength, subjectDatabaseFile ), 'T1_count')] ), ])\n tissueClassifyWF.connect( BCD, 'outputResampledVolume', myLocalTCWF, 'PrimaryT1' )\n tissueClassifyWF.connect(BAtlas,'ExtendedAtlasDefinition.xml',myLocalTCWF,'atlasDefinition')\n tissueClassifyWF.connect(BLI,'outputTransformFilename',myLocalTCWF,'atlasToSubjectInitialTransform')\n\"\"\"\n\n\n\ndef getListIndexOrNoneIfOutOfRange(imageList, index):\n if index < len(imageList):\n return imageList[index]\n else:\n return None\n\n\ndef MakePosteriorDictionaryFunc(posteriorImages):\n from PipeLineFunctionHelpers import POSTERIORS\n if len(POSTERIORS) != len(posteriorImages):\n print \"ERROR: \", posteriorNames\n print \"ERROR: \", POSTERIORS\n return -1\n temp_dictionary = dict(zip(POSTERIORS, posteriorImages))\n return temp_dictionary\n\n\ndef CreateTissueClassifyWorkflow(WFname, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, InterpolationMode):\n tissueClassifyWF = pe.Workflow(name=WFname)\n\n inputsSpec = pe.Node(interface=IdentityInterface(fields=['T1List', 'T2List', 'PDList', 'FLList',\n 'OtherList', 'T1_count', 'PrimaryT1',\n 'atlasDefinition',\n 'atlasToSubjectInitialTransform']),\n run_without_submitting=True,\n name='inputspec')\n outputsSpec = pe.Node(interface=IdentityInterface(fields=['atlasToSubjectTransform',\n 'atlasToSubjectInverseTransform',\n 'outputLabels',\n 'outputHeadLabels', # ???\n #'t1_corrected', 't2_corrected',\n 't1_average',\n 't2_average',\n 'pd_average',\n 'fl_average',\n 'posteriorImages']),\n run_without_submitting=True,\n name='outputspec')\n\n ########################################################\n # Run BABCext on Multi-modal images\n ########################################################\n makeOutImageList = pe.Node(Function(function=MakeOutFileList,\n input_names=['T1List', 'T2List', 'PDList', 'FLList',\n 'OtherList','postfix','PrimaryT1'],\n output_names=['inImageList','outImageList','imageTypeList']),\n run_without_submitting=True, name=\"99_makeOutImageList\")\n tissueClassifyWF.connect(inputsSpec, 'T1List', makeOutImageList, 'T1List')\n tissueClassifyWF.connect(inputsSpec, 'T2List', makeOutImageList, 'T2List')\n tissueClassifyWF.connect(inputsSpec, 'PDList', makeOutImageList, 'PDList')\n tissueClassifyWF.connect(inputsSpec, 'PrimaryT1', makeOutImageList, 'PrimaryT1')\n makeOutImageList.inputs.FLList = [] # an emptyList HACK\n makeOutImageList.inputs.postfix = \"_corrected.nii.gz\"\n # HACK tissueClassifyWF.connect( inputsSpec, 'FLList', makeOutImageList, 'FLList' )\n tissueClassifyWF.connect(inputsSpec, 'OtherList', makeOutImageList, 'OtherList')\n\n\n BABCext = pe.Node(interface=BRAINSABCext(), name=\"BABC\")\n 
many_cpu_BABC_options_dictionary = {'qsub_args': '-S /bin/bash -pe smp 4- -l h_vmem=23G,mem_free=8G -o /dev/null -e /dev/null ' + CLUSTER_QUEUE, 'overwrite': True}\n BABCext.plugin_args = many_cpu_BABC_options_dictionary\n tissueClassifyWF.connect(makeOutImageList, 'inImageList', BABCext, 'inputVolumes')\n tissueClassifyWF.connect(makeOutImageList, 'imageTypeList', BABCext, 'inputVolumeTypes')\n tissueClassifyWF.connect(makeOutImageList, 'outImageList', BABCext, 'outputVolumes')\n BABCext.inputs.debuglevel = 0\n BABCext.inputs.maxIterations = 3\n BABCext.inputs.maxBiasDegree = 4\n BABCext.inputs.filterIteration = 3\n BABCext.inputs.filterMethod = 'GradientAnisotropicDiffusion'\n BABCext.inputs.atlasToSubjectTransformType = 'SyN'\n # BABCext.inputs.atlasToSubjectTransformType = 'BSpline'\n # BABCext.inputs.gridSize = [28,20,24]\n BABCext.inputs.gridSize = [10, 10, 10]\n BABCext.inputs.outputFormat = \"NIFTI\"\n BABCext.inputs.outputLabels = \"brain_label_seg.nii.gz\"\n BABCext.inputs.outputDirtyLabels = \"volume_label_seg.nii.gz\"\n BABCext.inputs.posteriorTemplate = \"POSTERIOR_%s.nii.gz\"\n BABCext.inputs.atlasToSubjectTransform = \"atlas_to_subject.h5\"\n # BABCext.inputs.implicitOutputs = ['t1_average_BRAINSABC.nii.gz', 't2_average_BRAINSABC.nii.gz']\n BABCext.inputs.interpolationMode = InterpolationMode\n BABCext.inputs.outputDir = './'\n\n tissueClassifyWF.connect(inputsSpec, 'atlasDefinition', BABCext, 'atlasDefinition')\n tissueClassifyWF.connect(inputsSpec, 'atlasToSubjectInitialTransform', BABCext, 'atlasToSubjectInitialTransform')\n \"\"\"\n Get the first T1 and T2 corrected images from BABCext\n \"\"\"\n\n \"\"\" HACK: THIS IS NOT NEEDED! We should use the averged t1 and averaged t2 images instead!\n def get_first_T1_and_T2(in_files,T1_count):\n '''\n Returns the first T1 and T2 file in in_files, based on offset in T1_count.\n '''\n return in_files[0],in_files[T1_count]\n bfc_files = pe.Node(Function(input_names=['in_files','T1_count'],\n output_names=['t1_corrected','t2_corrected'],\n function=get_first_T1_and_T2), run_without_submitting=True, name='99_bfc_files' )\n tissueClassifyWF.connect( inputsSpec, 'T1_count', bfc_files, 'T1_count')\n tissueClassifyWF.connect(BABCext,'outputVolumes',bfc_files, 'in_files')\n\n\n tissueClassifyWF.connect(bfc_files,'t1_corrected',outputsSpec,'t1_corrected')\n tissueClassifyWF.connect(bfc_files,'t2_corrected',outputsSpec,'t2_corrected')\n #tissueClassifyWF.connect(bfc_files,'pd_corrected',outputsSpec,'pd_corrected')\n #tissueClassifyWF.connect(bfc_files,'fl_corrected',outputsSpec,'fl_corrected')\n\n \"\"\"\n\n #############\n tissueClassifyWF.connect(BABCext, 'atlasToSubjectTransform', outputsSpec, 'atlasToSubjectTransform')\n\n def MakeInverseTransformFileName(TransformFileName):\n \"\"\"### HACK: This function is to work around a deficiency in BRAINSABCext where the inverse transform name is not being computed properly\n in the list outputs\"\"\"\n fixed_inverse_name = TransformFileName.replace(\".h5\", \"_Inverse.h5\")\n return [fixed_inverse_name]\n\n tissueClassifyWF.connect([(BABCext, outputsSpec, [(('atlasToSubjectTransform', MakeInverseTransformFileName), \"atlasToSubjectInverseTransform\")]), ])\n tissueClassifyWF.connect(BABCext, 'outputLabels', outputsSpec, 'outputLabels')\n tissueClassifyWF.connect(BABCext, 'outputDirtyLabels', outputsSpec, 'outputHeadLabels')\n\n tissueClassifyWF.connect(BABCext, 'outputT1AverageImage', outputsSpec, 't1_average')\n tissueClassifyWF.connect(BABCext, 'outputT2AverageImage', outputsSpec, 
't2_average')\n tissueClassifyWF.connect(BABCext, 'outputPDAverageImage', outputsSpec, 'pd_average')\n tissueClassifyWF.connect(BABCext, 'outputFLAverageImage', outputsSpec, 'fl_average')\n ## remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 0 ), \"t1_average\")] ), ] )\n ## remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 1 ), \"t2_average\")] ), ] )\n ## remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 2 ), \"pd_average\")] ), ] )\n\n MakePosteriorDictionaryNode = pe.Node(Function(function=MakePosteriorDictionaryFunc,\n input_names=['posteriorImages'],\n output_names=['posteriorDictionary']), run_without_submitting=True, name=\"99_makePosteriorDictionary\")\n tissueClassifyWF.connect(BABCext, 'posteriorImages', MakePosteriorDictionaryNode, 'posteriorImages')\n\n tissueClassifyWF.connect(MakePosteriorDictionaryNode, 'posteriorDictionary', outputsSpec, 'posteriorImages')\n\n return tissueClassifyWF\n"
},
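`CreateTissueClassifyWorkflow` above relies on nipype's function-in-connect idiom, a tuple of the form `(('port', fn), 'target')`, to rewrite the transform filename on the fly (see `MakeInverseTransformFileName`). A small runnable sketch of just that idiom, with invented node names and an invented filename:

```python
# Sketch of the function-in-connect idiom: nipype passes the value of 'xfrm'
# through make_inverse_name before delivering it to 'inverse_xfrm'.
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

def make_inverse_name(transform_file):
    # Mirrors the naming convention used by the workflow above.
    return transform_file.replace('.h5', '_Inverse.h5')

src = pe.Node(IdentityInterface(fields=['xfrm']), name='src')
src.inputs.xfrm = 'atlas_to_subject.h5'
dst = pe.Node(IdentityInterface(fields=['inverse_xfrm']), name='dst')

wf = pe.Workflow(name='connect_fn_demo')
wf.base_dir = '/tmp'
wf.connect([(src, dst, [(('xfrm', make_inverse_name), 'inverse_xfrm')])])
wf.run()
```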
{
"alpha_fraction": 0.659923255443573,
"alphanum_fraction": 0.6650514602661133,
"avg_line_length": 38.12322235107422,
"blob_id": "78d0fcc999edb529b1d1ddd8ab64a226dbb3ce1e",
"content_id": "0a8cdddd79b4c62e5d2c34e5d4ed2ec18faa4ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 24765,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 633,
"path": "/BRAINSCommonLib/BRAINSFitHelper.cxx",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "/*=========================================================================\n *\n * Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering\n * The University of Iowa\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *=========================================================================*/\n\n#include \"BRAINSFitUtils.h\"\n#include \"BRAINSFitHelper.h\"\n\n#include \"genericRegistrationHelper.h\"\n#include \"itkCorrelationImageToImageMetricv4.h\"\n#include \"itkMeanSquaresImageToImageMetricv4.h\"\n#include \"itkKullbackLeiblerCompareHistogramImageToImageMetric.h\"\n#include \"itkHistogramImageToImageMetric.h\"\n#include \"itkKappaStatisticImageToImageMetric.h\"\n#include \"itkMeanReciprocalSquareDifferenceImageToImageMetric.h\"\n#include \"itkJointHistogramMutualInformationImageToImageMetricv4.h\"\n#include \"itkGradientDifferenceImageToImageMetric.h\"\n#include \"itkCompareHistogramImageToImageMetric.h\"\n#include \"itkCorrelationCoefficientHistogramImageToImageMetric.h\"\n#include \"itkMatchCardinalityImageToImageMetric.h\"\n#include \"itkMeanSquaresHistogramImageToImageMetric.h\"\n#include \"itkBinaryThresholdImageFilter.h\"\n#include \"itkNormalizedMutualInformationHistogramImageToImageMetric.h\"\n\n#include <algorithm>\n\n// A little dummy function to make it easy to stop the debugger.\nvoid debug_catch(void)\n{\n std::cout << \"HERE\" << __FILE__ << \" \" << __LINE__ << std::endl;\n\n return;\n}\n\n// convert spatial object to image\nMaskImageType::ConstPointer\nExtractConstPointerToImageMaskFromImageSpatialObject( SpatialObjectType::ConstPointer inputSpatialObject )\n{\n ImageMaskSpatialObjectType const * const temp =\n dynamic_cast<ImageMaskSpatialObjectType const *>( inputSpatialObject.GetPointer() );\n\n if( temp == NULL )\n {\n itkGenericExceptionMacro(<< \"Invalid mask converstion attempted.\");\n }\n ImageMaskSpatialObjectType::ConstPointer ImageMask( temp );\n const MaskImageType::ConstPointer tempOutputVolumeROI = ImageMask->GetImage();\n return tempOutputVolumeROI;\n}\n\n// convert image to mask (spatial object)\nSpatialObjectType::ConstPointer\nConvertMaskImageToSpatialMask( MaskImageType::ConstPointer inputImage )\n{\n ImageMaskSpatialObjectType::Pointer mask = ImageMaskSpatialObjectType::New();\n mask->SetImage(inputImage);\n mask->ComputeObjectToWorldTransform();\n // return pointer to mask\n SpatialObjectType::Pointer p = dynamic_cast<SpatialObjectType *>( mask.GetPointer() );\n if( p.IsNull() )\n {\n itkGenericExceptionMacro(<< \"Failed conversion to Mask\");\n }\n\n SpatialObjectType::ConstPointer objectMask(p);\n return objectMask;\n}\n\nnamespace itk\n{\nBRAINSFitHelper::BRAINSFitHelper() :\n m_FixedVolume(NULL),\n m_MovingVolume(NULL),\n m_PreprocessedMovingVolume(NULL),\n m_FixedBinaryVolume(NULL),\n m_MovingBinaryVolume(NULL),\n m_OutputFixedVolumeROI(\"\"),\n m_OutputMovingVolumeROI(\"\"),\n m_PermitParameterVariation(0),\n m_SamplingPercentage(1.0), // instead or number of samples, sampling% 
should be used that is a number between 0 and 1.\n m_NumberOfHistogramBins(50),\n m_HistogramMatch(false),\n m_RemoveIntensityOutliers(0.00),\n m_NumberOfMatchPoints(10),\n m_NumberOfIterations(1, 1500),\n m_MaximumStepLength(0.2),\n m_MinimumStepLength(1, 0.005),\n m_RelaxationFactor(0.5),\n m_TranslationScale(1000.0),\n m_ReproportionScale(1.0),\n m_SkewScale(1.0),\n m_UseCachingOfBSplineWeightsMode(\"ON\"),\n m_BackgroundFillValue(0.0),\n m_TransformType(1, \"Rigid\"),\n m_InitializeTransformMode(\"Off\"),\n m_MaskInferiorCutOffFromCenter(1000),\n m_SplineGridSize(3, 10),\n m_CostFunctionConvergenceFactor(1e+9),\n m_ProjectedGradientTolerance(1e-5),\n m_MaxBSplineDisplacement(0.0),\n m_ActualNumberOfIterations(0),\n m_PermittedNumberOfIterations(0),\n // m_AccumulatedNumberOfIterationsForAllLevels(0),\n m_DebugLevel(0),\n m_CurrentGenericTransform(NULL),\n //m_GenericTransformList(0),\n m_DisplayDeformedImage(false),\n m_PromptUserAfterDisplay(false),\n m_FinalMetricValue(0.0),\n m_ObserveIterations(true),\n m_CostMetric(\"MMI\"), // Default to Mattes Mutual Information Metric\n m_UseROIBSpline(false),\n m_Helper(NULL),\n m_SamplingStrategy(AffineRegistrationType::NONE),\n m_NormalizeInputImages(false),\n m_InitializeRegistrationByCurrentGenericTransform(true),\n m_MaximumNumberOfEvaluations(900),\n m_MaximumNumberOfCorrections(12),\n m_ForceMINumberOfThreads(-1)\n{\n m_SplineGridSize[0] = 14;\n m_SplineGridSize[1] = 10;\n m_SplineGridSize[2] = 12;\n}\n\n/*\nThis function returns a normalized image with values between 0 and 1.\nHACK: parameters are hard coded but some of them should be passed by flags.\n*/\ntemplate <typename ImageType>\ntypename ImageType::Pointer\nNormalizeImage(typename ImageType::Pointer inputImage)\n{\n typedef itk::Statistics::ImageToHistogramFilter<ImageType> HistogramFilterType;\n typedef typename HistogramFilterType::InputBooleanObjectType InputBooleanObjectType;\n typedef typename HistogramFilterType::HistogramSizeType HistogramSizeType;\n\n HistogramSizeType histogramSize( 1 );\n histogramSize[0] = 256;\n\n typename InputBooleanObjectType::Pointer autoMinMaxInputObject = InputBooleanObjectType::New();\n autoMinMaxInputObject->Set( true );\n\n typename HistogramFilterType::Pointer histogramFilter = HistogramFilterType::New();\n histogramFilter->SetInput( inputImage );\n histogramFilter->SetAutoMinimumMaximumInput( autoMinMaxInputObject );\n histogramFilter->SetHistogramSize( histogramSize );\n histogramFilter->SetMarginalScale( 10.0 );\n histogramFilter->Update();\n\n float lowerValue = histogramFilter->GetOutput()->Quantile( 0, 0 );\n float upperValue = histogramFilter->GetOutput()->Quantile( 0, 1 );\n\n typedef itk::IntensityWindowingImageFilter<ImageType, ImageType> IntensityWindowingImageFilterType;\n typename IntensityWindowingImageFilterType::Pointer windowingFilter = IntensityWindowingImageFilterType::New();\n windowingFilter->SetInput( inputImage );\n windowingFilter->SetWindowMinimum( lowerValue );\n windowingFilter->SetWindowMaximum( upperValue );\n windowingFilter->SetOutputMinimum( 0 );\n windowingFilter->SetOutputMaximum( 1 );\n windowingFilter->Update();\n\n typename ImageType::Pointer outputImage = NULL;\n outputImage = windowingFilter->GetOutput();\n outputImage->Update();\n outputImage->DisconnectPipeline();\n\n return outputImage;\n}\n\nvoid\nBRAINSFitHelper::Update(void)\n{\n // Do remove intensity outliers if requested\n if( m_RemoveIntensityOutliers > vcl_numeric_limits<float>::epsilon() )\n {\n this->m_FixedVolume = 
ClampNoisyTailsOfImage<FixedImageType, FixedBinaryVolumeType>(\n m_RemoveIntensityOutliers, this->m_FixedVolume.GetPointer(), this->m_FixedBinaryVolume.GetPointer() );\n this->m_PreprocessedMovingVolume = ClampNoisyTailsOfImage<MovingImageType, MovingBinaryVolumeType>(\n m_RemoveIntensityOutliers, this->m_MovingVolume.GetPointer(), this->m_MovingBinaryVolume.GetPointer() );\n {\n if( this->m_DebugLevel > 9 )\n {\n {\n typedef itk::ImageFileWriter<FixedImageType> WriterType;\n WriterType::Pointer writer = WriterType::New();\n writer->UseCompressionOn();\n writer->SetFileName(\"DEBUGNormalizedFixedVolume.nii.gz\");\n writer->SetInput(this->m_FixedVolume);\n try\n {\n writer->Update();\n }\n catch( itk::ExceptionObject & err )\n {\n std::cout << \"Exception Object caught: \" << std::endl;\n std::cout << err << std::endl;\n throw;\n }\n }\n {\n typedef itk::ImageFileWriter<MovingImageType> WriterType;\n WriterType::Pointer writer = WriterType::New();\n writer->UseCompressionOn();\n writer->SetFileName(\"DEBUGNormalizedMovingVolume.nii.gz\");\n writer->SetInput(this->m_PreprocessedMovingVolume);\n try\n {\n writer->Update();\n }\n catch( itk::ExceptionObject & err )\n {\n std::cout << \"Exception Object caught: \" << std::endl;\n std::cout << err << std::endl;\n throw;\n }\n }\n }\n }\n }\n else\n {\n this->m_PreprocessedMovingVolume = this->m_MovingVolume;\n }\n\n // Do Histogram equalization on moving image if requested.\n if( m_HistogramMatch )\n {\n typedef itk::OtsuHistogramMatchingImageFilter<FixedImageType, MovingImageType> HistogramMatchingFilterType;\n HistogramMatchingFilterType::Pointer histogramfilter = HistogramMatchingFilterType::New();\n\n // TODO: Regina: Write various histogram matching specializations and\n // compare them.\n // histogramfilter->SetForegroundMode(\"Otsu\"); // A simple Otsu threshold\n // for each image .... 
BUT BE CAREFUL, need to do some quantile checking for\n // degenerate images\n // histogramfilter->SetForegroundMode(\"Simple\"); // A simple average value\n // of the image should be used\n // histogramfilter->SetForegroundMode(\"Quantile\"); // Only values between\n // the 25th and 66th quantile should be used.\n // histogramfilter->SetForegroundMode(\"Masks\"); // Foreground is\n // specifically defined by masks.\n\n histogramfilter->SetReferenceImage(this->m_FixedVolume);\n if( this->m_FixedBinaryVolume.IsNull() )\n {\n itkGenericExceptionMacro(<< \"ERROR: Histogram matching requires a fixed mask.\");\n }\n histogramfilter->SetReferenceMask( m_FixedBinaryVolume.GetPointer() );\n histogramfilter->SetInput(this->m_PreprocessedMovingVolume);\n if( this->m_MovingBinaryVolume.IsNull() )\n {\n itkGenericExceptionMacro(<< \"ERROR: Histogram matching requires a moving mask.\");\n }\n histogramfilter->SetSourceMask( m_MovingBinaryVolume.GetPointer() );\n histogramfilter->SetNumberOfHistogramLevels(this->m_NumberOfHistogramBins);\n histogramfilter->SetNumberOfMatchPoints(this->m_NumberOfMatchPoints);\n histogramfilter->Update();\n this->m_PreprocessedMovingVolume = histogramfilter->GetOutput();\n if( this->m_DebugLevel > 5 )\n {\n typedef itk::ImageFileWriter<MovingImageType> WriterType;\n WriterType::Pointer writer = WriterType::New();\n writer->UseCompressionOn();\n writer->SetFileName(\"DEBUGHISTOGRAMMATCHEDMOVING.nii.gz\");\n writer->SetInput(this->m_PreprocessedMovingVolume);\n try\n {\n writer->Update();\n }\n catch( itk::ExceptionObject & err )\n {\n std::cout << \"Exception Object caught: \" << std::endl;\n std::cout << err << std::endl;\n throw;\n }\n }\n }\n else\n {\n this->m_PreprocessedMovingVolume = this->m_MovingVolume;\n }\n\n if( m_NormalizeInputImages )\n {\n this->m_FixedVolume = NormalizeImage< FixedImageType >( this->m_FixedVolume );\n this->m_PreprocessedMovingVolume = NormalizeImage< MovingImageType >( this->m_PreprocessedMovingVolume );\n }\n\n const bool gradientfilter = false;\n\n GenericMetricType::Pointer metric;\n if( this->m_CostMetric == \"MMI\" )\n {\n typedef itk::MattesMutualInformationImageToImageMetricv4<FixedImageType, MovingImageType, FixedImageType, double> MIMetricType;\n MIMetricType::Pointer mutualInformationMetric = MIMetricType::New();\n //The next line was a hack for early ITKv4 mattes mutual informaiton\n //that was using a lot of memory\n //mutualInformationMetric->SetMaximumNumberOfThreads(std::min( 3U,itk::MultiThreader::GetGlobalDefaultNumberOfThreads() ) );\n mutualInformationMetric = mutualInformationMetric;\n mutualInformationMetric->SetNumberOfHistogramBins( this->m_NumberOfHistogramBins );\n mutualInformationMetric->SetUseMovingImageGradientFilter( gradientfilter );\n mutualInformationMetric->SetUseFixedImageGradientFilter( gradientfilter );\n mutualInformationMetric->SetUseFixedSampledPointSet( false );\n metric = mutualInformationMetric;\n\n this->SetupRegistration< MIMetricType >(metric);\n this->RunRegistration< MIMetricType >();\n }\n else if( this->m_CostMetric == \"MSE\" )\n {\n typedef itk::MeanSquaresImageToImageMetricv4<FixedImageType, MovingImageType, FixedImageType, double> MSEMetricType;\n MSEMetricType::Pointer meanSquareMetric = MSEMetricType::New();\n meanSquareMetric = meanSquareMetric;\n metric = meanSquareMetric;\n\n this->SetupRegistration< MSEMetricType >(metric);\n this->RunRegistration< MSEMetricType >();\n }\n else if( this->m_CostMetric == \"NC\" )\n {\n typedef itk::CorrelationImageToImageMetricv4<FixedImageType, 
MovingImageType, FixedImageType, double> corrMetricType;\n corrMetricType::Pointer corrMetric = corrMetricType::New();\n metric = corrMetric;\n\n this->SetupRegistration< corrMetricType >(metric);\n this->RunRegistration< corrMetricType >();\n }\n else if( this->m_CostMetric == \"MIH\" )\n {\n typedef itk::JointHistogramMutualInformationImageToImageMetricv4<FixedImageType, MovingImageType, FixedImageType, double> MutualInformationMetricType;\n MutualInformationMetricType::Pointer mutualInformationMetric = MutualInformationMetricType::New();\n mutualInformationMetric = mutualInformationMetric;\n mutualInformationMetric->SetNumberOfHistogramBins( this->m_NumberOfHistogramBins );\n mutualInformationMetric->SetUseMovingImageGradientFilter( gradientfilter );\n mutualInformationMetric->SetUseFixedImageGradientFilter( gradientfilter );\n mutualInformationMetric->SetUseFixedSampledPointSet( false );\n mutualInformationMetric->SetVarianceForJointPDFSmoothing( 1.0 );\n metric = mutualInformationMetric;\n\n this->SetupRegistration< MutualInformationMetricType >(metric);\n this->RunRegistration< MutualInformationMetricType >();\n }\n else\n {\n std::cout << \"Metric \\\"\" << this->m_CostMetric << \"\\\" not valid!\" << std::endl;\n }\n}\n\nvoid\nBRAINSFitHelper::PrintSelf(std::ostream & os, Indent indent) const\n{\n // Superclass::PrintSelf(os,indent);\n os << indent << \"FixedVolume:\\n\" << this->m_FixedVolume << std::endl;\n os << indent << \"MovingVolume:\\n\" << this->m_MovingVolume << std::endl;\n os << indent << \"PreprocessedMovingVolume:\\n\" << this->m_PreprocessedMovingVolume << std::endl;\n if( this->m_FixedBinaryVolume.IsNotNull() )\n {\n os << indent << \"FixedBinaryVolume:\\n\" << this->m_FixedBinaryVolume << std::endl;\n }\n else\n {\n os << indent << \"FixedBinaryVolume: IS NULL\" << std::endl;\n }\n if( this->m_MovingBinaryVolume.IsNotNull() )\n {\n os << indent << \"MovingBinaryVolume:\\n\" << this->m_MovingBinaryVolume << std::endl;\n }\n else\n {\n os << indent << \"MovingBinaryVolume: IS NULL\" << std::endl;\n }\n os << indent << \"SamplingPercentage: \" << this->m_SamplingPercentage << std::endl;\n\n os << indent << \"NumberOfIterations: [\";\n for( unsigned int q = 0; q < this->m_NumberOfIterations.size(); ++q )\n {\n os << this->m_NumberOfIterations[q] << \" \";\n }\n os << \"]\" << std::endl;\n os << indent << \"NumberOfHistogramBins:\" << this->m_NumberOfHistogramBins << std::endl;\n os << indent << \"MaximumStepLength: \" << this->m_MaximumStepLength << std::endl;\n os << indent << \"MinimumStepLength: [\";\n for( unsigned int q = 0; q < this->m_MinimumStepLength.size(); ++q )\n {\n os << this->m_MinimumStepLength[q] << \" \";\n }\n os << \"]\" << std::endl;\n os << indent << \"TransformType: [\";\n for( unsigned int q = 0; q < this->m_TransformType.size(); ++q )\n {\n os << this->m_TransformType[q] << \" \";\n }\n os << \"]\" << std::endl;\n\n os << indent << \"RelaxationFactor: \" << this->m_RelaxationFactor << std::endl;\n os << indent << \"TranslationScale: \" << this->m_TranslationScale << std::endl;\n os << indent << \"ReproportionScale: \" << this->m_ReproportionScale << std::endl;\n os << indent << \"SkewScale: \" << this->m_SkewScale << std::endl;\n os << indent << \"UseCachingOfBSplineWeightsMode: \" << this->m_UseCachingOfBSplineWeightsMode << std::endl;\n os << indent << \"BackgroundFillValue: \" << this->m_BackgroundFillValue << std::endl;\n os << indent << \"InitializeTransformMode: \" << this->m_InitializeTransformMode << std::endl;\n os << indent << 
\"MaskInferiorCutOffFromCenter: \" << this->m_MaskInferiorCutOffFromCenter << std::endl;\n os << indent << \"ActualNumberOfIterations: \" << this->m_ActualNumberOfIterations << std::endl;\n os << indent << \"PermittedNumberOfIterations: \" << this->m_PermittedNumberOfIterations << std::endl;\n\n os << indent << \"SplineGridSize: [\";\n for( unsigned int q = 0; q < this->m_SplineGridSize.size(); ++q )\n {\n os << this->m_SplineGridSize[q] << \" \";\n }\n os << \"]\" << std::endl;\n\n os << indent << \"PermitParameterVariation: [\";\n for( unsigned int q = 0; q < this->m_PermitParameterVariation.size(); ++q )\n {\n os << this->m_PermitParameterVariation[q] << \" \";\n }\n os << \"]\" << std::endl;\n\n if( m_CurrentGenericTransform.IsNotNull() )\n {\n os << indent << \"CurrentGenericTransform:\\n\" << this->m_CurrentGenericTransform << std::endl;\n }\n else\n {\n os << indent << \"CurrentGenericTransform: IS NULL\" << std::endl;\n }\n os << indent << \"CostMetric: \" << this->m_CostMetric << std::endl;\n}\n\nvoid\nBRAINSFitHelper::PrintCommandLine(const bool dumpTempVolumes, const std::string & suffix) const\n{\n std::cout << \"The equivalent command line to the current run would be:\" << std::endl;\n\n const std::string fixedVolumeString(\"DEBUGFixedVolume_\" + suffix + \".nii.gz\");\n const std::string movingVolumeString(\"DEBUGMovingVolume_\" + suffix + \".nii.gz\");\n const std::string fixedBinaryVolumeString(\"DEBUGFixedBinaryVolume_\" + suffix + \".nii.gz\");\n const std::string movingBinaryVolumeString(\"DEBUGMovingBinaryVolume_\" + suffix + \".nii.gz\");\n\n std::ostringstream oss;\n\n oss << \"BRAINSFit \\\\\" << std::endl;\n if( dumpTempVolumes == true )\n {\n {\n typedef itk::ImageFileWriter<FixedImageType> WriterType;\n WriterType::Pointer writer = WriterType::New();\n writer->UseCompressionOn();\n writer->SetFileName(fixedVolumeString);\n writer->SetInput(this->m_FixedVolume);\n try\n {\n writer->Update();\n }\n catch( itk::ExceptionObject & err )\n {\n oss << \"Exception Object caught: \" << std::endl;\n oss << err << std::endl;\n throw;\n }\n }\n {\n typedef itk::ImageFileWriter<MovingImageType> WriterType;\n WriterType::Pointer writer = WriterType::New();\n writer->UseCompressionOn();\n writer->SetFileName(movingVolumeString);\n writer->SetInput(this->m_MovingVolume);\n try\n {\n writer->Update();\n }\n catch( itk::ExceptionObject & err )\n {\n oss << \"Exception Object caught: \" << std::endl;\n oss << err << std::endl;\n throw;\n }\n }\n }\n oss << \"--costMetric \" << this->m_CostMetric << \" \\\\\" << std::endl;\n oss << \"--fixedVolume \" << fixedVolumeString << \" \\\\\" << std::endl;\n oss << \"--movingVolume \" << movingVolumeString << \" \\\\\" << std::endl;\n if( this->m_HistogramMatch )\n {\n oss << \"--histogramMatch \" << \" \\\\\" << std::endl;\n }\n\n {\n if( this->m_FixedBinaryVolume.IsNotNull() )\n {\n oss << \"--fixedBinaryVolume \" << fixedBinaryVolumeString << \" \\\\\" << std::endl;\n {\n {\n const MaskImageType::ConstPointer tempOutputFixedVolumeROI =\n ExtractConstPointerToImageMaskFromImageSpatialObject(m_FixedBinaryVolume.GetPointer() );\n itkUtil::WriteConstImage<MaskImageType>(tempOutputFixedVolumeROI.GetPointer(), fixedBinaryVolumeString);\n }\n }\n }\n if( this->m_MovingBinaryVolume.IsNotNull() )\n {\n oss << \"--movingBinaryVolume \" << movingBinaryVolumeString << \" \\\\\" << std::endl;\n {\n {\n const MaskImageType::ConstPointer tempOutputMovingVolumeROI =\n ExtractConstPointerToImageMaskFromImageSpatialObject(m_MovingBinaryVolume.GetPointer() 
);\n itkUtil::WriteConstImage<MaskImageType>(tempOutputMovingVolumeROI.GetPointer(), movingBinaryVolumeString);\n }\n }\n }\n if( this->m_FixedBinaryVolume.IsNotNull() || this->m_MovingBinaryVolume.IsNotNull() )\n {\n oss << \"--maskProcessingMode ROI \" << \" \\\\\" << std::endl;\n }\n }\n oss << \"--samplingPercentage \" << this->m_SamplingPercentage << \" \\\\\" << std::endl;\n\n oss << \"--numberOfIterations \";\n for( unsigned int q = 0; q < this->m_NumberOfIterations.size(); ++q )\n {\n oss << this->m_NumberOfIterations[q];\n if( q < this->m_NumberOfIterations.size() - 1 )\n {\n oss << \",\";\n }\n }\n oss << \" \\\\\" << std::endl;\n oss << \"--numberOfHistogramBins \" << this->m_NumberOfHistogramBins << \" \\\\\" << std::endl;\n oss << \"--maximumStepLength \" << this->m_MaximumStepLength << \" \\\\\" << std::endl;\n oss << \"--minimumStepLength \";\n for( unsigned int q = 0; q < this->m_MinimumStepLength.size(); ++q )\n {\n oss << this->m_MinimumStepLength[q];\n if( q < this->m_MinimumStepLength.size() - 1 )\n {\n oss << \",\";\n }\n }\n oss << \" \\\\\" << std::endl;\n oss << \"--transformType \";\n for( unsigned int q = 0; q < this->m_TransformType.size(); ++q )\n {\n oss << this->m_TransformType[q];\n if( q < this->m_TransformType.size() - 1 )\n {\n oss << \",\";\n }\n }\n oss << \" \\\\\" << std::endl;\n\n oss << \"--relaxationFactor \" << this->m_RelaxationFactor << \" \\\\\" << std::endl;\n oss << \"--translationScale \" << this->m_TranslationScale << \" \\\\\" << std::endl;\n oss << \"--reproportionScale \" << this->m_ReproportionScale << \" \\\\\" << std::endl;\n oss << \"--skewScale \" << this->m_SkewScale << \" \\\\\" << std::endl;\n oss << \"--useCachingOfBSplineWeightsMode \" << this->m_UseCachingOfBSplineWeightsMode << \" \\\\\" << std::endl;\n oss << \"--maxBSplineDisplacement \" << this->m_MaxBSplineDisplacement << \" \\\\\" << std::endl;\n oss << \"--projectedGradientTolerance \" << this->m_ProjectedGradientTolerance << \" \\\\\" << std::endl;\n oss << \"--MaximumNumberOfEvaluations \" << this->m_MaximumNumberOfEvaluations << \" \\\\\" << std::endl;\n oss << \"--MaximumNumberOfCorrections \" << this->m_MaximumNumberOfCorrections << \" \\\\\" << std::endl;\n oss << \"--costFunctionConvergenceFactor \" << this->m_CostFunctionConvergenceFactor << \" \\\\\" << std::endl;\n oss << \"--backgroundFillValue \" << this->m_BackgroundFillValue << \" \\\\\" << std::endl;\n oss << \"--initializeTransformMode \" << this->m_InitializeTransformMode << \" \\\\\" << std::endl;\n oss << \"--maskInferiorCutOffFromCenter \" << this->m_MaskInferiorCutOffFromCenter << \" \\\\\" << std::endl;\n oss << \"--splineGridSize \";\n for( unsigned int q = 0; q < this->m_SplineGridSize.size(); ++q )\n {\n oss << this->m_SplineGridSize[q];\n if( q < this->m_SplineGridSize.size() - 1 )\n {\n oss << \",\";\n }\n }\n oss << \" \\\\\" << std::endl;\n\n if( !this->m_PermitParameterVariation.empty() )\n {\n oss << \"--permitParameterVariation \";\n for( unsigned int q = 0; q < this->m_PermitParameterVariation.size(); ++q )\n {\n oss << this->m_PermitParameterVariation[q];\n if( q < this->m_PermitParameterVariation.size() - 1 )\n {\n oss << \",\";\n }\n }\n oss << \" \\\\\" << std::endl;\n }\n if( m_CurrentGenericTransform.IsNotNull() )\n {\n const std::string initialTransformString(\"DEBUGInitialTransform_\" + suffix + \".h5\");\n WriteBothTransformsToDisk(this->m_CurrentGenericTransform.GetPointer(), initialTransformString, \"\");\n oss << \"--initialTransform \" << initialTransformString << \" \\\\\" 
<< std::endl;\n }\n {\n const std::string outputVolume(\"DEBUGOutputVolume_\" + suffix + \".nii.gz\");\n oss << \"--outputVolume \" << outputVolume << \" \\\\\" << std::endl;\n std::cout << oss.str() << std::endl;\n }\n {\n const std::string outputTransform(\"DEBUGOutputTransform\" + suffix + \".h5\");\n oss << \"--outputTransform \" << outputTransform << \" \\\\\" << std::endl;\n std::cout << oss.str() << std::endl;\n }\n oss << \"--useROIBSpline \" << this->m_UseROIBSpline << \" \\\\\" << std::endl;\n const std::string TesterScript(\"DEBUGScript\" + suffix + \".sh\");\n std::ofstream myScript;\n myScript.open( TesterScript.c_str() );\n myScript << oss.str() << std::endl;\n myScript.close();\n}\n\nvoid\nBRAINSFitHelper::GenerateData()\n{\n this->Update();\n}\n} // end namespace itk\n"
},
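`BRAINSFitHelper::Update` above performs two optional preprocessing steps before registration: rescaling intensities into [0, 1] (`NormalizeImage`) and histogram-matching the moving image to the fixed image. Below is a rough SimpleITK analogue of those steps, for illustration only; the parameter defaults (50 bins, 10 match points) mirror the constructor above, but BRAINSFit details such as the Otsu-based foreground masks are not reproduced.

```python
# Approximate SimpleITK counterparts of the two preprocessing steps above.
import SimpleITK as sitk

def normalize_to_unit_range(image):
    # Cast first so the [0, 1] output range is representable.
    image = sitk.Cast(image, sitk.sitkFloat32)
    stats = sitk.MinimumMaximumImageFilter()
    stats.Execute(image)
    return sitk.IntensityWindowing(image,
                                   stats.GetMinimum(), stats.GetMaximum(),
                                   0.0, 1.0)

def histogram_match(moving, fixed, bins=50, match_points=10):
    # bins/match_points correspond to m_NumberOfHistogramBins and
    # m_NumberOfMatchPoints in the C++ helper.
    matcher = sitk.HistogramMatchingImageFilter()
    matcher.SetNumberOfHistogramLevels(bins)
    matcher.SetNumberOfMatchPoints(match_points)
    matcher.SetThresholdAtMeanIntensity(True)
    return matcher.Execute(moving, fixed)
```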
{
"alpha_fraction": 0.6809468865394592,
"alphanum_fraction": 0.6855717301368713,
"avg_line_length": 36.951499938964844,
"blob_id": "2a924d13c59c60ae0778a08dbfbbbd2e4943583f",
"content_id": "80b53fce75de8e4931961031506425807449c8ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 16433,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 433,
"path": "/BRAINSTransformConvert/BRAINSTransformConvert.cxx",
"repo_name": "felixnavarro/BRAINSTools",
"src_encoding": "UTF-8",
"text": "/*=========================================================================\n *\n * Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering\n * The University of Iowa\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *=========================================================================*/\n#include \"BRAINSTransformConvertCLP.h\"\n#include \"itkTransformFileReader.h\"\n#include \"itkTransformFileWriter.h\"\n#include \"itkImageFileReader.h\"\n#include \"itkBSplineDeformableTransform.h\"\n#include \"itkIO.h\"\n#include \"itkImageRegionIterator.h\"\n#include \"GenericTransformImage.h\"\n#include \"itkTranslationTransform.h\"\n\n//\n// transform ranking,\n// meaning a lower ranked transform can be\n// converted to a higher ranked transform\n// VersorRigid3D = 1\n// ScaleVersor3D = 2\n// ScaleSkewVersor3D 3\n// Affine 4\n// BSpline 5\n// BSplineROI 5\n\ntemplate<class TScalarType>\ninline\nbool\nIsSameClass(const itk::Transform< TScalarType, 3, 3 > *result,\n const itk::Transform< TScalarType, 3, 3 > *source)\n{\n return strcmp(result->GetNameOfClass(), source->GetNameOfClass() ) == 0;\n}\n\ntemplate<class TScalarType>\ninline\nbool\nIsClass(const itk::Transform< TScalarType, 3, 3 > *xfrm, const char *className)\n{\n return strcmp(xfrm->GetNameOfClass(), className) == 0;\n}\n\ntemplate<class TScalarType>\nvoid\nTransformConvertError(const itk::Transform< TScalarType, 3, 3 > *inputXfrm,\n const std::string & targetClassName)\n{\n std::cerr << \"Can't convert transform of type \"\n << inputXfrm->GetTransformTypeAsString()\n << \" to \"\n << targetClassName\n << std::endl;\n}\n\n//\n// Convert from any type derived from MatrixOffsetTransformType to\n// AffineTransform.\ntemplate<class TScalarType>\nbool\nExtractTransform(typename itk::AffineTransform< TScalarType, 3 >::Pointer &result,\n const itk::Transform< TScalarType, 3, 3 > *source)\n{\n result->SetIdentity();\n // always able to convert to same type\n if( IsSameClass(result.GetPointer(), source) )\n {\n result->SetParameters( source->GetParameters() );\n result->SetFixedParameters( source->GetFixedParameters() );\n return true;\n }\n\n typedef itk::AffineTransform< TScalarType, 3 > LocalAffineTransformType;\n typedef typename LocalAffineTransformType::Superclass MatrixOffsetTransformType;\n const MatrixOffsetTransformType *matBasePtr = dynamic_cast<const MatrixOffsetTransformType *>(source);\n if( matBasePtr == 0 )\n {\n return false;\n }\n\n result->SetCenter( matBasePtr->GetCenter() );\n result->SetMatrix( matBasePtr->GetMatrix() );\n result->SetTranslation( matBasePtr->GetTranslation() );\n return true;\n}\n\n//\n// versor rigid 3d case.\ntemplate<class TScalarType>\nbool\nExtractTransform(typename itk::VersorRigid3DTransform<TScalarType>::Pointer & result,\n const itk::Transform< TScalarType, 3, 3 > *source)\n{\n result->SetIdentity();\n // always able to convert to same type\n if( IsSameClass(result.GetPointer(), source) )\n {\n result->SetParameters( 
source->GetParameters() );\n result->SetFixedParameters( source->GetFixedParameters() );\n return true;\n }\n\n // this looks like it should be a convertible transform but\n // I'm not sure.\n typedef itk::TranslationTransform<TScalarType, 3> TransTransformType;\n if( IsClass(source, \"TranslationTransform\") )\n {\n const TransTransformType *translationXfrm = dynamic_cast<const TransTransformType *>(source);\n typename TransTransformType::OutputVectorType offset = translationXfrm->GetOffset();\n result->SetOffset( offset );\n return true;\n }\n // versor == rotation only\n if( IsClass(source, \"VersorTransform\") )\n {\n typedef itk::VersorTransform<TScalarType> VersorTransformType;\n const VersorTransformType *versorXfrm = dynamic_cast<const VersorTransformType *>(source);\n\n result->SetRotation( versorXfrm->GetVersor() );\n result->SetCenter( versorXfrm->GetCenter() );\n return true;\n }\n return false;\n}\n\n//\n// scale versor case\ntemplate<class TScalarType>\nbool\nExtractTransform(typename itk::ScaleVersor3DTransform<TScalarType>::Pointer & result,\n const itk::Transform< TScalarType, 3, 3 > *source)\n{\n result->SetIdentity();\n // always able to convert to same type\n if( IsSameClass(result.GetPointer(), source) )\n {\n result->SetParameters( source->GetParameters() );\n result->SetFixedParameters( source->GetFixedParameters() );\n return true;\n }\n\n typedef itk::VersorRigid3DTransform<TScalarType> LocalVersorRigid3DTransformType;\n if( IsClass(source, \"VersorRigid3DTransform\") )\n {\n const LocalVersorRigid3DTransformType *versorRigidXfrm =\n dynamic_cast<const LocalVersorRigid3DTransformType *>(source);\n result->SetRotation(versorRigidXfrm->GetVersor() );\n result->SetTranslation(versorRigidXfrm->GetTranslation() );\n result->SetCenter(versorRigidXfrm->GetCenter() );\n return true;\n }\n // otherwise try VersorRigidTransform\n typename LocalVersorRigid3DTransformType::Pointer vrx = LocalVersorRigid3DTransformType::New();\n if( ExtractTransform<TScalarType>(vrx, source) ) // of VersorRigid3D conversion\n // works\n {\n // recurse to do this conversion\n return ExtractTransform<TScalarType>(result, vrx.GetPointer() );\n }\n return false;\n}\n\n//\n// scale skew versor case\ntemplate<class TScalarType>\nbool\nExtractTransform(typename itk::ScaleSkewVersor3DTransform< TScalarType >::Pointer & result,\n const itk::Transform< TScalarType, 3, 3 > *source)\n{\n // always able to convert to same type\n if( IsSameClass(result.GetPointer(), source) )\n {\n result->SetParameters( source->GetParameters() );\n result->SetFixedParameters( source->GetFixedParameters() );\n return true;\n }\n\n // is it the parent?\n typedef itk::ScaleVersor3DTransform<TScalarType> LocalScaleVersor3DTransformType;\n if( IsClass(source, \"ScaleVersor3DTransform\") )\n {\n const LocalScaleVersor3DTransformType *scaleVersorXfrm =\n dynamic_cast<const LocalScaleVersor3DTransformType *>(source);\n result->SetRotation(scaleVersorXfrm->GetVersor() );\n result->SetTranslation(scaleVersorXfrm->GetTranslation() );\n result->SetCenter(scaleVersorXfrm->GetCenter() );\n result->SetScale(scaleVersorXfrm->GetScale() );\n return true;\n }\n // otherwise try ScaleVersor conversion\n typename LocalScaleVersor3DTransformType::Pointer svx = LocalScaleVersor3DTransformType::New();\n if( ExtractTransform<TScalarType>(svx, source) ) // of VersorRigid3D conversion\n // works\n {\n // recurse to do this conversion\n return ExtractTransform<TScalarType>(result, svx.GetPointer() );\n }\n return false;\n}\n\n#define 
CHECK_PARAMETER_IS_SET(parameter, message) \\\n if( parameter == \"\" ) \\\n { \\\n std::cerr << message << std::endl; \\\n return EXIT_FAILURE; \\\n }\n\ntemplate<class TScalarType>\nint\nDoConversion( int argc, char *argv[] )\n{\n PARSE_ARGS;\n BRAINSRegisterAlternateIO();\n\n typedef itk::Transform< TScalarType, 3, 3 > LocalGenericTransformType;\n typedef itk::BSplineDeformableTransform\n < TScalarType, GenericTransformImageNS::SpaceDimension,\n GenericTransformImageNS::SplineOrder> LocalBSplineTransformType;\n\n typedef itk::AffineTransform< TScalarType, 3 > LocalAffineTransformTYpe;\n typedef itk::VersorRigid3DTransform< TScalarType > LocalVersorRigid3DTransformType;\n typedef itk::ScaleVersor3DTransform< TScalarType > LocalScaleVersor3DTransformType;\n typedef itk::ScaleSkewVersor3DTransform< TScalarType > LocalScaleSkewVersor3DTransformType;\n\n // read the input transform\n typedef itk::TransformFileReaderTemplate<TScalarType> TransformFileReaderType;\n typename TransformFileReaderType::Pointer reader = TransformFileReaderType::New();\n reader->SetFileName(inputTransform.c_str() );\n reader->Update();\n typename TransformFileReaderType::TransformListType *transformList = reader->GetTransformList();\n typename LocalGenericTransformType::Pointer inputXfrm = dynamic_cast<LocalGenericTransformType *>( transformList->front().GetPointer() );\n\n std::cout << \"------------------------ \" << std::endl;\n std::cout << \"Input Transform Type Saved on Memory ==> \" << inputXfrm->GetTransformTypeAsString() << std::endl;\n std::cout << \"* Input transform parameters: \" << inputXfrm->GetParameters() << std::endl;\n std::cout << \"* Input transform fixed parameters: \" << inputXfrm->GetFixedParameters() << std::endl;\n std::cout << \"------------------------ \" << std::endl;\n\n // Handle BSpline type\n typename LocalBSplineTransformType::Pointer bsplineInputXfrm = dynamic_cast<LocalBSplineTransformType *>( inputXfrm.GetPointer() );\n if( bsplineInputXfrm.IsNotNull() )\n {\n transformList->pop_front();\n if( transformList->size() == 0 )\n {\n std::cerr << \"Error, the second transform needed for BSplineDeformableTransform is missing.\" << std::endl;\n return EXIT_FAILURE;\n }\n typename LocalBSplineTransformType::BulkTransformType::Pointer bulkXfrm =\n dynamic_cast<typename LocalBSplineTransformType::BulkTransformType *>(transformList->front().GetPointer() );\n if( bulkXfrm.IsNull() )\n {\n std::cerr << \"Error, the second transform is not a bulk transform\" << std::endl;\n }\n bsplineInputXfrm->SetBulkTransform(bulkXfrm);\n inputXfrm = bsplineInputXfrm.GetPointer();\n }\n\n if( outputTransformType == \"DisplacementField\" )\n {\n CHECK_PARAMETER_IS_SET(referenceVolume,\n \"Missing referenceVolume needed for Displacement Field output\");\n CHECK_PARAMETER_IS_SET(displacementVolume,\n \"Missing displacementVolume needed for Displacement Field output\");\n\n typedef itk::Image<short, 3> ReferenceImageType;\n ReferenceImageType::Pointer referenceImage = itkUtil::ReadImage<ReferenceImageType>(referenceVolume);\n if( referenceImage.IsNull() )\n {\n std::cerr << \"Can't read Reference Volume \" << referenceVolume << std::endl;\n return EXIT_FAILURE;\n }\n // Allocate Displacement Field\n typedef itk::Vector<float, 3> VectorType;\n typedef itk::Image<VectorType, 3> DisplacementFieldType;\n DisplacementFieldType::Pointer displacementField =\n itkUtil::AllocateImageFromExample<ReferenceImageType, DisplacementFieldType>(referenceImage);\n\n typedef itk::ImageRegionIterator<DisplacementFieldType> 
DisplacementIteratorType;\n for( DisplacementIteratorType it(displacementField, displacementField->GetLargestPossibleRegion() );\n !it.IsAtEnd(); ++it )\n {\n DisplacementFieldType::IndexType dispIndex = it.GetIndex();\n DisplacementFieldType::PointType fixedPoint, movingPoint;\n displacementField->TransformIndexToPhysicalPoint(dispIndex, fixedPoint);\n movingPoint = inputXfrm->TransformPoint(fixedPoint);\n VectorType displacement = movingPoint - fixedPoint;\n it.Set(displacement);\n }\n\n try\n {\n itkUtil::WriteImage<DisplacementFieldType>(displacementField, displacementVolume);\n }\n catch( ... )\n {\n std::cerr << \"Error writing displacement field \" << displacementVolume << std::endl;\n return EXIT_FAILURE;\n }\n return EXIT_SUCCESS;\n }\n\n //\n // if no transform name given, don't write transform\n if(outputTransform.size() == 0)\n {\n return EXIT_SUCCESS;\n }\n\n //output transform processing\n typename LocalGenericTransformType::Pointer outputXfrm;\n\n if( outputTransformType == \"Affine\" )\n {\n typename LocalAffineTransformTYpe::Pointer affineXfrm = LocalAffineTransformTYpe::New();\n if( ExtractTransform<TScalarType>(affineXfrm, inputXfrm.GetPointer() ) == false )\n {\n TransformConvertError<TScalarType>(inputXfrm, \"Affine Transform\");\n return EXIT_FAILURE;\n }\n outputXfrm = affineXfrm.GetPointer();\n }\n else if( outputTransformType == \"VersorRigid\" )\n {\n typename LocalVersorRigid3DTransformType::Pointer versorRigidXfrm = LocalVersorRigid3DTransformType::New();\n if( ExtractTransform<TScalarType>(versorRigidXfrm, inputXfrm.GetPointer() ) == false )\n {\n TransformConvertError<TScalarType>(inputXfrm, \"VersorRigid3D Transform\");\n return EXIT_FAILURE;\n }\n outputXfrm = versorRigidXfrm.GetPointer();\n }\n else if( outputTransformType == \"ScaleVersor\" )\n {\n typename LocalScaleVersor3DTransformType::Pointer scaleVersorXfrm = LocalScaleVersor3DTransformType::New();\n if( ExtractTransform<TScalarType>( scaleVersorXfrm, inputXfrm.GetPointer() ) == false )\n {\n TransformConvertError<TScalarType>(inputXfrm, \"ScaleVersor Transform\");\n return EXIT_FAILURE;\n }\n outputXfrm = scaleVersorXfrm.GetPointer();\n }\n else if( outputTransformType == \"ScaleSkewVersor\" )\n {\n typename LocalScaleSkewVersor3DTransformType::Pointer scaleSkewVersorXfrm = LocalScaleSkewVersor3DTransformType::New();\n if( ExtractTransform<TScalarType>( scaleSkewVersorXfrm, inputXfrm.GetPointer() ) == false )\n {\n TransformConvertError<TScalarType>(inputXfrm, \"ScaleSkewVersor Transform\");\n return EXIT_FAILURE;\n }\n outputXfrm = scaleSkewVersorXfrm.GetPointer();\n }\n\n if( outputTransformType == \"Same\" )\n {\n typedef typename itk::TransformFileWriterTemplate<TScalarType> TransformWriterType;\n typename TransformWriterType::Pointer transformWriter = TransformWriterType::New();\n transformWriter->SetFileName(outputTransform);\n for( typename itk::TransformFileReaderTemplate<TScalarType>::TransformListType::iterator it = transformList->begin();\n it != transformList->end(); ++it )\n {\n typename LocalGenericTransformType::Pointer outXfrm = dynamic_cast<LocalGenericTransformType *>( (*it).GetPointer() );\n transformWriter->AddTransform( outXfrm );\n //\n std::cout << \"Output Transform Type Written to the Disk ==> \" << outXfrm->GetTransformTypeAsString() << std::endl;\n std::cout << \"* Output transform parameters: \" << outXfrm->GetParameters() << std::endl;\n std::cout << \"* Output transform fixed parameters: \" << outXfrm->GetFixedParameters() << std::endl;\n std::cout << 
\"------------------------ \" << std::endl;\n }\n try\n {\n transformWriter->Update();\n }\n catch( itk::ExceptionObject & excp )\n {\n std::cerr << \"Can't write \" << outputTransform << excp.GetDescription() << std::endl;\n return EXIT_FAILURE;\n }\n }\n else\n {\n // write the resulting transform.\n std::cout << \"Output Transform Type Written to the Disk ==> \" << outputXfrm->GetTransformTypeAsString() << std::endl;\n std::cout << \"* Output transform parameters: \" << outputXfrm->GetParameters() << std::endl;\n std::cout << \"* Output transform fixed parameters: \" << outputXfrm->GetFixedParameters() << std::endl;\n std::cout << \"------------------------ \" << std::endl;\n //\n itk::WriteTransformToDisk<TScalarType>(outputXfrm.GetPointer(), outputTransform);\n }\n return EXIT_SUCCESS;\n}\n\n\nint main(int argc, char *argv[])\n{\n PARSE_ARGS;\n BRAINSRegisterAlternateIO();\n\n CHECK_PARAMETER_IS_SET(inputTransform,\n \"Missing inputTransform parameter\");\n CHECK_PARAMETER_IS_SET(outputTransformType,\n \"Missing outpuTransformType\");\n CHECK_PARAMETER_IS_SET(outputPrecisionType,\n \"Missing outputPrecisionType\");\n\n if( outputPrecisionType == \"double\" )\n {\n return DoConversion<double>( argc, argv );\n }\n else if( outputPrecisionType == \"float\" )\n {\n return DoConversion<float>( argc, argv );\n }\n else\n {\n std::cerr << \"Error: Invalid parameter for output precision type.\" << std::endl;\n return EXIT_FAILURE;\n }\n return EXIT_SUCCESS;\n}\n"
}
] | 24 |
sophierubin1224/strategy_draft_NEW.py | https://github.com/sophierubin1224/strategy_draft_NEW.py | 97410ab3f42c6aff165bebcbecdd1f9750c7b15d | 8cc731711850c5c9ca80e7c57f6298e418de968f | 8a3fd06bb74a2d8d0375222dfe97f8ef4dc4dddd | refs/heads/master | 2023-04-17T13:02:47.005615 | 2021-04-19T03:01:48 | 2021-04-19T03:01:48 | 359,257,886 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6497696042060852,
"alphanum_fraction": 0.6728110313415527,
"avg_line_length": 35.25,
"blob_id": "7571a9ebc91105224e808f47fc82758f9bcbd890",
"content_id": "fa47165a09d266c06aea54767f5129752581b7ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 12,
"path": "/Test.py",
"repo_name": "sophierubin1224/strategy_draft_NEW.py",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nstart_date = '3/12/20'\nend_date = '3/16/20'\nfile = pd.read_csv(\"US_data.csv\")\nUS_data = pd.DataFrame(file, columns = ['Date', 'Open Price','High Price', 'Low Price', 'Last Price', 'VWAP', 'Volume'])\nstart = pd.to_datetime(start_date)\nend = pd.to_datetime(end_date)\nUS_data.Date = pd.to_datetime(US_data.Date)\nUS_data = US_data[US_data.Date >= start_date]\nUS_data = US_data[US_data.Date <= end_date]\n\nprint(US_data)"
},
{
"alpha_fraction": 0.5158997774124146,
"alphanum_fraction": 0.52000492811203,
"avg_line_length": 35.009071350097656,
"blob_id": "2d20d91674492f7255533c908ec75265a5f89cb6",
"content_id": "b0e587e79c3679ff120908c9f03eeeea57bf837c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16321,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 441,
"path": "/backtest.py",
"repo_name": "sophierubin1224/strategy_draft_NEW.py",
"src_encoding": "UTF-8",
"text": "from sklearn import linear_model\r\nfrom sklearn.metrics import r2_score\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom math import log, isnan\r\nfrom statistics import stdev\r\nfrom numpy import repeat\r\n\r\ndef trading_decision(\r\n exit_date, response_var, features_and_responses, trading_date, N, n\r\n):\r\n training_indices = features_and_responses[exit_date] < trading_date\r\n training_X = features_and_responses[training_indices].tail(N)[\r\n ['a', 'b', 'R2', 'ivv_vol']\r\n ]\r\n training_Y = features_and_responses[training_indices].tail(N)[response_var]\r\n\r\n # Need at least two 1's to train a model\r\n if sum(training_Y) < 2:\r\n return 0\r\n\r\n if sum(training_Y) < n:\r\n logisticRegr = linear_model.LogisticRegression()\r\n logisticRegr.fit(np.float64(training_X), np.float64(training_Y))\r\n trade_decision = logisticRegr.predict(\r\n np.float64(\r\n features_and_responses[[\"a\", \"b\", \"R2\", \"ivv_vol\"]][\r\n features_and_responses['Date'] == trading_date\r\n ]\r\n )\r\n ).item()\r\n else: # If EVERYTHING is a 1, then just go ahead and implement again.\r\n trade_decision = 1\r\n\r\n return trade_decision\r\n\r\ndef backtest(\r\n US_data, HK_data, n, N, alpha, lot_size, start_date, end_date,\r\n starting_cash\r\n):\r\n\r\n print(US_data)\r\n print(HK_data)\r\n\r\n # Convert JSON data to dataframes\r\n US_data = pd.read_json(US_data)\r\n HK_data = pd.read_json(HK_data)\r\n # Create the features data frame from the bond yields & IVV hist data\r\n\r\n # linear regression on log returns\r\n def HK_linreg(HK_data):\r\n log_returns = []\r\n index = []\r\n for i in HK_data:\r\n log_returns[i] = np.log(HK_data[i+1].VWAP/HK_data[i].VWAP)\r\n index[i] = i\r\n linreg_model = linear_model.LinearRegression()\r\n linreg_model.fit(index, log_returns)\r\n modeled_returns = linreg_model.predict(log_returns)\r\n return [HK_data.Date, linreg_model.coef_[0],\r\n linreg_model.intercept_,\r\n r2_score(log_returns[1:],\r\n modeled_returns)]\r\n\r\n # apply bonds_fun to every row in bonds_hist to make the features dataframe.\r\n HK_features = HK_linreg(HK_data)\r\n HK_features.columns = [\"Date\", \"a\", \"b\", \"R2\"]\r\n HK_features.Date = pd.to_datetime(HK_features.Date)\r\n\r\n # Get available volatility of day-over-day log returns based on closing\r\n # prices for IVV using a window size of N days.\r\n ivv_features = []\r\n\r\n for dt in US_data['Date'][N:]:\r\n eod_close_prices = list(\r\n US_data['Close'][US_data['Date'] <= dt].tail(N))\r\n vol = stdev([\r\n log(i / j) for i, j in zip(\r\n eod_close_prices[:N - 1], eod_close_prices[1:]\r\n )\r\n ])\r\n vol_row = [dt, vol]\r\n ivv_features.append(vol_row)\r\n\r\n ivv_features = pd.DataFrame(ivv_features)\r\n ivv_features.columns = [\"Date\", \"ivv_vol\"]\r\n ivv_features['Date'] = pd.to_datetime(ivv_features['Date'])\r\n\r\n # here, I'm doing an inner merge on features from IVV and the bond rates,\r\n # storing the result in a dataframe called 'features'.\r\n # The reason is because federal and NYSE holidays are not exactly the same, so\r\n # there are some days on which the federal government reports bond features\r\n # but no IVV data exists, and vice versa.\r\n features = pd.merge(HK_features, ivv_features, on='Date')\r\n\r\n # delete vars we no longer need\r\n del HK_data\r\n del HK_features\r\n del ivv_features\r\n\r\n response = []\r\n\r\n for features_dt in features['Date']:\r\n # Get data for the next n days after response_date\r\n ohlc_data = US_data[['Date', 'Open', 'High', 'Low', 'Close']][\r\n US_data['Date'] > 
features_dt\r\n ].head(n)\r\n\r\n if len(ohlc_data) == 0:\r\n response_row = repeat(None, 8).tolist()\r\n response.append(response_row)\r\n continue\r\n\r\n less_than_n_ohlc_data_rows = len(ohlc_data) < n\r\n\r\n entry_date = ohlc_data['Date'].head(1).item()\r\n entry_price = ohlc_data['Open'].head(1).item()\r\n\r\n target_price_long = entry_price * (1 + alpha)\r\n target_price_short = entry_price * (1 - alpha)\r\n\r\n high_price = max(ohlc_data['High'])\r\n low_price = min(ohlc_data['Low'])\r\n\r\n long_success = int(high_price >= target_price_long)\r\n short_success = int(low_price <= target_price_short)\r\n\r\n exit_long = next(\r\n (y.values.tolist() for x, y in\r\n ohlc_data[['Date', 'High']].iterrows()\r\n if y[1] >= target_price_long),\r\n ohlc_data[['Date', 'High']].tail(1).values.tolist()[0]\r\n )\r\n\r\n exit_short = next(\r\n (y.values.tolist() for x, y in ohlc_data[['Date', 'Low']].iterrows()\r\n if y[1] <= target_price_short),\r\n ohlc_data[['Date', 'Low']].tail(1).values.tolist()[0]\r\n )\r\n\r\n response_row = [entry_date, entry_price, long_success, short_success] + \\\r\n exit_long + exit_short\r\n\r\n if less_than_n_ohlc_data_rows:\r\n if not bool(response_row[2]):\r\n for i in [2, 4, 6]:\r\n response_row[i] = None\r\n if not bool(response_row[3]):\r\n for i in [3, 5, 7]:\r\n response_row[i] = None\r\n\r\n response.append(response_row)\r\n\r\n response = pd.DataFrame(response)\r\n response.columns = [\"entry_date\", \"entry_price\", \"long_success\",\r\n \"short_success\", \"exit_date_long\", \"exit_price_long\",\r\n \"exit_date_short\", \"exit_price_short\"]\r\n response = response.round(2)\r\n\r\n features_and_responses = pd.concat([features, response], axis=1)\r\n del features\r\n del response\r\n\r\n blotter = []\r\n trade_id = 0\r\n\r\n for trading_date in features_and_responses['Date'][\r\n features_and_responses['Date'] >= pd.to_datetime(start_date)\r\n ]:\r\n trade_decision_long = trading_decision(\r\n 'exit_date_long', 'long_success', features_and_responses,\r\n trading_date, N, n\r\n )\r\n # trade_decision_short = trading_decision(\r\n # 'exit_date_short', 'short_success', features_and_responses,\r\n # trading_date, N, n\r\n # )\r\n\r\n # trade_sum = trade_decision_short + trade_decision_long\r\n\r\n # if trade_sum == 0 or trade_sum == 2:\r\n # continue\r\n\r\n if trade_decision_long == 1:\r\n right_answer = features_and_responses[\r\n features_and_responses['Date'] == trading_date\r\n ]\r\n\r\n if trading_date == features_and_responses['Date'].tail(1).item():\r\n order_status = 'PENDING'\r\n submitted = order_price = fill_price = filled_or_cancelled = None\r\n else:\r\n submitted = filled_or_cancelled = right_answer[\r\n 'entry_date'].item()\r\n order_price = fill_price = right_answer['entry_price'].item()\r\n order_status = 'FILLED'\r\n\r\n entry_trade_mkt = [\r\n trade_id, 'L', submitted, 'BUY', lot_size, 'IVV',\r\n order_price, 'MKT', order_status, fill_price,\r\n filled_or_cancelled\r\n ]\r\n\r\n long_success = right_answer['long_success'].item()\r\n\r\n if isnan(long_success):\r\n order_status = 'OPEN'\r\n fill_price = filled_or_cancelled = None\r\n\r\n filled_or_cancelled = right_answer['exit_date_long'].item()\r\n\r\n if isinstance(order_price, float):\r\n order_price = order_price * (1+alpha)\r\n\r\n if long_success == 0:\r\n order_status = 'CANCELLED'\r\n fill_price = None\r\n exit_trade_mkt = [\r\n trade_id, 'L', filled_or_cancelled, 'SELL', lot_size,\r\n 'IVV', right_answer['exit_price_long'].item(), 'MKT',\r\n 'FILLED', 
right_answer['exit_price_long'].item(),\r\n filled_or_cancelled\r\n ]\r\n blotter.append(exit_trade_mkt)\r\n\r\n if long_success == 1:\r\n order_status = 'FILLED'\r\n fill_price = right_answer['exit_price_long'].item()\r\n\r\n exit_trade_lmt = [\r\n trade_id, 'L', submitted, 'SELL', lot_size, 'IVV',\r\n order_price, 'LIMIT', order_status, fill_price,\r\n filled_or_cancelled\r\n ]\r\n\r\n blotter.append(entry_trade_mkt)\r\n blotter.append(exit_trade_lmt)\r\n trade_id += 1\r\n\r\n # elif trade_decision_short == 1:\r\n # right_answer = features_and_responses[\r\n # features_and_responses['Date'] == trading_date\r\n # ]\r\n #\r\n # if trading_date == features_and_responses['Date'].tail(1).item():\r\n # order_status = 'PENDING'\r\n # submitted = order_price = fill_price = filled_or_cancelled = None\r\n # else:\r\n # submitted = filled_or_cancelled = right_answer[\r\n # 'entry_date'].item()\r\n # order_price = fill_price = right_answer['entry_price'].item()\r\n # order_status = 'FILLED'\r\n #\r\n # entry_trade_mkt = [\r\n # trade_id, 'S', submitted, 'SELL', lot_size, 'IVV',\r\n # order_price, 'MKT', order_status, fill_price,\r\n # filled_or_cancelled\r\n # ]\r\n #\r\n # short_success = right_answer['short_success'].item()\r\n #\r\n # if isnan(short_success):\r\n # order_status = 'OPEN'\r\n # fill_price = filled_or_cancelled = None\r\n #\r\n # filled_or_cancelled = right_answer['exit_date_short'].item()\r\n #\r\n # if isinstance(order_price, float):\r\n # order_price = order_price * (1-alpha)\r\n #\r\n # if short_success == 0:\r\n # order_status = 'CANCELLED'\r\n # fill_price = None\r\n # exit_trade_mkt = [\r\n # trade_id, 'S', filled_or_cancelled, 'BUY', lot_size,\r\n # 'IVV', right_answer['exit_price_short'].item(), 'MKT',\r\n # 'FILLED', right_answer['exit_price_short'].item(),\r\n # filled_or_cancelled\r\n # ]\r\n # blotter.append(exit_trade_mkt)\r\n #\r\n # if short_success == 1:\r\n # order_status = 'FILLED'\r\n # fill_price = right_answer['exit_price_short'].item()\r\n #\r\n # exit_trade_lmt = [\r\n # trade_id, 'S', submitted, 'BUY', lot_size, 'IVV',\r\n # order_price, 'LIMIT', order_status, fill_price,\r\n # filled_or_cancelled\r\n # ]\r\n #\r\n # blotter.append(entry_trade_mkt)\r\n # blotter.append(exit_trade_lmt)\r\n # trade_id += 1\r\n\r\n blotter = pd.DataFrame(blotter)\r\n blotter.columns = [\r\n 'ID', 'ls', 'submitted', 'action', 'size', 'symbol', 'price', 'type',\r\n 'status', 'fill_price', 'filled_or_cancelled'\r\n ]\r\n blotter = blotter.round(2)\r\n blotter.sort_values(\r\n by=['ID', 'submitted'],\r\n inplace=True,\r\n ascending=[False, True]\r\n )\r\n blotter.reset_index()\r\n\r\n calendar_ledger = []\r\n cash = starting_cash\r\n position = 0\r\n stock_value = 0\r\n total_value = cash\r\n\r\n for ivv_row in US_data[\r\n US_data['Date'] >= pd.to_datetime(start_date)\r\n ].iterrows():\r\n trading_date = ivv_row[1]['Date']\r\n ivv_close = ivv_row[1]['Close']\r\n trades = blotter[\r\n (blotter['filled_or_cancelled'] == ivv_row[1]['Date']) & (\r\n blotter['status'] == 'FILLED'\r\n )]\r\n if len(trades) > 0:\r\n position = position + sum(\r\n trades['size'][trades['action'] == 'BUY']\r\n ) - sum(\r\n trades['size'][trades['action'] == 'SELL']\r\n )\r\n cash = cash - sum(\r\n trades['size'][trades['action'] == 'BUY'] *\r\n trades['fill_price'][\r\n trades['action'] == 'BUY'\r\n ]\r\n ) + sum(\r\n trades['size'][trades['action'] == 'SELL'] *\r\n trades['fill_price'][\r\n trades['action'] == 'SELL'\r\n ]\r\n )\r\n stock_value = position * ivv_close\r\n total_value = cash + 
stock_value\r\n else:\r\n stock_value = position * ivv_close\r\n total_value = cash + stock_value\r\n\r\n ledger_row = [\r\n trading_date, position, ivv_close, cash, stock_value, total_value\r\n ]\r\n calendar_ledger.append(ledger_row)\r\n\r\n calendar_ledger = pd.DataFrame(calendar_ledger)\r\n calendar_ledger.columns = [\r\n 'Date', 'position', 'ivv_close', 'cash', 'stock_value', 'total_value'\r\n ]\r\n\r\n trade_ledger = []\r\n\r\n for trade in blotter['ID'].unique():\r\n round_trip_trade = blotter[\r\n (blotter['ID'] == trade) & (blotter['status'] == 'FILLED')\r\n ]\r\n\r\n if len(round_trip_trade) < 2:\r\n continue\r\n\r\n trade_id = round_trip_trade['ID'].unique().item()\r\n\r\n date_opened = min(round_trip_trade['submitted'])\r\n date_closed = max(round_trip_trade['submitted'])\r\n\r\n ivv_df = US_data[(US_data['Date'] <= date_closed) & (\r\n US_data['Date'] >= date_opened\r\n )]\r\n\r\n trading_days_open = len(ivv_df)\r\n\r\n buy_price = round_trip_trade['fill_price'][\r\n round_trip_trade['action'] == 'BUY'\r\n ].item()\r\n sell_price = round_trip_trade['fill_price'][\r\n round_trip_trade['action'] == 'SELL'\r\n ].item()\r\n\r\n ivv_price_enter = ivv_df['Close'][\r\n ivv_df['Date'] == round_trip_trade['submitted'][\r\n round_trip_trade['action'] == 'BUY'\r\n ].item()\r\n ].item()\r\n ivv_price_exit = ivv_df['Close'][\r\n ivv_df['Date'] == round_trip_trade['submitted'][\r\n round_trip_trade['action'] == 'SELL'\r\n ].item()\r\n ].item()\r\n\r\n trade_rtn = log(sell_price / buy_price)\r\n ivv_rtn = log(ivv_price_exit / ivv_price_enter)\r\n\r\n trade_rtn_per_trading_day = trade_rtn/trading_days_open\r\n benchmark_rtn_per_trading_day = ivv_rtn/trading_days_open\r\n\r\n trade_ledger_row = [\r\n trade_id, date_opened, date_closed, trading_days_open, buy_price,\r\n sell_price, ivv_price_enter, ivv_price_exit, trade_rtn, ivv_rtn,\r\n trade_rtn_per_trading_day, benchmark_rtn_per_trading_day\r\n ]\r\n\r\n trade_ledger.append(trade_ledger_row)\r\n\r\n trade_ledger = pd.DataFrame(trade_ledger)\r\n trade_ledger.columns = [\r\n 'trade_id', 'open_dt', 'close_dt', 'trading_days_open', 'buy_price',\r\n 'sell_price', 'benchmark_buy_price', 'benchmark_sell_price',\r\n 'trade_rtn', 'benchmark_rtn', 'trade_rtn_per_trading_day',\r\n 'benchmark_rtn_per_trading_day'\r\n ]\r\n\r\n # Final formatting\r\n features_and_responses['Date'] = features_and_responses['Date'].dt.date\r\n features_and_responses['entry_date'] = features_and_responses[\r\n 'entry_date'].dt.date\r\n features_and_responses['exit_date_long'] = features_and_responses[\r\n 'exit_date_long'].dt.date\r\n features_and_responses['exit_date_short'] = features_and_responses[\r\n 'exit_date_short'].dt.date\r\n\r\n blotter['submitted'] = blotter['submitted'].dt.date\r\n blotter['filled_or_cancelled'] = blotter['filled_or_cancelled'].dt.date\r\n\r\n calendar_ledger['Date'] = calendar_ledger['Date'].dt.date\r\n calendar_ledger.round(2)\r\n\r\n trade_ledger['open_dt'] = trade_ledger['open_dt'].dt.date\r\n trade_ledger['close_dt'] = trade_ledger['close_dt'].dt.date\r\n\r\n features_and_responses.to_csv('features_and_responses.csv')\r\n blotter.to_csv('blotter.csv')\r\n calendar_ledger.to_csv('calendar_ledger.csv')\r\n trade_ledger.to_csv('trade_ledger.csv')\r\n\r\n return features_and_responses, blotter, calendar_ledger, trade_ledger\r\n"
},
{
"alpha_fraction": 0.5912214517593384,
"alphanum_fraction": 0.5996453166007996,
"avg_line_length": 32.6716423034668,
"blob_id": "7194f314652c0a2f602bcb49c8f4c5c8952c6f6b",
"content_id": "1f3cff4bc6eb05b6178bf3c60706918b295b497a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4511,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 134,
"path": "/Strategy_draft.py",
"repo_name": "sophierubin1224/strategy_draft_NEW.py",
"src_encoding": "UTF-8",
"text": "from sklearn import linear_model\nfrom sklearn.metrics import r2_score\nimport numpy as np\nimport pandas as pd\nfrom math import log, isnan\nfrom statistics import stdev\nfrom scipy.stats.mstats import gmean\n\n\nN = 10\nalpha = .02\nthreshold = .001\nlot_size = 100\n\n\nUS_data = pd.read_csv('US_data.csv')\nUS_data['Date'] = pd.to_datetime(US_data['Date'])\nHK_data = pd.read_csv('HK_data.csv')\nHK_data['Date'] = pd.to_datetime(HK_data['Date'])\n\n\n\n#linear regression and GMRR\n##add R^2 back in\nHK_features = []\nfor dt in HK_data['Date'][N:]:\n eod_close_prices = list(HK_data['Last Price'][HK_data['Date'] <= dt].tail(N))\n action = []\n index = np.array(list(i for i in range(1, len(eod_close_prices))))\n log_returns = np.array(list(log(i / j) for i, j in zip(eod_close_prices[:N - 1], eod_close_prices[1:])))\n ret = []\n for i in log_returns[:-1]:\n ret.append(i+1)\n GMRR = np.prod(ret)** (1 / len(log_returns)) - 1\n if log_returns[-1] - GMRR > threshold:\n action.append('BUY')\n else:\n action.append('SELL')\n index = index.reshape(1,-1)\n log_returns = log_returns.reshape(1,-1)\n linreg_model = linear_model.LinearRegression()\n linreg_model.fit(index, log_returns)\n modeled_returns = linreg_model.predict(log_returns)\n row = [dt, linreg_model.intercept_, linreg_model.coef_[0][0],GMRR, action]\n HK_features.append(row)\n\n #r2_score(log_returns[:N],modeled_returns),\nHK_features = pd.DataFrame(HK_features)\nHK_features.columns = [\"Date\", \"a\", \"b\", \"GMRR\", \"Action\"]\nHK_features['Date'] = pd.to_datetime(HK_features['Date'])\n\n\n# Get available volatility of day-over-day log returns based on closing\n# # prices for IVV using a window size of N days.\nivv_features = []\nfor dt in US_data['Date'][N:]:\n eod_close_prices = list(US_data['Last Price'][US_data['Date'] <= dt].tail(N))\n vol = stdev([\n log(i / j) for i, j in zip(\n eod_close_prices[:N - 1], eod_close_prices[1:]\n )\n ])\n vol_row = [dt, vol]\n ivv_features.append(vol_row)\n\nivv_features = pd.DataFrame(ivv_features)\nivv_features.columns = [\"Date\", \"ivv_vol\"]\nivv_features['Date'] = pd.to_datetime(ivv_features['Date'])\n\n# #inner merge on features from IVV and the bond rates,\nfeatures = pd.merge(HK_features, ivv_features, on='Date')\n#\n# # delete vars we no longer need\ndel HK_data\ndel HK_features\ndel ivv_features\n#\nresponse = []\n\n\n\n#\n# def backtest(US_data, HK_data, N, alpha, threshold, lot_size, start_date, end_date, starting_cash):\n#\n# # linear regression on log returns\n# def HK_linreg(HK_data):\n# log_returns = []\n# index = []\n# for i in HK_data['Date'][N:]:\n# eod_close_prices = list(HK_data['Low Price'][HK_data['Date'] <= dt].tail(N))\n# log_returns[i] = log(eod_close_prices(i+1)/eod_close_prices(i))\n# #log_returns = (log(i / j) for i, j in zip(eod_close_prices[:N - 1], eod_close_prices[1:]))\n# index[i] = i\n# linreg_model = linear_model.LinearRegression()\n# linreg_model.fit(index, log_returns)\n# modeled_returns = linreg_model.predict(log_returns)\n# return [HK_data.Date, linreg_model.coef_[0],\n# linreg_model.intercept_,\n# r2_score(log_returns[1:],\n# modeled_returns)]\n#\n# # apply bonds_fun to every row in bonds_hist to make the features dataframe.\n# HK_features = HK_linreg(HK_data)\n# HK_features.columns = [\"Date\", \"a\", \"b\", \"R2\"]\n# HK_features.Date = pd.to_datetime(HK_features.Date)\n#\n# # Get available volatility of day-over-day log returns based on closing\n# # prices for IVV using a window size of N days.\n# ivv_features = []\n#\n# for dt in 
US_data['Date'][N:]:\n# eod_close_prices = list(\n# US_data['Low Price'][US_data['Date'] <= dt].tail(N))\n# vol = stdev([\n# log(i / j) for i, j in zip(\n# eod_close_prices[:N - 1], eod_close_prices[1:]\n# )\n# ])\n# vol_row = [dt, vol]\n# ivv_features.append(vol_row)\n#\n# ivv_features = pd.DataFrame(ivv_features)\n# ivv_features.columns = [\"Date\", \"ivv_vol\"]\n# ivv_features['Date'] = pd.to_datetime(ivv_features['Date'])\n#\n# #inner merge on features from IVV and the bond rates,\n# features = pd.merge(HK_features, ivv_features, on='Date')\n#\n# # delete vars we no longer need\n# del HK_data\n# del HK_features\n# del ivv_features\n#\n# response = []"
}
] | 3 |
eauriel/Bachelor-Thesis | https://github.com/eauriel/Bachelor-Thesis | 361ba820cea1ac54866e0d264d5da31272e2633e | 195b4ba36ff9cfff2ce4b84a7a08cd047a12247a | b3873121621f1152b6b982acdf1e1cf8035c43bf | refs/heads/master | 2023-06-19T05:52:31.320940 | 2021-06-11T22:05:03 | 2021-06-11T22:05:03 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6550689935684204,
"alphanum_fraction": 0.6592681407928467,
"avg_line_length": 21.486486434936523,
"blob_id": "cdfac1b60dd48244a86fc5ac08c7e4874747297c",
"content_id": "9c6ef08e7853aebeef36d8657799d839f69f783f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1667,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 74,
"path": "/src/doc/_sources/autoapi/NLG/optimalizers/GeneticAlg/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.optimalizers.GeneticAlg`\n==================================\n\n.. py:module:: NLG.optimalizers.GeneticAlg\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.optimalizers.GeneticAlg.GeneticAlg\n\n\n\n\n.. class:: GeneticAlg\n\n Bases: :py:obj:`abc.ABC`\n\n Abstract genetic algorithm framwork \n\n .. method:: generate_individual(self)\n :abstractmethod:\n\n Generate random individual.\n To be implemented in subclasses\n\n\n .. method:: show_individual(self, x)\n\n Show the given individual x, either to console or graphically.\n\n\n .. method:: fitness(self, x)\n :abstractmethod:\n\n Returns fitness of a given individual.\n To be implemented in subclasses\n\n\n .. method:: crossover(self, x, y, k)\n\n Take two parents (x and y) and make two children by applying k-point\n crossover. Positions for crossover are chosen randomly.\n\n\n .. method:: boolean_mutation(self, x, prob)\n\n Elements of x are 0 or 1. Mutate (i.e. change) each element of x with given probability.\n\n\n .. method:: number_mutation(self, x, prob)\n :abstractmethod:\n\n Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\n\n\n .. method:: mutation(self, x, prob)\n :abstractmethod:\n\n Decides which mutation will occur. \n\n\n .. method:: solve(self, max_generations, goal_fitness=1)\n :abstractmethod:\n\n Implementation of genetic algorithm. Produce generations until some\n individual`s fitness reaches goal_fitness, or you exceed total number\n of max_generations generations. Return best found individual.\n\n\n\n"
},
{
"alpha_fraction": 0.4968152940273285,
"alphanum_fraction": 0.5031847357749939,
"avg_line_length": 10.923076629638672,
"blob_id": "bef3a6f13b87cfce7e890e64d898e763c6959a5f",
"content_id": "a4e9762ab472ea6418fa341db85e014ddd09e203",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 157,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 13,
"path": "/src/doc/_sources/autoapi/NLG/database/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.database`\n===================\n\n.. py:module:: NLG.database\n\n\nSubmodules\n----------\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n DB/index.rst\n\n\n"
},
{
"alpha_fraction": 0.5781420469284058,
"alphanum_fraction": 0.582513689994812,
"avg_line_length": 27.59375,
"blob_id": "a3d8247189c11bcd8294309acd7a83ea7ab5a190",
"content_id": "9f4cca125ae065e0f4a9b56e07659523c42ea23a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 32,
"path": "/src/NLG/models/MLPModel.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\n\nclass MLP(nn.Module):\n def __init__(self, n_inputs, n_action, n_hidden_layers=1, hidden_dim=[32]):\n super(MLP, self).__init__()\n\n M = n_inputs\n self.layers = []\n\n for hidd_l in range(n_hidden_layers):\n layer = nn.Linear(M, hidden_dim[hidd_l])\n M = hidden_dim[hidd_l]\n self.layers.append(layer)\n self.layers.append(nn.ReLU())\n\n # final layer\n self.layers.append(nn.Linear(M, n_action))\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.layers = nn.Sequential(*self.layers).to(self.device)\n\n self.losses = None\n\n def forward(self, X):\n return self.layers(X).cuda()\n\n def save_weights(self, path):\n torch.save(self.state_dict(), path)\n\n def load_weights(self, path):\n self.load_state_dict(torch.load(path))\n"
},
{
"alpha_fraction": 0.6550132036209106,
"alphanum_fraction": 0.668865442276001,
"avg_line_length": 20.514286041259766,
"blob_id": "ce1c70a3b70f66b8986f92d488bb540d51cbb213",
"content_id": "e847188a89c05c7e0ecb596c11454e753dca7ad8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1516,
"license_type": "permissive",
"max_line_length": 218,
"num_lines": 70,
"path": "/src/doc/_sources/autoapi/NLG/NlgDiscreteStatesActions/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NlgDiscreteStatesActions`\n===================================\n\n.. py:module:: NLG.NlgDiscreteStatesActions\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgDiscreteStatesActions.Environment\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgDiscreteStatesActions.ACTIONS\n\n\n.. class:: Environment(n_questions, game_type, max_gates, n_players=2, initial_state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex64), best_or_worst='best', reward_function=None, anneal=False, n_games=1)\n\n\n Bases: :py:obj:`NonLocalGame.abstractEnvironment`\n\n Creates CHSH environments for quantum strategies, discretizes and states and uses discrete actions \n\n .. method:: reset(self)\n\n\n .. method:: calculate_state(self, history_actions, anneal=False)\n\n Calculates the state according to previous actions in parameter history_actions \n\n\n .. method:: save_interesting_strategies(self)\n\n\n .. method:: step(self, action)\n\n\n .. method:: anneal(self, steps=80, t_start=2, t_end=0.001)\n\n Finds the maximal value of the fitness function by\n executing the simulated annealing algorithm.\n Returns a state (e.g. x) for which fitness(x) is maximal. \n\n\n .. method:: fitness(self, x)\n\n Calculates fitness of the state given by calculation of accuracy over history of actions.\n\n\n .. method:: neighbors(self, x, span=30, delta=0.5)\n\n Creates neighboring gate angle to angle x\n\n\n .. method:: random_state(self)\n\n\n\n.. data:: ACTIONS\n \n\n \n\n"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 13.5625,
"blob_id": "2bf3834c9e97de889274c80ebca97eee87ddeb74",
"content_id": "129c5135c058d8e191aecf3f55f35c002583139a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 235,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 16,
"path": "/src/doc/_sources/autoapi/NLG/models/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.models`\n=================\n\n.. py:module:: NLG.models\n\n\nSubmodules\n----------\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n KerasModel/index.rst\n LinearModel/index.rst\n MLPModel/index.rst\n RegressionModel/index.rst\n\n\n"
},
{
"alpha_fraction": 0.6939130425453186,
"alphanum_fraction": 0.6973913311958313,
"avg_line_length": 16.363636016845703,
"blob_id": "4e387976128eca4319f897c6793cadfc694b2a93",
"content_id": "824972e5ddacb87d8ea4619659a6a707f5012cf1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 575,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 33,
"path": "/src/doc/_sources/autoapi/NLG/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG`\n==========\n\n.. py:module:: NLG\n\n\nSubpackages\n-----------\n.. toctree::\n :titlesonly:\n :maxdepth: 3\n\n agents/index.rst\n database/index.rst\n models/index.rst\n optimalizers/index.rst\n tests/index.rst\n\n\nSubmodules\n----------\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n CHSHPrototype/index.rst\n HyperParametersOptimalization/index.rst\n NlgContinuousGlobalOptimalization/index.rst\n NlgDeterministic/index.rst\n NlgDiscreteStatesActions/index.rst\n NlgGeneticOptimalization/index.rst\n NlgParalelClassical/index.rst\n NonLocalGame/index.rst\n\n\n"
},
{
"alpha_fraction": 0.6205013990402222,
"alphanum_fraction": 0.6279821991920471,
"avg_line_length": 37.0461540222168,
"blob_id": "40398e3964641a459ce2d9e5d39cab426b26ae06",
"content_id": "002f90e26c1dafd471adcc26836cbfde26b46766",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4946,
"license_type": "permissive",
"max_line_length": 180,
"num_lines": 130,
"path": "/src/NLG/agents/DQNAgent.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom NLG.models.MLPModel import MLP\n\n### The experience replay memory ###\nclass ReplayBuffer:\n \"\"\" Creates replay buffer to learn from previous episodes\"\"\"\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros(size, dtype=np.uint8)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.uint8)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=64):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(s=self.obs1_buf[idxs],\n s2=self.obs2_buf[idxs],\n a=self.acts_buf[idxs],\n r=self.rews_buf[idxs],\n d=self.done_buf[idxs])\n\n\ndef predict(model, np_states):\n with torch.no_grad():\n inputs = torch.from_numpy(np_states.astype(np.float32))\n output = model(inputs.to(model.device))\n # print(\"output:\", output)\n return output.cpu().numpy()\n\n\ndef train_one_step(model, criterion, optimizer, inputs, targets):\n # convert to tensors\n inputs = torch.from_numpy(inputs.astype(np.float32))\n targets = torch.from_numpy(targets.astype(np.float32))\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # Forward pass\n outputs = model(inputs.to(model.device))\n loss = criterion(outputs, targets.to(model.device))\n\n # Backward and optimize\n loss.backward()\n optimizer.step()\n\nclass DQNAgent(object):\n def __init__(self, state_size, action_size, gamma, eps, eps_min, eps_decay, ALL_POSSIBLE_ACTIONS, learning_rate, hidden_layers, hidden_dim, onehot_to_action, action_to_onehot):\n self.onehot_to_action = onehot_to_action\n self.action_to_onehot = action_to_onehot\n\n self.state_size = state_size\n self.action_size = action_size\n self.memory = ReplayBuffer(state_size, action_size, size=500)\n self.gamma = gamma # discount rate\n self.epsilon = eps # exploration rate\n self.epsilon_min = eps_min\n self.epsilon_decay = eps_decay\n self.model = MLP(state_size, action_size, hidden_dim=hidden_dim, n_hidden_layers=hidden_layers)\n\n self.ALL_POSSIBLE_ACTIONS = ALL_POSSIBLE_ACTIONS\n\n # Loss and optimizer\n self.criterion = nn.MSELoss()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)\n\n def update_replay_memory(self, state, action, reward, next_state, done):\n self.memory.store(state, action, reward, next_state, done)\n\n def act(self, state):\n \"\"\" :returns action based on neural model prediction / epsilon greedy \"\"\"\n\n if np.random.rand() <= self.epsilon:\n choice = np.random.choice(self.action_size)\n return self.ALL_POSSIBLE_ACTIONS[choice], choice\n act_values = predict(self.model,state)\n choice = np.argmax(act_values[0])\n return self.ALL_POSSIBLE_ACTIONS[choice], choice\n\n def replay(self, batch_size=64):\n # first check if replay buffer contains enough data\n if self.memory.size < batch_size:\n return\n\n # sample a batch of data from the replay memory\n minibatch = self.memory.sample_batch(batch_size)\n states = minibatch['s']\n actions = minibatch['a']\n rewards = minibatch['r']\n next_states = minibatch['s2']\n done = 
minibatch['d']\n\n # Calculate the target: Q(s',a)\n target = rewards + (1 - done) * self.gamma * np.amax(predict(self.model, next_states), axis=1)\n\n # With the PyTorch API, it is simplest to have the target be the\n # same shape as the predictions.\n # However, we only need to update the network for the actions\n # which were actually taken.\n # We can accomplish this by setting the target to be equal to\n # the prediction for all values.\n # Then, only change the targets for the actions taken.\n # Q(s,a)\n target_full = predict(self.model, states)\n target_full[np.arange(batch_size), actions] = target\n\n # Run one training step\n train_one_step(self.model, self.criterion, self.optimizer, states, target_full)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n self.model.load_weights(name)\n\n def save(self, name):\n self.model.save_weights(name)\n"
},
{
"alpha_fraction": 0.5446096658706665,
"alphanum_fraction": 0.553903341293335,
"avg_line_length": 21.45833396911621,
"blob_id": "5d60d3a2d70f7c13ab7c57619d0b53eee05f660f",
"content_id": "70f6d1a459b4395c6b1edb3843205d119b2bb03e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 24,
"path": "/src/NLG/models/RegressionModel.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "from abc import ABC, abstractmethod\n\nclass RegressionModel(ABC):\n \"\"\" a linear regression models \"\"\"\n\n def predict(self, x):\n \"\"\" predicts output for input \"\"\"\n pass\n\n def sgd(self, x, y, learning_rate=0.01, momentum=0.9):\n \"\"\" makes one step of sgd \"\"\"\n pass\n\n def load_weights(self, filepath):\n \"\"\" loads weights \"\"\"\n pass\n\n def save_weights(self, filepath):\n \"\"\" saves weights \"\"\"\n pass\n\n def get_losses(self):\n \"\"\" returns learning loss \"\"\"\n pass"
},
{
"alpha_fraction": 0.6598579287528992,
"alphanum_fraction": 0.6722912788391113,
"avg_line_length": 19.66666603088379,
"blob_id": "4d488e3a7aaf6e8ebb57f8750d587da0a269b0a0",
"content_id": "df4e6ca7a4c34a2585bd1ce24273ad16500aeb3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1126,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 54,
"path": "/src/doc/_sources/autoapi/NLG/HyperParametersOptimalization/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.HyperParametersOptimalization`\n========================================\n\n.. py:module:: NLG.HyperParametersOptimalization\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.HyperParametersOptimalization.HyperParamCHSHOptimizer\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.HyperParametersOptimalization.ACTIONS\n\n\n.. class:: HyperParamCHSHOptimizer(population_size=15, n_crossover=3, mutation_prob=0.05, game_type=None, CHSH=None, max_gates=10, n_questions=2, ALL_POSSIBLE_ACTIONS=None, agent_type=BasicAgent, best_or_worst='best')\n\n\n Bases: :py:obj:`optimalizers.GeneticAlg.GeneticAlg`\n\n Works only for DiscreteStatesActions.Environment because of different init parameters \n\n .. method:: generate_individual(self)\n\n\n .. method:: fitness(self, x)\n\n\n .. method:: number_mutation(self, x, prob)\n\n Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\n\n\n .. method:: mutation(self, x, prob)\n\n\n .. method:: solve(self, max_generations, goal_fitness=1)\n\n\n\n.. data:: ACTIONS\n \n\n \n\n"
},
{
"alpha_fraction": 0.541436493396759,
"alphanum_fraction": 0.5469613075256348,
"avg_line_length": 11.785714149475098,
"blob_id": "b7ea718e4e525632ff2372010f2ac551fcfbcf66",
"content_id": "f0cabedf50610220d82df97354f1e9d6ed8c1678",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 181,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 14,
"path": "/src/doc/_sources/autoapi/NLG/agents/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.agents`\n=================\n\n.. py:module:: NLG.agents\n\n\nSubmodules\n----------\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n BasicAgent/index.rst\n DQNAgent/index.rst\n\n\n"
},
{
"alpha_fraction": 0.5472500920295715,
"alphanum_fraction": 0.568824052810669,
"avg_line_length": 32.581634521484375,
"blob_id": "184c92d4f4b6fce3ec8731ec11921c8da5adba19",
"content_id": "9c4d988760ddc45deec7fd5391682a3b65f8fb97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3291,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 98,
"path": "/src/NLG/NlgDeterministic.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import itertools\n\nfrom NLG import NonLocalGame\n\n\nclass Environment(NonLocalGame.abstractEnvironment):\n \"\"\" creates CHSH for classic deterministic strategies, works small for 4x4 games \"\"\"\n\n def __init__(self, game_type, num_players=2, n_questions=2):\n self.num_players = num_players\n self.n_questions = n_questions\n self.questions = list(itertools.product(list(range(2)), repeat=self.num_players * self.n_questions // 2))\n\n self.n_games = 1\n self.n_qubits = 0\n\n self.game_type = game_type\n\n self.possible_answers = dict()\n self.possible_answers[0] = (0, 1)\n self.possible_answers[1] = (0, 1)\n\n self.responses = list(\n itertools.product(list(range(2)),\n repeat=self.num_players * self.n_questions //2))\n\n @NonLocalGame.override\n def reset(self):\n return\n\n @NonLocalGame.override\n def step(self, action):\n return\n\n def index(self, response):\n \"\"\" :returns index of response so that it can be mapped to state\"\"\"\n counter = 0\n for r in self.responses:\n if r == response:\n break\n counter += 1\n return counter\n\n def evaluate(self, question, response):\n \"\"\" :returns winning accuracy to input question based on response \"\"\"\n self.state = [0 for _ in range(len(self.game_type))]\n answer = (self.possible_answers[question[0]][response[0]], self.possible_answers[question[1]][response[1]])\n self.state[self.index(answer)] = 1\n return self.measure_probabilities_analytically()\n\n def play_all_strategies(self):\n \"\"\" plays 16 different strategies,evaluate each and :returns: the best accuracy from all strategies \"\"\"\n accuracies = []\n result = []\n\n\n response_list = self.response_rek(self.n_questions)\n for r_A in self.responses:\n for r_B in self.responses:\n for x, question in enumerate(self.questions):\n response_to_this_question = r_A[question[0]], r_B[question[1]]\n result.append(self.evaluate(question, response_to_this_question))\n accuracies.append(self.calc_accuracy(result))\n result = []\n\n return max(accuracies), min(accuracies)\n\n def response_rek(self, n):\n if (n == 0): pass\n else:\n for r in self.responses:\n yield r\n self.response_rek(n - 1)\n\n\n\ndef rule(a, b, x, y):\n return (a != b) == (x and y)\n\n\ndef create(game_type):\n game = [[0 for _ in range(len(game_type)) for __ in range(len(game_type))] for ___ in range(len(game_type)) for ____ in range(len(game_type))]\n for y1, riadok1 in enumerate(game_type):\n for x1, cell1 in enumerate(riadok1):\n for y2, riadok2 in enumerate(game_type):\n # for x1, cell1 in enumerate(riadok1):\n for x2, cell2 in enumerate(riadok2):\n if (cell1 == cell2 and cell1 == 1): game[y1 * y2][x1 * x2] = 1\n return game\n\n\nif __name__ == '__main__':\n game_type = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n env = Environment(game_type, 2, 2)\n print(env.play_all_strategies())\n"
},
{
"alpha_fraction": 0.6229507923126221,
"alphanum_fraction": 0.6299765706062317,
"avg_line_length": 15.880000114440918,
"blob_id": "2a1196b3a2c1cdaa4534b9d477f9526eb452df27",
"content_id": "48b6dcd31765a8f31088b0705b464ce0acfd743c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 854,
"license_type": "permissive",
"max_line_length": 213,
"num_lines": 50,
"path": "/src/doc/_sources/autoapi/NLG/database/DB/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.database.DB`\n======================\n\n.. py:module:: NLG.database.DB\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.database.DB.CHSHdb\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.database.DB.db\n\n\n.. class:: CHSHdb\n\n\n .. method:: createDB(self)\n\n\n .. method:: createTables(self)\n\n\n .. method:: query(self, category='all', difficulty='all', difference='all', num_players=2, n_questions=2)\n\n\n .. method:: query_categories_games(self, num_players=2, n_questions=2)\n\n\n .. method:: insert_categories_games(self, n_questions, num_players, generated_games)\n\n\n .. method:: insert(self, category, difficulty, classic_min, quantum_min, classic_max, quantum_max, difference_min, difference_max, min_state, max_state, min_strategy, max_strategy, game, questions=2, players=2)\n\n\n\n.. data:: db\n \n\n \n\n"
},
{
"alpha_fraction": 0.5683487057685852,
"alphanum_fraction": 0.5916262865066528,
"avg_line_length": 40.83660125732422,
"blob_id": "76e81d436f9d499e9e5f49b9c4eca2f7865938f8",
"content_id": "1afe8904695103b0c9a245458c6ccfe45a135b9d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6401,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 153,
"path": "/src/NLG/HyperParametersOptimalization.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import math\nimport random\n\nimport NonLocalGame\nfrom NonLocalGame import get_scaler, override, Game\nfrom agents.BasicAgent import BasicAgent\nfrom agents.DQNAgent import DQNAgent\nfrom models.LinearModel import LinearModel\nfrom optimalizers.GeneticAlg import GeneticAlg\nfrom sklearn.preprocessing import OneHotEncoder\n\n\nclass HyperParamCHSHOptimizer(GeneticAlg):\n \"\"\" Works only for DiscreteStatesActions.Environment because of different init parameters \"\"\"\n\n def __init__(self, population_size=15, n_crossover=3, mutation_prob=0.05, game_type=None, CHSH=None,\n max_gates=10, n_questions=2, ALL_POSSIBLE_ACTIONS=None, agent_type=BasicAgent, best_or_worst=\"best\"):\n # Initialize the population - create population of 'size' individuals,\n # each individual is a bit string of length 'word_len'.\n self.population_size = population_size\n self.n_crossover = n_crossover\n self.mutation_prob = mutation_prob\n self.population = [self.generate_individual() for _ in range(self.population_size)]\n self.for_plot = []\n\n self.game_type = game_type\n self.CHSH = CHSH\n self.max_gates = max_gates\n self.n_questions = n_questions\n self.ALL_POSSIBLE_ACTIONS = ALL_POSSIBLE_ACTIONS\n\n self.best_or_worst = best_or_worst\n self.agent_type = agent_type\n\n @override\n def generate_individual(self):\n # Generate random individual.\n # Parameters to be optimalized.\n GAMMA = [1, 0.9, 0.5, 0.1, 0]\n MOMENTUM = [0.9, 0.85, 0.5]\n ALPHA = [1, 0.1, 0.01, 0.001]\n EPS = [1]\n EPS_DECAY = [0.99995, 0.9995, 0.9998]\n EPS_MIN = [0.001]\n N_EPISODES = [2000, 3000, 4000]\n HIDDEN_LAYERS = [[20, 20], [20], [30, 30], [30, 30, 30]]\n BATCH_SIZE = [32, 64, 128, 256]\n reward_functions = [f for name, f in NonLocalGame.abstractEnvironment.__dict__.items()\n if callable(f) and \"reward\" in name]\n\n return [random.choice(GAMMA), random.choice(EPS),\n random.choice(EPS_MIN), random.choice(EPS_DECAY),\n random.choice(MOMENTUM), random.choice(ALPHA),\n random.choice(N_EPISODES), random.choice(HIDDEN_LAYERS),\n random.choice(reward_functions), random.choice(BATCH_SIZE)]\n\n @override\n def fitness(self, x):\n # Returns fitness of a given individual.\n # To be implemented in subclasses\n N = math.floor(x[-4])\n\n env = self.CHSH(self.n_questions, self.game_type, self.max_gates, reward_function=x[-2], anneal=True)\n\n if self.agent_type == BasicAgent:\n agent = BasicAgent(state_size=len(env.repr_state), action_size=len(self.ALL_POSSIBLE_ACTIONS), gamma=x[0], eps=x[1], eps_min=x[2],\n eps_decay=x[3], alpha=x[4], momentum=x[5], ALL_POSSIBLE_ACTIONS=self.ALL_POSSIBLE_ACTIONS,\n model_type=LinearModel)\n scaler = get_scaler(env, N, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS)\n\n else:\n # transform actions to noncorellated encoding\n encoder = OneHotEncoder(drop='first', sparse=False)\n # transform data\n onehot = encoder.fit_transform(ALL_POSSIBLE_ACTIONS)\n onehot_to_action = dict()\n action_to_onehot = dict()\n for a, a_encoded in enumerate(onehot):\n onehot_to_action[str(a_encoded)] = a\n action_to_onehot[a] = str(a_encoded)\n\n HIDDEN_LAYERS = x[-3]\n agent = DQNAgent(state_size=env.state_size, action_size=len(ALL_POSSIBLE_ACTIONS), gamma=x[0], eps=x[1], eps_min=x[2],\n eps_decay=x[3], ALL_POSSIBLE_ACTIONS=self.ALL_POSSIBLE_ACTIONS, learning_rate=x[4], hidden_layers=len(HIDDEN_LAYERS),\n hidden_dim=HIDDEN_LAYERS, onehot_to_action=onehot_to_action, action_to_onehot=action_to_onehot)\n scaler = None\n\n game = Game(scaler, batch_size=x[-1])\n game.evaluate_train(N, agent, env)\n\n fitness_individual = 
game.evaluate_test(agent, env)\n return fitness_individual\n\n @override\n def number_mutation(self, x, prob):\n \"\"\" Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\"\"\"\n potomok = x[:-3]\n for poc in range(len(potomok)):\n if random.random() <= prob: # posledne argumenty nebudu mutovat (N_EPISODES, REWARD_FUNCTION)\n spocitaj = list(potomok)\n priemer = sum(spocitaj) / len(spocitaj)\n sigma_na_druhu = 0\n\n for i in spocitaj:\n sigma_na_druhu += (i - priemer) ** 2\n\n sigma_na_druhu = sigma_na_druhu / (len(spocitaj) - 1) # pocitam gausovu krivku\n\n if random.random() > 0.5:\n nahodne = random.uniform(0, sigma_na_druhu)\n if potomok[poc] - nahodne >= 0:\n potomok[poc] -= nahodne\n\n else:\n nahodne = random.uniform(0, sigma_na_druhu)\n potomok[poc] += nahodne\n\n potomok[poc] = abs(potomok[poc])\n\n return potomok + x[-3:]\n\n @override\n def mutation(self, x, prob):\n return self.number_mutation(x, prob)\n\n @override\n def solve(self, max_generations, goal_fitness=1):\n best = super().solve(max_generations, goal_fitness)\n return best # best\n\n\nif __name__ == \"__main__\":\n # Hyperparameters setting\n ACTIONS = [q + axis + \"0\" for axis in 'xyz' for q in 'ra']\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PERSON for q in QUESTION for a in ACTIONS] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append([\"xxr0\"])\n\n ## Solve to find optimal individual\n from NlgDiscreteStatesActions import Environment\n\n game_type = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n\n ga = HyperParamCHSHOptimizer(population_size=6, n_crossover=5, mutation_prob=0.05,\n game_type=game_type, CHSH=Environment, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, agent_type=DQNAgent)\n best = ga.solve(5) # you can also play with max. generations\n ga.show_individual(best)\n"
},
{
"alpha_fraction": 0.6349534392356873,
"alphanum_fraction": 0.6390899419784546,
"avg_line_length": 14.68852424621582,
"blob_id": "792fb37ad008af4c8fda8bbd80232ca5fa980cb8",
"content_id": "593514b6dd2e2c77abdc3da4933c46dd574364e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 967,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 61,
"path": "/src/doc/_sources/autoapi/NLG/NlgParalelClassical/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NlgParalelClassical`\n==============================\n\n.. py:module:: NLG.NlgParalelClassical\n\n.. autoapi-nested-parse::\n\n evaluating classical strategies for N parallel CHSH games\n inspired by Daniel Nagaj's solution, added memoization and C libraries + encapsulation and abstracion\n\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgParalelClassical.Environment\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgParalelClassical.start\n\n\n.. class:: Environment(num_players=2, n_questions=2, n_games=2)\n\n\n Bases: :py:obj:`NonLocalGame.abstractEnvironment`\n\n .. method:: reset(self)\n\n\n .. method:: step(self, action)\n\n\n .. method:: evaluate_CHSH(self, aa, bb, xx, yy)\n\n\n .. method:: evaluate_parallelCHSH(self, Aanswer, Banswer, NN, printout)\n\n\n .. method:: binary_add(self, blist, n)\n\n\n .. method:: binary_add1(self, blist, n)\n\n\n .. method:: play_all_strategies(self, Nrounds)\n\n\n\n.. data:: start\n \n\n \n\n"
},
{
"alpha_fraction": 0.5388646125793457,
"alphanum_fraction": 0.5620087385177612,
"avg_line_length": 11.324324607849121,
"blob_id": "889b24fb0c1fbe690d16b5be9091bc9bf6790573",
"content_id": "d76c106d0bf1b421250fe5db93f656368ad8a55b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2290,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 185,
"path": "/src/doc/_sources/autoapi/NLG/CHSHPrototype/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.CHSHPrototype`\n========================\n\n.. py:module:: NLG.CHSHPrototype\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.CHSHPrototype.QuantumState\n NLG.CHSHPrototype.QuantumOperation\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.CHSHPrototype.is_unitary\n NLG.CHSHPrototype.win\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.CHSHPrototype.U_X\n NLG.CHSHPrototype.U_H\n NLG.CHSHPrototype.U_alice_0\n NLG.CHSHPrototype.U_alice_1\n NLG.CHSHPrototype.U_bob_0\n NLG.CHSHPrototype.U_bob_1\n NLG.CHSHPrototype.U_alice_1\n NLG.CHSHPrototype.U_bob_0\n NLG.CHSHPrototype.U_bob_1\n NLG.CHSHPrototype.wins\n NLG.CHSHPrototype.a\n NLG.CHSHPrototype.b\n NLG.CHSHPrototype.state\n NLG.CHSHPrototype.result\n NLG.CHSHPrototype.state\n NLG.CHSHPrototype.win_rate\n NLG.CHSHPrototype.evaluation_tactic\n NLG.CHSHPrototype.win_rate1\n NLG.CHSHPrototype.win_rate1\n\n\n.. function:: is_unitary(M)\n\n\n.. class:: QuantumState(vector)\n\n\n .. method:: measure(self)\n\n\n .. method:: measure_analytic(self)\n\n\n .. method:: compose(self, state)\n\n\n .. method:: __repr__(self)\n\n Return repr(self).\n\n\n\n.. class:: QuantumOperation(matrix)\n\n\n .. method:: apply(self, state)\n\n\n .. method:: compose(self, operation)\n\n\n .. method:: __repr__(self)\n\n Return repr(self).\n\n\n\n.. data:: U_X\n :annotation: = [[0, 1], [1, 0]]\n\n \n\n.. data:: U_H\n :annotation: = [None, None]\n\n \n\n.. data:: U_alice_0\n :annotation: = [[1, 0], [0, 1]]\n\n \n\n.. data:: U_alice_1\n :annotation: = [None, None]\n\n \n\n.. data:: U_bob_0\n :annotation: = [None, None]\n\n \n\n.. data:: U_bob_1\n :annotation: = [None, None]\n\n \n\n.. data:: U_alice_1\n \n\n \n\n.. data:: U_bob_0\n :annotation: = [[1, 0], [0, 1]]\n\n \n\n.. data:: U_bob_1\n :annotation: = [[1, 0], [0, 1]]\n\n \n\n.. function:: win(a, b, s, t)\n\n\n.. data:: wins\n :annotation: = 0\n\n \n\n.. data:: a\n :annotation: = []\n\n \n\n.. data:: b\n :annotation: = []\n\n \n\n.. data:: state\n \n\n \n\n.. data:: result\n :annotation: = []\n\n \n\n.. data:: state\n \n\n \n\n.. data:: win_rate\n :annotation: = 0\n\n \n\n.. data:: evaluation_tactic\n :annotation: = [[1, 0, 0, 1], [1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 1, 0]]\n\n \n\n.. data:: win_rate1\n :annotation: = 0\n\n \n\n.. data:: win_rate1\n \n\n \n\n"
},
{
"alpha_fraction": 0.6135521531105042,
"alphanum_fraction": 0.6161990761756897,
"avg_line_length": 35.346153259277344,
"blob_id": "1a9fa08e68f9d06e6780819eafa61e5204e32471",
"content_id": "71dc663975301a3acb939c8d2d6f8b71e0ff8adb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1889,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 52,
"path": "/src/NLG/agents/BasicAgent.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\n\nclass BasicAgent:\n \"\"\" Reinforcement learning agent \"\"\"\n\n def __init__(self, state_size, action_size, gamma, eps, eps_min, eps_decay, alpha, momentum, ALL_POSSIBLE_ACTIONS,\n model_type):\n self.state_size = state_size\n self.action_size = action_size\n self.gamma = gamma # discount rate\n self.epsilon = eps # exploration rate\n self.epsilon_min = eps_min\n self.epsilon_decay = eps_decay\n self.alpha = alpha\n self.momentum = momentum\n self.model = model_type(state_size, action_size)\n self.ALL_POSSIBLE_ACTIONS = ALL_POSSIBLE_ACTIONS\n\n def act(self, state):\n \"\"\" :returns action based on neural model prediction / epsilon greedy \"\"\"\n\n if np.random.rand() <= self.epsilon:\n choice = random.randint(0, self.action_size - 1)\n return self.ALL_POSSIBLE_ACTIONS[choice], choice\n act_values = self.model.predict(state)\n choice = np.argmax(act_values[0])\n return self.ALL_POSSIBLE_ACTIONS[choice], choice\n\n def train(self, state, action, reward, next_state, done):\n \"\"\" performs one training step of neural network \"\"\"\n if done:\n target = reward\n else:\n target = reward + self.gamma * np.amax(self.model.predict(next_state), axis=1)\n\n target_full = self.model.predict(state)\n target_full[0, action] = target\n\n # Run one training step using SGD - stochastic gradient descend\n self.model.sgd(state, target_full, self.alpha, self.momentum)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n \"\"\" loads weights into model \"\"\"\n self.model.load_weights(name)\n\n def save(self, name):\n \"\"\" saves weight into model \"\"\"\n self.model.save_weights(name)"
},
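The `act` method above implements epsilon-greedy selection over the linear Q-model's predictions. A minimal standalone sketch of that rule, with a hypothetical `q_values` array standing in for `self.model.predict(state)`:

```python
import numpy as np

# Minimal epsilon-greedy sketch mirroring BasicAgent.act; `q_values` is a
# hypothetical stand-in for the model's predicted action values.
def epsilon_greedy(q_values, epsilon, rng=np.random.default_rng()):
    if rng.random() <= epsilon:                 # explore with probability epsilon
        return int(rng.integers(len(q_values)))
    return int(np.argmax(q_values))             # otherwise exploit the best estimate

print(epsilon_greedy(np.array([0.1, 0.5, 0.9]), epsilon=0.0))  # always picks index 2
```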
{
"alpha_fraction": 0.6126855611801147,
"alphanum_fraction": 0.6261808276176453,
"avg_line_length": 15.704545021057129,
"blob_id": "79ef85b5b924f092975b4e0a891451ffac581673",
"content_id": "8eb2a9532972ad06f94cbd92d5d6c6694dc83d57",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 741,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 44,
"path": "/src/doc/_sources/autoapi/NLG/NlgContinuousGlobalOptimalization/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NlgContinuousGlobalOptimalization`\n============================================\n\n.. py:module:: NLG.NlgContinuousGlobalOptimalization\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgContinuousGlobalOptimalization.Environment\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgContinuousGlobalOptimalization.ACTIONS\n\n\n.. class:: Environment(n_questions, game_type, max_gates, num_players=2, initial_state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.float64))\n\n\n Bases: :py:obj:`NonLocalGame.abstractEnvironment`\n\n .. method:: reset(self)\n\n\n .. method:: calculate_new_state(self, action)\n\n\n .. method:: step(self, action)\n\n\n\n.. data:: ACTIONS\n :annotation: = ['r0']\n\n \n\n"
},
{
"alpha_fraction": 0.5310734510421753,
"alphanum_fraction": 0.5367231369018555,
"avg_line_length": 12.461538314819336,
"blob_id": "d0e05628da6ded5a9098424200d0cdeeceba2915",
"content_id": "78458ba3918f8d3dfa1ed7ed4ad5d5cca5641a81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 177,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 13,
"path": "/src/doc/_sources/autoapi/NLG/optimalizers/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.optimalizers`\n=======================\n\n.. py:module:: NLG.optimalizers\n\n\nSubmodules\n----------\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n GeneticAlg/index.rst\n\n\n"
},
{
"alpha_fraction": 0.5398625135421753,
"alphanum_fraction": 0.5630011558532715,
"avg_line_length": 45.441490173339844,
"blob_id": "027eda4899fd363e347d99bb114fbe46027da589",
"content_id": "048fdadea23be209d5205617cb3c91306a767986",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8730,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 188,
"path": "/src/NLG/NlgParalelClassical.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "\"\"\"\nevaluating classical strategies for N parallel CHSH games\ninspired by Daniel Nagaj's solution, added memoization and C libraries + encapsulation and abstracion\n\"\"\"\n\nimport itertools\n\nimport NonLocalGame\n\nclass Environment(NonLocalGame.abstractEnvironment):\n\n def __init__(self, num_players=2, n_questions=2, n_games=2):\n self.num_players = num_players\n self.n_questions = n_questions\n\n self.n_games =n_games\n self.n_qubits = 0\n\n self.questions_vectors = itertools.product(list(range(2)), # vectors can be composed of 0s or 1s\n repeat=(self.n_games * pow(2, self.n_games)))\n\n self.memoization = dict()\n\n self.questions = list(\n itertools.product(list(range(2)), # vectors can be composed of 0s or 1s\n repeat=self.n_games))\n\n @NonLocalGame.override\n def reset(self):\n return\n\n @NonLocalGame.override\n def step(self, action):\n return\n\n def evaluate_CHSH(self, aa,bb,xx,yy):\n # evaluates the CHSH game, questions x,y, answers a,b\n # if aa^bb == xx*yy: return 1\n # else: return 0\n return (aa != bb) == (xx and yy)\n #return aa^bb == xx*yy # a xor b ==? x and y\n\n def evaluate_parallelCHSH(self, Aanswer, Banswer, NN, printout):\n # evaluate NN parallel CHSH games\n # for answers answers Aanswer,Banswer\n overallwin = 0 # for how many question sets did AB win all the NN games in parallel?\n questionvectorAlice = [0]*NN # both this and questionvectorBob are binary strings of length NN\n #print('strategy lengths', len(Aanswer),len(Banswer))\n # print('answer strategies', Aanswer,Banswer)\n for kk in range(0,pow(2,NN)): # go over different sets of Alice's questions\n questionvectorBob = [0]*(NN)\n for ll in range(0,pow(2,NN)): # go over different sets of Bob's questions\n # print('questions for A:', questionvectorAlice, ', and for B:', questionvectorBob)\n wincounter = 0 # counting how many of the NN games they win for a given question set\n if printout==1: evalvector = [0]*NN\n for m in range(0,NN): # go over the NN answers\n # the \"m\"-th bit of Alice's answer to question set numbered \"kk\" is Aanswer[kk*NN+m]\n\n #win_game_mm = evaluate_CHSH(Aanswer[kk*NN+m],Banswer[ll*NN+m],questionvectorAlice[m],questionvectorBob[m])\n #wincounter = wincounter + win_game_mm\n #print(win_game_mm,wincounter)\n wincounter = wincounter + self.evaluate_CHSH(Aanswer[kk*NN+m],Banswer[ll*NN+m],questionvectorAlice[m],questionvectorBob[m])\n if printout==1: evalvector[m] = self.evaluate_CHSH(Aanswer[kk*NN+m],Banswer[ll*NN+m],questionvectorAlice[m],questionvectorBob[m])\n if wincounter == NN: # only if we won all NN parallel ones\n overallwin = overallwin + 1\n\n if printout==1:\n print('Ali questions', questionvectorAlice)\n print('Bob questions', questionvectorBob)\n print('Alice answers', Aanswer[kk*NN:(kk+1)*NN])\n print('Bob answers', Banswer[ll*NN:(ll+1)*NN])\n print('evaluations ', evalvector)\n print('won ',sum(evalvector),'/',NN, ' so they ')\n if wincounter==NN:\n print('+ WON the composite game')\n else:\n print('- LOST this composite game')\n print('overall won for ',overallwin,'/',kk*pow(2,NN)+ll+1,' possible sets of questions')\n print('---------------------------------------')\n\n questionvectorBob = self.binary_add(questionvectorBob,(ll+1)%(self.n_questions*self.n_games))\n questionvectorAlice = self.binary_add(questionvectorAlice,(kk+1)%(self.n_questions*self.n_games))\n # questionvectorBob = self.binary_add(questionvectorBob,(ll))\n # questionvectorAlice = self.binary_add(questionvectorAlice,(kk))\n # print('winning probability ',overallwin/pow(2,2*NN))\n 
return overallwin/pow(2,2*NN) # return the winning probability (count question sets/number of question sets)\n\n def binary_add(self,blist, n):\n # expect a list of length N with values 0/1\n return self.questions[n]\n\n def binary_add1(self,blist, n):\n # expect a list of length N with values 0/1\n try:\n return self.memoization[n]\n except KeyError:\n res = self.questions_vectors.__next__()\n self.memoization[n] = res\n return res\n\n # carry = 1\n # k = 0\n # N = len(blist)\n # newblist = blist\n # while k<N:\n # if blist[k]==0:\n # newblist[k] = 1\n # k = N\n # else:\n # newblist[k] = 0\n # k = k+1\n #\n # return newblist\n\n def play_all_strategies(self, Nrounds):\n print('---------------------------------- ')\n if Nrounds == 1:\n print('a single CHSH game')\n else:\n print(Nrounds, 'rounds of CHSH games, must win all of them')\n\n\n bestsofar = 0\n\n AliceStrategy = [0] * (Nrounds * pow(2, Nrounds))\n # AliceStrategy = [a, b, c, d, e, f, g, h, i, ...]\n # is made of blocks of length N (her composite answers to questions)\n # means answer abc to questions 000, cde to questions 001, fgh to questions 010, etc.\n # the b-th bit of the answer to question set Q is AliceStrategy[Q*Nrounds + b]\n for AliceStrategyCounter in range(0, pow(2, len(AliceStrategy))):\n AliceStrategy = self.binary_add1(AliceStrategy,(AliceStrategyCounter+1)%(self.n_questions*self.n_games))\n # AliceStrategy = self.binary_add1(AliceStrategy, (AliceStrategyCounter))\n\n BobStrategy = [0] * (Nrounds * pow(2, Nrounds))\n # same encoding for Bob's strategy [ans to 000, ans to 001, ans to 010, ...]\n for BobStrategyCounter in range(0, pow(2, len(AliceStrategy))):\n BobStrategy = self.binary_add1(BobStrategy,(BobStrategyCounter+1)%(self.n_questions*self.n_games))\n # BobStrategy = self.binary_add1(BobStrategy, (BobStrategyCounter))\n\n winning_probability = self.evaluate_parallelCHSH(AliceStrategy, BobStrategy, Nrounds, 0) # the last 0 is no printout\n # print('tested strategies (',AliceStrategyCounter,BobStrategyCounter,'). 
win:',winning_probability)\n # print('..........................................')\n if winning_probability > bestsofar:\n bestsofar = winning_probability\n print('--------------------- best strategy candidate ------------')\n print(bestsofar)\n print(AliceStrategy, 'Alice strategy #', AliceStrategyCounter)\n print(BobStrategy, 'Bob strategy #', BobStrategyCounter)\n print('----------------------------------------------------------')\n\n print('maximum win probability: ', bestsofar)\n\n\nif __name__ == '__main__':\n # go over all strategies\n # Alice's strategy is: for each set of her questions, give an answer\n # there are N questions for her, so she needs how to answwr 2^N different questions\n # each answer is an N bit string again, which means she has\n # (2^N)^(2^N) choices in her strategy!!!\n # TOO many strategies to try!\n # for N=1, she needs to decide on a 0 answer and a 1 answer...\n # 4 strategies = 2^2\n # her (0->0, 1->0) (0->0, 1->1) (0->1, 1->0) (0->1, 1->1)\n # for N=2, she needs to decide on a 00, 01, 10, and 11 answer of length 2\n # 4^4 = 256 strategies\n # for N=3, she needs to decide on answers to 000, 001, 010, 011, ..., 111\n # the answers have length 3 (8 possible answers in each)\n # 8^8 = 2^24 = 16777216 strategies\n\n # the best 1-round strategy gives 0.75 = 3/4\n # see it here:\n import time\n\n start = time.time()\n # env = Environment(n_questions=2, n_games=1)\n # env.evaluate_parallelCHSH([1, 1],[1, 1],1,1)\n\n env = Environment(n_questions=2, n_games=2)\n env.play_all_strategies(2)\n\n print(time.time() - start)\n # the best 2-round strategy gives 0.625 = 10/16\n # see it here:\n env = Environment(n_questions=2, n_games=3)\n # env.evaluate_parallelCHSH([1, 0, 0, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 1, 1, 0],2,1)\n\n\n env.play_all_strategies(3)"
},
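The `__main__` comments above count one player's deterministic strategies for N parallel games as (2^N)^(2^N). A standalone check of the CHSH win predicate used by `evaluate_CHSH` and of that strategy count (illustrative, not the module's own API):

```python
# Standalone sanity check of the CHSH predicate and the strategy counts
# quoted in the module's __main__ comments.
def chsh_win(a, b, x, y):
    return (a != b) == (x and y)  # win iff a XOR b == x AND y

# Best deterministic single-round strategy: both always answer 0 -> wins 3/4.
print(sum(chsh_win(0, 0, x, y) for x in (0, 1) for y in (0, 1)) / 4)  # 0.75

# One player's deterministic strategies for N parallel rounds: (2^N) ** (2^N).
for N in (1, 2, 3):
    print(N, (2 ** N) ** (2 ** N))  # 4, 256, 16777216
```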
{
"alpha_fraction": 0.6477935314178467,
"alphanum_fraction": 0.6511240601539612,
"avg_line_length": 16.880596160888672,
"blob_id": "9df503c4717008c7d45d7685c9b7bff78338906c",
"content_id": "83d90953a6ecb59cb07b76583bec13f9a0f1478b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1201,
"license_type": "permissive",
"max_line_length": 176,
"num_lines": 67,
"path": "/src/doc/_sources/autoapi/NLG/agents/DQNAgent/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.agents.DQNAgent`\n==========================\n\n.. py:module:: NLG.agents.DQNAgent\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.agents.DQNAgent.ReplayBuffer\n NLG.agents.DQNAgent.DQNAgent\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.agents.DQNAgent.predict\n NLG.agents.DQNAgent.train_one_step\n\n\n\n.. class:: ReplayBuffer(obs_dim, act_dim, size)\n\n\n Creates replay buffer to learn from previous episodes\n\n .. method:: store(self, obs, act, rew, next_obs, done)\n\n\n .. method:: sample_batch(self, batch_size=64)\n\n\n\n.. function:: predict(model, np_states)\n\n\n.. function:: train_one_step(model, criterion, optimizer, inputs, targets)\n\n\n.. class:: DQNAgent(state_size, action_size, gamma, eps, eps_min, eps_decay, ALL_POSSIBLE_ACTIONS, learning_rate, hidden_layers, hidden_dim, onehot_to_action, action_to_onehot)\n\n\n Bases: :py:obj:`object`\n\n .. method:: update_replay_memory(self, state, action, reward, next_state, done)\n\n\n .. method:: act(self, state)\n\n :returns action based on neural model prediction / epsilon greedy \n\n\n .. method:: replay(self, batch_size=64)\n\n\n .. method:: load(self, name)\n\n\n .. method:: save(self, name)\n\n\n\n"
},
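The `ReplayBuffer` documented above stores `(obs, act, rew, next_obs, done)` transitions and samples uniform minibatches. A minimal sketch of that pattern (a guess at the shape of the documented interface, not the project's actual implementation):

```python
import numpy as np

# Minimal circular replay buffer sketch matching the documented store /
# sample_batch interface (illustrative only).
class TinyReplayBuffer:
    def __init__(self, obs_dim, size):
        self.obs = np.zeros((size, obs_dim), np.float32)
        self.next_obs = np.zeros((size, obs_dim), np.float32)
        self.act = np.zeros(size, np.int64)
        self.rew = np.zeros(size, np.float32)
        self.done = np.zeros(size, np.float32)
        self.ptr, self.count, self.size = 0, 0, size

    def store(self, obs, act, rew, next_obs, done):
        i = self.ptr
        self.obs[i], self.act[i], self.rew[i] = obs, act, rew
        self.next_obs[i], self.done[i] = next_obs, done
        self.ptr = (self.ptr + 1) % self.size          # overwrite oldest entries
        self.count = min(self.count + 1, self.size)

    def sample_batch(self, batch_size=64):
        idx = np.random.randint(0, self.count, size=batch_size)
        return dict(obs=self.obs[idx], act=self.act[idx], rew=self.rew[idx],
                    next_obs=self.next_obs[idx], done=self.done[idx])
```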
{
"alpha_fraction": 0.5407309532165527,
"alphanum_fraction": 0.5695728659629822,
"avg_line_length": 25.561403274536133,
"blob_id": "434c497591e937aab23269cdc5fb219444a4bf79",
"content_id": "46adc25f03f2b1c3258b877b9a5d2c8a676f3065",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4542,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 171,
"path": "/src/NLG/CHSHPrototype.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\n\nfrom qiskit.circuit.library import RYGate\n\n\ndef is_unitary(M):\n M_star = np.transpose(M).conjugate()\n identity = np.eye(len(M))\n return np.allclose(identity, np.matmul(M_star, M))\n\n\nclass QuantumState:\n def __init__(self, vector):\n length = np.linalg.norm(vector)\n if not abs(1 - length) < 0.00001:\n raise ValueError('Quantum states must be unit length.')\n self.vector = np.array(vector)\n\n def measure(self):\n choices = range(len(self.vector))\n weights = [abs(a) ** 2 for a in self.vector]\n print(weights)\n outcome = random.choices(choices, weights)[0]\n\n new_state = np.zeros(len(self.vector))\n new_state[outcome] = 1\n self.vector = new_state\n return outcome\n\n def measure_analytic(self):\n choices = range(len(self.vector))\n weights = [abs(a) ** 2 for a in self.vector]\n\n return weights\n\n def compose(self, state):\n new_vector = np.kron(self.vector, state.vector)\n return QuantumState(new_vector)\n\n def __repr__(self):\n return '<QuantumState: {}>'.format(', '.join(map(str, self.vector)))\n\n\nclass QuantumOperation:\n def __init__(self, matrix):\n if not is_unitary(matrix):\n raise ValueError('Quantum operations must be unitary')\n self.matrix = matrix\n\n def apply(self, state):\n new_vector = np.matmul(self.matrix, state.vector)\n return QuantumState(new_vector)\n\n def compose(self, operation):\n new_matrix = np.kron(self.matrix, operation.matrix)\n return QuantumOperation(new_matrix)\n\n def __repr__(self):\n return '<QuantumOperation: {}>'.format(str(self.matrix))\n\n\nfrom math import sqrt, cos, sin, pi\n\n# The unitary matrices of Alice and Bob's possible operations.\nU_X = [[0, 1],\n [1, 0]] # identity I\n\nU_H = [[1 / sqrt(2), 1 / sqrt(2)],\n [1 / sqrt(2), -1 / sqrt(2)]]\n\nU_alice_0 = [[1, 0],\n [0, 1]] # identity I\n\nU_alice_1 = [[cos(pi / 4), sin(pi / 4)],\n [-sin(pi / 4), cos(pi / 4)]]\nU_bob_0 = [[cos(pi / 8), sin(pi / 8)],\n [-sin(pi / 8), cos(pi / 8)]]\nU_bob_1 = [[cos(3 * pi / 8), sin(3 * pi / 8)],\n [-sin(3 * pi / 8), cos(3 * pi / 8)]]\n\nU_alice_1 = RYGate(-67.5 * np.pi / 180).to_matrix()\nU_bob_0 = [[1, 0],\n [0, 1]]\nU_bob_1 = [[1, 0],\n [0, 1]]\n\n\n# Alice and Bob win when their input (a, b)\n# and their response (s, t) satisfy this relationship.\ndef win(a, b, s, t):\n return (a and b) == (s != t)\n\n\nwins = 0\n\n# generate \"questions\" in equal number\na = []\nb = []\nfor x in range(2):\n for y in range(2):\n a.append(x)\n b.append(y)\n\n# random.shuffle(a)\n# random.shuffle(b)\nstate = [1 / sqrt(2), 0, 0, 1 / sqrt(2)]\n# play game\n\nresult = []\nfor i in range(4):\n # Alice and Bob share an entangled state\n state = QuantumState([0,1 / sqrt(2),-1 / sqrt(2),0 ])\n\n # The input to alice and bob is random\n # Alice chooses her operation based on her input\n if a[i] == 0:\n alice_op = QuantumOperation(U_alice_0)\n if a[i] == 1:\n alice_op = QuantumOperation(U_alice_1)\n\n # Bob chooses his operation based on his input\n if b[i] == 0:\n bob_op = QuantumOperation(U_bob_0)\n if b[i] == 1:\n bob_op = QuantumOperation(U_bob_1)\n\n # We combine Alice and Bob's operations\n combined_operation = alice_op.compose(bob_op)\n\n # Alice and Bob make their measurements\n new_state = combined_operation.apply(state)\n # print(new_state)\n result.append(combined_operation.apply(state).measure_analytic())\n\nwin_rate = 0\nfor mat in result[:-1]:\n print(mat)\n win_rate += 1 / 4 * (mat[0] + mat[3])\n\nwin_rate += 1 / 4 * (result[-1][1] + result[-1][2])\nprint(win_rate)\nevaluation_tactic = [[1, 0, 0, 1],\n [1, 0, 
0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\nwin_rate1 = 0\nfor x, riadok in enumerate(evaluation_tactic):\n for y, stlpec in enumerate(riadok):\n win_rate1 += (stlpec * result[x][y])\nwin_rate1 = win_rate1 * 1 / 4\n\n# test game_type DONE\nprint(win_rate1)\n\n# assert (win_rate==win_rate1) # test game_type DONE\n\n# # convert the 4 state measurement result to two 1-bit results\n# if result == 0:\n# s, t = False, False\n# if result == 1:\n# s, t = False, True\n# if result == 2:\n# s, t = True, False\n# if result == 3:\n# s, t = True, True\n\n# # Check if they won and add it to the total\n# wins += win(a[i], b[i], s, t)\n\n# print('They won this many times:', wins)\n"
},
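The prototype's measured win rate should approach the CHSH quantum optimum cos²(π/8) ≈ 0.854. A standalone NumPy check of that number using the same RY rotation convention (a textbook construction on the |Φ+⟩ state, not the prototype's exact operators):

```python
import numpy as np

# Verify the CHSH quantum optimum cos^2(pi/8) ~ 0.854 with plain NumPy;
# textbook angles on (|00> + |11>)/sqrt(2), not the prototype's exact setup.
def ry(theta):
    return np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2), np.cos(theta / 2)]])

bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
alice = {0: ry(0), 1: ry(np.pi / 2)}
bob = {0: ry(np.pi / 4), 1: ry(-np.pi / 4)}

win = 0.0
for x in (0, 1):
    for y in (0, 1):
        probs = np.abs(np.kron(alice[x], bob[y]) @ bell) ** 2
        same, diff = probs[0] + probs[3], probs[1] + probs[2]
        win += 0.25 * (diff if (x and y) else same)  # win iff a XOR b == x AND y
print(win)  # ~0.8536 = cos(pi/8) ** 2
```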
{
"alpha_fraction": 0.5512082576751709,
"alphanum_fraction": 0.5581128001213074,
"avg_line_length": 8.761363983154297,
"blob_id": "228960f8e8dc5c2c3c4848e493e81b47b023a171",
"content_id": "ec9661da6fc1c5b9a84b6c223c7b5127b84b75dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 869,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 88,
"path": "/src/doc/_sources/autoapi/decimalrange/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`decimalrange`\n===================\n\n.. py:module:: decimalrange\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n decimalrange.Game\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n decimalrange.complex_array_to_real\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n decimalrange.s\n decimalrange.s1\n decimalrange.s\n decimalrange.s\n decimalrange.s1\n decimalrange.s1\n decimalrange.responses\n\n\n.. data:: s\n \n\n \n\n.. data:: s1\n \n\n \n\n.. data:: s\n \n\n \n\n.. data:: s\n \n\n \n\n.. data:: s1\n \n\n \n\n.. data:: s1\n \n\n \n\n.. class:: Game(scaler)\n\n\n .. method:: play_one_episode(self, agent, env, is_train)\n\n\n .. method:: evaluate_train(self, N, agent, env)\n\n\n .. method:: evaluate_test(self, agent, n_questions, tactic, max_gates, env)\n\n\n\n.. function:: complex_array_to_real(inp_array)\n\n\n.. data:: responses\n \n\n \n\n"
},
{
"alpha_fraction": 0.6148267388343811,
"alphanum_fraction": 0.6325544118881226,
"avg_line_length": 15.25,
"blob_id": "e66ffc60be4d2cb08a37ef39c8ed3d5aa084eac5",
"content_id": "bfb4bb570cdc56c470dbcffa2ce4d1d0d1a8dcd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1241,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 76,
"path": "/src/doc/_sources/autoapi/NLG/NlgDeterministic/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NlgDeterministic`\n===========================\n\n.. py:module:: NLG.NlgDeterministic\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgDeterministic.Environment\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgDeterministic.rule\n NLG.NlgDeterministic.create\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgDeterministic.game_type\n\n\n.. class:: Environment(game_type, num_players=2, n_questions=2)\n\n\n Bases: :py:obj:`NonLocalGame.abstractEnvironment`\n\n creates CHSH for classic deterministic strategies, works small for 4x4 games \n\n .. method:: reset(self)\n\n\n .. method:: step(self, action)\n\n\n .. method:: index(self, response)\n\n :returns index of response so that it can be mapped to state\n\n\n .. method:: evaluate(self, question, response)\n\n :returns winning accuracy to input question based on response \n\n\n .. method:: play_all_strategies(self)\n\n plays 16 different strategies,evaluate each and :returns: the best accuracy from all strategies \n\n\n .. method:: response_rek(self, n)\n\n\n\n.. function:: rule(a, b, x, y)\n\n\n.. function:: create(game_type)\n\n\n.. data:: game_type\n :annotation: = [[1, 0, 0, 1], [1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 1, 0]]\n\n \n\n"
},
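`play_all_strategies` is documented as trying 16 strategies: with two binary questions and binary answers, each player has 2² = 4 deterministic answer functions, hence 4 × 4 = 16 joint strategies. A brute-force sketch for the CHSH matrix shown above (standalone, not the module's implementation):

```python
from itertools import product

# Enumerate all 4 x 4 = 16 deterministic strategies for the CHSH game.
def chsh_win(a, b, x, y):
    return (a != b) == (x and y)  # a XOR b == x AND y

best, worst = 0.0, 1.0
for fa in product((0, 1), repeat=2):        # Alice answers fa[x] to question x
    for fb in product((0, 1), repeat=2):    # Bob answers fb[y] to question y
        acc = sum(chsh_win(fa[x], fb[y], x, y) for x in (0, 1) for y in (0, 1)) / 4
        best, worst = max(best, acc), min(worst, acc)
print(best, worst)  # 0.75 0.25
```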
{
"alpha_fraction": 0.5686735510826111,
"alphanum_fraction": 0.5828527212142944,
"avg_line_length": 40.3095588684082,
"blob_id": "28e426b060b12d540c9fa0e74eccfcd1f966c95b",
"content_id": "af4876a97ac3fc50f6a1d490c530be006dfac2ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27223,
"license_type": "permissive",
"max_line_length": 197,
"num_lines": 659,
"path": "/src/NLG/NonLocalGame.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import math\nfrom math import sqrt\n\nimport matplotlib.pyplot as plt\nfrom qiskit.extensions import RYGate, RZGate, RXGate, IGate, CXGate\nfrom sklearn.preprocessing import StandardScaler\n\nfrom NLG.agents.BasicAgent import BasicAgent\nfrom NLG.agents.DQNAgent import DQNAgent\nfrom NLG.models.LinearModel import LinearModel\n\n\ndef get_scaler(env, N, ALL_POSSIBLE_ACTIONS, round_to=2):\n \"\"\":returns scikit-learn scaler object to scale the states\"\"\"\n # Note: you could also populate the replay buffer here\n states = []\n for _ in range(N):\n action = np.random.choice(ALL_POSSIBLE_ACTIONS)\n state, reward, done = env.step(action)\n states.append(np.round(state, round_to))\n\n if done:\n break\n\n scaler = StandardScaler()\n scaler.fit(states)\n return scaler\n\n\ndef show_plot_of(plot_this, label, place_line_at=()):\n # plot relevant information\n fig_dims = (10, 6)\n\n fig, ax = plt.subplots(figsize=fig_dims)\n\n for pl in place_line_at:\n plt.axhline(y=pl, color='r', linestyle='-')\n\n plt.xlabel('Epochs')\n plt.ylabel(label)\n\n plt.plot(plot_this)\n plt.show()\n\n\ndef override(f): return f\n\n\nfrom abc import ABC, abstractmethod\n\n\nclass abstractEnvironment(ABC):\n \"\"\" abstract environment to create CHSH framework\n\n actions are expected in this format\n\n ACTIONS = [q + axis + \"0\" for axis in 'xyz' for q in 'ra']\n ACTIONS = [q + axis + \"0\" for axis in 'y' for q in 'r']\n PLAYER = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PLAYER for q in QUESTION for a in ACTIONS] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append([\"xxr0\"])\n\n # for 1 game with 2 EPR\n ALL_POSSIBLE_ACTIONS.append([\"a0cxnot\"])\n ALL_POSSIBLE_ACTIONS.append([\"b0cxnot\"])\n #\n # for xor paralel with 2EPR\n ALL_POSSIBLE_ACTIONS.append([\"a0cxnotr\"])\n ALL_POSSIBLE_ACTIONS.append([\"b0cxnotr\"])\"\"\"\n\n @abstractmethod\n def reset(self):\n \"\"\"Return initial_time_step.\"\"\"\n self.counter = 1\n self.history_actions = []\n self.state = self.initial_state.copy()\n self.accuracy = self.calc_accuracy([self.measure_probabilities_analytically() for _ in range(len(self.game_type))])\n self.repr_state = np.array([x for _ in range(len(self.game_type)) for x in self.state], dtype=np.complex64)\n return self.repr_state\n\n @abstractmethod\n def step(self, action):\n \"\"\"Apply action and return new time_step.\"\"\"\n pass\n\n def measure_probabilities_analytically(self):\n \"\"\" :returns probabilities of questions (e.g. 00,01,10,11) happening in matrix \"\"\"\n probabilities = [abs(a) ** 2 for a in self.state]\n return probabilities\n\n def calc_accuracy(self, result):\n \"\"\" Calculates accurary by going through rules of the game given by game_type matrix\n :returns winning probability / accuracy / win rate based on winning game_type \"\"\"\n win_rate = 0\n for x, combination_of_questions in enumerate(self.game_type):\n for y, query in enumerate(combination_of_questions):\n win_rate += (query * result[x][y])\n win_rate = win_rate * 1 / len(self.game_type)\n return win_rate\n\n # def EPR_result(self, result):\n # \"\"\" If state is bigger than with 2 qubits, we must reduce state so that it matches the scale of the game.\n # This functions reduces bigger states result to smaller one by taking the first bit. 
\"\"\"\n # if self.n_qubits <= 2: return result\n #\n # new_result = []\n # for r, row in enumerate(result):\n # new_result.append([])\n # for c in range(0, len(row), self.reduce_by * 2):\n # new_result[r].append(\n # sum(result[r][c:(c + self.reduce_by // 2)]) +\n # sum(result[r][c + self.reduce_by:(c + self.reduce_by + self.reduce_by // 2)])\n # )\n # new_result[r].append(\n # sum(result[r][(c + self.reduce_by // 2): c + self.reduce_by]) +\n # sum(result[r][(c + self.reduce_by + self.reduce_by // 2):(c + self.reduce_by * 2)])\n # )\n #\n # return new_result\n\n # def paralel_non_local(self, result):\n # \"\"\" selects probabilities for paralel games \"\"\"\n #\n # return self.calc_acc(result)\n # # dividing_to_paralel = dict()\n # for state in result:\n # for x in range(len(state)):\n # dividing_to_paralel[self.possible_states[x]] = state[x]\n #\n # new_result_1 = []\n # new_result_2 = []\n # for s in range(len(result)):\n # paralel_1 = dict()\n # paralel_2 = dict()\n # for key in dividing_to_paralel.keys():\n # try: paralel_1[str(key[0]) + str(key[2])] += dividing_to_paralel[key]\n # except KeyError: paralel_1[str(key[0]) + str(key[2])] = dividing_to_paralel[key]\n # try: paralel_2[str(key[1]) + str(key[3])] += dividing_to_paralel[key]\n # except KeyError: paralel_2[str(key[1]) + str(key[3])] = dividing_to_paralel[key]\n #\n # new_result_1.append(list(paralel_1.values()))\n # new_result_2.append(list(paralel_2.values()))\n #\n # return self.calc_acc(new_result_1) * self.calc_acc(new_result_2)\n\n def n_qubits_from_state(self):\n \"\"\" There are 2^n states of n qubits, to get the n, we need to make log2 from state\"\"\"\n assert len(self.state) % 2 == 0\n return int(math.log(len(self.state), 2))\n\n def count_gates(self):\n \"\"\" :returns count of relevant gates \"\"\"\n count = 0\n for action in self.history_actions:\n if action in {\"xxr0\"}: # ending action\n pass\n # elif action in {\"smallerAngle\", \"biggerAngle\"}:\n # count += 0.5\n else:\n count += 1\n\n return count\n\n def get_gate(self, action):\n \"\"\" :returns gate got from string code of action \"\"\"\n gate = action[2:4]\n if gate == \"rx\" or gate == \"ax\":\n return RXGate\n elif gate == \"ry\" or gate == \"ay\":\n return RYGate\n elif gate == \"rz\" or gate == \"az\":\n return RZGate\n elif gate == \"cx\":\n return CXGate\n else:\n return IGate\n\n\n def reward_only_difference(self, difference):\n # reward is the increase in winning probability\n return difference\n\n def reward_qubic(self, difference):\n return (difference ** 3) * 1000\n\n def reward_only_best(self, difference):\n \"\"\" reward only if it its better than results before \"\"\"\n reward = difference * 100\n\n if np.round(self.accuracy, 2) > np.round(self.max_acc, 2):\n reward += 50 * (self.max_acc - self.accuracy)\n self.min_gates = len(self.history_actions)\n self.max_acc = self.accuracy\n elif np.round(self.accuracy, 2) == np.round(self.max_acc, 2):\n if self.min_gates > len(self.history_actions):\n self.min_gates = len(self.history_actions)\n\n if self.counter == self.max_gates or self.history_actions[-1] == \"xxr0\":\n if np.round(self.max_acc, 2) == np.round(self.accuracy, 2) and self.min_gates == self.count_gates():\n reward = 5000 * (1 / (self.count_gates() + 1)) * self.accuracy\n elif np.round(self.max_acc, 2) == np.round(self.accuracy, 2):\n reward -= 1000 * (self.count_gates() + 1) / self.accuracy\n else:\n reward -= 10000 * (self.count_gates() + 1) / self.accuracy # alebo tu dam tiez nejaky vzorcek\n return reward\n\n def 
reward_combined(self, difference):\n reward = difference\n if np.round(self.accuracy, 2) >= np.round(self.max_acc, 2):\n self.max_acc = self.accuracy\n if self.history_actions[-1] == \"xxr0\":\n reward += 80 * (1 / (self.count_gates() + 1)) * self.accuracy # alebo za count_gates len(history_actuons)\n # if self.counter == self.max_gates:\n # reward += 50 * (1 / (self.count_gates() + 1))\n return reward\n\n def complex_array_to_real(self, inp_array):\n \"\"\" decomposes complex array into array of real numbers with double size. \"\"\"\n return np.concatenate((np.real(inp_array), np.imag(inp_array)))\n\n\nimport random\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\nimport pickle\nimport numpy as np\n\n\nclass Game:\n \"\"\" creates framework for easier manipulation \"\"\"\n\n def __init__(self, scaler=None, round_to=2, batch_size=32):\n self.scaler = scaler\n self.round_to = round_to\n self.batch_size = batch_size\n\n def play_one_episode(self, agent, env, DO):\n \"\"\" Plays one episode of CHSH training\n :returns last accuracy acquired and rewards from whole episode \"\"\"\n # in this version we will NOT use \"exploring starts\" method\n # instead we will explore using an epsilon-soft policy\n state = env.reset()\n if self.scaler is not None: state = self.scaler.transform([state])\n else: state = np.array([np.around(state, self.round_to)], dtype=np.float32)\n done = False\n\n # be aware of the timing\n # each triple is s(t), a(t), r(t)\n # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t)\n\n rew_accum = 0\n while not done:\n action = agent.act(state)\n next_state, reward, done = env.step(action[0])\n if self.scaler is not None: next_state = self.scaler.transform([np.around(next_state, self.round_to)])\n else: next_state = np.array([np.around(next_state, self.round_to)], dtype=np.float32)\n if DO == 'train':\n if type(agent) == BasicAgent:\n agent.train(state.copy(), action[1], reward, next_state.copy(), done)\n elif type(agent) == DQNAgent:\n agent.update_replay_memory(state.copy(), action[1], reward, next_state.copy(), done)\n agent.replay(self.batch_size)\n state = next_state.copy()\n rew_accum += reward\n try: print(env.memory_state[tuple(env.history_actions)][2])\n except: print(env.history_actions)\n # print(\"state: \", env.repr_state)\n return env.accuracy, rew_accum\n\n def evaluate_train(self, N, agent, env):\n \"\"\" Performes the whole training of agent in env in N steps\n :returns portfolio value and rewards for all episodes - serves to plot how it has trained\"\"\"\n DO = \"train\"\n\n portfolio_value = []\n rewards = []\n\n for e in range(N):\n val, rew = self.play_one_episode(agent, env, DO)\n print('episode:', end=' ')\n print(e, end=' ')\n print('acc:', end=' ')\n print(val)\n print('rew:', end=' ')\n print(rew)\n\n portfolio_value.append(val) # append episode end portfolio value\n rewards.append(rew)\n\n # save the weights when we are done\n if DO == 'train':\n # # save the DQN\n\n agent.save(f'.training/linear.npz')\n\n # save the scaler\n with open(f'../.training/scaler.pkl', 'wb') as f:\n pickle.dump(self.scaler, f)\n\n return portfolio_value, rewards\n\n def evaluate_test(self, agent, env):\n \"\"\" Tests what has the agent learnt in N=1 steps :returns accuracy and reward \"\"\"\n DO = \"test\"\n\n portfolio_value = []\n if DO == 'test':\n N = 1\n\n # then load the previous scaler\n if self.scaler != None:\n with open(f'../.training/scaler.pkl', 'rb') as f:\n self.scaler = pickle.load(f)\n\n # make sure epsilon is not 1!\n # no need 
to run multiple episodes if epsilon = 0, it's deterministic, it outputs always what it has already learnt\n agent.epsilon = 0\n\n # load trained weights\n agent.load(f'.training/linear.npz')\n\n # play the game num_episodes times\n\n for e in range(N):\n val = self.play_one_episode(agent, env, DO)\n print('Test value:', end=' ')\n print(val)\n\n portfolio_value.append(val) # append episode end portfolio value\n\n return portfolio_value\n\n\nimport itertools\n\n\ndef game_with_rows_all_zeroes(game):\n \"\"\" Checks whether there is not full zero row in game \"\"\"\n for row in game:\n if 1 not in row or 0 not in row:\n return True\n return False\n\n\ndef generate_only_interesting_games(size=4, n_questions=2):\n \"\"\" Generates only interesting evaluation tactics\n because some are almost duplicates and some will have no difference between classic and quantum strategies. \"\"\"\n product = list(itertools.product(list(range(n_questions)), repeat=size))\n games = list(itertools.product(product, repeat=size))\n print(len(games))\n if size != 4: return games # this function works only for size games of size 4, in bigger scenarios its harder to tell which game is symmetric so easily\n interesting_games = dict()\n for game in games:\n if game_with_rows_all_zeroes(game): continue # hry, ktore maju nulove riadky su nezaujimave tiez\n try:\n if interesting_games[(game[1], game[0], game[3], game[2])]: pass # x za y, symetricke hry\n except KeyError:\n try:\n if interesting_games[(game[3], game[2], game[1], game[0])]: pass # 0 za 1, symetricke hry\n except KeyError:\n interesting_games[game] = True\n\n print(len(interesting_games.keys()))\n return list(interesting_games.keys())\n\n\nfrom NLG import NlgDeterministic\n\n\ndef play_deterministic(game, which=\"best\"):\n \"\"\" Learns to play the best classic strategy according to game \"\"\"\n env = NlgDeterministic.Environment(game)\n best, worst = env.play_all_strategies()\n return best, worst\n\n\nfrom NLG import NlgDiscreteStatesActions\nfrom NLG import NlgGeneticOptimalization\n\n\ndef quantumGEN(states, game):\n \"\"\" Plays nonlocal game using genetic algorithm multiple -lenght(states)- times and returns the best and the worst result.\n Works good for small nonlocal games with 1epr pair. For bigger games reinforcement learning is much better choice. 
\"\"\"\n\n best = 0\n worst = 1\n min_state = None\n max_state = None\n min_strategy = None\n max_strategy = None\n\n for s in states:\n ACTIONS2 = ['r' + axis + \"0\" for axis in 'y']\n # ACTIONS2.extend(ACTIONS) # complexne gaty zatial neural network cez sklearn nedokaze , cize S, T, Y\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [p + q + a for p in PERSON for q in QUESTION for a in ACTIONS2] # place one gate at some place\n\n env_max = NlgGeneticOptimalization.CHSHgeneticOptimizer(population_size=30, n_crossover=len(ALL_POSSIBLE_ACTIONS) - 1, mutation_prob=0.1,\n history_actions=ALL_POSSIBLE_ACTIONS,\n game_type=game, best_or_worst=\"best\", state=s)\n res_max = env_max.solve(30)\n\n env_min = NlgGeneticOptimalization.CHSHgeneticOptimizer(population_size=30, n_crossover=len(ALL_POSSIBLE_ACTIONS) - 1, mutation_prob=0.1,\n history_actions=ALL_POSSIBLE_ACTIONS,\n game_type=game, best_or_worst=\"worst\", state=s)\n res_min = env_min.solve(30)\n\n # take the best found quantum, not just learned value\n if res_max[1] > best:\n best = res_max[1]\n max_strategy = res_max[0]\n max_state = res_max[2]\n\n # take the best found quantum, not just learned value\n if res_min[1] < worst:\n worst = res_min[1]\n min_strategy = res_min[0]\n min_state = res_min[2]\n\n return best, worst, env_min.complex_array_to_real(min_state), env_min.complex_array_to_real(max_state), min_strategy, max_strategy\n\n\ndef quantumNN(states, agent_type, which, game):\n \"\"\" Plays nonlocal game using reinforcement learning multiple -lenght(states)- times and returns the best and the worst result. \"\"\"\n\n # ACTIONS2 = ['r' + axis + str(180 / 32 * i) for i in range(1, 16) for axis in 'y']\n # ACTIONS = ['r' + axis + str(-180 / 32 * i) for i in range(1, 16) for axis in 'y']\n ACTIONS2 = ['r' + axis + \"0\" for axis in 'xyz']\n # ACTIONS2.extend(ACTIONS) # complexne gaty zatial neural network cez sklearn nedokaze , cize S, T, Y\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PERSON for q in QUESTION for a in ACTIONS2] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append([\"xxr0\"])\n # ALL_POSSIBLE_ACTIONS.append(\"smallerAngle\")\n # ALL_POSSIBLE_ACTIONS.append(\"biggerAngle\")\n ALL_POSSIBLE_ACTIONS.append([\"a0cxnot\"])\n ALL_POSSIBLE_ACTIONS.append([\"b0cxnot\"])\n\n N = 3000\n n_questions = 4\n max_gates = 9\n round_to = 2\n\n # learning_rates = [0.1, 1, 0.01]\n # gammas = [1, 0.9, 0.1]\n\n learning_rates = [0.1]\n gammas = [1]\n\n best = 0\n worst = 1\n min_state = None\n max_state = None\n min_strategy = None\n max_strategy = None\n\n for state in states:\n for alpha in learning_rates:\n for gamma in gammas:\n env = NlgDiscreteStatesActions.Environment(n_questions=n_questions, game_type=game, max_gates=max_gates,\n initial_state=state,\n best_or_worst=which,\n anneal=True) # mozno optimalnejsie by to bolo keby sa to resetovalo iba\n\n # (state_size, action_size, gamma, eps, eps_min, eps_decay, alpha, momentum)\n if agent_type == BasicAgent:\n agent = BasicAgent(state_size=env.state_size, action_size=len(ALL_POSSIBLE_ACTIONS), gamma=gamma, eps=1,\n eps_min=0.01,\n eps_decay=0.9995, alpha=alpha, momentum=0.9, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS,\n model_type=LinearModel)\n\n else:\n hidden_dim = [len(env.repr_state) * 2, len(env.repr_state) * 2]\n agent = DQNAgent(state_size=env.state_size, action_size=len(ALL_POSSIBLE_ACTIONS), gamma=gamma, eps=1, eps_min=0.01,\n eps_decay=0.9995, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, 
learning_rate=alpha, hidden_layers=len(hidden_dim),\n hidden_dim=hidden_dim)\n\n # scaler = get_scaler(env, N, ALL_POSSIBLE_ACTIONS, round_to=round_to)\n batch_size = 128\n\n # store the final value of the portfolio (end of episode)\n game_api = Game(round_to=round_to)\n portfolio_val = game_api.evaluate_train(N, agent, env)\n\n # save portfolio value for each episode\n np.save(f'.training/train.npy', portfolio_val)\n # portfolio_val = game.evaluate_test(agent, env)\n # return portfolio_val[0][0] # acc\n\n load_acc = np.load(f'.training/train.npy')[0]\n load_acc_max = load_acc.max()\n load_acc_min = load_acc.min()\n\n # take the best found quantum, not just learned value\n if load_acc_max > best:\n best = load_acc_max\n max_strategy = env.max_found_strategy.copy()\n max_state = env.max_found_state.copy()\n\n # take the best found quantum, not just learned value\n if load_acc_min < worst:\n worst = load_acc_min\n min_strategy = env.min_found_strategy.copy()\n min_state = env.min_found_state.copy()\n\n return best, worst, env.complex_array_to_real(min_state), env.complex_array_to_real(max_state), min_strategy, max_strategy\n\n\ndef play_quantum(game, which=\"best\", agent_type=BasicAgent, n_qubits=2):\n \"\"\" Learns to play the best quantum strategy according to game\n for 2 qubits uses genetic alg., for more uses reinforcement learning\"\"\"\n if n_qubits == 2: # for small games use genetic algorithm\n states = [np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex64), np.array([1, 0, 0, 0], dtype=np.complex64)]\n best, worst, min_state, max_state, min_strategy, max_strategy = quantumGEN(states, game)\n else: # for bigger games use reinforcement learning\n states = [np.array(\n [0 + 0j, 0 + 0j, 0.707 + 0j, 0 + 0j, -0.707 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j,\n 0 + 0j])]\n best, worst, min_state, max_state, min_strategy, max_strategy = quantumNN(states, agent_type, which, game)\n return best, worst, min_state, max_state, min_strategy, max_strategy\n\n\ndef calc_difficulty_of_game(game):\n \"\"\" Difficulty of the input game is calculated as a sum of all 1's in the whole game (evaluation) matrix\"\"\"\n diff = 0\n for row in game:\n for x in row:\n if x == 1:\n diff += 1\n return diff\n\n\ndef to_list(tuple):\n \"\"\" converts tuple to list \"\"\"\n return [list(x) for x in tuple]\n\n\ndef categorize(cutGames):\n \"\"\" categorizes input games according to the best and worst classical strategy probabilities , e.g. (0.75,0.25) is the category for\n CHSH game, because the best possible classical strategy will give you 0.75 success probability, the worst is 0.25 classicaly.\"\"\"\n categories = dict()\n for game in cutGames:\n classical_max_min = play_deterministic(game)\n if classical_max_min not in (0, 1): # these are not interesting\n try:\n categories[classical_max_min][calc_difficulty_of_game(game)].append(to_list(game))\n except KeyError:\n try: categories[classical_max_min][calc_difficulty_of_game(game)] = [to_list(game)]\n except KeyError: categories[classical_max_min] = {calc_difficulty_of_game(game): [to_list(game)]}\n return categories\n\n\ndef convert(list):\n \"\"\" Converts list to categories. 
\"\"\"\n categories = dict()\n for dict_row in list:\n try:\n categories[tuple(dict_row[0][0])][dict_row[1]].append(dict_row[2][0])\n except KeyError:\n try: categories[tuple(dict_row[0][0])][dict_row[1]] = [dict_row[2][0]]\n except: categories[tuple(dict_row[0][0])] = {dict_row[1]: [dict_row[2][0]]}\n return categories\n\n\nfrom NLG.database import DB\n\n\ndef max_entangled_difference(n_players=2, n_questions=2, choose_n_games_from_each_category=5, best_or_worst=\"best\", agent_type=BasicAgent,\n n_qubits=2):\n \"\"\" Finds interesting games by searching through the space of possible interesting games. Compares maximum classical with quantum.\n Puts results into local database\"\"\"\n\n def playGame():\n classical_max, classical_min = play_deterministic(game_type, best_or_worst)\n quantum_max, quantum_min, min_state, max_state, min_strategy, max_strategy = play_quantum(game_type, best_or_worst,\n agent_type=agent_type, n_qubits=n_qubits)\n # quantum_max = 0\n\n difference_max = 0 if classical_max > quantum_max else quantum_max - classical_max\n difference_min = 0 if classical_min < quantum_min else classical_min - quantum_min\n min_state = min_state.tolist()\n max_state = max_state.tolist()\n differences.append(\n (category, difficulty, classical_min, quantum_min, classical_max, quantum_max, game_type, difference_min, difference_max,\n min_state, max_state, min_strategy, max_strategy))\n\n db.insert(category=list(category), difficulty=difficulty, classic_min=classical_min, quantum_min=quantum_min,\n classic_max=classical_max,\n quantum_max=quantum_max, difference_min=difference_min, difference_max=difference_max, min_state=min_state,\n max_state=max_state,\n min_strategy=min_strategy, max_strategy=max_strategy, game=game_type)\n\n\n assert n_qubits == 2 or n_qubits == 4\n db = DB.CHSHdb()\n\n size_of_game = n_players * n_questions\n\n categories = db.query_categories_games(n_questions=n_questions, num_players=n_players)\n\n if categories == []:\n categories = categorize(generate_only_interesting_games(size_of_game))\n db.insert_categories_games(num_players=n_players, n_questions=n_questions, generated_games=categories)\n else:\n categories = convert(categories)\n\n differences = []\n for category, difficulties in categories.items():\n for difficulty in difficulties.keys():\n if choose_n_games_from_each_category != \"all\":\n for _ in range(choose_n_games_from_each_category): # choose 10 tactics from each category randomly\n if choose_n_games_from_each_category != \"all\":\n game_type = random.choice(categories[category][difficulty])\n playGame()\n else:\n for game_type in categories[category][difficulty]:\n playGame()\n\n # differences.sort(key=lambda x: x[1]) # sorts according to difference in winning rate\n if choose_n_games_from_each_category != \"all\":\n for category, difficulty, classical_min, quantum_min, classical_max, quantum_max, game_type, difference_min, difference_max, min_state, max_state, min_strategy, max_strategy in differences:\n print()\n print(\"category: \", category)\n print(\"difficulty: \", difficulty)\n print(\"game = \")\n game_type = list(game_type)\n for i, row in enumerate(game_type):\n game_type[i] = list(game_type[i])\n print(row)\n print(\"difference_max = \", difference_max)\n print(\"difference_min = \", difference_min)\n print(\"max state = \", max_state)\n print(\"min state = \", min_state)\n print(\"max strategy = \", max_strategy)\n print(\"min strategy = \", min_strategy)\n print()\n else: print(\"Too much to print\")\n\n\nif __name__ == 
'__main__':\n # max_entangled_difference(size=4)\n # game_type = [[1, 0, 0, 1],\n # [1, 0, 0, 1],\n # [1, 0, 0, 1],\n # [0, 1, 1, 0]]\n # print(play_deterministic(game_type))\n\n # print(len(generate_only_interesting_games(4)))\n\n # print([name for name, val in CHSHv02qDiscreteStatesActions.Environment.__dict__.items() if callable(val)]) # dostanem mena funkcii\n\n max_entangled_difference(choose_n_games_from_each_category=\"all\", best_or_worst=\"best\", agent_type=DQNAgent, n_qubits=2)\n"
},
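`calc_accuracy` above weights each question pair's outcome probabilities by the 0/1 entries of the game matrix and averages over the question pairs. The same formula, extracted as a standalone function for illustration:

```python
# Standalone rendering of abstractEnvironment.calc_accuracy: rows index
# question pairs, columns index answer pairs, 1s mark winning combinations.
def calc_accuracy(game_type, result):
    win_rate = sum(query * result[x][y]
                   for x, row in enumerate(game_type)
                   for y, query in enumerate(row))
    return win_rate / len(game_type)

chsh = [[1, 0, 0, 1],
        [1, 0, 0, 1],
        [1, 0, 0, 1],
        [0, 1, 1, 0]]
uniform = [[0.25] * 4] * 4           # uniformly random answers
print(calc_accuracy(chsh, uniform))  # 0.5
```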
{
"alpha_fraction": 0.6069294810295105,
"alphanum_fraction": 0.6129032373428345,
"avg_line_length": 12.883333206176758,
"blob_id": "95c72cb488ab8a586bc05be1a00bd3c1a168a5fd",
"content_id": "4b33cbd63fa8057939bc523346861ef359b8c0e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 837,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 60,
"path": "/src/doc/_sources/autoapi/NLG/models/LinearModel/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.models.LinearModel`\n=============================\n\n.. py:module:: NLG.models.LinearModel\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.LinearModel.LinearModel\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.LinearModel.override\n\n\n\n.. function:: override(f)\n\n\n.. class:: LinearModel(input_dim, n_action)\n\n\n Bases: :py:obj:`NLG.models.RegressionModel.RegressionModel`\n\n Simple linear approxiamation model \n\n .. method:: predict(self, X)\n\n predicts output for input \n\n\n .. method:: sgd(self, X, Y, learning_rate=0.01, momentum=0.9)\n\n makes one step of sgd \n\n\n .. method:: load_weights(self, filepath)\n\n loads weights \n\n\n .. method:: save_weights(self, filepath)\n\n saves weights \n\n\n .. method:: get_losses(self)\n\n returns learning loss \n\n\n\n"
},
{
"alpha_fraction": 0.46147269010543823,
"alphanum_fraction": 0.5179010629653931,
"avg_line_length": 38.31512451171875,
"blob_id": "5aab10e80866435e3db0546b9e88204a6a61f4e8",
"content_id": "c7fe657577ab8e6398a789f3fe04287158d4cb5b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9357,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 238,
"path": "/src/NLG/tests/testNonLocalGames.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom NLG.NlgDiscreteStatesActions import Environment\nfrom NLG.NlgGeneticOptimalization import CHSHgeneticOptimizer\nimport numpy as np\nfrom qiskit.extensions import RYGate\nfrom math import pi, sqrt\n\n\nclass TestCHSH(unittest.TestCase):\n\n def testRYGate(self):\n assert (np.around(RYGate((0 * pi / 180)).to_matrix(), 5).all() == np.eye(2).all())\n\n def testIfCorrectStrategyAndAccuracy(self):\n n_questions = 2\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n save_state = env.initial_state.copy()\n nauceneVyhodil = ['b0r-78.75', 'b0r-78.75', 'a0r90.0', 'b0r-78.75', 'b1r56.25', 'b1r-22.5', 'b0r11.25',\n 'b1r0.0', 'b1r0.0', 'b1r0.0'] # toto sa naucil\n dokopy = ['a0ry90', 'b0ry-225', 'b1ry33.75']\n for a in dokopy:\n env.step(a)\n\n A_0 = np.kron(RYGate((90 * pi / 180)).to_matrix(), np.identity(2))\n A_1 = np.kron(np.identity(2), np.identity(2))\n B_0 = np.kron(np.identity(2), RYGate((-225 * pi / 180)).to_matrix())\n B_1 = np.kron(np.identity(2), RYGate((33.75 * pi / 180)).to_matrix())\n\n ax = np.array([\n *[x for x in np.matmul(B_0, np.matmul(A_0, save_state))],\n *[x for x in np.matmul(B_1, np.matmul(A_0, save_state))],\n *[x for x in np.matmul(B_0, np.matmul(A_1, save_state))],\n *[x for x in np.matmul(B_1, np.matmul(A_1, save_state))]\n ])\n print(ax)\n print(env.accuracy)\n # assert (env.accuracy > 0.85) //TODO: este raz prekontrolovat ci je to spravne\n for poc, state in enumerate(env.repr_state):\n if poc % 4 == 0:\n assert (np.round(\n env.repr_state[poc] ** 2 + env.repr_state[poc + 1] ** 2 + env.repr_state[poc + 2] ** 2 +\n env.repr_state[poc + 3] ** 2, 2) == 1)\n\n for poc, state in enumerate(ax):\n if poc % 4 == 0:\n assert (np.round(ax[poc] ** 2 + ax[poc + 1] ** 2 + ax[poc + 2] ** 2 + ax[poc + 3] ** 2, 2) == 1)\n\n assert (env.repr_state.all() == ax.all())\n\n def testInitialAccuracy(self):\n n_questions = 4\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n assert (np.round(env.accuracy,2) == 0.25)\n\n # check if the other way of calculating accuracy is correct through comparing with already known good way, but inflexible\n def testCalcWinRate(self):\n n_questions = 4\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n result = [env.measure_probabilities_analytically() for i in range(4)]\n\n # this is for sure good way to calculate\n win_rate = 0\n for mat in result[:-1]:\n print(mat)\n win_rate += 1 / 4 * (mat[0] + mat[3])\n\n win_rate += 1 / 4 * (result[-1][1] + result[-1][2])\n assert (win_rate == env.calc_accuracy(result))\n\n def testCalcWinRate1(self):\n n_questions = 4\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1]]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n result = [env.measure_probabilities_analytically() for i in range(4)]\n\n # this is for sure good way to calculate\n win_rate = 0\n for mat in result:\n print(mat)\n win_rate += 1 / 4 * (mat[0] + mat[3])\n\n assert (win_rate == env.calc_accuracy(result))\n\n def testCalcWinRate2(self):\n n_questions = 4\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1]]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n result = [env.measure_probabilities_analytically() for i in range(4)]\n\n # this is for sure good way to 
calculate\n win_rate = 0\n for mat in result[:-1]:\n print(mat)\n win_rate += 1 / 4 * (mat[0] + mat[3])\n\n win_rate += 1 / 4 * (result[-1][1] + result[-1][3])\n\n assert (win_rate == env.calc_accuracy(result))\n\n def testCalcWinRate3(self):\n n_questions = 4\n tactic = [[1,1,1,1] for i in range(n_questions)]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n result = [env.measure_probabilities_analytically() for i in range(4)]\n assert (round(env.calc_accuracy(result)) == 1)\n\n def testCalcWinRate4(self):\n n_questions = 4\n tactic = [[0,0,0,0] for i in range(n_questions)]\n max_gates = 10\n env = Environment(n_questions, tactic, max_gates)\n result = [env.measure_probabilities_analytically() for i in range(4)]\n\n # this is for sure good way to calculate\n win_rate = 0\n for mat in result[:-1]:\n print(mat)\n win_rate += 1 / 4 * (mat[0] + mat[3])\n\n win_rate += 1 / 4 * (result[-1][1] + result[-1][3])\n\n assert round(win_rate - env.calc_accuracy(result) - 0) == 0\n\n def testGeneticAlg(self):\n # Solve to find optimal individual\n ACTIONS2 = ['r' + axis + \"0\" for axis in 'xyz']\n # ACTIONS2.extend(ACTIONS) # complexne gaty zatial neural network cez sklearn nedokaze , cize S, T, Y\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [p + q + a for p in PERSON for q in QUESTION for a in ACTIONS2] # place one gate at some place\n game = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n ga = CHSHgeneticOptimizer(population_size=30, n_crossover=len(ALL_POSSIBLE_ACTIONS) - 1, mutation_prob=0.1,\n history_actions=ALL_POSSIBLE_ACTIONS,\n game_type=game, best_or_worst=\"best\", state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex128))\n\n best = ga.solve(22) # you can also play with max. generations\n ga.show_individual(best[0])\n assert best[1] >= 0.83\n\n def testGeneticAlg2(self):\n # Solve to find optimal individual\n ACTIONS2 = ['r' + axis + \"0\" for axis in 'y']\n # ACTIONS2.extend(ACTIONS) # complexne gaty zatial neural network cez sklearn nedokaze , cize S, T, Y\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [p + q + a for p in PERSON for q in QUESTION for a in ACTIONS2] # place one gate at some place\n game = [[0, 0, 1, 0],\n [1, 1, 0, 0],\n [0, 0, 1, 1],\n [1, 1, 0, 0]]\n ga = CHSHgeneticOptimizer(population_size=30, n_crossover=len(ALL_POSSIBLE_ACTIONS) - 1, mutation_prob=0.1,\n history_actions=ALL_POSSIBLE_ACTIONS,\n game_type=game, best_or_worst=\"best\", state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex128))\n best = ga.solve(22) # you can also play with max. 
generations\n ga.show_individual(best[0])\n assert np.round(best[1],2) == 0.5\n\n def testTensorflow1(self):\n import tensorflow as tf\n hello = tf.constant(\"hello TensorFlow!\")\n\n def testCHSHdeterministicStrategies(self):\n from NLG import NonLocalGame\n evaluation_tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n assert NonLocalGame.play_deterministic(evaluation_tactic)[0] == 0.75\n assert NonLocalGame.play_deterministic(evaluation_tactic)[1] == 0.25\n\n def testCHSHaccLearnt(self):\n from NLG import NlgDiscreteStatesActions\n naucil_sa = ['b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'biggerAngle', 'a0ry22.5', 'b1ry-22.5']\n dokopy = ['b0ry-135', 'a0ry45', 'b1ry-45']\n\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n env = NlgDiscreteStatesActions.Environment(n_questions=2, game_type=tactic, max_gates=10)\n\n result = env.calculate_state(dokopy, False)\n\n acc = env.calc_accuracy(result)\n print(acc)\n assert np.round(acc, 2) < 0.85\n\n\n def testCHSHaccOptimal(self):\n from NLG import NlgDiscreteStatesActions\n naucil_sa = ['b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'b0ry-22.5', 'biggerAngle', 'a0ry22.5', 'b1ry-22.5']\n dokopy = ['b0ry-135', 'a0ry90', 'b1ry-45']\n\n tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n env = NlgDiscreteStatesActions.Environment(n_questions=2, game_type=tactic, max_gates=10)\n\n result = env.calculate_state(dokopy, False)\n\n acc = env.calc_accuracy(result)\n print(acc)\n assert np.round(acc,2) == 0.85\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
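The tests above build two-qubit operators with `np.kron`, placing Alice's gate on the first qubit and Bob's on the second. A minimal illustration of that ordering, and of the fact that the two local operations commute (assumes the same `qiskit.extensions` import the tests use):

```python
import numpy as np
from math import pi
from qiskit.extensions import RYGate

# Alice acts on the left tensor factor, Bob on the right:
A = np.kron(RYGate(pi / 2).to_matrix(), np.identity(2))    # Alice's rotation
B = np.kron(np.identity(2), RYGate(pi / 4).to_matrix())    # Bob's rotation
state = np.array([0, 1 / np.sqrt(2), -1 / np.sqrt(2), 0])  # singlet, as in the tests

# (A (x) I)(I (x) B) == (I (x) B)(A (x) I), so the application order is irrelevant.
assert np.allclose(A @ B @ state, B @ A @ state)
print(np.round(A @ B @ state, 3))
```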
{
"alpha_fraction": 0.586357057094574,
"alphanum_fraction": 0.5936139225959778,
"avg_line_length": 13.270833015441895,
"blob_id": "dc3856b0344efd165fc51c2ec3fc673c17d9811c",
"content_id": "9668c24db1aa4b40afb390e06ba12e3756b30653",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 689,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 48,
"path": "/src/doc/_sources/autoapi/NLG/models/RegressionModel/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.models.RegressionModel`\n=================================\n\n.. py:module:: NLG.models.RegressionModel\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.RegressionModel.RegressionModel\n\n\n\n\n.. class:: RegressionModel\n\n Bases: :py:obj:`abc.ABC`\n\n a linear regression models \n\n .. method:: predict(self, x)\n\n predicts output for input \n\n\n .. method:: sgd(self, x, y, learning_rate=0.01, momentum=0.9)\n\n makes one step of sgd \n\n\n .. method:: load_weights(self, filepath)\n\n loads weights \n\n\n .. method:: save_weights(self, filepath)\n\n saves weights \n\n\n .. method:: get_losses(self)\n\n returns learning loss \n\n\n\n"
},
{
"alpha_fraction": 0.5621766448020935,
"alphanum_fraction": 0.5665836334228516,
"avg_line_length": 37.65925979614258,
"blob_id": "0232f8dcf32b92a5d45d9740b46ecf6d183a8977",
"content_id": "761748684759263bfea0ac09c0c60d2621373357",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5219,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 135,
"path": "/src/NLG/optimalizers/GeneticAlg.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom abc import ABC, abstractmethod\n\n\nclass GeneticAlg(ABC):\n \"\"\" Abstract genetic algorithm framwork \"\"\"\n\n @abstractmethod\n def generate_individual(self):\n \"\"\" Generate random individual.\n To be implemented in subclasses\n \"\"\"\n pass\n\n def show_individual(self, x):\n \"\"\" Show the given individual x, either to console or graphically.\"\"\"\n print(x)\n\n @abstractmethod\n def fitness(self, x):\n \"\"\"Returns fitness of a given individual.\n To be implemented in subclasses\"\"\"\n pass\n\n def crossover(self, x, y, k):\n \"\"\" Take two parents (x and y) and make two children by applying k-point\n crossover. Positions for crossover are chosen randomly.\"\"\"\n oddelovace = [0, len(x)]\n\n for i in range(k):\n oddelovace.append(random.choice(range(len(x))))\n\n oddelovace = sorted(oddelovace)\n\n x_new, y_new = x[:], y[:]\n\n for i in range(1, len(oddelovace), 2):\n terajsi = oddelovace[i]\n predosly = oddelovace[i - 1]\n\n if predosly != terajsi:\n x_new[predosly:terajsi], y_new[predosly:terajsi] = y[predosly:terajsi], x[predosly:terajsi] # krizenie\n\n return (x_new, y_new)\n\n def boolean_mutation(self, x, prob):\n \"\"\" Elements of x are 0 or 1. Mutate (i.e. change) each element of x with given probability.\"\"\"\n potomok = x\n for poc in range(len(potomok)):\n if random.random() <= prob:\n if potomok[poc] == 1:\n potomok[poc] = 0\n else:\n potomok[poc] = 1\n return potomok\n\n @abstractmethod\n def number_mutation(self, x, prob):\n \"\"\" Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\"\"\"\n pass\n\n @abstractmethod\n def mutation(self, x, prob):\n \"\"\" Decides which mutation will occur. \"\"\"\n pass\n\n @abstractmethod\n def solve(self, max_generations, goal_fitness=1):\n \"\"\" Implementation of genetic algorithm. Produce generations until some\n individual`s fitness reaches goal_fitness, or you exceed total number\n of max_generations generations. 
Return best found individual.\"\"\"\n while max_generations != 0:\n # print(max_generations)\n max_generations -= 1\n\n # najdem najlepsieho, ci uz nieje v cieli, a zaroven vysortujem populaciu na polku\n # print(self.population)\n try: sort_population = sorted(self.population, key=lambda x: self.fitness(x), reverse=self.best_or_worst == \"best\")\n except: sort_population = sorted(self.population, key=lambda x: self.fitness(x), reverse=True)\n\n najlepsi_zatial = self.fitness(sort_population[0])\n self.for_plot.append(najlepsi_zatial)\n\n # for i in sort_population:\n # print(self.fitness(i))\n\n if najlepsi_zatial == goal_fitness:\n return sort_population[0]\n\n polka = len(sort_population) // 2\n self.population = sort_population[:polka] # treba zakomentovat ak ideme pouzit tournament selection\n\n # tournament selection - comment the row above and uncomment rows below\n\n ## novy = []\n ## for x in range(polka):\n ## best = None\n ## for i in range(2): # dvaja budu stale sutazit\n ## ind = self.population[random.randrange(0, len(self.population))]\n ## if (best == None) or self.fitness(ind) > self.fitness(best):\n ## best = ind\n ## novy.append(best)\n ##\n ## self.population = novy[:]\n\n # mutacie a skrizenie\n deti = []\n for i in range(len(self.population)):\n x = random.choice(self.population) # rodicia\n y = random.choice(self.population)\n\n dvaja_potomci = self.crossover(x, y, self.n_crossover) # skrizenie\n\n for ptmk in dvaja_potomci:\n potomok = self.mutation(ptmk, self.mutation_prob) # mutacie\n deti.append(potomok)\n\n # necham len tu najlepsiu polovicu deti\n try: sort_deti = sorted(deti, key=lambda x: self.fitness(x), reverse=self.best_or_worst == \"best\")\n except: sort_deti = sorted(deti, key=lambda x: self.fitness(x), reverse=True)\n\n # tu uz dotvaram novu generaciu teda polka rodicov a polka deti\n polka = len(sort_deti) // 2\n deti = sort_deti[:polka]\n for i in deti:\n self.population.append(i) # tu uz dotvaram celkovu novu generaciu\n\n try: sort_population = sorted(self.population, key=lambda x: self.fitness(x), reverse=self.best_or_worst == \"best\")\n except: sort_population = sorted(self.population, key=lambda x: self.fitness(x), reverse=True)\n\n najlepsi = sort_population[0]\n self.for_plot.append(self.fitness(najlepsi))\n return najlepsi\n"
},
{
"alpha_fraction": 0.7054503560066223,
"alphanum_fraction": 0.7085427045822144,
"avg_line_length": 24.584157943725586,
"blob_id": "a60f20bfe120b957653b63b6e9d80eb210f81aad",
"content_id": "a64646c5c96696e71af687de4a351a4bc2c9f1d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2587,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 101,
"path": "/src/doc/_sources/autoapi/NLG/tests/testNonLocalGames/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.tests.testNonLocalGames`\n==================================\n\n.. py:module:: NLG.tests.testNonLocalGames\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.tests.testNonLocalGames.TestCHSH\n\n\n\n\n.. class:: TestCHSH(methodName='runTest')\n\n\n Bases: :py:obj:`unittest.TestCase`\n\n A class whose instances are single test cases.\n\n By default, the test code itself should be placed in a method named\n 'runTest'.\n\n If the fixture may be used for many test cases, create as\n many test methods as are needed. When instantiating such a TestCase\n subclass, specify in the constructor arguments the name of the test method\n that the instance is to execute.\n\n Test authors should subclass TestCase for their own tests. Construction\n and deconstruction of the test's environment ('fixture') can be\n implemented by overriding the 'setUp' and 'tearDown' methods respectively.\n\n If it is necessary to override the __init__ method, the base class\n __init__ method must always be called. It is important that subclasses\n should not change the signature of their __init__ method, since instances\n of the classes are instantiated automatically by parts of the framework\n in order to be run.\n\n When subclassing TestCase, you can set these attributes:\n * failureException: determines which exception will be raised when\n the instance's assertion methods fail; test methods raising this\n exception will be deemed to have 'failed' rather than 'errored'.\n * longMessage: determines whether long messages (including repr of\n objects used in assert methods) will be printed on failure in *addition*\n to any explicit message passed.\n * maxDiff: sets the maximum length of a diff in failure messages\n by assert methods using difflib. It is looked up as an instance\n attribute so can be configured by individual tests if required.\n\n .. method:: testRYGate(self)\n\n\n .. method:: testIfCorrectStrategyAndAccuracy(self)\n\n\n .. method:: testInitialAccuracy(self)\n\n\n .. method:: testCalcWinRate(self)\n\n\n .. method:: testCalcWinRate1(self)\n\n\n .. method:: testCalcWinRate2(self)\n\n\n .. method:: testCalcWinRate3(self)\n\n\n .. method:: testCalcWinRate4(self)\n\n\n .. method:: testGeneticAlg(self)\n\n\n .. method:: testGeneticAlg2(self)\n\n\n .. method:: testTensorflow1(self)\n\n\n .. method:: testCHSHdeterministicStrategies(self)\n\n\n .. method:: testCHSHaccLearnt(self)\n\n\n .. method:: testCHSHaccOptimal(self)\n\n\n .. method:: testCHSH2epr(self)\n\n\n .. method:: testCHSH2eprParalelstartacc(self)\n\n\n\n"
},
{
"alpha_fraction": 0.5936028361320496,
"alphanum_fraction": 0.6136185526847839,
"avg_line_length": 40.43089294433594,
"blob_id": "6636cd1622955577720ccc12e606778e7b45f78f",
"content_id": "be861da6ffcb4b78ba73154c4dfc4c8e8d353da7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5096,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 123,
"path": "/src/NLG/NlgContinuousGlobalOptimalization.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "from math import sqrt\n\nimport numpy as np\n\nimport NonLocalGame\nfrom NonLocalGame import Game\nfrom NlgGeneticOptimalization import CHSHgeneticOptimizer\nfrom agents.DQNAgent import DQNAgent\n\n\nclass Environment(NonLocalGame.abstractEnvironment):\n\n def __init__(self, n_questions, game_type, max_gates, num_players=2,\n initial_state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0],\n dtype=np.float64)):\n self.n_questions = n_questions\n self.counter = 1\n self.history_actions = []\n self.max_gates = max_gates\n self.game_type = game_type\n self.initial_state = initial_state\n self.state = self.initial_state.copy()\n self.num_players = num_players\n self.repr_state = np.array([x for _ in range(self.num_players ** 2) for x in self.state], dtype=np.float64)\n self.accuracy = self.calc_accuracy([self.measure_probabilities_analytically() for _ in range(n_questions)])\n self.max_acc = self.accuracy\n self.min_gates = max_gates\n\n self.optimizer = CHSHgeneticOptimizer(population_size=15, n_crossover=len(self.history_actions) - 1,\n mutation_prob=0.10, state=self.initial_state.copy(),\n history_actions=self.history_actions.copy(),\n game_type=self.game_type,\n num_players=self.num_players)\n self.visited = dict()\n\n @NonLocalGame.override\n def reset(self):\n return super().reset()\n\n def calculate_new_state(self, action):\n self.history_actions.append(action)\n try:\n actions, accuracy, self.repr_state = self.visited[tuple(self.history_actions)]\n except KeyError:\n self.optimizer.reset(self.history_actions.copy(), len(self.history_actions) - 1)\n actions, accuracy, self.repr_state = self.optimizer.solve(17)\n self.visited[tuple(self.history_actions.copy())] = actions, accuracy, self.repr_state\n return accuracy\n\n @NonLocalGame.override\n def step(self, action):\n\n # Alice and Bob win when their input (a, b)\n # and their response (s, t) satisfy this relationship.\n done = False\n\n # accuracy of winning CHSH game\n accuracy_before = self.accuracy\n self.accuracy = self.calculate_new_state(action)\n difference_accuracy = self.accuracy - accuracy_before\n\n # reward is the increase in accuracy\n reward = self.reward_combined(difference_accuracy * 100)\n\n if self.counter == self.max_gates or self.history_actions[-1] == 'xxr0': done = True\n if done == True: print(self.visited[tuple(self.history_actions)][0])\n else: self.counter += 1\n return self.repr_state, reward, done\n\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nif __name__ == '__main__':\n ACTIONS = ['r0'] # complexne gaty zatial neural network cez sklearn nedokaze , cize S, T, Y\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [p + q + a for p in PERSON for q in QUESTION for a in ACTIONS] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append(\"xxr0\")\n\n N = 4000\n n_questions = 4\n evaluation_tactic = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n max_gates = 10\n round_to = 3\n env = Environment(n_questions, evaluation_tactic, max_gates, )\n\n # (state_size, action_size, gamma, eps, eps_min, eps_decay, alpha, momentum)\n # agent = BasicAgent(state_size=len(env.repr_state), action_size=len(ALL_POSSIBLE_ACTIONS), gamma=0.1, eps=1, eps_min=0.01,\n # eps_decay=0.9998, alpha=0.001, momentum=0.9, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, model_type=LinearModel)\n\n hidden_dim = [len(env.repr_state), len(env.repr_state) // 2]\n #\n agent = DQNAgent(state_size=len(env.repr_state), action_size=len(ALL_POSSIBLE_ACTIONS), gamma=0.1, eps=1, eps_min=0.01,\n 
eps_decay=0.9998, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, learning_rate=0.001, hidden_layers=len(hidden_dim),\n hidden_dim=hidden_dim)\n\n # scaler = get_scaler(env, N, ALL_POSSIBLE_ACTIONS, round_to=round_to)\n batch_size = 128\n\n # store the final value of the portfolio (end of episode)\n game = Game(round_to=round_to)\n portfolio_value, rewards = game.evaluate_train(N, agent, env)\n\n # plot relevant information\n NonLocalGame.show_plot_of(rewards, \"reward\")\n\n if agent.model.losses is not None:\n NonLocalGame.show_plot_of(agent.model.losses, \"loss\")\n\n NonLocalGame.show_plot_of(portfolio_value, \"accuracy\", [0.85, 0.75])\n\n # save portfolio value for each episode\n np.save(f'.training/train.npy', portfolio_value)\n portfolio_value = game.evaluate_test(agent, env)\n print(portfolio_value)\n a = np.load(f'.training/train.npy')\n print(f\"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}\")\n"
},
{
"alpha_fraction": 0.5875281691551208,
"alphanum_fraction": 0.6010518670082092,
"avg_line_length": 21.89655113220215,
"blob_id": "2b7eab25ec5f70898787b9973d0dcd009ba6a24b",
"content_id": "5a85ff1f447830ee134a39d74363ab77ac3d54c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1331,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 58,
"path": "/src/doc/_sources/autoapi/NLG/models/MLPModel/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.models.MLPModel`\n==========================\n\n.. py:module:: NLG.models.MLPModel\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.MLPModel.MLP\n\n\n\n\n.. class:: MLP(n_inputs, n_action, n_hidden_layers=1, hidden_dim=[32])\n\n\n Bases: :py:obj:`torch.nn.Module`\n\n Base class for all neural network modules.\n\n Your models should also subclass this class.\n\n Modules can also contain other Modules, allowing to nest them in\n a tree structure. You can assign the submodules as regular attributes::\n\n import torch.nn as nn\n import torch.nn.functional as F\n\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 20, 5)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n return F.relu(self.conv2(x))\n\n Submodules assigned in this way will be registered, and will have their\n parameters converted too when you call :meth:`to`, etc.\n\n :ivar training: Boolean represents whether this module is in training or\n evaluation mode.\n :vartype training: bool\n\n .. method:: forward(self, X)\n\n\n .. method:: save_weights(self, path)\n\n\n .. method:: load_weights(self, path)\n\n\n\n"
},
{
"alpha_fraction": 0.5698253512382507,
"alphanum_fraction": 0.5808670520782471,
"avg_line_length": 40.54135513305664,
"blob_id": "cde1f871d3d3bfd527bb7006a3b52d8000590d91",
"content_id": "2909566ba9d6ae39b8a93e8a9f99b4a24bb0b0ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11049,
"license_type": "permissive",
"max_line_length": 323,
"num_lines": 266,
"path": "/src/NLG/database/DB.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import psycopg2\nimport psycopg2.extras\n\n\nclass CHSHdb:\n\n def __init__(self):\n pass\n\n def createDB(self):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n # Preparing query to create a database\n sql = '''CREATE database mydb''';\n\n # Creating a database\n cursor.execute(sql)\n print(\"Database created successfully........\")\n\n # Closing the connection\n conn.close()\n\n def createTables(self):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n # Doping EMPLOYEE table if already exists.\n cursor.execute(\"DROP TABLE IF EXISTS NON_LOCAL_GAMES_EVALUATED\")\n\n # Creating table as per requirement\n sql = '''CREATE TABLE NON_LOCAL_GAMES_EVALUATED(\n id SERIAL PRIMARY KEY,\n QUESTIONS INT NOT NULL CHECK ( QUESTIONS >= 2 ),\n PLAYERS INT NOT NULL CHECK ( PLAYERS >= 2 ),\n CATEGORY FLOAT[] NOT NULL CHECK ( 0 <= ALL(CATEGORY) AND 1 >= ALL(CATEGORY) ),\n DIFFICULTY INT NOT NULL CHECK ( DIFFICULTY >= 0),\n MIN_CLASSIC_VALUE FLOAT NOT NULL CHECK ( MIN_CLASSIC_VALUE >= 0 AND MIN_CLASSIC_VALUE <= 1),\n MIN_QUANTUM_VALUE FLOAT NOT NULL CHECK ( MIN_QUANTUM_VALUE >= 0 AND MIN_QUANTUM_VALUE <= 1),\n MAX_CLASSIC_VALUE FLOAT NOT NULL CHECK ( MAX_CLASSIC_VALUE >= 0 AND MAX_CLASSIC_VALUE <= 1),\n MAX_QUANTUM_VALUE FLOAT NOT NULL CHECK ( MAX_QUANTUM_VALUE >= 0 AND MAX_QUANTUM_VALUE <= 1),\n MIN_DIFFERENCE FLOAT NOT NULL CHECK (MIN_DIFFERENCE >= 0 AND MIN_DIFFERENCE <= 1),\n MAX_DIFFERENCE FLOAT NOT NULL CHECK (MAX_DIFFERENCE >= 0 AND MAX_DIFFERENCE <= 1),\n MIN_STRATEGY TEXT[] NOT NULL,\n MAX_STRATEGY TEXT[] NOT NULL,\n MIN_STATE FLOAT[] NOT NULL,\n MAX_STATE FLOAT[] NOT NULL,\n GAME INT[] NOT NULL,\n unique (PLAYERS, QUESTIONS, GAME)\n )'''\n\n cursor.execute(sql)\n print(\"Table created successfully........\")\n\n # Doping EMPLOYEE table if already exists.\n cursor.execute(\"DROP TABLE IF EXISTS NON_LOCAL_GAMES_GENERATED\")\n\n # Creating table as per requirement\n sql = '''CREATE TABLE NON_LOCAL_GAMES_GENERATED(\n id SERIAL PRIMARY KEY,\n QUESTIONS INT NOT NULL CHECK ( QUESTIONS >= 2 ),\n PLAYERS INT NOT NULL CHECK ( PLAYERS >= 2 ),\n CATEGORY FLOAT[] NOT NULL CHECK ( 0 <= ALL(CATEGORY) AND 1 >= ALL(CATEGORY) ),\n DIFFICULTY INT NOT NULL CHECK ( DIFFICULTY >= 0),\n GAME INT[] NOT NULL,\n unique (PLAYERS, QUESTIONS, GAME)\n )'''\n\n cursor.execute(sql)\n print(\"Table created successfully........\")\n\n # Closing the connection\n conn.close()\n\n def query(self, category=\"all\", difficulty=\"all\", difference=\"all\", num_players=2, n_questions=2):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n begin = '''SELECT * FROM NON_LOCAL_GAMES_EVALUATED '''\n if difference in [\"max\", \"min\"]:\n begin = '''SELECT DISTINCT ON (QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY) QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY, MIN_CLASSIC_VALUE, MIN_QUANTUM_VALUE, MAX_CLASSIC_VALUE, MAX_QUANTUM_VALUE, MIN_DIFFERENCE, MAX_DIFFERENCE, MIN_STATE, MAX_STATE, MIN_STRATEGY, 
MAX_STRATEGY GAME FROM NON_LOCAL_GAMES_EVALUATED '''\n\n if difficulty == \"all\" and category == \"all\": sql = begin\n elif difficulty == \"all\": sql = begin + '''WHERE ''' + '''CATEGORY = ''' + str(category) + ''' AND PLAYERS = ''' + str(\n num_players) + ''' AND QUESTIONS = ''' + str(n_questions)\n else: sql = begin + '''WHERE ''' + '''CATEGORY = ''' + str(category) + ''' AND DIFFICULTY = ''' + str(\n difficulty) + ''' AND PLAYERS = ''' + str(num_players) + ''' AND QUESTIONS = ''' + str(n_questions)\n\n if difference == \"max\":\n sql += '''ORDER BY QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY, MAX_DIFFERENCE DESC''';\n if difference == \"min\":\n sql += '''ORDER BY QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY, MAX_DIFFERENCE ASC''';\n\n # Retrieving data\n cursor.execute(sql)\n\n result = cursor.fetchall();\n\n print(\"Records queried\")\n\n # Closing the connection\n conn.close()\n\n return result\n\n def query_categories_games(self, num_players=2, n_questions=2):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n sql = '''SELECT category,difficulty, game FROM NON_LOCAL_GAMES_GENERATED \n WHERE ''' + ''' PLAYERS = ''' + str(num_players) + ''' AND QUESTIONS = ''' + str(n_questions)\n\n # Retrieving data\n cursor.execute(sql)\n\n result = cursor.fetchall();\n\n print(\"Records queried\")\n\n # Closing the connection\n conn.close()\n\n return result\n\n def insert_categories_games(self, n_questions, num_players, generated_games):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n for category, difficulties in generated_games.items():\n for difficulty, games in difficulties.items():\n for game in games:\n sql = \"INSERT INTO NON_LOCAL_GAMES_GENERATED ( PLAYERS, QUESTIONS, DIFFICULTY, CATEGORY, GAME) VALUES (\" + str(\n num_players) + \", \" + str(n_questions) + \", \" + str(difficulty) + \", ARRAY[\" + str(list(category)) + \"], ARRAY[\" + str(\n game) + \"]);\" \\\n \"\"\n cursor.execute(sql)\n\n # Commit your changes in the database\n conn.commit()\n\n print(\"Records inserted\")\n\n def insert(self, category, difficulty, classic_min, quantum_min, classic_max, quantum_max, difference_min, difference_max, min_state, max_state,\n min_strategy, max_strategy, game, questions=2,\n players=2):\n # establishing the connection\n conn = psycopg2.connect(\n database=\"postgres\", user='postgres', password='password', host='127.0.0.1', port='5432'\n )\n conn.autocommit = True\n\n # Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n sql = '''INSERT INTO NON_LOCAL_GAMES_EVALUATED(QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY, MIN_CLASSIC_VALUE, MIN_QUANTUM_VALUE, MAX_CLASSIC_VALUE, MAX_QUANTUM_VALUE, MIN_DIFFERENCE, MAX_DIFFERENCE, MIN_STATE, MAX_STATE, MIN_STRATEGY, MAX_STRATEGY, GAME) VALUES ( ''' + str(\n questions) + \",\" + str(\n players) + \", ARRAY[\" + str(category) + \"],\" + str(difficulty) + \",\" + str(classic_min) + \",\" + str(quantum_min) + \",\" + str(\n classic_max) + \",\" + str(quantum_max) + \",\" + str(\n difference_min) + \",\" + str(\n difference_max) + \",ARRAY[\" + str(\n min_state) + \"], 
ARRAY[\" + str(\n max_state) + \"], ARRAY[\" + str(\n min_strategy) + \"], ARRAY[\" + str(\n max_strategy) + \"], ARRAY[\" + str(game) + '''] )\n ON CONFLICT(PLAYERS, QUESTIONS, GAME) DO \n UPDATE SET MAX_QUANTUM_VALUE = EXCLUDED.MAX_QUANTUM_VALUE, MAX_DIFFERENCE = EXCLUDED.MAX_DIFFERENCE, MAX_STATE = EXCLUDED.MAX_STATE, MAX_STRATEGY = EXCLUDED.MAX_STRATEGY\n WHERE EXCLUDED.MAX_QUANTUM_VALUE > NON_LOCAL_GAMES_EVALUATED.MAX_QUANTUM_VALUE;\n '''\n\n cursor.execute(sql)\n\n # Commit your changes in the database\n conn.commit()\n\n sql = '''INSERT INTO NON_LOCAL_GAMES_EVALUATED(QUESTIONS, PLAYERS, CATEGORY, DIFFICULTY, MIN_CLASSIC_VALUE, MIN_QUANTUM_VALUE, MAX_CLASSIC_VALUE, MAX_QUANTUM_VALUE, MIN_DIFFERENCE, MAX_DIFFERENCE, MIN_STATE, MAX_STATE, MIN_STRATEGY, MAX_STRATEGY, GAME) VALUES ( ''' + str(\n questions) + \",\" + str(\n players) + \", ARRAY[\" + str(category) + \"],\" + str(difficulty) + \",\" + str(classic_min) + \",\" + str(quantum_min) + \",\" + str(\n classic_max) + \",\" + str(quantum_max) + \",\" + str(\n difference_min) + \",\" + str(\n difference_max) + \",ARRAY[\" + str(\n min_state) + \"], ARRAY[\" + str(\n max_state) + \"], ARRAY[\" + str(\n min_strategy) + \"], ARRAY[\" + str(\n max_strategy) + \"], ARRAY[\" + str(game) + '''] )\n ON CONFLICT(PLAYERS, QUESTIONS, GAME) DO \n UPDATE SET MIN_QUANTUM_VALUE = EXCLUDED.MIN_QUANTUM_VALUE, MIN_DIFFERENCE = EXCLUDED.MIN_DIFFERENCE, MIN_STATE = EXCLUDED.MIN_STATE, MIN_STRATEGY = EXCLUDED.MIN_STRATEGY\n WHERE EXCLUDED.MIN_QUANTUM_VALUE < NON_LOCAL_GAMES_EVALUATED.MIN_QUANTUM_VALUE;\n '''\n\n cursor.execute(sql)\n\n # Commit your changes in the database\n conn.commit()\n\n print(\"Record inserted\")\n\n # Closing the connection\n conn.close()\n\n\nif __name__ == '__main__':\n db = CHSHdb()\n\n # db.createDB()\n\n # db.insert(category=1, difficulty=2, difference_max=1, difference_min=1, questions=2, players=2, classic_min=1, quantum_min=1, classic_max=1,\n # quantum_max=1, game=[[9]])\n # db.insert(category=1, difficulty=2, difference=1, questions=2, players=2, classic=1, quantum=0.75, game=[[8]])\n # print(db.query(max_difference=True))\n\n\n data = db.query()\n\n\n data = list(map(lambda x: x[10] , data))\n\n print(data)\n\n import matplotlib.pyplot as plt\n import seaborn as sb\n import pandas as pd\n import numpy as np\n\n # sb.distplot(data, hist=True)\n # sb.kdeplot(np.array(data), bw=0.5, log_scale=True)\n\n _ = plt.hist(data, bins='auto', log=True) # arguments are passed to np.histogram\n\n\n # plt.yscale('log')\n\n plt.ylabel('Number of games')\n plt.xlabel('Quantum advantage (win_q - win_c)')\n\n plt.title(\"Histogram log scale\")\n plt.show()"
},
{
"alpha_fraction": 0.5817278027534485,
"alphanum_fraction": 0.5981346368789673,
"avg_line_length": 44.76012420654297,
"blob_id": "842a90d60cf7b043dc5f1604eb5daa641a1e6377",
"content_id": "63a550d6bcbc78faa09f56cdbac28c903a2dbe27",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14692,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 321,
"path": "/src/NLG/NlgDiscreteStatesActions.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import itertools\nimport random\nfrom math import sqrt, pi\n\nimport numpy as np\nfrom qiskit.circuit.library import IGate, CXGate\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom NLG import NonLocalGame\nfrom NLG.NonLocalGame import Game\nfrom NLG.agents.DQNAgent import DQNAgent\n\n\nclass Environment(NonLocalGame.abstractEnvironment):\n \"\"\" Creates CHSH environments for quantum strategies, discretizes and states and uses discrete actions \"\"\"\n\n def __init__(self, n_questions, game_type, max_gates, n_players=2,\n initial_state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex64), best_or_worst=\"best\", reward_function=None,\n anneal=False, n_games=1):\n self.n_games = n_games # how many games are to be played (paralel)\n self.n_questions = n_questions # how many atomic questions (to one player)\n self.n_players = n_players # players / verifiers\n self.counter = 1 # number of steps\n self.history_actions = [] # history of actions taken\n self.history_actions_anneal = [] # history of action annealed taken\n\n self.max_gates = max_gates # limit of gates that can be taken\n self.min_gates = 0\n self.game_type = game_type # game matrix (rules - when do they win)\n self.initial_state = initial_state # initial state\n self.state = self.initial_state.copy()\n\n self.n_qubits = self.n_qubits_from_state()\n self.reduce_by = 2 ** (self.n_qubits - 2) # reducing for double games\n\n self.possible_states = list( # possible states\n itertools.product(list(range(self.n_questions)),\n repeat=self.n_qubits))\n\n self.one_game_answers = list( # possible answers\n itertools.product(list(range(self.n_questions)),\n repeat=self.n_players))\n\n self.repr_state = np.array([x for _ in range(len(self.game_type)) for x in self.state], dtype=np.complex64) # state representation for all comb. of questions\n\n self.state_size = len(self.repr_state) * 2 # times 2 because of complex array to array of real numbers\n\n self.accuracy = self.calc_accuracy([self.measure_probabilities_analytically() for _ in range(len(self.game_type))]) # winning probability\n self.max_acc = self.accuracy\n self.min_acc = self.accuracy\n\n self.max_found_state = self.repr_state.copy() # best / worst found configurations\n self.max_found_strategy = []\n self.min_found_state = self.repr_state.copy()\n self.min_found_strategy = []\n self.best_or_worst = best_or_worst\n\n self.questions = list(itertools.product(list(range(self.n_questions)), repeat=round(np.log2(len(self.game_type))))) # combinations of questions\n print(self.questions)\n self.memory_state = dict() # memoization of calculation, repr_state, accuracies\n self.reward_funcion = reward_function\n if self.reward_funcion == None: self.reward_funcion = self.reward_only_difference\n\n self.immutable = {\"xxr0\", \"smallerAngle\", \"biggerAngle\", \"a0cxnot\", \"b0cxnot\", \"a0cxnotr\", \"b0cxnotr\"}\n\n self.use_annealing = anneal # do you want to use annealing?\n\n @NonLocalGame.override\n def reset(self):\n self.history_actions_anneal = []\n return self.complex_array_to_real(super().reset())\n\n def calculate_state(self, history_actions, anneal=False):\n \"\"\" Calculates the state according to previous actions in parameter history_actions \"\"\"\n result = []\n\n for g, q in enumerate(self.questions):\n # Alice - a and Bob - b share an entangled state\n # The input to alice and bob is random\n # Alice chooses her operation based on her input, Bob too - eg. 
a0 if alice gets 0 as input\n\n self.state = self.initial_state.copy()\n\n for action in history_actions:\n # get info from action\n # if action == \"biggerAngle\":\n # self.velocity *= 2\n # continue\n # elif action == \"smallerAngle\":\n # self.velocity /= 2\n # continue\n\n # decode action\n gate = self.get_gate(action)\n if gate == IGate: continue\n to_whom = action[0:2]\n rotate_ancilla = action[2] == 'a'\n try: gate_angle = np.array([action[4:]], dtype=np.float32)\n except ValueError: gate_angle = 0\n\n I_length = int(len(self.initial_state) ** (1 / self.n_players))\n\n # apply action to state\n operation = []\n\n second_player_pos = round(np.log2(len(self.game_type)))//self.n_questions\n if gate == CXGate:\n ctrl = int(action[-1] != \"r\")\n if (q[0] == 0 and to_whom == 'a0') or (q[0] == 1 and to_whom == 'a1'):\n operation = np.kron(CXGate(ctrl_state=ctrl).to_matrix(), np.identity(I_length))\n if (q[second_player_pos] == 0 and to_whom == 'b0') or (q[second_player_pos] == 1 and to_whom == 'b1'):\n operation = np.kron(np.identity(I_length), CXGate(ctrl_state=ctrl).to_matrix())\n else:\n if (q[0] == 0 and to_whom == 'a0') or (q[0] == 1 and to_whom == 'a1'):\n if rotate_ancilla: calc_operation = np.kron(gate((gate_angle * pi / 180).item()).to_matrix(), np.identity(2))\n else: calc_operation = np.kron(np.identity(2), gate((gate_angle * pi / 180).item()).to_matrix())\n if len(self.state) != 4: operation = np.kron(calc_operation, np.identity(I_length))\n else: operation = calc_operation\n if (q[second_player_pos] == 0 and to_whom == 'b0') or (q[second_player_pos] == 1 and to_whom == 'b1'):\n if rotate_ancilla: calc_operation = np.kron(np.identity(2), gate((gate_angle * pi / 180).item()).to_matrix())\n else: calc_operation = np.kron(gate((gate_angle * pi / 180).item()).to_matrix(), np.identity(2))\n if len(self.state) != 4: operation = np.kron(np.identity(I_length), calc_operation)\n else: operation = calc_operation\n\n if len(operation) != 0:\n self.state = np.matmul(operation, self.state)\n\n # modify repr_state according to state\n self.repr_state[g * len(self.state):(g + 1) * len(self.state)] = self.state.copy()\n\n result.append(self.measure_probabilities_analytically())\n\n return result\n\n def save_interesting_strategies(self):\n if self.accuracy > self.max_acc:\n self.max_acc = self.accuracy\n self.max_found_state = self.repr_state.copy()\n self.max_found_strategy = self.history_actions_anneal.copy()\n\n elif self.accuracy == self.max_acc:\n if len(self.history_actions) < len(self.max_found_strategy):\n self.max_found_state = self.repr_state.copy()\n self.max_found_strategy = self.history_actions_anneal.copy()\n\n if self.accuracy < self.min_acc:\n self.min_acc = self.accuracy\n self.min_found_state = self.repr_state.copy()\n self.min_found_strategy = self.history_actions_anneal.copy()\n\n elif self.accuracy == self.min_acc:\n if len(self.history_actions) < len(self.min_found_strategy):\n self.min_found_state = self.repr_state.copy()\n self.min_found_strategy = self.history_actions_anneal.copy()\n\n if self.min_found_strategy == []: self.min_found_strategy.append('xxr0')\n if self.max_found_strategy == []: self.max_found_strategy.append('xxr0')\n\n @NonLocalGame.override\n def step(self, action):\n # Alice and Bob win when their input (a, b)\n # and their response (s, t) satisfy this relationship.\n done = False\n\n if type(action) == list: action = action[0]\n # play game\n self.history_actions.append(action)\n self.history_actions_anneal.append(action)\n\n # accuracy of winning 
CHSH game\n before = self.accuracy\n\n try:\n result, self.repr_state, _, self.accuracy, to_complex = self.memory_state[tuple(self.history_actions)]\n except KeyError:\n try: result, self.repr_state, self.history_actions_anneal[:-1] = self.memory_state[tuple(self.history_actions[:-1])][:3]\n except KeyError: pass\n if action not in self.immutable and self.use_annealing:\n self.history_actions_anneal[-1] = self.history_actions_anneal[-1][:4] + str(\n self.anneal()) # simulated annealing on the last chosen action\n\n if self.use_annealing: result = self.calculate_state(self.history_actions_anneal)\n else: result = self.calculate_state(self.history_actions)\n\n self.accuracy = self.calc_accuracy(result)\n to_complex = self.complex_array_to_real(self.repr_state)\n self.memory_state[tuple(self.history_actions)] = (\n result, self.repr_state.copy(), self.history_actions_anneal.copy(), self.accuracy, to_complex)\n\n difference_in_accuracy = self.accuracy - before\n\n if self.best_or_worst == \"worst\": difference_in_accuracy *= (-1)\n\n try: reward = self.reward_funcion(self, difference_in_accuracy) # because I needed to call like this when using Optimalizing hyperparam.\n except: reward = self.reward_funcion(difference_in_accuracy)\n\n self.save_interesting_strategies()\n\n if self.counter == self.max_gates or action == 'xxr0': done = True\n if not done: self.counter += 1\n return to_complex, reward, done\n\n def anneal(self, steps=80, t_start=2, t_end=0.001):\n \"\"\" Finds the maximal value of the fitness function by\n executing the simulated annealing algorithm.\n Returns a state (e.g. x) for which fitness(x) is maximal. \"\"\"\n x = self.random_state()\n t = t_start\n for i in range(steps):\n neighbor = np.random.choice(self.neighbors(x))\n ΔE = self.fitness(neighbor) - self.fitness(x)\n if ΔE > 0: # //neighbor is better then x\n x = neighbor\n elif np.random.random() < np.math.e ** (ΔE / t): # //neighbor is worse then x\n x = neighbor\n t = t_start * (t_end / t_start) ** (i / steps)\n return x\n\n def fitness(self, x):\n \"\"\" Calculates fitness of the state given by calculation of accuracy over history of actions.\"\"\"\n last = [self.history_actions_anneal[-1][:4] + str(x)]\n return self.calc_accuracy(self.calculate_state(self.history_actions_anneal[:-1] + last, anneal=True))\n\n def neighbors(self, x, span=30, delta=0.5):\n \"\"\" Creates neighboring gate angle to angle x\"\"\"\n res = []\n if x > -span + 3 * delta: res += [x - i * delta for i in range(1, 4)]\n if x < span - 3 * delta: res += [x + i * delta for i in range(1, 4)]\n return res\n\n def random_state(self):\n return random.uniform(-180, 180)\n\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\nfrom NLG.NonLocalGame import show_plot_of\n\nif __name__ == '__main__':\n # Hyperparameters setting\n # ACTIONS = [q + axis + \"0\" for axis in 'xyz' for q in 'ra']\n ACTIONS = [q + axis + \"0\" for axis in 'y' for q in 'r']\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PERSON for q in QUESTION for a in ACTIONS] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append([\"xxr0\"])\n\n # # for 1 game with 2 EPR\n # ALL_POSSIBLE_ACTIONS.append([\"a0cxnot\"])\n # ALL_POSSIBLE_ACTIONS.append([\"b0cxnot\"])\n #\n # # for xor paralel with 2EPR\n # ALL_POSSIBLE_ACTIONS.append([\"a0cxnotr\"])\n # ALL_POSSIBLE_ACTIONS.append([\"b0cxnotr\"])\n\n N = 4000\n n_questions = 2\n game_type = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n\n max_gates = 15\n round_to = 6\n\n # 
game_type = create(game_type)\n\n state = np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex64)\n # state = np.array([ 0+0j, 0+0j, 0+0j, 0.5+0j, 0+0j, 0+0j, -0.5+0j, 0+0j, 0+0j, -0.5+0j, 0+0j, 0+0j, 0.5+0j, 0+0j, 0+0j, 0+0j ], dtype=np.complex64)\n #\n\n env = Environment(n_questions, game_type, max_gates, initial_state=state,\n reward_function=Environment.reward_only_difference,\n anneal=True, n_games=1)\n\n\n\n # transform actions to noncorellated encoding\n encoder = OneHotEncoder(drop='first', sparse=False)\n # transform data\n onehot = encoder.fit_transform(ALL_POSSIBLE_ACTIONS)\n onehot_to_action = dict()\n action_to_onehot = dict()\n for x, a_encoded in enumerate(onehot):\n onehot_to_action[str(a_encoded)] = x\n action_to_onehot[x] = str(a_encoded)\n\n hidden_dim = [len(env.repr_state) , len(env.repr_state), len(env.repr_state)]\n agent = DQNAgent(state_size=env.state_size, action_size=len(ALL_POSSIBLE_ACTIONS), gamma=0.9, eps=1, eps_min=0.01,\n eps_decay=0.9998, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, learning_rate=0.001, hidden_layers=len(hidden_dim),\n hidden_dim=hidden_dim, onehot_to_action=onehot_to_action, action_to_onehot=action_to_onehot)\n # divide data by\n batch_size = 128\n\n game = Game(round_to=round_to, batch_size=batch_size)\n portfolio_value, rewards = game.evaluate_train(N, agent, env)\n\n # agent = DQNAgent(state_size=env.state_size, action_size=len(ALL_POSSIBLE_ACTIONS), gamma=1, eps=1, eps_min=0.01,\n # eps_decay=0.9998, ALL_POSSIBLE_ACTIONS=ALL_POSSIBLE_ACTIONS, learning_rate=0.001, hidden_layers=len(hidden_dim),\n # hidden_dim=hidden_dim, onehot_to_action=onehot_to_action, action_to_onehot=action_to_onehot)\n\n # The size of a batch must be more than or equal to one and less than or equal to the number of samples in the training dataset.\n\n\n # plot relevant information\n show_plot_of(rewards, \"reward\")\n\n if agent.model.losses is not None:\n show_plot_of(agent.model.losses, \"loss\")\n\n show_plot_of(portfolio_value, \"accuracy\", [0.85, 0.75])\n\n # save portfolio value for each episode\n np.save(f'.training/train.npy', portfolio_value)\n\n portfolio_value = game.evaluate_test(agent, env)\n print(portfolio_value)\n a = np.load(f'.training/train.npy')\n print(f\"average accuracy: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}\")\n"
},
{
"alpha_fraction": 0.6984013915061951,
"alphanum_fraction": 0.7071048021316528,
"avg_line_length": 25.17674446105957,
"blob_id": "50b8dc5885470c45dd143650b225cb74467eb497",
"content_id": "d9717a0752f724bdfe4ecd1a44f3f7a728012cc1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 5630,
"license_type": "permissive",
"max_line_length": 160,
"num_lines": 215,
"path": "/src/doc/_sources/autoapi/NLG/NonLocalGame/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NonLocalGame`\n=======================\n\n.. py:module:: NLG.NonLocalGame\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NonLocalGame.abstractEnvironment\n NLG.NonLocalGame.Game\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NonLocalGame.get_scaler\n NLG.NonLocalGame.show_plot_of\n NLG.NonLocalGame.override\n NLG.NonLocalGame.game_with_rows_all_zeroes\n NLG.NonLocalGame.generate_only_interesting_games\n NLG.NonLocalGame.play_deterministic\n NLG.NonLocalGame.quantumGEN\n NLG.NonLocalGame.quantumNN\n NLG.NonLocalGame.play_quantum\n NLG.NonLocalGame.calc_difficulty_of_game\n NLG.NonLocalGame.to_list\n NLG.NonLocalGame.categorize\n NLG.NonLocalGame.convert\n NLG.NonLocalGame.max_entangled_difference\n\n\n\n.. function:: get_scaler(env, N, ALL_POSSIBLE_ACTIONS, round_to=2)\n\n :returns scikit-learn scaler object to scale the states\n\n\n.. function:: show_plot_of(plot_this, label, place_line_at=())\n\n\n.. function:: override(f)\n\n\n.. class:: abstractEnvironment\n\n Bases: :py:obj:`abc.ABC`\n\n abstract environment to create CHSH framework\n\n actions are expected in this format\n\n ACTIONS = [q + axis + \"0\" for axis in 'xyz' for q in 'ra']\n ACTIONS = [q + axis + \"0\" for axis in 'y' for q in 'r']\n PLAYER = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PLAYER for q in QUESTION for a in ACTIONS] # place one gate at some place\n ALL_POSSIBLE_ACTIONS.append([\"xxr0\"])\n\n # for 1 game with 2 EPR\n ALL_POSSIBLE_ACTIONS.append([\"a0cxnot\"])\n ALL_POSSIBLE_ACTIONS.append([\"b0cxnot\"])\n #\n # for xor paralel with 2EPR\n ALL_POSSIBLE_ACTIONS.append([\"a0cxnotr\"])\n ALL_POSSIBLE_ACTIONS.append([\"b0cxnotr\"])\n\n .. method:: reset(self)\n :abstractmethod:\n\n Return initial_time_step.\n\n\n .. method:: step(self, action)\n :abstractmethod:\n\n Apply action and return new time_step.\n\n\n .. method:: measure_probabilities_analytically(self)\n\n :returns probabilities of questions (e.g. 00,01,10,11) happening in matrix \n\n\n .. method:: calc_accuracy(self, result)\n\n Calculates accurary by going through rules of the game given by game_type matrix\n :returns winning probability / accuracy / win rate based on winning game_type \n\n\n .. method:: n_qubits_from_state(self)\n\n There are 2^n states of n qubits, to get the n, we need to make log2 from state\n\n\n .. method:: count_gates(self)\n\n :returns count of relevant gates \n\n\n .. method:: get_gate(self, action)\n\n :returns gate got from string code of action \n\n\n .. method:: reward_only_difference(self, difference)\n\n\n .. method:: reward_qubic(self, difference)\n\n\n .. method:: reward_only_best(self, difference)\n\n reward only if it its better than results before \n\n\n .. method:: reward_combined(self, difference)\n\n\n .. method:: complex_array_to_real(self, inp_array)\n\n decomposes complex array into array of real numbers with double size. \n\n\n\n.. class:: Game(scaler=None, round_to=2, batch_size=32)\n\n\n creates framework for easier manipulation \n\n .. method:: play_one_episode(self, agent, env, DO)\n\n Plays one episode of CHSH training\n :returns last accuracy acquired and rewards from whole episode \n\n\n .. method:: evaluate_train(self, N, agent, env)\n\n Performes the whole training of agent in env in N steps\n :returns portfolio value and rewards for all episodes - serves to plot how it has trained\n\n\n .. 
method:: evaluate_test(self, agent, env)\n\n Tests what has the agent learnt in N=1 steps :returns accuracy and reward \n\n\n\n.. function:: game_with_rows_all_zeroes(game)\n\n Checks whether there is not full zero row in game \n\n\n.. function:: generate_only_interesting_games(size=4, n_questions=2)\n\n Generates only interesting evaluation tactics\n because some are almost duplicates and some will have no difference between classic and quantum strategies. \n\n\n.. function:: play_deterministic(game, which='best')\n\n Learns to play the best classic strategy according to game \n\n\n.. function:: quantumGEN(states, game)\n\n Plays nonlocal game using genetic algorithm multiple -lenght(states)- times and returns the best and the worst result.\n Works good for small nonlocal games with 1epr pair. For bigger games reinforcement learning is much better choice. \n\n\n.. function:: quantumNN(states, agent_type, which, game)\n\n Plays nonlocal game using reinforcement learning multiple -lenght(states)- times and returns the best and the worst result. \n\n\n.. function:: play_quantum(game, which='best', agent_type=BasicAgent, n_qubits=2)\n\n Learns to play the best quantum strategy according to game\n for 2 qubits uses genetic alg., for more uses reinforcement learning\n\n\n.. function:: calc_difficulty_of_game(game)\n\n Difficulty of the input game is calculated as a sum of all 1's in the whole game (evaluation) matrix\n\n\n.. function:: to_list(tuple)\n\n converts tuple to list \n\n\n.. function:: categorize(cutGames)\n\n categorizes input games according to the best and worst classical strategy probabilities , e.g. (0.75,0.25) is the category for\n CHSH game, because the best possible classical strategy will give you 0.75 success probability, the worst is 0.25 classicaly.\n\n\n.. function:: convert(list)\n\n Converts list to categories. \n\n\n.. function:: max_entangled_difference(n_players=2, n_questions=2, choose_n_games_from_each_category=5, best_or_worst='best', agent_type=BasicAgent, n_qubits=2)\n\n Finds interesting games by searching through the space of possible interesting games. Compares maximum classical with quantum.\n Puts results into local database\n\n\n"
},
{
"alpha_fraction": 0.54347825050354,
"alphanum_fraction": 0.552717387676239,
"avg_line_length": 24.55555534362793,
"blob_id": "9ea0839b3c8445086c651b59a5ede4c871d43afc",
"content_id": "2060831a18ddf9e6bed2ea502ae4b4e693cab077",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1840,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 72,
"path": "/src/NLG/models/LinearModel.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom NLG.models.RegressionModel import RegressionModel\n\n\ndef override(f): return f\n\n\nclass LinearModel(RegressionModel):\n \"\"\" Simple linear approxiamation model \"\"\"\n\n @override\n def __init__(self, input_dim, n_action):\n self.W = np.random.randn(input_dim, n_action) / np.sqrt(input_dim)\n self.b = np.zeros(n_action)\n\n # momentum terms\n self.vW = 0\n self.vb = 0\n\n self.losses = []\n\n @override\n def predict(self, X):\n # make sure X is N x D\n # assert (len(X.shape) == 2)\n return X.dot(self.W) + self.b\n\n @override\n def sgd(self, X, Y, learning_rate=0.01, momentum=0.9):\n # make sure X is N x D\n # assert (len(X.shape) == 2)\n\n # the loss values are 2-D\n # normally we would divide by N only\n # but now we divide by N x K\n num_values = np.prod(Y.shape)\n\n # do one step of gradient descent\n # we multiply by 2 to get the exact gradient\n # (not adjusting the learning rate)\n # i.e. d/dx (x^2) --> 2x\n Yhat = self.predict(X)\n\n # print([X.shape, Y.shape])\n gW = 2 * X.T.dot(Yhat - Y) / num_values\n gb = 2 * (Yhat - Y).sum(axis=0) / num_values\n\n # update momentum terms\n self.vW = momentum * self.vW - learning_rate * gW\n self.vb = momentum * self.vb - learning_rate * gb\n\n # update params\n self.W += self.vW\n self.b += self.vb\n\n mse = np.mean((Yhat - Y) ** 2)\n self.losses.append(mse)\n\n @override\n def load_weights(self, filepath):\n npz = np.load(filepath)\n self.W = npz['W']\n self.b = npz['b']\n\n @override\n def save_weights(self, filepath):\n np.savez(filepath, W=self.W, b=self.b)\n\n @override\n def get_losses(self):\n return self.losses\n"
},
{
"alpha_fraction": 0.6310811042785645,
"alphanum_fraction": 0.6310811042785645,
"avg_line_length": 16.5238094329834,
"blob_id": "516a7bc9bda2d4f775a6778d0cf54dc7da195ecc",
"content_id": "22f6fb44f9ab49bc4c0730c66ce75d6eb5a8c3b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 740,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 42,
"path": "/src/doc/_sources/autoapi/NLG/agents/BasicAgent/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.agents.BasicAgent`\n============================\n\n.. py:module:: NLG.agents.BasicAgent\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.agents.BasicAgent.BasicAgent\n\n\n\n\n.. class:: BasicAgent(state_size, action_size, gamma, eps, eps_min, eps_decay, alpha, momentum, ALL_POSSIBLE_ACTIONS, model_type)\n\n\n Reinforcement learning agent \n\n .. method:: act(self, state)\n\n :returns action based on neural model prediction / epsilon greedy \n\n\n .. method:: train(self, state, action, reward, next_state, done)\n\n performs one training step of neural network \n\n\n .. method:: load(self, name)\n\n loads weights into model \n\n\n .. method:: save(self, name)\n\n saves weight into model \n\n\n\n"
},
{
"alpha_fraction": 0.6060225963592529,
"alphanum_fraction": 0.6060225963592529,
"avg_line_length": 12.457627296447754,
"blob_id": "6e378a1b19dc1de6808b5fa59b50c1aa62ee649b",
"content_id": "318ab9fac0e5faa49bfb7ea25cf2c6b4724cd059",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 797,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 59,
"path": "/src/doc/_sources/autoapi/NLG/models/KerasModel/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.models.KerasModel`\n============================\n\n.. py:module:: NLG.models.KerasModel\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.KerasModel.KerasModel\n\n\n\nFunctions\n~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.models.KerasModel.override\n NLG.models.KerasModel.show_history\n\n\n\n.. function:: override(f)\n\n\n.. function:: show_history(history, block=True)\n\n\n.. class:: KerasModel(input_dim, n_action)\n\n\n Bases: :py:obj:`RegressionModel.RegressionModel`\n\n Regression model using more layers \n\n .. attribute:: physical_devices\n \n\n \n\n .. method:: predict(self, X)\n\n\n .. method:: sgd(self, X, Y, learning_rate, momentum)\n\n\n .. method:: load_weights(self, _)\n\n\n .. method:: save_weights(self, _)\n\n\n .. method:: get_losses(self)\n\n\n\n"
},
{
"alpha_fraction": 0.6528370976448059,
"alphanum_fraction": 0.6693105697631836,
"avg_line_length": 22.27142906188965,
"blob_id": "68b2aa6b74499674eeee96c61ccb66eb517b0875",
"content_id": "7c6726fd143257c7043b9b78c93fc5f9510fb85c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1639,
"license_type": "permissive",
"max_line_length": 257,
"num_lines": 70,
"path": "/src/doc/_sources/autoapi/NLG/NlgGeneticOptimalization/index.rst.txt",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": ":mod:`NLG.NlgGeneticOptimalization`\n===================================\n\n.. py:module:: NLG.NlgGeneticOptimalization\n\n\nModule Contents\n---------------\n\nClasses\n~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgGeneticOptimalization.CHSHgeneticOptimizer\n\n\n\n\nAttributes\n~~~~~~~~~~\n\n.. autoapisummary::\n\n NLG.NlgGeneticOptimalization.ACTIONS\n\n\n.. class:: CHSHgeneticOptimizer(population_size=15, n_crossover=3, mutation_prob=0.05, state=[0, float(1 / sqrt(2)), -float(1 / sqrt(2)), 0], history_actions=['a0r0', 'b0r0', 'a1r0', 'b1r0'], game_type=[], num_players=2, n_questions=2, best_or_worst='best')\n\n\n Bases: :py:obj:`optimalizers.GeneticAlg.GeneticAlg`, :py:obj:`NonLocalGame.abstractEnvironment`\n\n Creates CHSH genetic optimizer \n\n .. method:: reset(self, history_actions, n_crossover)\n\n Initializes number of crossovers and CHSH environment with :param history_actions - new previous actions\n\n\n .. method:: step(self, action)\n\n\n .. method:: generate_individual(self)\n\n Generate random individual.\n\n\n .. method:: fitness(self, x)\n\n Returns fitness of a given individual.\n\n\n .. method:: number_mutation(self, x, prob)\n\n Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\n\n\n .. method:: mutation(self, x, prob)\n\n\n .. method:: solve(self, max_generations, goal_fitness=1)\n\n Implementation of genetic algorithm. Produce generations until some\n # individual`s fitness reaches goal_fitness, or you exceed total number\n # of max_generations generations. Return best found individual. \n\n\n\n.. data:: ACTIONS\n \n\n \n\n"
},
{
"alpha_fraction": 0.5792381763458252,
"alphanum_fraction": 0.5917418003082275,
"avg_line_length": 29.43362808227539,
"blob_id": "14a2802b09160f41f8c3a3070565294a14537165",
"content_id": "0dd42a86ac5f964a57a31772188e09129a171b6c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3439,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 113,
"path": "/src/NLG/models/KerasModel.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom keras import layers\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.optimizers import Adam\n\nfrom RegressionModel import RegressionModel\n\n\ndef override(f): return f\n\n\ndef show_history(history, block=True):\n fig, axs = plt.subplots(2, 1, num='Training history', sharex=True)\n\n plt.subplot(2, 1, 1)\n plt.title('Regression error per epoch')\n plt.plot(history.history['loss'], '-b', label='training loss')\n try:\n plt.plot(history.history['val_loss'], '-r', label='validation loss')\n except KeyError:\n pass\n plt.grid(True)\n plt.legend(loc='best')\n plt.xlim(left=-1);\n plt.ylim(bottom=-0.01)\n\n plt.subplot(2, 1, 2)\n plt.title('Classification accuracy per epoch [%]')\n plt.plot(np.array(history.history['accuracy']) * 100, '-b', label='training accuracy')\n try:\n plt.plot(np.array(history.history['val_acc']) * 100, '-r', label='validation accuracy')\n except KeyError:\n pass\n plt.grid(True)\n plt.legend(loc='best')\n plt.xlim(left=-1);\n plt.ylim(-3, 103)\n\n plt.tight_layout()\n plt.show(block=block)\n\n\nclass KerasModel(RegressionModel):\n \"\"\" Regression model using more layers \"\"\"\n\n physical_devices = tf.config.list_physical_devices('GPU')\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n def __init__(self, input_dim, n_action):\n # Build the model\n self.dnn = Sequential()\n\n self.dnn.add(layers.Dense(input_dim, activation='relu', input_shape=[input_dim]))\n self.dnn.add(layers.Dense(10, activation='relu'))\n self.dnn.add(layers.Dense(10, activation='relu'))\n self.dnn.add(layers.Dense(8, activation='relu'))\n\n # output layer\n self.dnn.add(layers.Dense(n_action))\n\n self.compiled = False\n self.losses = None\n\n @override\n def predict(self, X):\n assert (len(X.shape) == 2)\n return self.dnn.predict(X)\n\n @override\n def sgd(self, X, Y, learning_rate, momentum):\n # Train the model\n assert (len(X.shape) == 2)\n if not self.compiled:\n self.dnn.compile(loss='mse',\n optimizer=Adam(lr=learning_rate, beta_1=momentum, beta_2=0.999),\n metrics=['mae'])\n self.compiled = True\n\n history = self.dnn.fit(X, Y,\n epochs=1,\n batch_size=1,\n verbose=3\n )\n\n # show_history(history)\n\n @override\n def load_weights(self, _):\n # load json and create model\n json_file = open('../.training/model.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.dnn = model_from_json(loaded_model_json)\n # load weights into new model\n self.dnn.load_weights(\".training/model.h5\")\n print(\"Loaded model from disk\")\n\n @override\n def save_weights(self, _):\n # serialize model to JSON\n model_json = self.dnn.to_json()\n with open(\"../.training/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n self.dnn.save_weights(\".training/model.h5\")\n print(\"Saved model to disk\")\n\n @override\n def get_losses(self):\n return self.losses\n"
},
{
"alpha_fraction": 0.5601972937583923,
"alphanum_fraction": 0.5806498527526855,
"avg_line_length": 40.281436920166016,
"blob_id": "6332dc49f0f41183f3297f2007df1db8bf72c29d",
"content_id": "302c883c675652a7aeac912710e94e2f23780cf7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6894,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 167,
"path": "/src/NLG/NlgGeneticOptimalization.py",
"repo_name": "eauriel/Bachelor-Thesis",
"src_encoding": "UTF-8",
"text": "import itertools\nimport random\nfrom math import sqrt, pi\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom qiskit.extensions import IGate\n\nfrom NLG.NonLocalGame import abstractEnvironment, override\nfrom NLG.optimalizers.GeneticAlg import GeneticAlg\n\n\nclass CHSHgeneticOptimizer(GeneticAlg, abstractEnvironment):\n \"\"\" Creates CHSH genetic optimizer \"\"\"\n\n @override\n def __init__(self, population_size=15, n_crossover=3, mutation_prob=0.05,\n state=[0, float(1 / sqrt(2)), -float(1 / sqrt(2)), 0],\n history_actions=['a0r0', 'b0r0', 'a1r0', 'b1r0'], game_type=[], num_players=2, n_questions=2, best_or_worst=\"best\"):\n # Initialize the population - create population of 'size' individuals,\n # each individual is a bit string of length 'word_len'.\n super().__init__()\n self.n_questions = n_questions\n self.best_or_worst = best_or_worst\n self.population_size = population_size\n self.n_crossover = n_crossover\n self.mutation_prob = mutation_prob\n self.num_players = num_players\n self.initial = state\n self.state = self.initial.copy()\n self.game_type = game_type\n self.n_games = 1\n\n self.n_qubits = self.n_qubits_from_state()\n\n self.reset(history_actions, n_crossover)\n\n # generate \"questions\" in equal number\n self.questions = list(itertools.product(list(range(self.n_questions)), repeat=self.num_players))\n\n @override\n def reset(self, history_actions, n_crossover):\n \"\"\" Initializes number of crossovers and CHSH environment with :param history_actions - new previous actions\"\"\"\n self.state = self.initial.copy()\n self.n_crossover = n_crossover\n self.repr_state = np.array([x for _ in range(self.num_players ** 2) for x in self.state], dtype=np.complex128)\n self.history_actions = history_actions\n self.for_plot = []\n self.population = [self.generate_individual() for _ in range(self.population_size)]\n\n @override\n def step(self, action):\n pass\n\n @override\n def generate_individual(self):\n \"\"\"Generate random individual.\"\"\"\n # tieto hyperparametre treba optimalizovat - brany\n return [str(action[0:4]) + str(random.uniform(-180, 180)) if action != 'xxr0' else 'xxr0' for action in\n self.history_actions]\n\n @override\n def fitness(self, x):\n \"\"\" Returns fitness of a given individual.\"\"\"\n result = []\n\n for g, q in enumerate(self.questions):\n # Alice - a and Bob - b share an entangled state\n # The input to alice and bob is random\n # Alice chooses her operation based on her input, Bob too - eg. 
a0 if alice gets 0 as input\n self.state = self.initial.copy()\n self.repr_state = np.array([x for _ in range(self.num_players ** 2) for x in self.state], dtype=np.complex128)\n\n for action in x:\n gate = self.get_gate(action)\n if gate == IGate: continue\n to_whom = action[0:2]\n try: gate_angle = np.array([action[4:]], dtype=np.float64)\n except ValueError: gate_angle = 0\n\n operation = []\n\n if (q[0] == 0 and to_whom == 'a0') or (q[0] == 1 and to_whom == 'a1'):\n calc_operation = np.kron(gate((gate_angle * pi / 180).item()).to_matrix(), np.identity(2))\n operation = calc_operation\n if (q[1] == 0 and to_whom == 'b0') or (q[1] == 1 and to_whom == 'b1'):\n calc_operation = np.kron(np.identity(2), gate((gate_angle * pi / 180).item()).to_matrix())\n operation = calc_operation\n\n if len(operation) != 0:\n self.state = np.matmul(operation, self.state)\n\n self.repr_state[g * self.num_players ** 2:(g + 1) * self.num_players ** 2] = self.state.copy()\n\n result.append(self.measure_probabilities_analytically())\n fitness_individual = self.calc_accuracy(result)\n return fitness_individual\n\n @override\n def number_mutation(self, x, prob):\n \"\"\" Elements of x are real numbers [0.0 .. 1.0]. Mutate (i.e. add/substract random number)\n each number in x with given probabipity.\"\"\"\n potomok = x\n for poc in range(len(potomok)):\n if random.random() <= prob:\n spocitaj = [float(gate[4:]) for gate in potomok]\n priemer = sum(spocitaj) / len(spocitaj)\n sigma_na_druhu = 0\n\n for i in spocitaj:\n sigma_na_druhu += (i - priemer) ** 2\n\n sigma_na_druhu = sigma_na_druhu / (len(spocitaj)) / 360 # Normal distribution\n\n if random.random() > 0.5:\n if potomok[poc] != 'xxr0':\n nahodne = random.uniform(0, sigma_na_druhu)\n potomok[poc] = potomok[poc][:4] + str(float(potomok[poc][4:]) - nahodne)\n\n else:\n if potomok[poc] != 'xxr0':\n nahodne = random.uniform(0, sigma_na_druhu)\n potomok[poc] = potomok[poc][:4] + str(float(potomok[poc][4:]) + nahodne)\n\n return potomok\n\n @override\n def mutation(self, x, prob):\n return self.number_mutation(x, prob)\n\n @override\n def solve(self, max_generations, goal_fitness=1):\n \"\"\"Implementation of genetic algorithm. Produce generations until some\n # individual`s fitness reaches goal_fitness, or you exceed total number\n # of max_generations generations. Return best found individual. \"\"\"\n best = super().solve(max_generations, goal_fitness)\n accuracy = self.fitness(best)\n return best, accuracy, self.repr_state # all is for best\n\n\nif __name__ == \"__main__\":\n # Solve to find optimal individual\n ACTIONS = ['r' + axis + \"0\" for axis in 'y']\n PERSON = ['a', 'b']\n QUESTION = ['0', '1']\n\n ALL_POSSIBLE_ACTIONS = [p + q + a for p in PERSON for q in QUESTION for a in ACTIONS] # place one gate at some place\n game = [[1, 0, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]]\n ga = CHSHgeneticOptimizer(population_size=30, n_crossover=len(ALL_POSSIBLE_ACTIONS) - 1, mutation_prob=0.1, history_actions=ALL_POSSIBLE_ACTIONS,\n game_type=game, best_or_worst=\"best\", state=np.array([0, 1 / sqrt(2), -1 / sqrt(2), 0], dtype=np.complex128))\n best = ga.solve(22) # you can also play with max. generations\n ga.show_individual(best[0])\n print(best[1])\n\n fig_dims = (10, 6)\n\n fig, ax = plt.subplots(figsize=fig_dims)\n plt.axhline(y=0.853, color='r', linestyle='-')\n plt.axhline(y=0.75, color='r', linestyle='-')\n plt.xlabel('Epochs')\n plt.ylabel('Win rate')\n\n plt.plot(ga.for_plot)\n plt.show()\n"
}
] | 40 |
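The `number_mutation` routine in the optimizer above perturbs each gate angle by a random amount whose spread is derived from the variance of all angles in the individual, normalized by 360. A standalone sketch of that idea, with made-up angles (not the repository's API):

```python
import random

def mutate_angles(angles, prob):
    # Spread mirrors the sigma_na_druhu computation above:
    # population variance of the angles, normalized by 360.
    mean = sum(angles) / len(angles)
    spread = sum((a - mean) ** 2 for a in angles) / len(angles) / 360
    mutated = []
    for angle in angles:
        if random.random() <= prob:
            # Nudge the angle up or down by a small random amount.
            angle += random.choice([-1, 1]) * random.uniform(0, spread)
        mutated.append(angle)
    return mutated

print(mutate_angles([10.0, -45.0, 90.0], prob=0.5))
```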
amore1302/ext_instagrm | https://github.com/amore1302/ext_instagrm | a219b59bbac3cfa05976eb61f7fa0e39ee400541 | 4e80523dee65ccc586de1fa05a78a6fe34eef8d0 | acfa2bcbe4280d71dc55f06b3d3e1168c70d2b7d | refs/heads/master | 2023-05-28T17:10:58.075774 | 2019-10-31T10:13:12 | 2019-10-31T10:13:12 | 218,131,107 | 0 | 0 | null | 2019-10-28T19:41:46 | 2019-10-31T10:13:43 | 2023-05-22T22:31:53 | Python | [
{
"alpha_fraction": 0.7023809552192688,
"alphanum_fraction": 0.7023809552192688,
"avg_line_length": 34.85714340209961,
"blob_id": "4907676c6d625ca067cc026a65ce0b20fc9fb17c",
"content_id": "aa0b41c43ba1bc99bcc1bdc7f7552ab06f3aaca2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 7,
"path": "/load_image.py",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "\nimport requests\ndef load_image_from_url_to_file(url_internet, full_file_name):\n response = requests.get(url_internet , verify=False)\n response.raise_for_status()\n\n with open(full_file_name, 'wb') as file:\n file.write(response.content)\n"
},
{
"alpha_fraction": 0.7680878639221191,
"alphanum_fraction": 0.7719638347625732,
"avg_line_length": 31.957447052001953,
"blob_id": "e2c60654b7358f65b7a613bb40a9f123ca841c3a",
"content_id": "ccfb99fd1d52ea289d636f300b34dc778db9dc4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2474,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 47,
"path": "/README.md",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "# Космический Инстаграм\n\n Учебный проект выполнен на Python3. Проект учит \n * По API описанию интернет ресурсов находит в этих ресурсах конкретные файлы\n * Конкретные файлы по url скачивать на локальный диск , используя get htth запросы\n * С помощью специального бота размещать локальные файлы(картинки) в Инстаграм\n\n\n### Как установить\n\n Для размещения файлов в Instagram надо иметь логин и пароль.\nЛогин и пароль инстаграмм необходимо поместить в файл `.env`\n\nФайл `.env` должен содержать две строки :\n* INTGR_LOGIN=<ВашЛогин>\n* INTGR_PASSWD=<ВашПароль>\n###### Обязательно пропишите свой логин и свой пароль Инстаграма\n#### Проект написан на языке Python3 и состоит из файлов :\n\n`main.py` - содержит основной модуль программы\n\n`fetch_hubble.py` - содержит get обращения \nк ресурсу http://hubblesite.org/api/v3 Телескоп Hubble \n\n`fetch_spacex.py` содержит get обращения запуска SpaceX\nк ресурсу https://api.spacexdata.com/v3\n\n`load_image.py` содержит модуль для счтывания файла из интернет ресурса в файл на диске.\n\n`requirements.txt` стандартный файл зависимостей для установки pethon окружения\n\n`.env` Описывает среду выполнения. Файл содержил логин и пароль для инстаграмма\n\n\nPython3 должен быть уже установлен. \nЗатем используйте `pip` (или `pip3`) для установки зависимостей:\n\n\n pip install -r requirements.txt\n\n\n\n\n### Цель проекта\n\nКод написан в образовательных целях на онлайн-курсе для веб-разработчиков [dvmn.org](https://dvmn.org/).\nПроект учит получать файлы из интернет ресурсов и далее размещеть файлы в Instagram"
},
{
"alpha_fraction": 0.6304945349693298,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 35.400001525878906,
"blob_id": "7e4abc7aa783e13ca6817fec0a23a831c7ee9f16",
"content_id": "3023687783b2053cf80c1ee253eb9f17f9cc16b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 20,
"path": "/fetch_spacex.py",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "import requests\nfrom load_image import load_image_from_url_to_file\nimport os\n\ndef fetch_spacex_last_launch():\n directory = os.path.join(\"images\", \"\")\n payload = {\n \"latest\": \"\",\n \"launch_date_utc\": \"2019-08-06T22:52:00.000Z\"\n }\n url_image = \"https://api.spacexdata.com/v3/launches\"\n response = requests.get(url_image, params=payload)\n if not response.ok:\n raise requests.exceptions.HTTPError(response=reponse)\n\n image_latest = response.json()[0]\n images = image_latest[\"links\"][\"flickr_images\"]\n for image_number, image in enumerate(images):\n full_file_name = \"{0}spacex{1}.jpg\".format(directory, image_number)\n load_image_from_url_to_file(image, full_file_name)\n"
},
{
"alpha_fraction": 0.48571428656578064,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 16.5,
"blob_id": "5daa04269d258e3730d2ba08ba7555510dc68d23",
"content_id": "d941896d182a6103a13a8b192d76809fa64f7dc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "requests==2.22.0\nPillow==6.2.1\ninstabot==0.71.0\npython-dotenv==0.10.3\n"
},
{
"alpha_fraction": 0.6571187973022461,
"alphanum_fraction": 0.6663858294487,
"avg_line_length": 33.882354736328125,
"blob_id": "39633b00d5b7f4cefc40cef17126166a5914b922",
"content_id": "78a1ac0aa875489a27887dccf0a0fc47396056d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1187,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 34,
"path": "/fetch_hubble.py",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "import requests\nfrom load_image import load_image_from_url_to_file\nimport os\n\n\n\n\ndef get_last_image_from_Hubble(id_image):\n url_image = \"http://hubblesite.org/api/v3/image/{0}\".format(id_image)\n response = requests.get(url_image)\n if not response.ok:\n raise requests.exceptions.HTTPError(response=response)\n images = response.json()[\"image_files\"]\n last_image = images[-1]\n url_image = last_image[\"file_url\"]\n\n _, file_extension = os.path.splitext(url_image)\n dir_name = os.path.join(\"images\", \"\")\n url_file = \"{0}{1}{2}\".format(dir_name , id_image, file_extension )\n\n first_symbols = url_image[0:4]\n if first_symbols != \"http\":\n url_image = \"https:{0}\".format(url_image)\n load_image_from_url_to_file(url_image, url_file)\n\ndef get_colection_from_Hubble(name_colection):\n url_colection = \"http://hubblesite.org/api/v3/images/{0}\".format(name_colection)\n response = requests.get(url_colection)\n if not response.ok:\n raise requests.exceptions.HTTPError(response=response)\n images = response.json()\n for curent_image in images:\n curent_id = curent_image[\"id\"]\n get_last_image_from_Hubble(curent_id)\n\n"
},
{
"alpha_fraction": 0.6501392722129822,
"alphanum_fraction": 0.6540390253067017,
"avg_line_length": 25.382352828979492,
"blob_id": "5a86367c1d300ee3d5d020876fd3fb7f69a59564",
"content_id": "cac29a649bacc8fab0c40fc8a6032bfd8651921b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1817,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 68,
"path": "/main.py",
"repo_name": "amore1302/ext_instagrm",
"src_encoding": "UTF-8",
"text": "\nfrom PIL import Image\nfrom instabot import Bot\n\n\nimport time\nfrom dotenv import load_dotenv\nfrom fetch_spacex import fetch_spacex_last_launch\nfrom fetch_hubble import get_colection_from_Hubble\n\n\nimport os, errno\nfrom os import listdir\nfrom os.path import isfile\nfrom os.path import join as joinpath\n\n\ndef create_dir_image(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\ndef image_to_square_size(file, cur_file):\n image = Image.open(file)\n width = image.width\n height = image.height\n square_size = width\n if height < width :\n square_size = height\n image.thumbnail((square_size, square_size))\n image.save(cur_file)\n image.close()\n\ndef image_to_unload_instagram(file, cur_file):\n time.sleep(7)\n bot.upload_photo(cur_file, caption=\"1\")\n if bot.api.last_response.status_code != 200:\n print(\"Не удалось выгрузить файл\")\n print(bot.api.last_response)\n print(\" \")\n\n\ndef file_unload_instagramm(cur_path):\n cur_path_and_slesh = os.path.join(cur_path, \"\")\n for cur_file in listdir(cur_path):\n if isfile(joinpath(cur_path, cur_file)):\n full_name_file = \"{0}{1}\".format(cur_path_and_slesh,cur_file)\n image_to_square_size(full_name_file, cur_file)\n image_to_unload_instagram(full_name_file, cur_file)\n\ndef main():\n load_dotenv()\n dir_name = os.path.join(\"images\", \"\")\n create_dir_image(dir_name)\n\n fetch_spacex_last_launch()\n get_colection_from_Hubble(\"spacecraft\")\n\n inst_login = os.getenv(\"INTGR_LOGIN\")\n inst_passwd = os.getenv(\"INTGR_PASSWD\")\n bot = Bot()\n bot.login(username=inst_login, password=inst_passwd)\n file_unload_instagramm(\"images\")\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 6 |
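The README and `main.py` above share a small contract: credentials come from a `.env` file loaded via python-dotenv. A minimal standalone sketch of that pattern, using the variable names the README defines:

```python
import os
from dotenv import load_dotenv

# Reads INTGR_LOGIN / INTGR_PASSWD from a local .env file, as the README describes.
load_dotenv()
login = os.getenv("INTGR_LOGIN")
password = os.getenv("INTGR_PASSWD")
print("login configured:", login is not None)
```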
arnoldlayne0/tictactoe | https://github.com/arnoldlayne0/tictactoe | 2ab03cdc47bde1e20467f737950566ef39c9a1d5 | 890edf0c228406a1ecde4b86b511774d367c1d58 | 04395a064a5027e281d5d7856ba3eabab93bdec9 | refs/heads/master | 2020-04-26T03:59:25.060396 | 2019-03-07T10:17:41 | 2019-03-07T10:17:41 | 173,285,825 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5288828015327454,
"alphanum_fraction": 0.5331504344940186,
"avg_line_length": 33.531578063964844,
"blob_id": "a6852db08c04d1f3f3d56c216ea8d43bab96d3f7",
"content_id": "59ad88e2a778c35a9ffc039da3dc25c4df5c487b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6561,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 190,
"path": "/big_board.py",
"repo_name": "arnoldlayne0/tictactoe",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\nimport random\n# import numpy as np\n# import pandas as pd\nfrom functools import reduce\nimport board\n\n\nclass TakenFieldError(Exception):\n pass\n\n\nclass LocalBoardFinishedError(Exception):\n pass\n\n\nclass NotPositiveIntegerError(Exception):\n pass\n\n\nclass BigBoard:\n def __init__(self, size):\n self.size = size\n self.subboards = [reduce((lambda x, y: x + y),\n ([board.Board(size)] for _ in range(self.size)))\n for _ in range(self.size)]\n self.moves_history = []\n self.metaboard = board.Board(size)\n\n def big_board_to_string(self):\n big_rows = []\n for br in range(self.size):\n small_rows = []\n for sr in range(self.size):\n board_list = []\n for col in range(self.size):\n row = ''.join(self.subboards[br][col].board[sr])\n board_list.append(row)\n res1 = '|'.join(board_list)\n small_rows.append(res1)\n res2 = '\\n'.join(small_rows)\n big_rows.append(res2)\n div_list = ['/' * 3] * 3\n div_str = '\\n' + '|'.join(div_list) + '\\n'\n res3 = div_str.join(big_rows) + '\\n'\n return res3\n\n def __str__(self):\n return self.big_board_to_string()\n\n def num_to_ind(self, num):\n if num < 1:\n raise NotPositiveIntegerError\n num -= 1\n return divmod(num, self.size)\n\n def get_subboard_list(self):\n return [i for sub in self.subboards for i in sub]\n\n def get_legal_subboards(self):\n subboard_list = self.get_subboard_list()\n return [i + 1 for i, p in enumerate(subboard_list) if p.board_finished() == False]\n\n def is_restricted(self):\n if len(self.moves_history) == 0:\n return False\n board_row, board_col = self.num_to_ind(self.moves_history[-1]['field'])\n if self.subboards[board_row][board_col].board_finished():\n return False\n return True\n\n def _make_move(self, board_num, field_num, sym):\n if board_num not in self.get_legal_subboards():\n raise LocalBoardFinishedError\n board_row, board_col = self.num_to_ind(board_num)\n field_row, field_col = self.num_to_ind(field_num)\n curr_local_board = self.subboards[board_row][board_col]\n if curr_local_board.board[field_row][field_col] != '_':\n raise TakenFieldError\n try:\n # nie lap bledu tutaj jesli chcesz lapac pozniej\n curr_local_board.board[field_row][field_col] = sym\n return self\n except (IndexError, ValueError):\n pass\n\n def make_player_move(self, sym):\n while True:\n if self.is_restricted():\n board_num = self.moves_history[-1]['field']\n else:\n board_num = input('input board number')\n field_num = input('input field number')\n try:\n board_num = int(board_num)\n field_num = int(field_num)\n self._make_move(board_num, field_num, sym)\n # self._last_move = field_num\n # append moves history in _make_move()\n self.moves_history.append({'number': len(self.moves_history), 'board': board_num, 'field': field_num})\n break\n except (NotPositiveIntegerError, ValueError):\n print('input a positive integer')\n except IndexError:\n print('make a valid move within the board')\n except LocalBoardFinishedError:\n print('make a move on a valid board')\n except TakenFieldError:\n print('field taken')\n\n def make_random_legal_move(self, sym):\n if self.is_restricted():\n board_num = self.moves_history[-1]['field']\n else:\n board_num = random.choice(self.get_legal_subboards())\n board_row, board_col = self.num_to_ind(board_num)\n field_num = random.choice(self.subboards[board_row][board_col].get_legal_moves())\n self._make_move(board_num, field_num, sym)\n self.moves_history.append({'number': len(self.moves_history), 'board': board_num, 'field': field_num})\n\n def get_local_winner(self):\n return 
[b.get_winner() or 'draw' if b.board_finished() else '_' for b in self.get_subboard_list()]\n\n def update_metaboard(self):\n self.metaboard = board.Board(self.size)\n for ind, sym in enumerate(self.get_local_winner()):\n self.metaboard._make_move(ind + 1, sym)\n\n def get_global_winner(self):\n if self.metaboard.board_finished():\n return self.metaboard.get_winner() or 'draw'\n else:\n return 'nobody yet'\n\n def play_one_random_game(self):\n i = 0\n while self.metaboard.board_finished() == False:\n if i % 2 == 0:\n sym = 'x'\n else:\n sym = 'o'\n self.make_random_legal_move(sym)\n i += 1\n self.update_metaboard()\n winner = self.get_global_winner()\n return winner\n\n def play_two_players(self):\n i = 0\n while self.metaboard.board_finished() == False:\n if i % 2 == 0:\n sym = 'x'\n else:\n sym = 'o'\n self.make_player_move(sym)\n print(self)\n i += 1\n self.update_metaboard()\n winner = self.get_get_winner() or 'draw'\n return winner\n\n # te dwie funkcje moga byc jedna\n def _human_or_machine_move(self, who, sym):\n if who == 'h':\n self.make_player_move(sym)\n elif who == 'm':\n self.make_random_legal_move(sym)\n else:\n raise Exception\n\n def play_against_the_machine(self):\n goes_first = None\n while goes_first not in 'hm':\n goes_first = input('choose who goes first (input h for human or m for machine)')\n i = 0\n if goes_first == 'h':\n sym_play_dict = {'x': 'h', 'o': 'm'}\n elif goes_first == 'm':\n sym_play_dict = {'x': 'm', 'o': 'h'}\n while self.metaboard.board_finished() == False:\n if i % 2 == 0:\n sym = 'x'\n else:\n sym = 'o'\n self._human_or_machine_move(sym_play_dict[sym], sym)\n print(self)\n i += 1\n self.update_metaboard()\n winner = self.get_get_winner() or 'draw'\n return winner\n"
},
{
"alpha_fraction": 0.6232876777648926,
"alphanum_fraction": 0.6347032189369202,
"avg_line_length": 30.309524536132812,
"blob_id": "7d63dcbb92d7b969724a73874040a96110d102f2",
"content_id": "6706d923aeaeb4bac61cdab60ab14d948965213b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1314,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 42,
"path": "/mc_big.py",
"repo_name": "arnoldlayne0/tictactoe",
"src_encoding": "UTF-8",
"text": "import big_board\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef simulate_n_games(n):\n cols = ['draw', 'o', 'x']\n games_df = pd.DataFrame(0, columns=cols, index=range(n))\n for i in range(n):\n my_board = big_board.BigBoard(3)\n winner = my_board.play_one_random_game()\n games_df.loc[i][winner] = 1\n wins = games_df.mean()\n return wins, games_df\n\nwins, games = simulate_n_games(5)\nprint(wins)\n\ndef simulate_n_games_convergence(n):\n cols = ['draw', 'o', 'x']\n summary_df = pd.DataFrame(columns=cols, index=range(1, n+1))\n for i in range(1, n+1):\n summary_df.loc[i]['draw':'x'], _ = simulate_n_games(n)\n print(str(i) + ' done')\n summary_df.columns = ['draws', 'o wins', 'x wins']\n return summary_df\n\ndef plot_convergence(summary_df, filename):\n sns.set()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for col in summary_df.columns:\n ax.plot(summary_df.index, np.array(summary_df.loc[:, col]), label=col)\n ax.legend(loc='best')\n ax.set_ylabel('result percentage')\n ax.set_xlabel('number of games')\n ax.set_title('ultimate kolko i krzyzyk mc convergence')\n fig.savefig(filename)\n\n#summary_5 = simulate_n_games_convergence(5)\n#plot_convergence(summary_5, 'mc_5_test')"
},
{
"alpha_fraction": 0.5128142833709717,
"alphanum_fraction": 0.5184281468391418,
"avg_line_length": 30.76744270324707,
"blob_id": "d396ebec1e16fd27f5d9af7bc7baa9d1d328ae1b",
"content_id": "c74cb44a6c4536f7615cf4f5d48c9be2a7ef6f59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4097,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 129,
"path": "/board.py",
"repo_name": "arnoldlayne0/tictactoe",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\nimport random\n#import numpy as np\n#import pandas as pd\n#import matplotlib.pyplot as plt\n#import seaborn as sns\n\nclass TakenFieldError(Exception):\n pass\n\nclass Board:\n def __init__(self, size):\n self.size = size\n self.board = [['_'] * self.size for i in range(self.size)]\n rows = [[(i, j) for j in range(self.size)] for i in range(self.size)]\n cols = [[(i, j) for i in range(self.size)] for j in range(self.size)]\n diag_one = [[(i, i) for i in range(self.size)]]\n diag_two = [[(i, self.size - 1 - i) for i in range(self.size)]]\n self._all_combs = rows + cols + diag_one + diag_two\n self.moves_history = []\n\n def print_board(self):\n for row in self.board:\n print(row)\n\n def board_to_string(self):\n rows = []\n for row in self.board:\n rows.append(''.join(row))\n return '\\n'.join(rows)\n\n def __str__(self):\n return self.board_to_string()\n\n def get_board_list(self):\n return [i for sub in self.board for i in sub]\n\n def get_legal_moves(self):\n return [i + 1 for i, p in enumerate(self.get_board_list()) if p == \"_\"]\n\n def num_to_ind(self, num):\n num -= 1\n return divmod(num, self.size)\n\n def ind_to_num(self, ind):\n return ind[0] * self.size + ind[1] + 1\n\n # uzywaj tego\n def _make_move(self, num, sym):\n row, col = self.num_to_ind(num)\n if self.board[row][col] != '_':\n raise TakenFieldError\n try:\n self.board[row][col] = sym\n return self\n except (IndexError, ValueError):\n pass\n\n def make_player_move(self, sym):\n while True:\n move = raw_input()\n try:\n move = int(move)\n self._make_move(move, sym)\n self.moves_history.append(move)\n break\n #except (Exception, IndexError, ValueError):\n # print('move outside of the board, on a taken field or not a number, please make a valid move')\n except ValueError:\n print('insert a number')\n except IndexError:\n print('make a move within the board')\n except TakenFieldError:\n print('make a move on a field that is not already taken')\n\n def make_random_legal_move(self, sym):\n move = random.choice(self.get_legal_moves())\n self._make_move(move, sym)\n self.moves_history.append(move)\n\n def get_winner(self):\n for comb in self._all_combs:\n vals = {self.board[i][j] for (i, j) in comb}\n if len(vals) == 1 and (vals != {'_'}):\n return vals.pop()\n\n def board_finished(self):\n if self.get_winner() != None or len(self.get_legal_moves()) == 0:\n return True\n return False\n\n def moves_to_boards(self):\n boards = [Board(self.size) for i in range(len(self.moves_history)+1)]\n for i in range(1, len(self.moves_history)+1):\n if i % 2 == 0:\n sym = 'o'\n else:\n sym = 'x'\n m = self.moves_history[i-1]\n boards[i] = deepcopy(boards[i-1])._make_move(m, sym)\n boards = [b.board for b in boards]\n return boards\n\n def play_one_random_game(self):\n i = 0\n boards = []\n while self.board_finished() == False:\n if i % 2 == 0:\n sym = 'x'\n else:\n sym = 'o'\n self.make_random_legal_move(sym)\n boards.append(deepcopy(self.board))\n i += 1\n winner = self.get_winner() or 'draw'\n return winner, self.moves_history, boards\n\n def play_two_players(self):\n i = 0\n while self.board_finished() == False:\n if i % 2 == 0:\n sym = 'x'\n else:\n sym = 'o'\n self.make_player_move(sym)\n print(self.board_to_string())\n i += 1\n winner = self.get_winner() or 'draw'\n return winner, self.moves_history"
}
] | 3 |
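Taken together, the three files above form a small pipeline: `board.Board` supplies the single-grid primitives, `big_board.BigBoard` composes nine of them into the ultimate tic-tac-toe rules, and `mc_big` samples random games. A minimal usage sketch of that API, as defined in the files above (both modules must sit side by side on the path):

```python
import big_board

# Play one fully random ultimate tic-tac-toe game on a 3x3-of-3x3 board.
game = big_board.BigBoard(3)
winner = game.play_one_random_game()

print(game)                # ASCII rendering from big_board_to_string()
print("winner:", winner)   # 'x', 'o' or 'draw'
```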
rajatrawat99/UdacityFinalProject | https://github.com/rajatrawat99/UdacityFinalProject | 19693f611534fbbeaa4b1c72b37c8d2782ffaf8f | f54b19b537e82eef6bac1a71bc0dfef1c0040b08 | b7ee27f19938f17431a7ce8ac5bb534c2a218170 | refs/heads/master | 2020-09-01T04:08:37.601028 | 2019-11-30T05:16:39 | 2019-11-30T05:16:39 | 218,877,106 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7875573635101318,
"alphanum_fraction": 0.8033257126808167,
"avg_line_length": 86.19999694824219,
"blob_id": "300cc894b2102e598f4269e91066e3681e4b9bc7",
"content_id": "cc473c2ea25e1031d9fa4ce006b5742c65d8925b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3488,
"license_type": "no_license",
"max_line_length": 416,
"num_lines": 40,
"path": "/README.md",
"repo_name": "rajatrawat99/UdacityFinalProject",
"src_encoding": "UTF-8",
"text": "## Project Overview\n\nIn this project, I have deployed a simple flask application to the AWS EKS. for this project I have used instructions of Alvaro Andres from following two links:\n1. https://medium.com/@andresaaap/capstone-cloud-devops-nanodegree-4493ab439d48\n2. https://medium.com/@andresaaap/jenkins-pipeline-for-blue-green-deployment-using-aws-eks-kubernetes-docker-7e5d6a401021\n\nI have not used ansible or cloudformation for building the environment as I said I followed the the above links from Alvaro Andres which takes you to this aws link: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html. (Also it is mentioned here: https://medium.com/@andresaaap/jenkins-pipeline-for-blue-green-deployment-using-aws-eks-kubernetes-docker-7e5d6a401021)\n\nHere I have used \"eksctl create cluster\" command, which also jenkins jx uses to create EKS cluster in background. This makes building EKS cluster really easy as in this you only have to run one command and declare all the variable in it and also it is very easy to update it.\nin my project I have kept this command in a shell script in which you send the name of the environment as command line argument. \nLike this \"./aws.sh RAJATCloud\" . below image shows the \"aws.sh\" shell script\n\n\nBelow image shows how to run the \"aws.sh\" script with env name as argument passed from command line, also it shows its output\n\n\nI have chosen the the rolling deployment here as it is easy to implement here. I have chosen 3 replicas for my rolling deployment which means in any case of update there will always be 3 pods serving my app. Old pods will only get terminated when there is newly 3 created pods to serve the application. In short there will never be downtime for my application which is ultimate aim of the Rolling update.\n\nBelow image can demostrate working of the rolling update better:\n\n\nFirst stage shows when the update has not started, you can see there are 3 pods serving. After 1st stage rolling update has started. Then till the last stage you can see pods are getting created and terminated but the total number of pods always remains 3 which results no downtime which is ultimate goal of rolling deployment. When the update is done you can see new pods have been created and there is no downtime.\n\nJenkinsfile Pipeline Stages\n1. Clone repository: Checkout git ripo\n2. Linting Dockerfile: Lint the Dockerfile with Hadolint\n3. Build Docker image: Builds the Docker image\n4. Test Docker image: Test the Docker image\n5. Push Docker image to Docker-hub: Registering with Docker-hub and then push the image to Docker-hub\n6. Update kubectl config: Update the config file so that kubectl can access it\n7. Deploy Docker image to EKS: Deploy the image to AWS EKS Cluster\n8. Clean Docker Images: Cleaning Dangling docker images and unused containers from the environment.\n\nBelow image shows the failing of the pipeline at linting stage due to error at Dockerfile\n\n\n\n\nBelow image shows all the stages has passed successfully\n\n"
},
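The README above leans on Kubernetes' rolling-update guarantee that three replicas stay up throughout a deploy. For illustration only, here is one way a pipeline step could wait on that from Python by shelling out to kubectl; the deployment name is hypothetical and a working kubeconfig is assumed:

```python
import subprocess

def wait_for_rollout(deployment, namespace="default", timeout="120s"):
    # 'kubectl rollout status' blocks until the rolling update finishes
    # (or the timeout expires) and returns a non-zero exit code on failure.
    result = subprocess.run(
        ["kubectl", "rollout", "status", f"deployment/{deployment}",
         "-n", namespace, f"--timeout={timeout}"],
        capture_output=True, text=True)
    print(result.stdout or result.stderr)
    return result.returncode == 0

wait_for_rollout("flask-app")  # hypothetical deployment name
```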
{
"alpha_fraction": 0.695652186870575,
"alphanum_fraction": 0.7275362610816956,
"avg_line_length": 19.352941513061523,
"blob_id": "5899f41f125725aa9cc37031336fc305e8d708a6",
"content_id": "b767d1590cd7d4cdfd205a0a9fc1ce777bb26fac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/Dockerfile",
"repo_name": "rajatrawat99/UdacityFinalProject",
"src_encoding": "UTF-8",
"text": "FROM python:3.7.3-stretch\n\n#work dir\nWORKDIR /app\n\n#copy source code\nCOPY/ . flaskApp.py /app/\n\n# hadolint ignore=DL3013\nRUN pip install --upgrade pip --disable-pip-version-check &&\\\n pip install --trusted-host pypi.pyton.org -r requirements.txt\n\n# Expose port 80\nEXPOSE 80\n\n# Run flaskApp.py at container launch\nCMD [\"python\", \"flaskApp.py\"]"
},
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.8064516186714172,
"avg_line_length": 7.857142925262451,
"blob_id": "f35c40dc8ea3f887db7f914c8d101775d6c7cee2",
"content_id": "998634db02b9102dfc88199aa7a50b00d5151909",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "rajatrawat99/UdacityFinalProject",
"src_encoding": "UTF-8",
"text": "Flask\nnumpy\npandas\nscikit-learn==0.20.2\npylint\nescape\nrequest\n"
},
{
"alpha_fraction": 0.5974025726318359,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 22.200000762939453,
"blob_id": "0c9d0732210c557c8eddd7823b01969d58d2ee1e",
"content_id": "00c081e46f21f9907ba1c1a6fdd7f16968bf05a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 231,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 10,
"path": "/flaskApp.py",
"repo_name": "rajatrawat99/UdacityFinalProject",
"src_encoding": "UTF-8",
"text": "from flask import Flask, escape, request\n\napp = Flask(__name__)\n\[email protected]('/')\ndef hello():\n name = request.args.get(\"name\", \"Udacity... This is Rajat \\n\")\n return f'Hello, {escape(name)}!'\n\napp.run(host='0.0.0.0', port=80)"
},
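To mirror the "Test Docker image" stage that the README above lists, a minimal smoke test of this Flask endpoint could look like the following; it assumes the container is already running with port 80 published on localhost:

```python
import requests

# Query the root endpoint with the optional 'name' parameter.
resp = requests.get("http://localhost:80/", params={"name": "Pipeline"}, timeout=5)
assert resp.status_code == 200
assert "Pipeline" in resp.text
print(resp.text)
```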
{
"alpha_fraction": 0.6631578803062439,
"alphanum_fraction": 0.7105262875556946,
"avg_line_length": 62.66666793823242,
"blob_id": "f213cd3a77df49404046a7b41e7be91d438312ce",
"content_id": "06765d94ce937a1b04c124cecb9a019d72fb39e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 3,
"path": "/aws.sh",
"repo_name": "rajatrawat99/UdacityFinalProject",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\neksctl create cluster --name $1 --version 1.14 --region us-west-2 --nodegroup-name standard-workers --node-type t3.medium --nodes 1 --nodes-min 1 --nodes-max 2 --managed"
}
] | 5 |
da4id/OctoPrint-MyStromSwitch | https://github.com/da4id/OctoPrint-MyStromSwitch | b9b24a3264e6477189287ed58c9a33054bd119ba | e7bf0762d39938fb81b1d2d1945336df0e96d103 | 76a9a0d9c58b5263cd8a0b25c72bafc2ded0bd0d | refs/heads/master | 2021-07-13T22:22:25.876634 | 2020-07-25T14:49:36 | 2020-07-25T14:49:36 | 222,783,171 | 4 | 6 | null | 2019-11-19T20:26:19 | 2020-08-15T20:57:45 | 2020-08-29T14:45:16 | Python | [
{
"alpha_fraction": 0.7720797657966614,
"alphanum_fraction": 0.7735042572021484,
"avg_line_length": 20.96875,
"blob_id": "ab04222e60b30c95d6fec0fc3212e5a0dd6614be",
"content_id": "5984f648cdc097b43a7d8a3091bb83fdc419c88b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 32,
"path": "/README.md",
"repo_name": "da4id/OctoPrint-MyStromSwitch",
"src_encoding": "UTF-8",
"text": "# OctoPrint MyStrom Switch Plugin\n\nThis OctoPrint plugin enables the system to control the myStrom switch and read the current Powerconsumption of your system\n\nSettings Tab\n\n\n\nMainscreen\nYou can see the Plugin on the bottom right\n\n\n\nSwitch is off\n\n\n\nSwitch is on\n\n\n\nWith toggle Button enabled\n\n\n\n\n## Setup\n\nInstall via the bundled [Plugin Manager](https://github.com/foosel/OctoPrint/wiki/Plugin:-Plugin-Manager)\nor manually using this URL:\n\n https://github.com/da4id/OctoPrint-MyStromSwitch/archive/master.zip"
},
{
"alpha_fraction": 0.5779515504837036,
"alphanum_fraction": 0.5815606713294983,
"avg_line_length": 40.3733024597168,
"blob_id": "16bff9cc319b39f4b16abc60319a56b59d42ca26",
"content_id": "52564d3d7d2d7dc98938c526089ecc5a720f08a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18287,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 442,
"path": "/octoprint_mystromswitch/__init__.py",
"repo_name": "da4id/OctoPrint-MyStromSwitch",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nfrom __future__ import absolute_import\n\nimport octoprint.plugin\nimport requests\nimport ssl\nimport time\nfrom octoprint.events import eventManager, Events\nfrom octoprint.util import RepeatedTimer\n\n\nclass MyStromSwitchPlugin(octoprint.plugin.SettingsPlugin,\n octoprint.plugin.AssetPlugin,\n octoprint.plugin.TemplatePlugin,\n octoprint.plugin.StartupPlugin,\n octoprint.plugin.EventHandlerPlugin,\n octoprint.plugin.SimpleApiPlugin,\n octoprint.plugin.ShutdownPlugin):\n\n def __init__(self):\n self.ip = None\n self.token = \"\"\n self.intervall = 1\n self.onOffButtonEnabled = False\n self.powerOnOnStart = False\n self.powerOffOnShutdown = False\n self.powerOffDelay = 0\n self.showShutdownOctopiOption = False\n self.showPowerOffPrintFinishOption = False\n self.shutdownDelay = 60\n\n self.rememberShutdown = False\n self.lastPowerOff = False\n self.lastShutdown = False\n\n self.shutdownAfterPrintFinished = self.lastShutdown if self.rememberShutdown else False\n self.powerOffAfterPrintFinished = self.lastPowerOff if self.rememberShutdown else False\n\n self._status_timer = None\n self._abort_timer = None\n self._wait_for_timelapse_timer = None\n\n self.energy = 0\n self.lastTimeStamp = 0\n\n self.ctx = ssl.create_default_context()\n self.ctx.check_hostname = False\n self.ctx.verify_mode = ssl.CERT_NONE\n\n def initialize(self):\n self.ip = self._settings.get([\"ip\"])\n self._logger.debug(\"ip: %s\" % self.ip)\n\n self.token = self._settings.get([\"token\"])\n self._logger.debug(\"token: %s\" % self.token)\n\n self.intervall = self._settings.get_int([\"intervall\"])\n self._logger.debug(\"intervall: %s\" % self.intervall)\n\n self.onOffButtonEnabled = self._settings.get_boolean([\"onOffButtonEnabled\"])\n self._logger.debug(\"onOffButtonEnabled: %s\" % self.onOffButtonEnabled)\n\n self.powerOnOnStart = self._settings.get_boolean([\"powerOnOnStart\"])\n self._logger.debug(\"powerOnOnStart: %s\" % self.powerOnOnStart)\n\n self.powerOffOnShutdown = self._settings.get_boolean([\"powerOffOnShutdown\"])\n self._logger.debug(\"powerOffOnShutdown: %s\" % self.powerOffOnShutdown)\n\n self.powerOffDelay = self._settings.get_int([\"powerOffDelay\"])\n self._logger.debug(\"powerOffDelay: %s\" % self.powerOffDelay)\n\n self.showShutdownOctopiOption = self._settings.get_boolean([\"showShutdownOctopiOption\"])\n self._logger.debug(\"showShutdownOctopiOption: %s\" % self.showShutdownOctopiOption)\n\n self.showPowerOffPrintFinishOption = self._settings.get_boolean([\"showPowerOffPrintFinishOption\"])\n self._logger.debug(\"showPowerOffPrintFinishOption: %s\" % self.showPowerOffPrintFinishOption)\n\n self.shutdownDelay = self._settings.get_int([\"shutdownDelay\"])\n self._logger.debug(\"shutdownDelay: %s\" % self.shutdownDelay)\n\n self.rememberShutdown = self._settings.get_boolean([\"rememberShutdown\"])\n self._logger.info(\"rememberShutdown: %s\" % self.rememberShutdown)\n\n self.lastPowerOff = self._settings.get_boolean([\"lastPowerOff\"])\n self._logger.info(\"lastPowerOff: %s\" % self.lastPowerOff)\n\n self.lastShutdown = self._settings.get_boolean([\"lastShutdown\"])\n self._logger.info(\"lastShutdown: %s\" % self.lastShutdown)\n\n if self.rememberShutdown:\n self.powerOffAfterPrintFinished = self.lastPowerOff\n self.shutdownAfterPrintFinished = self.lastShutdown\n\n self._status_timer_start()\n\n def get_assets(self):\n return dict(js=[\"js/mystromswitch.js\"], css=[\"css/mystromswitch.css\"])\n\n def get_template_configs(self):\n return [dict(type=\"sidebar\",\n 
name=\"MyStrom Switch\",\n custom_bindings=False,\n icon=\"power-off\"),\n dict(type=\"settings\", custom_bindings=False)]\n\n def _shutdown_timer_start(self):\n if self._abort_timer is not None:\n return\n self._logger.info(\"_shutdown_timer_start\")\n\n if self._wait_for_timelapse_timer is not None:\n self._wait_for_timelapse_timer.cancel()\n\n self._logger.info(\"Starting abort shutdown timer.\")\n\n self._timeout_value = self.shutdownDelay\n self._abort_timer = RepeatedTimer(1, self._shutdown_timer_task)\n self._abort_timer.start()\n\n def _wait_for_timelapse_start(self):\n if self._wait_for_timelapse_timer is not None:\n return\n self._logger.info(\"_wait_for_timelapse_start()\")\n\n self._wait_for_timelapse_timer = RepeatedTimer(5, self._wait_for_timelapse)\n self._wait_for_timelapse_timer.start()\n\n def _wait_for_timelapse(self):\n c = len(octoprint.timelapse.get_unrendered_timelapses())\n\n if c > 0:\n self._logger.info(\"Waiting for %s timelapse(s) to finish rendering before starting shutdown timer...\" % c)\n else:\n self._shutdown_timer_start()\n\n def _shutdown_timer_task(self):\n if self._timeout_value is None:\n return\n\n self._timeout_value -= 1\n if self._timeout_value <= 0:\n if self._wait_for_timelapse_timer is not None:\n self._wait_for_timelapse_timer.cancel()\n self._wait_for_timelapse_timer = None\n if self._abort_timer is not None:\n self._abort_timer.cancel()\n self._abort_timer = None\n if self.shutdownAfterPrintFinished and self.showShutdownOctopiOption:\n self._shutdown_system()\n elif self.powerOffAfterPrintFinished and self.showPowerOffPrintFinishOption:\n self._logger.info(\"only Shutdown Relais\")\n self._setRelaisState(False)\n\n def _status_timer_start(self):\n if self._status_timer is not None:\n self._status_timer.cancel()\n self._logger.info(\"Canceling Timer\")\n\n if self.intervall >= 1 and self.ip is not None:\n self._logger.info(\"Starting timer\")\n self._status_timer = RepeatedTimer(self.intervall, self._status_timer_task)\n self._status_timer.start()\n\n def _shutdown_system(self):\n self._logger.info(\"Shutdown Relais and System\")\n self._powerCycleRelais(False, self.powerOffDelay)\n shutdown_command = self._settings.global_get([\"server\", \"commands\", \"systemShutdownCommand\"])\n self._logger.info(\"Shutting down system with command: {command}\".format(command=shutdown_command))\n try:\n import sarge\n p = sarge.run(shutdown_command, async_=True)\n except Exception as e:\n self._logger.exception(\"Error when shutting down: {error}\".format(error=e))\n return\n\n def _status_timer_task(self):\n if self.ip is not None:\n try:\n try:\n request = requests.get(\n 'http://{}/report'.format(self.ip), headers={\"Token\": self.token}, timeout=1)\n if request.status_code == 200:\n timestamp = time.time()\n data = request.json()\n if not self.lastTimeStamp == 0:\n intervall = timestamp - self.lastTimeStamp\n # Energy in Wh\n self.energy = self.energy + (intervall * data[\"power\"] / 3600)\n self._logger.debug(\n \"Energy: \" + str(self.energy) + \" interval: \" + str(intervall) + \" power: \" + str(\n data[\"power\"]))\n self.lastTimeStamp = timestamp\n data[\"energy\"] = self.energy\n data[\"onOffButtonEnabled\"] = self.onOffButtonEnabled\n data[\"showShutdownOctopiOption\"] = self.showShutdownOctopiOption\n data[\"showPowerOffPrintFinishOption\"] = self.showPowerOffPrintFinishOption\n data[\"automaticShutdownEnabled\"] = self.shutdownAfterPrintFinished\n data[\"automaticPowerOffEnabled\"] = self.powerOffAfterPrintFinished\n 
self._plugin_manager.send_plugin_message(self._identifier, data)\n return\n except (requests.exceptions.ConnectionError, ValueError) as e:\n self._logger.exception(e)\n except Exception as exp:\n self._logger.exception(exp)\n else:\n self._logger.info(\"Ip is None\")\n data = {\"relay\": True, \"energy\": 0, \"onOffButtonEnabled\": False, \"showShutdownOctopiOption\": False,\n \"showPowerOffPrintFinishOption\": False, \"automaticShutdownEnabled\": self.shutdownAfterPrintFinished,\n \"automaticPowerOffEnabled\": self.powerOffAfterPrintFinished}\n self._plugin_manager.send_plugin_message(self._identifier, data)\n\n def _setRelaisState(self, newState):\n nbRetry = 0\n value = '0'\n if newState:\n value = '1'\n while nbRetry < 3:\n try:\n headers = {}\n if self.token is not None and self.token != \"\":\n headers = {\"Token\": self.token}\n request = requests.get(\n 'http://{}/relay'.format(self.ip), params={'state': value}, headers=headers, timeout=1)\n if request.status_code == 200:\n return\n else:\n self._logger.info(\n \"Could not set new Relais State, Http Status Code: {}\".format(request.status_code))\n except requests.exceptions.ConnectionError:\n self._logger.info(\"Error during set Relais state\")\n nbRetry = nbRetry + 1\n\n # Sets the switch to a specific inverse newState,\n # waits for a specified amount of time (max 3h),\n # then sets the the switch to the newState.\n def _powerCycleRelais(self, newState, time):\n nbRetry = 0\n value = 'on'\n if newState:\n value = 'off'\n while nbRetry < 3:\n try:\n try:\n self._logger.info(\"try to send Powercycle Request\")\n self._logger.info('http://{}/timer'.format(self.ip))\n request = requests.post(\n 'http://{}/timer'.format(self.ip), params={'mode': value, 'time': time}, headers={\"Token\": self.token},\n timeout=1)\n if request.status_code == 200:\n return\n else:\n self._logger.info(\n \"Could not powerCycle Relais, Http Status Code: {}\".format(request.status_code))\n except requests.exceptions.ConnectionError as e:\n self._logger.exception(e)\n self._logger.info(\"Error during powerCycle Relais: \" + str(e.message))\n except Exception as exp:\n self._logger.exception(exp)\n nbRetry = nbRetry + 1\n\n def _toggleRelay(self):\n nbRetry = 0\n while nbRetry < 3:\n try:\n request = requests.get(\n 'http://{}/toggle'.format(self.ip), headers={\"Token\": self.token}, timeout=1)\n if request.status_code == 200:\n return\n else:\n self._logger.info(\"Could not toggle Relay State, Http Status Code: {}\".format(request.status_code))\n except requests.exceptions.ConnectionError:\n self._logger.info(\"Error during toggle Relais state\")\n nbRetry = nbRetry + 1\n\n def on_api_command(self, command, data):\n if command == \"enableRelais\":\n self._logger.info(\"enableRelais\")\n self._setRelaisState(True)\n elif command == \"disableRelais\":\n self._logger.info(\"disableRelais\")\n self._setRelaisState(False)\n elif command == \"toggleRelais\":\n self._logger.info(\"toggleRelais\")\n self._toggleRelay()\n elif command == \"enableShutdownAfterFinish\":\n self._logger.info(\"enableShutdownAfterFinish\")\n self.shutdownAfterPrintFinished = True\n if self.rememberShutdown:\n self.lastShutdown = self.shutdownAfterPrintFinished\n self._settings.set_boolean([\"lastShutdown\"], self.lastShutdown)\n self._settings.save()\n elif command == \"disableShutdownAfterFinish\":\n self._logger.info(\"disableShutdownAfterFinish\")\n self.shutdownAfterPrintFinished = False\n if self.rememberShutdown:\n self.lastShutdown = self.shutdownAfterPrintFinished\n 
self._settings.set_boolean([\"lastShutdown\"], self.lastShutdown)\n self._settings.save()\n elif command == \"enablePowerOffAfterFinish\":\n self._logger.info(\"enablePowerOffAfterFinish\")\n self.powerOffAfterPrintFinished = True\n if self.rememberShutdown:\n self.lastPowerOff = self.powerOffAfterPrintFinished\n self._settings.set_boolean([\"lastPowerOff\"], self.lastPowerOff)\n self._settings.save()\n elif command == \"disablePowerOffAfterFinish\":\n self._logger.info(\"disablePowerOffAfterFinish\")\n self.powerOffAfterPrintFinished = False\n if self.rememberShutdown:\n self.lastPowerOff = self.powerOffAfterPrintFinished\n self._settings.set_boolean([\"lastPowerOff\"], self.lastPowerOff)\n self._settings.save()\n\n def get_api_commands(self):\n return dict(\n enableRelais=[],\n disableRelais=[],\n toggleRelais=[],\n disableShutdownAfterFinish=[],\n enableShutdownAfterFinish=[],\n disablePowerOffAfterFinish=[],\n enablePowerOffAfterFinish=[]\n )\n\n def on_after_startup(self):\n if self.powerOnOnStart:\n self._logger.info(\"Turn on Relais on Start\")\n self._setRelaisState(True)\n\n def on_shutdown(self):\n self._logger.info(\"on_shutdown_event\")\n if self.powerOffOnShutdown:\n if self.powerOffDelay <= 0:\n self._logger.info(\"Turn off Relais on Shutdown\")\n self._setRelaisState(False)\n else:\n self._logger.info(\"Turn off Relais on Shutdown Delayed\")\n self._powerCycleRelais(False, self.powerOffDelay)\n\n def on_settings_migrate(self, target, current):\n if target > current:\n if current <= 1:\n self.onOffButtonEnabled = False\n if current <= 2:\n self.powerOnOnStart = False,\n self.powerOffOnShutdown = False,\n self.powerOffDelay = 0\n if current <= 3:\n self.showShutdownOctopiOption = False\n self.showPowerOffPrintFinishOption = False\n self.shutdownDelay = 60\n if current <= 4:\n self.lastShutdown = False\n self.lastPowerOff = False\n self.rememberShutdown = False\n if current <= 5:\n self.token = \"\"\n\n def get_settings_version(self):\n return 6\n\n def get_settings_defaults(self):\n return dict(\n ip=None,\n token=\"\",\n intervall=1,\n onOffButtonEnabled=False,\n powerOnOnStart=False,\n powerOffOnShutdown=False,\n powerOffDelay=0,\n showShutdownOctopiOption=False,\n showPowerOffPrintFinishOption=False,\n shutdownDelay=60,\n lastShutdown=False,\n lastPowerOff=False,\n rememberShutdown=False\n )\n\n def get_settings_restricted_paths(self):\n return dict(admin=[\n ['ip', 'token']\n ])\n\n def on_settings_save(self, data):\n self._logger.info(\"on_settings_save\")\n octoprint.plugin.SettingsPlugin.on_settings_save(self, data)\n self.initialize()\n\n def on_event(self, event, payload):\n if not self.shutdownAfterPrintFinished and not self.powerOffAfterPrintFinished:\n return\n\n if not self._settings.global_get([\"server\", \"commands\", \"systemShutdownCommand\"]):\n self._logger.warning(\"systemShutdownCommand is not defined. 
Aborting shutdown...\")\n return\n\n if event not in [Events.PRINT_DONE, Events.PRINT_FAILED]:\n return\n\n if event == Events.PRINT_FAILED and not self._printer.is_closed_or_error():\n # Cancelled job\n return\n\n if event in [Events.PRINT_DONE, Events.PRINT_FAILED]:\n webcam_config = self._settings.global_get([\"webcam\", \"timelapse\"], merged=True)\n timelapse_type = webcam_config[\"type\"]\n if (timelapse_type is not None and timelapse_type != \"off\"):\n self._wait_for_timelapse_start()\n else:\n self._shutdown_timer_start()\n return\n\n def get_update_information(self):\n return dict(\n mystromswitch=dict(\n displayName=\"OctoPrint-MyStromSwitch\",\n displayVersion=self._plugin_version,\n\n # version check: github repository\n type=\"github_release\",\n user=\"da4id\",\n repo=\"OctoPrint-MyStromSwitch\",\n current=self._plugin_version,\n\n # update method: pip w/ dependency links\n pip=\"https://github.com/da4id/OctoPrint-MyStromSwitch/archive/{target_version}.zip\"\n )\n )\n\n\n__plugin_name__ = \"MyStrom Switch\"\n__plugin_pythoncompat__ = \">=2.7,<4\"\n\n\ndef __plugin_load__():\n global __plugin_implementation__\n __plugin_implementation__ = MyStromSwitchPlugin()\n\n global __plugin_hooks__\n __plugin_hooks__ = {\n \"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n }\n"
},
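The plugin above accumulates energy by multiplying each reported power sample by the elapsed interval and dividing by 3600 (watt-seconds to watt-hours). The same bookkeeping in isolation, with made-up readings:

```python
import time

energy_wh = 0.0
last_ts = None

def accumulate(power_w):
    # Rectangle-rule accumulation, as in the plugin: interval * power / 3600.
    global energy_wh, last_ts
    now = time.time()
    if last_ts is not None:
        energy_wh += (now - last_ts) * power_w / 3600
    last_ts = now

accumulate(60.0)   # the first sample only records the timestamp
time.sleep(1)
accumulate(60.0)   # ~1 s at 60 W adds roughly 60 / 3600 = 0.0167 Wh
print(round(energy_wh, 4))
```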
{
"alpha_fraction": 0.5919661521911621,
"alphanum_fraction": 0.5943446159362793,
"avg_line_length": 38.41666793823242,
"blob_id": "e2266ef1357afb48dc5ecd426937a5784c8ae330",
"content_id": "1009d2693e5101eb1d52175dc3bfa990830af793",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3784,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 96,
"path": "/octoprint_mystromswitch/static/js/mystromswitch.js",
"repo_name": "da4id/OctoPrint-MyStromSwitch",
"src_encoding": "UTF-8",
"text": "$(function() {\n function mystromswitchViewModel(parameters) {\n var self = this;\n\n self.loginState = parameters[0];\n self.settings = parameters[1];\n self.printer = parameters[2];\n\n self.onOffButtonEnabled = ko.observable();\n self.showShutdownOctopiOption = ko.observable();\n self.showPowerOffPrintFinishOption = ko.observable();\n self.automaticPowerOffEnabled = ko.observable();\n self.automaticShutdownEnabled = ko.observable();\n self.mystromswitchPowerValue = document.getElementById(\"mystromswitchPowerValue\")\n self.mystromswitchEnergyValue = document.getElementById(\"mystromswitchEnergyValue\")\n\n self.onToggleRelayEvent = function(){\n $.ajax({\n url: API_BASEURL + \"plugin/mystromswitch\",\n type: \"POST\",\n dataType: \"json\",\n data: JSON.stringify({\n command: \"toggleRelais\",\n }),\n contentType: \"application/json; charset=UTF-8\"\n })\n }\n\n //self.onmystromswitchEvent = function() {\n\n //}\n\n //self.onOffButtonEnabled.subscribe(self.onmystromswitchEvent, self);\n\n self.onAutomaticShutdownEnabledChanged = function(){\n var cmd = \"disableShutdownAfterFinish\";\n if (self.automaticShutdownEnabled()) {\n var cmd = \"enableShutdownAfterFinish\";\n }\n $.ajax({\n url: API_BASEURL + \"plugin/mystromswitch\",\n type: \"POST\",\n dataType: \"json\",\n data: JSON.stringify({\n command: cmd\n }),\n contentType: \"application/json; charset=UTF-8\"\n })\n }\n\n self.onAutomaticPowerOffEnabledChanged = function(){\n var cmd = \"disablePowerOffAfterFinish\";\n if (self.automaticPowerOffEnabled()) {\n var cmd = \"enablePowerOffAfterFinish\";\n }\n $.ajax({\n url: API_BASEURL + \"plugin/mystromswitch\",\n type: \"POST\",\n dataType: \"json\",\n data: JSON.stringify({\n command: cmd\n }),\n contentType: \"application/json; charset=UTF-8\"\n })\n }\n\n self.automaticShutdownEnabled.subscribe(self.onAutomaticShutdownEnabledChanged, self);\n self.automaticPowerOffEnabled.subscribe(self.onAutomaticPowerOffEnabledChanged, self);\n\n self.onDataUpdaterPluginMessage = function(plugin, data) {\n if (plugin != \"mystromswitch\" && plugin != \"octoprint_mystromswitch\") {\n return;\n }\n\t\t\tself.onOffButtonEnabled(data.onOffButtonEnabled);\n\t\t\tself.showShutdownOctopiOption(data.showShutdownOctopiOption);\n\t\t\tself.showPowerOffPrintFinishOption(data.showPowerOffPrintFinishOption);\n\t\t\tself.mystromswitchEnergyValue.innerHTML = \"Energy: \"+data.energy.toFixed(1)+\"Wh\"\n\t\t\tif(data.relay == false){\n\t\t\t self.mystromswitchPowerValue.innerHTML = \"Relay is off\";\n\t\t\t} else if (data.power != null) {\n self.mystromswitchPowerValue.innerHTML = \"Power Consumption \"+data.power.toFixed(1)+\"W\";\n }else{\n self.mystromswitchPowerValue.innerHTML = \"myStrom switch not reachable\"\n self.mystromswitchEnergyValue.innerHTML = \"Check url in Plugin Settings\"\n }\n self.automaticShutdownEnabled(data.automaticShutdownEnabled);\n self.automaticPowerOffEnabled(data.automaticPowerOffEnabled);\n }\n }\n\n OCTOPRINT_VIEWMODELS.push([\n mystromswitchViewModel,\n [\"loginStateViewModel\", \"settingsViewModel\", \"printerStateViewModel\"],\n\t\t$(\".sidebar_plugin_mystromswitch\").get(0)\n ]);\n});\n"
}
] | 3 |
madcowjay/ENGN1931Z_Lab2 | https://github.com/madcowjay/ENGN1931Z_Lab2 | 0685eccd23f48b296669e035494f2da7c8bf9198 | 89bb219334c07f8a7c49e51c08191c287391cb36 | 12475640d4c9eaecbdd7e959841c10c2e3066065 | refs/heads/master | 2021-01-24T02:40:01.301522 | 2018-02-25T17:16:11 | 2018-02-25T17:16:11 | 122,857,244 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7023411393165588,
"alphanum_fraction": 0.7658863067626953,
"avg_line_length": 20.285715103149414,
"blob_id": "fbceec43eeb53fae66a7b8481232a9ce59c35994",
"content_id": "b55e6fd2957fa23528c169134a0b3a749611bc71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 14,
"path": "/camera.py",
"repo_name": "madcowjay/ENGN1931Z_Lab2",
"src_encoding": "UTF-8",
"text": "import picamera\nimport time\n\ncamera = picamera.PiCamera()\ncamera.rotation = 180\ncamera.resolution = (1920, 1080)\ncamera.framerate = 15\ncamera.start_preview()\ntime.sleep(2)\ncamera.start_recording('video.h264')\ntime.sleep(10)\ncamera.stop_recording()\ncamera.capture('image.jpg')\ncamera.stop_preview()\n\n"
}
] | 1 |
santimacnet/MLOps-AzureDevops | https://github.com/santimacnet/MLOps-AzureDevops | 14b4fc694923f8612d28e3005dbd367195f45ce1 | e332831ecac53470f2dcd8dff88cf1576f3a8c99 | cd112a75465bef2906a797260c18d247649bdaa2 | refs/heads/main | 2023-01-27T11:53:12.056290 | 2020-12-13T19:38:18 | 2020-12-13T19:38:18 | 321,141,080 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7619783878326416,
"alphanum_fraction": 0.7619783878326416,
"avg_line_length": 30.450000762939453,
"blob_id": "9c8421cc57afa2a2a42dcf9589195b27e2dc66ec",
"content_id": "b2e7ea17f31e9e436d93d5355829110b0e545738",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 647,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 20,
"path": "/set-environment-vars.sh",
"repo_name": "santimacnet/MLOps-AzureDevops",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\r\n# Cluster Management Environment Variables\r\nexport DATABRICKS_DOMAIN=\"\"\r\nexport DATABRICKS_ACCESS_TOKEN=\"\"\r\nexport DATABRICKS_CLUSTER_NAME_SUFFIX=\"mySuffix\"\r\nexport DATABRICKS_CLUSTER_ID=\"\"\r\n\r\n# Train Environment Variables\r\nexport AML_WORKSPACE_NAME=\"MLOpsOSS-AML-WS\"\r\nexport RESOURCE_GROUP=\"MLOpsOSS-AML-RG\"\r\nexport SUBSCRIPTION_ID=\"\"\r\nexport TENANT_ID=\"\"\r\nexport SP_APP_ID=\"\"\r\nexport SP_APP_SECRET=\"\"\r\nexport SOURCES_DIR=\"\"\r\nexport TRAIN_SCRIPT_PATH=\"src/train/train.py\"\r\nexport DATABRICKS_WORKSPACE_NAME=\"MLOpsOSS-AML-ADB\"\r\nexport DATABRICKS_COMPUTE_NAME_AML=\"ADB-Compute\"\r\nexport MODEL_DIR=\"/dbfs/model\"\r\nexport MODEL_NAME=\"mymodel\""
},
{
"alpha_fraction": 0.5337423086166382,
"alphanum_fraction": 0.6441717743873596,
"avg_line_length": 16.11111068725586,
"blob_id": "aefc6ec29c5ca449aa7d41a6aa54a4fd256ce730",
"content_id": "0625dcd6e56b0f5c72391689285893bf1b5ea91f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 163,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "santimacnet/MLOps-AzureDevops",
"src_encoding": "UTF-8",
"text": "# local package\r\n-e .\r\n\r\n# external requirements\r\nrequests>=2.22 #.0\r\nazureml>=0.2 #.7\r\nazureml-core>=1.0 #.45\r\nazureml-pipeline>=1.0 #.45\r\ndatabricks-cli>=0.8.7\r\n"
},
{
"alpha_fraction": 0.693379819393158,
"alphanum_fraction": 0.693379819393158,
"avg_line_length": 15.9375,
"blob_id": "a005e69434fd41f4b38c6a738c09e1551bfaa8e8",
"content_id": "fa2facea263ddfacd83053eeba08735a93f4a00e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 287,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 16,
"path": "/src/score/consume_score.py",
"repo_name": "santimacnet/MLOps-AzureDevops",
"src_encoding": "UTF-8",
"text": "import json\r\nimport argparse\r\nimport score\r\n\r\nPARSER = argparse.ArgumentParser()\r\nPARSER.add_argument('--DATA_FILE')\r\nARGS = PARSER.parse_args()\r\n\r\nwith open(ARGS.DATA_FILE) as json_file:\r\n MYDATA = json.load(json_file)\r\n\r\nscore.init()\r\n\r\nRESULT = score.run(MYDATA)\r\n\r\nprint(RESULT)\r\n"
},
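`consume_score.py` above relies on a `score` module that exposes `init()` and `run(data)`. The repository's real `score.py` is not part of this excerpt, so the stub below is purely hypothetical and only illustrates the contract the wrapper expects:

```python
# score.py (hypothetical stub; the real module would load the trained model)
MODEL = None

def init():
    # Load the model artifact once; a placeholder callable stands in here.
    global MODEL
    MODEL = lambda data: {"echo": data}

def run(data):
    # Called with the parsed JSON payload; returns the prediction result.
    return MODEL(data)
```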
{
"alpha_fraction": 0.7716181874275208,
"alphanum_fraction": 0.772203803062439,
"avg_line_length": 36.223880767822266,
"blob_id": "321230d3b24b6813b120039b173133c1f1539a85",
"content_id": "4ec2c94630a8f64000215e86c546274f557df2dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5123,
"license_type": "permissive",
"max_line_length": 218,
"num_lines": 134,
"path": "/README.md",
"repo_name": "santimacnet/MLOps-AzureDevops",
"src_encoding": "UTF-8",
"text": "# MLOps with Azure DevOps\r\n\r\n[](https://dev.azure.com/aidemos/MLOps/_build/latest?definitionId=101&branchName=master)\r\n\r\nSet up and operationalize an MLOps flow leveraging Azure Machine Learning Python SDK, Azure Databricks, and Azure DevOps.\r\n\r\nThis sample shows you how to operationalize your Machine Learning development\r\ncycle with **Azure Machine Learning Service** and **Azure Databricks** - as a\r\ncompute target - by **leveraging Azure DevOps Pipelines** as\r\nthe orchestrator for the whole flow.\r\n\r\nBy running this project, you will have the opportunity to work with Azure\r\nworkloads, such as:\r\n\r\n|Technology|Objective/Reason|\r\n|----------|----------------|\r\n|Azure DevOps|The platform to help you implement DevOps practices on your scenario|\r\n|Azure Machine Learning|Manage Machine Learning models with the power of Azure|\r\n|Azure Databricks|Use its compute power as a Remote Compute for training models|\r\n|Azure Container Instance|Deploy Machine Learning models as Docker containers|\r\n|Languages|python|\r\n\r\n\r\n## Preparing the environment\r\n\r\n### Infrastructure/Cloud Infrastructure\r\n\r\nThis repository contains the base structure for you to start developing your\r\nMachine Learning project using:\r\n\r\n* Azure Machine Learning Service\r\n* Azure Databricks\r\n* Azure Container Instance\r\n\r\nTo have all the resources set, leverage the following resource to get your\r\ninfrastructure ready:\r\n\r\n- [Setting up your cloud infrastructure](docs/setup-cloud-infrastructure.md)\r\n\r\n### Azure DevOps\r\n\r\nAfter you have your infrastructure set, it's time to have your Azure DevOps\r\nconnected to it to start orchestrating your Machine Learning pipeline.\r\n\r\n> If you don't have an Azure DevOps account, please refer to\r\n> [this doc](https://docs.microsoft.com/en-us/azure/devops/user-guide/sign-up-invite-teammates?view=azure-devops)\r\n> to have it set up.\r\n\r\nYou will find resources and docs to have Azure DevOps orchestrating your\r\npipeline by following this guidance:\r\n\r\n- [Setting up the training pipeline](docs/setup-training-pipeline.md)\r\n\r\n## Sample project\r\n\r\nThis code sample reproduces the **Image Classification**, a *convolutional neural\r\nnetwork image classification*. For more details of the project structure,\r\ncheck the [project structure](docs/project-structure.md) page.\r\n\r\nThis project structure was also based on the\r\n[cookiecutter data science project template](https://drivendata.github.io/cookiecutter-data-science/).\r\n\r\n### Running this project on your local development environment\r\n\r\nThis sample provides you two options to have your environment set up for developing\r\nand debugging locally. 
The focus of these docs is on how to have all the\r\nrequired environment variables set so you can invoke\r\nand debug this code on your machine.\r\n\r\n* See [this page](docs/vscode-launch-json.md) for details on how to debug\r\nusing **Visual Studio Code**\r\n* See [this page](docs/bash-environment-variables.md) for details on how\r\nto have the environment variables set using **Bash**\r\n\r\n## Project flow\r\n\r\n### Starting point/Current state\r\n\r\nA Machine Learning project being developed by a team of Data Engineers/Data\r\nAnalysts, using Python.\r\n\r\nThe team develops the code to train the Machine Learning model and they need\r\nto orchestrate the way this code gets **tested, trained, packaged\r\nand deployed**.\r\n\r\n### Testing\r\n\r\nTesting the code that generates a model is crucial to the success and accuracy\r\nof the model being developed.\r\n\r\nThe code being developed will produce a Machine Learning model that will help\r\npeople make decisions, without itself being the final authority for those\r\ndecisions.\r\n\r\nThat's why testing the units of the code to make sure they meet the requirements\r\nis a fundamental piece of the development cycle.\r\n\r\nYou will achieve it using the following capabilities:\r\n\r\n- Python Unit Testing frameworks\r\n- Azure DevOps\r\n\r\n### Training\r\n\r\nThis project is all about generating a Machine Learning model, which needs\r\nto be trained. Training a model requires compute power and orchestration.\r\n\r\nCompute power is commonly an expensive asset and that's why this project\r\nleverages cloud workloads to optimize resource consumption and avoid upfront\r\ncosts.\r\n\r\nTo enable this, the following capabilities will be used:\r\n\r\n- Machine Learning Python SDKs\r\n- Azure Databricks\r\n- Azure DevOps\r\n\r\n### Deploying\r\n\r\nThe resulting model from the training step needs to be deployed somewhere\r\nso the edge can consume it. There are a few ways to achieve it and,\r\nfor this scenario, you will deploy this model as part of a Docker Container.\r\n\r\nA container has the power of having all the dependencies the application needs\r\nto run encapsulated within it. It is also easily portable to multiple different\r\nplatforms.\r\n\r\nTo take advantage of deploying the model to a container, you will use:\r\n\r\n- Azure DevOps\r\n- Azure Container Instances\r\n- Azure Machine Learning Service\r\n\r\nSee [this page](docs/release-pipeline.md) for details on setting up the release pipeline to deploy the model.\r\n"
}
] | 4 |
RichardFreedman/CRIM_Intervals_Notebooks | https://github.com/RichardFreedman/CRIM_Intervals_Notebooks | e0ddefdb0270f5ab8203453a8fcb205c3279b42b | a50e1d4451096d373b180cce4d1fe8b179a74bb2 | c9cdcd45f344bb1526715d420976ec443bee25ba | refs/heads/main | 2023-06-22T10:24:21.844595 | 2021-07-07T15:16:43 | 2021-07-07T15:16:43 | 373,631,010 | 0 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.6107637286186218,
"alphanum_fraction": 0.6204437017440796,
"avg_line_length": 44.169456481933594,
"blob_id": "4e57c1927847899df730b5d04700066069d3328b",
"content_id": "f6d90d43e30653e0bdee9661308b6e0053b8ffbf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21591,
"license_type": "permissive",
"max_line_length": 270,
"num_lines": 478,
"path": "/binder/main.py",
"repo_name": "RichardFreedman/CRIM_Intervals_Notebooks",
"src_encoding": "UTF-8",
"text": "from main_objs import *\n\n# Potential redesign needed due to unstable nature of having user give over patterns_data\n# Potential fix is reincorporating into_patterns back into this method\ndef find_exact_matches(patterns_data, min_matches=5):\n \"\"\"Takes in a series of vector patterns with data attached and finds exact matches\n\n Parameters\n ----------\n patterns_data : return value from into_patterns\n MUST be return value from into_patterns\n min_matches : int, optional\n Minimum number of matches needed to be deemed relevant, defaults to 5\n\n Returns\n -------\n all_matches_list : list\n A list of PatternMatches objects\n \"\"\"\n # A series of arrays are needed to keep track of various data associated with each pattern\n print(\"Finding exact matches...\")\n patterns_nodup, patterns = [], []\n p = 0\n for pattern in patterns_data:\n patterns.append(pattern[0])\n if pattern[0] not in patterns_nodup:\n patterns_nodup.append(pattern[0])\n m = 0\n # Go through each individual pattern and count up its occurences\n all_matches_list = []\n for p in patterns_nodup:\n amt = patterns.count(p)\n # If a pattern occurs more than the designated threshold, we add it to our list of matches\n if amt > min_matches:\n matches_list = PatternMatches(p, [])\n m += 1\n for a in patterns_data:\n if p == a[0]:\n exact_match = Match(p, a[1], a[2], a[3])\n matches_list.matches.append(exact_match)\n all_matches_list.append(matches_list)\n print(str(len(all_matches_list)) + \" melodic intervals had more than \" + str(min_matches) + \" exact matches.\\n\")\n # all_matches_list has a nested structure- it contains a list of PatternMatches objects, which contain a list of individual Match objects\n return all_matches_list\n\n# Finds matches based on a cumulative distance difference between two patterns\ndef find_close_matches(patterns_data, min_matches, threshold):\n \"\"\"Takes in a series of vector patterns with data attached and finds close matches\n\n Parameters\n ----------\n patterns_data : return value from into_patterns\n MUST be return value from into_patterns\n min_matches : int, optional\n Minimum number of matches needed to be deemed relevant, defaults to 5\n threshold : int\n Cumulative variance allowed between vector patterns before they are deemed not similar\n\n Returns\n -------\n all_matches_list : list\n A list of PatternMatches objects\n \"\"\"\n # A series of arrays are needed to keep track of various data associated with each pattern\n print(\"Finding close matches...\")\n patterns_nodup = []\n for pat in patterns_data:\n # Build up a list of patterns without duplicates\n if pat[0] not in patterns_nodup:\n patterns_nodup.append(pat[0])\n # Go through each individual pattern and count up its occurences\n all_matches_list = []\n for p in patterns_nodup:\n matches_list = PatternMatches(p, [])\n # If a pattern occurs more than the designated threshold\n for a in patterns_data:\n rhytmic_match = 0\n # Calculate the \"difference\" by comparing each vector with the matching one in the other pattern\n for v in range(len(a[0])):\n rhytmic_match += abs(p[v] - a[0][v])\n if rhytmic_match <= threshold:\n close_match = Match(a[0], a[1], a[2], a[3])\n matches_list.matches.append(close_match)\n if len(matches_list.matches) > min_matches:\n all_matches_list.append(matches_list)\n print(str(len(all_matches_list)) + \" melodic intervals had more than \" + str(min_matches) + \" exact or close matches.\\n\")\n return all_matches_list\n\n# Allows for the addition of non-moving-window pattern searching 
approaches\n# Needs to be called before any matches can be made\ndef into_patterns(vectors_list, interval):\n \"\"\"Turns lists of vectorized intervals into a list of patterns of a given size, with note data attached\n\n Parameters\n ----------\n vectors_list : list of vectorized lists\n MUST be a list from calling generic_intervals or semitone_intervals on a VectorInterval object\n interval : int\n size of interval to be analyzed\n\n Returns\n -------\n patterns_data : list of tuples\n A list of vector patterns with additional information about notes attached\n \"\"\"\n pattern, patterns_data = [], []\n for vectors in vectors_list:\n for i in range(len(vectors)-interval):\n pattern = []\n durations = []\n valid_pattern = True\n durations.append(vectors[i].note1.duration)\n for num_notes in range(interval):\n if vectors[i+num_notes].vector == 'Rest':\n valid_pattern = False\n pattern.append(vectors[i+num_notes].vector)\n durations.append(vectors[i+num_notes].note2.duration)\n if valid_pattern:\n # Here, with help from vectorize() you can jam in whatever more data you would like about the note\n patterns_data.append((pattern, vectors[i].note1, vectors[i+num_notes].note2, durations))\n return patterns_data\n\ndef into_patterns_pd(df, interval_size):\n \"\"\"Flattens a DataFrame of vectors into a list of patterns of a given size\n\n Parameters\n ----------\n df : pandas.DataFrame\n a DataFrame of vectorized intervals, one column per voice\n interval_size : int\n size of interval to be analyzed\n\n Returns\n -------\n patterns_data : list of tuples\n A list of vector patterns with their first and last vectors attached\n \"\"\"\n dflist = df.values.tolist()\n vectors_list = []\n for i in range(len(dflist[0])):\n for j in range(len(dflist)):\n vectors_list.append(dflist[j][i])\n vectors_list.append(float('nan'))\n pattern, patterns_data = [], []\n for h in range(len(vectors_list)-interval_size):\n pattern = []\n valid_pattern = True\n for num_notes in range(interval_size):\n if pd.isna(vectors_list[h+num_notes]):\n valid_pattern = False\n pattern.append(vectors_list[h+num_notes])\n if valid_pattern:\n # Here, with help from vectorize() you can jam in whatever more data you would like about the note\n patterns_data.append((pattern, vectors_list[h], vectors_list[h+num_notes]))\n return patterns_data\n\n# sample usage\n# a = into_patterns_pd(melodic, 5)\n# a.sort()\n\n# Helper for sort_matches\ndef sortFunc(pattern):\n \"\"\"Helper function for sort_matches\n \"\"\"\n return len(pattern.matches)\n\n# Sorting based on the amount of matches each pattern has\ndef sort_matches(matches_list):\n \"\"\"Sorts and returns a list of PatternMatch objects, ordering by size\n \"\"\"\n matches_list.sort(reverse=True, key=sortFunc)\n return matches_list\n
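\n# sample usage of the matching workflow (a minimal sketch; the URL below is a\n# placeholder, and CorpusBase/IntervalBase are the classes defined in main_objs):\n# corpus = CorpusBase(['https://example.com/piece.mei'])\n# vectors = IntervalBase(corpus.note_list)\n# patterns = into_patterns([vectors.generic_intervals], 5)\n# matches = sort_matches(find_exact_matches(patterns, min_matches=5))\n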
\n# Generates a score from 0-1 based on how many patterns within a piece can be found in the other\ndef similarity_score(notes1, notes2):\n \"\"\"Returns a score from 0-1 of the similarity between two note lists\n\n Parameters\n ----------\n notes1 : list of NoteListElement objects\n a note list from the CorpusBase or ScoreBase methods\n notes2 : list of NoteListElement objects\n a note list from the CorpusBase or ScoreBase methods\n\n Returns\n -------\n final_score : float\n a score of similarity from 0-1\n \"\"\"\n vectors1 = IntervalBase(notes1).generic_intervals\n vectors2 = IntervalBase(notes2).generic_intervals\n interval = 3\n scores = []\n while interval <= 6:\n # For each piece create a list of all patterns and then a list of unique patterns to compare against it\n pattern, patterns1, patterns_nodup1, patterns2, patterns_nodup2 = [], [], [], [], []\n\n for i in range(len(vectors1)-interval):\n pattern = []\n valid_pattern = True\n for num_notes in range(interval):\n if vectors1[i+num_notes].vector == 'Rest':\n valid_pattern = False\n pattern.append(vectors1[i+num_notes].vector)\n if valid_pattern:\n patterns1.append(pattern)\n\n for j in range(len(vectors2)-interval):\n pattern = []\n valid_pattern = True\n for num_notes in range(interval):\n if vectors2[j+num_notes].vector == 'Rest':\n valid_pattern = False\n pattern.append(vectors2[j+num_notes].vector)\n if valid_pattern:\n patterns2.append(pattern)\n\n for pat in patterns1:\n if pat not in patterns_nodup1:\n patterns_nodup1.append(pat)\n for pat2 in patterns2:\n if pat2 not in patterns_nodup2:\n patterns_nodup2.append(pat2)\n\n # With lists assembled we can do an easy comparison\n score = 0\n for a in patterns_nodup1:\n if patterns2.count(a) > 3:\n score += 1\n if patterns2.count(a) > 0:\n score += 1\n else:\n for b in patterns2:\n diff = 0\n for c in range(interval):\n diff += abs(a[c] - b[c])\n if diff == 1 or diff == 2:\n #score += 0.5\n break\n for d in patterns_nodup2:\n if patterns1.count(d) > 3:\n score += 1\n if patterns1.count(d) > 0:\n score += 1\n else:\n for e in patterns1:\n diff = 0\n for f in range(interval):\n diff += abs(d[f] - e[f])\n if diff == 1 or diff == 2:\n #score += 0.5\n break\n interval += 1\n scores.append(score / (len(patterns_nodup2) + len(patterns_nodup1)))\n final_score = (scores[0] + scores[1] + scores[2] + scores[3]) / 4\n return final_score\n\n# Find all occurrences of a specified pattern within a corpus\ndef find_motif(pieces: CorpusBase, motif: list, generic: bool = True):\n \"\"\"Prints out all occurrences of a specified motif\n\n Parameters\n ----------\n pieces : CorpusBase\n a CorpusBase object with all scores to be searched\n motif : list\n the motif in vectors (e.g. [-2,-2,2,-2,2])\n generic : bool, optional\n True to use generic vectors, False for semitone vectors- default is generic\n \"\"\"\n # Assemble into patterns\n vectors = IntervalBase(pieces.note_list)\n if generic:\n patterns = into_patterns([vectors.generic_intervals], len(motif))\n else:\n patterns = into_patterns([vectors.semitone_intervals], len(motif))\n print(\"Finding instances of pattern \" + str(motif) + \": \")\n # Find all occurrences of given motif, print out information associated\n occurrences = 0\n for pat in patterns:\n if motif == pat[0]:\n print(\"Selected pattern occurs in \" + str(pat[1].metadata.title) + \" part \" + str(pat[1].part) + \" beginning in measure \" + str(pat[1].note.measureNumber) + \" and ending in measure \" + str(pat[2].note.measureNumber) + \". Note durations: \" + str(pat[3]))\n occurrences += 1\n print(\"Selected pattern occurs \" + str(occurrences) + \" times.\")\n\n# Given list of matches, write to csv in current working directory\ndef export_to_csv(matches: list):\n \"\"\"Exports matches data to a csv in the current working directory\n\n Parameters\n ----------\n matches : list\n return value from either find_exact_matches or find_close_matches\n \"\"\"\n proceed = input(\"This method will create a csv file in your current working directory. Continue? 
(y/n): \").lower()\n csv_name = input(\"Enter a name for your csv file (.csv will be appended): \")\n csv_name += '.csv'\n if proceed != 'y' and proceed != 'yes':\n print(\"Exiting...\")\n return\n import csv\n with open(csv_name, mode='w') as matches_file:\n matches_writer = csv.writer(matches_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n if type(matches[0]) == PatternMatches:\n matches_writer.writerow(['Pattern Generating Match', 'Pattern matched', 'Piece Title', 'Part', 'First Note Measure Number', 'Last Note Measure Number', 'Note Durations', 'EMA', 'EMA url'])\n for match_series in matches:\n for match in match_series.matches:\n matches_writer.writerow([match_series.pattern, match.pattern, match.first_note.metadata.title, match.first_note.part, match.first_note.note.measureNumber, match.last_note.note.measureNumber, match.durations, match.ema, match.ema_url])\n else:\n matches_writer.writerow(['Pattern Generating Match', 'Classification Type', 'EMA', 'EMA url', 'Soggetti 1 Part', 'Soggetti 1 Measure', 'Soggetti 2 Part', 'Soggetti 2 Measure', 'Soggetti 3 Part', 'Soggetti 3 Measure', 'Soggetti 4 Part', 'Soggetti 4 Measure'])\n for classified_matches in matches:\n row_array = [classified_matches.pattern, classified_matches.type, classified_matches.ema, classified_matches.ema_url]\n for soggetti in classified_matches.matches:\n row_array.append(soggetti.first_note.part)\n row_array.append(soggetti.first_note.note.measureNumber)\n matches_writer.writerow(row_array)\n\n print(\"CSV created in your current working directory.\")\n\n# For more naive usage- allows for user interaction, has return value of list of matches\n# All features incorporated except non-whole piece selection\ndef assisted_interface():\n \"\"\"Activates the assisted interface for more naive use\n\n Returns\n ----------\n matches : list\n list of PatternMatches based on the users various inputs\n \"\"\"\n print(\"You can use ctrl-c to quit exit at any time. If you proceed through the entire process, the matches array will be returned from this function\")\n urls = []\n url = input(\"Enter a url, or 'done' when finished: \")\n while url != 'done':\n urls.append(url)\n url = input(\"Enter a url, or 'done' when finished: \")\n corpus = CorpusBase(urls, [])\n vectors = IntervalBase(corpus.note_list)\n pattern_size = int(input(\"Enter the size of pattern you would like to analyze: \"))\n interval_type = input(\"Enter 1 to match using generic intervals or enter 2 to match using semitone intervals: \")\n while interval_type != '1' and interval_type != '2':\n interval_type = input(\"Invalid input, enter 1 for generic intervals or 2 for semitone intervals: \")\n if interval_type == '1':\n patterns = into_patterns([vectors.generic_intervals], pattern_size)\n if interval_type == '2':\n patterns = into_patterns([vectors.semitone_intervals], pattern_size)\n min_matches = int(input(\"Enter the minimum number of matches needed to be displayed: \"))\n close_or_exact = input(\"Enter 1 to include close matches or enter 2 for only exact matches: \")\n while close_or_exact != '1' and close_or_exact != '2':\n close_or_exact = input(\"Invalid input, enter 1 for close matches or 2 for only exact matches: \")\n if close_or_exact == '1':\n max_dif = int(input(\"Enter the maximum total distance threshold for a close match: \"))\n matches = find_close_matches(patterns, min_matches, max_dif)\n if close_or_exact == '2':\n matches = find_exact_matches(patterns, min_matches)\n csv_results = input(\"Export results to CSV? 
(y/n): \").lower()\n if csv_results == 'y' or csv_results == 'yes':\n export_to_csv(matches)\n print_results = input(\"Print results? (y/n): \").lower()\n if print_results == 'y' or print_results == 'yes':\n if close_or_exact == '1':\n for item in matches:\n item.print_close_matches()\n if close_or_exact == '2':\n for item in matches:\n item.print_exact_matches()\n return matches\n\ndef compare_durations(durations1, durations2, threshold):\n \"\"\"Helper for classify_matches\n\n works similarly to find_close_matches in terms of its comparison technique\n \"\"\"\n total = 0\n durations1_sum, durations2_sum = 0, 0\n for i in range(len(durations1)):\n total += abs(durations1[i]-durations2[i])\n durations1_sum += durations1[i]\n durations2_sum += durations2[i]\n # if total <= threshold or durations1_sum == durations2_sum:\n if total <= threshold:\n return True\n else:\n return False\n\ndef sortMatches(match):\n \"\"\" Helper function for classify_matches\n \"\"\"\n return match.first_note.offset\n\n\ndef classify_matches(exact_matches: list, durations_threshold = 2):\n \"\"\"Classifies groups of matches into periodic entries, imitative duos, and fuga\n\n Classifies through offset comparison of matching melodic patterns, prints out information gathered.\n Reliably accurate results only guaranteed if exact_matches is generated from ONE piece.\n\n Parameters\n ----------\n exact_matches : list\n return value from find_exact_matches\n durations_threshold : int, optional\n maximum cumulative difference between two duration lists before they are deemed not similar, defaults to 2\n\n Returns\n -------\n classified_tuple : tuple\n classified_tuple[0] : list of lists of Match objects\n list of periodic entries, which are lists of Match objects\n classified_tuple[1] : list of lists of Match objects\n list of imitative_duos, which are lists of Match objects\n classified_tuple[0] : list of lists of Match objects\n list of fuga, which are lists of Match objects\n \"\"\"\n classified_matches = []\n for list_matches in exact_matches:\n offset_difs, offset_difs_info = [], []\n match_instance = list_matches.matches\n match_instance.sort(key = sortMatches)\n for index in range(len(match_instance) - 1):\n if compare_durations(match_instance[index + 1].durations, match_instance[index].durations, durations_threshold):\n offset_difs.append(match_instance[index + 1].first_note.offset - match_instance[index].first_note.offset)\n offset_difs_info.append((match_instance[index], match_instance[index + 1]))\n i = 0\n while i < len(offset_difs) - 2:\n if offset_difs[i] > 64 or offset_difs[i + 1] > 64 or abs(offset_difs_info[i][1].last_note.note.measureNumber - offset_difs_info[i + 1][0].first_note.note.measureNumber) > 8:\n pass\n elif offset_difs[i] == offset_difs[i + 1] and offset_difs[i] == offset_difs[i + 2]:\n grouping = (offset_difs_info[i][0], offset_difs_info[i][1], offset_difs_info[i + 1][0], offset_difs_info[i + 1][1], offset_difs_info[i + 2][0], offset_difs_info[i + 2][1])\n grouping = list(dict.fromkeys(grouping))\n classified_obj = ClassifiedMatch(grouping, \"periodic_entry\")\n classified_matches.append(classified_obj)\n elif offset_difs[i] == offset_difs[i + 1]:\n grouping = (offset_difs_info[i][0], offset_difs_info[i][1], offset_difs_info[i + 1][0], offset_difs_info[i + 1][1])\n grouping = list(dict.fromkeys(grouping))\n classified_obj = ClassifiedMatch(grouping, \"periodic entry\")\n classified_matches.append(classified_obj)\n elif offset_difs[i] == offset_difs[i + 2]:\n grouping = 
(offset_difs_info[i][0], offset_difs_info[i][1], offset_difs_info[i + 2][0], offset_difs_info[i + 2][1])\n grouping = list(dict.fromkeys(grouping))\n classified_obj = ClassifiedMatch(grouping, \"imitative duo\")\n classified_matches.append(classified_obj)\n else:\n grouping = (offset_difs_info[i][0], offset_difs_info[i][1], offset_difs_info[i + 1][0], offset_difs_info[i + 1][1])\n grouping = list(dict.fromkeys(grouping))\n classified_obj = ClassifiedMatch(grouping, \"fuga\")\n classified_matches.append(classified_obj)\n i += 1\n\n for entry in classified_matches:\n print(str(entry.type) + \":\")\n desc_str = \"Pattern: \" + str(entry.pattern) + \", Locations in entry: \"\n for soggetti in entry.matches:\n desc_str += \"\\n- Measure \" + str(soggetti.first_note.note.measureNumber) + \" in voice \" + str(soggetti.first_note.partNumber)\n print(desc_str)\n\n return classified_matches\n\ndef export_pandas(matches):\n match_data = []\n for match_series in matches:\n for match in match_series.matches:\n match_dict = {\n \"pattern_generating_match\": match_series.pattern,\n \"pattern_matched\": match.pattern,\n \"piece_title\": match.first_note.metadata.title,\n \"part\": match.first_note.part,\n \"start_measure\": match.first_note.note.measureNumber,\n \"start_beat\": match.first_note.note.beat,\n \"end_measure\": match.last_note.note.measureNumber,\n \"end_beat\": match.last_note.note.beat,\n \"start_offset\": match.first_note.offset,\n \"end_offset\": match.last_note.offset,\n \"note_durations\": match.durations,\n \"ema\": match.ema,\n \"ema_url\": match.ema_url\n }\n match_data.append(match_dict)\n return pd.DataFrame(match_data)\n
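\n# sample usage of classification and export (a sketch continuing the example above;\n# assumes matches came from find_exact_matches on a single piece):\n# classified = classify_matches(matches)\n# results_df = export_pandas(matches)\n"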
},
{
"alpha_fraction": 0.6623110771179199,
"alphanum_fraction": 0.6667068004608154,
"avg_line_length": 42.360313415527344,
"blob_id": "07fcbdf4811b8d09c61c7d270af9dc4524bae555",
"content_id": "eb77b54e2a547e7dbdf432c194f2ce90a84edecc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16607,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 383,
"path": "/binder/visualizations.py",
"repo_name": "RichardFreedman/CRIM_Intervals_Notebooks",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis script contains the method\n\"\"\"\n\nimport altair as alt\nfrom pyvis.network import Network\nfrom ipywidgets import interact, fixed\nimport pandas as pd\nimport re\nimport textdistance\n\ndef create_bar_chart(y, x, color, data, condition, *selectors):\n observer_chart = alt.Chart(data).mark_bar().encode(\n y=y,\n x=x,\n color = color, \n opacity=alt.condition(condition, alt.value(1), alt.value(0.2))\n ).add_selection(\n *selectors\n )\n return observer_chart\n\ndef create_heatmap(x, x2, y, color, data, heat_map_width, heat_map_height, selector_condition, *selectors, tooltip):\n\n heatmap = alt.Chart(data).mark_bar().encode(\n x=x,\n x2=x2,\n y=y,\n color=color, \n opacity=alt.condition(selector_condition, alt.value(1), alt.value(0.2)),\n tooltip=tooltip\n ).properties(\n width=heat_map_width,\n height=heat_map_height\n ).add_selection(\n *selectors\n )\n\n return heatmap\n\ndef _process_ngrams_df_helper(ngrams_df, main_col):\n \"\"\"\n The output from the getNgram is usually a table with\n four voices and ngram of notes properties (duration or\n pitch). This method stack this property onto one column\n and mark which voices they are from.\n :param ngrams_df: direct output from getNgram with 1 columns\n for each voices and ngrams of notes' properties.\n :param main_col: the name of the property\n :return: a dataframe with ['start', main_col, 'voice'] as columns\n \"\"\"\n # copy to avoid changing original ngrams df\n ngrams_df = ngrams_df.copy()\n\n # add a start column containing offsets\n ngrams_df.index.name = \"start\"\n ngrams_df = ngrams_df.reset_index().melt(id_vars=[\"start\"], value_name=main_col, var_name=\"voice\")\n\n ngrams_df[\"start\"] = ngrams_df[\"start\"].astype(float)\n return ngrams_df\n\ndef process_ngrams_df(ngrams_df, ngrams_duration=None, selected_pattern=None, voices=None):\n \"\"\"\n This method combines ngrams from all voices in different columns\n into one column and calculates the starts and end points of the\n patterns. It could also filter out specific voices or patterns\n for the users to analyze.\n\n :param ngrams_df: dataframe we got from getNgram in crim-interval\n :param ngrams_duration: if not None, simply output the offsets of the\n ngrams. 
If we have durations, calculate the end by adding the offsets and\n the durations.\n :param selected_pattern: list of specific patterns the users want (optional)\n :param voices: list of specific voices the users want (optional)\n :return: a new, processed dataframe with only desired patterns from desired voices\n combined into one column with start and end points\n \"\"\"\n\n # copy to avoid changing original ngrams df\n ngrams_df = _process_ngrams_df_helper(ngrams_df, 'pattern')\n\n if ngrams_duration is not None:\n ngrams_duration = _process_ngrams_df_helper(ngrams_duration, 'duration')\n ngrams_df['end'] = ngrams_df['start'] + ngrams_duration['duration']\n else:\n # make end=start+1 just to display offsets\n ngrams_df['end'] = ngrams_df['start'] + 1\n\n # filter according to voices and patterns (after computing durations for correct offsets)\n if voices:\n voice_condition = ngrams_df['voice'].isin(voices)\n ngrams_df = ngrams_df[voice_condition].dropna(how='all')\n \n if selected_pattern:\n pattern_condition = ngrams_df['pattern'].isin(selected_pattern)\n ngrams_df = ngrams_df[pattern_condition].dropna(how='all')\n\n return ngrams_df\n\ndef plot_ngrams_df_heatmap(processed_ngrams_df, heatmap_width=800, heatmap_height=300):\n \"\"\"\n Plot a heatmap for crim-intervals getNgram's processed output.\n :param processed_ngrams_df: processed crim-intervals getNgram's output.\n :param heatmap_width: the width of the final heatmap (optional)\n :param heatmap_height: the height of the final heatmap (optional)\n :return: a bar chart that displays the different patterns and their counts,\n and a heatmap with the start offsets of chosen voices / patterns\n \"\"\"\n\n processed_ngrams_df = processed_ngrams_df.dropna(how='any')\n \n selector = alt.selection_multi(fields=['pattern'])\n patterns_bar = create_bar_chart('pattern', 'count(pattern)', 'pattern', processed_ngrams_df, selector, selector)\n heatmap = create_heatmap('start', 'end', 'voice', 'pattern', processed_ngrams_df, heatmap_width, heatmap_height,\n selector, selector, tooltip=['start', 'end', 'pattern'])\n return alt.vconcat(patterns_bar, heatmap)\n\ndef plot_ngrams_heatmap(ngrams_df, model=None, selected_pattern = [], voices = [],\n heatmap_width=800, heatmap_height=300):\n \"\"\"\n Plot a heatmap for crim-intervals getNgram's output.\n :param ngrams_df: crim-intervals getNgram's output\n :param model: if not None, rely on the model to calculate the durations of patterns\n instead of outputting only offsets (default=None).\n :param selected_pattern: list of specific patterns the users want (optional)\n :param voices: list of specific voices the users want (optional)\n :param heatmap_width: the width of the final heatmap (optional)\n :param heatmap_height: the height of the final heatmap (optional)\n :return: a bar chart that displays the different patterns and their counts,\n and a heatmap with the start offsets of chosen voices / patterns\n \"\"\"\n processed_ngrams_df = process_ngrams_df(ngrams_df, ngrams_duration=model, selected_pattern=selected_pattern,\n voices=voices)\n return plot_ngrams_df_heatmap(processed_ngrams_df, heatmap_width=heatmap_width,\n heatmap_height=heatmap_height)\n\ndef _from_ema_to_offsets(df, ema_column):\n \"\"\"\n This method adds columns for the start and end measures of patterns to\n the relationship dataframe using the column with the ema address.\n\n :param df: dataframe 
containing relationships between patterns retrieved\n from CRIM relationship json\n :param ema_column: the name of the column storing ema address.\n :return: the processed dataframe with two new columns start and end\n \"\"\"\n # retrieve the measures from ema address and create start and end in place\n df['locations'] = df[ema_column].str.split(\"/\", n=1, expand=True)[0]\n df['locations'] = df['locations'].str.split(\",\")\n df = df.explode('locations')\n df[['start', 'end']] = df['locations'].str.split(\"-\", expand=True)\n \n # convert to float in case measures are fractions\n df['start'] = df['start'].astype(float)\n df['end'] = df['end'].astype(float)\n return df\n\ndef _process_crim_json_url(url_column):\n # remove 'data' from http://crimproject.org/data/observations/1/ or http://crimproject.org/data/relationships/5/\n url_column = url_column.map(lambda cell: cell.replace('data/', ''))\n return url_column\n\n# TODO refactor with a name that is applicable to both relationships\n# TODO and observations.\ndef plot_relationship_heatmap(df, ema_col, main_category='musical_type', other_category='observer.name', option=1,\n heat_map_width=800, heat_map_height=300):\n \"\"\"\n This method plots a chart from a relationships/observations dataframe retrieved from the\n corresponding json files. This chart has two bar charts displaying the counts of the variables\n the users selected, and a heatmap displaying the locations of the relationships.\n :param df: relationships or observations dataframe\n :param ema_col: name of the ema column\n :param main_category: name of the main category for the first bar chart.\n The chart would be colored accordingly (default='musical_type').\n :param other_category: name of the other category for the zeroth bar chart.\n (default='observer.name')\n :param heat_map_width: the width of the final heatmap (default=800)\n :param heat_map_height: the height of the final heatmap (default=300)\n :return: a big chart containing two smaller bar charts and a heatmap\n \"\"\"\n\n df = df.copy() # create a deep copy of the selected observations to protect the original dataframe\n df = _from_ema_to_offsets(df, ema_col)\n\n # sort by id\n df.sort_values(by=main_category, inplace=True)\n\n df['website_url'] = _process_crim_json_url(df['url'])\n\n df['id'] = df['id'].astype(str)\n\n # because altair doesn't work when the categories' names have periods,\n # a period is replaced with an underscore.\n\n new_other_category = other_category.replace(\".\", \"_\")\n new_main_category = main_category.replace(\".\", \"_\")\n\n df.rename(columns={other_category: new_other_category, main_category:new_main_category}, inplace=True)\n\n other_selector = alt.selection_multi(fields=[new_other_category])\n main_selector = alt.selection_multi(fields=[new_main_category])\n\n other_category = new_other_category\n main_category = new_main_category\n\n bar1 = create_bar_chart(main_category, str('count(' + main_category + ')'), main_category, df,\n other_selector | main_selector, main_selector)\n bar0 = create_bar_chart(other_category, str('count(' + other_category + ')'), main_category, df,\n other_selector | main_selector, other_selector)\n\n # heatmap = create_heatmap('start', 'end', 'id', main_category, df, heat_map_width, heat_map_height,\n # other_selector | main_selector, main_selector,\n # tooltip=[main_category, other_category, 'start', 'end', 'id']).interactive()\n\n heatmap = alt.Chart(df).mark_bar().encode(\n x='start',\n x2='end',\n y='id',\n href='website_url',\n 
color=main_category,\n opacity=alt.condition(other_selector | main_selector, alt.value(1), alt.value(0.2)),\n tooltip=['website_url', main_category, other_category, 'start', 'end', 'id']\n ).properties(\n width=heat_map_width,\n height=heat_map_height\n ).add_selection(\n main_selector\n ).interactive()\n\n chart = alt.vconcat(\n alt.hconcat(\n bar1,\n bar0\n ),\n heatmap\n )\n\n return chart\n\n# TODO make private\ndef close_match_helper(cell):\n\n # process each cell into a list of numbers for easy comparisons\n if type(cell) == str:\n cell = cell.split(\",\")\n\n # lstrip('-') so that patterns starting with a negative interval are also converted\n if cell[0].lstrip('-').isdigit():\n cell = [int(item) for item in cell]\n\n return cell\n\n# TODO make private\ndef close_match(ngrams_df, key_pattern):\n ngrams_df['pattern'] = ngrams_df['pattern'].map(lambda cell: close_match_helper(cell))\n ngrams_df['score'] = ngrams_df['pattern'].map(lambda cell: 100*textdistance.levenshtein.normalized_similarity(key_pattern, cell))\n return ngrams_df\n\ndef plot_close_match_heatmap(ngrams_df, key_pattern, ngrams_duration=None, selected_patterns=[], voices=[],\n heatmap_width=800, heatmap_height=300):\n \"\"\"\n Plot how closely the other vectors match a selected vector.\n Uses the Levenshtein distance.\n :param ngrams_df: crim-intervals getNgram's output\n :param key_pattern: a pattern the users selected to compare other patterns with (tuple of floats)\n :param ngrams_duration: if None, simply output the offsets. If the users input a\n list of durations, calculate the end by adding durations with offsets and\n display the end on the heatmap accordingly.\n :param selected_patterns: list of specific patterns the users want (optional)\n :param voices: list of specific voices the users want (optional)\n :param heatmap_width: the width of the final heatmap (optional)\n :param heatmap_height: the height of the final heatmap (optional)\n :return: a heatmap of the start offsets of chosen voices / patterns, shaded by score\n \"\"\"\n\n ngrams = process_ngrams_df(ngrams_df, ngrams_duration=ngrams_duration, selected_pattern=selected_patterns,\n voices=voices)\n ngrams.dropna(how='any', inplace=True) # only the pattern column can be NaN because all columns have starts (==offsets) and voices\n # calculate the score\n key_pattern = close_match_helper(key_pattern)\n score_ngrams = close_match(ngrams, key_pattern)\n\n slider = alt.binding_range(min=0, max=100, step=1, name='cutoff:')\n selector = alt.selection_single(name=\"SelectorName\", fields=['cutoff'],\n bind=slider, init={'cutoff': 50})\n return create_heatmap('start', 'end', 'voice', 'score', score_ngrams, heatmap_width, heatmap_height,\n alt.datum.score > selector.cutoff, selector, tooltip=[])\n\n# Network visualizations\ndef process_network_df(df, interval_column_name, ema_column_name):\n \"\"\"\n Create a small dataframe containing the network information\n \"\"\"\n result_df = pd.DataFrame()\n result_df[['piece.piece_id', 'url', interval_column_name]] = \\\n df[['piece.piece_id', 'url', interval_column_name]].copy()\n result_df['segments'] = \\\n df[ema_column_name].astype(str).str.split(\"/\", 1, expand=True)[0]\n result_df['segments'] = result_df['segments'].str.split(\",\")\n return result_df\n\n# add nodes to graph\ndef add_nodes_to_net(interval_column, interval_type):\n # dictionary maps the first time/melodic interval to its corresponding\n # network\n networks_dict = {'all': Network(directed=True, notebook=True)}\n 
interval_column = interval_column.astype(str)\n networks_dict['all'].add_node('all', color='red', shape='circle', level=0)\n\n # create nodes from the patterns\n for node in interval_column:\n # create nodes according to the interval types\n if interval_type == 'melodic':\n nodes = re.sub(r'([+-])(?!$)', r'\\1,', node).split(\",\")\n elif interval_type == 'time':\n nodes = node.split(\"/\")\n else:\n raise Exception(\"Please put either 'time' or 'melodic' for `interval_type`\")\n\n # nodes would be grouped according to the first interval\n group = nodes[0]\n\n if group not in networks_dict:\n networks_dict[group] = Network(directed=True, notebook=True)\n\n prev_node = 'all'\n for i in range(1, len(nodes)):\n node_id = \"\".join(node for node in nodes[:i])\n # add to its own family network\n networks_dict[group].add_node(node_id, group=group, physics=False, level=i)\n if prev_node != \"all\":\n networks_dict[group].add_edge(prev_node, node_id)\n\n # add to the big network\n networks_dict['all'].add_node(node_id, group=group, physics=False, level=i)\n networks_dict['all'].add_edge(prev_node, node_id)\n prev_node = node_id\n\n return networks_dict\n\ndef generate_network(df, interval_column, interval_type, ema_column, patterns=[]):\n \"\"\"\n Generate a dictionary of networks and a simple dataframe allowing the users\n to search through the intervals.\n :param df: relationships or observations dataframe\n :param interval_column: name of the column containing the interval patterns\n :param interval_type: either 'melodic' or 'time'\n :param ema_column: name of the ema column\n :param patterns: optional list of specific patterns to keep\n :return: a dictionary of networks and an interactive search widget\n \"\"\"\n # process df\n if patterns:\n df = df[df[interval_column].isin(patterns)].copy()\n\n networks_dict = add_nodes_to_net(df[interval_column], interval_type)\n df = process_network_df(df, interval_column, ema_column)\n return networks_dict, create_interactive_df(df, interval_column)\n\ndef manipulate_processed_network_df(df, interval_column, search_pattern, option='starts with'):\n \"\"\"\n Filter the processed network dataframe by a search pattern and highlight the matches.\n :param df: processed network dataframe\n :param interval_column: name of the column containing the interval patterns\n :param search_pattern: string to search for\n :param option: 'starts with', 'ends with', or 'contains'\n :return: a styled, filtered dataframe\n \"\"\"\n if option == 'starts with':\n mask = df[interval_column].astype(str).str.startswith(pat=search_pattern)\n elif option == 'ends with':\n mask = df[interval_column].astype(str).str.endswith(pat=search_pattern)\n else:\n mask = df[interval_column].astype(str).str.contains(pat=search_pattern, regex=False)\n filtered_df = df[mask].copy()\n return filtered_df.fillna(\"-\").style.applymap(lambda x: \"background: #ccebc5\" if search_pattern in x else \"\")\n\ndef create_interactive_df(df, interval_column):\n return interact(manipulate_processed_network_df, df=fixed(df), interval_column=fixed(interval_column), search_pattern='', option=['starts with', 'contains', 'ends with'])\n
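\n# sample usage (a minimal sketch; assumes piece is an ImportedPiece from main_objs,\n# whose getNgrams output is what these plots expect):\n# ngrams = piece.getNgrams(df=piece.getMelodic(), n=4)\n# plot_ngrams_heatmap(ngrams)  # display the interactive chart in a notebook\n"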
},
{
"alpha_fraction": 0.5741116404533386,
"alphanum_fraction": 0.580843448638916,
"avg_line_length": 44.81971740722656,
"blob_id": "b5af92418fce81b5853c259a76d30b2f1c683621",
"content_id": "83ae4406d250dc398d77e4174e5b7e0a2b59278b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65064,
"license_type": "permissive",
"max_line_length": 218,
"num_lines": 1420,
"path": "/binder/main_objs.py",
"repo_name": "RichardFreedman/CRIM_Intervals_Notebooks",
"src_encoding": "UTF-8",
"text": "from music21 import *\nimport music21 as m21\nimport time\n# import requests\n# httpx appears to be faster than requests, will fit better with an async version\nimport httpx\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom itertools import combinations\n\n\n# Unncessary at the moment\n# MEINSURI = 'http://www.music-encoding.org/ns/mei'\n# MEINS = '{%s}' % MEINSURI\n# mei_doc = ET.fromstring(requests.get(path).text)\n# # Find the title from the MEI file and update the Music21 Score metadata\n# title = mei_doc.find(f'{MEINS}meiHead//{MEINS}titleStmt/{MEINS}title').text\n# score.metadata.title = title\n# mei_doc = ET.fromstring(requests.get(path).text)\n# # Find the composer from the MEI file and update the Music21 Score metadata\n# composer = mei_doc.find(f'{MEINS}meiHead//{MEINS}respStmt/{MEINS}persName').text\n# score.metadata.composer = composer\n\n# An extension of the music21 note class with more information easily accessible\n\npathDict = {}\n\nclass NoteListElement:\n \"\"\"\n An extension of the music21 note class\n\n Attributes\n ----------\n note : m21.note.Note\n music21 note class\n offset : int\n cumulative offset of note\n id : int\n unique music21 id\n metadata : music21.metadata\n piece metadata- not normally attached to a music21 note\n part : str\n voice name\n partNumber : int\n voice number, not 0 indexed\n duration : int\n note duration\n piece_url : str\n piece url for note\n prev_note : NoteListElement\n prior non-rest note element\n \"\"\"\n def __init__(self, note: m21.note.Note, metadata, part, partNumber, duration, piece_url, prev_note=None):\n self.note = note\n self.prev_note = prev_note\n self.offset = self.note.offset\n self.id = self.note.id\n self.metadata = metadata\n self.part = part\n self.partNumber = partNumber\n self.duration = duration\n self.piece_url = piece_url\n\n def __str__(self):\n return \"<NoteListElement: {}>\".format(self.note.name)\n\n\nclass ImportedPiece:\n def __init__(self, score):\n self.score = score\n self.analyses = {'note_list': None}\n self._intervalMethods = {\n # (quality, directed, compound): function returning the specified type of interval\n # diatonic with quality\n ('q', True, True): ImportedPiece._qualityUndirectedCompound,\n ('q', True, False): ImportedPiece._qualityDirectedSimple,\n ('q', False, True): lambda cell: cell.name if hasattr(cell, 'name') else cell,\n ('q', False, False): lambda cell: cell.semiSimpleName if hasattr(cell, 'semiSimpleName') else cell,\n # diatonic interals without quality\n ('d', True, True): lambda cell: cell.directedName[1:] if hasattr(cell, 'directedName') else cell,\n ('d', True, False): ImportedPiece._noQualityDirectedSimple,\n ('d', False, True): lambda cell: cell.name[1:] if hasattr(cell, 'name') else cell,\n ('d', False, False): lambda cell: cell.semiSimpleName[1:] if hasattr(cell, 'semiSimpleName') else cell,\n # chromatic intervals\n ('c', True, True): lambda cell: str(cell.semitones) if hasattr(cell, 'semitones') else cell,\n ('c', True, False): lambda cell: str(cell.semitones % 12) if hasattr(cell, 'semitones') else cell,\n ('c', False, True): lambda cell: str(abs(cell.semitones)) if hasattr(cell, 'semitones') else cell,\n ('c', False, False): lambda cell: str(abs(cell.semitones) % 12) if hasattr(cell, 'semitones') else cell\n }\n\n def _getPartSeries(self):\n if 'PartSeries' not in self.analyses:\n part_series = []\n\n for i, flat_part in enumerate(self._getSemiFlatParts()):\n notesAndRests = 
flat_part.getElementsByClass(['Note', 'Rest'])\n part_name = flat_part.partName or 'Part_' + str(i + 1)\n ser = pd.Series(notesAndRests, name=part_name)\n ser.index = ser.apply(lambda noteOrRest: noteOrRest.offset)\n ser = ser[~ser.index.duplicated()]  # remove multiple events at the same offset in a given part\n part_series.append(ser)\n self.analyses['PartSeries'] = part_series\n return self.analyses['PartSeries']\n\n def _getSemiFlatParts(self):\n \"\"\"\n Return and store flat parts inside a piece using the score attribute.\n \"\"\"\n if 'SemiFlatParts' not in self.analyses:\n parts = self.score.getElementsByClass(stream.Part)\n self.analyses['SemiFlatParts'] = [part.semiFlat for part in parts]\n return self.analyses['SemiFlatParts']\n\n def _getPartNames(self):\n \"\"\"\n Return flat names inside a piece using the score attribute.\n \"\"\"\n if 'PartNames' not in self.analyses:\n part_names = []\n for i, part in enumerate(self._getSemiFlatParts()):\n part_names.append(part.partName or 'Part_' + str(i + 1))\n self.analyses['PartNames'] = part_names\n return self.analyses['PartNames']\n\n def _getM21Objs(self):\n if 'M21Objs' not in self.analyses:\n part_names = self._getPartNames()\n self.analyses['M21Objs'] = pd.concat(self._getPartSeries(), names=part_names, axis=1)\n return self.analyses['M21Objs']\n\n def _remove_tied(self, noteOrRest):\n if hasattr(noteOrRest, 'tie') and noteOrRest.tie is not None and noteOrRest.tie.type != 'start':\n return None\n return noteOrRest\n\n def _getM21ObjsNoTies(self):\n if 'M21ObjsNoTies' not in self.analyses:\n df = self._getM21Objs().applymap(self._remove_tied).dropna(how='all')\n self.analyses['M21ObjsNoTies'] = df\n return self.analyses['M21ObjsNoTies']\n\n def regularize(self, df, unit=2):\n '''\n Return the passed `pandas.DataFrame` (df) with its observations\n regularized rhythmically. Pass a duration as the `unit` parameter to\n control at what regular distance observations will be made. Durations\n are measured according to the music21 convention where:\n\n eighth note = .5\n quarter note = 1\n half note = 2\n etc.\n\n For example, if you pass a dataframe of the notes and rests of a piece,\n and set `unit` to 4, whatever is \"sounding\" (whether a note or a\n rest) at every regular whole note will be kept, and any intervening\n notes or rests will be removed. A breve would get renotated as two\n whole notes.\n Regularization also works with non-integer values. So if you wanted to\n regularize at the swung eighth note, for example, you could set:\n\n `unit=1/3`\n '''\n spot = df.index[0] * 1000\n end = self.score.highestTime * 1000\n vals = []\n step = unit * 1000\n while spot < end:\n vals.append(spot)\n spot += step\n new_index = pd.Index(vals).map(lambda i: round(i) / 1000)\n res = df.ffill().reindex(new_index, method='pad')\n return res\n\n def getDuration(self, df=None, n=1):\n '''\n If no dataframe is passed as the df parameter (the default), return a\n `pandas.DataFrame` of floats giving the duration of notes and rests in \n each part where 1 = quarternote, 1.5 = a dotted quarter, 4 = a whole \n note, etc. If a df is passed, then return a df of the same shape giving \n the duration of each of the slices of this df. This is useful if you \n want to know the durations of something other than single notes \n and rests, such as the durations of intervals.\n \n If n is set, it must be an integer >= 1 and less than the number of \n rows in df. It determines how many adjacent items have their durations \n grouped together. 
To get the duration of single events, n should be 1 \n (default). You could set n=3 if you wanted to get the duration of all \n consecutive 3-note groups, for example.'''\n\n if 'Duration' not in self.analyses or df is not None or n != 1:\n _df = self._getM21ObjsNoTies() if df is None else df.copy()\n highestTime = self.score.highestTime\n _df.loc[highestTime, :] = 0\n newCols = []\n for i in range(len(_df.columns)):\n ser = _df.iloc[:, i]\n ser.dropna(inplace=True) \n vals = ser.index[n:] - ser.index[:-n]\n ser.drop(labels=ser.index[-n:], inplace=True)\n ser[:] = vals\n newCols.append(ser)\n result = pd.concat(newCols, axis=1)\n if df is None and n == 1:\n self.analyses['Duration'] = result\n else:\n return result\n return self.analyses['Duration']\n\n def _noteRestHelper(self, noteOrRest):\n if noteOrRest.isRest:\n return 'Rest'\n return noteOrRest.nameWithOctave\n\n def getNoteRest(self):\n '''Return a table of the notes and rests in the piece. Rests are\n designated with the string \"Rest\". Notes are shown such that middle C\n is \"C4\".'''\n if 'NoteRest' not in self.analyses:\n df = self._getM21ObjsNoTies().applymap(self._noteRestHelper, na_action='ignore')\n self.analyses['NoteRest'] = df\n return self.analyses['NoteRest']\n\n def getBeat(self):\n '''\n Return a table of the beat positions of all the notes and rests.\n '''\n if 'Beat' not in self.analyses:\n df = self._getM21ObjsNoTies().applymap(lambda note: note.beat, na_action='ignore')\n self.analyses['Beat'] = df\n return self.analyses['Beat']\n\n def _getBeatIndex(self):\n '''\n Return a series of the first valid value in each row of .getBeat().\n '''\n if 'BeatIndex' not in self.analyses:\n ser = self.getBeat().apply(lambda row: row.dropna()[0], axis=1)\n self.analyses['BeatIndex'] = ser\n return self.analyses['BeatIndex']\n\n def detailIndex(self, df, offset=True, measure=True, beat=True):\n '''\n Return the passed dataframe with a multi-index of the measure and beat\n position.\n '''\n cols = [df, self.getMeasure().iloc[:, 0], self._getBeatIndex()]\n names = ['Measure', 'Beat']\n temp = pd.concat(cols, axis=1)\n temp2 = temp.iloc[:, len(df.columns):].ffill()\n temp2.iloc[:, 0] = temp2.iloc[:, 0].astype(int)\n mi = pd.MultiIndex.from_frame(temp2, names=names)\n ret = temp.iloc[:, :len(df.columns)]\n ret.index = mi\n ret.dropna(inplace=True, how='all')\n ret.sort_index(inplace=True)\n return ret\n
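\n # sample usage (a sketch; assumes piece is an ImportedPiece built from a parsed score):\n # nr = piece.getNoteRest()\n # piece.detailIndex(nr)  # the same notes/rests, indexed by (Measure, Beat)\n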
\n def _beatStrengthHelper(self, noteOrRest):\n if hasattr(noteOrRest, 'beatStrength'):\n return noteOrRest.beatStrength\n return noteOrRest\n\n def getBeatStrength(self):\n ''' Returns a table of the beat strengths of all the notes and rests in\n the piece. This follows the music21 conventions where the downbeat is\n equal to 1, and all other metric positions in a measure are given\n smaller numbers approaching zero as their metric weight decreases.\n Results from this method should not be sent to the regularize method.\n '''\n if 'BeatStrength' not in self.analyses:\n df = self._getM21ObjsNoTies().applymap(self._beatStrengthHelper)\n self.analyses['BeatStrength'] = df\n return self.analyses['BeatStrength']\n\n def getTimeSignature(self):\n \"\"\"\n Return a data frame containing the time signatures and their offsets\n \"\"\"\n\n if 'TimeSignature' not in self.analyses:\n time_signatures = []\n for part in self._getSemiFlatParts():\n time_signatures.append(pd.Series({ts.offset: ts for ts in part.getTimeSignatures()}))\n df = pd.concat(time_signatures, axis=1)\n df = df.applymap(lambda ts: ts.ratioString, na_action='ignore')\n df.columns = self._getPartNames()\n self.analyses['TimeSignature'] = df\n return self.analyses['TimeSignature']\n\n \n def getMeasure(self):\n \"\"\"\n This method retrieves the offsets of each measure in each voice.\n \"\"\"\n if \"Measure\" not in self.analyses:\n parts = self._getSemiFlatParts()\n partMeasures = []\n for part in parts:\n partMeasures.append(pd.Series({m.offset: m.measureNumber \\\n for m in part.getElementsByClass(['Measure'])}))\n df = pd.concat(partMeasures, axis=1)\n df.columns = self._getPartNames()\n self.analyses[\"Measure\"] = df\n \n return self.analyses[\"Measure\"]\n\n def getSoundingCount(self):\n \"\"\"\n Return a series with the number of parts that currently have\n a note sounding.\n \"\"\"\n\n if not 'SoundingCount' in self.analyses:\n\n nr = self.getNoteRest().ffill()\n df = nr[nr != 'Rest']\n ser = df.count(axis=1)\n ser.name = 'Sounding'\n\n self.analyses['SoundingCount'] = ser\n\n return self.analyses['SoundingCount']\n \n\n def _zeroIndexIntervals(ntrvl):\n '''\n Change diatonic intervals so that they count the number of steps, i.e.\n unison = 0, second = 1, etc.\n '''\n if ntrvl == 'Rest':\n return ntrvl\n val = int(ntrvl)\n if val > 0:\n return str(val - 1)\n return str(val + 1)\n\n def _harmonicIntervalHelper(row):\n if hasattr(row[1], 'isRest') and hasattr(row[0], 'isRest'):\n if row[1].isRest or row[0].isRest:\n return 'Rest'\n elif row[1].isNote and row[0].isNote:\n return interval.Interval(row[0], row[1])\n return None\n\n def _melodicIntervalHelper(row):\n if hasattr(row[0], 'isRest'):\n if row[0].isRest:\n return 'Rest'\n elif row[0].isNote and hasattr(row[1], 'isNote') and row[1].isNote:\n return interval.Interval(row[1], row[0])\n return None\n\n def _melodifyPart(ser):\n ser.dropna(inplace=True)\n shifted = ser.shift(1)\n partDF = pd.concat([ser, shifted], axis=1)\n res = partDF.apply(ImportedPiece._melodicIntervalHelper, axis=1).dropna()\n return res\n\n def _getM21MelodicIntervals(self):\n if 'M21MelodicIntervals' not in self.analyses:\n m21Objs = self._getM21ObjsNoTies()\n df = m21Objs.apply(ImportedPiece._melodifyPart)\n self.analyses['M21MelodicIntervals'] = df\n return self.analyses['M21MelodicIntervals']\n\n def _getRegularM21MelodicIntervals(self, unit):\n m21Objs = self._getM21ObjsNoTies()\n m21Objs = self.regularize(m21Objs, unit=unit)\n return m21Objs.apply(ImportedPiece._melodifyPart)\n\n def _qualityUndirectedCompound(cell):\n if hasattr(cell, 'direction'):\n if cell.direction.value >= 0:\n return cell.name\n else:\n return '-' + cell.name\n return cell\n\n def _qualityDirectedSimple(cell):\n if hasattr(cell, 'semiSimpleName'):\n if 
cell.direction.value > 0:\n return cell.semiSimpleName\n else:\n return '-' + cell.semiSimpleName\n return cell\n\n def _noQualityDirectedSimple(cell):\n if hasattr(cell, 'semiSimpleName'):\n if cell.direction.value == -1:\n return '-' + cell.semiSimpleName[1:] \n else:\n return cell.semiSimpleName[1:]\n else:\n return cell\n\n def getMelodic(self, kind='q', directed=True, compound=True, unit=0):\n '''\n Return melodic intervals for all voice pairs. Each melodic interval\n is associated with the starting offset of the second note in the\n interval. If you want melodic intervals measured at a regular duration,\n do not pipe this method's result to the `regularize` method. Instead,\n pass the desired regular durational interval as an integer or float as\n the `unit` parameter.\n\n :param str kind: use \"q\" (default) for diatonic intervals with quality,\n \"d\" for diatonic intervals without quality, \"z\" for zero-indexed\n diatonic intervals without quality (i.e. unison = 0, second = 1,\n etc.), or \"c\" for chromatic intervals. Only the first character is\n used, and it's case insensitive.\n :param bool directed: defaults to True which shows that the voice that\n is lower on the staff is a higher pitch than the voice that is\n higher on the staff. This is designated with a \"-\" prefix.\n :param bool compound: whether to use compound (True, default) or simple\n (False) intervals. In the case of simple diatonic intervals, it\n simplifies to within the octave, so octaves don't get simplified to\n unisons. But for semitonal intervals, an interval of an octave\n (12 semitones) does get simplified to a unison (0).\n :param int/float unit: regular durational interval at which to measure\n melodic intervals. See the documentation of the `regularize` method\n for more about this.\n :returns: `pandas.DataFrame` of melodic intervals in each part\n '''\n kind = kind[0].lower()\n kind = {'s': 'c'}.get(kind, kind)\n _kind = {'z': 'd'}.get(kind, kind)\n settings = (_kind, directed, compound)\n key = ('MelodicIntervals', kind, directed, compound)\n if key not in self.analyses or unit:\n df = self._getRegularM21MelodicIntervals(unit) if unit else self._getM21MelodicIntervals()\n df = df.applymap(self._intervalMethods[settings])\n if kind == 'z':\n df = df.applymap(ImportedPiece._zeroIndexIntervals, na_action='ignore')\n if unit:\n return df\n else:\n self.analyses[key] = df\n return self.analyses[key]\n
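\n # sample usage (a sketch): piece.getMelodic(kind='d') returns diatonic intervals\n # without quality, one column per voice, indexed by offset\n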
\"Bassus_Tenor\".\n\n :param str kind: use \"q\" (default) for diatonic intervals with quality,\n \"d\" for diatonic intervals without quality, \"z\" for zero-indexed\n diatonic intervals without quality (i.e. unison = 0, second = 1,\n etc.), or \"c\" for chromatic intervals. Only the first character is\n used, and it's case insensitive.\n :param bool directed: defaults to True which shows that the voice that\n is lower on the staff is a higher pitch than the voice that is\n higher on the staff. This is desginated with a \"-\" prefix.\n :param bool compound: whether to use compound (True, default) or simple\n (False) intervals. In the case of simple diatonic intervals, it\n simplifies to within the octave, so octaves don't get simplified to\n unisons. But for semitonal intervals, an interval of an octave\n (12 semitones) would does get simplified to a unison (0 semitones).\n '''\n kind = kind[0].lower()\n kind = {'s': 'c'}.get(kind, kind)\n _kind = {'z': 'd'}.get(kind, kind)\n settings = (_kind, directed, compound)\n key = ('HarmonicIntervals', kind, directed, compound)\n if key not in self.analyses:\n df = self._getM21HarmonicIntervals()\n df = df.applymap(self._intervalMethods[settings])\n if kind == 'z':\n df = df.applymap(ImportedPiece._zeroIndexIntervals, na_action='ignore')\n self.analyses[key] = df\n return self.analyses[key]\n\n def _ngrams_offsets_helper(col, n, offsets):\n \"\"\"\n Generate a list of series that align the notes from one ngrams according\n to the first or the last note's offset.\n :param pandas.Series col: A column that originally contains\n notes and rests.\n :param int n: The size of the ngram.\n :param str offsets: We could input 'first' if we want to group\n the ngrams by their first note's offset, or 'last' if we\n want to group the ngram by the last note's offset.\n :return pandas.Series: a list of shifted series that could be grouped by\n first or the last note's offset.\n \"\"\"\n if offsets == 'first':\n chunks = [col.shift(-i) for i in range(n)]\n else: # offsets == 'last':\n chunks = [col.shift(i) for i in range(n - 1, -1, -1)]\n return chunks\n\n def _ngramHelper(col, n, exclude, offsets):\n col.dropna(inplace=True)\n if n == -1:\n # get the starting and ending elements of ngrams\n starts = col[(col != 'Rest') & (col.shift(1).isin(('Rest', np.nan)))]\n ends = col[(col != 'Rest') & (col.shift(-1).isin(('Rest', np.nan)))]\n si = tuple(col.index.get_loc(i) for i in starts.index)\n ei = tuple(col.index.get_loc(i) + 1 for i in ends.index)\n ind = starts.index if offsets == 'first' else ends.index\n vals = [', '.join(col.iloc[si[i] : ei[i]]) for i in range(len(si))]\n ser = pd.Series(vals, name=col.name, index=ind)\n return ser\n\n chunks = ImportedPiece._ngrams_offsets_helper(col, n, offsets)\n chains = pd.concat(chunks, axis=1)\n for excl in exclude:\n chains = chains[(chains != excl).all(1)]\n chains.dropna(inplace=True)\n chains = chains.apply(lambda row: ', '.join(row), axis=1)\n return chains\n \n def getNgrams(self, df=None, n=3, how='columnwise', other=None, held='Held',\n exclude=['Rest'], interval_settings=('d', True, True), unit=0,\n offsets='first'):\n '''\n Group sequences of observations in a sliding window \"n\" events long\n (default n=3). If the `exclude` parameter is passed and any item in that\n list is found in an ngram, that ngram will be removed from the resulting\n DataFrame. Since `exclude` defaults to `['Rest']`, pass an empty list if\n you want to allow rests in your ngrams.\n\n There are two primary modes for the `how` parameter. 
When set to\n        \"columnwise\" (default), this is the simple case where each column of the\n        `df` DataFrame has its events grouped at the offset of the first event\n        in the window. For example, to get 4-grams of melodic\n        intervals:\n\n        ip = ImportedPiece('path_to_piece')\n        ngrams = ip.getNgrams(df=ip.getMelodic(), n=4)\n\n        If `how` is set to 'modules' this will return contrapuntal modules. In\n        this case, if the `df` or `other` parameters are left as None, they will\n        be replaced with the current piece's harmonic and melodic intervals\n        respectively. These intervals will be formed according to the\n        interval_settings argument, which gets passed to the getMelodic and\n        getHarmonic methods (see those methods for an explanation of those\n        settings). This makes it easy to make contrapuntal-module ngrams, e.g.:\n\n        ip = ImportedPiece('path_to_piece')\n        ngrams = ip.getNgrams(how='modules')\n\n        There is a special case for \"open-ended\" module ngrams. Set n=1 and the\n        module ngrams will show the vertical interval between two voices,\n        followed by the connecting melodic interval in the lower voice, but not\n        the next harmonic interval. Open-ended module ngrams can be useful if\n        you want to see how long the imitation in two voice parts goes on for.\n\n        Another special case is when `n` is set to -1. This finds the longest\n        ngrams at all time points excluding subset ngrams. The returned\n        dataframe will have ngrams of length varying between 1 and the longest\n        ngram in the piece.\n\n        The `offsets` setting can have two modes. If \"first\" is selected (the default),\n        the returned ngrams will be grouped according to their first notes' offsets,\n        while if \"last\" is selected, the returned ngrams will be grouped according\n        to the last notes' offsets.\n\n        If you want \"module\" ngrams taken at a regular durational interval,\n        you can omit passing `df` and `other` dataframes and instead pass the\n        desired `interval_settings` and an integer or float for the `unit`\n        parameter. See the `.regularize` documentation for how to use this\n        parameter. Here's an example that will generate contrapuntal-module\n        ngrams at regular minim (half-note) intervals.\n\n        ip = ImportedPiece('path_to_piece')\n        ngrams = ip.getNgrams(how='modules', unit=2)\n\n        Otherwise, you can give specific `df` and/or `other` DataFrames, in which\n        case the `interval_settings` parameter will be ignored. Also, you can\n        use the `held` parameter for when the lower voice sustains a\n        note while the upper voice moves. This defaults to 'Held' to distinguish\n        between held notes and reiterated notes in the lower voice, but if this\n        distinction is not wanted for your query, you may want to pass the way a\n        unison gets labeled in your `other` DataFrame (e.g. 
\"P1\" or \"1\").\n '''\n if how == 'columnwise':\n return df.apply(ImportedPiece._ngramHelper, args=(n, exclude, offsets))\n if df is None:\n df = self.getHarmonic(*interval_settings)\n if unit:\n df = self.regularize(df, unit)\n if other is None:\n other = self.getMelodic(*interval_settings, unit=unit)\n cols = []\n for pair in df.columns:\n lowerVoice = pair.split('_')[0]\n combo = pd.concat([other[lowerVoice], df[pair]], axis=1)\n combo.fillna({lowerVoice: held}, inplace=True)\n combo.insert(loc=1, column='Joiner', value=', ')\n combo['_'] = '_'\n if n == -1:\n har = df[pair]\n starts = har[(har != 'Rest') & (har.shift(1).isin(('Rest', np.nan)))]\n ends = har[(har != 'Rest') & (har.shift(-1).isin(('Rest', np.nan)))]\n starts.dropna(inplace=True)\n ends.dropna(inplace=True)\n si = tuple(har.index.get_loc(i) for i in starts.index)\n ei = tuple(har.index.get_loc(i) + 1 for i in ends.index)\n col = [''.join([cell\n for row in combo.iloc[si[i] : ei[i]].values # second loop\n for cell in row][2:-1]) # innermost loop\n for i in range(len(si))] # outermost loop\n col = pd.Series(col)\n if offsets == 'first':\n col.index = starts.index\n else:\n col.index = ends.index\n else: # n >= 1\n lastIndex = -1\n if n == 1:\n lastIndex = -3\n n = 2\n combo = ImportedPiece._ngrams_offsets_helper(combo, n, offsets)\n combo = pd.concat(combo, axis=1)\n col = combo.iloc[:, 2:lastIndex].dropna().apply(lambda row: ''.join(row), axis=1)\n if exclude:\n mask = col.apply(lambda cell: all([excl not in cell for excl in exclude]))\n col = col[mask]\n col.name = pair\n cols.append(col)\n # in case piece has no harmony and cols stays empty\n if cols:\n return pd.concat(cols, axis=1)\n else:\n return pd.DataFrame()\n\n\n# For mass file uploads, only compatible for whole piece analysis, more specific tuning to come\nclass CorpusBase:\n # Need to consider whether users can input certain scores (which means needing urls selected too), or just to do all in the corpus automatically\n \"\"\"\n A class for importing multiple scores at once\n\n Attributes\n ----------\n paths : list\n list of file paths and urls of scores to be imported\n file paths MUST begin with a '/', otherwise they will be categoried as urls\n scores : list of music21.Score\n list of music21.Score objects- imported from urls and paths\n note_list : list of NoteListElement\n list of notes constructed from scores\n note_list_no_unisons : list of NoteListElement\n list of notes constructed from scores, combining unisons\n \"\"\"\n def __init__(self, paths:list):\n \"\"\"\n Parameters\n ----------\n paths : list\n list file paths/urls to mei files\n file paths MUST begin with a '/', otherwise they will be categoried as urls\n\n Raises\n ----------\n Exception\n If at least one score isn't succesfully imported, raises error\n \"\"\"\n self.paths = paths\n self.scores = [] # store lists of ImportedPieces generated from the path above\n mei_conv = converter.subConverters.ConverterMEI()\n for path in paths:\n if path in pathDict:\n # if the path has already been \"memorized\"\n pathScore = ImportedPiece(pathDict[path])\n self.scores.append(pathDict[path])\n print(\"Memoized piece detected...\")\n continue\n elif not path.startswith('http'):\n print(\"Requesting file from \" + str(path) + \"...\")\n try:\n score = mei_conv.parseFile(path)\n pathDict[path] = ImportedPiece(score)\n self.scores.append(pathDict[path])\n print(\"Successfully imported.\")\n except:\n print(\"Import of \" + str(path) + \" failed, please check your file path/file type. 
Continuing to next file...\")\n else:\n try:\n # self.scores.append(m21.converter.parse(requests.get(path).text))\n score = m21.converter.parse(httpx.get(path).text)\n pathDict[path] = ImportedPiece(score)\n self.scores.append(pathDict[path])\n print(\"Successfully imported.\")\n except:\n print(\"Import from \" + str(path) + \" failed, please check your url. File paths must begin with a '/'. Continuing to next file...\")\n\n if len(self.scores) == 0:\n raise Exception(\"At least one score must be succesfully imported\")\n\n self.note_list = self.note_list_whole_piece()\n self.no_unisons = self.note_list_no_unisons()\n\n def note_list_whole_piece(self):\n \"\"\" Creates a note list from the whole piece for all scores- default note_list\n \"\"\"\n pure_notes = []\n urls_index = 0\n prev_note = None\n\n for imported in self.scores:\n # if statement to check if analyses already done else do it\n score = imported.score\n if imported.analyses['note_list']:\n pure_notes += imported.analyses['note_list']\n urls_index += 1\n continue\n parts = score.getElementsByClass(stream.Part)\n score_notes = []\n for part in parts:\n noteList = part.flat.getElementsByClass(['Note', 'Rest'])\n for note in noteList:\n if note.tie is not None:\n if note.tie.type == 'start':\n note_obj = NoteListElement(note, score.metadata, part.partName, score.index(part), note.quarterLength, self.paths[urls_index], prev_note)\n score_notes.append(note_obj)\n else:\n score_notes[-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, score.metadata, part.partName, score.index(part), note.quarterLength, self.paths[urls_index], prev_note)\n score_notes.append(note_obj)\n # Rests carry the last non-rest note as their prev_note\n if not score_notes[-1].note.isRest:\n prev_note = score_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), score.metadata, part.partName, score.index(part), 4.0, self.paths[urls_index], prev_note)\n score_notes.append(note_obj)\n urls_index += 1\n # add to dictionary\n imported.analyses['note_list'] = score_notes\n pure_notes += score_notes\n return pure_notes\n\n def note_list_no_unisons(self):\n \"\"\" Creates a note list from the whole piece for all scores combining unisons\n\n Combines consecutive notes at the same pitch into one note, adding in the duration\n of the next note into the previous one.\n \"\"\"\n pure_notes = []\n urls_index = 0\n prev_note = None\n for imported in self.scores:\n score = imported.score\n parts = score.getElementsByClass(stream.Part)\n for part in parts:\n noteList = part.flat.getElementsByClass(['Note', 'Rest'])\n prev_pitch = None\n for note in noteList:\n if not note.isRest and note.nameWithOctave == prev_pitch:\n pure_notes[len(pure_notes)-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, score.metadata, part.partName, score.index(part), note.quarterLength, self.paths[urls_index], prev_note)\n pure_notes.append(note_obj)\n if not note.isRest:\n prev_pitch = note.nameWithOctave\n else:\n prev_pitch == 'Rest'\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), score.metadata, part.partName, score.index(part), 4.0, self.paths[urls_index], prev_note)\n pure_notes.append(note_obj)\n urls_index += 1\n return pure_notes\n\n def note_list_selected_offset(self, offsets: list):\n \"\"\"\n Creates a note list from the whole piece for all scores, going by provided offsets\n\n Parameters\n ----------\n offsets : list\n offsets within measures to collect notes at (notes 
collected will be those that are sounding at that offset- not just starting)\n \"\"\"\n pure_notes = []\n urls_index = 0\n prev_note = None\n for imported in self.scores:\n score = imported.score\n parts = score.getElementsByClass(stream.Part)\n for part in parts:\n measures = part.getElementsByClass(stream.Measure)\n for measure in measures:\n voices = measure.getElementsByClass(stream.Voice)\n for voice in voices:\n for note in voice:\n for point in offsets:\n #print(note.offset, point)\n if point >= note.offset and point < (note.offset + note.quarterLength):\n note_obj = NoteListElement(note, score.metadata, part.partName, score.index(part), note.quarterLength, self.paths[urls_index], prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n break\n urls_index += 1\n return pure_notes\n\n def note_list_incremental_offset(self, min_offset):\n \"\"\"\n Creates a note list from the whole piece for all scores, sampling at a regular interval- not within a measure\n\n Parameters\n ----------\n min_offset : int\n sample every x offset- 2 will sample every half note, 1 every quarter note, etc.\n \"\"\"\n pure_notes = []\n urls_index = 0\n prev_note = None\n for imported in self.scores:\n score = imported.score\n for part in score.getElementsByClass(stream.Part):\n counter = 0\n while counter < score.highestTime - min_offset:\n stuff_at_offset = part.flat.getElementsByOffset(counter, mustBeginInSpan=False, mustFinishInSpan=False, includeEndBoundary=True, includeElementsThatEndAtStart=False)\n note_at_offset = None\n for item in stuff_at_offset:\n if type(item) == m21.note.Note or type(item) == m21.note.Rest:\n note_at_offset = item\n break\n if note_at_offset:\n note_obj = NoteListElement(note_at_offset, score.metadata, part.partName, score.index(part), min_offset, self.paths[urls_index], prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n else:\n note_obj = NoteListElement(m21.note.Rest(), score.metadata, part.partName, score.index(part), min_offset, self.paths[urls_index], prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n counter += min_offset\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), score.metadata, part.partName, score.index(part), 4.0, self.paths[urls_index], prev_note)\n urls_index += 1\n return pure_notes\n\n\n def vis_pandas_setup(self, min_offset):\n urls_index = 0\n prev_note = None\n dataframes = []\n for imported in self.scores:\n score = imported.score\n part_rows = []\n pure_notes = []\n row_names = []\n for part in score.getElementsByClass(stream.Part):\n counter = 0\n row_names.append(part.partName)\n while counter < score.highestTime - min_offset:\n stuff_at_offset = part.flat.getElementsByOffset(counter, mustBeginInSpan=False, mustFinishInSpan=False, includeEndBoundary=True, includeElementsThatEndAtStart=False)\n note_at_offset = None\n for item in stuff_at_offset:\n if type(item) == m21.note.Note or type(item) == m21.note.Rest:\n note_at_offset = item\n break\n if note_at_offset:\n note_obj = NoteListElement(note_at_offset, score.metadata, part.partName, score.index(part), min_offset, self.paths[urls_index], prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n else:\n note_obj = NoteListElement(m21.note.Rest(), score.metadata, part.partName, score.index(part), min_offset, self.paths[urls_index], prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n counter += min_offset\n if not 
pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n part_rows.append(pure_notes)\n pure_notes = []\n\n column_names = []\n i = 0\n while i < score.highestTime - min_offset:\n column_names.append(i)\n i += min_offset\n\n df = pd.DataFrame(part_rows, index = row_names, columns = column_names)\n dataframes.append(df)\n return dataframes\n\n# For single file uploads\nclass ScoreBase:\n \"\"\"\n A class for importing a single score- offers more precise construction options\n\n Attributes\n ----------\n url : str\n url or path of mei file\n score : music21.Score\n music21.Score object gathered from mei file import\n note_list : list of NoteListElement\n list of notes constructed from score\n \"\"\"\n def __init__(self, url):\n \"\"\"\n Parameters\n ----------\n url:\n url or path of mei file\n Raises\n ----------\n Exception\n If score isn't succesfully imported, raises error\n \"\"\"\n self.url = url\n print(\"Requesting file from \" + str(self.url) + \"...\")\n # Detect if local file of url based on leading /\n if url in pathDict:\n pathScore = ImportedPiece(pathDict[url])\n self.score = pathDict[url].analyses['scores']\n print(\"Memoized piece detected...\")\n else:\n if url[0] == '/':\n try:\n self.score = converter.subConverters.ConverterMEI().parseFile(url)\n print(\"Successfully imported.\")\n except:\n raise Exception(\"Import from \" + str(self.url) + \" failed, please check your ath/file type\")\n else:\n try:\n # self.score = m21.converter.parse(requests.get(self.url).text)\n self.score = m21.converter.parse(httpx.get(self.url).text)\n print(\"Successfully imported.\")\n except:\n raise Exception(\"Import from \" + str(self.url) + \" failed, please check your url/file type\")\n self.note_list = self.note_list_whole_piece()\n\n def note_list_whole_piece(self):\n \"\"\" Creates a note list from the whole piece- default note_list\n \"\"\"\n pure_notes = []\n parts = self.score.getElementsByClass(stream.Part)\n prev_note = None\n for part in parts:\n noteList = part.flat.getElementsByClass(['Note', 'Rest'])\n for note in noteList:\n if note.tie is not None:\n if note.tie.type == 'start':\n note_obj = NoteListElement(note, self.score.metadata, part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n else:\n pure_notes[len(pure_notes)-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, self.score.metadata, part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), 4.0, self.url, prev_note)\n pure_notes.append(note_obj)\n return pure_notes\n\n # Combines unison intervals into one note- generally for increased pattern finding\n def note_list_no_unisons(self):\n \"\"\" Creates a note list from the whole piece for all scores combining unisons\n\n Combines consecutive notes at the same pitch into one note, adding in the duration\n of the next note into the previous one.\n \"\"\"\n pure_notes = []\n urls_index = 0\n prev_note = None\n parts = self.score.getElementsByClass(stream.Part)\n for part in parts:\n noteList = part.flat.getElementsByClass(['Note', 'Rest'])\n prev_pitch = None\n for note in noteList:\n if not note.isRest and note.nameWithOctave == prev_pitch:\n pure_notes[len(pure_notes)-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, self.score.metadata, 
part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not note.isRest:\n prev_pitch = note.nameWithOctave\n else:\n prev_pitch == 'Rest'\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), 4.0, self.url, prev_note)\n pure_notes.append(note_obj)\n urls_index += 1\n return pure_notes\n\n # Gets only notes that start on the specified beats- allows for user specification in case of weird time signatures\n def note_list_selected_beats(self, beats: list):\n \"\"\"\n Creates a note list from the whole piece, going by provided beats\n\n Parameters\n ----------\n beats : list\n collects all notes which begin on specified beat\n \"\"\"\n pure_notes = []\n parts = self.score.getElementsByClass(stream.Part)\n prev_note = None\n for part in parts:\n noteList = part.flat.getElementsByClass(['Note', 'Rest'])\n for note in noteList:\n if note.beat in beats:\n note_obj = NoteListElement(note, self.score.metadata, part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), 4.0, self.url, prev_note)\n pure_notes.append(note_obj)\n return pure_notes\n\n def note_list_by_offset(self, offsets:list):\n \"\"\"\n Creates a note list from the whole piece, going by provided offsets\n\n Parameters\n ----------\n offsets : list\n offsets within measures to collect notes at (notes collected will be those that are sounding at that offset- not just starting)\n \"\"\"\n pure_notes = []\n part_number = 0\n prev_note = None\n parts = self.score.getElementsByClass(stream.Part)\n for part in parts:\n part_number += 1\n measures = part.getElementsByClass(stream.Measure)\n for measure in measures:\n voices = measure.getElementsByClass(stream.Voice)\n for voice in voices:\n for note in voice:\n for point in offsets:\n if point >= note.offset and point < (note.offset + note.quarterLength):\n note_obj = NoteListElement(note, self.score.metadata, part.partName, part_number, note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n break\n return pure_notes\n\n # Allows for very specific note selection\n def note_list_single_part(self, part, measure_start, num_measures):\n \"\"\"\n Creates a note list from a selected measure range within a single voice\n\n Parameters\n ----------\n part : int\n part number\n measure_start : int\n starting measure\n num_measures : int\n measures until end measure\n \"\"\"\n pure_notes = []\n part_selected = self.score.getElementsByClass(stream.Part)[part]\n measures = part_selected.getElementsByClass(stream.Measure)\n measures_selected = []\n prev_note = None\n for i in range(num_measures):\n measures_selected.append(measures[i+measure_start])\n for measure in measures_selected:\n voices = measure.getElementsByClass(stream.Voice)\n for voice in voices:\n for note in voice:\n print(note.offset)\n if note.tie is not None:\n if note.tie.type == 'start':\n note_obj = NoteListElement(note, self.score.metadata, part_selected.partName, part, note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n else:\n pure_notes[len(pure_notes)-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, 
self.score.metadata, part_selected.partName, part, note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n return pure_notes\n\n # Allows for specific selection in terms of measures, but gets all parts/instruments\n def note_list_all_parts(self, measure_start, num_measures):\n \"\"\"\n Creates a note list from a selected measure range over all voices\n\n Parameters\n ----------\n measure_start : int\n starting measure\n num_measures : int\n measures until end measure\n \"\"\"\n pure_notes = []\n prev_note = None\n parts = self.score.getElementsByClass(stream.Part)\n for part in parts:\n measures = part.getElementsByClass(stream.Measure)\n measures_selected = []\n for i in range(num_measures):\n measures_selected.append(measures[i+measure_start])\n for measure in measures_selected:\n voices = measure.getElementsByClass(stream.Voice)\n for voice in voices:\n for note in voice:\n if note.tie is not None:\n if note.tie.type == 'start':\n note_obj = NoteListElement(note, self.score.metadata, part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n else:\n pure_notes[len(pure_notes)-1].duration += note.quarterLength\n else:\n note_obj = NoteListElement(note, self.score.metadata, part.partName, self.score.index(part), note.quarterLength, self.url, prev_note)\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n # Added rest to ensure parts don't overlap\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), 4.0, self.url, prev_note)\n pure_notes.append(note_obj)\n return pure_notes\n\n def note_list_incremental_offset(self, min_offset):\n \"\"\"\n Creates a note list from the whole piece, sampling at a regular interval- not within a measure\n\n Parameters\n ----------\n min_offset : int\n sample every x offset- 2 will sample every half note, 1 every quarter note, etc.\n \"\"\"\n pure_notes = []\n prev_note = None\n for part in self.score.getElementsByClass(stream.Part):\n counter = 0\n while counter < self.score.highestTime - min_offset:\n stuff_at_offset = part.flat.getElementsByOffset(counter, mustBeginInSpan=False, mustFinishInSpan=False, includeEndBoundary=True, includeElementsThatEndAtStart=False)\n note_at_offset = None\n for item in stuff_at_offset:\n if type(item) == m21.note.Note or type(item) == m21.note.Rest:\n note_at_offset = item\n break\n if note_at_offset:\n note_obj = NoteListElement(note_at_offset, self.score.metadata, part.partName, self.score.index(part), min_offset, self.url, prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n else:\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), min_offset, self.url, prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n counter += min_offset\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), 4.0, self.url, prev_note)\n return pure_notes\n\n\n def vis_pandas_setup(self, min_offset):\n part_rows = []\n prev_note = None\n pure_notes = []\n row_names = []\n for part in self.score.getElementsByClass(stream.Part):\n counter = 0\n row_names.append(part.partName)\n while counter < self.score.highestTime - min_offset:\n stuff_at_offset = part.flat.getElementsByOffset(counter, mustBeginInSpan=False, 
mustFinishInSpan=False, includeEndBoundary=True, includeElementsThatEndAtStart=False)\n note_at_offset = None\n for item in stuff_at_offset:\n if type(item) == m21.note.Note or type(item) == m21.note.Rest:\n note_at_offset = item\n break\n if note_at_offset:\n note_obj = NoteListElement(note_at_offset, self.score.metadata, part.partName, self.score.index(part), min_offset, self.url, prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n else:\n note_obj = NoteListElement(m21.note.Rest(), self.score.metadata, part.partName, self.score.index(part), min_offset, self.url, prev_note)\n note_obj.offset = counter\n pure_notes.append(note_obj)\n counter += min_offset\n if not pure_notes[-1].note.isRest:\n prev_note = pure_notes[-1]\n part_rows.append(pure_notes)\n pure_notes = []\n\n column_names = []\n i = 0\n while i < self.score.highestTime - min_offset:\n column_names.append(i)\n i += min_offset\n\n df = pd.DataFrame(part_rows, index = row_names, columns = column_names)\n return df\n\nclass VectorInterval:\n \"\"\"\n An individual vector with information about the notes creating it\n\n Attributes\n ----------\n vector : int or str\n vector- in generic or semitones: is \"Rest\" if done between a note and a rest\n note1 : NoteListElement\n first note of interval pair\n note2 : NoteListElement\n list of notes constructed from score\n \"\"\"\n def __init__(self, vector, note1: NoteListElement, note2: NoteListElement):\n self.vector = vector\n self.note1 = note1\n self.note2 = note2\n\n def __str__(self):\n if self.note1.note.isRest or self.note2.note.isRest:\n return \"<VectorInterval: Rest, First Note: {}, Second Note: {}>\".format(self.vector, self.note1.note, self.note2.note)\n else:\n return \"<VectorInterval: {}, First Note: {}, Second Note: {}>\".format(self.vector, self.note1.note.nameWithOctave, self.note2.note.nameWithOctave)\n\n# Allows for selected \"vectorizations\" given a note list created from either ScoreBase or CorpusBase\n# Consider making this a Standalone method- an object seems slightly redundant/hard to justify\nclass IntervalBase:\n \"\"\"\n A list of VectorInterval objects created from a note list\n\n Attributes\n ----------\n notes : list\n note list gathered from either CorpusBase or ScoreBase's methods/attributes\n generic_intervals : list\n creates list of VectorInterval objects in terms of generic intervals\n semitone_intervals : list\n creates list of VectorInterval objects in terms of semitone intervals\n \"\"\"\n def __init__(self, notes):\n \"\"\"\n Parameters\n ----------\n notes:\n note list gathered from either CorpusBase or ScoreBase's methods/attributes\n \"\"\"\n self.notes = notes\n self.generic_intervals = self.vectorize_generic(self.notes)\n self.semitone_intervals = self.vectorize_semitone(self.notes)\n\n # Construct intervals in terms of semitone distances between notes\n def vectorize_semitone(self, notes):\n \"\"\"Creates list of VectorInterval objects in terms of semitone intervals\n\n Parameters\n ----------\n notes:\n (frequently self.notes): note list gathered from either CorpusBase or ScoreBase's methods/attributes\n \"\"\"\n vec = []\n for i in range(len(notes)-1):\n if notes[i].note.isRest or notes[i+1].note.isRest:\n interval_obj = VectorInterval(\"Rest\", notes[i], notes[i+1])\n vec.append(interval_obj)\n else:\n interval_semitones = interval.Interval(notes[i].note, notes[i+1].note).semitones\n interval_obj = VectorInterval(interval_semitones, notes[i], notes[i+1])\n vec.append(interval_obj)\n return vec\n\n # Construct 
intervals in terms of generic distance between notes\n def vectorize_generic(self, notes):\n \"\"\"Creates list of VectorInterval objects in terms of generic intervals\n\n Parameters\n ----------\n notes:\n (frequently self.notes): note list gathered from either CorpusBase or ScoreBase's methods/attributes\n \"\"\"\n vec = []\n for i in range(len(notes)-1):\n if notes[i].note.isRest or notes[i+1].note.isRest:\n interval_obj = VectorInterval(\"Rest\", notes[i], notes[i+1])\n vec.append(interval_obj)\n else:\n interval_semitones = interval.Interval(notes[i].note, notes[i+1].note).semitones\n interval_obj = VectorInterval(interval.convertSemitoneToSpecifierGeneric(interval_semitones)[1], notes[i], notes[i+1])\n vec.append(interval_obj)\n return vec\n\n# An individual match event- can be used for close matches as well\nclass Match:\n \"\"\"\n A pattern that has been deemed part of a match\n\n Attributes\n ----------\n pattern : list\n list of vectors in pattern\n first_note : NoteListElement\n first note in the soggetti creating the vector pattern\n last_note : NoteListElement\n last note in the soggetti creating the vector pattern\n durations : list\n list of durations of notes in soggetti creating the vector pattern\n ema : str\n standalone ema snippet for the pattern\n ema_url : str\n url to get mei for the pattern\n \"\"\"\n def __init__(self, pattern, first_note: NoteListElement, last_note: NoteListElement, durations):\n self.pattern = pattern\n self.first_note = first_note\n self.last_note = last_note\n # Construct an ema address for the entire pattern to pass on\n ema = str(self.first_note.note.measureNumber) + \"-\" + str(self.last_note.note.measureNumber) + \"/\" + str(self.first_note.partNumber) + \"/\"\n ema += (\"@\" + str(self.first_note.note.beat) + \"-end\")\n for i in range(self.last_note.note.measureNumber - self.first_note.note.measureNumber - 1):\n ema += \",@start-end\"\n ema += (\",@start-\" + str(self.last_note.note.beat))\n self.ema = ema\n try:\n splice = self.first_note.piece_url.index('mei/')\n self.ema_url = \"https://ema.crimproject.org/https%3A%2F%2Fcrimproject.org%2Fmei%2F\" + str(self.first_note.piece_url[splice + 4:]) + \"/\" + str(self.ema)\n except:\n self.ema_url = \"File must be a crim url to have a valid EMA url\"\n self.durations = durations\n\n# Object representing all the occurences of a pattern in a list of notes/vectors\n# User generally doesn't create this- it is done in the finding matches methods\nclass PatternMatches:\n \"\"\"\n A group of Match objects generated from a pattern\n\n Attributes\n ----------\n pattern : list\n pattern generating matches\n matches : list\n list of Match objects found to be matching the pattern\n \"\"\"\n def __init__(self, pattern, matches:list):\n self.pattern = pattern\n self.matches = matches\n\n def print_exact_matches(self):\n \"\"\"A facilitated way to display all the matches gathered by a find_exact_matches search\n \"\"\"\n print(\"Melodic interval/pattern \" + str(self.pattern) + \" occurs \" + str(len(self.matches)) + \" times:\")\n for match in self.matches:\n print(\"In \" + str(match.first_note.metadata.title) + \" part \" + str(match.first_note.part) + \" beginning in measure \" + str(match.first_note.note.measureNumber) +\\\n \" and ending in measure \" + str(match.last_note.note.measureNumber) + \". 
Notes lengths: \" + str(match.durations))\n print(\"\\n\")\n\n def print_close_matches(self):\n \"\"\"A facilitated way to display all the matches gathered by a find_close_matches search\n \"\"\"\n print(\"Occurences of \" + str(self.pattern) + \" or similar:\")\n for match in self.matches:\n print(\"Pattern \" + str(match.pattern) + \" appears in \" + str(match.first_note.metadata.title) + \" part \" + str(match.first_note.part) + \" beginning in measure \" + str(match.first_note.note.measureNumber) +\\\n \" and ending in measure \" + str(match.last_note.note.measureNumber) + \". Notes lengths: \" + str(match.durations))\n print(\"Said pattern or similar appeared \" + str(len(self.matches)) + \" times.\\n\")\n\nclass ClassifiedMatch:\n \"\"\"\n Group of matches classified to be a periodic entry, imitative duo, or fuga\n\n Attributes\n ----------\n matches : list\n list of Match objects found to be matching the pattern\n type : str\n either \"periodic entry\", \"imitative duo\", or \"fuga\" depending on match classification\n pattern : list\n interval pattern that the matches have in common\n ema : str\n ema address for the series of patterns\n ema_url : str\n url to download mei slice for the series of patterns\n \"\"\"\n def __init__(self, matches: list, type):\n \"\"\"\n Parameters\n ----------\n matches : list\n list of Match objects found to be matching the pattern\n type : str\n either \"periodic entry\", \"imitative duo\", or \"fuga\" depending on match classification\n \"\"\"\n self.matches = matches\n self.type = type\n self.pattern = self.matches[0].pattern\n\n ema_measures = \"\"\n ema_parts = \"\"\n ema_beats = \"\"\n for match in self.matches:\n ema_measures += str(match.first_note.note.measureNumber) + \"-\" + str(match.last_note.note.measureNumber) + \",\"\n for i in range(match.last_note.note.measureNumber - match.first_note.note.measureNumber + 1):\n ema_parts += str(match.first_note.partNumber) + \",\"\n ema_beats += \"@\" + str(match.first_note.note.beat) + \"-end,\"\n for j in range(match.last_note.note.measureNumber - match.first_note.note.measureNumber - 1):\n ema_beats += \"@start-end,\"\n ema_beats += \"@start-\" + str(match.last_note.note.beat) + \",\"\n self.ema = ema_measures[0:len(ema_measures)-1] + \"/\" + ema_parts[0:len(ema_parts)-1] + \"/\" + ema_beats[0:len(ema_beats)-1]\n\n try:\n splice = self.matches[0].first_note.piece_url.index('mei/')\n self.ema_url = \"https://ema.crimproject.org/https%3A%2F%2Fcrimproject.org%2Fmei%2F\" + str(self.matches[0].first_note.piece_url[splice + 4:]) + \"/\" + str(self.ema)\n except:\n self.ema_url = \"File must be a crim url (not a file path) to have a valid EMA url\"\n"
},
{
"alpha_fraction": 0.4481605291366577,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 13.285714149475098,
"blob_id": "b9e45c6699784b58ba31a80edd9ce9f486644711",
"content_id": "5c7baaf87992eaffdc78aedba0c230a29903456f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 299,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 21,
"path": "/binder/requirements.txt",
"repo_name": "RichardFreedman/CRIM_Intervals_Notebooks",
"src_encoding": "UTF-8",
"text": "pyvis\ncertifi==2020.12.5\nchardet==4.0.0\nh11==0.12.0\nhttpcore==0.13.2\nhttpx==0.18.1\nidna==3.1\njoblib==1.0.1\nlxml==4.6.3\nmore-itertools==8.7.0\nmusic21==6.7.1\nnumpy==1.20.2\npandas==1.2.4\npython-dateutil==2.8.1\npytz==2021.1\nrfc3986==1.4.0\nsix==1.15.0\nsniffio==1.2.0\nwebcolors==1.11.1\naltair\ntextdistance"
}
] | 4 |
mason-backup/yuuav_RealtimeDetection | https://github.com/mason-backup/yuuav_RealtimeDetection | e137e20de5dccc22d12e895c74ef7143a7e6596a | e14af13ee095f1070e72a45ec06aada211946edc | 68046cefdd71b608dee0e0e8d52ccb7523e1e19f | refs/heads/master | 2020-04-04T00:38:14.827906 | 2018-11-01T03:29:07 | 2018-11-01T03:29:07 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8086956739425659,
"alphanum_fraction": 0.8086956739425659,
"avg_line_length": 22.200000762939453,
"blob_id": "cf0426d5d6c1bb5439d300307b4c869fa694dcc9",
"content_id": "d3eb9d6010a48808e9b6c4e3b74d92f0dae61fce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 5,
"path": "/Flask_video_stream/flask-video-streaming_update-pi/readme.txt",
"repo_name": "mason-backup/yuuav_RealtimeDetection",
"src_encoding": "UTF-8",
"text": "camera :virtual \n\ncamera_opencv: using opencv to drive camera\n\nCamera_pi: using raspberrypi drive to drive picamera"
},
{
"alpha_fraction": 0.6300989985466003,
"alphanum_fraction": 0.6374257206916809,
"avg_line_length": 40.04878234863281,
"blob_id": "103eafeb1bb8aed38a71bdd0aabb1913891049ba",
"content_id": "1082f0168cd3cb90077538a2eae9f6ef6ca5aa6c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5050,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 123,
"path": "/flask_add_google_api-20180621_reall-time/Google_API/object_detection/object_detection.py",
"repo_name": "mason-backup/yuuav_RealtimeDetection",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nfrom collections import defaultdict\nfrom io import StringIO\n#from matplotlib import pyplot as plt\nfrom PIL import Image\nimport cv2\n#sys.path.append(\"..\")\n\nfrom utils import ops as utils_ops\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n\n\n'''import logging\nimport time,timeit\n\n\ndef ini_():\n TM=time.strftime(\"%Y-%m-%d %H-%M-%S\",time.localtime())\n LOG_FORMAT = \"%(levelname)s -[:%(lineno)d]- %(message)s\"\n #open(log_nm,'w').close()\n logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)\n logging.info('\\n**************************Mason(%s)*******************'%(TM,))'''\n \ndef load_model(PATH_TO_CKPT):\n global detection_graph\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\ndef load_label(): \n # Loading label map \n PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n NUM_CLASSES = 90\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n return category_index\n \n\ndef load_image_into_numpy_array(image):\n print \"load_image_into_numpy_array\"\n (im_width, im_height,ch) = image.shape\n return np.array(image).astype(np.uint8)\n\ndef run_inference_for_single_image(image, graph):\n with graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks']:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[0], image.shape[1])\n detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n 
output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n\n return output_dict\n\n\n\n\n#############\n#detecting\ndef image_detection(image):\n category_index=load_label()\n image_np = load_image_into_numpy_array(image)\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n output_dict = run_inference_for_single_image(image_np, detection_graph)\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=8)\n\n return image_np\n\n"
},
{
"alpha_fraction": 0.7707948088645935,
"alphanum_fraction": 0.7744916677474976,
"avg_line_length": 23.590909957885742,
"blob_id": "8b0814dce8cb0ad4a127a5b94c4598a1bdb2a919",
"content_id": "ab977a9515c6a57eaa628047aa7cf436c9591bdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/README.md",
"repo_name": "mason-backup/yuuav_RealtimeDetection",
"src_encoding": "UTF-8",
"text": "# README #\n\n将树莓派摄像头的实时画面抓取,对抓取的图片对象调用Google API 模块进行识别,将识别后的画面流化传输到本地网页显示。(由于树莓派只有逻辑和一个摄像头,没有显示屏)\n\n\n\n###文件说明###\n* Flask_video_stream/\nflask-video-streaming_virtual-camera:放置四海皆准的虚拟相机(轮流播放图片),网络源码。camera.py文件\nflask-video-streaming-update-pi:修改。开放树莓派摄像头(camera_pi.py文件)。如果是调用其他usb/PC摄像头,则是用camera_opencv.py文件\n* Flask_add_google_api.._reall-time/\n树莓派摄像头;并且调用Google API做识别。 重点是在camera_opencv.py文件 的处理:image 对象, 数据流化\n* 主程序文件\napp.py\n\n\n### How do I get set up? ###\n* 1将文件部署到树莓派\n* 2链接树莓派和摄像头\n* 启动服务\nCAMERA=pi/opencv python app.py\n* 局域网内的PC 浏览器打开url\n"
},
{
"alpha_fraction": 0.4812193810939789,
"alphanum_fraction": 0.49319541454315186,
"avg_line_length": 34.32692337036133,
"blob_id": "2065d95ff8ea9df41aefad0ad8f27fe8f1c947e4",
"content_id": "125c93173f305dd84e384446af26258d2ea7ac26",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1837,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 52,
"path": "/flask_add_google_api-20180621_reall-time/camera_opencv.py",
"repo_name": "mason-backup/yuuav_RealtimeDetection",
"src_encoding": "UTF-8",
"text": "import cv2\n#import tensorflow as tf\n#import os\nfrom base_camera import BaseCamera\n\nimport sys\nsys.path.append('./Google_API/object_detection/')\n\nfrom Google_API.object_detection import object_detection\n\nclass Camera(BaseCamera):\n video_source = 0\n\n @staticmethod\n def set_video_source(source):\n Camera.video_source = source\n\n @staticmethod\n def frames():\n camera = cv2.VideoCapture(Camera.video_source)\n if not camera.isOpened():\n raise RuntimeError('Could not start camera.')\n\n #####################################\n #load model tf graph\n #model info\n print 'loading model'\n MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n object_detection.load_model(PATH_TO_CKPT)\n '''detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')'''\n #####################################\n\n while True:\n # read current frame\n _, img = camera.read()#np.array(img).shape shape is (720, 1280, 3)\n\n #####################################\n #process, call Google_API\n print 'process...'\n img_=object_detection.image_detection(img)\n #####################################\n\n # encode as a jpeg image and return it\n yield cv2.imencode('.jpg', img_)[1].tobytes()\n"
}
] | 4 |
rageobi/DetectionSystem | https://github.com/rageobi/DetectionSystem | 9989997535f780ece886a1d0f494840ceddb3ece | 9efbc6ca12f473eccef886de70cf8a92a8765214 | 73a8c8e952f6360b37d481a205112e096cd1085a | refs/heads/master | 2022-12-07T01:19:55.064969 | 2020-08-31T04:52:33 | 2020-08-31T04:52:33 | 285,884,600 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5385856628417969,
"alphanum_fraction": 0.5584834814071655,
"avg_line_length": 28.983871459960938,
"blob_id": "a7791c65be2306a2185e00a7298b1c29c45cd319",
"content_id": "c9f86d638e5bc4c28ae3d5299e2e1fb14455e8cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3719,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 124,
"path": "/densityscan.py",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "from typing import NamedTuple\nimport math\nimport numpy as np\nimport random\ncount = 0\n\n\nclass Cluster():\n def __init__(self, x=None, y=None):\n global count\n self.x = x\n self.y = y\n self.id = count = count + 1\n self.cluster = -2\n\n def __repr__(self):\n return [self.x], [self.y], [self.cluster]\n\n def CheckValidPoints(self, point, x_dist, y_dist) -> int:\n #same point as the base cluster\n if self.x == point.x and self.y == point.y:\n return 2\n #Within the mentioned distance from Base cluster\n elif self.GetDistance(point, 1) <= x_dist and self.GetDistance(point, 2) <= y_dist:\n return 1\n #Not Within the mentioned distance from Base cluster\n else:\n return 0\n\n def GetDistance(self, p2, check):\n #Get X Distance\n if check == 1:\n return round(abs(p2.x - self.x), 5)\n #Get Y Distance\n elif check == 2:\n return round(abs(p2.y - self.y), 5)\n #Wrong option\n else:\n return -1\n\n\nclass ClusterLists():\n #cluster_val = 0\n def __init__(self):\n self.cluster_list = []\n self.randoms = []\n self.cluster_val = 1\n\n def get_cluster_labels(self):\n st = []\n for x in self.cluster_list:\n st.append(x.cluster)\n #if verbose:\n #print((\" {} clusters for the frame\").format(len(st)))\n return st\n\n def update_random(self):\n if (type(self.cluster_list).__module__ != np.__name__):\n self.reshape()\n if (len(self.randoms) != len(self.cluster_list)):\n self.randoms = list(range(self.cluster_list.shape[0]))\n\n def cluster_cluster(self,x_dist,y_dist):\n self.update_random()\n for i in range(0, len(self.cluster_list)):#len(self.randoms)):\n #choice = random.choice(self.randoms)\n self.CheckValidClusters(self.cluster_list[i], x_dist, y_dist)\n #self.randoms.remove(choice)\n return np.array(self.get_cluster_labels())\n\n def append(self, cluster: Cluster):\n self.cluster_list.append(cluster)\n\n def reshape(self):\n self.cluster_list = np.array(self.cluster_list) # .reshape(shape_0,)\n\n def CheckValidClusters(self, base_cluster, x_dist, y_dist):\n if base_cluster.cluster == -2:\n for cluster in self.cluster_list:\n if cluster.cluster == -2:\n d_check = base_cluster.CheckValidPoints(\n cluster, x_dist, y_dist)\n if d_check == 1:\n cluster.cluster = self.cluster_val\n base_cluster.cluster = self.cluster_val\n self.cluster_val += 1\n\n\ndef testMethod():\n p1 = Cluster(1, 2)\n p2 = Cluster(2, 3)\n\n p = ClusterLists()\n p.append(p1)\n p.append(p2)\n p.append(Cluster(3, 1))\n p.append(Cluster(1, 1))\n p.append(Cluster(2, 2))\n p.append(Cluster(3, 3))\n p.append(Cluster(1, 3))\n p.append(Cluster(2, 1))\n p.append(Cluster(3, 2))\n p.append(Cluster(4, 1))\n p.append(Cluster(2, 4))\n p.append(Cluster(4, 4))\n p.append(Cluster(3, 4))\n p.append(Cluster(2, 4))\n\n p.update_random()\n print(p.randoms)\n for i in range(0, len(p.randoms)):\n choice = random.choice(p.randoms)\n p.CheckValidClusters(p.cluster_list[choice], 1, 1)\n p.randoms.remove(choice)\n print(p.randoms)\n\n for cluster in p.cluster_list:\n print(\"x ={}, y={}, cluster{}\".format(\n cluster.x, cluster.y, cluster.cluster))\n s = p.cluster_list[0].CheckValidPoints(p.cluster_list[1], 1.5, 1.5)\n\n p.reshape()\n p3 = p1.CheckValidPoints(p2, 1.5, 1.5)\n print(p1.z)\n\n"
},
{
"alpha_fraction": 0.526627242565155,
"alphanum_fraction": 0.7100591659545898,
"avg_line_length": 17.88888931274414,
"blob_id": "6511e48eaa2566613f86e320f70f3ee441a15071",
"content_id": "e1ea92ce8471fabe4fb66a5b4a9633b2ce313204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "matplotlib==3.1.2\nnumpy==1.17.4\nscipy==1.5.0\nnuscenes_devkit==1.0.8\nShapely==1.7.0\nscikit_image==0.17.2\npyquaternion==0.9.5\nopencv_contrib_python==4.3.0.36\nPillow==7.2.0"
},
{
"alpha_fraction": 0.5008912682533264,
"alphanum_fraction": 0.5130124688148499,
"avg_line_length": 24.733945846557617,
"blob_id": "143e206d86eab1826ae55723dd15291f3db5fa3c",
"content_id": "fc65c80c974fd780e292f9d44bde699bb12cd8bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2805,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 109,
"path": "/run.sh",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "classifier=2 \nparallel=0\nvalidation=\"false\"\nvisualiseFrames=\"false\"\nvisualisesubFrames=\"false\"\nverbose=\"false\"\nsavefile=\"false\"\n\nHelp()\n{\n echo\n echo \"Run this script with -r flag to execute all approaches with results validation enabled\"\n echo\n echo \"Syntax: Template [-c|p|t|f|s|v]\"\n echo\n echo \"options:\"\n echo\n echo \"c 0 = Slow SVM, 1 = Fast SVM, 2 = Modified YOLO, 3 = Orginal YOLO\"\n echo \"p 0 = For normal processing, 1 = For parallel processing\"\n echo \"t Validate/test results\"\n echo \"f Visualize/Display Frames\"\n echo \"s Visualize/Display Sub-Frames/Regions\"\n echo \"v Verbose mode.\"\n echo \"k Save/keep as file.\"\n echo\n echo \"Default Option values -c 2 -p 0 -t False -f False -s False -v False\"\n echo \n}\n\nExecAll()\n{\n\nc=0\nx=0\np=0\nwhile [ $c -lt 4 ]\ndo\n if [ $c -lt 2 ]\n then\n p=$(($x+0))\n \twhile [ $p -lt 2 ]\n\t do\n if [ $c -eq 0 -a $p -eq 0 ]; then echo '===============================Model A without parallel==============================='\n elif [ $c -eq 0 -a $p -eq 1 ]; then echo '================================Model A with parallel================================='\n elif [ $c -eq 1 -a $p -eq 0 ]; then echo '===============================Model B without parallel==============================='\n else \n echo '================================Model B with parallel================================='\n fi\n\t\t python3 detectionsystem.py -c=$c -p=$p -t=\"true\"\n p=`expr $p + 1`\n done \n else \n \tif [ $c -eq 2 ]; then echo '====================================Modified YOLOv3===================================='\n else echo '====================================Original YOLOv3===================================='\n \tfi\n \tpython3 detectionsystem.py -c=$c -p=0 -t=\"true\"\n fi\n c=`expr $c + 1`\ndone\n\n\n}\nwhile getopts \":hrc:p:t:f:s:v:k:\" option; do\n case $option in\n h ) \n Help \n exit;;\n r )\n ExecAll\n exit;;\n c ) classifier=${OPTARG:-2}\n ;;\n p ) parallel=${OPTARG:-0}\n ;;\n t ) validation=${OPTARG:-False} \n ;;\n f ) visualiseFrames=${OPTARG:-False} \n ;;\n s ) visualisesubFrames=${OPTARG:-False} \n ;;\n v ) verbose=${OPTARG:-False} \n ;;\n k ) savefile=${OPTARG:-False} \n ;;\n esac\ndone\n\nPrintValues()\n{\n echo\n echo classifier \n echo $classifier \n echo parallel\n echo $parallel\n echo validation\n echo $validation\n echo visualiseFrames\n echo $visualiseFrames\n echo visualisesubFrames\n echo $visualisesubFrames\n echo verbose\n echo $verbose\n echo savefile\n echo $savefile\n echo\n}\n#PrintValues\nsource dependencies.sh\npython3 detectionsystem.py -c $classifier -p $parallel -t $validation -f $visualiseFrames -s $visualisesubFrames -v $verbose -k $savefile\n"
},
{
"alpha_fraction": 0.7429149746894836,
"alphanum_fraction": 0.748481810092926,
"avg_line_length": 30.822580337524414,
"blob_id": "ce09d2c05ed842eeef01b02050f3682bda17a4cc",
"content_id": "5132e64cfc62afa89eac259e3815c2be0244e04f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1976,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 62,
"path": "/README.md",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "# DetectionSystem\n\nA system to detect vehicles that are moving based on a series of empirical steps. \n\nThe final detections are done using 3 approaches:\n\n - Region proposal + Hand-engineered SVM with more features\n - Region proposal + Hand-engineered SVM with lesser features\n - Region proposal + YOLOv3\n\n\n## run.sh\n\nA shell script to make execution of detectionsystem.py much simpler and abstracted.\nThe help (-h) parameter can guide you through all the available options. \n\nSample:\n```sh\n$ sudo chmod +x run.sh\n$ ./run.sh -h\n\nRun this script with -r flag to execute all approaches with results validation enabled\n\nSyntax: Template [-c|p|t|f|s|v]\n\noptions:\n\nc 0 = Slow SVM, 1 = Fast SVM, 2 = Modified YOLO, 3 = Orginal YOLO\np 0 = For normal processing, 1 = For parallel processing\nt Validate/test results\nf Visualize/Display Frames\ns Visualize/Display Sub-Frames/Regions\nv Verbose mode.\nk Save/keep as file.\n\nDefault Option values -c 2 -p 0 -t False -f False -s False -v False\n```\n## dependencies.sh\nScript to check if all the dependencies are downloaded, and download them if not.\n\nUnresolved file dependencies - nuscenes mini/full dataset needs to be downloaded from nuscenes [website](https://www.nuscenes.org/). \nNot sharing it or making it availabe through a `wget`in `dependencies.sh` due to Terms of use.\n\n## detectionsystem.py\n\nMain file which controls the detections / predictions of all the moving vehicles. Takes in the same arguments as `run.sh`, and has a similar default option values. \n\n## densityscan.py\n\nClustering module which can cluster features based on different feature distances\n\n## SVCmodel.ipynb\n\nFile containing the training of hand-engineered SVM classifier models (Fast and Slow) which are done by varying the feature extraction techniques.\n\n## requirements.txt\n\nUse the below command to install all the dependencies, or run `dependencies.sh` for executing it. \n\n```sh\n$ pip3 install -r requirement.txt \n``` \n \n"
},
{
"alpha_fraction": 0.6792592406272888,
"alphanum_fraction": 0.7118518352508545,
"avg_line_length": 30.34883689880371,
"blob_id": "c146b9bfd15343c193c2f8c07e9f325f829db0ac",
"content_id": "349a406630526bdd1bd5b1df853777824b9ad1e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 43,
"path": "/dependencies.sh",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "\n\ndownloadfiles()\n{\nif [ -e data/YOLOv3/yolov3.weights ]\nthen\n echo \"yolov3.weights present\"\nelse\n #wget --no-check-certificate -r 'https://docs.google.com/uc?export=download&id=19f64_-Kfv-zJZT15blxqXMVw3oA4uM1Z' -O data/YOLOv3/yolov3.weights -q --show-progress\n wget 'https://pjreddie.com/media/files/yolov3.weights' -O data/YOLOv3/yolov3.weights -q --show-progress\n \nfi\n\nif [ -e data/YOLOv3/yolov3_classes.txt ]\nthen\n echo \"yolov3_classes.txt present\"\nelse\n wget --no-check-certificate -r 'https://docs.google.com/uc?export=download&id=1yo1mcj5nlpctXTcXJwrT649-IvNVqAsT' -O data/YOLOv3/yolov3_classes.txt -q --show-progress\n \nfi\n\nif [ -e data/YOLOv3/yolov3.cfg ]\nthen\n echo \"yolov3.cfg present\"\nelse\n wget --no-check-certificate -r 'https://docs.google.com/uc?export=download&id=1S8j7_2TtpghtLK1kKurbUpi-QHT_vBgW' -O data/YOLOv3/yolov3.cfg -q --show-progress\nfi\n}\nif [[ ! -e data ]]; then\n mkdir -p data/YOLOv3\n\tdownloadfiles\nelse\n if [[ ! -e data/YOLOv3 ]]; then\n mkdir data/YOLOv3\n fi\n downloadfiles\nfi\n\nif [[ ! -e data/v1.0-mini ]]; then\n\techo\n\techo \"Please download the nuscenes dataset from https://www.nuscenes.org/ . If you are professor Derek Molloy, I have shared the dataset with you personally through GDrive. You can download it from there too. :)\"\n\techo\n\texit;\nfi\npip3 install -r requirements.txt\n"
},
{
"alpha_fraction": 0.5499604940414429,
"alphanum_fraction": 0.5680928826332092,
"avg_line_length": 38.30180358886719,
"blob_id": "dd4fb04c0c3968ea13ad311a9bbac86474eecef2",
"content_id": "ff51163e33bbf48bf59a94df774c872e54dae309",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54433,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 1385,
"path": "/detectionsystem.py",
"repo_name": "rageobi/DetectionSystem",
"src_encoding": "UTF-8",
"text": "import argparse\nimport multiprocessing\nfrom functools import partial\nimport math\nfrom multiprocessing import Pool\n#from cvlib.object_detection import draw_bbox\n#import cvlib as cv\nimport glob\nimport cv2 as cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom skimage.feature import hog\nfrom scipy.ndimage.measurements import label\nfrom scipy.ndimage import find_objects\nfrom nuscenes.nuscenes import NuScenes\nfrom nuscenes.utils.data_classes import RadarPointCloud as rpc\nfrom nuscenes.utils.data_classes import LidarPointCloud as lpc\nfrom matplotlib import pyplot as plt\nimport matplotlib.ticker as ticker\nimport os.path as osp\nimport matplotlib.patches as patches\nfrom PIL import Image\nfrom pyquaternion import Quaternion\nfrom nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix\nfrom densityscan import Cluster, ClusterLists\nimport random\nimport pickle\nimport time\nfrom shapely.geometry import Polygon\n\n# golbal variables\nsvc = None\nnet = None\noutput_layers = None\nclasses = None\nc_slide = None\nxscaler = None\n\n\ndef calculation_of_radar_data(radar):\n \"\"\"\n Function to extract features from radar pointcloud data\n\n Parameters\n ----------\n :param radar: Pointcloud data\n\n Returns\n ----------\n point_dist -> array: Distance magnitude of the point from sensor\n point_phi -> array : Azimuth of the point from sensor\n point_rad_velocity -> array : Compensated radial velocity of the point\n velocity_phi -> array : Azimuth of the radial velocity vectors\n \"\"\"\n ## Get required features from radar pointcloud\n x_points = radar.points[0]\n y_points = radar.points[1]\n z_points = radar.points[2]\n x_comp_velocity = radar.points[8]\n y_comp_velocity = radar.points[9]\n x_velocity = radar.points[6]\n y_velocity = radar.points[7]\n\n velocity_phi = np.rad2deg(np.arctan2(y_velocity, x_velocity))\n point_dist = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2)\n point_phi = np.rad2deg(np.arctan2(y_points, x_points))\n point_rad_velocity = np.sqrt(x_comp_velocity ** 2 + y_comp_velocity ** 2)\n\n return point_dist, point_phi, point_rad_velocity, velocity_phi\n\n\ndef custom_map_pointcloud_to_image(nusc,\n pointsensor_token,\n camera_token,\n verbose=False):\n # Inspired from the NuScenes Dev-Kit\n \"\"\"\n Helper function to retrieve the image coordinate transformed point coordinates, clusters mappings for the points and the image frame.\n\n Parameters\n ----------\n :param nusc: Nuscenes object\n :param pointsensor_token: Point sensor token\n :param cam_token: Camera sensor token\n :param verbose: Boolean variable to display console logs\n\n Returns\n ----------\n points -> ndarray: Points data transformed to Image coordinates\n coloring -> list : Cluster associated for points\n im -> PIL Image : Image frame for the instance\n \"\"\"\n # rpc.abidefaults()\n ## Disable all the radar filter settings\n rpc.disable_filters()\n cam = nusc.get('sample_data', camera_token)\n pointsensor = nusc.get('sample_data', pointsensor_token)\n pcl_path = osp.join(nusc.dataroot, pointsensor['filename'])\n if pointsensor['sensor_modality'] == 'lidar':\n pc = lpc.from_file(pcl_path)\n else:\n pc = rpc.from_file(pcl_path)\n im = Image.open(osp.join(nusc.dataroot, cam['filename']))\n # Points live in the point sensor frame. 
So they need to be transformed via global to the image plane.\n # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.\n\n point_dist, point_phi, point_rad_velocity, velocity_phi = calculation_of_radar_data(\n pc)\n ## Convert from meters/h to Km/h\n detections_radial_velocity_kmph = point_rad_velocity * 3.6\n\n ## Get Clusterlist object for velocity vectors azimuth and point distance\n point_cluster = appendtoclusterlist(velocity_phi, point_dist)\n\n ## Cluster all points which are within 2.5 radians of vel_phi and 5m distance as same cluster\n cluster_list = point_cluster.cluster_cluster(2.5, 5)\n\n detections_radial_velocity_kmph = np.reshape(\n detections_radial_velocity_kmph, (1, detections_radial_velocity_kmph.shape[0]))\n d_phi = np.reshape(\n point_phi, (1, point_phi.shape[0]))\n d_dist = np.reshape(\n point_dist, (1, point_dist.shape[0]))\n velocity_phi = np.reshape(\n velocity_phi, (1, velocity_phi.shape[0]))\n\n ## append calculated features to the radar pointcloud\n points = np.append(pc.points, velocity_phi, axis=0)\n points = np.append(points, d_phi, axis=0)\n points = np.append(points, d_dist, axis=0)\n points = np.append(points, detections_radial_velocity_kmph, axis=0)\n #mask = np.where(points[18, :] >= -200)\n #pos = points[:, mask]\n #points = np.reshape(points, (points.shape[0], points.shape[2]))\n pc.points = points\n\n cs_record = nusc.get('calibrated_sensor',\n pointsensor['calibrated_sensor_token'])\n pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)\n pc.translate(np.array(cs_record['translation']))\n\n # Second step: transform to the global frame.\n poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])\n pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)\n pc.translate(np.array(poserecord['translation']))\n\n # Third step: transform into the ego vehicle frame for the timestamp of the image.\n poserecord = nusc.get('ego_pose', cam['ego_pose_token'])\n pc.translate(-np.array(poserecord['translation']))\n pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)\n\n # Fourth step: transform into the camera.\n cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])\n pc.translate(-np.array(cs_record['translation']))\n pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)\n\n # Fifth step: actually take a \"picture\" of the point cloud.\n # Grab the depths (camera frame z axis points away from the camera).\n depths = pc.points[2, :]\n\n ## Let the coloring be based on clusters formed\n coloring = cluster_list\n\n # Take the actual picture (matrix multiplication with camera-matrix + renormalization).\n points = view_points(pc.points[:3, :], np.array(\n cs_record['camera_intrinsic']), normalize=True)\n\n ## rebuilding the pointcloud features\n points = np.append(points, pc.points[3:22, :], axis=0)\n # Remove points that are either outside or behind the camera. 
Leave a margin of 1 pixel for aesthetic reasons.\n # Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera\n # casing for non-keyframes which are slightly out of sync.\n mask = np.ones(depths.shape[0], dtype=bool)\n mask = np.logical_and(mask, depths > 1)\n mask = np.logical_and(mask, points[0, :] > 1)\n mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)\n mask = np.logical_and(mask, points[1, :] > 1)\n mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)\n points = points[:, mask]\n coloring = coloring[mask]\n if verbose:\n print(' Total number of points in frame', points.shape[1])\n return points, coloring, im\n\n\ndef appendtoclusterlist(x, y):\n \"\"\"\n Append points to the Clusterlist\n\n Parameters\n ----------\n :param x: X cordinate of the clusterlist\n :param y: Y cordinate of the clusterlist\n\n Returns\n --------\n cl -> ClusterLists : ClusterList of all the points provided\n\n \"\"\"\n cl = ClusterLists()\n\n ## Forming the clustelist based on data provided\n for data in zip(x, y):\n cl.append(Cluster(data[0], data[1]))\n return cl\n\n\ndef get_boxes_yolo(frame, method, point, visualize=False, verbose=False):\n \"\"\"\n Helper function to predict the vehicle box coordinates through YOLO net approach \n\n Parameters\n ----------\n :param frame: Image frame which needs to be predicted\n :param method: int which specifies the classifier type. (2 for Modified YOLOv3 and 3 for Original YOLOv3)\n :param point: The point data of the current frame instance\n :param visualize: Boolean variable to check if user needs to visualize region frames which are proposed and marked\n :param verbose: Boolean variable to display console logs\n\n\n Returns\n --------\n bbox -> list : Vehicle detected box coordinates\n\n \"\"\"\n if method == 2: ## Modified YOLOv3\n #frame_copy = np.copy(frame)\n\n ## Empirically define the region or sub-frame size based on point distance value\n if point[3] < 15:\n frame_size = 450\n\n elif point[3] < 20:\n frame_size = 200\n\n else:\n frame_size = 100\n\n ## Crop regions to form a new frame\n x1 = int(round(point[0])) - (frame_size)\n y1 = int(round(point[1])) - (frame_size)\n x2 = int(round(point[0])) + (frame_size)\n y2 = int(round(point[1])) + (frame_size)\n\n frame = np.array(frame.crop((x1, y1, x2, y2)))\n else:\n ## Original YOLOv3\n frame = np.array(frame)\n x1 = y1 = 0\n\n bbox, label, confidence = get_yolo_detections(frame, (x1, y1))\n if visualize:\n for i, box in enumerate(bbox):\n frame_copy = np.copy(frame)\n a = (box[0][0] - x1)\n b = (box[0][1] - y1)\n c = (box[1][0] - x1)\n d = (box[1][1] - y1)\n\n cv2.rectangle(frame_copy, (a, b), (c, d), (0, 255, 0))\n # cv2.putText(frame_copy, label[i], (box[0][0]-x1, box[0]\n # [1]-y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n plt.imshow(frame_copy)\n plt.show()\n return bbox\n\n\ndef load_svc():\n \"\"\"\n Function to load trained model for the MODEL B approach\n\n Returns\n ----------\n svc : The SVC model\n \"\"\"\n filename = \"data/svc_hope.p\"\n svc = pickle.load(open(filename, 'rb'))\n return svc\n\n\ndef load_svc_2():\n \"\"\"\n Function to load trained model for the MODEL A approach\n\n Returns\n ----------\n svc : The SVC model\n xscaler : The fitted scaler value\n \"\"\"\n filename = \"data/svmhopeful.p\"\n svc = pickle.load(open(filename, 'rb'))\n filename = \"data/xscalerhopeful.p\"\n xscaler = pickle.load(open(filename, 'rb'))\n return svc, xscaler\n\n\n# Python/Project/data/YOLOv3/yolov3.cfg 
data/YOLOv3/yolov3.weights\n\n\ndef load_net(weights_location='data/YOLOv3/yolov3.weights', config_location='data/YOLOv3/yolov3.cfg', names_location='data/YOLOv3/yolov3_classes.txt'):\n \"\"\"\n Helper function to load the YOLO network\n\n Parameters\n ----------\n :param weights_location: Network weights file location\n :param config_location: Network conifg file location\n :param names_location: Network classes file location\n\n Returns\n --------\n net -> dnn : Loaded Network\n output_layers -> list : Network layers\n classes -> list : Class names\n\n \"\"\"\n ## Load the net based on weights and config provided\n net = cv2.dnn.readNet(weights_location, config_location)\n #net = cv2.dnn_DetectionModel(config_location, weights_location)\n classes = []\n\n ## Load all the classes\n with open(names_location, \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n \n ## Define the output layers built based on loaded net\n layer_names = net.getLayerNames()\n output_layers = [layer_names[i[0] - 1]\n for i in net.getUnconnectedOutLayers()]\n return net, output_layers, classes\n\n\ndef get_yolo_detections(frame, primary_origin=(0, 0)):\n # Reference - https://pysource.com/2019/06/27/yolo-object-detection-using-opencv-with-python/\n \"\"\"\n Function to predict boxes through the loaded YOLO network\n\n Parameters\n ----------\n :param frame: Image frame which needs to be predicted\n :param primary_origin: Tuple with starting coordinates of image. (0,0) for uncropped image. But if region of the frame is sent, pass the starting coordinates of the region wrt to orginal uncropped frame\n\n Returns\n --------\n bbox -> list : Predicted bounding boxes\n label -> list : Predicted box labels\n confidence -> list : Predicted boxes confidence scores\n\n \"\"\"\n \n global net, output_layers, classes\n height, width, channels = frame.shape\n blob = cv2.dnn.blobFromImage(\n frame, 0.00392, (320, 320), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n class_ids = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n ## Take the detections whose confidence score is greater than 0.5 and classes of the boxes are [car,bus,truck]\n if confidence > 0.5 and class_id in [2, 5, 7]:\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n boxes.append([x, y, w, h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n \n ## All the boxes with scores greater than 0.5 and Non-Max Sopression greater than 0.4 are defined as predicted detections\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n bbox = []\n label = []\n confidence = []\n for i in indexes:\n i = i[0]\n box = boxes[i]\n x = int(box[0]) + primary_origin[0]\n y = int(box[1]) + primary_origin[1]\n w = int(box[2])\n h = int(box[3])\n bbox.append(((x, y), ((x+w), (y+h))))\n label.append(str(classes[class_ids[i]]))\n confidence.append(confidences[i])\n\n return bbox, label, confidence\n\n\ndef get_boxes_svm(frame=None, visualize=False, verbose=False, method=1, point=None):\n # Inspired from https://github.com/JunshengFu/vehicle-detection/blob/master/svm_pipeline.py\n \"\"\"\n Helper function to predict the vehicle box coordinates through SVM classifier approach \n\n Parameters\n ----------\n :param frame: Image frame which needs to 
be predicted\n :param visualize: Boolean variable to check if user needs to visualize region frames which are proposed and marked\n :param verbose: Boolean variable to display console logs\n :param method: int which specifies the classifier type. (1 for MODEL B and 0 for MODEL A)\n :param point: The point data of the current frame instance\n\n Returns\n --------\n final_boxes -> list : Vehicle detected box coordinates\n\n \"\"\"\n ## Empirically define the region or sub-frame size based on point distance value\n if point[3] < 15:\n frame_size = 500\n #frame_size_y = 500\n\n elif point[3] < 20:\n frame_size = 250\n #frame_size_y = 250\n\n elif point[3] < 30:\n frame_size = 200\n #frame_size_y = 200\n\n elif point[3] < 40:\n frame_size = 150\n #frame_size_y = 150\n\n elif point[3] < 50:\n frame_size = 120\n #frame_size_y = 120\n\n elif point[3] < 70:\n frame_size = 50\n #frame_size_y = 50\n\n else:\n frame_size = False\n\n ## Empirically calculate the window sizes based on the frame size\n if frame_size:\n if point[3] > 14:\n window_size_1 = int(0.5 * (frame_size))\n window_size_2 = int(0.3 * (frame_size))\n else:\n window_size_1 = int(0.65 * (frame_size))\n window_size_2 = int(0.45 * (frame_size))\n \n ## Crop regions to form a new frame\n x1 = int(round(point[0])) - ((frame_size) // 2)\n y1 = int(round(point[1])) - ((frame_size) // 2)\n x2 = int(round(point[0])) + ((frame_size) // 1.5)\n y2 = int(round(point[1])) + ((frame_size) // 3)\n frame = frame.crop((x1, y1, x2, y2))\n\n ## Define the overlap value based on the SVM model/method\n if method == 0:\n overlap = 0.09\n else:\n overlap = 0.10\n frame = np.array(frame)\n\n ## Get all the windows\n sliding_window_1 = get_window_slides(\n frame, window_size_1, overlap=overlap)\n # sliding_window_1 = get_window_slides(\n # frame, window_size_2, overlap=0.10)\n sliding_windows = sliding_window_1 # + sliding_window_2\n\n ## Get all windows predicted as vehicles\n vehicle_slides = predict_vehicles_slides_2(\n frame, method, sliding_windows)\n #vehicle_slides = predict_vehicles_slides(frame, sliding_windows)\n\n ## Get the final bounding boxes based on vehicle window predictions\n proba_frame, calculated_slides = get_calculated_box(\n frame.shape, vehicle_slides)\n\n ## Draw all the windows/boxes on the image frame\n frame_slides_complete = frame_slides_canvas(frame, sliding_windows)\n frame_slides_refined = frame_slides_canvas(frame, vehicle_slides)\n frame_slides_final = frame_slides_canvas(frame, calculated_slides)\n\n if visualize:\n f, axes = plt.subplots(1, 3, figsize=(20, 100))\n axes[0].set_title(\"All Sliding Windows\")\n axes[0].imshow(frame_slides_complete)\n\n axes[1].set_title(\"Refined Sliding Windows\")\n axes[1].imshow(frame_slides_refined)\n\n axes[2].set_title(\"Final Prediction\")\n axes[2].imshow(frame_slides_final)\n\n plt.show()\n\n final_boxes = []\n for j, slide in enumerate(calculated_slides):\n ## Convert the bounding boxes from sub-frame to image co-ordinates\n if (slide != None and len(slide) > 0):\n a = x1 + slide[0][0]\n b = y1 + slide[0][1]\n c = x1 + slide[1][0]\n d = y1 + slide[1][1]\n final_boxes.append([(a, b), (c, d)])\n return final_boxes\n\n\ndef get_marked_frames(nusc, pointsensor_token, camera_token, method=(2, 0), visualize_frames=False, visualize_sub_frames=False, verbose=False):\n \"\"\"\n Main helper function which handles the calls to other helper function. Gets all the vehicle predicition boxes and the box marked frames. 
\n\n Parameters\n ----------\n :param nusc: Nuscenes object\n :param pointsensor_token: Radar sensor token\n :param cam_token: Camera sensor token\n :param method: Tuple which specifies the (classifier,isParallel)\n :param validate_results: Boolean variable to check if user needs to validate results\n :param visualize_frames: Boolean variable to check if user needs to visualize fully marked image frames\n :param visualize_sub_frames: Boolean variable to check if user needs to visualize region frames which are proposed and marked\n :param verbose: Boolean variable to display console logs\n\n Returns\n --------\n frame -> ndarray : Marked image frames\n box -> list : Vehicle detected box coordinates\n\n \"\"\"\n p, color, frame = custom_map_pointcloud_to_image(\n nusc, pointsensor_token, camera_token, verbose)\n\n ## Get only the X, Y and the calculated Radar features from the pointcloud\n filtered_col = p[[0, 1, 18, 19, 20, 21], :]\n\n ## Cluster information\n color = np.array(color).reshape(1, color.shape[0])\n\n ## Append both to a np array\n new_p = np.append(filtered_col, color, axis=0)\n ## Get all unique cluster values\n un = np.unique(color, axis=1)\n averages = []\n\n def restrict_dupli_frames(average, averages):\n \"\"\"\n Checks if the \"average\" region is redundant for other \"averages\" regions\n \"\"\"\n flag = 1\n\n for avg in averages:\n if abs(avg[0] - average[0]) < 51 and abs(avg[1] - average[1]) < 45:\n flag = 0\n return False\n return True\n\n ## Loop through unique cluster values\n for i, val in enumerate(un[0], 0):\n\n ## Getting all the filtered pointcloud data for a specific cluster value and also has compensated radial velocity above a threshold\n mask = np.logical_and(new_p[6, :] == val, new_p[5, :] > 7)\n filtered_points = new_p[:, mask]\n\n if filtered_points.shape[1] > 0:\n\n ## Average all the point cloud data and store it in a var\n average = np.mean(filtered_points, axis=1)\n if len(averages) == 0:\n averages.append(\n [average[0], average[1], average[3], average[4]])\n else:\n ## Check for dupilcate frames and append only if not\n if restrict_dupli_frames([average[0], average[1]], averages):\n averages.append(\n [average[0], average[1], average[3], average[4]])\n\n boxes = []\n box = []\n if verbose:\n print(' Total number of point regions to be verified:', len(averages))\n\n ## method[0]= 0 = MODEL A,\n ## 1 = MODEL B,\n ## 2 = Modified YOLOv3,\n ## 3 = Original YOLOv3\n\n ## method[1]= 0 = No parallel processing,\n ## 1 = Parallel processing,\n if method[0] <= 1:\n if (method[1]) == 1:\n ## Open process pool and get bounding boxes through get_boxes_svm(...) for every \"average\" radar point\n pool = multiprocessing.Pool()\n func = partial(get_boxes_svm, frame,\n visualize_sub_frames, verbose, method[0])\n boxes = (pool.map(func, averages))\n pool.close()\n pool.join()\n else:\n ## Get bounding boxes through get_boxes_svm(...) for every \"average\" radar point\n for average in averages:\n boxes.append(get_boxes_svm(\n frame, visualize_sub_frames, verbose, method[0], average))\n elif method[0] == 2:\n\n ## Get bounding boxes through get_boxes_yolo(...) for every \"average\" radar point\n for average in averages:\n boxes.append(get_boxes_yolo(\n frame, method[0], average, visualize_sub_frames, verbose))\n else:\n ## Get bounding boxes through get_boxes_yolo(...) 
for every \"average\" radar point\n boxes.append(get_boxes_yolo(\n frame, method[0], (0, 0, 0, 0), visualize_sub_frames, verbose))\n frame = np.array(frame)\n\n for i, bbox in enumerate(boxes):\n if (bbox != None and len(bbox) > 0):\n for j in range(len(bbox)):\n if (bbox[j] != None and len(bbox[j]) > 0):\n a = bbox[j][0][0]\n b = bbox[j][0][1]\n c = bbox[j][1][0]\n d = bbox[j][1][1]\n if (len(box) < 1):\n box.append([[a, b], [c, b], [c, d], [a, d]])\n #cv2.rectangle(frame, (a,b),(c,d), color=(0, 0, 255), thickness=2)\n # plt.imshow(frame)\n # plt.show()\n else:\n\n ## Check if an approximate bounding box is predicted already and if it's predicted already add/retain the one with more area and remove the other\n if check_box_area([[a, b], [c, b], [c, d], [a, d]], box, frame):\n box.append([[a, b], [c, b], [c, d], [a, d]])\n #cv2.rectangle(frame, (a, b), (c, d), color=(0, 255, 0), thickness=2)\n else:\n global c_slide\n # b1_area, b2_area = get_box_area(\n # c_slide, [[a, b], [c, b], [c, d], [a, d]])\n b1_area = get_box_area(c_slide)\n b2_area = get_box_area(\n [[a, b], [c, b], [c, d], [a, d]])\n if b2_area > b1_area:\n box.remove(c_slide)\n box.append([[a, b], [c, b], [c, d], [a, d]])\n #cv2.rectangle(frame, (a, b), (c, d), color=(0, 255, 0), thickness=2)\n #cv2.rectangle(frame, (c_box[0][0], c_box[0][1]), (c_box[2][0], c_box[2][1]), color=(255, 0, 0), thickness=2)\n #cv2.rectangle (frame,(a,b),(c,d), color=(0, 255, 0), thickness=2)\n # plt.imshow(frame)\n # plt.show()\n if verbose:\n print(' Total number of vehicle regions predicted in frame:', len(box))\n marked_boxes = []\n\n ## Build the final bounding boxes unified to same format (All the approaches)\n for rect in box:\n cv2.rectangle(frame, (rect[0][0], rect[0][1]), (rect[2][0], rect[2][1]), color=(\n 0, 255, 0), thickness=2)\n marked_boxes.append(\n ((rect[0][0], rect[0][1]), (rect[2][0], rect[2][1])))\n\n if visualize_frames:\n # plt.imshow(frame)\n # plt.show()\n if verbose:\n print(' Visualising points and predicted frames')\n points_in_image(p, np.array(averages)[:, :2], color, frame)\n\n return frame, box\n\n\ndef points_in_image(points, averages, colouring, frame):\n \"\"\"\n Function which can help in scattering the points over frame and visualize information of the points on hover.\n\n Parameters\n ----------\n :param points: Pointcloud data\n :param averages: Clustered and averaged points which are considered for region proposal\n :param colouring: Coloring of the points which are to be scattered. 
n_points should be equal to n_coloring values\n :param frame: The image frame on which points are to be scattered\n \"\"\"\n\n frame_copy = np.copy(frame)\n fig, ax = plt.subplots()\n\n ## Scatter points based on transformed X & Y coordinates of Radar points and color based on its cluster value\n sc = ax.scatter(points[0, ], points[1, ], c=colouring[0], s=8, alpha=0.5)\n averages = np.transpose(averages)\n\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20), textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"))\n t = sc.get_offsets()\n\n def update_annot(ind):\n \"\"\"\n Build the hover data\n \"\"\"\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n text = \"{}\\n Velocitty Phi ={},\\n Phi = {}\\n dist={},\\n Rad vel ={},\\n cluster ={}\".format(\" \".join(list(map(str, ind[\"ind\"]))),\n \" \".join(\n str([points[18, n] for n in ind[\"ind\"]])),\n \" \".join(str([points[19, n]\n for n in ind[\"ind\"]])),\n \" \".join(str([points[20, n]\n for n in ind[\"ind\"]])),\n \" \".join(str([points[21, n]\n for n in ind[\"ind\"]])),\n \" \".join(str([colouring[0, n] for n in ind[\"ind\"]])))\n annot.set_text(text)\n annot.get_bbox_patch().set_alpha(0.4)\n\n def hover(event):\n \"\"\"\n Capture the hover event and perform suitable action(s)\n \"\"\"\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n ## Scatter the predicted moving vehicle predicted points\n sc2 = ax.scatter(averages[0, ], averages[1, ], s=14, alpha=0.9)\n \n plt.imshow(frame_copy)\n plt.show()\n\n\ndef get_box_area(box):\n \"\"\"\n Helper function to calculate areas of box\n Parameters\n ----------\n :param box: Coordinates for first shape. Sample for rectangle [[a, b], [c, b], [c, d], [a, d]]\n\n Returns\n ----------\n box.area-> GEOSimpl : Area of both the boxes\n \"\"\"\n return Polygon(box).area\n\n\ndef check_box_area(box1, boxes, frame, visualize=False):\n \"\"\"\n Function checks if box1 is already present in the list of boxes. A box is considered to be present if \n intersection area is greater than 85% for box which has been added with the one which is already present. 
\n If Box1 has greater area, it is saved in global variable for replacing the other box which it intersected with.\n\n Parameters\n ----------\n :param box_1: Box cordinates which needs to checked if present already\n :param boxes: All the boxes which have been added prior to box_1 instance\n :param frame: The image frame over which the box rectangles need to visualised\n :param visualize: Boolean variable to check if user needs to visualize fully marked image frames \n\n Returns\n ----------\n bool : If present already or not\n \"\"\"\n for box2 in boxes:\n if not (box2 == box1):\n intersection = calculate_intersection(box1, box2)\n #a1, a2 = get_box_area(box1, box2)\n a1 = get_box_area(box1)\n a2 = get_box_area(box2)\n\n ## Checks if area of interesection between two boxes is less than 15% of other, if not it is considered as redundant prediction\n if intersection < 0.15*a1 and intersection < 0.15*a2:\n continue\n else:\n global c_slide\n if visualize:\n if (a1 > a2):\n c1 = (0, 255, 0)\n c2 = (255, 0, 0)\n else:\n c1 = (255, 0, 0)\n c2 = (0, 255, 0)\n cv2.rectangle(\n frame, (box2[0][0], box2[0][1]), (box2[2][0], box2[2][1]), color=c2, thickness=2)\n cv2.rectangle(\n frame, (box1[0][0], box1[0][1]), (box1[2][0], box1[2][1]), color=c1, thickness=2)\n plt.imshow(frame)\n plt.show()\n c_slide = box2\n return False\n return True\n\n\ndef calculate_intersection(box_1, box_2):\n \"\"\"\n Helper function to calculate Intersection over Union for two shapes\n Parameters\n ----------\n :param box_1: Coordinates for first shape. Sample for rectangle [[a, b], [c, b], [c, d], [a, d]]\n :param box_2: Coordinates for second shape. Sample for rectangle [[a, b], [c, b], [c, d], [a, d]]\n\n Returns\n ----------\n intersection -> list : Area of Intersection between two objects\n \"\"\"\n poly_1 = Polygon(box_1)\n poly_2 = Polygon(box_2)\n intersection = poly_1.intersection(\n poly_2).area # / poly_1.union(poly_2).area\n return intersection\n\n\ndef calculate_iou(box_1, box_2):\n \"\"\"\n Helper function to calculate Intersection over Union for two shapes\n Parameters\n ----------\n :param box_1: Coordinates for first shape. Sample for rectangle [[a, b], [c, b], [c, d], [a, d]]\n :param box_2: Coordinates for second shape. 
Sample for rectangle [[a, b], [c, b], [c, d], [a, d]]\n\n Returns\n ----------\n iou -> list : IOU value ranging between 0 to 1\n \"\"\"\n poly_1 = Polygon(box_1)\n poly_2 = Polygon(box_2)\n iou = poly_1.intersection(poly_2).area / poly_1.union(poly_2).area\n return iou\n\n\ndef get_window_slides(frame, window_size, overlap):\n \"\"\"\n Helper function to retrieve window co-ordinates for an image frame\n\n Parameters\n ----------\n :param frame: The image frame\n :param window_size: Size of windows which need to be extracted\n :param overlap: Overlapping which a window can have over other (0 -1) \n\n Returns\n ----------\n window_slides -> list : List of window box coordinates\n \"\"\"\n assert frame.shape[1] > window_size\n window_slides = []\n # print(frame.shape[0],frame.shape[1],window_size)\n\n ## Defines number of windows in rows and coloumns based on the frame shape, winodow size and overlap\n\n n_x_windows = int(frame.shape[1]//(window_size*overlap))\n n_y_windows = int(frame.shape[0]//(window_size*overlap))\n # print(n_x_windows,n_y_windows)\n \n ## Next row starting point\n y_window_seed = 0\n for i in range(0, n_y_windows):\n if (y_window_seed+window_size) < frame.shape[0]:\n \n ## Next column starting point\n x_window_seed = 0\n for j in range(0, n_x_windows):\n if (x_window_seed + window_size) < frame.shape[1]:\n # print((x_window_seed,y_window_seed),(x_window_seed+window_size,y_window_seed+window_size))\n window_slides.append(\n [(x_window_seed, y_window_seed), (x_window_seed+window_size, y_window_seed+window_size)])\n \n ## Update column starting point\n x_window_seed = int(x_window_seed + (window_size*overlap))\n else:\n break\n \n ## Update row starting point\n y_window_seed = int(y_window_seed + (window_size*overlap))\n else:\n break\n return window_slides\n\n\ndef get_other_features(sub_frame):\n \"\"\"\n Feature extractor function to extract the resized image bins and channel based histogram extracted \n\n Parameters\n ----------\n :param sub_frame: The image frame\n\n Returns\n ----------\n rs_bins -> list : Binned resized image stored as list\n sf_hist -> list : The sub frame whose image channels features are extracted and stored as list\n \"\"\"\n rs_bins = []\n sf_hist = []\n for i in range(3):\n rs_bins.append(cv2.resize(sub_frame[:, :, i], (32, 32)).ravel())\n sf_hist.append(np.histogram(sub_frame[:, :, i], bins=32))\n rs_bins = np.concatenate((rs_bins[0], rs_bins[1], rs_bins[2]))\n sf_hist = np.concatenate((sf_hist[0][0], sf_hist[1][0], sf_hist[2][0]))\n return rs_bins, sf_hist\n\n\ndef frame_slides_canvas(frame, slide_windows):\n \"\"\"\n Function to draw rectangles over image frame\n\n Parameters\n ----------\n :param frame: The image frame \n :param slide_windows: All the windows boxes which are to be drawn over the image frame as rectangles\n\n Returns\n ----------\n frame_copy -> ndarray : Image frame with rectangles drawn\n \"\"\"\n frame_copy = np.array(frame)\n for slide_window in slide_windows:\n color = (random.randint(0, 255), random.randint(\n 0, 255), random.randint(0, 255))\n cv2.rectangle(frame_copy, (slide_window[0][0], slide_window[0][1]), (\n slide_window[1][0], slide_window[1][1]), (color), 1)\n return frame_copy\n\n\ndef predict_vehicles_slides_2(frame, method, slide_windows):\n \"\"\"\n Function to predict all the windows with vehicle detections\n\n Parameters\n ----------\n :param frame: The sub-frame image region \n :param method: Defines the SVM approach to follow. 
0 for MODEL A approach and 1 for MODEL B approach\n :param slide_windows: All the windows boxes drawn for the original image frame\n\n Returns\n ----------\n vehicle_slides -> list : List of predicted vehicle boxes \n \"\"\"\n\n vehicle_slides = []\n\n ## Get the loaded model data\n global svc, xscaler\n\n for slide_window in slide_windows:\n\n sub_frame = frame[slide_window[0][1]: slide_window[1]\n [1], slide_window[0][0]: slide_window[1][0], :]\n sub_frame = cv2.cvtColor(sub_frame, cv2.COLOR_RGB2YUV)\n sub_frame = cv2.resize(sub_frame, (64, 64))\n\n if method == 0:\n ## Get all the required features from images to feed in the classifer as input\n hog_feat = get_hog_features(sub_frame, 15, (8, 8))\n rs_bins, sf_hist = get_other_features(sub_frame)\n test_stacked = np.hstack(\n (rs_bins, sf_hist, hog_feat[0])).reshape(1, -1)\n #test_stacked = np.hstack((rs_bins, hog_feat[0])).reshape(1, -1)\n\n ## Normalize value using the Standard scaler value which is already built\n hog_feat_2 = xscaler.transform(test_stacked)\n # prediction=svc.predict(j)\n\n prediction = svc.predict(hog_feat_2)\n else:\n \n ## Extract the required image feature\n hog_feat = get_hog_features(sub_frame)\n\n prediction = svc.predict(hog_feat)\n if prediction == 1:\n vehicle_slides.append(slide_window)\n return vehicle_slides\n\n\ndef predict_vehicles_slides(frame, slide_windows):\n ## Replaced this function with predict_vehicles_slides_2 function\n vehicle_slides = []\n global svc\n for slide_window in slide_windows:\n\n sub_frame = frame[slide_window[0][1]: slide_window[1]\n [1], slide_window[0][0]: slide_window[1][0], :]\n sub_frame = cv2.cvtColor(sub_frame, cv2.COLOR_RGB2YUV)\n sub_frame = cv2.resize(sub_frame, (64, 64))\n hog_feat = get_hog_features(sub_frame)\n # prediction=svc.predict(j)\n prediction = svc.predict(hog_feat)\n if prediction == 1:\n vehicle_slides.append(slide_window)\n return vehicle_slides\n\n\ndef get_hog_features(frame, orientations=9, pixels_per_cell=(16, 16), cells_per_block=(2, 2), visualize=False, feature_vector=True, multichannel=None):\n \"\"\"\n Helper Function to call the hog feature extractor and return a single or two outputs based on visualize parameter\n\n Parameters\n ----------\n Same as HOG from skimage module\n\n Returns\n ----------\n Returns ravel list of normalized_blocks with hog features and its image if visualize param is set to True; Else returns just the former. 
\n \"\"\"\n normalized_blocks = []\n if visualize:\n normalized_blocks, hog_image = hog(\n frame[:, :, :], orientations, pixels_per_cell, cells_per_block, visualize=visualize, feature_vector=feature_vector)\n return normalized_blocks, hog_image\n else:\n for channel in range(frame.shape[2]):\n normalized_blocks.append(hog(frame[:, :, channel], orientations, pixels_per_cell,\n cells_per_block, visualize=visualize, feature_vector=feature_vector))\n normalized_blocks = [np.ravel(normalized_blocks)]\n return normalized_blocks\n\n\ndef get_calculated_box(frame_size, slide_windows):\n \"\"\"\n Function to check for the most overlapping area in the predicted car regions and return the final vehicle detection box\n\n Parameters\n ----------\n :param frame_size: Image frame size 1600 x 900 for all nuscenes image frames\n :param slide_windows: Sliding windows which are refined to vehicles predictions\n\n Returns\n ----------\n proba_frame -> Tuple : Refined windows take labelled as ndarray\n calculated_slides -> list : List of predicted vehicle boxes in Frame of size frame_size\n \"\"\"\n\n ## Build a dummy frame based on original frame size\n proba_frame = np.zeros((frame_size[0], frame_size[1]))\n \n ## Increase counter value for all the predicted car regions\n for slide_window in slide_windows:\n proba_frame[slide_window[0][1]:slide_window[1][1],\n slide_window[0][0]:slide_window[1][0]] += 1\n # print(proba_frame)\n\n ## Set all the counters to zero where the values are less than number of predicted car regions\n proba_frame[proba_frame <= (len(slide_windows)//2)] = 0\n\n proba_frame, n_vehicles = label(proba_frame)\n calculated_slides = []\n detected_slides = find_objects(proba_frame)\n\n for y_row, x_col in detected_slides:\n calculated_slides.append(\n ((x_col.start, y_row.start), (x_col.stop, y_row.stop)))\n #cv2.rectangle (frame,(x.start,y.start),(x.stop,y.stop), color=(0, 255, 0), thickness=1)\n # plt.imshow(frame)\n # plt.show()\n return proba_frame, calculated_slides\n\n\ndef save_video(frames, filename, fps, size):\n \"\"\"\n Function to save as video in .avi format\n\n Parameters\n ----------\n :param frames: Takes in all the frames as list\n :param filename: Name of the file\n :param fps: Frames per second value\n :param size: Size of image frames\n \"\"\"\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n filename = str('data/videos/')+filename+str(random.randint(0, 1000))+'.avi'\n video = cv2.VideoWriter(filename, fourcc, fps, (size[1], size[0]))\n for frame in frames:\n video.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n cv2.destroyAllWindows()\n video.release()\n print(('Saved Video {} successfully').format(filename))\n\n\ndef get_annotations(nusc, scene_annotations, cam_token, visualize=False, verbose=False):\n \"\"\"\n Function to get all the annotated object boxes\n\n Parameters\n ----------\n :param nusc: Nuscenes object\n :param scene_annotations: Scene annotation tokens\n :param cam_token: Camera sensor token\n :param visualize: Boolean variable to visualize all the annotated frame\n :param verbose: Boolean variable to display console logs\n\n Returns\n ----------\n list: Annotated boxes\n \"\"\"\n annotated_boxes = []\n for ann_token in scene_annotations:\n cam = cam_token\n ann_record = nusc.get('sample_annotation', ann_token)\n\n\n ## Filtering the annotation to 'car' and 'truck', with a 'vehicle.moving' attribute\n if len(ann_record['attribute_tokens']) > 0 and ann_record['category_name'] in ['vehicle.car', 'vehicle.truck']:\n att_token = ann_record['attribute_tokens'][0]\n 
att_record = nusc.get('attribute', att_token)\n # ,'vehicle.stopped']):\n if(att_record['name'] in ['vehicle.moving']):\n data_path, boxes, camera_intrinsic = nusc.get_sample_data(\n cam_token, selected_anntokens=[ann_token])\n\n ## Build the annotated_boxes\n for box in boxes:\n\n corners = view_points(\n box.corners(), view=camera_intrinsic, normalize=True)[:2, :]\n mins = corners.T.min(axis=0)\n maxs = corners.T.max(axis=0)\n a = int(mins[0])\n b = int(mins[1])\n c = int(maxs[0])\n d = int(maxs[1])\n annotated_boxes.append([[a, b], [c, b], [c, d], [a, d]])\n if visualize:\n if verbose:\n print(' Visualising annotations')\n frame = Image.open(data_path)\n frame_copy = np.array(frame)\n cv2.rectangle(frame_copy, (a, b),\n (c, d), (0, 255, 0), 2)\n plt.imshow(frame_copy)\n plt.show()\n return annotated_boxes\n\n\ndef str2bool(v):\n \"\"\"\n Returns boolean value for the string\n Parameters\n ----------\n :param v: Value that needs to be checked and converted\n\n Returns\n ----------\n Boolean value for the inputted value\n\n \"\"\"\n # https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse/31347222\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef get_accuracy(marked_boxes, annotated_boxes, frame, visualize=False, verbose=False):\n \"\"\"\n Evaluation/Validation function to calculate the accuracy\n Parameters\n ----------\n :param marked_boxes: list -> Marked object boxes for the image\n :param annotated_boxes: list -> Annotated object boxes for the image\n :param frame: PIL Image -> frame taken for detection in the current instance\n :param visualize: Boolean variable to visualize object-by-object predicted vs annotated truth for comparison\n :param verbose: Boolean variable to display console logs\n\n Returns\n ----------\n Precision: float\n Recall: float\n True positives : int\n False Positives : int\n \"\"\"\n tp = fp = fn = 0\n iou_list = []\n average = 0.5\n\n for annotated_box in annotated_boxes:\n frame_copy = np.copy(frame)\n\n ## Default values\n max_iou = -1\n pos = -1\n for i, marked_box in enumerate(marked_boxes):\n frame_copy2 = np.copy(frame_copy)\n iou = calculate_iou(marked_box, annotated_box)\n\n ## Checks for the best predicted/marked box match in comparison with annotated box\n if iou > max_iou and iou > 0.5:\n max_iou = iou\n pos = i\n \"\"\" cv2.rectangle(frame_copy2, (marked_box[0][0], marked_box[0][1]), (marked_box[2][0], marked_box[2][1]), color=(\n 255, 0, 0), thickness=2)\n cv2.rectangle(frame_copy2, (annotated_box[0][0], annotated_box[0][1]), (annotated_box[2][0], annotated_box[2][1]), color=(\n 0, 255, 0), thickness=2)\n plt.imshow(frame_copy2)\n plt.show() \"\"\"\n if verbose:\n print(' IoU is:', max_iou)\n \n ## Build confusion matrix quadrants based on the 'max_iou' value\n if max_iou > 0.5:\n tp = tp + 1\n elif max_iou >= 0:\n fp = fp + 1\n if pos == -1:\n fn = fn + 1\n #print(\"Correct prediction\",tp,\"Wrong prediction\",fp,\"Not Predicted\",fn)\n if max_iou >= -1:\n if visualize:\n if verbose:\n print(' Visualising IOU taken vs actual')\n cv2.rectangle(frame_copy, (marked_boxes[pos][0][0], marked_boxes[pos][0][1]), (marked_boxes[pos][2][0], marked_boxes[pos][2][1]), color=(\n 255, 0, 0), thickness=2)\n cv2.rectangle(frame_copy, (annotated_box[0][0], annotated_box[0][1]), (annotated_box[2][0], annotated_box[2][1]), color=(\n 0, 255, 
0), thickness=2)\n cv2.putText(frame_copy, str(round(max_iou, 3)), (marked_boxes[pos][0][0], marked_boxes[pos][0]\n [1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n plt.imshow(frame_copy)\n plt.show()\n iou_list.append(max_iou)\n\n if tp > 0 or fp > 0:\n precision = tp / (len(marked_boxes))\n else:\n precision = 0\n if tp > 0 or fn > 0:\n recall = tp / (tp + fn)\n else:\n recall = 0\n if len(iou_list) > 0.1:\n average = round(sum(iou_list) / len(iou_list), 3)\n # if verbose:\n # print(' Average IoU is:', average)\n\n return precision, recall, tp, fp\n\n\ndef run_detection_system(method=(2, 0), validate_results=False, visualize_frames=False, visualize_sub_frames=False, verbose=False, save_file=False):\n \"\"\"\n Main function which takes uses all the helper functions to make the detections\n Parameters\n ----------\n :param method: Tuple which specifies the (classifier,isParallel)\n :param validate_results: Boolean variable to check if user needs to validate results\n :param visualize_frames: Boolean variable to check if user needs to visualize fully marked image frames\n :param visualize_sub_frames: Boolean variable to check if user needs to visualize region frames which are proposed and marked\n :param verbose: Boolean variable to display console logs\n :param save_file: Boolean variable to save detections to a file\n \"\"\"\n ## Load Nuscenes object and specify required channels\n location = 'data/v1.0-mini'\n nusc = NuScenes(version='v1.0-mini', dataroot=location, verbose=False)\n pointsensor_channel = 'RADAR_FRONT'\n camera_channel = 'CAM_FRONT'\n frames = []\n global net, output_layers, classes, svc, xscaler\n\n ## Loading model/network once per session, so that it is not repeated for every single scene/frame\n if method[0] > 1:\n \n net, output_layers, classes = load_net()\n if verbose:\n print('Loaded YOLO Net')\n filename = 'YOLOv3_'\n else:\n if method[0] == 0:\n svc, xscaler = load_svc_2()\n else:\n svc = load_svc()\n if verbose:\n print('Loaded SVM predictor')\n filename = 'HOG_SVM_'\n\n t0 = time.time()\n\n ## Scenes iterator\n for scene in nusc.scene:\n # if verbose:\n # print('Scene description: ',scene['description'])\n first_sample_token = scene['first_sample_token']\n last_sample_token = scene['last_sample_token']\n check_token = first_sample_token\n pre = []\n rec = []\n\n while (check_token != '') and scene['name'] == 'scene-0061':\n if verbose:\n print(' -------------------New-Scene----------------')\n sample_record = nusc.get('sample', check_token)\n\n ## Getting front radar and camera sensors' token value\n pointsensor_token = sample_record['data'][pointsensor_channel]\n camera_token = sample_record['data'][camera_channel]\n\n ## Get all the frames with detected moving vehicles\n marked_frames, marked_boxes = get_marked_frames(\n nusc, pointsensor_token, camera_token, method, visualize_frames, visualize_sub_frames, verbose)\n frames.append(marked_frames)\n\n ## Validates the prediction based on validate_result parameter\n if validate_results:\n scene_annotations = sample_record['anns']\n annotated_boxes = get_annotations(\n nusc, scene_annotations, camera_token)\n cam = nusc.get('sample_data', camera_token)\n frame = Image.open(osp.join(nusc.dataroot, cam['filename']))\n precision, recall, trueP, trueN = get_accuracy(\n marked_boxes, annotated_boxes, frame, visualize_sub_frames, verbose)\n pre.append(precision)\n rec.append(recall)\n check_token = sample_record['next']\n\n if validate_results and scene['name'] == 'scene-0061':\n print('Avg Precision is:', 
sum(pre) / (len(pre)))\n print('Avg Recall is:', sum(rec)/(len(rec)))\n ## Not using mAP for just one scene and hence commented the below function call\n #getmap(pre, rec)\n t1 = time.time()\n t = t1-t0\n print('Time for ', filename, 'is:', t)\n ## Save the detected frames as video if needed\n if save_file:\n save_video(frames, filename, 10, frames[0].shape[:2])\n\n\ndef validate_args(args):\n \"\"\"\n Validates if the arguments passed are correct\n :param args: Arguments retrieved through argsparser\n \"\"\"\n\n try:\n if type(args.c) == int and type(args.p) == int and args.c >= 0 and args.c <= 3 and args.p >= 0 and args.p <= 1:\n if args.c > 1 and args.p > 0:\n print(\n 'No Parallel processing required for YOLOv3 version. Setting it to default')\n args.p = 0\n else:\n raise ValueError(\n 'Use -h for help. You have entered a wrong integer input')\n if type(args.t) != bool and type(args.f) != bool and type(args.s) != bool and type(args.v) != bool:\n raise ValueError(\n 'Use -h for help. You have entered a wrong boolean input')\n\n except Exception as error:\n print('Caught this error: ' + repr(error))\n exit()\n return args\n\n\nif __name__ == \"__main__\":\n # run_detection_system((2, 0), True,\n # False, False, False, False)\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', type=int, default=2,\n help=\"0 = Slow SVM, 1 = Fast SVM, 2 = Modified YOLO, 3 = Orginal YOLO\")\n parser.add_argument('-p', type=int, default=0,\n help=\"0 = For normal processing, 1 = For parallel processing\")\n parser.add_argument('-t', type=str2bool, nargs='?',\n const=True, default=False, help=\"Validate results\")\n parser.add_argument('-f', type=str2bool, nargs='?',\n const=True, default=False, help=\"Visualize Frames\")\n parser.add_argument('-s', type=str2bool, nargs='?',\n const=True, default=False, help=\"Visualize Sub-Frames\")\n parser.add_argument('-v', type=str2bool, nargs='?',\n const=True, default=False, help=\"Verbose\")\n parser.add_argument('-k', type=str2bool, nargs='?',\n const=True, default=False, help=\"Save/keep detections to a file\")\n\n args = parser.parse_args()\n args = validate_args(args)\n\n run_detection_system((args.c, args.p), args.t,\n args.f, args.s, args.v, args.k)\n"
}
] | 6 |
fjcarnevale/studying | https://github.com/fjcarnevale/studying | 5348ee6165ccaace601a1ab507506f273be83495 | fa1a3a50f7fcd3842c3e431e5658127279f18184 | d3ada721924ab4b40a97f10423a360d9e92d089f | refs/heads/master | 2020-05-17T01:11:11.394414 | 2015-01-13T01:21:31 | 2015-01-13T01:21:31 | 29,164,804 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5206698179244995,
"alphanum_fraction": 0.5436943769454956,
"avg_line_length": 17.959999084472656,
"blob_id": "e6b1e8093a7b62696774c4167bbbd820dc196304",
"content_id": "f3e98b7e0e0ccf8166d7073c97bcd4001b432ffd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1911,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 100,
"path": "/Chaper1.py",
"repo_name": "fjcarnevale/studying",
"src_encoding": "UTF-8",
"text": "import sorting\n\ndef expect_equal(a, b, name):\n if a == b:\n print(\"Pass: \" + name)\n else:\n print(\"Fail: \" + name)\n\n# Question 1.1a\ndef unique_characters(S):\n chars = {}\n\n for c in S:\n if c in chars:\n return False\n chars[c] = True\n\n return True\n\n# Test 1.1a\nS = \"abcdefg\"\nexpect_equal(unique_characters(S), True, \"1.1a Unique\")\nS = \"abcdeefg\"\nexpect_equal(unique_characters(S), False, \"1.1a Not Unique\")\n\n\n# Question 1.1b\ndef unique_characters_no_hash(S):\n \n A = list(S)\n\n sorting.quicksort(A)\n\n for i in range(len(A) - 1):\n if A[i] == A[i + 1]:\n return False\n\n return True\n\n# Test 1.1b\nS = \"abcdefg\"\nexpect_equal(unique_characters_no_hash(S), True, \"1.1b Unique\")\nS = \"aebcdefg\"\nexpect_equal(unique_characters_no_hash(S), False, \"1.1b Not Unique\")\n\n# Question 1.2\n\ndef reverse_string(S):\n A = list(S)\n i = 0\n j = len(A) - 1\n\n while i < j:\n A[i],A[j] = A[j],A[i]\n i += 1\n j -= 1\n\n return \"\".join(A)\n\n\n# Test 1.2\nS = \"abcde\"\nexpect_equal(reverse_string(S), \"edcba\", \"1.2 Odd\")\nS = \"abcdef\"\nexpect_equal(reverse_string(S), \"fedcba\", \"1.2 Even\")\n\n\n# Question 1.3\ndef is_permutation(A,B):\n A = list(A)\n B = list(B)\n\n if len(A) != len(B):\n return False\n\n letters = {}\n for c in A:\n if c not in letters:\n letters[c] = {\"A\":0, \"B\":0}\n letters[c][\"A\"] += 1\n\n for c in B:\n if c not in letters:\n return False\n letters[c][\"B\"] += 1\n\n for c in letters:\n if letters[c][\"A\"] != letters[c][\"B\"]:\n return False\n\n return True\n\n# Test 1.3\nA = \"abcde\"\nB = \"aebdc\"\nexpect_equal(is_permutation(A,B), True, \"1.3 Match\")\nB = \"aebdcf\"\nexpect_equal(is_permutation(A,B), False, \"1.3 Mismatch Different Length\")\nB = \"abdcz\"\nexpect_equal(is_permutation(A,B), False, \"1.3 Mismatch Different Letters\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.4650602340698242,
"alphanum_fraction": 0.4771084189414978,
"avg_line_length": 19.700000762939453,
"blob_id": "ca42566a92d833b74561aac88a997f3c328f8aad",
"content_id": "25ce1f832b7e4d23039a904f10cb0b7f988867dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 20,
"path": "/sorting.py",
"repo_name": "fjcarnevale/studying",
"src_encoding": "UTF-8",
"text": "\ndef quicksort(A):\n quicksort_impl(A, 0, len(A) - 1)\n\ndef quicksort_impl(A, p, r):\n if p >= r:\n return\n \n pivot = A[r]\n\n next_swap = p\n \n for i in range(p, r):\n if A[i] < A[r]:\n A[next_swap],A[i] = A[i], A[next_swap]\n next_swap += 1\n\n A[next_swap],A[r] = A[r], A[next_swap]\n\n quicksort_impl(A, p, next_swap - 1)\n quicksort_impl(A, next_swap + 1, r)\n"
}
] | 2 |
mengjiuxi/Python | https://github.com/mengjiuxi/Python | 30cb7e0636a513567eb03d36c12669443af66186 | 519b99599088e912985fac24f4dcf877d66e581c | 0a74b1fa166588c0020f1923214c62daf9f52877 | refs/heads/master | 2020-03-05T17:27:39.418934 | 2015-08-27T10:20:40 | 2015-08-27T10:20:40 | 41,035,753 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6761904954910278,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 25.25,
"blob_id": "680db9dfa074a9c2af68d0e7ea78a718df948fdf",
"content_id": "bef2899fa290cfefdb3b1d3ca040d76add92dfec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/writecsv.py",
"repo_name": "mengjiuxi/Python",
"src_encoding": "UTF-8",
"text": "import csv\ndef writecsv(filename, mout):#mout for memory output\n\twith open(filename, 'a+', ) as d:\n\t\twriter = csv.writer(d)\n\t\twriter.writerow(mout)\n\t\t# for row in mout:\n\t\t# \twriter.writerow(row)\n\t\t# \tprint row\n"
},
{
"alpha_fraction": 0.6112650036811829,
"alphanum_fraction": 0.6214219927787781,
"avg_line_length": 22.042552947998047,
"blob_id": "891aee00ed30b7e5f7aee3781f667cbdc9519697",
"content_id": "7b8a1b8bf10188b3aaeb683a12672e2aabc55612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 47,
"path": "/CSVmanager.py",
"repo_name": "mengjiuxi/Python",
"src_encoding": "UTF-8",
"text": "import csv \nimport string\nimport System\n#from System import Byte\n#import numpy as np\nfilename = \"test_f01_MCT.csv\"\nwith open(filename, 'rb') as f:\n\tf.next()\n\treader = csv.reader(f)\n\t#ncols = len(f.readline().split(','))\n\t#print ncols\n\tData = list(reader)#generate a list of list in csv file\t\n\tfor ele in Data:\n\t\tfor y in range(len(ele)):\n\t\t\tif (y == 0):\n\t\t\t\tele.pop(0)\n\t\t\telse:\n\t\t\t\tif (y == len(ele)):\n\t\t\t\t\tele.pop(len(ele) - 1)\n#\tprint Data\n\t#print Data[0][3]\n#convert data type in the matrix \n\t# for ele in Data:\n\t# \tprint ele\n\t# \tfor y in ele:\n\t# \t\ty = [System.Byte(int(y)) for y in ele]\n\t# \t\tprint y\n\tData = [[System.Byte(int(y)) for y in ele] for ele in Data]\n#\tprint Data\n#\t\tnew=[System.Byte(int(y)) for y in ele]\t\t\n#\tprint Data\n\n\t# newrow = []\n\t# for row in reader:\n\t# \tif reader.line_num == 1:\n\t# \t\tcontinue\n\t# \trow.pop(0)\n\t# \tnewrow.append(row)\n\n\t# data = np.loadtxt(f, dtype=int, delimiter=',', skiprows=1, usecols=range(1, ncols))\n\n\t# print newrow\n\twith open('destination.csv', 'w', ) as d:\n\t\twriter = csv.writer(d)\n\t\tfor row in Data:\n\t\t\twriter.writerow(row)\n\t\t\tprint row\n"
},
{
"alpha_fraction": 0.6421636343002319,
"alphanum_fraction": 0.6837725639343262,
"avg_line_length": 25.740739822387695,
"blob_id": "d0f713a3d7f8bcc574c8ac322d8d5c092b274ede",
"content_id": "8c9aaae98ecbd62f3827288bcc18a6133b57df63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 721,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 27,
"path": "/test.py",
"repo_name": "mengjiuxi/Python",
"src_encoding": "UTF-8",
"text": "import System\nimport readcsv\nfrom writecsv import writecsv\nimport csv\n#import numpy\n#from System import Array\n#System.Array = lst\nlst = ['1', '4.125', '1.9001', '3.0', '5']\nprint lst.index('1') \nprint lst.index('3.0') \n#lst = [float(y) for y in range(len(lst))], this will convert the list to [0, 1, 2, 3, 4]\n#print lst\n#lst = [float(y) for y in lst]\nprint lst\nlst = [System.Byte(float(y)) for y in lst]\n#vals = System.Array[System.Byte]([1, 4, 1, 3, 5])\nvals = System.Array[System.Byte](lst)\n#print lst\t\n#print vals\t\nprint System.Byte(vals[2])\t\nData = readcsv.readHKE(\"test_f01_MCT.csv\")\nprint len(Data)\nInput = readcsv.getvalue(brktimes = 4, data = Data)\nprint Input\n\nwritecsv(\"destination.csv\", Input)\nprint \"finished\""
},
{
"alpha_fraction": 0.6273637413978577,
"alphanum_fraction": 0.6340377926826477,
"avg_line_length": 24.685714721679688,
"blob_id": "ab09af38748df69654a967f7cb1eeb3e27023efb",
"content_id": "311626341513282e0cfed112042099d76ee6b778",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 35,
"path": "/readcsv.py",
"repo_name": "mengjiuxi/Python",
"src_encoding": "UTF-8",
"text": "import csv \nimport string\nimport System\ndef readHKE(filename):\n\t# filename = \"test_f01_MCT.csv\"\n\twith open(filename, 'rb') as f:\n\t\tf.next()\n\t\treader = csv.reader(f)\n\t\tData = list(reader)#generate a list of list in csv file\t\n\t\tfor ele in Data:#remove the first and the last column\n\t\t\tfor y in range(len(ele)):\n\t\t\t\tif (y == 0):\n\t\t\t\t\tele.pop(0)\n\t\t\t\telse:\n\t\t\t\t\tif (y == len(ele)):\n\t\t\t\t\t\tele.pop(len(ele) - 1)\n\t\t#Convert data type to System.Byte\n\t\tData = [[System.Byte(int(y)) for y in ele] for ele in Data]\n\t \tprint \"Input stored in Data\"\n\t \treturn Data\ndef getvalue(brktimes, data):\n\tif brktimes > (len(data) -1):\n\t\tprint \"finish loading input\"\n\telse:\n\t\tfor ele in data:\t \n\t\t\t\tif data.index(ele) == brktimes:\n\t\t\t\t\tinput_data = ele\n\t\t\t\telse: continue\n\t\treturn input_data\n\n# with open('destination.csv', 'w', ) as d:\n# \twriter = csv.writer(d)\n# \tfor row in Data:\n# \t\twriter.writerow(row)\n# \t\t# print row\n"
}
] | 4 |
AugustinP/learn-python-the-hard-way | https://github.com/AugustinP/learn-python-the-hard-way | 3f0a1dd744bb944f1000025943edc98d11d8b645 | b27d773095b74c4e3396c60c55f931ac7ea5e6ea | 5b08342559029de766a3c3fbf1887b32c35da0ea | refs/heads/master | 2021-05-28T00:46:17.377205 | 2013-07-19T00:13:32 | 2013-07-19T00:13:32 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5943396091461182,
"alphanum_fraction": 0.5943396091461182,
"avg_line_length": 25.75,
"blob_id": "70caf9feb90749d2f9e01af8ac9aabf5d60d483d",
"content_id": "bf4d73a365988b20b6b95198f331e8a612c2d155",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 4,
"path": "/README.md",
"repo_name": "AugustinP/learn-python-the-hard-way",
"src_encoding": "UTF-8",
"text": "learn-python-the-hard-way\n=========================\n\nA log of my exercises from Learn Python the Hard Way."
},
{
"alpha_fraction": 0.7447698712348938,
"alphanum_fraction": 0.7447698712348938,
"avg_line_length": 21.809524536132812,
"blob_id": "42a2bec9e4234ab5a8707654a640fa9303b9ec8f",
"content_id": "f65138a48037098e622eafefb3d8bfeb599c03fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/ex15.py",
"repo_name": "AugustinP/learn-python-the-hard-way",
"src_encoding": "UTF-8",
"text": "from sys import argv\n\nscript, filename = argv\n\n# declares txt variable\ntxt = open(filename)\n\n# announces and opens the chosen file\nprint \"Here's your file %r:\" % filename\n# calls read function on txt\nprint txt.read()\n\n# repeats the process but using raw_input instead\nprint \"Type the filename again:\"\n# declares file_again variable\nfile_again = raw_input(\"> \")\n# declares txt_again variable\ntxt_again = open(file_again)\n\n# calls read function on txt_again\nprint txt_again.read()"
},
{
"alpha_fraction": 0.621802031993866,
"alphanum_fraction": 0.6840934157371521,
"avg_line_length": 26.272727966308594,
"blob_id": "4335be4e149c8969236537cd55eb316fa1f5ec13",
"content_id": "257b4a589d22244c23e215c0b3883325a609a5c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 33,
"path": "/ex3.py",
"repo_name": "AugustinP/learn-python-the-hard-way",
"src_encoding": "UTF-8",
"text": "#Printing intent phrase\nprint \"I will now count my chickens:\"\n\n#Calculating number of hens and roosters\n#Using floating point numbers where necessary to make results more accurate\nprint \"Hens\", 25 + 30 / 6\nprint \"Roosters\", 100.0 - 25.0 * 3.0 % 4.0\n\n#Printing intent phrase\nprint \"Now I will count the eggs:\"\n\n#Calculating number of eggs\n#Using floating point numbers to make results more accurate\nprint 3.0 + 2.0 + 1.0 - 5.0 + 4.0 % 2.0 - 1.0 / 4.0 + 6.0\n\n#Asking a question\nprint \"Is it true that 3 + 2 < 5 - 7?\"\n\n#Math to answer the question\nprint 3 + 2 < 5 - 7\n\n#Asking and answering more calculation questions\nprint \"What is 3 + 2?\", 3 + 2\nprint \"What is 5 - 7?\", 5 - 7\n\n#More questions\nprint \"Oh, that's why it's False.\"\nprint \"How about some more.\"\n\n#More questions and calculations\nprint \"Is it greater?\", 5 > -2\nprint \"Is it greater or equal?\", 5 >= -2\nprint \"Is it less or equal?\", 5 <= -2"
},
{
"alpha_fraction": 0.618881106376648,
"alphanum_fraction": 0.632867157459259,
"avg_line_length": 15.371428489685059,
"blob_id": "80381943393c2508165defc54310ef67319f0b03",
"content_id": "89eb810b0ce80f2faeaa6d431e1e1fa36249ca86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 35,
"path": "/ex33.py",
"repo_name": "AugustinP/learn-python-the-hard-way",
"src_encoding": "UTF-8",
"text": "def printNumbersWhile(upto, inc):\n\ti = 1\n\tnumbers = []\n\n\twhile i <= upto:\n\t\tprint \"At the top i is %d\" % i\n\t\tnumbers.append(i)\n\n\t\ti = i + inc\n\t\tprint \"Numbers now: \", numbers\n\t\tprint \"At the bottom i is %d\" % i\n\n\tprint \"The numbers: \"\n\n\tfor num in numbers:\n\t\tprint num\n\ndef printNumbersFor(upto):\n\ti = 1\n\tnumbers = []\n\n\tfor i in range(i, upto):\n\t\tprint \"At the top i is %d\" % i\n\t\tnumbers.append(i)\n\n\t\tprint \"Numbers now: \", numbers\n\t\tprint \"At the bottom i is %d\" % i\n\n\tprint \"The numbers: \"\n\n\tfor num in numbers:\n\t\tprint num\n\nprintNumbersWhile(50, 10)\nprintNumbersFor(10)"
}
] | 4 |
GeorgeLychock/flask | https://github.com/GeorgeLychock/flask | 4043c50e74a724835cb8d5a8705830c443af0f26 | 6edd1dbfbf8d82638a19e526d25510ebe986b31c | 07798b9f3986654bd1c61c82111b02d5ca6fc404 | refs/heads/main | 2023-07-29T02:40:32.621793 | 2021-09-12T10:43:40 | 2021-09-12T10:43:40 | 396,216,398 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5934579372406006,
"alphanum_fraction": 0.5996884703636169,
"avg_line_length": 23.69230842590332,
"blob_id": "087cd8f894a5cf2621ea2dac4c915140182a1aa0",
"content_id": "014afe4caa5b6271f1ae9a0f051704662004ed67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1284,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 52,
"path": "/run.py",
"repo_name": "GeorgeLychock/flask",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nfrom flask import Flask, render_template, request, flash\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n\n return render_template(\"index.html\")\n\n\[email protected](\"/about\")\ndef about():\n data = []\n with open(\"data/recipes.json\", \"r\") as json_data:\n data = json.load(json_data)\n return render_template(\"about.html\", page_title=\"Aboot\", recipes=data)\n\[email protected](\"/about/<recipe_name>\")\ndef about_recipe(recipe_name):\n recipe = {}\n with open(\"data/recipes.json\", \"r\") as json_data:\n data = json.load(json_data)\n for obj in data:\n if obj[\"url\"] == recipe_name:\n recipe = obj\n return render_template(\"recipe.html\", recipe=recipe)\n\n\[email protected](\"/contact\", methods=[\"GET\", \"POST\"])\ndef contact():\n if request.method == \"POST\":\n print(\"Hello My Friend\")\n print(request.form)\n print(request.form.get(\"name\"))\n print(request.form[\"name\"])\n return render_template(\"contact.html\", page_title=\"Contact\")\n\n\[email protected](\"/careers\")\ndef careers():\n return render_template(\"careers.html\", page_title=\"Careers\")\n\n\nif __name__ == \"__main__\":\n app.run(\n host=os.environ.get(\"IP\", \"0.0.0.0\"),\n port=int(os.environ.get(\"PORT\", \"5000\")),\n debug=True\n )\n"
}
] | 1 |
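The `about` and `about_recipe` routes in the `run.py` above imply a specific shape for `data/recipes.json`: a JSON array of objects, each carrying the `url` slug that `about_recipe()` matches against. A minimal sketch of that file, written from Python; every field except `url` is an assumption, since the templates are not included:

```python
# Hypothetical minimal data/recipes.json for the routes in run.py;
# about_recipe() only requires the "url" key, the rest is illustrative.
import json

recipes = [
    {"url": "pancakes", "name": "Pancakes"},   # served at /about/pancakes
    {"url": "omelette", "name": "Omelette"},   # served at /about/omelette
]

with open("data/recipes.json", "w") as json_data:
    json.dump(recipes, json_data, indent=2)
```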
ezhk/modules | https://github.com/ezhk/modules | c09e9184169e240e9ebf43126e39953411bd8050 | c309721ff68e7333635076c0c39d201d04a9a88d | 7089c514e647a99cc5651510532ca638de62f0fa | refs/heads/master | 2023-05-25T22:36:35.584383 | 2023-05-14T07:49:34 | 2023-05-14T07:49:34 | 11,919,177 | 0 | 0 | null | 2013-08-06T08:38:07 | 2020-04-26T07:12:32 | 2023-05-14T07:49:35 | Perl | [
{
"alpha_fraction": 0.6299999952316284,
"alphanum_fraction": 0.6474999785423279,
"avg_line_length": 25.66666603088379,
"blob_id": "e9a68b104b1c1891c04ca7d249fa980165381f63",
"content_id": "40d88a06a59694741f7df67015e139439765a92d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 15,
"path": "/web.py",
"repo_name": "ezhk/modules",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport typing\nimport urllib3\n\n\ndef get_url_body(url: str, status_code: int = 200, headers: typing.Dict[str, typing.Any] = {}) -> str:\n \"\"\"Returns URL response data\"\"\"\n http = urllib3.PoolManager()\n\n r = http.request(\"GET\", url, headers=headers)\n if r.status != status_code:\n raise ValueError(f\"wrong status code: {r.status}\")\n\n return r.data.decode(\"utf-8\")\n"
},
{
"alpha_fraction": 0.5945152044296265,
"alphanum_fraction": 0.6092066764831543,
"avg_line_length": 38.77922058105469,
"blob_id": "4bb118128da14696367784f4a5cccf8962ea055f",
"content_id": "1067c159ae027ed48a5cff70a8d386be0242fc32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3063,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 77,
"path": "/API/Yandex.py",
"repo_name": "ezhk/modules",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport json\nimport typing\n\nimport urllib3\n\n\nclass API360:\n \"\"\"\n Class provides DNS API with 360 API's Yandex.\n Includes methods, that allow add/delete/show\n NS records and modify them.\n \"\"\"\n\n DNS_LIST = \"https://api360.yandex.net/directory/v1/org/%(org_id)s/domains/%(domain)s/dns?perPage=9999\"\n DNS_ADD = \"https://api360.yandex.net/directory/v1/org/%(org_id)s/domains/%(domain)s/dns\"\n DNS_EDIT = \"https://api360.yandex.net/directory/v1/org/%(org_id)s/domains/%(domain)s/dns/%(record_id)s\"\n DNS_DELETE = \"https://api360.yandex.net/directory/v1/org/%(org_id)s/domains/%(domain)s/dns/%(record_id)s\"\n\n def __init__(self, organization_id: int, domainname: str, token: str):\n self.token = token\n\n self.org_id = organization_id\n self.domainname = domainname\n\n self.http = urllib3.PoolManager()\n\n def list_domain(self) -> typing.Dict[str, typing.Any]:\n \"\"\"Get domain records list.\"\"\"\n\n _url = self.DNS_LIST % {\n \"org_id\": self.org_id,\n \"domain\": self.domainname,\n }\n\n r = self.http.request(\"GET\", _url, headers={\"Authorization\": f\"OAuth {self.token}\"})\n if r.status != 200:\n raise ValueError(f\"wrong status code response: {r.data}\")\n\n return json.loads(r.data.decode(\"utf-8\"))\n\n def del_domain(self, record_id: int) -> typing.Dict[str, typing.Any]:\n \"\"\"Remove record by ID and raise exception on error.\"\"\"\n _url = self.DNS_DELETE % {\"org_id\": self.org_id, \"domain\": self.domainname, \"record_id\": record_id}\n\n r = self.http.request(\"DELETE\", _url, headers={\"Authorization\": f\"OAuth {self.token}\"})\n if r.status != 200:\n raise ValueError(f\"wrong status code response: {r.data}\")\n\n return json.loads(r.data.decode(\"utf-8\"))\n\n def add_domain(self, address: str, name: str, record_type: str, ttl: int) -> typing.Dict[str, typing.Any]:\n \"\"\"Method create A/AAAA record type.\"\"\"\n\n _url = self.DNS_ADD % {\"org_id\": self.org_id, \"domain\": self.domainname}\n _body = json.dumps({\"address\": address, \"name\": name, \"type\": record_type, \"ttl\": ttl})\n\n r = self.http.request(\"POST\", _url, body=_body, headers={\"Authorization\": f\"OAuth {self.token}\"})\n if r.status != 200:\n raise ValueError(f\"wrong status code response: {r.data}\")\n\n return json.loads(r.data.decode(\"utf-8\"))\n\n def edit_domain(\n self, record_id: int, address: str, name: str, record_type: str, ttl: int\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"Method modify A/AAAA record type.\"\"\"\n\n _url = self.DNS_EDIT % {\"org_id\": self.org_id, \"domain\": self.domainname, \"record_id\": record_id}\n _body = json.dumps({\"address\": address, \"name\": name, \"type\": record_type, \"ttl\": ttl})\n\n r = self.http.request(\"POST\", _url, body=_body, headers={\"Authorization\": f\"OAuth {self.token}\"})\n if r.status != 200:\n raise ValueError(f\"wrong status code response: {r.data}\")\n\n return json.loads(r.data.decode(\"utf-8\"))\n"
},
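A short, hypothetical usage sketch for the `API360` class defined above; the organization id, domain, token, and record values are placeholders rather than working credentials:

```python
# Hypothetical driver for API/Yandex.py; every identifier is a placeholder.
from API.Yandex import API360

api = API360(organization_id=12345, domainname="example.com", token="OAUTH_TOKEN")

api.add_domain("192.0.2.10", "www", "A", ttl=3600)   # raises ValueError on non-200

# Each call returns the decoded JSON response; record ids for edit_domain()
# and del_domain() can be taken from the list_domain() output.
for record in api.list_domain().get("records", []):
    print(record)
```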
{
"alpha_fraction": 0.449438214302063,
"alphanum_fraction": 0.49438202381134033,
"avg_line_length": 16.799999237060547,
"blob_id": "f7eaa9597b0fc18fcf02e5f724aa80cdc6a96251",
"content_id": "87d8a58200287a61237b5dd003fbb8e775ebb740",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 5,
"path": "/API/__init__.py",
"repo_name": "ezhk/modules",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__version__ = \"0.2\"\n__all__ = [\"Yandex\"]\n"
},
{
"alpha_fraction": 0.6617646813392639,
"alphanum_fraction": 0.6617646813392639,
"avg_line_length": 16,
"blob_id": "f51564ce90c0e40f5343ad69cf2ea5aaed4de7b6",
"content_id": "1c076c24fc15f476fe34198add42cc23929818a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 8,
"path": "/README.md",
"repo_name": "ezhk/modules",
"src_encoding": "UTF-8",
"text": "# Perl\n - Yandex API DNS: API/YandexDNS.pm\n - Log: Log.pm\n - IPcalc: IPcalc.pm\n\n# Python\n- PDD DNS: API/Yandex.py\n- Web methods: web.pm\n"
}
] | 4 |
galap-1/midi-2-ps2 | https://github.com/galap-1/midi-2-ps2 | 644fd1f6bd5bfcfc49f200e62195919a56c708b0 | bedf82db02ba0ed25b1e1ffa49059292419897af | 442fa4111a3310aae49bef593e756b3523b75b88 | refs/heads/master | 2020-04-15T01:06:28.916060 | 2019-01-09T09:48:35 | 2019-01-09T09:48:35 | 164,263,828 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5525348782539368,
"alphanum_fraction": 0.6142542362213135,
"avg_line_length": 44.400001525878906,
"blob_id": "135cd0f110c7e06bdc062dab7e64f4ae564558ea",
"content_id": "bd2859d7c1a69e36bb84bed52fd2a55baff33142",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1361,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 30,
"path": "/midi-2-ps2.py",
"repo_name": "galap-1/midi-2-ps2",
"src_encoding": "UTF-8",
"text": "import random\n\nPianoOffset = 60\n#This is what note the low C is on the piano keyboard\n\nPianoKeyNoteTable = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C^', 'C#^','D^', 'D#^', 'E^', 'F^', \n\t\t'F#^', 'G^', 'G#^', 'A^', 'A#^', 'B^', 'C^^']\n#the relevant 25 notes in order\n\nTypingKeyLetterTable = ['Z', 'S', 'X', 'D', 'C', 'V', 'G', 'B', 'H', 'N', 'J', 'M', 'Q', '2', 'W', '3', 'E', 'R', '5', 'T', \n\t\t'6', 'Y', '7', 'U', 'I']\n#the typing keyboard letters associated with the corresponding note in KeyNoteTable\n\nTypingKeyCommandTable = [0x1A, 0x1B, 0x22, 0x23, 0x21, 0x2A, \n\t\t0x34, 0x32, 0x33, 0x31, 0x3B, 0x3A, 0x15, 0x1E, 0x1D, 0x26, 0x24, 0x2D, 0x2E, 0x2C, 0x36, 0x25, 0x3D, 0x3C, 0x43]\n#the hex ps/2 key command\n\nTypingReleaseCommandHex = 0xF0\n#the hex ps/2 key release command\n\nMidiNote = random.randint(60,84)\n\nInputNote = MidiNote - PianoOffset\n#align the midi input to the beginning of the 25 note output range\n\nprint ('Key On ' + str(MidiNote) + ' Note: ' + str(PianoKeyNoteTable[InputNote]) + ' Character: ' + \n\tstr(TypingKeyLetterTable[InputNote]) + ' Command: ' + str(hex(TypingKeyCommandTable[InputNote])))\n\nprint ('Key Off ' + str(MidiNote) + ' Note: ' + str(PianoKeyNoteTable[InputNote]) + ' Character: ' + \n\tstr(TypingKeyLetterTable[InputNote]) + ' Command: ' + str(hex(0xF0)) + ' ' + str(hex(TypingKeyCommandTable[InputNote])))"
},
{
"alpha_fraction": 0.4443882703781128,
"alphanum_fraction": 0.5437310338020325,
"avg_line_length": 31.91666603088379,
"blob_id": "898410b4ff91840b89936f431089eb6c7a4af6df",
"content_id": "4035f50e7b28454cead350ba9d7a697c9e430dd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3956,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 120,
"path": "/miditest_with_output.py",
"repo_name": "galap-1/midi-2-ps2",
"src_encoding": "UTF-8",
"text": "\n\n\nimport sys\nimport time\n\nPianoOffset = 60\n#This is what note the low C is on the piano keyboard\n\nPianoKeyNoteTable = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C^', 'C#^','D^', 'D#^', 'E^', 'F^', \n\t\t'F#^', 'G^', 'G#^', 'A^', 'A#^', 'B^', 'C^^']\n#the relevant 25 notes in order\n\nTypingKeyLetterTable = ['Z', 'S', 'X', 'D', 'C', 'V', 'G', 'B', 'H', 'N', 'J', 'M', 'Q', '2', 'W', '3', 'E', 'R', '5', 'T', \n\t\t'6', 'Y', '7', 'U', 'I']\n#the typing keyboard letters associated with the corresponding note in KeyNoteTable\n\nTypingKeyCommandTable = [0x1A, 0x1B, 0x22, 0x23, 0x21, 0x2A, \n\t\t0x34, 0x32, 0x33, 0x31, 0x3B, 0x3A, 0x15, 0x1E, 0x1D, 0x26, 0x24, 0x2D, 0x2E, 0x2C, 0x36, 0x25, 0x3D, 0x3C, 0x43]\n#the hex ps/2 key command\n\nTypingReleaseCommandHex = 0xF0\n#the hex ps/2 key release command\n\n\n#the data in the following are used for bitbanging the ps/2 data line. first bit is 0, then data sent least significant bit first, then a parity bit to ensure the message has even parity once the ending by (1) is sent\n\nTypingReleaseTable = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]\n\nTypingKeyTable = [\n\t\t\t\t[0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1],\n\t\t\t\t[0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n\t\t\t\t[0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1],\n\t\t\t\t[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1],\n\t\t\t\t[0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1],\n\t\t\t\t[0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1]]\n\ndef SendTypingKey():\n\tif 0 <= InputNote <= 24:\n\t\tfor i in range(0, 10):\n\t\t\tprint(TypingKeyTable[InputNote][i], end = \" \")\n\t\tprint(\"\\n\")\t\t\t\t\t\t\t\t#^ don't start a new line every time\n\t\t#^ move the cursor to the next line\n\telse: \n\t\tprint(\"Note not in range\")\n\t\n\t\ndef SendTypingRelease():\n\tif 0 <= InputNote <= 24:\n\t\tfor i in range(0, 10):\n\t\t\tprint(TypingReleaseTable[i], end = \" \")\n\t\tprint(\"\\n\")\t\t\t\t\t\t#^ don't start a new line every time\n\t\t#^ move the cursor to the next line\n\n\n\n\nfrom rtmidi.midiutil import open_midiinput\n\n\n\n\n# Prompts user for MIDI input port, unless a valid port number or name\n# is given as the first argument on the command line.\n# API backend defaults to ALSA on Linux.\nport = sys.argv[1] if len(sys.argv) > 1 else None\n\ntry:\n midiin, port_name = open_midiinput(port)\nexcept (EOFError, KeyboardInterrupt):\n sys.exit()\n\nprint(\"Entering main loop. 
Press Control-C to exit.\")\ntry:\n\ttimer = time.time()\n\twhile True:\n\t\t\tRawMessage = midiin.get_message()\n\n\t\t\tif RawMessage:\n\t\t\t\tMidiMessage, deltatime = RawMessage\n\t\t\t\tInputNote = MidiMessage[1] - PianoOffset\n\t\t\t\tif MidiMessage[0] == 144:\n\t\t\t\t\tNoteOn = True\n\t\t\t\t\tNoteOff = False\n\t\t\t\t\tprint ('Key On ' + str(InputNote) + ' Note: ' + str(PianoKeyNoteTable[InputNote]) + ' Character: ' + \n\t\t\t\t\t\tstr(TypingKeyLetterTable[InputNote]) + ' Command: ' + str(hex(TypingKeyCommandTable[InputNote])))\n\t\t\t\t\tSendTypingKey()\n\t\t\t\tif MidiMessage[0] == 128:\n\t\t\t\t\tNoteOn = False\n\t\t\t\t\tNoteOff =True\n\t\t\t\t\tprint ('Key Off ' + str(InputNote) + ' Note: ' + str(PianoKeyNoteTable[InputNote]) + ' Character: ' + \n\t\t\t\t\t\tstr(TypingKeyLetterTable[InputNote]) + ' Command: ' + str(hex(0xF0)) + ' ' + str(hex(TypingKeyCommandTable[InputNote])))\n\t\t\t\t\tSendTypingRelease()\n\t\t\t\t\tSendTypingKey()\n \n\n\ttime.sleep(0.01)\n\t\nexcept KeyboardInterrupt:\n\tprint('')\nfinally:\n\tprint(\"Exit.\")\n\tmidiin.close_port()\ndel midiin\n\n\n\n"
},
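The bit tables in `miditest_with_output.py` above follow the PS/2 frame layout the file's comment describes: a 0 start bit, eight data bits sent least-significant-bit first, a parity bit, and a 1 stop bit. A small sketch (not part of the repo) that derives the same rows from the scan codes, handy as a cross-check of the hand-written tables:

```python
# Derive the 11-bit PS/2 frame for a scan code: start bit 0, 8 data bits
# LSB-first, odd parity over the data bits, stop bit 1.
def ps2_frame(scancode):
    data = [(scancode >> i) & 1 for i in range(8)]  # least significant bit first
    parity = 1 - (sum(data) % 2)                    # makes data + parity odd
    return [0] + data + [parity, 1]

# Cross-checks against the tables above:
assert ps2_frame(0xF0) == [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]   # release prefix
assert ps2_frame(0x1A) == [0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1]   # 'Z', first row
assert ps2_frame(0x43) == [0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1]   # 'I', last row
```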
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "76ea6619bf6c81cca31edf2466ce5f95fabc89e1",
"content_id": "62e75d8932e03cb408ce5ba0aa6f321672189f8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/README.md",
"repo_name": "galap-1/midi-2-ps2",
"src_encoding": "UTF-8",
"text": "# midi-2-ps2"
},
{
"alpha_fraction": 0.6361111402511597,
"alphanum_fraction": 0.6861110925674438,
"avg_line_length": 12.84615421295166,
"blob_id": "7191ff1adf7fefc2c6f0288e41385db12c6bb12f",
"content_id": "981863c8f39f7909f64a831ec335c5c3c4a255b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 26,
"path": "/gpiotest.py",
"repo_name": "galap-1/midi-2-ps2",
"src_encoding": "UTF-8",
"text": "import time\n\n\ntry:\n\t\timport RPi.GPIO as GPIO\nexcept RuntimeError:\n\tprint(\"Error. Try running with sudo\")\n\t\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(12, GPIO.OUT, initial = 0)\n\nEndTime = time.time() + 1\ncounter = 0\n\nwhile time.time() < EndTime:\n\n\tGPIO.output(12, 1)\n\ttime.sleep(0.00005)\n\tGPIO.output(12, 0)\n\tcounter = counter + 1\n\t\n\nGPIO.cleanup()\n\nprint (counter)\n"
}
] | 4 |
berguner/pypiper | https://github.com/berguner/pypiper | 518793ca18c8deb716952f992217e86e68013f76 | 653216887cb2b2ad8e9119b76f40b39da58ec115 | 6ffd31f8c0b516c2f973febfbd213b0d06c2d27c | refs/heads/master | 2020-05-17T00:15:25.574390 | 2019-03-26T02:57:54 | 2019-03-26T02:57:54 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7628278136253357,
"alphanum_fraction": 0.7651083469390869,
"avg_line_length": 39.476924896240234,
"blob_id": "b5d698f6f64b923ce31374bc42d15aef8d2dc9e5",
"content_id": "c39e7b7a24e7626d414beb0e78a0fe9e625112a3",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2631,
"license_type": "permissive",
"max_line_length": 320,
"num_lines": 65,
"path": "/docs/README.md",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "# <img src=\"img/pypiper_logo.svg\" class=\"img-header\">\n\n[](http://pepkit.github.io)\n\n## Introduction\n\nPypiper is a **development-oriented** pipeline framework. Pypiper pipelines are:\n\n1. written in pure python, so they require learning no new language;\n2. simple to update and maintain, so they respond well to changes;\n3. simple to understand for an outsider, so they can be approached by others.\n\n\nThese traits make pypiper ideally suited for **pipelines under active development**.\n\nWith Pypiper, **simplicity is paramount**. Prerequisites are few: base python and 2 common packages (`pyyaml` and `psutil`). It should take fewer than 15 minutes to build your first pipeline and only an hour or two to learn the advanced features.\nPypiper provides automatic restartability, process monitoring for time and memory use, status monitoring, copious log output, robust error handling, easy debugging tools, guaranteed file output integrity, and a bunch of useful pipeline development helper functions. Read more about the [pypiper philosophy](philosophy).\n\n## Installing\n\nRelease versions are posted on the GitHub [pypiper releases page](https://github.com/databio/pypiper/releases). You can install the latest release directly from PyPI using `pip`.\n\nGlobal scope for single user:\n```{console}\npip install --user --upgrade piper\n```\n\nWithin an active virtual environment:\n```{console}\npip install --upgrade piper\n```\n\n## Quick start\n\nTo employ pypiper, you build something like a shell script, but pass the commands through the `run` method on a `PipelineManager` object. Build your pipeline in **pure python**:\n\n```{python}\n#!/usr/bin/env python\n\nimport pypiper\noutfolder = \"hello_pypiper_results\" # Choose a folder for your results\n\n# Create a PipelineManager, the workhorse of pypiper\npm = pypiper.PipelineManager(name=\"hello_pypiper\", outfolder=outfolder)\n\n# Timestamps to delineate pipeline sections are easy:\npm.timestamp(\"Hello!\")\n\n# Now build a command and pass it to pm.run()\ntarget_file = \"hello_pypiper_results/output.txt\"\ncommand = \"echo 'Hello, Pypiper!' > \" + target_file\npm.run(command, target_file)\n\npm.stop_pipeline()\n```\n\nThen invoke your pipeline via the command-line:\n\n```{console}\npython my_pipeline.py --help\n```\n\n## Pypiper strengths\n\nPypiper differs from existing frameworks in its focus on **simplicity**. Pypiper requires learning no new language, as **pipelines are written in pure python**. Pypiper is geared toward **developing pipelines** that are contained in a single file, easy to update, and easy to understand. Read more about\n"
},
{
"alpha_fraction": 0.79440838098526,
"alphanum_fraction": 0.7961028218269348,
"avg_line_length": 54.328125,
"blob_id": "cf4146bc4143d16fe1f252b87126db4dbf6ffeae",
"content_id": "915cf01d386e8e96b7a0a13e4a422aa324975dba",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3541,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 64,
"path": "/docs/philosophy.md",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "# Pypiper's development philosophy\n\n## Who should use Pypiper?\n\nThe target audience for pypiper is an individual who wants to build a basic\npipeline, but **wants to do better job than just writing a shell script, without\nrequiring hours of energy to learn a new language or system**. Many\nbioinformatics pipelines are written by students or technicians who don't have\ntime to learn a full-scale pipelining framework, so\nthey just end up using simple bash scripts to piece together commands because\nthat seems the most accessible. Pypiper tries to give 80% of the benefits of a\nprofessional-scale pipelining system while requiring very little additional\neffort.\n\nIf you have a shell script that would benefit from a layer of \"handling code\",\nPypiper helps you convert that set of shell commands into a production-scale\nworkflow, automatically handling the annoying details (restartablilty, file\nintegrity, logging) to make your pipeline robust and restartable.\n\nIf you need a full-blown, datacenter-scale environment that can do everything,\nlook elsewhere. Pypiper's strength is its simplicity. If all you want is a\nshell-like script, but now with the power of python, some built-in benefits, and\nsyntactic sugar, then Pypiper is for you.\n\n## What Pypiper does NOT do\n\nPypiper tries to exploit the [Pareto principle](https://en.wikipedia.org/wiki/Pareto_principle) -- you'll get 80% of the\nfeatures with only 20% of the work of other pipeline management systems. So,\nthere are a few things Pypiper deliberately doesn't do:\n\n\n- Task dependencies. Pypiper runs sequential pipelines. We view this as an\n advantage because it makes the pipeline easier to write, easier to understand,\n and easier to debug -- critical things for pipelines that are still under\n active development (which is, really, *all* pipelines). For developmental\n pipelines, the complexity introduced by task dependencies is not worth the\n minimal benefit -- read this [post on parallelism in\n bioinformatics](http://databio.org/posts/paralellism_in_bioinformatics.html)\n for an explanation.\n\n- Cluster submission. Pypiper pipelines are scripts. You can use whatever system\n you want to run them on whatever computing resources you have. We have divided\n cluster resource management into a separate project called\n [looper](http://looper.readthedocs.io/).Pypiper builds individual, single-sample\n pipelines that can be run one sample at a time.\n [Looper](http://looper.readthedocs.io/) then processes groups of samples,\n submitting appropriate pipelines to a cluster or server. The two projects are\n independent and can be used separately, keeping things simple and modular.\n\n\n## Yet another pipeline system?\n\nAs I began to put together production-scale pipelines, I found a lot of relevant\npipelining systems, but was universally disappointed. For my needs, they were\nall overly complex. I wanted something **simple enough to quickly write and\nmaintain** a pipeline without having to learn a lot of new functions and\nconventions, but robust enough to handle requirements like restartability and\nmemory usage monitoring. Everything related was either a pre-packaged pipeline\nfor a defined purpose, or a heavy-duty development environment that was overkill\nfor a simple pipeline. 
Both of these seemed to be targeted toward ultra-\nefficient uses, and neither fit my needs: I had a set of commands already in\nmind -- I just needed a wrapper that could take that code and make it\nautomatically restartable, logged, robust to crashing, easy to debug, and so\nforth.\n"
},
{
"alpha_fraction": 0.6960784196853638,
"alphanum_fraction": 0.7235293984413147,
"avg_line_length": 34.96154022216797,
"blob_id": "15c6e944692c5296813bddaff3e05373c8772f09",
"content_id": "f8f0690cf418a20320760efba652eae480f42e83",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5610,
"license_type": "permissive",
"max_line_length": 485,
"num_lines": 156,
"path": "/docs/changelog.md",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "# Changelog\n\n- **v0.10.0** (*2019-03-22*):\n\n - Fixed a bug that raised exception with empty commands\n \n - Fixed the pipeline profiling issues\n\n - Major updates to internal systems: Switch to `attmap`\n\n - Revamped way of handling child subprocesses which should lead to more\n efficient memory monitoring of piped subprocesses, and more consistent\n handling of rogues subprocesses during pipeline failure.\n\n - Added force mode to ngstk `gzip` and `pigz` use.\n\n - Changed documentation from sphinx to mkdocs.\n\n - Fixed a bug with python3 output buffering\n\n - Implement multi-target commands\n\n - Fixed a bug that had prevented new start mode from working in certain cases.\n\n - Allow user to change units of memory passed in with default pypiper cli.\n\n- **v0.9.4** (*2019-01-31*):\n\n - Point release to PyPI for README rendering.\n\n- **v0.9.3** (*2019-01-31*):\n\n - Simple point release update to fix PyPI landing page.\n\n- **v0.9.2** (*2019-01-30*):\n\n - Never echo protected-looking attribute request.\n\n- **v0.9.1** (*2019-01-29*):\n\n - Fixed a bug in NGSTk that caused errors for read counting functions on \n MACOS. MACOS `wc` returns leading whitespace, which caused these functions\n to fail.\n\n- **v0.9.0** (*2018-11-19*):\n\n - Use `psutil` to track aggregate memory usage for processes that spawn\n children. This results in accurate memory records for these processes.\n\n - Individual commands in a string of commands connected by shell pipes are\n now treated as individual commands, and and monitored individually for\n time and memory, and if a single component, fails, the entire string will\n fail. Previously, only the final return command was recorded, as in `bash`.\n\n - Various other small improvements (like waiting checking for dynamic recover\n flags)\n\n\n- **v0.8.1** (*2018-09-20*):\n\n - Fixed a bug that caused a problem for some pipelines adding groups of pypiper args.\n \n - Improved the `run` waiting method to immediately stop upon job\n completion, rather than minute-increment polling. This should improve\n performance particularly in pipelines with many, medium-runtime steps, and\n improve accuracy of timing profiles.\n\n\n- **v0.8.0** (*2018-06-15*):\n\n - Implemented 'new start' mode.\n\n - Improved error messages and exception handling for missing child software.\n\n - Clarified the built-in required vs. optional args by allowing pipeline authors to specify which of the pypiper args are required. 
The command-line help UI now displays these correctly as 'required arguments' instead of incorrectly as 'optional arguments'.\n\n - Corrected the sort order of added arguments, so they are listed in the help menu more naturally.\n\n - Fixed a bug that caused an erroneous error message indicating missing pypiper args.\n\n - Clarified the license is BSD2\n\n - Fixed a bug that neglected to list pyyaml as a dependency\n\n- **v0.7.2** (*2018-06-05*):\n\n - Implemented the 'report object' function.\n\n - Cleanup files are now relative, so a moved folder could still be cleaned.\n\n - Fixed a bug that prevented install if pypandoc was not installed\n\n - Fixed a bug that caused an error in containers where /proc wasn't accessible\n\n\n- **v0.7.1** (*2018-02-27*):\n\n - Package cleanup for Pypi.\n\n- **v0.7.0** (*2017-12-12*):\n\n - Standardize `NGSTk` function naming.\n\n - Introduce `Stage` as a model for a logically related set of pipeline processing steps.\n\n - Introduce `Pipeline` framework for automated processing phase execution and checkpointing.\n\n - Add ability to start and/or stop a pipeline at arbitrary checkpoints.\n\n - Introduce new state for a paused/halted pipeline.\n\n - Improve spawned process shutdown to avoid zombie processes.\n\n- **v0.6** (*2017-08-24*):\n\n - Adds 'dynamic recovery' capability. For jobs that are terminated by an interrupt, such as a SIGINT or SIGTERM (as opposed to a failed command), pypiper will now set a dynamic recovery flags. These jobs, when restarted, will automatically pick up where they left off, without requiring any user intervention. Previously, the user would have to specify recover mode (`-R`). Now, recover mode forces a recover regardless of failure type, but interrupted pipelines will auto-recover.\n\n - Pypiper now appropriately adds cleanup files intermediate files for failed runs. It adds them to the cleanup script.\n\n - Improves error messages so only a single exception is raised with a more direct relevance to the user/\n\n - Pypiper will automatically remove existing flags when the run starts, eliminating the earlier issue of confusion due to multiple flags present on runs that were restarted.\n\n - Fixes a bug that caused a pipeline to continue if a SIGTERM is given during a process that was marked `nofail`.\n\n - Pypiper now can handle multiple SIGTERMs without one canceling the shutdown procedure begun by the other.\n\n - Major improvements to documentation and tutorials.\n\n - Adds `report_figure` function.\n\n- **v0.5** (*2017-07-21*):\n\n - Adds preliminary support for handling docker containers\n\n - Updates docs, adds Hello World example\n\n - Adds 'waiting' flag\n\n - Eliminates extra spaces in reported results\n\n - Pypiper module is version aware\n\n - Updates Success time format to eliminate space\n\n - Improves efficiency in some ngstk merging functions\n\n- **v0.4** (*2017-01-23*):\n\n - First major public release!\n\n - Revamps pypiper args\n\n - Adds parallel compression/decompression with pigz\n\n - Various small bug fixes and speed improvements\n"
},
{
"alpha_fraction": 0.7080292105674744,
"alphanum_fraction": 0.7080292105674744,
"avg_line_length": 21.83333396911621,
"blob_id": "172017dda54c63847afb182c083ad28c6c5ebd87",
"content_id": "e1433f2439a6a0f4479f898eff7f96b3f8e47236",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 6,
"path": "/pypiper/const.py",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "\"\"\" Pypiper constants. \"\"\"\n\n\nCHECKPOINT_EXTENSION = \".checkpoint\"\nPIPELINE_CHECKPOINT_DELIMITER = \"_\"\nSTAGE_NAME_SPACE_REPLACEMENT = \"-\"\n"
},
{
"alpha_fraction": 0.7720465660095215,
"alphanum_fraction": 0.7753743529319763,
"avg_line_length": 74.125,
"blob_id": "9cb63dcd48879771d87c1385f31107fa81a0e470",
"content_id": "88668ed20c6f15837bce41ef490e68a4a0b1835b",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 601,
"license_type": "permissive",
"max_line_length": 182,
"num_lines": 8,
"path": "/README.md",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "<img src=\"https://raw.githubusercontent.com/databio/pypiper/master/logo_pypiper.svg?sanitize=true\" alt=\"Pypiper logo\" height=\"70\" align=\"left\"/>\n\n# Pypiper\n\n[](http://pypiper.readthedocs.org/en/latest/?badge=latest)\n[](https://travis-ci.org/databio/pypiper)\n\nA lightweight python toolkit for gluing together restartable, robust command line pipelines. The best place to learn more is at the [documentation](http://code.databio.org/pypiper/).\n"
},
{
"alpha_fraction": 0.729411780834198,
"alphanum_fraction": 0.729411780834198,
"avg_line_length": 23.285715103149414,
"blob_id": "825b748ae064499823a1609a906a18d9962c6d47",
"content_id": "86bcf9b90929a321a4ce7d53887d62982c763489",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 7,
"path": "/pypiper/__init__.py",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "from ._version import __version__\nfrom .manager import *\nfrom .ngstk import *\nfrom .utils import *\nfrom .pipeline import *\nfrom .exceptions import *\nfrom .stage import *\n"
},
{
"alpha_fraction": 0.7658612728118896,
"alphanum_fraction": 0.7675995230674744,
"avg_line_length": 54.8543701171875,
"blob_id": "3295deb1197d24f530ac80c8d33553c1cbc4bfa5",
"content_id": "586b926fed12b5c754c3c772174eb60ad877ef93",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 5753,
"license_type": "permissive",
"max_line_length": 423,
"num_lines": 103,
"path": "/docs/intro.rst",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": ".. |logo| image:: _static/logo_pypiper.svg\n\n|logo| Introduction\n=========================\n\nPypiper is a lightweight python toolkit for gluing together restartable command\nline pipelines. With Pypiper, **simplicity is paramount**. It should take less\nthan 15 minutes to build your first pipeline. Learning all the\n:doc:`features and benefits <features>` takes just an hour or two. At\nthe same time, Pypiper provides immediate advantages over a\nsimple shell script.\n\nPypiper is an example of a simple `bioinformatics pipeline framework\n<http://databio.org/pipeline_frameworks/>`_. It differs from existing frameworks in its focus on **simplicity** and **sequential pipelines**. \nTo employ pypiper, you will just take your bash script and pass those commands through the ``run`` method on a ``PipelineManager`` object. This will give you automatic restartability, process monitoring for memory use and compute time, pipeline status monitoring, copious log output, robust error handling, easy debugging tools, guaranteed file output integrity, and a bunch of useful pipeline development helper functions.\n\nA simple example pipeline\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nTo demonstrate the simplicity, take a look at a very simple but complete pipeline:\n\n.. literalinclude:: ../../example_pipelines/hello_pypiper.py\n\nThere's nothing complex here: we are choosing an output folder, and running a single command, much like you may do in a shell script. Building pypiper pipelines is as simple as stringing together shell commands. That's it. We'll actually run this example pipeline in the ``Hello World`` section.\n\n\nWho should use Pypiper?\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe target audience for pypiper is an individual student or researcher or lab\nthat wants to build a basic pipeline, but to do a better job than just writing\na shell script. Many bioinformatics pipelines are written by students or\ntechnicians who don't have time to learn a full-scale pipelining framework, so\nthey just end up using simple bash scripts to piece together commands because\nthat seems the most accessible. Pypiper tries to give 80% of the benefits of a\nprofessional-scale pipelining system while requiring very little additional\neffort.\n\nIf you have a shell script that would benefit from a layer of \"handling code\",\nPypiper helps you convert that set of shell commands into a production-scale\nworkflow, automatically handling the annoying details (restartablilty, file\nintegrity, logging) to make your pipeline robust and restartable.\n\nIf you need a full-blown, datacenter-scale environment that can do everything,\nlook elsewhere. Pypiper's strength is its simplicity. If all you want is a\nshell-like script, but now with the power of python, some built-in benefits, and\nsyntactic sugar, then Pypiper is for you.\n\nThis emphasis on simplicity provides a few advantages:\n\n- Write your pipeline in pure python (no new language to learn).\n- Pypiper is easy to learn (3 or 4 functions will be all you need for simple\n stuff)\n- Pypiper does not assume you want a complex dependency structure. You write a\n simple **ordered sequence of commands**, just like a shell script.\n\n\nWhat Pypiper does NOT do\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nPypiper tries to exploit the `Pareto principle\n<https://en.wikipedia.org/wiki/Pareto_principle>`_ -- you'll get 80% of the\nfeatures with only 20% of the work of other pipeline management systems. So,\nthere are a few things Pypiper deliberately doesn't do:\n\n\n- Task dependencies. 
Pypiper runs sequential pipelines. If you want to implement\n a pipeline with complex task dependencies, there are better options. We view\n this as an advantage because it makes the pipeline easier to write, easier to\n understand, and easier to debug -- critical things for pipelines that are\n still under active development (which is, really, *all* pipelines). For\n developmental pipelines, the complexity cost of encoding task dependencies is\n not worth the minimal benefit -- read this `post on parallelism in\n bioinformatics <http://databio.org/posts/paralellism_in_bioinformatics.html>`_\n for an explanation.\n\n- Cluster submission. Pypiper does not handle any sort of cluster job submission\n or resource requesting. Instead, we have divided this into a separate project\n called `looper <http://looper.readthedocs.io/>`_. This makes a modular system:\n you can use whatever system you want for cluster management. `Pypiper\n <http://pypiper.readthedocs.io/>`_ builds individual, single-sample pipelines\n that can be run one sample at a time. `Looper\n <http://looper.readthedocs.io/>`_ then processes groups of samples, submitting\n appropriate pipelines to a cluster or server. The two projects are independent\n and can be used separately, but they are most powerful when combined. This\n keeps things simple and modular.\n\n\nYet another pipeline system?\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nAs I began to put together production-scale pipelines, I found a lot of relevant\npipelining systems, but was universally disappointed. For my needs, they were\nall overly complex. I wanted something **simple enough to quickly write and\nmaintain** a pipeline without having to learn a lot of new functions and\nconventions, but robust enough to handle requirements like restartability and\nmemory usage monitoring. Everything related was either a pre-packaged pipeline\nfor a defined purpose, or a heavy-duty development environment that was overkill\nfor a simple pipeline. Both of these seemed to be targeted toward ultra-\nefficient uses, and neither fit my needs: I had a set of commands already in\nmind -- I just needed a wrapper that could take that code and make it\nautomatically restartable, logged, robust to crashing, easy to debug, and so\nforth.\n"
},
{
"alpha_fraction": 0.7725597023963928,
"alphanum_fraction": 0.773361086845398,
"avg_line_length": 121.33333587646484,
"blob_id": "0c72daf94b8c33c26d176a67e28fc0b6044c5b6f",
"content_id": "96a49eac14b4d1e1935c0c305a5380e3953b98b7",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6239,
"license_type": "permissive",
"max_line_length": 940,
"num_lines": 51,
"path": "/docs/advanced-run-method.md",
"repo_name": "berguner/pypiper",
"src_encoding": "UTF-8",
"text": "# Run method options\n\nThe `PipelineManager.run()` function is the core part of `pypiper`. In its simplest case, all you need to provide is a command to run. But it can be much more powerful than that if you make use of these additional arguments:\n\n## The `cmd` argument\n\nNormally you just pass a string, but you can also pass a list of commands to `run`, like this:\n\n```\npm.run([cmd1, cmd2, cmd3])\n```\n\nPypiper will treat these commands as a group, running each one in turn (and monitoring them individually for time and memory use). The difference in doing it this way, rather than 3 separate calls to `run()` is that if the series does not complete, the entire series will be re-run. This is therefore useful to piece together commands that must all be run together.\n\n## The `target` and `lock_name` arguments\n\nIf you provide a `target` file, then `pypiper` will first check to see if that target exists, and only run the `command` if the `target` does not exist. To prevent two pipelines from running commands on the same target, `pypiper` will automatically derive a lock file name from your target file. You can use the `lock_name` argument to override this default. If you do not provide a `target`, then you will need to provide a `lock_name` argument because `pypiper` will not be able to derive one automatically.\n\n## The `shell` argument: Python subprocess types\n\nSince Pypiper runs all your commands from within python (using the `subprocess` python module), it's nice to be aware of the two types of processes that `subprocess` allows: **direct processes** and **shell processes**.\n\nBy default, Pypiper will guess which to use based on your command, so for most pipelines, you don't need to worry about it. However, how you write your commands has some implications for memory tracking, and advanced pipeline authors may want to control the process types that Pypiper uses, so this section covers how these subprocesses work.\n\n**Direct process**: A direct process is one that Python executes directly, from within python. Python retains control over the process completely. Wherever possible, you should use a direct subprocess because it enabling Python to monitor the memory use of the subprocess. This the preferable way of running subprocesses in Python. The disadvantage of direct subprocesses is that you may not use shell-specific operators in a direct subprocess. For instance, if you use an asterisk (`*`) for wildcard expansion, or a bracket (`>`) for output redirection, or a pipe (`|`) to link processes -- these are commands understood by a shell like Bash, and thus, cannot be run as direct subprocesses in Python.\n\n**Shell process**: In a shell process, Python first spawns a shell, and then runs the command in that shell. The spawned shell is then controlled by Python, but processes done by the shell are not. This allows you to use shell operators (`*`, `|`, `>`), but at the cost of the ability to monitor memory for each command independently, because Python does not have direct control over subprocesses run inside a subshell. \n\n### How pypiper handles shell subprocesses\n\nPypiper includes 2 nice provisions that help us deal with shell processes. First, pypiper divides commands with pipes (`|`) and executes them as *direct processes*. This enables you to pass a piped shell command, but still get the benefit of a direct process. 
Unless using the shell directly, with pypiper, each process in the pipe is monitored for return value, and for memory use individually, and this information will be reported in the pipeline log. Nice! Second, pypiper uses the `psutil` module to monitor memory of *all child processes*. That means when you use a shell process, we *do* monitor the memory use of that process (and any other processes it spawns), which gives us more accurate memory monitoring.\n\nYou can force Pypiper by specifying `shell=True` or `shell=False` to the `run` function, but really, you shouldn't have to. By default Pypiper will try to guess: if your command contains any of the shell process characters (`*` or `>`), it will be run in a shell. If it contains a pipe (`|`), it will be split and run as direct, piped subprocesses. Anything else will be run as a direct subprocess.\n\n## The `nofail` argument\n\nBy default, a command that fails will cause the entire pipeline to halt. If you want to provide a command that *should not* halt the pipeline upon failure, set `nofail=True`. `nofail` can be used to implement non-essential parts of the pipeline.\n \n\n\n## The `follow` argument\n\nThe `PipelineManager.run` function has an optional argument named `follow` that is useful for checking or reporting results from a command. To the `follow` argument you must pass a python function (which may be either a defined function or a `lambda` function). These *follow functions* are then coupled to the command that is run; the follow function will be called by python **if and only if** the command is run. \n\nWhy is this useful? The major use cases are QC checks and reporting results. We use a folllow function to run a QC check to make sure processes did what we expect, and then to report that result to the `stats` file. We only need to check the result and report the statistic once, so it's best to put these kind of checks in a `follow` function. Often, you'd like to run a function to examine the result of a command, but you only want to run that once, *right after the command that produced the result*. For example, counting the number of lines in a file after producing it, or counting the number of reads that aligned right after an alignment step. You want the counting process coupled to the alignment process, and don't need to re-run the counting every time you restart the pipeline. Because pypiper is smart, it will not re-run the alignment once it has been run; so there is no need to re-count the result on every pipeline run! \n\n*Follow functions* let you avoid running unnecessary processes repeatedly in the event that you restart your pipeline multiple times (for instance, while debugging later steps in the pipeline).\n\n## The `container` argument\n\nIf you specify a string here, `pypiper` will wrap the command in a `docker run` call using the given `container` image name.\n"
}
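A compact sketch that ties the `target`, `shell`, and `follow` behavior described in this page together. It is illustrative only: `bwa` and `samtools` are stand-in tools and the file names are made up; `checkprint` and `report_result` are existing `PipelineManager` helpers used here to record the check:

```python
# Sketch of coupling a one-time QC check to a command via follow=.
import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="demo_results")

target = "demo_results/aligned.bam"
# The '>' redirection makes pypiper run this as a shell process (see above).
cmd = "bwa mem ref.fa reads.fq > " + target

def count_alignments():
    # Runs if and only if the alignment command itself runs.
    n = pm.checkprint("samtools view -c " + target).strip()
    pm.report_result("Aligned_reads", n)

pm.run(cmd, target, follow=count_alignments)
pm.stop_pipeline()
```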
] | 8 |
nanophyr/osrs_customboard | https://github.com/nanophyr/osrs_customboard | e5598aa9eb9137b5c8d025acb9905fffa2b87b35 | 950066df3eacc13bfdf9a6c3c68a5b59aa88cb0f | 40b3bcb92e2abb96e22d8168c4306b85981e99f0 | refs/heads/master | 2020-05-18T05:57:51.076763 | 2019-05-13T11:26:27 | 2019-05-13T11:26:27 | 184,221,632 | 1 | 0 | MIT | 2019-04-30T08:20:56 | 2019-04-30T12:28:27 | 2019-04-30T12:29:35 | Python | [
{
"alpha_fraction": 0.6150943636894226,
"alphanum_fraction": 0.6226415038108826,
"avg_line_length": 17.85714340209961,
"blob_id": "909f67bbff53e9af3ec894f176e31d05e7d20709",
"content_id": "cb1048a1f40cd701a7213899a64a5b48a7ea3e05",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 14,
"path": "/common.py",
"repo_name": "nanophyr/osrs_customboard",
"src_encoding": "UTF-8",
"text": "from flask import * \nimport os\nimport scrape\n\napp = Flask(__name__)\n\[email protected]('/')\ndef home():\n flash(scrape.getTotal('nanoluck'))\n return render_template('home.html')\n\nif __name__ == '__main__':\n app.secret_key = os.urandom(24)\n app.run(debug=True)\n\n"
},
{
"alpha_fraction": 0.6323091983795166,
"alphanum_fraction": 0.6352824568748474,
"avg_line_length": 26.243244171142578,
"blob_id": "9df7cc792700b89705da2fbb1a9ed415eb38efeb",
"content_id": "b6c46fcb4861a526227f6fcebb857d46f5bdea01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1009,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 37,
"path": "/scrape.py",
"repo_name": "nanophyr/osrs_customboard",
"src_encoding": "UTF-8",
"text": "import urllib\nfrom bs4 import BeautifulSoup\n\ndef ripToLines():\n # kill all script and style elements\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n text = soup.get_text().splitlines()\n return text\n\ndef find_between( s, first, last):\n try:\n start = s.index(first) + len(first)\n end = s.index( last, start)\n return s[start:end]\n except ValueError:\n return \"\"\n\ndef ripToLines(soup):\n # kill all script and style elements\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n text = soup.get_text().splitlines()\n return text\n\n\n#returns total level for given user\ndef getTotal(user):\n url = \"https://secure.runescape.com/m=hiscore_oldschool/index_lite.ws?player=\" + user\n html = urllib.urlopen(url).read()\n soup = BeautifulSoup(html,features=\"html.parser\")\n text = ripToLines(soup)\n return find_between(str(text[0]), \",\" , \",\")\n\n\n#prints rank level exp\n#print str(text[0])\n\n"
}
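What `getTotal()` above actually parses: the `index_lite` hiscore endpoint returns one `rank,level,xp` CSV line per skill, with the overall skill first, so taking the text between the first two commas of line 0 yields the total level. A hedged illustration with a made-up response line:

```python
# Hypothetical first line of the hiscore response for some player.
from scrape import find_between

line = "743,2155,312456789"          # rank,level,xp for the Overall skill
print(find_between(line, ",", ","))  # -> "2155", the total level
```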
] | 2 |
bbhunter/HawkScan | https://github.com/bbhunter/HawkScan | 747160e643b29fd9d851efa780e387fa02fa105b | fdd10d89681cb4f9a3c3dc246811aa24ae0ca5af | 3e1f994a40396af26b9bde71b4523435b24af859 | refs/heads/master | 2020-05-09T14:55:11.297671 | 2019-04-12T11:54:13 | 2019-04-12T11:54:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5725980401039124,
"alphanum_fraction": 0.5954329967498779,
"avg_line_length": 42.79245376586914,
"blob_id": "49cae91ca7b2eff07e937d5dfb0db9ed7b31c95e",
"content_id": "12bcd8c2240eef6c7af8e99722ae68c459cb3f06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2321,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 53,
"path": "/README.md",
"repo_name": "bbhunter/HawkScan",
"src_encoding": "UTF-8",
"text": "# HawkScan\n\n\n\nSecurity Tool for Reconnaissance and Information Gathering on a website. (python 2.7)\n\nThis script use \"WafW00f\" to detect the WAF (https://github.com/EnableSecurity/wafw00f)\n\nThis script use \"Sublist3r\" to scan subdomains (https://github.com/aboul3la/Sublist3r)\n\n# Features\n - [x] URL fuzzing and dir/file detection\n - [x] Test backup/old file on all the files found (index.php.bak, index.php~ ...)\n - [x] Check header information\n - [x] Check DNS information\n - [x] Check whois information\n - [x] User-agent random or personal\n - [x] Extract files\n - [x] Keep a trace of the scan\n - [x] Check @mail in the website and check if @mails leaked\n - [x] CMS detection + version and vulns\n - [x] Subdomain Checker\n - [x] Backup system (if the script stopped, it take again in same place)\n - [x] WAF detection\n - [x] Add personal prefix\n - [x] auto update script\n\n# TODO\n - [ ] Testing website paramaters (attack, so no passive scan)\n - [ ] Get certificate (crypto keys...)\n - [ ] Recursif dir/file\n - [ ] Anonymous routing through some proxy (http/s proxy list)\n - [ ] Check github & pastebin\n - [ ] Spider link in website\n - [ ] Check if it's an user or a page\n - [ ] Backup file csv of scan\n - [ ] Work it with py2 and py3\n - [ ] add option \"-o\" to backup file in directory of your choice\n \n # Usage\n > pip install -r requirements.txt\n \n > usage: hawkscan.py [-h] [-u URL] [-w WORDLIST] [-s SUBDOMAINS] [-t THREAD] [-a USER_AGENT] [--redirect] [-p PREFIX]\n \n > optional arguments: \n > -h, --help show this help message and exit \n > -u URL URL to scan [required] \n > -w WORDLIST Wordlist used for URL Fuzzing [required] \n > -s SUBDOMAINS subdomain tester \n > -t THREAD Number of threads to use for URL Fuzzing. Default: 5 \n > -a USER_AGENT choice user-agent \n > --redirect For scan with redirect response like 301,302 \n > -p PREFIX add prefix in wordlist to scan\n"
},
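A hypothetical end-to-end invocation combining the flags documented in the README above (the target URL, wordlist, thread count, and prefix are placeholders):

> python hawkscan.py -u https://example.com -w wordlist.txt -t 10 -p admin_ --redirect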
{
"alpha_fraction": 0.5129932761192322,
"alphanum_fraction": 0.5256015658378601,
"avg_line_length": 34.27164840698242,
"blob_id": "3493272bd871c388086c5d0f87af277687420da7",
"content_id": "b40da177c13434b5d7adb648aa1cd66bcbebf47b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20780,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 589,
"path": "/hawkscan.py",
"repo_name": "bbhunter/HawkScan",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#system libs\nimport requests\nimport sys, os, re\nimport time\nimport ssl, OpenSSL\nimport socket\nimport pprint\nimport whois\nimport argparse\nfrom bs4 import BeautifulSoup\nimport json\nimport traceback\nimport csv\n#personal libs\nfrom config import PLUS, WARNING, INFO, LESS, LINE, FORBI, BACK\nfrom Queue import Queue\nfrom threading import Thread\nfrom fake_useragent import UserAgent\nimport wafw00f\ntry:\n from Sublist3r import sublist3r\nexcept Exception:\n traceback.print_exc()\n\n\ndef banner():\n print(\"\"\"\n _ _ _ _____ \n | | | | | | / ____| \n | |__| | __ ___ _| | _| (___ ___ __ _ _ __ \n | __ |/ _` \\ \\ /\\ / / |/ /\\___ \\ / __/ _` | '_ \\ \n | | | | (_| |\\ V V /| < ____) | (_| (_| | | | |\n |_| |_|\\__,_| \\_/\\_/ |_|\\_\\_____/ \\___\\__,_|_| |_|\n \n\nhttps://github.com/c0dejump/HawkScan\n-------------------------------------------------------------------\n \"\"\")\n\n\nenclosure_queue = Queue()\n\nrequests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n\n\n\"\"\"\nauto_update: for update the tool\n\"\"\"\ndef auto_update():\n au = raw_input(\"Do you want update it ? (y/n): \")\n if au == \"y\":\n os.system(\"git pull origin master\")\n else:\n pass\n\n\"\"\"\nMail:\nget mail adresse in web page during the scan and check if the mail leaked\n\"\"\"\ndef mail(req, directory, all_mail):\n mails = req.text\n # for all @mail\n reg = re.compile(r\"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\")\n search = re.findall(reg, mails)\n for mail in search:\n #check if email pwned\n if mail:\n datas = { \"act\" : mail, \"accounthide\" : \"test\", \"submit\" : \"Submit\" }\n req_ino = requests.post(\"https://www.inoitsu.com/\", data=datas)\n if \"DETECTED\" in req_ino.text:\n pwnd = \"{}: pwned ! 
\".format(mail)\n if pwnd not in all_mail:\n all_mail.append(pwnd)\n else:\n no_pwned = \"{}: no pwned \".format(mail)\n if no_pwned not in all_mail:\n all_mail.append(no_pwned)\n with open(directory + '/mail.csv', 'a+') as file:\n if all_mail is not None and all_mail != []:\n writer = csv.writer(file)\n for r in all_mail:\n r = r.split(\":\")\n writer.writerow(r)\n\"\"\"\nSubdomains:\nCheck subdomains with the option -s (-s google.fr)\nscript use sublit3r to scan subdomain (it's a basic scan)\n\"\"\"\ndef subdomain(subdomains):\n print \"search subdomains:\\n\"\n sub_file = \"sublist/\" + subdomains + \".txt\"\n sub = sublist3r.main(subdomains, 40, sub_file, ports= None, silent=False, verbose= False, enable_bruteforce= False, engines=None)\n print LINE\n time.sleep(2)\n\n\"\"\" Get sitemap.xml of website\"\"\"\ndef sitemap(req, directory):\n soup = BeautifulSoup(req.text, \"html.parser\")\n with open(directory + '/sitemap.xml', 'w+') as file:\n file.write(str(soup).replace(' ','\\n'))\n\n\"\"\"\nWAF:\nDetect if the website use a WAF with tools \"wafw00f\"\n\"\"\"\ndef detect_waf(url, directory):\n detect = False\n message = \"\"\n os.system(\"wafw00f {} > {}/waf.txt\".format(url, directory))\n with open(directory + \"/waf.txt\", \"r+\") as waf:\n for w in waf:\n if \"behind\" in w:\n detect = True\n message = w\n else:\n pass\n print INFO + \"WAF\"\n print LINE\n if detect == True:\n print \"{}{}\".format(WARNING, message)\n print LINE\n else:\n print \"{}This website dos not use WAF\".format(LESS)\n print LINE\n\n\"\"\"\nCMS:\nDetect if the website use a CMS\n\"\"\"\ndef detect_cms(url):\n print INFO + \"CMS\"\n print LINE\n req = requests.get(\"https://whatcms.org/APIEndpoint/Detect?key=1481ff2f874c4942a734d9c499c22b6d8533007dd1f7005c586ea04efab2a3277cc8f2&url={}\".format(url))\n if \"Not Found\" in req.text:\n print \"{} this website does not seem to use a CMS \\n\".format(LESS)\n print LINE\n else:\n reqt = json.loads(req.text)\n result = reqt[\"result\"].get(\"name\")\n v = reqt[\"result\"].get(\"version\")\n if v:\n print \"{} This website use \\033[32m{} {} \\033[0m\\n\".format(PLUS, result, v)\n cve_cms(result, v)\n print LINE\n else:\n print \"{} This website use \\033[32m{}\\033[0m but nothing version found \\n\".format(PLUS, result)\n print LINE\n\n\"\"\"\nCVE_CMS:\nCheck CVE with cms and version detected by the function 'detect_cms'.\n\"\"\"\ndef cve_cms(result, v):\n url_comp = \"https://www.cvedetails.com/version-search.php?vendor={}&product=&version={}\".format(result, v)\n req = requests.get(url_comp, allow_redirects=True, verify=False)\n if not \"matches\" in req.text:\n print \"{}CVE found ! \\n{}{}\\n\".format(WARNING, WARNING, url_comp)\n if 'WordPress' in req.text:\n version = v.replace('.','')\n site = \"https://wpvulndb.com/wordpresses/{}\".format(version)\n req = requests.get(site)\n soup = BeautifulSoup(req.text, \"html.parser\")\n search = soup.find_all('tr')\n if search:\n for p in search:\n dates = p.find(\"td\").text.strip()\n detail = p.find(\"a\").text.strip()\n print \"{}{} : {}\".format(WARNING, dates, detail)\n else:\n print \"{} Nothing wpvunldb found \\n\".format(LESS)\n elif 'WordPress' in req.text:\n version = v.replace('.','')\n site = \"https://wpvulndb.com/wordpresses/{}\".format(version)\n req = requests.get(site)\n soup = BeautifulSoup(req.text, \"html.parser\")\n search = soup.find_all('tr')\n if search:\n print \"{}CVE found ! 
\\n{}{}\\n\".format(WARNING, WARNING, site)\n for p in search:\n dates = p.find(\"td\").text.strip()\n detail = p.find(\"a\").text.strip()\n print \"{}{} : {}\".format(WARNING, dates, detail)\n else:\n print \"{} Nothing wpvunldb found \\n\".format(LESS)\n else:\n print \"{} Nothing CVE found \\n\".format(LESS)\n\n\"\"\"Get header of website (cookie, link, etc...)\"\"\"\ndef get_header(url, directory):\n head = r.headers\n print INFO + \"HEADER\"\n print LINE\n print \" {} \\n\".format(head).replace(',','\\n')\n print LINE\n with open(directory + '/header.csv', 'w+') as file:\n file.write(str(head).replace(',','\\n'))\n\n\"\"\"Get whois of website\"\"\"\ndef who_is(url, directory):\n print INFO + \"WHOIS\"\n print LINE\n try:\n who_is = whois.whois(url)\n #pprint.pprint(who_is) + \"\\n\"\n for k, w in who_is.iteritems():\n is_who = \"{} : {}-\".format(k, w)\n print is_who\n with open(directory + '/whois.csv', 'a+') as file:\n file.write(is_who.replace(\"-\",\"\\n\"))\n except:\n erreur = sys.exc_info()\n typerr = u\"%s\" % (erreur[0])\n typerr = typerr[typerr.find(\"'\")+1:typerr.rfind(\"'\")]\n print typerr\n msgerr = u\"%s\" % (erreur[1])\n print msgerr\n print \"\\n\" + LINE\n\n\"\"\"\nStatus:\n - Get response status of the website (200, 302, 404...).\n - Check if a backup exist before to start the scan.\n If exist it restart scan from to the last line of backup.\n\"\"\"\ndef status(stat, directory, u_agent):\n check_b = check_backup(directory)\n #check backup before start scan\n if check_b == True:\n with open(directory + \"/backup.txt\", \"r\") as word:\n for ligne in word.readlines():\n print \"{}{}{}\".format(BACK, url, ligne.replace(\"\\n\",\"\"))\n lignes = ligne.split(\"\\n\")\n #take the last line in file\n last_line = lignes[-2]\n with open(wordlist, \"r\") as f:\n for nLine, line in enumerate(f):\n line = line.replace(\"\\n\",\"\")\n if line == last_line:\n print LINE\n forced = False\n check_words(url, wordlist, directory, u_agent, forced, nLine)\n elif check_b == False:\n os.remove(directory + \"/backup.txt\")\n print \"restart scan...\"\n print LINE\n if stat == 200:\n check_words(url, wordlist, directory, u_agent)\n elif stat == 301:\n print PLUS + \" 301 Moved Permanently\\n\"\n check_words(url, wordlist, directory, u_agent)\n elif stat == 302:\n print PLUS + \" 302 Moved Temporarily\\n\"\n check_words(url, wordlist, directory, u_agent)\n elif stat == 304:\n pass\n elif stat == 404:\n a = raw_input(\"{} not found/ forced ?(y:n)\".format(LESS))\n if a == \"y\":\n check_words(url, wordlist, directory, u_agent)\n else:\n sys.exit()\n elif stat == 403:\n a = raw_input(FORBI + \" forbidden/ forced ?(y:n)\")\n if a == \"y\":\n forced = True\n check_words(url, wordlist, directory, u_agent, forced)\n else:\n sys.exit()\n else:\n a = raw_input(\"{} not found/ forced ?(y:n)\".format(LESS))\n if a == \"y\":\n check_words(url, wordlist, directory, u_agent)\n else:\n sys.exit()\n\n\"\"\"Check if a backup file exist from function 'Status' \"\"\"\ndef check_backup(directory):\n if os.path.exists(directory + \"/backup.txt\"):\n bp = raw_input(\"A backup file exist, do you want to continue or restart ? 
(C:R)\\n\")\n if bp == 'C' or bp == 'c':\n print \"restart from last save in backup.txt ...\"\n print LINE\n return True\n else:\n print LINE\n return False\n else:\n pass\n\n\"\"\"Get DNS informations\"\"\"\ndef get_dns(url, directory):\n try:\n if \"https\" in url:\n url = url.replace('https://','').replace('/','')\n context = ssl.create_default_context()\n conn = context.wrap_socket(socket.socket(socket.AF_INET), server_hostname=url)\n conn.connect((url, 443))\n cert = conn.getpeercert()\n print INFO + \"DNS information\"\n print LINE\n pprint.pprint(str(cert['subject']).replace(',','').replace('((','').replace('))',''))\n pprint.pprint(cert['subjectAltName'])\n print ''\n conn.close()\n print LINE\n with open(directory + '/dns_info.csv', 'w+') as file:\n file.write(str(cert).replace(',','\\n').replace('((','').replace('))',''))\n else:\n pass\n except:\n print INFO + \"DNS information\"\n print LINE\n erreur = sys.exc_info()\n typerr = u\"%s\" % (erreur[0])\n typerr = typerr[typerr.find(\"'\")+1:typerr.rfind(\"'\")]\n print typerr\n msgerr = u\"%s\" % (erreur[1])\n print msgerr + \"\\n\"\n print LINE\n\n\n\"\"\"Create backup file\"\"\"\ndef backup(res, directory, forbi):\n with open(directory + \"/backup.txt\", \"a+\") as words:\n #delete url to keep just file or dir\n anti_sl = res.split(\"/\")\n rep = anti_sl[3:]\n result = str(rep)\n result = result.replace(\"['\",\"\").replace(\"']\",\"\").replace(\"',\", \"/\").replace(\" '\",\"\")\n words.write(result + \"\\n\")\n\n\"\"\" Download files and calcul size \"\"\"\ndef dl(res, req, directory):\n soup = BeautifulSoup(req.text, \"html.parser\")\n extensions = ['.txt', '.html', '.jsp', '.xml', '.php', '.log', '.aspx', '.zip', '.old', '.bak', '.sql', '.js', '.asp', '.ini', '.log', '.rar', '.dat', '.log', '.backup', '.dll', '.save', '.BAK', '.inc']\n d_files = directory + \"/files/\"\n if not os.path.exists(d_files):\n os.makedirs(d_files)\n anti_sl = res.split(\"/\")\n rep = anti_sl[3:]\n result = rep[-1]\n p_file = d_files + result\n texte = req.text\n for exts in extensions:\n if exts in result:\n with open(p_file, 'w+') as fichier:\n fichier.write(str(soup))\n # get size of file (in bytes)\n size_bytes = os.path.getsize(p_file)\n return size_bytes\n\n\"\"\"\nfile_backup:\nDuring the scan, check if a backup file or dir exist.\n\"\"\"\ndef file_backup(res, directory):\n ext_b = ['.save', '.old', '.backup', '.BAK', '.bak', '.zip', '.rar', '~', '_old', '_backup', '_bak']\n d_files = directory + \"/files/\"\n for exton in ext_b:\n res_b = res + exton\n #print res_b\n anti_sl = res_b.split(\"/\")\n rep = anti_sl[3:]\n result = rep[-1]\n r_files = d_files + result\n req_b = requests.get(res_b, allow_redirects=False, verify=False)\n soup = BeautifulSoup(req_b.text, \"html.parser\")\n if req_b.status_code == 200:\n with open(r_files, 'w+') as fichier_bak:\n fichier_bak.write(str(soup))\n size_bytes = os.path.getsize(r_files)\n if size_bytes:\n print \"{}{} ({} bytes)\".format(PLUS, res_b, size_bytes)\n else:\n print \"{}{}\".format(PLUS, res_b)\n else:\n pass\n\n\"\"\"\nhidden_dir:\nLike the function 'file_backup' but check if the type backup dir like '~articles/' exist.\n\"\"\"\ndef hidden_dir(res, user_agent):\n pars = res.split(\"/\")\n hidd_d = \"{}~{}/\".format(url, pars[3])\n hidd_f = \"{}~{}\".format(url, pars[3])\n req_d = requests.get(hidd_d, headers=user_agent, allow_redirects=False, verify=False, timeout=5)\n req_f = requests.get(hidd_f, headers=user_agent, allow_redirects=False, verify=False, timeout=5)\n sk_d = req_d.status_code\n 
sk_f = req_f.status_code\n if sk_d == 200:\n print \"{}{}\".format(PLUS, hidd_d)\n elif sk_f == 200:\n print \"{}{}\".format(PLUS, hidd_f)\n\n\"\"\"\ntryUrl:\nTest all URL contains in the dictionnary with multi-threading.\nThis script run functions:\n- backup()\n- dl()\n- file_backup()\n- mail()\n\"\"\"\ndef tryUrl(i, q, directory, u_agent, forced=False):\n all_mail = []\n rec_list = []\n for t in range(len_w):\n res = q.get()\n try:\n if u_agent:\n user_agent = {'User-agent': u_agent}\n else:\n ua = UserAgent()\n user_agent = {'User-agent': ua.random} #for a user-agent random\n try:\n forbi = False\n req = requests.get(res, headers=user_agent, allow_redirects=False, verify=False, timeout=5)\n hidden_dir(res, user_agent)\n status_link = req.status_code\n if status_link == 200:\n #check backup\n backup(res, directory, forbi)\n # dl files and calcul size\n size = dl(res, req, directory)\n if size:\n print \"{}{} ({} bytes)\".format(PLUS, res, size)\n else:\n print \"{}{}\".format(PLUS, res)\n #check backup files\n file_backup(res, directory)\n #get mail\n mail(req, directory, all_mail)\n if 'sitemap.xml' in res:\n sitemap(req, directory)\n if status_link == 403:\n if not forced:\n forbi = True\n print FORBI + res + \"\\033[31m Forbidden \\033[0m\"\n backup(res, directory, forbi)\n else:\n #print FORBI + res + \"\\033[31m Forbidden \\033[0m\"\n pass\n elif status_link == 404:\n pass\n elif status_link == 301:\n if redirect:\n print \"\\033[33m[+] \\033[0m\" + res + \"\\033[33m 301 Moved Permanently \\033[0m\"\n else:\n pass\n elif status_link == 304:\n pass\n elif status_link == 302:\n if redirect:\n print \"\\033[33m[+] \\033[0m\" + res + \"\\033[33m 302 Moved Temporarily \\033[0m\"\n else:\n pass\n elif status_link == 400:\n pass\n #print \"bad request\"\n except Exception:\n pass\n #traceback.print_exc()\n q.task_done()\n except Exception:\n #traceback.print_exc()\n pass\n sys.stdout.write(\"\\033[34m[i] [scan... %d/%d]\\033[0m\\r\" % (t*thread, len_w))\n sys.stdout.flush()\n try:\n os.remove(directory + \"/backup.txt\")\n except:\n print(\"backup.txt not found\")\n\n\"\"\"\ncheck_words:\nFunctions wich manage multi-threading\n\"\"\"\ndef check_words(url, wordlist, directory, u_agent, forced=False, nLine=False):\n link_url = []\n hiddend = []\n if nLine:\n with open(wordlist, \"r\") as payload:\n links = payload.read().splitlines()\n for i in range(thread):\n worker = Thread(target=tryUrl, args=(i, enclosure_queue, directory, u_agent, forced))\n worker.setDaemon(True)\n worker.start()\n for link in links[nLine:]:\n if prefix:\n link_url = url + prefix + link\n else:\n link_url = url + link\n enclosure_queue.put(link_url)\n enclosure_queue.join()\n else:\n with open(wordlist, \"r\") as payload:\n links = payload.read().splitlines()\n for i in range(thread):\n worker = Thread(target=tryUrl, args=(i, enclosure_queue, directory, u_agent, forced))\n worker.setDaemon(True)\n worker.start()\n for link in links:\n if prefix:\n link_url = url + prefix + link\n else:\n link_url = url + link\n enclosure_queue.put(link_url)\n enclosure_queue.join()\n\n\"\"\"\ncreate_file:\nCreate directory with the website name to keep a scan backup. 
\n\"\"\"\ndef create_file(url, stat, u_agent, thread, subdomains):\n if 'www' in url:\n direct = url.split('.')\n directory = direct[1]\n directory = \"sites/\" + directory\n else:\n direct = url.split('/')\n directory = direct[2]\n directory = \"sites/\" + directory\n # if the directory don't exist, create it\n if not os.path.exists(directory):\n os.makedirs(directory) # creat the dir\n if subdomains:\n subdomain(subdomains)\n get_header(url, directory)\n get_dns(url, directory)\n who_is(url, directory)\n detect_cms(url)\n detect_waf(url, directory)\n status(stat, directory, u_agent)\n # or else ask the question\n else:\n new_file = raw_input('this directory exist, do you want to create another file ? (y:n)\\n')\n if new_file == 'y':\n print LINE\n directory = directory + '_2'\n os.makedirs(directory)\n if subdomains:\n subdomain(subdomains)\n get_header(url, directory)\n get_dns(url, directory)\n who_is(url, directory)\n detect_cms(url)\n status(stat, directory, u_agent)\n else:\n status(stat, directory, u_agent)\n\nif __name__ == '__main__':\n #arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", help=\"URL to scan [required]\", dest='url')\n parser.add_argument(\"-w\", help=\"Wordlist used for URL Fuzzing [required]\", dest='wordlist')\n parser.add_argument(\"-s\", help=\"Subdomain tester\", dest='subdomains', required=False)\n parser.add_argument(\"-t\", help=\"Number of threads to use for URL Fuzzing. Default: 5\", dest='thread', type=int, default=5)\n parser.add_argument(\"-a\", help=\"Choice user-agent\", dest='user_agent', required=False)\n parser.add_argument(\"--redirect\", help=\"For scan with redirect response (301/302)\", dest='redirect', required=False, action='store_true')\n parser.add_argument(\"-r\", help=\"recursive dir\", required=False, dest=\"recursif\", action='store_true')\n parser.add_argument(\"-p\", help=\"add prefix in wordlist to scan\", required=False, dest=\"prefix\")\n results = parser.parse_args()\n \n url = results.url\n wordlist = results.wordlist\n thread = results.thread\n u_agent = results.user_agent\n subdomains = results.subdomains\n redirect = results.redirect\n prefix = results.prefix\n recur = results.recursif\n # TODO implement recursive scan\n\n banner()\n len_w = 0\n #calcul wordlist size\n auto_update()\n with open(wordlist, 'r') as words:\n for l in words:\n len_w += 1\n r = requests.get(url, allow_redirects=False, verify=False)\n stat = r.status_code\n print \"\\n \\033[32m url \" + url + \" found \\033[0m\\n\"\n print LINE\n create_file(url, stat, u_agent, thread, subdomains)\n \n"
}
] | 2 |
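The scanner above fans its wordlist out to worker threads through a shared queue (see check_words/tryUrl). A minimal Python 3 sketch of that producer/worker pattern — the target URL and the inline word list are placeholders, not values taken from the record:

import queue
import threading
import requests

TARGET = "http://example.com/"      # hypothetical target
WORDS = ["admin/", "backup.zip"]    # stand-in for a wordlist file
q = queue.Queue()

def worker():
    while True:
        path = q.get()
        try:
            r = requests.get(TARGET + path, allow_redirects=False, timeout=5)
            if r.status_code == 200:
                print("[+]", TARGET + path)
        except requests.RequestException:
            pass
        finally:
            q.task_done()

for _ in range(5):                  # mirrors the -t thread option
    threading.Thread(target=worker, daemon=True).start()
for w in WORDS:
    q.put(w)
q.join()                            # block until every path was tried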
ShawnUrbach/pythonScripts | https://github.com/ShawnUrbach/pythonScripts | a42c24f4aced643a9d25d4f6226bd0f1d1430622 | 0a1ad43e6f0e622498561206abaa9677286dd04c | cd41df6482938a9bd4307fa5eee10811ab4dc99c | refs/heads/master | 2016-09-14T13:43:01.407190 | 2016-06-02T11:20:39 | 2016-06-02T11:20:39 | 59,643,719 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6462093591690063,
"alphanum_fraction": 0.6516245603561401,
"avg_line_length": 34.043479919433594,
"blob_id": "6531022f73e1989f5cd165b5d2a561e4911a0eb3",
"content_id": "4eb2c8b7e77094e394548f27b4b9c96ed72f5130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1662,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 46,
"path": "/filename_Changer.py",
"repo_name": "ShawnUrbach/pythonScripts",
"src_encoding": "UTF-8",
"text": "#This script allows user to either change all filenames containing a specified string in a folder or:\r\n#User may also choose to change filename extensions for a particular extension in a folder.\r\n\r\nimport tkinter, os\r\nfrom tkinter import filedialog\r\n\r\nprint ('Please choose directory:')\r\n\r\nroot = tkinter.Tk()\r\nroot.withdraw()\r\n#allows user to input working directory\r\ndirname = filedialog.askdirectory(parent=root,initialdir=\"/\",title='Please select a directory')\r\n\r\nprint (dirname)\r\nos.chdir(dirname)\r\n\r\ndef first_input():\r\n input1 = input (\"Choose A.) Change filename or B.) Change extension.: \")\r\n if input1 == \"A\" or input1 == \"a\":\r\n change_filename()\r\n elif input1 ==\"B\" or input1 == \"b\":\r\n ext_change()\r\n else:\r\n print ('Please choose either A or B.')\r\n first_input()\r\n\r\ndef change_filename():\r\n word_choice = input(\"Type in string to be replaced: \")\r\n word_choice2 = input(\"Type in replacement string: \")\r\n for f in os.listdir('.'):\r\n if f.startswith(word_choice):\r\n os.rename(f, f.replace(word_choice, word_choice2))\r\n print (\"File rename complete.\")\r\n\r\ndef ext_change():\r\n ext_choice = input(\"Type in .extension to be replaced: \")\r\n ext_choice2 = input(\"Type in replacement .extension: \")\r\n for filename in os.listdir(dirname):\r\n infilename = os.path.join(dirname,filename)\r\n if not os.path.isfile(infilename): continue\r\n oldbase = os.path.splitext(filename)\r\n newname = infilename.replace(ext_choice, ext_choice2)\r\n output = os.rename(infilename, newname)\r\n print (\"File extension rename complete.\")\r\n\r\nfirst_input()\r\n\r\n\r\n"
},
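A compact pathlib equivalent of the extension-change branch above — a sketch assuming Python 3.4+, with '.txt' and '.md' as example extensions:

from pathlib import Path

# rename every *.txt file in the current directory to *.md,
# changing only the real suffix that pathlib reports
for p in Path('.').glob('*.txt'):
    p.rename(p.with_suffix('.md'))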
{
"alpha_fraction": 0.6402116417884827,
"alphanum_fraction": 0.6481481194496155,
"avg_line_length": 28.72222137451172,
"blob_id": "9a1c4b769d1b817e54567e76b16241deba198731",
"content_id": "8294128b3fda48a034e4ebdc082be6fecd6a8407",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1134,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 36,
"path": "/filename_Changer2.py",
"repo_name": "ShawnUrbach/pythonScripts",
"src_encoding": "UTF-8",
"text": "#This script allows for user to delete characters at beginning or end of filename.\r\n\r\nimport tkinter, os\r\nfrom tkinter import filedialog\r\n\r\nprint ('Please choose directory:')\r\n\r\nroot = tkinter.Tk()\r\nroot.withdraw()\r\n#allows user to input working directory\r\ndirname = filedialog.askdirectory(parent=root,initialdir=\"/\",title='Please select a directory')\r\n\r\nprint (dirname)\r\nos.chdir(dirname)\r\n\r\nfrom glob import glob\r\nfrom os import rename\r\n\r\ndef front_chars():\r\n usr1 = input(\"Type file extension here: \")\r\n usr2 = int(input(\"Type number of FRONT characters you wish to keep. The rest will be removed: \"))\r\n for fname in glob('*'):\r\n os.rename(fname, fname[:usr2] + '.'+usr1)\r\n print (\"Characters removed.\")\r\n\r\ndef back_chars():\r\n usr3 = int(input(\"Type number of FRONT characters you wish to delete. The rest will remain: \"))\r\n for fname in glob('*'):\r\n os.rename(fname, fname[usr3:])\r\n print (\"Characters removed.\")\r\n\r\ninput1 = input(\"Keep characters from front or back? \")\r\nif input1 == \"back\":\r\n back_chars()\r\nelif input1 == \"front\":\r\n front_chars()\r\n \r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6414473652839661,
"alphanum_fraction": 0.6491228342056274,
"avg_line_length": 32.77777862548828,
"blob_id": "c13fdbfd67772d9c72fe42366fff30867adb19c7",
"content_id": "d0ea508a3209386e96bd6936151a24e7b013b996",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 912,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 27,
"path": "/imageResizer.py",
"repo_name": "ShawnUrbach/pythonScripts",
"src_encoding": "UTF-8",
"text": "import tkinter, os\nfrom tkinter import filedialog\nroot = tkinter.Tk()\nroot.withdraw()\n#allows user to input working directory\ndirname = filedialog.askdirectory(parent=root,initialdir=\"/\",title='Please select a directory')\n\nprint (dirname)\nos.chdir(dirname)\n\nimport sys\nfrom PIL import Image\n\n#user inputs a height and width separated by comma (ex. 128, 128)\nsize = list(map(int,input('Type height and width separated by a comma here:').split(',')))\n\n#iterates over every .jpg file in current working directory, resizing to user-specified height and width.\nfor f in os.listdir('.'):\n if f.endswith('.jpg'):\n outfile = os.path.splitext(f)[0] + \"thumbnail.jpg\"\n if f != outfile:\n try:\n im = Image.open(f)\n im.thumbnail(size)\n im.save(outfile, \"JPEG\")\n except IOError:\n print (\"cannot create thumbnail for '%s'\" % f)\n"
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 24,
"blob_id": "ee9802ecc5122c04c8b6f3fc45e28cada0da0bd9",
"content_id": "12599236751b586471a879b707de78652c18fedf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ShawnUrbach/pythonScripts",
"src_encoding": "UTF-8",
"text": "# pythonScripts\nContains assorted Python scripts.\n"
}
] | 4 |
liuyanfei002/deep-learning-of-web | https://github.com/liuyanfei002/deep-learning-of-web | a46cf9771c238ab2556b1ea3f0798f31b9ea7cdd | 78e6d51d699d391fba9f13594cf168b29068db0b | 3cc4d02ef79df10bdf723622ab258cc182580c9e | refs/heads/master | 2022-11-09T18:59:36.108874 | 2020-06-12T04:19:42 | 2020-06-12T04:19:42 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6357827186584473,
"alphanum_fraction": 0.6389776468276978,
"avg_line_length": 30.350000381469727,
"blob_id": "99585df52eb4548edb70e0c8d245d1e9981030b1",
"content_id": "aa84865f058fd701c73383a19872fcea47407bc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 20,
"path": "/3flask/个人主页/post_test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\nfrom flask import request\nfrom flask import render_template\n\napp = Flask(__name__) # 创建应用实例\n\[email protected]('/',methods = [\"get\", \"post\"])\ndef index():\n print(\"------------------\")\n print(request.method)\n print(request.form)\n print(request.form.get(\"username\"))\n print(request.form.get(\"password\"))\n print(\"------------------\")\n response_data = \"广东科学技术职业学院\"\n return render_template(\"post_form.html\")\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True, port=80) # 开始运行flask应用程序, debug启动app的调试模式\n\t# app.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
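post_test.py reads request.form.get("username") and request.form.get("password"), so the form it renders must use those exact field names. The templates/post_form.html file is not part of this record; a plausible minimal version, sketched inline with render_template_string instead of a template file:

from flask import Flask, request, render_template_string

app = Flask(__name__)
FORM = """
<form method="post">
  <input name="username"><input name="password" type="password">
  <input type="submit">
</form>
"""

@app.route('/', methods=["GET", "POST"])
def index():
    if request.method == "POST":
        # echoes the submitted username back, matching the names above
        return request.form.get("username", "")
    return render_template_string(FORM)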
{
"alpha_fraction": 0.5965957641601562,
"alphanum_fraction": 0.6017021536827087,
"avg_line_length": 30.783782958984375,
"blob_id": "0a2e412bd751941d6acbbf737419ffb337eb6d24",
"content_id": "9f2a35f2d82a2aedce53f2ff135ceb1006759ff0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1301,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 37,
"path": "/web_mnist/img_receive.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, jsonify\nimport os\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n\n# 设置允许的文件格式\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\[email protected]('/', methods=['POST', 'GET'])\ndef hello_world():\n print(request.method)\n if request.method == 'POST':\n print('post')\n # 通过file标签获取文件\n f = request.files['file']\n print(f.filename)\n if not (f and allowed_file(f.filename)):\n return jsonify({\"error\": 1001, \"msg\": \"图片类型:png、PNG、jpg、JPG、bmp\"})\n # 当前文件所在路径\n basepath = os.path.dirname(__file__)\n # 一定要先创建该文件夹,不然会提示没有该路径\n upload_path = os.path.join(basepath, 'static/images', secure_filename(f.filename))\n # 保存文件\n f.save(upload_path)\n # show_path = \"../static/images/\" + f.filename\n # return render_template('upload_ok.html',path = show_path)\n return \"上传成功\"\n else:\n print('get')\n return render_template('upload.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)"
},
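A quick way to exercise the upload route above without a browser is a requests client; a sketch assuming the server runs locally on Flask's default port, with 'test.png' as a placeholder file. The multipart field name 'file' must match the request.files['file'] lookup in the handler:

import requests

with open('test.png', 'rb') as fh:
    resp = requests.post('http://127.0.0.1:5000/', files={'file': fh})
print(resp.status_code, resp.text)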
{
"alpha_fraction": 0.6750741600990295,
"alphanum_fraction": 0.6795251965522766,
"avg_line_length": 21.46666717529297,
"blob_id": "eea3a2764212c06295995571d2a9feb51304817e",
"content_id": "de23304b4ed79dc995aef21df60cfe98d352ebc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 30,
"path": "/3flask/hello_word/hello.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\nfrom flask import render_template\napp = Flask(__name__) # 创建应用实例\nimport json\nimport flask\n\nprint(flask.__version__)\n\[email protected]('/') # 使用route装饰器创建一个路由\ndef hello(): # 视图函数,访问此路由时执行的函数\n\t# return 'Hello World' # 视图函数的返回值,称之为 ‘响应’\n return render_template('hello.html')\n\[email protected]('/jsj')\ndef hello2():\n\treturn '计算机工程技术学院(人工智能学院)'\n\[email protected]('/json')\ndef json_test():\n\tiidct = {\"name\" :\"gdit\", \"age\": 20}\n\tprint(type(iidct))\n\tprint(iidct[\"name\"])\n\n\tijson = json.dumps(iidct)\n\tprint(type(ijson))\n\t# print(ijson[\"name\"])\n\treturn ijson\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式\n"
},
{
"alpha_fraction": 0.68767911195755,
"alphanum_fraction": 0.7335243821144104,
"avg_line_length": 20.875,
"blob_id": "5bececdfa957614613edb98c72bf4116090aa879",
"content_id": "4b5c8e2e4003b01244c56ca2f16f93638f8cd025",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 16,
"path": "/5.2keras卷积神经网络识别手写数字/卷积神经网络识别手写数字/predict_new.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom keras.models import load_model\n\nimg = cv2.imread(\"./pic/2.jpg\")\nprint(img.shape)\ngrey_img = img\nprint(grey_img.shape)\n\nshape_img= (grey_img.reshape(1, 28, 28, 3)).astype('float32')/255\nprint(shape_img.shape)\n\n\nmodel = load_model('SaveModel/minist_model_graphic.h5')\n\nprediction = model.predict_classes(shape_img)\nprint(prediction[0])"
},
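The script above assumes a 28x28 image exported straight from the dataset. For an arbitrary input picture, the preprocessing needs a resize as well; a sketch where 'digit.png' is a placeholder path:

import cv2

img = cv2.imread('digit.png', cv2.IMREAD_GRAYSCALE)    # load as one channel
img = cv2.resize(img, (28, 28))                        # match the model input size
x = img.reshape(1, 28, 28, 1).astype('float32') / 255  # batch of one, scaled to [0, 1]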
{
"alpha_fraction": 0.6043010950088501,
"alphanum_fraction": 0.6043010950088501,
"avg_line_length": 26.41176414489746,
"blob_id": "45b650d338781de7ef9cd7acc8ed7c9778021227",
"content_id": "780de52ba51f493fea13ea9fccf1b4d4d59639f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 17,
"path": "/3flask/个人主页/get_test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import request\n\[email protected]('/',)\ndef index():\n print(\"------------------\")\n print(request)\n print(request.method)\n print(request.args)\n print(request.args.get(\"name\"))\n request_name = request.args.get(\"name\")\n print(\"------------------\")\n return request_name\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.48740553855895996,
"alphanum_fraction": 0.507556676864624,
"avg_line_length": 21.714284896850586,
"blob_id": "6ad8f6c1df31e467bd671e7d251424129e658d93",
"content_id": "db09a538c67d10e712f96e528e68d50f460962d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 35,
"path": "/3flask/表格与数据库/数据库/insert_sql2.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import pymysql\ndef insert_one_test():\n sql = 'insert into student (user,pwd) values (%s,%s);'\n name = 'wuli'\n pwd = '123456789'\n cursor.execute(sql, [name, pwd])\n\ndef insert_many_test():\n # 定义要执行的sql语句\n sql = 'insert into student(user,pwd) values(%s,%s);'\n data = [\n ('a', '147'),\n ('b', '258'),\n ('c', '369')\n ]\n # 拼接并执行sql语句\n cursor.executemany(sql, data)\n\n# 连接database\nconn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n # db=\"cov\",\n db=\"gdit_student\",\n charset=\"utf8\")\n# 获取一个光标\ncursor = conn.cursor()\ninsert_one_test()\ninsert_many_test()\n\n# 涉及写操作要注意提交\nconn.commit()\n# 关闭连接\ncursor.close()\nconn.close()"
},
{
"alpha_fraction": 0.6039682626724243,
"alphanum_fraction": 0.6238095164299011,
"avg_line_length": 20.016666412353516,
"blob_id": "b4852f5291f794d5d895be2bf2f87003f53d0435",
"content_id": "95a1adc2cedc61f1cf6d70368ab9941b788c4e5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 60,
"path": "/3flask/表格与数据库插入与查询/数据库/create_table.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# 导入pymysql模块\nimport pymysql\n\n# 连接database\nconn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n charset=\"utf8\")\n\n# 得到一个可以执行SQL语句的光标对象\ncursor = conn.cursor() # 执行完毕返回的结果集默认以元组显示\n\n\n# 定义要执行的SQL语句\n# sql = \"\"\"\n# CREATE TABLE USER1 (\n# id INT auto_increment PRIMARY KEY ,\n# name CHAR(10) NOT NULL UNIQUE,\n# age TINYINT NOT NULL\n# )ENGINE=innodb DEFAULT CHARSET=utf8; #注意:charset='utf8' 不能写成utf-8\n# \"\"\"\n\n# sql = \"\"\"\n# CREATE TABLE userinfo (\n# id INT auto_increment PRIMARY KEY ,\n# user CHAR(10) NOT NULL UNIQUE,\n# pwd TINYINT NOT NULL\n# )ENGINE=innodb DEFAULT CHARSET=utf8; #注意:charset='utf8' 不能写成utf-8\n# \"\"\"\n\nsql = \"\"\"\nCREATE TABLE student (\nid INT auto_increment PRIMARY KEY ,\nuser CHAR(20) NOT NULL UNIQUE,\npwd int(10)\n)ENGINE=innodb DEFAULT CHARSET=utf8; #注意:charset='utf8' 不能写成utf-8\n\"\"\"\n\nprint(\"create table\")\n# 执行SQL语句\ncursor.execute(sql)\n\n# sql = 'select * from userinfo where user = \"%s\" and pwd=\"%s\"' % (user, pwd)\n# sql = 'select * from userinfo'\n# print(sql)\n# res = cursor.execute(sql)\n# print(res)\n# # 进行判断\n# if res:\n# print('登录成功')\n# else:\n# print('登录失败')\n\n\n# 关闭光标对象\ncursor.close()\n\n# 关闭数据库连接\nconn.close()"
},
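The commented-out login query in create_table.py builds SQL with % string formatting, which is open to SQL injection; pymysql can bind parameters instead. A sketch against the student table the script creates, with 'gdit' / '123456' as example credentials:

import pymysql

conn = pymysql.connect(host="127.0.0.1", user="root", password="123456",
                       db="gdit_student", charset="utf8")
cursor = conn.cursor()
# the driver escapes the bound values, so user input never becomes SQL syntax
cursor.execute('select * from student where user=%s and pwd=%s;',
               ['gdit', '123456'])
print(cursor.fetchone())
cursor.close()
conn.close()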
{
"alpha_fraction": 0.6819788217544556,
"alphanum_fraction": 0.6819788217544556,
"avg_line_length": 22.58333396911621,
"blob_id": "ec4988943fa970b457bd97751b9338c065fde7c8",
"content_id": "3787d984e9451e9d7cb23e56aba5c7726cf08898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 12,
"path": "/3flask/redirect/redirect.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\nfrom flask import redirect\n\napp = Flask(__name__) # 创建应用实例\n\n\[email protected]('/redir')\ndef redir():\n return redirect('https://www.gdit.edu.cn/')\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式\n"
},
{
"alpha_fraction": 0.6141414046287537,
"alphanum_fraction": 0.6277777552604675,
"avg_line_length": 30.95161247253418,
"blob_id": "701c645223fff61be795622bdf95301fc33a24f7",
"content_id": "3411810dc83ca04bcc298b10e668cc189fb34f88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2168,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 62,
"path": "/web_mnist/web_predict_img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, jsonify\nfrom werkzeug.utils import secure_filename\n# from datetime import timedelta\nimport os\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\napp = Flask(__name__)\n\n# 设置允许的文件格式\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n# 附windows下中文路径图片解决方案:\ndef cv_imread(file_path=\"\"):\n cv_img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)\n return cv_img\n\ndef predict_img(input_img):\n print(input_img)\n # img = cv2.imread(input_img)\n img = cv_imread(input_img)\n print(img.shape)\n # grey_img = img[:, :, 0:1]\n grey_img = img\n print(grey_img.shape)\n shape_img = (grey_img.reshape(1, 28, 28, 1)).astype('float32') / 255\n\n # model = load_model('SaveModel/minist_model.h5') #选取自己的.h模型名称\n model = load_model('SaveModel/minist_model_graphic.h5') # 选取自己的.h模型名称\n prediction = model.predict_classes(shape_img)\n label = prediction[0]\n print(label)\n return label\n\[email protected]('/', methods=['POST', 'GET'])\ndef hello_world():\n if request.method == 'POST':\n print('post')\n # 通过file标签获取文件\n f = request.files['file']\n # print(f.filename)\n if not (f and allowed_file(f.filename)):\n return jsonify({\"error\": 1001, \"msg\": \"图片类型:png、PNG、jpg、JPG、bmp\"})\n # 当前文件所在路径\n basepath = os.path.dirname(__file__)\n # 一定要先创建该文件夹,不然会提示没有该路径\n upload_path = os.path.join(basepath, 'static\\images', secure_filename(f.filename))\n # 保存文件\n f.save(upload_path)\n show_path = \"../static/images/\" + f.filename\n label = predict_img(upload_path)\n return render_template('upload_ok.html',path = show_path, label = label)\n # return \"上传成功\"\n else:\n print('get')\n return render_template('upload.html')\n\nif __name__ == '__main__':\n app.run(debug=True)"
},
{
"alpha_fraction": 0.5946560502052307,
"alphanum_fraction": 0.6742467284202576,
"avg_line_length": 24.507246017456055,
"blob_id": "f36009b820575f1910958ae3b7de558cd03f72ba",
"content_id": "941e16c8563b4458168cadc51e573dac824b76ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1855,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 69,
"path": "/6.2tensorflow_minist/simple.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n#读取数据\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n#构建网络\n# images data\n#占用一个空间 [None, 784]\nx = tf.placeholder(\"float\", shape=[None, 784])\n# images labels\ny_ = tf.placeholder(\"float\", shape=[None, 10])\n# reshape from 784x1 to 28x28\nx_image = tf.reshape(x, [-1,28,28,1])\n# convolution\n#x [28x28]\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n# for weights intialization\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n# pooling\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\nW_conv1 = weight_variable([5, 5, 1, 32])\n#卷积之后大小是32x28x28\nnew_cov_img = conv2d(x_image,W_conv1)\n\nb_conv1 = bias_variable([32])\n\nprint(new_cov_img)\n#32x28x28\nh_conv1 = tf.nn.relu( new_cov_img)\n#32x14x14\nh_pool1 = max_pool_2x2(h_conv1)\n\nprint(h_pool1)\n#32*14*14\nh_pool2_flat = tf.reshape(h_pool1, [-1, 32*14*14])\nprint(h_pool2_flat)\n#参数= 输入维度*输出维度\n# = 6272 * 10\nW_fc2 = weight_variable([32*14*14, 10])\n#输出维度 = 10维\na10 = tf.matmul(h_pool2_flat, W_fc2)\nprint(a10)\nb_fc2 = bias_variable([10])\ny_conv=tf.nn.softmax(a10 + b_fc2)\n\n# loss = (y_ - y_conv)\ncross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\nkeep_prob = tf.placeholder(\"float\")\n\nprint(y_conv)\n\n# start graph\nsess = tf.InteractiveSession()\n\nfor i in range(5000):\n\t# 50一批\n\tbatch = mnist.train.next_batch(50)\n\t# 训练\n\ttrain_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})"
},
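The shape comments in simple.py (32x28x28 after the convolution, 32x14x14 after pooling, 6272 flattened units) can be checked with a few lines of arithmetic; a standalone sketch of that bookkeeping:

# a 5x5 conv with SAME padding keeps the 28x28 spatial size, and the
# 2x2 stride-2 max-pool halves it, so the flatten feeds a 6272x10 layer
h = w = 28
channels = 32
after_pool = h // 2            # 14
flat = channels * after_pool * after_pool
print(flat)                    # 6272 = 32*14*14
print(flat * 10)               # 62720 weights in W_fc2 (plus 10 biases)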
{
"alpha_fraction": 0.6575342416763306,
"alphanum_fraction": 0.6575342416763306,
"avg_line_length": 23.399999618530273,
"blob_id": "d57ce588fce487912173b8ac623fd7f96bd4a619",
"content_id": "bcaf4e97e529698dab5056c8f35108270a3947cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 15,
"path": "/3flask/html/render_html.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n\[email protected]('/')\ndef index():\n return 'Hello index'\n\[email protected]('/hello') #添加路由:hello\ndef do_hello():\n return render_template('hello.html')\n\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.54666668176651,
"alphanum_fraction": 0.6466666460037231,
"avg_line_length": 24,
"blob_id": "260116d1f8b02cbcc045a5631d8bb074c090c741",
"content_id": "3255c3d11b2fe2b87e3e93e90224b9ca7fe51be2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 6,
"path": "/6.1TensorFlow原理介绍/sum.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nx1 = tf.constant(90, name=\"x1\")\nx2 = tf.constant(80, name=\"x2\")\nx3 = tf.constant(70, name=\"x3\")\nadd = x1 + x2 + x3\nprint(add)\n"
},
{
"alpha_fraction": 0.5348148345947266,
"alphanum_fraction": 0.5474073886871338,
"avg_line_length": 29,
"blob_id": "5af361cf1650ad7b24bcf249daedd9c923d6bdb6",
"content_id": "1f68cbe4b50abe788f682beb72378212e15e9674",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1384,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 45,
"path": "/6.1TensorFlow原理介绍/fat_thin/plot_right_point.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\n#获取数据和标签\ndef get_data(path):\n train_data = []\n train_labels = []\n test_labels = []\n with open(path) as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n #设置胖瘦正常的标签,\n if tmp_label == 'fat':\n label = [1,0,0]\n elif tmp_label == 'normal':\n label = [0,1,0]\n elif tmp_label == 'thin':\n label = [0,0,1]\n train_data.append(tmp_data)\n train_labels.append(label)\n test_labels.append(tmp_label)\n\n return train_data, train_labels, test_labels\ndata_path = './bmi.txt'\ntrain_data, train_labels, test_labels = get_data(data_path)\n\nprint(train_data)\nprint(train_labels)\nprint(test_labels)\n\ntrain_data = np.array(train_data)\nx = train_data[:,0]\ny = train_data[:,1]\nprint(x)\nprint(y)\nfor indx in range(1000):\n if (test_labels[indx] == \"fat\"):\n plt.scatter(x[indx], y[indx], c='red', marker='.')\n elif (test_labels[indx] == \"thin\"):\n plt.scatter(x[indx], y[indx], c='blue', marker='.')\n elif (test_labels[indx] == \"normal\"):\n plt.scatter(x[indx], y[indx], c='green', marker='.')\n# plt.scatter(x, y, c='red', marker='.')\nplt.show()\n"
},
{
"alpha_fraction": 0.6601941585540771,
"alphanum_fraction": 0.6941747665405273,
"avg_line_length": 14.769230842590332,
"blob_id": "ed210c1983a1a2ba58a61a36657fa9b4080b35ea",
"content_id": "9555f9dd835ad3017240e885173091aa7187848c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 13,
"path": "/6.2tensorflow_minist/img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nimport platform\n\nprint(platform.python_version())\n\nprint('ok')\nimg = cv2.imread('a.jpg')\n\ncv2.imshow('img',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n"
},
{
"alpha_fraction": 0.5994831919670105,
"alphanum_fraction": 0.6201550364494324,
"avg_line_length": 24.866666793823242,
"blob_id": "4ea40a34cb761f7d67fd997f79c47e180976cd0a",
"content_id": "66ed990f635f9c3dd2319985af4072f45fc9c9a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 15,
"path": "/3flask/个人主页/self_introduce.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n\[email protected]('/')\ndef index():\n data = {\n \"name\":\"广东科学技术职业学院\",\n \"s\":\"男\",\n \"born\":1990\n }\n return render_template(\"introduce.html\", data = data)\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True,port=5000) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.6551724076271057,
"alphanum_fraction": 0.7306034564971924,
"avg_line_length": 30,
"blob_id": "37eece813a5d3f42f3a73934e6dbed03c902b6a1",
"content_id": "9af44e6624549085c324ab68d7c1ff0134160c9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 15,
"path": "/web_face/base64_img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport base64\nimport numpy as np\nimport cv2\n\nimg_file = open(r'./static/1.jpg', 'rb') # 二进制打开图片文件\nimg_b64encode = base64.b64encode(img_file.read()) # base64编码\nimg_file.close() # 文件关闭\nprint(img_b64encode)\nimg_b64decode = base64.b64decode(img_b64encode) # base64解码\nprint(img_b64decode)\nimg_array = np.fromstring(img_b64decode, np.uint8) # 转换np序列\nimg = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB) # 转换Opencv格式\ncv2.imshow(\"img\", img)\ncv2.waitKey()"
},
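The same encode/decode round trip is what a web API would do with the image carried inside a JSON payload; a sketch where 'img.jpg' is a placeholder path:

import base64, json
import numpy as np
import cv2

# encode the raw bytes and wrap them in JSON, as a client would send them
with open('img.jpg', 'rb') as fh:
    payload = json.dumps({'img': base64.b64encode(fh.read()).decode('ascii')})

# server side: unwrap, decode, and rebuild the OpenCV image
data = base64.b64decode(json.loads(payload)['img'])
img = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)
print(img.shape)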
{
"alpha_fraction": 0.6182265877723694,
"alphanum_fraction": 0.633004903793335,
"avg_line_length": 22.823530197143555,
"blob_id": "3fce10f4619e955e3112d0e2a228fc45d2f2647d",
"content_id": "69ba5f1ad7ffbef573029dfa203d8139dd144ead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 17,
"path": "/5.2keras卷积神经网络识别手写数字/卷积神经网络识别手写数字/ouput_mnist_img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from keras.datasets import mnist\nimport cv2\n(x_Train, y_Train) , (x_Test, y_Test) = mnist.load_data()\n\nprint(x_Train.shape)\nprint(y_Train.shape)\nprint(x_Test.shape)\nprint(y_Test.shape)\nfor num in range(20):\n name = './pic/' + str(num) + '.jpg'\n cv2.imwrite(name, x_Train[num])\n\nf = open(\"train_label.txt\",\"w\")\nfor num in range(20):\n label = y_Train[num]\n f.write(str(label))\n f.write(\"\\n\")\n\n"
},
{
"alpha_fraction": 0.4312668442726135,
"alphanum_fraction": 0.5498652458190918,
"avg_line_length": 42.411766052246094,
"blob_id": "9324248ba45377a19a765d081c42e341ed38f71b",
"content_id": "1a8a122327407b8c0cd438ca15f5ed4b262d0e1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 816,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 17,
"path": "/6.1TensorFlow原理介绍/remind.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "#-*- conding:utf-8 -*-\nimport datetime\nimport time #时间函数\nimport tkinter.messagebox #tk消息框\ntimelist=['8:55:00','9:50:00','10:55:00',\"11:50:00\",\"14:45:00\",\"15:40:00\",\"16:45:00\",\"17:40:00\",\"18:55:00\",\"19:50:00\",\"20:45:00\",\"21:40:00\"] #自定义提醒时间\nwhile True:\n now=time.strftime('%H:%M:%S',time.localtime(time.time())) #获取为HH:MM:SS时间格式\n for i in timelist:\n if i==now:\n print(\"到达设定时间:\",now)\n win = tkinter.Tk() # 初始化Tk\n win.title(\"当前时间\")\n win.attributes(\"-topmost\", True)\n win.geometry(\"%dx%d\" % (1000, 1000))\n TimeLabel = tkinter.Label(text=\"下课了\", bg='red', font=('Arial', 150), width=300, height=200)\n TimeLabel.pack()\n win.mainloop()\n\n\n\n\n"
},
{
"alpha_fraction": 0.5255929827690125,
"alphanum_fraction": 0.6342072486877441,
"avg_line_length": 21.25,
"blob_id": "43ec40a7215cbfe140ad656932f778263f223da1",
"content_id": "3b91e2a8d1f5d8856024efec11bfe6ef4794d586",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 36,
"path": "/6.1TensorFlow原理介绍/tf_calc.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nx1 = tf.placeholder(dtype=tf.float32)\nx2 = tf.placeholder(dtype=tf.float32)\nx3 = tf.placeholder(dtype=tf.float32)\n\nw1 = tf.Variable(0.6, dtype=tf.float32)\nw2 = tf.Variable(0.3, dtype=tf.float32)\nw3 = tf.Variable(0.1, dtype=tf.float32)\n\nn1 = x1 * w1\nn2 = x2 * w2\nn3 = x3 * w3\ny = n1 + n2 + n3\n\nprint(w1)\nprint(x1)\nprint(y)\n\nsess = tf.Session()\n# \ninit = tf.global_variables_initializer()\nsess.run(init)\n# result = sess.run([x1, x2, x3, w1, w2, w3, y], feed_dict={x1: 90, x2: 80, x3: 70})\nresult = sess.run([y], feed_dict={x1: 90, x2: 80, x3: 70})\n# print(result[6])\nprint(result)\n\n\n# print(result)\n\n# result = sess.run([y], feed_dict={x1: 98, x2: 95, x3: 87})\n# print(result)\n\n\n#result = sess.run([x1, x2, x3, w1, w2, w3, y], feed_dict={x1: 90, x2: 80, x3: 70})\n#print(result)\n"
},
{
"alpha_fraction": 0.5441176295280457,
"alphanum_fraction": 0.5596885681152344,
"avg_line_length": 21.25,
"blob_id": "ec0e464724713a8d750acda714e3162231fabd3f",
"content_id": "ce22f8ba20be89ddbd167e351a8d675f959bd51b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1338,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 52,
"path": "/3flask/表格与数据库/show_sql_table.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n\nimport pymysql\n\ndef get_sql_data():\n # 连接database\n conn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"cov\",\n charset=\"utf8\")\n\n # 获取一个光标\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor) # 返回字典数据类型\n\n # 定义将要执行的sql语句\n sql = 'select user,pwd from userinfo;'\n # 拼接并执行sql语句\n cursor.execute(sql)\n\n # 取到查询结果\n ret1 = cursor.fetchone() # 取一条\n ret2 = cursor.fetchmany(3) # 取三条\n ret3 = cursor.fetchone() # 取一条\n\n cursor.close()\n conn.close()\n\n print(ret1)\n print(ret2)\n print(ret3)\n return ret1\n\[email protected]('/show')\ndef hello():\n data = {\n \"user\": \"gdit\",\n \"pwd\": 100\n }\n # data = None\n ret_data = get_sql_data()\n print(ret_data)\n return render_template('table.html', data=ret_data)\n\[email protected]('/')\ndef index():\n return \"hello\"\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
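show_sql_table.py issues fetchone, fetchmany(3), then fetchone against one result set, and each call consumes rows from where the previous one stopped. A database-free toy illustration of that cursor behavior:

# rows 1, 2-4, and 5 come back in order, just like the three fetch calls above
rows = [{'user': u, 'pwd': p} for u, p in [('a', 1), ('b', 2), ('c', 3),
                                           ('d', 4), ('e', 5)]]
it = iter(rows)
ret1 = next(it)                          # like cursor.fetchone()
ret2 = [next(it) for _ in range(3)]      # like cursor.fetchmany(3)
ret3 = next(it)                          # like cursor.fetchone()
print(ret1, ret2, ret3)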
{
"alpha_fraction": 0.6723300814628601,
"alphanum_fraction": 0.7184466123580933,
"avg_line_length": 30.615385055541992,
"blob_id": "77c70c2f60f9dbb0921d7a692e68464ca8d39ed1",
"content_id": "07ad662a2e91baee328c1e2f38b9ee582ce8292b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 13,
"path": "/web_mnist/predict_one_img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom keras.models import load_model\n\nimg = cv2.imread(\"./pic/0.jpg\")\nprint(img.shape)\ngrey_img = img[:,:,0:1]\nprint(grey_img.shape)\nshape_img= (grey_img.reshape(1, 28, 28, 1)).astype('float32')/255\n\n# model = load_model('SaveModel/minist_model.h5') #选取自己的.h模型名称\nmodel = load_model('SaveModel/minist_model_graphic.h5') #选取自己的.h模型名称\nprediction = model.predict_classes(shape_img)\nprint(prediction[0])\n\n"
},
{
"alpha_fraction": 0.5404813885688782,
"alphanum_fraction": 0.5557987093925476,
"avg_line_length": 19.81818199157715,
"blob_id": "b22de050e10dbaff1d974df1f6665c754c1e0344",
"content_id": "86d1e2275f0fc3d16e43a2cfbd75ebe47df7ef2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 22,
"path": "/3flask/表格与数据库/数据库/delete_sql.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import pymysql\n\n# 连接database\nconn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n charset=\"utf8\")\n# 获取一个光标\ncursor = conn.cursor()\n# 定义将要执行的SQL语句\nsql = \"delete from student where user=%s;\"\n# name = \"june\"\nname = \"ddddd\"\n# 拼接并执行SQL语句\ncursor.execute(sql, [name])\n# 涉及写操作注意要提交\nconn.commit()\n# 关闭连接\n\ncursor.close()\nconn.close()"
},
{
"alpha_fraction": 0.6240875720977783,
"alphanum_fraction": 0.6313868761062622,
"avg_line_length": 26.200000762939453,
"blob_id": "f267cc5ad2356e0a7239d9c566d3e1581d41cd0a",
"content_id": "46c43695bdc64163b4121c587f8eb0ed5654d49a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 10,
"path": "/3flask/html/test_html.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\n\[email protected]('/hello') #添加路由:hello\ndef do_hello():\n return '<h1>Hello, stranger!</h1> '\n\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式\n\n\n"
},
{
"alpha_fraction": 0.646350622177124,
"alphanum_fraction": 0.6726862192153931,
"avg_line_length": 24.576923370361328,
"blob_id": "46b4666c34f9a1fd9625c5092d213584f4b299e8",
"content_id": "caf68eb28bee49f5390e031eb2ff8385bb42e385",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 52,
"path": "/5.3keras循环神经网络识别手写数字/lstm.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import keras\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Activation\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\nlearning_rate = 0.001\ntraining_iters = 5\nbatch_size = 128\ndisplay_step = 10\n\nn_input = 28\nn_step = 28\nn_hidden = 128\nn_classes = 10\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.reshape(-1, n_step, n_input)\nx_test = x_test.reshape(-1, n_step, n_input)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\ny_train = keras.utils.to_categorical(y_train, n_classes)\ny_test = keras.utils.to_categorical(y_test, n_classes)\n\nmodel = Sequential()\nmodel.add(LSTM(n_hidden,\n batch_input_shape=(None, n_step, n_input),\n unroll=True))\n\nmodel.add(Dense(n_classes))\n\nmodel.add(Activation('softmax'))\nadam = Adam(lr=learning_rate)\nmodel.summary()\nmodel.compile(optimizer=adam,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=training_iters,\n verbose=1,\n validation_data=(x_test, y_test))\n\nscores = model.evaluate(x_test, y_test, verbose=0)\nprint('LSTM test score:', scores[0])\nprint('LSTM test accuracy:', scores[1])"
},
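The (-1, n_step, n_input) reshape in lstm.py is what turns each digit into a sequence: every image row becomes one time step. A small sketch of that interpretation with toy data:

import numpy as np

batch = np.zeros((5, 28, 28), dtype='float32')  # 5 MNIST-sized images
steps, feats = batch.shape[1], batch.shape[2]
print(steps, feats)  # the LSTM sees 28 time steps of 28 features each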
{
"alpha_fraction": 0.535243570804596,
"alphanum_fraction": 0.5564469695091248,
"avg_line_length": 27.606557846069336,
"blob_id": "71df2f8aab5e4c4ff2530e501e9aac4575b7fd64",
"content_id": "4084ddf3e7c46a4bffc1195511c0ee3a2a00f8f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1841,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 61,
"path": "/6.1TensorFlow原理介绍/fat_thin/predict.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmodel_save_path = './model/'\n#获取数据和标签\ndef get_data(path):\n train_data = []\n train_labels = []\n test_labels = []\n with open(path) as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n #设置胖瘦正常的标签,\n if tmp_label == 'fat':\n label = [1,0,0]\n elif tmp_label == 'normal':\n label = [0,1,0]\n elif tmp_label == 'thin':\n label = [0,0,1]\n\n\n train_data.append(tmp_data)\n train_labels.append(label)\n test_labels.append(tmp_label)\n\n return train_data, train_labels, test_labels\n\ndata_path = './bmi.txt'\ntrain_data, train_labels, test_labels = get_data(data_path)\n\n#用占位符定义w,y的大小\nw = tf.Variable(tf.zeros([2,3]), dtype=tf.float32)\nb = tf.Variable(np.zeros([3]), dtype=tf.float32)\n#身高和體重兩列,行數爲[None]\nx=tf.placeholder(tf.float32,[None,2],name = \"x_input\")\ny_=tf.placeholder(tf.float32,name = \"y_predict\")\n#回歸方程表達式\ny=tf.nn.softmax(tf.matmul(x,w) + b)\n\n# 測試模型\npredict_labels = []\nwith tf.Session() as sess:\n count = 0\n true = 0\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(model_save_path))\n for i in range(len(train_labels)):\n result1 = sess.run([y], feed_dict={x: train_data[i: i + 1]})\n # print(result1[0][0])\n result = result1[0][0]\n if max(result) == result[0]:\n print(\"fat\")\n if max(result) == result[1]:\n print('normal')\n if max(result) == result[2]:\n print('thin')\n count += 1\n print(true / count)"
},
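When the labels are kept one-hot, the same accuracy bookkeeping can be vectorized with numpy argmax instead of per-class if/elif chains; a sketch with toy arrays:

import numpy as np

probs = np.array([[0.7, 0.2, 0.1],     # predicted distributions
                  [0.1, 0.3, 0.6]])
onehot = np.array([[1, 0, 0],          # true one-hot labels
                   [0, 0, 1]])
acc = np.mean(np.argmax(probs, 1) == np.argmax(onehot, 1))
print(acc)                             # 1.0 for this toy batch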
{
"alpha_fraction": 0.6629448533058167,
"alphanum_fraction": 0.7145847678184509,
"avg_line_length": 33.975608825683594,
"blob_id": "e3b4a5601c5b8e4210c86ce28c1ee486c1e0b5da",
"content_id": "d8ea3c2ac1fb17bbc409ca4371311099ed6c14ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 41,
"path": "/5.2keras卷积神经网络识别手写数字/卷积神经网络识别手写数字/cnn_clear.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from keras.datasets import mnist\nfrom keras.utils import np_utils\nimport numpy as np\nnp.random.seed(10)\n\n(x_Train, y_Train) , (x_Test, y_Test) = mnist.load_data()\n\nx_Train4D = x_Train.reshape(x_Train.shape[0], 28, 28,1).astype('float32')\nx_Test4D = x_Test.reshape(x_Test.shape[0], 28, 28, 1).astype('float32')\n\nx_Train4D_normalize = x_Train4D / 255\nx_Test4D_normalize = x_Test4D / 255\ny_TrainHot = np_utils.to_categorical(y_Train)\ny_TestHot = np_utils.to_categorical(y_Test)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=16, kernel_size=(5,5), padding='same', input_shape = (28, 28, 1), activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Conv2D(filters=36, kernel_size=(5,5), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n# 查看模型摘要\nprint(model.summary())\n# # 进行训练\n# 定义训练方式\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# 开始训练\ntrain_history = model.fit(x = x_Train4D_normalize, y = y_TrainHot,\n validation_split=0.2, epochs=10, batch_size=300, verbose=2)\n\n# model.save_weights(\"SaveModel/minist_model.h5\")\nmodel.save(\"SaveModel/minist_model_graphic.h5\")"
},
{
"alpha_fraction": 0.5458937287330627,
"alphanum_fraction": 0.5845410823822021,
"avg_line_length": 11.176470756530762,
"blob_id": "5037aede992add0f324128a4a6bbcc632206977d",
"content_id": "f0ae463044c3aff7e92fe9f82e2c27f563e2e4da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 17,
"path": "/6.2tensorflow_minist/add.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\na = 1\nb = 2\nc = a + b \nprint('okok')\nprint('ok1')\nprint(c)\n\n\nimport tensorflow as tf\nsess = tf.Session()\na = tf.constant(10)\nb= tf.constant(12)\nc = a + b\nprint(c)\nprint(sess.run(c))\n"
},
{
"alpha_fraction": 0.6359281539916992,
"alphanum_fraction": 0.656287431716919,
"avg_line_length": 27.32203483581543,
"blob_id": "ae50d5015a95503c73a79280809c207553cc3f03",
"content_id": "8727799eba462527834cd7fd55443fd68461bddb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1670,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 59,
"path": "/3flask/表格与数据库/demo.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom flask import request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Shell, Manager\n\napp = Flask(__name__)\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql://root:[email protected]:3306/xx\"\n# app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://root:[email protected]:3306/xx\"\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://[email protected]:3306/db\"\n# app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://username:password@server/db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['WTF_CSRF_ENABLED'] = False\ndb = SQLAlchemy(app)\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True, index=True)\n age = db.Column(db.Integer, default=18)\n\n def __repr__(self):\n return 'User:%s' % self.name\n\n\[email protected]('/')\ndef demo():\n user = User.query.first()\n name = user.name\n age = user.age\n data = {\n \"name\": name,\n \"age\": age\n }\n\n return render_template(\"index.html\", data=data)\n\n\[email protected]('/login', methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"POST\":\n username = request.form.get(\"username\")\n userage = request.form.get(\"userage\")\n user = User(name=username, age=userage)\n db.session.add(user)\n db.session.commit()\n return render_template(\"login.html\")\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(debug=True)\n manager.run()"
},
{
"alpha_fraction": 0.6071428656578064,
"alphanum_fraction": 0.6142857074737549,
"avg_line_length": 21.157894134521484,
"blob_id": "2deaee4e8f691d4c7d0b39449e41c66a8a916385",
"content_id": "c87f28cf30b0f308702b356a3d54c1c696e4dc16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 19,
"path": "/3flask/表格与数据库/table_test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n\[email protected]('/hello')\ndef hello():\n data = {\n \"name\": \"gdit\",\n \"age\": 100\n }\n # data = None\n return render_template('index.html', data=data)\n\[email protected]('/')\ndef index():\n return \"hello\"\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.6587516665458679,
"alphanum_fraction": 0.6838534474372864,
"avg_line_length": 27.843137741088867,
"blob_id": "56598749160c96b270b469345e33835f3f693843",
"content_id": "40f2b31af1fbf8b7fc6910afbb2a125ecec2b172",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1664,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 51,
"path": "/6.2tensorflow_minist/mnist_dnn_acc.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#自动下载minist数据,读进来。\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n# 定义对应数据占位 [1,784]\n# 定义对应标签占位 [1,10]\nxs = tf.placeholder(tf.float32, [None, 784]) # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\n\ndef predict(xs):\n #定义全连接所需要的权重参数矩阵\n #每一个神经元定义一个偏置\n w = tf.Variable(tf.random_normal([784, 10]), name='W')\n b = tf.Variable(tf.zeros([1, 10]) + 0.1, name='b')\n #out = xs * w + b\n #输入与权重参数相乘再加上偏置\n wx_plus_b = tf.matmul(xs, w) + b\n #激活函数\n prediction = tf.nn.softmax(wx_plus_b)\n print(prediction)\n return prediction\n\nprediction = predict(xs)\n\n\ndef compute_accuracy(prediction, v_xs, v_ys):\n # global prediction\n y_pre = sess.run(prediction, feed_dict={xs: v_xs})\n corrct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))\n accuracy = tf.reduce_mean(tf.cast(corrct_prediction, tf.float32))\n\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})\n return result\n\n#定义会话,进行变量初始化\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n#读一个数据\nbatch_xs, batch_ys = mnist.train.next_batch(1)\n#得到预测结果\nprediction_lable = tf.argmax(prediction, 1)\nresult = sess.run([prediction, prediction_lable], feed_dict={xs: batch_xs})\nprint(result)\nprint(batch_ys)\n\ntrue_lable = tf.argmax(batch_ys, 1)\nresult = sess.run([true_lable])\nprint(result)\n\nprint(compute_accuracy(prediction, mnist.test.images, mnist.test.labels))\n\n\n\n"
},
{
"alpha_fraction": 0.5894308686256409,
"alphanum_fraction": 0.684959352016449,
"avg_line_length": 24.894737243652344,
"blob_id": "48b79866e6f64329d7e89113d5ddac17c92cff3a",
"content_id": "80f10e6e22e0dfdc1e820841a80179c39ee0a595",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 19,
"path": "/6.1TensorFlow原理介绍/tf_calc_demo.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nx1 = tf.placeholder(dtype=tf.float32)\nx2 = tf.placeholder(dtype=tf.float32)\nx3 = tf.placeholder(dtype=tf.float32)\nw1 = tf.Variable(0.6, dtype=tf.float32)\nw2 = tf.Variable(0.3, dtype=tf.float32)\nw3 = tf.Variable(0.1, dtype=tf.float32)\nn1 = x1 * w1\nn2 = x2 * w2\nn3 = x3 * w3\ny = n1 + n2 + n3\nprint(w1)\nprint(x1)\nprint(y)\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nresult = sess.run([y], feed_dict={x1: 90, x2: 80, x3: 70})\nprint(result)\n"
},
{
"alpha_fraction": 0.682692289352417,
"alphanum_fraction": 0.6971153616905212,
"avg_line_length": 22,
"blob_id": "7866cc2007fceb0deb5278ee5600ee2cd69d8b43",
"content_id": "696447f523815141297592e04380b812f13c0a28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 9,
"path": "/6.2tensorflow_minist/.spyproject/workspace.ini",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "[workspace]\nsave_data_on_exit = True\nsave_history = True\nsave_non_project_files = False\nrestore_data_on_startup = True\n\n[main]\nversion = 0.1.0\nrecent_files = ['E:\\\\study\\\\python\\\\教材\\\\demo\\\\test\\\\opencv.py']\n\n"
},
{
"alpha_fraction": 0.6093023419380188,
"alphanum_fraction": 0.6186046600341797,
"avg_line_length": 20.399999618530273,
"blob_id": "44132bad1f9d2cc44f5a05f48766c0cbde672126",
"content_id": "9e86f545aba4af2aa7b426ecd41930277a0d7f2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/6.2tensorflow_minist/list_dir.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import os\npath = r'C:\\Users\\Administrator\\Desktop\\SSD-Tensorflow-master\\demo'\n\nfile_list = os.listdir(path)\nprint(len(file_list))\n\nfor i in file_list:\n if (i[-3:] == 'jpg'):\n print(i)\n # print(i[-3:])\n\n"
},
{
"alpha_fraction": 0.5859982967376709,
"alphanum_fraction": 0.5972342491149902,
"avg_line_length": 24.173913955688477,
"blob_id": "5e29ced8003d27815124c20622502973bc0ad2a2",
"content_id": "6de711ca18327655eafd856c9a9c7fffad6a482a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1177,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 46,
"path": "/3flask/request/test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import face_recognition\nfrom flask import Flask, jsonify, request, redirect\n\napp = Flask(__name__)\n\[email protected]('/', methods=['GET', 'POST'])\ndef upload_image():\n# Check if a valid image file was uploaded\n if request.method == 'POST':\n if 'file' not in request.files:\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n return redirect(request.url)\n\n # print(\"get\")\n\n return'''\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <title>Flask网页上传图片演示</title>\n</head>\n<body>\n <form enctype='multipart/form-data' method='POST'>\n <input type=\"file\" name=\"file\" style=\"margin-top:20px;\"/>\n <input type=\"submit\" value=\"上传\" class=\"button-new\" style=\"margin-top:15px;\"/>\n </form>\n</body>\n</html>\n'''\n\[email protected]('/sendrequest', methods=['GET', 'POST'])\ndef sendrequest():\n print(request.method)\n print(request.args)\n print(type(request.args))\n print('----------------------')\n print(request.form)\n # return 'success'\n return request.method\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5001, debug=True)"
},
{
"alpha_fraction": 0.7147436141967773,
"alphanum_fraction": 0.7147436141967773,
"avg_line_length": 27.363636016845703,
"blob_id": "1228c1ef114ae674bffaf661909777e9bfaa0186",
"content_id": "a10923b7a20cb32a266f24f945fc3774474711ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 454,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 11,
"path": "/3flask/hello_word/html_render.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\nfrom flask import render_template\n\napp = Flask(__name__) # 创建应用实例\n\[email protected]('/') # 使用route装饰器创建一个路由\ndef hello(): # 视图函数,访问此路由时执行的函数\n\treturn render_template('hello.html')\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式\n"
},
{
"alpha_fraction": 0.6512455344200134,
"alphanum_fraction": 0.7325876951217651,
"avg_line_length": 38.36000061035156,
"blob_id": "59ee8460d20986318a63653b496fe64433049c35",
"content_id": "70e05e135f524091c6fb0bbd0676a2db469b4f01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2677,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 50,
"path": "/5.2keras卷积神经网络识别手写数字/卷积神经网络识别手写数字/cnn.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from keras.datasets import mnist\nfrom keras.utils import np_utils\nimport numpy as np\nnp.random.seed(10)\n\n(x_Train, y_Train) , (x_Test, y_Test) = mnist.load_data()\n\nx_Train4D = x_Train.reshape(x_Train.shape[0], 28, 28,1).astype('float32')\nx_Test4D = x_Test.reshape(x_Test.shape[0], 28, 28, 1).astype('float32')\n\nx_Train4D_normalize = x_Train4D / 255\nx_Test4D_normalize = x_Test4D / 255\ny_TrainHot = np_utils.to_categorical(y_Train)\ny_TestHot = np_utils.to_categorical(y_Test)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=16, kernel_size=(5,5), padding='same', input_shape = (28, 28, 1), activation='relu'))\n# 参数说明\n# filter = 16 建立16个滤镜\n# kernel_size = (5,5) 每一个滤镜是5 × 5的大小\n# padding = 'same' 设置卷积运算产生的图像大小不变\n# input_shape = (28, 28, 1) 第一二维代表输入图像的形状是28 × 28,第三维因为是单色灰度图像,所以最后维数值是1\n# activation设置激活函数为relu建立池化层1\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n# 输入参数为pool_size=(2,2),执行第一次缩减采样,将16个28 ×28的图像缩小为16个14 × 14的图像建立卷积层2,将16个图像转化为36个图像,不改变图像大小,仍为14 × 14\nmodel.add(Conv2D(filters=36, kernel_size=(5,5), padding='same', activation='relu'))\n# 加入池化层2,并加入DropOut避免过拟合\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n# 执行第二次缩减采样,将14 × 14图像转换为7 × 7图像\nmodel.add(Dropout(0.25))\n# 加入DropOut(0.25),每次训练时,会在神经网络中随机放弃25%的神经元,避免过拟合建立神经网络(平坦层,隐藏层,输出层)建立平坦层\nmodel.add(Flatten())\n# 将之前步骤建立的池化层2,一共有36个7 × 7的图像转化为一维向量,长度是36 × 7 × 7 = 1764, 也就是1764个float数,对应1764个神经元建立隐藏层,一共128个神经元\nmodel.add(Dense(128, activation='relu'))\n# 把DropOut加入模型中,DropOut(0.5)在每次迭代时候会随机放弃50%的神经元,避免过拟合\nmodel.add(Dropout(0.5))\n# 建立输出层,一共10个单元,对应0-9一共10个数字。使用softmax进行激活\nmodel.add(Dense(10, activation='softmax'))\n# 查看模型摘要\nprint(model.summary())\n# # 进行训练\n# 定义训练方式\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# 开始训练\ntrain_history = model.fit(x = x_Train4D_normalize, y = y_TrainHot,\n validation_split=0.2, epochs=10, batch_size=300, verbose=2)"
},
{
"alpha_fraction": 0.7047244310379028,
"alphanum_fraction": 0.721784770488739,
"avg_line_length": 32.043479919433594,
"blob_id": "00ce84940c10a8818373c27410bf85e5ff7cf8e9",
"content_id": "91d73f27761788d4e675a7fe977ff044322c8d6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 23,
"path": "/6.2tensorflow_minist/read_model.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#自动下载minist数据,读进来。\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n# 加载元图和权重\nsaver = tf.train.import_meta_graph('./model/mninst_dnn-1000.meta')\n\nsess = tf.Session()\nsaver.restore(sess, tf.train.latest_checkpoint(\"./model/\"))\n\ngraph = tf.get_default_graph()\nxs = graph.get_tensor_by_name(\"Placeholder:0\") # w1:张量名 => name\nw = graph.get_tensor_by_name(\"w:0\") # w1:张量名 => name\nprediction = graph.get_tensor_by_name(\"predict:0\") # w1:张量名 => name\n\nprint(w)\nprint(prediction)\n\nbatch_xs, batch_ys = mnist.train.next_batch(1)\n#得到预测结果\nprediction_lable = tf.argmax(prediction, 1)\nresult = sess.run([prediction, prediction_lable], feed_dict={xs: batch_xs})\nprint(result[1])\n\n\n"
},
{
"alpha_fraction": 0.6679536700248718,
"alphanum_fraction": 0.7078506946563721,
"avg_line_length": 28.884614944458008,
"blob_id": "45c481cd710c0e71ef1c081aa3308d71449d14a7",
"content_id": "a20f0a833f265707687e7f07b56893711d7c12dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 967,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 26,
"path": "/6.2tensorflow_minist/dnn_predict.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#自动下载minist数据,读进来。\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n#定义对应数据占位 [1,784]\n#定义对应标签占位 [1,10]\nxs = tf.placeholder(tf.float32, [None, 784]) # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\n#定义全连接所需要的权重参数矩阵\n#每一个神经元定义一个偏置\nw = tf.Variable(tf.random_normal([784, 10]), name='W')\nb = tf.Variable(tf.zeros([1, 10]) + 0.1, name='b')\n#out = xs * w + b\n#输入与权重参数相乘再加上偏置\nwx_plus_b = tf.matmul(xs, w) + b\n#激活函数\nprediction = tf.nn.softmax(wx_plus_b)\nprint(prediction)\n#定义会话,进行变量初始化\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n#读一个数据\nbatch_xs, batch_ys = mnist.train.next_batch(1)\n#得到预测结果\nresult = sess.run([prediction, w, b], feed_dict={xs: batch_xs})\nprint(result)\n"
},
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.5758547186851501,
"avg_line_length": 28.265625,
"blob_id": "ac03e4f5b3772c7a7fc05daf5ed8e1e3d731ca24",
"content_id": "93435a44ff931f49cdeaa0b6fb176b9703f815b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1994,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 64,
"path": "/6.1TensorFlow原理介绍/fat_thin/train2.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata_path = './bmi.txt'\nmodel_save_path = './model/'\n\n#获取数据和标签\ndef get_data(path):\n train_data = []\n train_labels = []\n test_labels = []\n with open(path) as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n #设置胖瘦正常的标签,\n if tmp_label == 'fat':\n label = [1,0,0]\n elif tmp_label == 'normal':\n label = [0,1,0]\n elif tmp_label == 'thin':\n label = [0,0,1]\n\n\n train_data.append(tmp_data)\n train_labels.append(label)\n test_labels.append(tmp_label)\n\n return train_data, train_labels, test_labels\n\ntrain_data, train_labels, test_labels = get_data(data_path)\n\n\n#用占位符定义w,y的大小\nw = tf.Variable(tf.zeros([2,3]), dtype=tf.float32)\nb = tf.Variable(np.zeros([3]), dtype=tf.float32)\n#身高和體重兩列,行數爲[None]\nx=tf.placeholder(tf.float32,[None,2],name = \"x_input\")\ny_=tf.placeholder(tf.float32,name = \"y_predict\")\n#回歸方程表達式\ny=tf.nn.softmax(tf.matmul(x,w) + b)\n#定義交叉熵函數\ncross_encropy=-tf.reduce_mean(y_*tf.log(y))\n#選擇優化器,學習率:0.01\ntrain_step = tf.train.AdamOptimizer(0.01).minimize(cross_encropy)\n\n\n#train\ndef train_model(train_data,train_label):\n with tf.Session() as sess:\n count = 0\n init = tf.global_variables_initializer()\n sess.run(init)\n epoch = 3\n for i in range(epoch):\n for j in range(20000):\n result = sess.run([train_step, y], feed_dict={x: train_data[j: j + 1], y_: train_label[j: j + 1]})\n # print(result)\n saver = tf.train.Saver()\n saver.save(sess, \"./model/my-model\", global_step=epoch)\n print('train finsh')\ntrain_model(train_data,train_labels)"
},
{
"alpha_fraction": 0.45348837971687317,
"alphanum_fraction": 0.45348837971687317,
"avg_line_length": 13.5,
"blob_id": "4acabffa78dc740136d4d0ff6ee263ab474e3f64",
"content_id": "44ec61e4c7bb5c38dd3db1b6a31cd13fe63cccb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 6,
"path": "/3flask/个人主页/ditc_test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "data = {\n \"user\":\"gdit\",\n \"passwd\":123456\n }\n\nprint(data[\"user\"])"
},
{
"alpha_fraction": 0.5752426981925964,
"alphanum_fraction": 0.5983009934425354,
"avg_line_length": 20.710525512695312,
"blob_id": "eb85d7f2ea09694685c918e1e44ea2fbe2f76e10",
"content_id": "24338ba7050885701bc39b2b4b0c852a0e698d17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 986,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 38,
"path": "/3flask/表格与数据库插入与查询/数据库/show_sql.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import pymysql\n\n# 连接database\nconn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n # db=\"cov\",\n charset=\"utf8\")\n\n# 获取一个光标\ncursor = conn.cursor(cursor=pymysql.cursors.DictCursor) # 返回字典数据类型\n\n# 定义将要执行的sql语句\n# sql = 'select user,pwd from student;'\n# sql = 'select user from student;'\nsql = 'select * from student;'\n# 拼接并执行sql语句\ncursor.execute(sql)\n\n# 取到查询结果\nret1 = cursor.fetchone() # 取一条\nret2 = cursor.fetchmany(3) # 取三条\nret3 = cursor.fetchone() # 取一条\nprint(ret1)\nprint(ret2)\nprint(ret3)\n\ncursor.close()\nconn.close()\n\n#\n# # 可以获取指定数量的数据\n# cursor.fetchmany(3)\n# # 光标按绝对位置移动1\n# cursor.scroll(1, mode=\"absolute\")\n# # 光标按照相对位置(当前位置)移动1\n# cursor.scroll(1, mode=\"relative\")"
},
{
"alpha_fraction": 0.6332288384437561,
"alphanum_fraction": 0.6379310488700867,
"avg_line_length": 21.821428298950195,
"blob_id": "325bd7f55fe1f6c5a0762f48a56f916daded50d9",
"content_id": "2cd11efef54c76e81147401a4e7132f43893a0ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 726,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 28,
"path": "/3flask/个人主页/app.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n# import request\nfrom flask import request\n\[email protected]('/pos_test')\ndef pos_test():\n print(request.form)\n # print(request.args[\"name\"])\n return \"hello\"\n\[email protected]('/get_test')\ndef get_test():\n print(request.args)\n print(request.args[\"name\"])\n return request.args[\"name\"]\n\[email protected]('/')\ndef index():\n data = {\n \"name\": \"gdit\",\n \"age\": 100\n }\n return render_template('table.html', data=data)\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.6339622735977173,
"alphanum_fraction": 0.6339622735977173,
"avg_line_length": 18,
"blob_id": "e4d6e13a6818dc62eaad5dc8ac9418f0614ab9be",
"content_id": "9b36d9cbcdc8e8fb6f7529a6a2a9b865a0a3bc71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 14,
"path": "/web_mnist/render_html.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, jsonify\n\n\n\napp = Flask(__name__)\n\[email protected]('/', methods=['POST', 'GET'])\ndef hello_world():\n print(request.method)\n return render_template('upload.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)"
},
{
"alpha_fraction": 0.6239941716194153,
"alphanum_fraction": 0.6642282605171204,
"avg_line_length": 30.022727966308594,
"blob_id": "37278379c5725a46e3c216f55c4a78ee01fda7fe",
"content_id": "ce9bb552ca4f01189bbef2cfd8f1acd09dc2dfee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1397,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 44,
"path": "/5.1Keras 多层感知器识别手写数字/train_mnist.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\nfrom keras.utils import np_utils\nimport numpy as np\nimport os\nnp.random.seed(10)\nfrom keras.datasets import mnist\n(x_train_image, y_train_label), (x_test_image, y_test_label) = mnist.load_data()\n\n#60000x28x28\n#转变成二维转为一维向量\nx_Train = x_train_image.reshape(60000, 784).astype('float32')\nx_Test = x_test_image.reshape(10000, 784).astype('float32')\nx_Train_normalize = x_Train / 255\nx_Test_normalize = x_Test / 255\n\ny_Train_OneHot = np_utils.to_categorical(y_train_label)\ny_Test_OneHot = np_utils.to_categorical(y_test_label)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nmodel = Sequential()\nmodel.add(Dense(units=256,\n input_dim=784,\n kernel_initializer='normal',\n activation='relu'))\n\nmodel.add(Dense(units=10,\n kernel_initializer='normal',\n activation='softmax'))\nprint(model.summary())\n\n# # 训练模型\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\ntrain_history = model.fit(x=x_Train_normalize,\n y=y_Train_OneHot, validation_split = 0.2,\n epochs = 10, batch_size = 200, verbose = 2)\n\nscores = model.evaluate(x_Test_normalize, y_Test_OneHot)\nprint('accuracy=', scores[1])\nprediction = model.predict_classes(x_Test)\nprint(prediction)\n\n\n"
},
{
"alpha_fraction": 0.42811501026153564,
"alphanum_fraction": 0.46805110573768616,
"avg_line_length": 17.382352828979492,
"blob_id": "30a3defd7a1b35d630276be3d4f613e6a60728d7",
"content_id": "39892dedea291825f414328c0caeea4770ec271e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 34,
"path": "/3flask/表格与数据库/数据库/insert_sql.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import pymysql\n\n# 连接database\nconn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n # db=\"cov\",\n db=\"gdit_student\",\n charset=\"utf8\")\n# 获取一个光标\ncursor = conn.cursor()\n\n# 定义要执行的sql语句\nsql = 'insert into userinfo(user,pwd) values(%s,%s);'\n# data = [\n# ('july', '147'),\n# ('june', '258'),\n# ('marin', '369')\n# ]\n\ndata = [\n ('a', '147'),\n ('b', '258'),\n ('c', '369')\n]\n# 拼接并执行sql语句\ncursor.executemany(sql, data)\n\n# 涉及写操作要注意提交\nconn.commit()\n\n# 关闭连接\ncursor.close()\nconn.close()\n\n"
},
{
"alpha_fraction": 0.5395683646202087,
"alphanum_fraction": 0.586844801902771,
"avg_line_length": 28.439393997192383,
"blob_id": "eef4c973397fb7bc4d15838ba5bbc8ed647c2cd4",
"content_id": "129b8f0ac851274d9e0918733b7332565d570668",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2078,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 66,
"path": "/6.1TensorFlow原理介绍/fat_thin/train.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n# [180,70] * [2][3]\n# fat = 180 * w11 + 70 *w21 + b1 = 85\n# thin = 180 * w12 + 70 *w22 + b2 = 65\n# normal = 180 * w13 + 70 *w23 +b3 = 40\n#获取数据和标签\ndef get_data(path):\n train_data = []\n train_labels = []\n test_labels = []\n with open(path) as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n #设置胖瘦正常的标签,\n if tmp_label == 'fat':\n label = [1,0,0]\n elif tmp_label == 'normal':\n label = [0,1,0]\n elif tmp_label == 'thin':\n label = [0,0,1]\n\n train_data.append(tmp_data)\n train_labels.append(label)\n test_labels.append(tmp_label)\n\n return train_data, train_labels, test_labels\ndata_path = './bmi.txt'\ntrain_data, train_labels, test_labels = get_data(data_path)\n#用占位符定义w,y的大小\nw = tf.Variable(tf.zeros([2,3]), dtype=tf.float32)\nb = tf.Variable(np.zeros([3]), dtype=tf.float32)\n#身高和體重兩列,行數爲[None]\nx=tf.placeholder(tf.float32,[None,2],name = \"x_input\")\n#回歸方程表達式\ny=tf.nn.softmax(tf.matmul(x,w) + b) #预测值\n\ny_=tf.placeholder(tf.float32,name = \"y_label\")#标签 [0,0,1]\nprint(y)\n\nmax_value = tf.argmax(y, 1)\n\n#定義交叉熵函數\nloss=-tf.reduce_mean(y_*tf.log(y))\n# loss = (y - y_)\n\n#選擇優化器,學習率:0.01\n# train_step = tf.train.AdamOptimizer(0.01).minimize(cross_encropy)\ntrain_step = tf.train.AdamOptimizer(0.01).minimize(loss)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\n# result = sess.run([y], feed_dict={x: [[180,40]]})\n# print(result)\nsess.run(init)\nfor j in range(2000):\n print(train_data[j: j + 1])\n print(train_labels[j: j + 1])\n result = sess.run([train_step,y], feed_dict={x: train_data[j: j + 1], y_: train_labels[j: j + 1]})\n print(result)\n\nsaver = tf.train.Saver()\nsaver.save(sess, \"./model/new_model\")\n\n\n\n"
},
{
"alpha_fraction": 0.540669858455658,
"alphanum_fraction": 0.5511961579322815,
"avg_line_length": 26.526315689086914,
"blob_id": "546fe56fee58f1962cd9f37a060406595f3761bd",
"content_id": "12c1f4c9538aa3e7fb5afc9424537c4b148104e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1153,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 38,
"path": "/3flask/表格与数据库插入与查询/web_show_database.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\nimport pymysql\n\ndef select_student():\n # 连接database\n conn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n # db=\"cov\",\n charset=\"utf8\")\n\n # 获取一个光标\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor) # 返回字典数据类型\n\n # 定义将要执行的sql语句\n # sql = 'select user,pwd from student;'\n # sql = 'select user from student;'\n sql = 'select * from student;'\n # 拼接并执行sql语句\n cursor.execute(sql)\n\n # 取到查询结果\n ret1 = cursor.fetchone() # 取一条\n # ret2 = cursor.fetchmany(3) # 取三条\n cursor.close()\n conn.close()\n return ret1\n\[email protected]('/',methods=[\"GET\", \"POST\"])\ndef index():\n one_info = select_student()\n print(one_info)\n return render_template('show_table.html', data= one_info)\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.5416666865348816,
"alphanum_fraction": 0.6385658979415894,
"avg_line_length": 34.517242431640625,
"blob_id": "851ce062dad89018201cf7edcf38a554b3a03372",
"content_id": "1d2c5be880470bd226121de2d6236425b0e1a648",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1032,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 29,
"path": "/6.1TensorFlow原理介绍/calc_train.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nx1 = tf.placeholder(dtype=tf.float32)\nx2 = tf.placeholder(dtype=tf.float32)\nx3 = tf.placeholder(dtype=tf.float32)\nyTrain = tf.placeholder(dtype=tf.float32)\nw1 = tf.Variable(0.0, dtype=tf.float32)\nw2 = tf.Variable(0.0, dtype=tf.float32)\nw3 = tf.Variable(0.0, dtype=tf.float32)\nn1 = x1 * w1\nn2 = x2 * w2\nn3 = x3 * w3\ny = n1 + n2 + n3\nloss = tf.abs(y - yTrain)\noptimizer = tf.train.RMSPropOptimizer(0.001)\n\ntrain = optimizer.minimize(loss)\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(5000):\n # result = sess.run([train,loss,y], feed_dict={x1: 90, x2: 80, x3: 70, yTrain: 85})\n # print(result)\n result = sess.run([train, x1, x2, x3, w1, w2, w3, y, yTrain, loss], feed_dict={x1: 90, x2: 80, x3: 70, yTrain: 85})\n # result = sess.run([train, x1, x2, x3, w1, w2, w3, y, yTrain, loss], feed_dict={x1: 98, x2: 95, x3: 87, yTrain: 96})\n print(result)\nprint(\"-------------------\")\nresult = sess.run([y], feed_dict={x1: 90, x2: 80, x3: 70})\nprint(result)\n\n\n"
},
{
"alpha_fraction": 0.6115108132362366,
"alphanum_fraction": 0.6618704795837402,
"avg_line_length": 16.25,
"blob_id": "1d7aed219cce1096f7901362631f8f348e584dab",
"content_id": "2dfa7ea1b8676aab087c8748066f3ff786b2eb81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 8,
"path": "/6.2tensorflow_minist/opencv.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport cv2\nimport tensorflow as tf\nprint(cv2.__version__)\nprint(tf.__version__)\n\n#pip install tensorflow==1.14.0\n\n"
},
{
"alpha_fraction": 0.5672268867492676,
"alphanum_fraction": 0.5777310729026794,
"avg_line_length": 20.68181800842285,
"blob_id": "450033b6db14a0a23e3c93a1a5dffde82a31d326",
"content_id": "51855d9f88b011b5c1fd6c98565817092cd36bcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 586,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 22,
"path": "/3flask/表格与数据库/数据库/create_table2.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# 导入pymysql模块\nimport pymysql\n# 连接database\nconn = pymysql.connect(host=\"localhost\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n charset=\"utf8\")\n# 得到一个可以执行SQL语句的光标对象\ncursor = conn.cursor() # 执行完毕返回的结果集默认以元组显示\nsql = \"\"\"\nCREATE TABLE student (\nuser CHAR(20),\npwd int(10)\n); \"\"\"\nprint(\"create table\")\n# 执行SQL语句\ncursor.execute(sql)\n# 关闭光标对象\ncursor.close()\n# 关闭数据库连接\nconn.close()"
},
{
"alpha_fraction": 0.6144781112670898,
"alphanum_fraction": 0.6144781112670898,
"avg_line_length": 28.75,
"blob_id": "1da7b52f66788f427b3ef72ebdb691984e924557",
"content_id": "9df3e0d4e728e41b94a32260d30ca308a70a5d95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 20,
"path": "/3flask/表格与数据库插入与查询/web_main.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\nfrom flask import request\n\[email protected]('/',methods=[\"GET\", \"POST\"])\ndef index():\n print(request.method)\n if request.method == \"POST\":\n get_form = request.form\n print(get_form)\n username = request.form.get(\"user\")\n pwd = request.form.get(\"pwd\")\n print(username)\n print(pwd)\n return render_template('index.html', data=get_form)\n else: # get请求\n return render_template('login.html')\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.6364741921424866,
"alphanum_fraction": 0.6626139879226685,
"avg_line_length": 31.176469802856445,
"blob_id": "2d088eb7a4e582f5900bc7b82143aa689b6ece5e",
"content_id": "f7ed95d790bb274c7c1b77cc609ff1c5598b361c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1839,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 51,
"path": "/6.2tensorflow_minist/dnn_train2.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#自动下载minist数据,读进来。\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n# 定义对应数据占位 [1,784]\n# 定义对应标签占位 [1,10]\nxs = tf.placeholder(tf.float32, [None, 784]) # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\n\ndef predict(xs):\n #定义全连接所需要的权重参数矩阵\n #每一个神经元定义一个偏置\n w = tf.Variable(tf.random_normal([784, 10]), name='w')\n b = tf.Variable(tf.zeros([1, 10]) + 0.1, name='b')\n #out = xs * w + b\n #输入与权重参数相乘再加上偏置\n wx_plus_b = tf.matmul(xs, w) + b\n #激活函数\n prediction = tf.nn.softmax(wx_plus_b)\n print(prediction)\n print(w)\n return prediction\n\nprediction = predict(xs)\n\n# 损失函数\n#loss = tf.abs(y - yTrain)\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n#定义会话,进行变量初始化\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n\ndef compute_accuracy(prediction, v_xs, v_ys):\n # global prediction\n y_pre = sess.run(prediction, feed_dict={xs: v_xs})\n corrct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))\n accuracy = tf.reduce_mean(tf.cast(corrct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})\n return result\n\n#运行训练代码\nfor i in range(2):\n batch_xs, batch_ys = mnist.train.next_batch(64)\n result = sess.run([train_step, prediction], feed_dict={xs: batch_xs, ys:batch_ys})\n # print(result)\n if i % 50 == 0:\n acc = compute_accuracy(prediction, mnist.test.images, mnist.test.labels)\n print(\"准确率 = \" + str(acc))\n\n\n\n\n"
},
{
"alpha_fraction": 0.6567679643630981,
"alphanum_fraction": 0.6885359287261963,
"avg_line_length": 31.93181800842285,
"blob_id": "541c6da88e7a87e5567d71c808fa90b3b178ec2d",
"content_id": "309ccd9aea72efe0c608b036d487fefb1a4c3b33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1628,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 44,
"path": "/6.2tensorflow_minist/dnn_train.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#自动下载minist数据,读进来。\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n#定义对应数据占位 [1,784]\n#定义对应标签占位 [1,10]\nxs = tf.placeholder(tf.float32, [None, 784]) # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\n#定义全连接所需要的权重参数矩阵\n#每一个神经元定义一个偏置\nw = tf.Variable(tf.random_normal([784, 10]), name='W')\nb = tf.Variable(tf.zeros([1, 10]) + 0.1, name='b')\n#out = xs * w + b\n#输入与权重参数相乘再加上偏置\nwx_plus_b = tf.matmul(xs, w) + b\n#激活函数\nprediction = tf.nn.softmax(wx_plus_b)\nprint(prediction)\n\n# the error between prediction and real data\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),reduction_indices=[1]))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n#定义会话,进行变量初始化\nsess = tf.Session()\nsess.run(tf.initialize_all_variables())\n\n#得到预测结果\ndef compute_accuracy(v_xs, v_ys):\n global prediction\n y_pre = sess.run(prediction, feed_dict={xs: v_xs})\n corrct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))\n accuracy = tf.reduce_mean(tf.cast(corrct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})\n return result\n\nfor i in range(2000):\n batch_xs, batch_ys = mnist.train.next_batch(64)\n sess.run(train_step, feed_dict={xs: batch_xs, ys:batch_ys})\n if i % 50 == 0:\n print(compute_accuracy(\n mnist.test.images, mnist.test.labels\n ))"
},
{
"alpha_fraction": 0.5252100825309753,
"alphanum_fraction": 0.6148459315299988,
"avg_line_length": 19.399999618530273,
"blob_id": "0467151448caed47935cb10953143f8c7ed92bb4",
"content_id": "4884dbc8e34f7fbfc926b5467ee47c6637cce4e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 35,
"path": "/6.1TensorFlow原理介绍/xiangliang.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\n#x = tf.placeholder(shape=[3], dtype=tf.float32)\n\nx = tf.placeholder(dtype=tf.float32)\nxShape = tf.shape(x)\n\nw = tf.Variable(tf.zeros([3]), dtype=tf.float32)\n\n#w = tf.Variable(dtype=tf.float32)\n\nn = x * w\n\n#y = n1 + n2 + n3\ny = tf.reduce_sum(n)\n\nprint(y)\n\nsess = tf.Session()\n# \ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# result = sess.run([y], feed_dict={x1: [90,98], x2: [80,95], x3: [70,87]})\n# print(result)\n\nresult = sess.run([y], feed_dict={x: [90,80,70],w:[0.6,0.3,0.1]})\nprint(result)\n\n# result = sess.run([y], feed_dict={x1: 98, x2: 95, x3: 87})\n# print(result)\n\n\n#result = sess.run([x1, x2, x3, w1, w2, w3, y], feed_dict={x1: 90, x2: 80, x3: 70})\n#print(result)\n"
},
{
"alpha_fraction": 0.49806421995162964,
"alphanum_fraction": 0.5160555839538574,
"avg_line_length": 28.086091995239258,
"blob_id": "7b369bb1ccb0d8503757fe285437365ca632c872",
"content_id": "8674d7b9735158a0d7238448ecf2b99fb87d6ffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4713,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 151,
"path": "/6.1TensorFlow原理介绍/fat_thin/test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\n\n\n# #獲取數據\n# def get_data(path):\n# data = []\n# labels = []\n# with open(path) as ifile:\n# for line in ifile:\n# tokens = line.strip().split(' ')\n# tmp_data = [int(tk) for tk in tokens[:-1]]\n# tmp_label = tokens[-1]\n# #设置胖瘦正常的标签\n# if tmp_label == 'fat':\n# label = [1,0,0]\n# elif tmp_label == 'normal':\n# label = [0,1,0]\n# elif tmp_label == 'thin':\n# label = [0,0,1]\n#\n# data.append(tmp_data)\n# labels.append(label)\n# return data,labels\n# path = './bmi.txt'\n# train_data, train_label = get_data(path)\n#\n# '''\n# 假设胖瘦与体重和身高呈回归方程关系,设为y = w * x + b\n# 身高、体重两列,行数设置为None,\n# 因为x*w是矩阵运算,第一个矩阵的列数一定要和第二个矩阵的行数相等,\n# y输出是瘦、正常、胖的各自的概率,所以w的列数就是3\n# '''\n#\n#用占位符定义w,y的大小\nw = tf.Variable(tf.zeros([2,3]), dtype=tf.float32)\nb = tf.Variable(np.zeros([3]), dtype=tf.float32)\n#身高和體重兩列,行數爲[None]\nx=tf.placeholder(tf.float32,[None,2],name = \"x_input\")\ny_=tf.placeholder(tf.float32,name = \"y_predict\")\n#回歸方程表達式\ny=tf.nn.softmax(tf.matmul(x,w) + b)\n#定義交叉熵函數\ncross_encropy=-tf.reduce_mean(y_*tf.log(y))\n#選擇優化器,學習率:0.01\ntrain_step = tf.train.AdamOptimizer(0.01).minimize(cross_encropy)\n#\n#\n# #訓練模型\n# with tf.Session() as sess:\n# count = 0\n# init = tf.global_variables_initializer()\n# sess.run(init)\n# epoch = 3\n# for i in range(epoch):\n# for j in range(20000):\n# result = sess.run([train_step, y], feed_dict={x: train_data[j: j + 1], y_: train_label[j: j + 1]})\n#\n# saver = tf.train.Saver()\n# saver.save(sess, \"./model/my-model\", global_step=epoch)\n# print('train finsh')\n#\n#\n# # 測試模型\n# with tf.Session() as sess:\n# saver = tf.train.Saver()\n# with open(path) as ifile:\n# count = 0\n# true = 0\n# for line in ifile:\n#\n# predict = ''\n# saver.restore(sess, tf.train.latest_checkpoint(\"./model/\"))\n# tokens = line.strip().split(' ')\n# tmp_data = [int(tk) for tk in tokens[:-1]]\n# result = sess.run([train_step,y], feed_dict = {x: [tmp_data], y_: [None]})[1][0]\n# if max(result) == result[0]:\n# predict = 'fat'\n# if max(result) == result[1]:\n# predict = 'normal'\n# if max(result) == result[2]:\n# predict = 'thin'\n# if tokens[-1] == predict:\n# true+= 1\n# count += 1\n#\n# print(true/count)\n\n\n\nimport matplotlib.pyplot as plt\n''' 数据读入 '''\ndata = []\nlabels = []\ncount = 0\nwith open(\"bmi.txt\") as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n if count < 500:\n with tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(\"./model/\"))\n result = sess.run([train_step, y], feed_dict={x: [tmp_data], y_: [None]})[1][0]\n if max(result) == result[0]:\n predict = 'fat'\n if max(result) == result[1]:\n predict = 'normal'\n if max(result) == result[2]:\n predict = 'thin'\n if tokens[-1] != predict:\n tmp_label = 0\n count += 1\n print(count)\n\n\n data.append(tmp_data)\n labels.append(tmp_label)\ndata_input = np.array(data)\nlabels = np.array(labels)\nlabel_num= np.size(labels)\n\nlabel_transfer = np.zeros(label_num)\n\n''' 标签转换为1/2/3 '''\nfor num in range(label_num):\n if labels[num] == 'fat':\n label_transfer[num] = 3\n elif labels[num] == 'normal':\n label_transfer[num] = 2\n elif labels[num] == 'thin':\n label_transfer[num] = 1\n\n# print(data_input)\n\nx = data_input[:,0]\ny = data_input[:,1]\n\nfor indx in range(500):\n if (label_transfer[indx] == 3):\n plt.scatter(x[indx], y[indx], c='red', marker='o')\n elif (label_transfer[indx] == 2):\n plt.scatter(x[indx], y[indx], c='blue', marker='o')\n 
elif (label_transfer[indx] == 1):\n plt.scatter(x[indx], y[indx], c='green', marker='o')\n elif (label_transfer[indx] == 0):\n plt.scatter(x[indx], y[indx], c='black', marker='o')\n\n# print(label_transfer)\nplt.show()"
},
{
"alpha_fraction": 0.5482333302497864,
"alphanum_fraction": 0.5670218467712402,
"avg_line_length": 28,
"blob_id": "e7eafe6b93450921fd5260f193e7f10381cde89b",
"content_id": "8b59f229e5199e1d75e89875e0a6863652c0eece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3706,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 123,
"path": "/6.1TensorFlow原理介绍/fat_thin/test2.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata_path = './bmi.txt'\nmodel_save_path = './model/'\n\n#获取数据和标签\ndef get_data(path):\n train_data = []\n train_labels = []\n test_labels = []\n with open(path) as ifile:\n for line in ifile:\n tokens = line.strip().split(' ')\n tmp_data = [int(tk) for tk in tokens[:-1]]\n tmp_label = tokens[-1]\n #设置胖瘦正常的标签,\n if tmp_label == 'fat':\n label = [1,0,0]\n elif tmp_label == 'normal':\n label = [0,1,0]\n elif tmp_label == 'thin':\n label = [0,0,1]\n\n\n train_data.append(tmp_data)\n train_labels.append(label)\n test_labels.append(tmp_label)\n\n return train_data, train_labels, test_labels\n\ntrain_data, train_labels, test_labels = get_data(data_path)\n\n\n#用占位符定义w,y的大小\nw = tf.Variable(tf.zeros([2,3]), dtype=tf.float32)\nb = tf.Variable(np.zeros([3]), dtype=tf.float32)\n#身高和體重兩列,行數爲[None]\nx=tf.placeholder(tf.float32,[None,2],name = \"x_input\")\ny_=tf.placeholder(tf.float32,name = \"y_predict\")\n#回歸方程表達式\ny=tf.nn.softmax(tf.matmul(x,w) + b)\n#定義交叉熵函數\ncross_encropy=-tf.reduce_mean(y_*tf.log(y))\n#選擇優化器,學習率:0.01\ntrain_step = tf.train.AdamOptimizer(0.01).minimize(cross_encropy)\n\n\n#train\ndef train_model(train_data,train_label):\n with tf.Session() as sess:\n count = 0\n init = tf.global_variables_initializer()\n sess.run(init)\n epoch = 3\n for i in range(epoch):\n for j in range(20000):\n result = sess.run([train_step, y], feed_dict={x: train_data[j: j + 1], y_: train_label[j: j + 1]})\n # print(result)\n saver = tf.train.Saver()\n saver.save(sess, \"./model/my-model\", global_step=epoch)\n print('train finsh')\ntrain_model(train_data,train_labels)\n\n\n# 測試模型\npredict_labels = []\nwith tf.Session() as sess:\n count = 0\n true = 0\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(model_save_path))\n for i in range(len(train_labels)):\n result = sess.run([train_step, y], feed_dict={x: train_data[i: i + 1], y_: train_labels[i: i + 1]})[1][0]\n if max(result) == result[0]:\n predict = ['fat']\n if max(result) == result[1]:\n predict = ['normal']\n if max(result) == result[2]:\n predict = ['thin']\n if test_labels[i: i + 1] == predict:\n true += 1\n else:\n predict = ['error']\n predict_labels.append(predict)\n count += 1\n print(true / count)\n\n\n\ntrain_data = np.array(train_data)\npredict_labels = np.array(predict_labels)\nlabel_num= np.size(predict_labels)\nlabel_transfer = np.zeros(label_num)\n''' 标签转换为1/2/3 '''\nfor num in range(label_num):\n if predict_labels[num] == 'fat':\n label_transfer[num] = 3\n elif predict_labels[num] == 'normal':\n label_transfer[num] = 2\n elif predict_labels[num] == 'thin':\n label_transfer[num] = 1\n elif predict_labels[num] == 'error':\n label_transfer[num] = 0\n\n\n\nx = train_data[:,0]\ny = train_data[:,1]\n\nfor indx in range(1000):\n if (label_transfer[indx] == 3):\n plt.scatter(x[indx], y[indx], c='red', marker='.')\n elif (label_transfer[indx] == 2):\n plt.scatter(x[indx], y[indx], c='blue', marker='.')\n elif (label_transfer[indx] == 1):\n plt.scatter(x[indx], y[indx], c='green', marker='.')\n elif (label_transfer[indx] == 0):\n plt.scatter(x[indx], y[indx], c='black', marker='x')\n\n\nplt.show()"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 29.81818199157715,
"blob_id": "215f227c7e923468bb49c5f4a4d37e3aa6479fc4",
"content_id": "fb69374c9421c1d4ed9a0589387c1f00bb2d6820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 426,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/3flask/varable_test/test.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n return render_template('hello_name.html', name=name)\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.6595191955566406,
"alphanum_fraction": 0.6868095993995667,
"avg_line_length": 23.41269874572754,
"blob_id": "abe6d0fdf3cbd48ca0e8b4f82be878a1888c8bea",
"content_id": "f48e0b52d3e23017fa7c58402c2f369c217fa879",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1649,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 63,
"path": "/5.3keras循环神经网络识别手写数字/lstm_part.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import keras\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Activation\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\nlearning_rate = 0.001\n\n\ndisplay_step = 10\n\n\nn_classes = 10\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\nn_input = 28\nn_step = 28\nn_hidden = 128\n\n\nx_train = x_train.reshape(-1, n_step, n_input)\nprint(x_train.shape)\nx_train = x_train.reshape(-1, n_step, n_input)\nx_test = x_test.reshape(-1, n_step, n_input)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\ny_train = keras.utils.to_categorical(y_train, n_classes)\ny_test = keras.utils.to_categorical(y_test, n_classes)\n\nmodel = Sequential()\n#LSTM输入数据维度,n_step时序的维度,n_hidden表示输出数据维度\nmodel.add(LSTM(n_hidden, batch_input_shape=(None, n_step, n_input),\n unroll=True))\nmodel.add(Dense(10))\n\nmodel.summary()\n\n#每次更新参数的变化快慢 = 梯度 * 学习率\nlearning_rate = 0.001\nadam = Adam(lr=learning_rate)\nmodel.compile(optimizer=adam,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nbatch_size = 128 #一次性读进的图片张数\ntraining_iters = 5 #训练数据集的次数\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=training_iters,\n verbose=1,\n validation_data=(x_test, y_test))\n\nscores = model.evaluate(x_test, y_test, verbose=0)\nprint('LSTM test score:', scores[0])\nprint('LSTM test accuracy:', scores[1])\n\n"
},
{
"alpha_fraction": 0.6168830990791321,
"alphanum_fraction": 0.6558441519737244,
"avg_line_length": 22.69230842590332,
"blob_id": "e1d7951abe71012e63a2c001e794c696012b4e1b",
"content_id": "05d298d22cbb8d53c49ed6e7a158b868af95f8c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 13,
"path": "/6.1TensorFlow原理介绍/x_shape.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\n\nx = tf.placeholder(dtype=tf.float32)\nxShape = tf.shape(x)\n\nsess = tf.Session()\nresult = sess.run(xShape, feed_dict={x: 8})\nprint(result)\nresult = sess.run(xShape, feed_dict={x: [1, 2, 3]})\nprint(result)\nresult = sess.run(xShape, feed_dict={x: [[1, 2, 3], [3, 6, 9]]})\nprint(result)\n"
},
{
"alpha_fraction": 0.6322722434997559,
"alphanum_fraction": 0.6783754229545593,
"avg_line_length": 29.399999618530273,
"blob_id": "097a780de5f833455017f757cdea0501999dc24e",
"content_id": "b2f229a9a908e92811645673f680bc54d800fb6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1019,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/web_face/get_cameral_img.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\nfrom flask import request\nimport base64\nimport numpy as np\nimport cv2\n\[email protected]('/ajax',methods=[\"get\",\"post\"])\ndef hello_world4():\n # name = request.values.get(\"name\")\n print(request.values)\n img_base64 = request.values.get(\"img\")\n print(img_base64[22:])\n img_base64_encode = img_base64[22:]\n img_b64decode = base64.b64decode(img_base64_encode) # base64解码\n print(img_b64decode)\n img_array = np.fromstring(img_b64decode, np.uint8) # 转换np序列\n img = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB) # 转换Opencv格式\n cv2.imwrite(\"./save.jpg\",img)\n cv2.imshow(\"img\", img)\n cv2.waitKey()\n return '10000'\n\[email protected]('/')\ndef index():\n return render_template('camera.html')\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.5185490250587463,
"alphanum_fraction": 0.5408079028129578,
"avg_line_length": 25.9777774810791,
"blob_id": "54cab8733fbb6df48f16db2684c948bbdb357d26",
"content_id": "a4975ba84812b737195f939b1584268cc7721c1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1321,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 45,
"path": "/3flask/表格与数据库/web_show_database.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\nimport pymysql\n\ndef select_student():\n # 连接database\n conn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"gdit_student\",\n # db=\"cov\",\n charset=\"utf8\")\n\n # 获取一个光标\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor) # 返回字典数据类型\n\n # 定义将要执行的sql语句\n # sql = 'select user,pwd from student;'\n # sql = 'select user from student;'\n sql = 'select * from student;'\n # 拼接并执行sql语句\n cursor.execute(sql)\n\n # 取到查询结果\n # ret1 = cursor.fetchone() # 取一条\n ret1 = cursor.fetchmany(3) # 取三条\n cursor.close()\n conn.close()\n return ret1\n\[email protected]('/',methods=[\"GET\", \"POST\"])\ndef index():\n ret_info = select_student()\n print(ret_info)\n data1 = ret_info[0]\n data2 = ret_info[1]\n data3 = ret_info[2]\n data_set = {\n \"user\": \"gdit\",\n \"pwd\": 100\n }\n return render_template('show_table2.html', data1 = data1, data2 = data2, data3 = data3)\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.7061855792999268,
"alphanum_fraction": 0.8298969268798828,
"avg_line_length": 26.714284896850586,
"blob_id": "52132c38a92cd0c96a9c892bd1f1bb93845b3beb",
"content_id": "cc3a6690bd0fdbdc542d2dda9e2d1513704e356d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 7,
"path": "/README.md",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "1.掌握深度学习图像处理(基于keras、tensorflow、opencv)\n2.掌握web前后端设计(基 于flask框架)\n3.开发基于web端的深度学习图像,把web端应用与人工智能相结合\n\n欢迎加入人工智能算法应用开发,群号码:971601256\n视频教程:\nhttps://edu.csdn.net/course/detail/28400/391614?pre_view=1\n"
},
{
"alpha_fraction": 0.547694742679596,
"alphanum_fraction": 0.5532591342926025,
"avg_line_length": 27.613636016845703,
"blob_id": "80c728a9de48fa0e929d90079a7d110b273449ec",
"content_id": "c9746d723d4751d38283d09a42f6b13ec6ea58b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 44,
"path": "/3flask/表格与数据库/web_database_main.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\napp = Flask(__name__) # 创建应用实例\nfrom flask import render_template\nfrom flask import request\nimport pymysql\n\ndef insert_data_database(input_username, input_pwd):\n # 连接database\n conn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n # db=\"cov\",\n db=\"gdit_student\",\n charset=\"utf8\")\n # 获取一个光标\n cursor = conn.cursor()\n\n sql = 'insert into student (user,pwd) values (%s,%s);'\n name = input_username\n pwd = input_pwd\n cursor.execute(sql, [name, pwd])\n\n # 涉及写操作要注意提交\n conn.commit()\n # 关闭连接\n cursor.close()\n conn.close()\n\[email protected]('/',methods=[\"GET\", \"POST\"])\ndef index():\n print(request.method)\n if request.method == \"POST\":\n get_form = request.form\n print(get_form)\n username = request.form.get(\"user\")\n pwd = request.form.get(\"pwd\")\n print(username)\n print(pwd)\n insert_data_database(username, pwd)\n return render_template('show_table.html', data=get_form)\n else: # get请求\n return render_template('login.html')\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.6263048052787781,
"alphanum_fraction": 0.6560542583465576,
"avg_line_length": 28,
"blob_id": "b776c369f761a6a86a55ad9db13c5e0d0fdce0a1",
"content_id": "8a1e3c388083f1fcde832684241d948b13de2e09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1924,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 66,
"path": "/5.2keras卷积神经网络识别手写数字/卷积神经网络识别手写数字/train_mnist_my.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from keras.utils import np_utils\nimport numpy as np\nfrom PIL import Image\nimg_list = []\nfor num in range(5000):\n name = './pic/' + str(num) + '.png'\n #print(name)\n img_pil = Image.open(name)\n img_array = np.array(img_pil)\n #print(img_array.shape)\n img_list.append(img_array)\nimg_list_np = np.array(img_list)\nprint(img_list_np.shape)\n\n\nfilename = \"dig_label.txt\"\nfile = open(filename)\nlabel_list = []\nfor line in file.readlines():\n new_line = line.strip()\n token = new_line.split(\" \")\n label_list.append(token[1])\n\nlabel_list_np = np.array(label_list)\nprint(label_list_np)\nprint(label_list_np.shape)\nslice_img = img_list_np[:,:,:,0:1]\nprint(slice_img.shape)\nx_train_image = slice_img\ny_train_label = label_list_np\nx_test_image = x_train_image\ny_test_label = y_train_label\n#60000x28x28\nx_Train = x_train_image.reshape(5000, 400).astype('float32')\nx_Test = x_test_image.reshape(5000, 400).astype('float32')\nx_Train_normalize = x_Train / 255\nx_Test_normalize = x_Test / 255\n\ny_Train_OneHot = np_utils.to_categorical(y_train_label)\ny_Test_OneHot = np_utils.to_categorical(y_test_label)\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nmodel = Sequential()\nmodel.add(Dense(units=256,\n input_dim=400,\n kernel_initializer='normal',\n activation='relu'))\n\nmodel.add(Dense(units=10,\n kernel_initializer='normal',\n activation='softmax'))\nprint(model.summary())\n\n# # 训练模型\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\ntrain_history = model.fit(x=x_Train_normalize,\n y=y_Train_OneHot, validation_split = 0.2,\n epochs = 10, batch_size = 200, verbose = 2)\n\nscores = model.evaluate(x_Test_normalize, y_Test_OneHot)\nprint('accuracy=', scores[1])\nprediction = model.predict_classes(x_Test)\nprint(prediction)\n\n\n"
},
{
"alpha_fraction": 0.6540587544441223,
"alphanum_fraction": 0.6822810769081116,
"avg_line_length": 35.17894744873047,
"blob_id": "5f1a3a3b32beed2351b25e55d811b23ea4e8255e",
"content_id": "318398d79cc963d7056a30f5bc7063925cd6d786",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4473,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 95,
"path": "/6.3rnn_lstm/rnn_mnist.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "# 通过RNN实现Mnist,神经网络将一幅数字图片的像素矩阵从行索引0开始一行一行的循环扫描,将\n# 整个像素矩阵扫描完后,再预测数字(而CNN是通过卷积核滑窗扫描、卷积运算进行预测)\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('Mnist_data', one_hot=True)\n# 定义超参数\nlearning_rate = 0.001# 学习率\ntraining_iters = 10000# 循环次数\nbatch_size = 100# 批次数目(一次输入多少数据)\n\n# Mnist中的图片是28*28*1 #\nn_inputs = 28# 一次扫描28个像素点(像素矩阵有28列)\nn_steps = 28# 一共扫描28次(像素矩阵有28行),与时间相对应\n##############################\nn_hidden_units = 128# 在隐藏层中神经元的数量\nn_classes = 10# Mnist中只有0-9这10个类\n\n\n# 定义placeholder\nx = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_classes])\nkeep_drop = tf.placeholder(tf.float32)\n\n# 以字典的形式定义权重与偏值\nweights = {\n # (28, 128), 输入数据进入cell时参与的运算的的权重\n 'in':tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),\n # (128, 10),数据从cell中变为输出数据时参与的运算的权重\n 'out':tf.Variable(tf.random_normal([n_hidden_units, n_classes]))\n}\nbiases = {\n # (128, ),输入数据进入cell时参与的运算的的偏值\n 'in':tf.Variable(tf.constant(0.1, shape=[n_hidden_units,])),\n # (10, ),数据从cell中变为输出数据时参与的运算的偏值\n 'out':tf.Variable(tf.constant(0.1, shape=[n_classes,]))\n}\n\n\n# 定义RNN\ndef RNN(X, weights, biases):\n # 定义输入数据进入cell时经过的隐藏层\n X = tf.reshape(X, [-1, n_inputs])\n X_into_Cell = tf.matmul(X, weights['in']) + biases['in']\n X_into_Cell_drop = tf.nn.dropout(X_into_Cell, keep_drop)# 防止过拟合\n X_into_Cell_drop = tf.reshape(X_into_Cell_drop, [-1, n_steps, n_hidden_units])\n\n # 定义cell\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)\n # 对于BasicLSTMCell来说,lstm_cell中的state_is_tuple的作用是确保初始化state时生成一个包含两个元素的tuple,两个元素分别是c_state(主线state)和m_state(分线state)\n # forget_bias=1.0确保分线state都可以得到保留\n __init__state = lstm_cell.zero_state(batch_size, dtype=tf.float32)# 初始化state,与上面定义的cell相对应\n\n # 定义cell中的运算\n outputs, states = tf.nn.dynamic_rnn(lstm_cell, X_into_Cell_drop, initial_state=__init__state, time_major=False)\n # 其中的outputs就是cell的输出值,而states则是每个cell内存中保留的状态(计算下一次输出值时被调用)\n # 其中time_major的作用指的是n_steps处于主要维度(一维)还是次要维度(二维、三维...),前者则为True,后者则为False\n # 定义数据从cell中变为输出数据时经过的隐藏层\n results = tf.matmul(states[1], weights['out']) + biases['out']\n return results\n\n# 定义预测值\nprediction = RNN(x, weights, biases)\n# 定义损失函数\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))\n# 定义训练函数\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\n# 定义准确率\ncorrect_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\nwith tf.Session() as sess:\n\n sess.run(tf.initialize_all_variables())# 全局初始化\n counter = 10\n for step in range(training_iters):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])\n sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_drop: 0.5})\n\n if step % 1000 == 0:\n counter -= 1\n # 每训练1000次输出一次剩余的训练次数以及当前的识别准确度\n print('the remaining times trained is ', counter * 1000, '.')\n print('the current accuracy is ', sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_drop: 0.5}), '.')\n print()\n\n # 保存神经网络中的参数\n saver = tf.train.Saver()\n save_path = saver.save(sess, 'Mnist_paramater_RNN/save_parameter.ckpt')\n\n # 提示神经网络已经完成训练\n print('The training is over!')\n"
},
{
"alpha_fraction": 0.6204188466072083,
"alphanum_fraction": 0.6204188466072083,
"avg_line_length": 26.35714340209961,
"blob_id": "8d3b7eed807aed91450ac3dcbca9e1443742f91a",
"content_id": "f2e08efdd0864e04ef69e49fb082d3680775cd44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 14,
"path": "/3flask/个人主页/data_deal.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from flask import Flask # 导入Flask模块\nfrom flask import render_template\napp = Flask(__name__) # 创建应用实例\[email protected]('/',)\ndef index():\n name = \"gdit\"\n data = {\n \"user\":\"gdit\",\n \"passwd\":123456\n }\n return render_template(\"data_show.html\", data = data)\n\nif __name__ == '__main__': # 判断是否运行此文件,还是被当做模块导入\n\tapp.run(debug=True) # 开始运行flask应用程序, debug启动app的调试模式"
},
{
"alpha_fraction": 0.7236363887786865,
"alphanum_fraction": 0.7490909099578857,
"avg_line_length": 29.11111068725586,
"blob_id": "7bc09a458e1b06f81dcb4428b80e55ae8c47bb81",
"content_id": "44be5ca88c4e325982afadbad53c00323227a727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/6.2tensorflow_minist/minist_data_read.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 获取数据,number 1 to 10\n#自动下载minist数据,读进来\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\ntrain_x, train_y = mnist.train.next_batch(100)\nprint(len(train_x[0]))\nprint(train_y)\n\n\n\n\n"
},
{
"alpha_fraction": 0.6245370507240295,
"alphanum_fraction": 0.664814829826355,
"avg_line_length": 26.66666603088379,
"blob_id": "72d311293ee398a3bdebd1aec24cd46901173604",
"content_id": "9c259f01c640173e7abde6f227be695e4ab0f742",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2288,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 78,
"path": "/6.1TensorFlow原理介绍/Keras_Cifar_CNN_Continue_Train_model.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\nfrom keras.datasets import cifar10\nimport numpy as np\nnp.random.seed(10)\n\n# # 数据准备\n(x_img_train,y_label_train),(x_img_test,y_label_test)=cifar10.load_data()\n\nx_img_train_normalize = x_img_train.astype('float32') / 255.0\nx_img_test_normalize = x_img_test.astype('float32') / 255.0\n\nfrom keras.utils import np_utils\ny_label_train_OneHot = np_utils.to_categorical(y_label_train)\ny_label_test_OneHot = np_utils.to_categorical(y_label_test)\n\n# # 建立模型\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D\n\nmodel = Sequential()\n#卷积层1\nmodel.add(Conv2D(filters=32,kernel_size=(3,3),\n input_shape=(32, 32,3), \n activation='relu', \n padding='same'))\nmodel.add(Dropout(rate=0.25))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#卷积层2与池化层2\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3), \n activation='relu', padding='same'))\nmodel.add(Dropout(0.25))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dropout(rate=0.25))\nmodel.add(Dense(1024, activation='relu'))\nmodel.add(Dropout(rate=0.25))\n\nmodel.add(Dense(10, activation='softmax'))\nprint(model.summary())\n\n\n# # 加载之前训练的模型\ntry:\n model.load_weights(\"./train/cifar.h5\")\n print(\"加载模型成功!继续训练模型\")\nexcept :\n print(\"加载模型失败!开始训练一个新模型\")\n# 训练模型\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n\ntrain_history=model.fit(x_img_train_normalize, y_label_train_OneHot,\n validation_split=0.2,\n epochs=5, batch_size=128, verbose=1)\n\nscores = model.evaluate(x_img_test_normalize,\n y_label_test_OneHot, verbose=0)\nprint(scores[1])\n\n#prediction=model.predict_classes(x_img_test_normalize)\n\n\nmodel.save(\"./train/cifar.h5\")\nprint(\"Saved model to disk\")\n\n# prediction[:10]\n\n# label_dict={0:\"airplane\",1:\"automobile\",2:\"bird\",3:\"cat\",4:\"deer\",\n# 5:\"dog\",6:\"frog\",7:\"horse\",8:\"ship\",9:\"truck\"}\n#\n#\n# # # 查看预测概率\n# Predicted_Probability=model.predict(x_img_test_normalize)\n\n\n"
},
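The try/except around model.load_weights above is a resume-training pattern: reuse the checkpoint when it exists, otherwise start fresh. A stripped-down sketch of the same idea (toy model and hypothetical path, assuming only that Keras is installed):

```python
# Illustrative only: the load-or-start-fresh checkpoint pattern on a toy model.
import os
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(8, input_dim=4, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam')

weights_path = './train/toy.h5'  # hypothetical checkpoint location
if os.path.exists(weights_path):
    model.load_weights(weights_path)  # resume from the previous run
    print('Loaded checkpoint, continuing training')
else:
    print('No checkpoint found, training a new model')

# ... model.fit(...) would go here ...

os.makedirs(os.path.dirname(weights_path), exist_ok=True)
model.save_weights(weights_path)  # checkpoint for the next run
```

Checking os.path.exists explicitly, instead of catching the load failure, makes it harder to silently swallow an unrelated error inside load_weights.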
{
"alpha_fraction": 0.4771505296230316,
"alphanum_fraction": 0.5416666865348816,
"avg_line_length": 25.39285659790039,
"blob_id": "ace3809463b37b4680242b14255eba141d4f33b1",
"content_id": "39897a2e47353c4e64e202330cc326fe8d780b23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 758,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 28,
"path": "/5.1Keras 多层感知器识别手写数字/split_all_pic.py",
"repo_name": "liuyanfei002/deep-learning-of-web",
"src_encoding": "UTF-8",
"text": "from PIL import Image # array is a numpy array\nimport numpy as np\n\nim_path = \"digits.png\"\nim1_pil = Image.open(im_path)\n#把PIL数据转为numpy类型\nim1_nup = np.array(im1_pil)\nprint(im1_nup.shape)\n\n#print(im1_nup[20][20][0])\n#one_pic = im1_nup[0:20,0:20,:]\n#count = 0\nfor num in range(50):\n for col in range(100):\n one_pic = im1_nup[num * 20:num * 20 + 20, col * 20:col * 20 + 20,:]\n c_pil = Image.fromarray(one_pic)\n name = \"./pic/\" + str(num * 100 + col) + \".png\"\n #count = count + 1\n c_pil.save(name)\n\ns = 0\nwith open(\"dig_label.txt\",\"w\") as f:\n for num in range(5000):\n f.write(str(num)+ \".png\")\n s = int(num/500)\n print(s)\n f.write(\" \" + str(s))\n f.write(\"\\n\")\n\n\n\n\n\n"
}
] | 69 |
erikselin/snowflake-connector-python | https://github.com/erikselin/snowflake-connector-python | 4c57547771b429023e8ccb2e79843cf398031cb8 | 18ef9c06bd7900581520cebbe603c1cecb21509f | af06afa859ccbe806f03789b9c44307a3ef38a3a | refs/heads/master | 2023-01-04T06:44:12.683014 | 2021-08-20T03:45:09 | 2021-08-20T03:45:09 | 212,495,889 | 0 | 0 | NOASSERTION | 2019-10-03T04:19:42 | 2021-08-20T13:19:25 | 2023-01-02T17:03:39 | Python | [
{
"alpha_fraction": 0.5535755753517151,
"alphanum_fraction": 0.5605054497718811,
"avg_line_length": 35.45859909057617,
"blob_id": "4a19de481a8ca6b09bd9db1ac7ff2ec86c4a37a0",
"content_id": "e627532367e8593c30b8e528bad172bba8657dd1",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17172,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 471,
"path": "/src/snowflake/connector/s3_storage_client.py",
"repo_name": "erikselin/snowflake-connector-python",
"src_encoding": "UTF-8",
"text": "#\n# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.\n#\n\nfrom __future__ import division\n\nimport base64\nimport xml.etree.cElementTree as ET\nfrom datetime import datetime\nfrom io import IOBase\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple, Union\n\nfrom cryptography.hazmat.primitives import hashes, hmac\n\nfrom .compat import quote\nfrom .constants import (\n HTTP_HEADER_CONTENT_TYPE,\n HTTP_HEADER_VALUE_OCTET_STREAM,\n FileHeader,\n ResultStatus,\n)\nfrom .encryption_util import EncryptionMetadata\nfrom .storage_client import SnowflakeStorageClient\nfrom .vendored import requests\n\nif TYPE_CHECKING: # pragma: no cover\n from .file_transfer_agent import SnowflakeFileMeta, StorageCredential\n\nlogger = getLogger(__name__)\n\nMETA_PREFIX = \"x-amz-meta-\"\nSFC_DIGEST = \"sfc-digest\"\n\nAMZ_MATDESC = \"x-amz-matdesc\"\nAMZ_KEY = \"x-amz-key\"\nAMZ_IV = \"x-amz-iv\"\n\nERRORNO_WSAECONNABORTED = 10053 # network connection was aborted\n\nEXPIRED_TOKEN = \"ExpiredToken\"\nADDRESSING_STYLE = \"virtual\" # explicit force to use virtual addressing style\n\n\nclass S3Location(NamedTuple):\n bucket_name: str\n path: str\n\n\nclass SnowflakeS3RestClient(SnowflakeStorageClient):\n def __init__(\n self,\n meta: \"SnowflakeFileMeta\",\n credentials: \"StorageCredential\",\n stage_info: Dict[str, Any],\n chunk_size: int,\n use_accelerate_endpoint: bool = False,\n use_s3_regional_url=False,\n ):\n \"\"\"Rest client for S3 storage.\n\n Args:\n stage_info:\n use_accelerate_endpoint:\n \"\"\"\n super().__init__(meta, stage_info, chunk_size, credentials=credentials)\n # Signature version V4\n # Addressing style Virtual Host\n self.region_name: str = stage_info[\"region\"]\n # Multipart upload only\n self.upload_id: Optional[str] = None\n self.etags: Optional[List[str]] = None\n self.s3location: \"S3Location\" = (\n SnowflakeS3RestClient._extract_bucket_name_and_path(\n self.stage_info[\"location\"]\n )\n )\n self.use_s3_regional_url = use_s3_regional_url\n # if GS sends us an endpoint, it's likely for FIPS. 
Use it.\n if stage_info[\"endPoint\"]:\n self.endpoint = (\n f\"https://{self.s3location.bucket_name}.\" + stage_info[\"endPoint\"]\n )\n elif use_accelerate_endpoint:\n self.endpoint = (\n f\"https://{self.s3location.bucket_name}.s3-accelerate.amazonaws.com\"\n )\n else:\n if self.use_s3_regional_url:\n self.endpoint = f\"https://{self.s3location.bucket_name}.s3.{self.region_name}.amazonaws.com\"\n else:\n self.endpoint = (\n f\"https://{self.s3location.bucket_name}.s3.amazonaws.com\"\n )\n\n @staticmethod\n def sign(secret_key, msg):\n h = hmac.HMAC(secret_key, hashes.SHA1())\n h.update(msg)\n return base64.encodebytes(h.finalize()).strip()\n\n @staticmethod\n def _construct_canonicalized_element(\n bucket_name: str = None,\n request_uri: str = \"\",\n subresource: Dict[str, Union[str, int, None]] = None,\n ) -> str:\n if not subresource:\n subresource = {}\n res = \"\"\n if bucket_name:\n res += f\"/{bucket_name}\"\n if request_uri:\n res += \"/\" + request_uri\n else:\n # for GET operations without a bucket name\n res += \"/\"\n if subresource:\n res += \"?\"\n keys = sorted(subresource.keys())\n res += (\n keys[0]\n if subresource[keys[0]] is None\n else f\"{keys[0]}={subresource[keys[0]]}\"\n )\n for k in keys[1:]:\n query_str = k if subresource[k] is None else f\"{k}={subresource[k]}\"\n res += f\"&{query_str}\"\n return res\n\n @staticmethod\n def construct_canonicalized_headers(\n headers: Dict[str, Union[str, List[str]]]\n ) -> str:\n _res = sorted([[k.lower(), v] for k, v in headers.items()])\n res = []\n\n for i in range(len(_res)):\n k, v = _res[i]\n # if value is a list, convert to string delimited by comma\n if isinstance(v, list):\n v = \",\".join(v)\n # if multiline header, replace withs space\n k = k.replace(\"\\n\", \" \")\n res.append(k.rstrip() + \":\" + v.lstrip())\n\n ans = \"\\n\".join(res)\n if ans:\n ans = ans + \"\\n\"\n\n return ans\n\n @staticmethod\n def _construct_string_to_sign(\n verb: str,\n canonicalized_element: str,\n canonicalized_headers: str,\n amzdate: str,\n content_md5: str = \"\",\n content_type: str = \"\",\n ) -> bytes:\n res = verb + \"\\n\" + content_md5 + \"\\n\" + content_type + \"\\n\"\n res += amzdate + \"\\n\" + canonicalized_headers + canonicalized_element\n return res.encode(\"UTF-8\")\n\n @staticmethod\n def _has_expired_token(response: requests.Response) -> bool:\n \"\"\"Extract error code and error message from the S3's error response.\n\n Expected format:\n https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses\n\n Args:\n response: Rest error response in XML format\n\n Returns: True if the error response is caused by token expiration\n\n \"\"\"\n if response.status_code != 400:\n return False\n message = response.text\n if not message or message.isspace():\n return False\n err = ET.fromstring(message)\n return err.find(\"Code\").text == EXPIRED_TOKEN\n\n @staticmethod\n def _extract_bucket_name_and_path(stage_location) -> \"S3Location\":\n # split stage location as bucket name and path\n bucket_name, _, path = stage_location.partition(\"/\")\n if path and not path.endswith(\"/\"):\n path += \"/\"\n\n return S3Location(bucket_name=bucket_name, path=path)\n\n def _send_request_with_authentication_and_retry(\n self,\n url: str,\n verb: str,\n resources: str,\n retry_id: Union[int, str],\n x_amz_headers: Optional[Dict[str, str]] = None,\n headers: Optional[Dict[str, str]] = None,\n content_type: str = \"\",\n data: Union[bytes, bytearray, IOBase, None] = None,\n ) -> requests.Response:\n if not 
x_amz_headers:\n x_amz_headers = {}\n if not headers:\n headers = {}\n\n def generate_authenticated_url_and_args() -> Tuple[bytes, Dict[str, bytes]]:\n t = datetime.utcnow()\n amzdate = t.strftime(\"%Y%m%dT%H%M%SZ\")\n\n if \"AWS_TOKEN\" in self.credentials.creds:\n x_amz_headers[\"x-amz-security-token\"] = self.credentials.creds.get(\n \"AWS_TOKEN\"\n )\n _x_amz_headers = self.construct_canonicalized_headers(x_amz_headers)\n string_to_sign = self._construct_string_to_sign(\n verb, resources, _x_amz_headers, amzdate, content_type=content_type\n )\n signature = self.sign(\n self.credentials.creds[\"AWS_SECRET_KEY\"].encode(\"UTF-8\"), string_to_sign\n )\n authorization_header = ( # TODO\n \"AWS \" + self.credentials.creds[\"AWS_KEY_ID\"] + \":\" + signature.decode()\n )\n headers.update(x_amz_headers)\n headers[\"Date\"] = amzdate\n headers[\"Authorization\"] = authorization_header\n rest_args = {\"headers\": headers}\n\n if data:\n rest_args[\"data\"] = data\n\n return url, rest_args\n\n return self._send_request_with_retry(\n verb, generate_authenticated_url_and_args, retry_id\n )\n\n def get_file_header(self, filename: str) -> Union[FileHeader, None]:\n \"\"\"Gets the metadata of file in specified location.\n\n Args:\n filename: Name of remote file.\n\n Returns:\n None if HEAD returns 404, otherwise a FileHeader instance populated with metadata\n \"\"\"\n path = quote(self.s3location.path + filename.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}\"\n\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name, request_uri=path\n )\n retry_id = \"HEAD\"\n self.retry_count[retry_id] = 0\n response = self._send_request_with_authentication_and_retry(\n url, \"HEAD\", _resource, retry_id\n )\n if response.status_code == 200:\n self.meta.result_status = ResultStatus.UPLOADED\n metadata = response.headers\n encryption_metadata = (\n EncryptionMetadata(\n key=metadata.get(META_PREFIX + AMZ_KEY),\n iv=metadata.get(META_PREFIX + AMZ_IV),\n matdesc=metadata.get(META_PREFIX + AMZ_MATDESC),\n )\n if metadata.get(META_PREFIX + AMZ_KEY)\n else None\n )\n return FileHeader(\n digest=metadata.get(META_PREFIX + SFC_DIGEST),\n content_length=int(metadata.get(\"Content-Length\")),\n encryption_metadata=encryption_metadata,\n )\n elif response.status_code == 404:\n logger.debug(\n f\"not found. 
bucket: {self.s3location.bucket_name}, path: {path}\"\n )\n self.meta.result_status = ResultStatus.NOT_FOUND_FILE\n return None\n else:\n response.raise_for_status()\n\n def _prepare_file_metadata(self) -> Dict[str, Any]:\n \"\"\"Construct metadata for a file to be uploaded.\n\n Returns: File metadata in a dict.\n\n \"\"\"\n s3_metadata = {\n META_PREFIX + SFC_DIGEST: self.meta.sha256_digest,\n }\n if self.encryption_metadata:\n s3_metadata.update(\n {\n META_PREFIX + AMZ_IV: self.encryption_metadata.iv,\n META_PREFIX + AMZ_KEY: self.encryption_metadata.key,\n META_PREFIX + AMZ_MATDESC: self.encryption_metadata.matdesc,\n }\n )\n return s3_metadata\n\n def _initiate_multipart_upload(self) -> None:\n path = quote(self.s3location.path + self.meta.dst_file_name.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}?uploads\"\n s3_metadata = self._prepare_file_metadata()\n # initiate multipart upload\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name,\n request_uri=path,\n subresource={\"uploads\": None},\n )\n retry_id = \"Initiate\"\n self.retry_count[retry_id] = 0\n response = self._send_request_with_authentication_and_retry(\n url,\n \"POST\",\n _resource,\n retry_id,\n x_amz_headers=s3_metadata,\n content_type=HTTP_HEADER_VALUE_OCTET_STREAM,\n headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},\n )\n if response.status_code == 200:\n self.upload_id = ET.fromstring(response.content)[2].text\n self.etags = [None] * self.num_of_chunks\n else:\n response.raise_for_status()\n\n def _upload_chunk(self, chunk_id: int, chunk: bytes):\n path = quote(self.s3location.path + self.meta.dst_file_name.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}\"\n\n if self.num_of_chunks == 1: # single request\n s3_metadata = self._prepare_file_metadata()\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name, request_uri=path\n )\n response = self._send_request_with_authentication_and_retry(\n url,\n \"PUT\",\n _resource,\n chunk_id,\n data=chunk,\n x_amz_headers=s3_metadata,\n headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},\n content_type=HTTP_HEADER_VALUE_OCTET_STREAM,\n )\n response.raise_for_status()\n else:\n # multipart PUT\n chunk_url = url + f\"?partNumber={chunk_id+1}&uploadId={self.upload_id}\"\n query_params = {\"partNumber\": chunk_id + 1, \"uploadId\": self.upload_id}\n chunk_resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name,\n request_uri=path,\n subresource=query_params,\n )\n response = self._send_request_with_authentication_and_retry(\n chunk_url, \"PUT\", chunk_resource, chunk_id, data=chunk\n )\n if response.status_code == 200:\n self.etags[chunk_id] = response.headers[\"ETag\"]\n response.raise_for_status()\n\n def _complete_multipart_upload(self) -> None:\n path = quote(self.s3location.path + self.meta.dst_file_name.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}?uploadId={self.upload_id}\"\n logger.debug(\"Initiating multipart upload complete\")\n # Complete multipart upload\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name,\n request_uri=path,\n subresource={\"uploadId\": self.upload_id},\n )\n root = ET.Element(\"CompleteMultipartUpload\")\n for idx, etag_str in enumerate(self.etags):\n part = ET.Element(\"Part\")\n etag = ET.Element(\"ETag\")\n etag.text = etag_str\n part.append(etag)\n part_number = ET.Element(\"PartNumber\")\n part_number.text = str(idx + 1)\n 
part.append(part_number)\n root.append(part)\n retry_id = \"Complete\"\n self.retry_count[retry_id] = 0\n response = self._send_request_with_authentication_and_retry(\n url,\n \"POST\",\n _resource,\n retry_id,\n data=ET.tostring(root),\n )\n response.raise_for_status()\n\n def _abort_multipart_upload(self) -> None:\n if self.upload_id is None:\n return\n path = quote(self.s3location.path + self.meta.dst_file_name.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}?uploadId={self.upload_id}\"\n\n retry_id = \"Abort\"\n self.retry_count[retry_id] = 0\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name,\n request_uri=path,\n subresource={\"uploadId\": self.upload_id},\n )\n response = self._send_request_with_authentication_and_retry(\n url, \"DELETE\", _resource, retry_id\n )\n response.raise_for_status()\n\n def download_chunk(self, chunk_id: int) -> None:\n logger.debug(f\"Downloading chunk {chunk_id}\")\n path = quote(self.s3location.path + self.meta.src_file_name.lstrip(\"/\"))\n url = self.endpoint + f\"/{path}\"\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name, request_uri=path\n )\n if self.num_of_chunks == 1:\n response = self._send_request_with_authentication_and_retry(\n url, \"GET\", _resource, chunk_id\n )\n if response.status_code == 200:\n self.write_downloaded_chunk(0, response.content)\n self.meta.result_status = ResultStatus.DOWNLOADED\n response.raise_for_status()\n else:\n chunk_size = self.chunk_size\n if chunk_id < self.num_of_chunks - 1:\n _range = f\"{chunk_id * chunk_size}-{(chunk_id+1)*chunk_size-1}\"\n else:\n _range = f\"{chunk_id * chunk_size}-\"\n\n response = self._send_request_with_authentication_and_retry(\n url,\n \"GET\",\n _resource,\n chunk_id,\n headers={\"Range\": f\"bytes={_range}\"},\n )\n if response.status_code in (200, 206):\n self.write_downloaded_chunk(chunk_id, response.content)\n response.raise_for_status()\n\n def transfer_accelerate_config(self) -> bool:\n url = self.endpoint + \"/?accelerate\"\n _resource = self._construct_canonicalized_element(\n bucket_name=self.s3location.bucket_name, subresource={\"accelerate\": None}\n )\n retry_id = \"accelerate\"\n self.retry_count[retry_id] = 0\n response = self._send_request_with_authentication_and_retry(\n url, \"GET\", _resource, retry_id\n )\n if response.status_code == 200:\n config = ET.fromstring(response.text)\n use_accelerate_endpoint = (\n config.find(\"Status\") and config.find(\"Status\").text == \"Enabled\"\n )\n logger.debug(f\"use_accelerate_endpoint: {use_accelerate_endpoint}\")\n return use_accelerate_endpoint\n return False\n"
}
] | 1 |
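The `sign`/`_construct_string_to_sign` pair in the file above follows the classic AWS recipe: HMAC-SHA1 over `VERB\nContent-MD5\nContent-Type\nDate\n<canonicalized headers><canonicalized resource>`, base64-encoded into an `AWS <key id>:<signature>` header. A self-contained sketch of the same computation using only the standard library (all credentials, bucket, and key names below are made up):

```python
# Standalone sketch of the request-signing recipe used above (stdlib only).
# Credentials, bucket, and object key are placeholders for illustration.
import base64
import hmac
from hashlib import sha1
from datetime import datetime

secret_key = b"FAKE/SECRET/KEY"  # placeholder credential
amzdate = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")

canonicalized_headers = "x-amz-security-token:FAKE-TOKEN\n"  # sorted, lowercased, one per line
canonicalized_resource = "/example-bucket/stage/data.csv.gz"

string_to_sign = (
    "GET\n"   # HTTP verb
    "\n"      # Content-MD5 (empty for GET)
    "\n"      # Content-Type (empty for GET)
    + amzdate + "\n"
    + canonicalized_headers
    + canonicalized_resource
)

signature = base64.encodebytes(
    hmac.new(secret_key, string_to_sign.encode("utf-8"), sha1).digest()
).strip()
print("Authorization: AWS FAKEKEYID:" + signature.decode())
```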
conghui/cvprclg | https://github.com/conghui/cvprclg | 63c80456013d99f21900edbea1d436d86495aebc | d756ffc1eb96d015e61b29a76a9d7482ae4a6e4a | f9deb6b92daf5b58241cee8fc4377c2cb5197735 | refs/heads/master | 2020-03-11T18:56:33.881332 | 2018-04-20T16:12:51 | 2018-04-20T16:12:51 | 130,192,498 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6820809245109558,
"alphanum_fraction": 0.6955683827400208,
"avg_line_length": 32.45161437988281,
"blob_id": "d03afd58744db1fb5c5653a9c487fce5705fa193",
"content_id": "e538ff6da90f37fd33f7e7e1c00f0583241990ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 31,
"path": "/util/np2imgv17.py",
"repo_name": "conghui/cvprclg",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport numpy as np\nfrom skimage.io import imsave\nimport pandas as pd\nfrom pathlib import Path\n\n# for MODEL v17\nIMG_LIST_FN = '/home/rice/cvprclg/data/working/images/v5/AOI_3_Paris_test_ImageId.csv'\nNUMPY_FILE = '/home/rice/cvprclg/data/working/models/v17/AOI_3_Paris_poly.npy'\nPNG_DIR = '/home/rice/cvprclg/data/working/models/v17/AOI_3_Paris_test_png_v17'\n\n# assert the OUTPUT_DIR exists\nif not Path(PNG_DIR).exists():\n Path(PNG_DIR).mkdir(parents=True)\n\n# read image list and numpy array\nimage_list = pd.read_csv(IMG_LIST_FN, index_col='ImageId').index.tolist()\nimage_array = np.load(NUMPY_FILE)\n\nprint('# of files in image_list: ', len(image_list))\nprint('# of array in numpy array: ', image_array.shape[0])\n\nif len(image_list) != image_array.shape[0]:\n print('# of files in %s != # of images in %s' % (IMG_LIST_FN, NUMPY_FILE))\n\n# write png file\nfor idx in range(len(image_list)):\n png_fn = PNG_DIR + '/' + image_list[idx] + '.png'\n imsave(png_fn, image_array[idx])\n print('png written to ', png_fn)\n\n"
},
{
"alpha_fraction": 0.6422287225723267,
"alphanum_fraction": 0.6764418482780457,
"avg_line_length": 41.625,
"blob_id": "c7d23f275e403977f7c6f7f859cc3e8eed8c013e",
"content_id": "8ada261005e74ff3fb66503c2332119168e0c39d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2046,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 48,
"path": "/code/train.sh",
"repo_name": "conghui/cvprclg",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nset -x\nset -e\n\nexport THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32\n\nexport PROJ_BASE_PATH=\"/root\"\n\nTRAIN_PATH_LIST=\"\n$PROJ_BASE_PATH/data/train/AOI_4_Shanghai_Train\n\"\n# $PROJ_BASE_PATH/data/train/AOI_2_Vegas_Train\n# $PROJ_BASE_PATH/data/train/AOI_3_Paris_Train\n# $PROJ_BASE_PATH/data/train/AOI_4_Shanghai_Train\n# $PROJ_BASE_PATH/data/train/AOI_5_Khartoum_Train\n\n# echo \">>> CLEAN UP\" && echo rm -rf $PROJ_BASE_PATH/data/working && rm -rf $PROJ_BASE_PATH/data/working && mkdir -p $PROJ_BASE_PATH/data/working\n\n# rm -rf /home/rice/projects/BuildingDetectors_Round2/1-XD_XD/data/working/*.pkl\n# rm -rf /home/rice/projects/BuildingDetectors_Round2/1-XD_XD/data/working/images/v16\n# rm -rf /home/rice/projects/BuildingDetectors_Round2/1-XD_XD/data/working/images/v12\n# rm -rf /home/rice/projects/BuildingDetectors_Round2/1-XD_XD/data/working/images/\n\nsource activate py35\n\nfor train_path in $TRAIN_PATH_LIST; do\n echo $train_path\n # echo \">>> PREPROCESSING STEP\"\n echo python v5_im.py preproc_train $train_path && python v5_im.py preproc_train $train_path\n echo python v12_im.py preproc_train $train_path && python v12_im.py preproc_train $train_path\n\n # echo \">>> TRAINING v9s model\"\n echo python v9s.py validate $train_path && python v9s.py validate $train_path\n echo python v9s.py evalfscore $train_path && python v9s.py evalfscore $train_path\n\n # ### v13 --------------\n echo \">>>>>>>>>> v13.py: Training for v13 model\" && python v13.py validate $train_path\n echo \">>>>>>>>>> v13.py: Parametr optimization for v13 model\" && python v13.py evalfscore $train_path\n\n # ### v17 --------------\n echo \">>>>>>>>>> v17.py\" && python v17.py evalfscore $train_path\ndone\n\n# echo python v16.py preproc_train $train_path && python v16.py preproc_train $train_path\n\n# ### v16 --------------\n# echo \">>>>>>>>>> v16.py Training for v16 model\" && python v16.py validate $train_path\n# echo \">>>>>>>>>> v16.py Parametr optimization for v16 model\" && python v16.py evalfscore $train_path\n"
},
{
"alpha_fraction": 0.5475409626960754,
"alphanum_fraction": 0.5606557130813599,
"avg_line_length": 24.41666603088379,
"blob_id": "2de5c3afd63af184fd68c29e4f1a754c177521f6",
"content_id": "15c8ee23ce6981f13ca17b59cf0b3e7ff995cf66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 12,
"path": "/run-docker.sh",
"repo_name": "conghui/cvprclg",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\njob=train\nnvidia-docker run \\\n --rm \\\n --name shanghai-${job}-${USER} \\\n -v ${HOME}/cvprclg/data/:/root/data/ \\\n -v ${HOME}/cvprclg/code/:/root/code/ \\\n -v ${HOME}/cvprclg/util/visualizer-2.0/:/root/visualizer-2.0/ \\\n --workdir=/root/code \\\n -t unetsol \\\n ./${job}.sh\n"
},
{
"alpha_fraction": 0.6827176809310913,
"alphanum_fraction": 0.7091029286384583,
"avg_line_length": 35.095237731933594,
"blob_id": "5a27e743fbae846c340bd064a00068db5b9e2126",
"content_id": "f4eac4ed9f13ccfb691b44b913f94f6d9705fb93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1516,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 42,
"path": "/code/test.sh",
"repo_name": "conghui/cvprclg",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# set -x\nset -e\n\n# export LC_ALL=C.UTF-8\n# export LANG=C.UTF-8\nexport THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32\n\nexport PROJ_BASE_PATH=\"/root\"\n\nRESULT=17.csv\nTEST_PATH_LIST=\"\n$PROJ_BASE_PATH/data/test/AOI_3_Paris_Test_public\n\"\n# $PROJ_BASE_PATH/data/train/AOI_2_Vegas_Train\n# $PROJ_BASE_PATH/data/train/AOI_3_Paris_Train\n# $PROJ_BASE_PATH/data/train/AOI_4_Shanghai_Train\n# $PROJ_BASE_PATH/data/train/AOI_5_Khartoum_Train\n\n# clean up\nmkdir -p $PROJ_BASE_PATH/data/output $PROJ_BASE_PATH/data/working\n# rm -f $PROJ_BASE_PATH/data/working/images/v5/test_AOI_*_im.h5\n# rm -f $PROJ_BASE_PATH/data/working/images/v5/test_AOI_*_mul.h5\n# rm -f $PROJ_BASE_PATH/data/working/images/v12/test_AOI_*_mul.h5\n# rm -f $PROJ_BASE_PATH/data/working/images/v16/test_AOI_*_osm.h5\n\n# source activate /home/rice/softs/install/anaconda3/envs/py35\nsource activate py35\nfor test_path in $TEST_PATH_LIST; do\n echo \">>> PREPROCESSING STEP\"\n # echo \">>>\" python v5_im.py preproc_test $test_path && python v5_im.py preproc_test $test_path\n # echo \">>>\" python v12_im.py preproc_test $test_path && python v12_im.py preproc_test $test_path\n # echo \">>>\" python v16.py preproc_test $test_path && python v16.py preproc_test $test_path\n\n echo \">>> INFERENCE STEP\"\n echo \">>>\" python v17.py testproc $test_path && python v17.py testproc $test_path\ndone\n\n# Merge infenrece results\necho \">>> MERGE INFERENCE RESULTS\"\necho \">>>\" python merge.py $TEST_PATH_LIST $RESULT\npython merge.py $TEST_PATH_LIST $RESULT\n"
}
] | 4 |
beemi/sciencepal | https://github.com/beemi/sciencepal | 2d2adbf1821d5bbf8864f5439a03866487e6c9c7 | 3c6aadb3972bcb29ebb4ef814220df7e5b252753 | 3f1299996fc74c081fa7e78557f2a286bfb57f8c | refs/heads/master | 2023-04-07T04:47:38.018991 | 2020-08-23T08:14:55 | 2020-08-23T08:14:55 | 289,649,803 | 0 | 0 | MIT | 2020-08-23T08:55:44 | 2020-08-23T08:55:46 | 2023-03-23T17:51:49 | null | [
{
"alpha_fraction": 0.7354596853256226,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 37.07143020629883,
"blob_id": "aef8a2e41070c4712966ce7b7bc66ca440f23b20",
"content_id": "9b55fa99db741df6dd1c90b70b138e1d34d46a7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 533,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 14,
"path": "/Usage_Instructions.md",
"repo_name": "beemi/sciencepal",
"src_encoding": "UTF-8",
"text": "Usage Instructions\n\n1. Fork this repo\n2. Create a new token from here github.com/settings/tokens/new\n3. Give repo read write and workflow permissions to the token and store its value\n4. Add the token as a repo secret (Settings -> Secret) with key GH_TOKEN_1 and value equal to token value stored above\n\nWhat this repo does\n\n1. Fetches chess.com results of my last 100 games\n2. Creates an Ascii chart of the results\n3. Updates the chart\n4. Updates workflow file to generate next run interval (1,2,3,...8 hours)\n5. Repeats from step 1\n"
},
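The README above does not show the fetching step itself. A rough sketch of step 1 against chess.com's published Web API (the `/pub/player/{username}/games/archives` endpoints; the username and the two-month window are placeholders, and this is not the repo's actual code):

```python
# Rough sketch (not the repo's code) of fetching recent game results from
# chess.com's public API. The username below is a placeholder.
import requests

username = "some_player"  # placeholder
archives = requests.get(
    f"https://api.chess.com/pub/player/{username}/games/archives").json()["archives"]

results = []
for month_url in archives[-2:]:  # the last couple of monthly archives
    for game in requests.get(month_url).json()["games"]:
        side = "white" if game["white"]["username"].lower() == username.lower() else "black"
        results.append(game[side]["result"])  # e.g. "win", "resigned", "timeout"

print(results[-100:])  # results of the last 100 games
```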
{
"alpha_fraction": 0.6433566212654114,
"alphanum_fraction": 0.6573426723480225,
"avg_line_length": 22.83333396911621,
"blob_id": "eca422c967f3b106d2c79141855431c5add6d1c0",
"content_id": "b123db359340427dddbced2bfeb75f93c960f1c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 6,
"path": "/generate_random_workflow.py",
"repo_name": "beemi/sciencepal",
"src_encoding": "UTF-8",
"text": "import os\nimport random\n\nwith open(\"workflow.txt\", \"r\") as f:\n wf = f.read()\nprint (wf.replace(\"{random_number}\", str(random.randint(1, 8))))\n"
}
] | 2 |
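workflow.txt itself is not included in this dump, so the template below is a guess based on the README's "next run interval (1,2,3,...8 hours)" note: the `{random_number}` placeholder presumably sits in the workflow's cron schedule. A hypothetical round trip in Python:

```python
# Hypothetical reconstruction: workflow.txt is not part of this dump, so this
# template is only a guess at what generate_random_workflow.py substitutes into.
import random

workflow_template = """name: update-chart
on:
  schedule:
    - cron: '0 */{random_number} * * *'   # run every N hours
"""
print(workflow_template.replace("{random_number}", str(random.randint(1, 8))))
```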
ivogonzalo/day-69-blog-add-users | https://github.com/ivogonzalo/day-69-blog-add-users | a5dd81960b1875d28e1b678466bbca35dbd9e77f | 306b6907be6707281db956520f58b3238478a9e8 | afb8eb76d4ed8e58da96378833d22be0591b369a | refs/heads/master | 2023-06-15T18:12:39.257203 | 2021-07-05T18:28:38 | 2021-07-05T18:28:38 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7216624617576599,
"alphanum_fraction": 0.7216624617576599,
"avg_line_length": 40.81578826904297,
"blob_id": "f354fdfef34020bd85a5779580be3702f0e79136",
"content_id": "92415c08dd48f411dc01cab9ddb27ddc5d83de98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 38,
"path": "/forms.py",
"repo_name": "ivogonzalo/day-69-blog-add-users",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, PasswordField\nfrom wtforms.validators import DataRequired, URL, Email, ValidationError\nfrom flask_ckeditor import CKEditorField\n\n\n\n##WTForm\nclass CreatePostForm(FlaskForm):\n title = StringField(\"Blog Post Title\", validators=[DataRequired()])\n subtitle = StringField(\"Subtitle\", validators=[DataRequired()])\n img_url = StringField(\"Blog Image URL\", validators=[DataRequired(), URL()])\n body = CKEditorField(\"Blog Content\", validators=[DataRequired()])\n submit = SubmitField(\"Submit Post\")\n\n\nclass RegisterForm(FlaskForm):\n email = StringField(label=\"Email Address:\", validators=[DataRequired(), Email()])\n password = PasswordField(label=\"Password:\", validators=[DataRequired()])\n username = StringField(label=\"Username:\", validators=[DataRequired()])\n submit = SubmitField(label=\"Create Account\")\n\n # def validate_username(self, email_address_to_check):\n # email_address = Users.query.filter_by(\n # email_address=email_address_to_check.data).first()\n # if email_address:\n # raise ValidationError('That Email Address already exists. Please choose a different one.')\n\n\nclass LoginForm(FlaskForm):\n email = StringField(label=\"Email Address:\", validators=[DataRequired(), Email()])\n password = PasswordField(label=\"Password:\", validators=[DataRequired()])\n submit = SubmitField(label=\"Log In\")\n\n\nclass CommentForm(FlaskForm):\n body = CKEditorField(\"Comments\", validators=[DataRequired()])\n submit = SubmitField(label=\"SUBMIT COMMENT\")"
}
] | 1 |
furuuchitakahiro/get-PySnooper-string | https://github.com/furuuchitakahiro/get-PySnooper-string | 6b8ba24b6835b4a5d23710d4bc9ae18a78fbbd87 | 282ec09b1a4c76172070a269f81ecd640a3d39ac | 10304c95020bab050b4fe091cbcaead916aae2c7 | refs/heads/master | 2020-05-27T22:49:00.677139 | 2019-05-27T10:19:17 | 2019-05-27T10:19:17 | 188,810,259 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5087719559669495,
"alphanum_fraction": 0.6379585266113281,
"avg_line_length": 22.22222137451172,
"blob_id": "9626fccffb4478ee982cc25ae69e1faca2b7e4fe",
"content_id": "e312dcc7691033afa44f7609f107b0ad27b2eaa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 735,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 27,
"path": "/README.md",
"repo_name": "furuuchitakahiro/get-PySnooper-string",
"src_encoding": "UTF-8",
"text": "# PySnooper を標準出力ではなく、Python 文字列として取得する方法\n\n- python: 3.7.2\n- PySnooper: 0.0.39\n\n# 記事\n\n[PySnooper の文字列を取得](https://qiita.com/furuuchin/items/63398a10006787b99ebf)\n\n# やってみる\n\nPySnooper はインストール済みと仮定します。\n\n1. `git clone [email protected]:furuuchitakahiro/get-PySnooper-string.git`\n2. `cd get-PySnooper-string`\n3. `python get_pysnooper_string.py`\n\n**結果**\n\n```\nNew var:....... ws = <__main__.MyWritableStream object at 0x7fcadd2944e0>\n09:51:49.678454 line 7 num1 = 1\nNew var:....... num1 = 1\n09:51:49.681886 line 8 num2 = 2\nNew var:....... num2 = 2\n09:51:49.681952 line 9 result = num1 + num2\n```\n"
},
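The same WritableStream trick also works with PySnooper's more common decorator form. A minimal self-contained sketch (the class mirrors get_pysnooper_string.py below; the decorated function is made up):

```python
# Minimal sketch: stream capture with pysnooper used as a decorator.
import pysnooper


class MyWritableStream(pysnooper.utils.WritableStream):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dump_message = ''  # accumulates everything PySnooper writes

    def write(self, s: str) -> None:
        self.dump_message += s


ws = MyWritableStream()


@pysnooper.snoop(output=ws)  # decorator form instead of the context manager
def add(num1: int, num2: int) -> int:
    return num1 + num2


add(1, 2)
print(ws.dump_message)  # the full trace as a plain Python string
```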
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 17,
"blob_id": "33a1afe8c511be7975dd82a49f9a76fff9929c59",
"content_id": "6a1f0d8817d4c15a18fa5bd9c873e256bb093fb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 18,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "furuuchitakahiro/get-PySnooper-string",
"src_encoding": "UTF-8",
"text": "PySnooper==0.0.39\n"
},
{
"alpha_fraction": 0.5728155374526978,
"alphanum_fraction": 0.5844660401344299,
"avg_line_length": 21.39130401611328,
"blob_id": "4903185587f193695207152b13060092a21f529a",
"content_id": "0d2970af6837a65d450704a7f734d87854b4d770",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/get_pysnooper_string.py",
"repo_name": "furuuchitakahiro/get-PySnooper-string",
"src_encoding": "UTF-8",
"text": "import pysnooper\n\n\ndef example_func():\n ws = MyWritableStream()\n with pysnooper.snoop(output=ws):\n num1 = 1\n num2 = 2\n result = num1 + num2\n print(ws.dump_message) # ws.dump_message で文字列として取得できる\n\n\nclass MyWritableStream(pysnooper.utils.WritableStream):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dump_message = ''\n\n def write(self, s: str) -> None:\n self.dump_message += s\n\n\nif __name__ == '__main__':\n example_func()\n"
}
] | 3 |
sachin38/twitter-bot | https://github.com/sachin38/twitter-bot | 2b0194c13364ed26e3994cb04bc0a665c24ec86e | 72d96757ca5f771d2706378ca5d847145a7f0936 | 4749d4b636f3003d25288b77001ecf353dc0e32c | refs/heads/master | 2021-01-10T04:35:24.374792 | 2016-04-01T20:47:19 | 2016-04-01T20:47:19 | 55,260,503 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5290322303771973,
"alphanum_fraction": 0.5290322303771973,
"avg_line_length": 21.14285659790039,
"blob_id": "895401f989532b145645931e4fbe375c4053abbc",
"content_id": "d14ab4a4b9682ccd28f1c92688279ddd06bde904",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 14,
"path": "/twitter_updater.py",
"repo_name": "sachin38/twitter-bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom twython import Twython\n\ndef twitter_updater():\n twitter = Twython(\n \"APP KEY\",\n \"APP SECRET KEY\",\n \"AUTH TOKEN\",\n \"AUTH SECRET TOKEN\"\n )\n twitter.update_status(status=\"You status here\")\n\ntwitter_updater()\n"
}
] | 1 |
caylee-annett/ICS3U-Unit3-03-Python-number_guessing_game | https://github.com/caylee-annett/ICS3U-Unit3-03-Python-number_guessing_game | c72c555a0783edbf6f1dc02d3266b8217932585b | 366a7ffcba6fc7bedf02d596696f76261d830c70 | ebddea1bfb920881273f9d8ea8a383a3c6be9796 | refs/heads/main | 2023-04-18T17:18:34.286761 | 2021-05-03T19:42:24 | 2021-05-03T19:42:24 | 363,934,526 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6132075190544128,
"alphanum_fraction": 0.6305031180381775,
"avg_line_length": 20.931034088134766,
"blob_id": "647a0ccb7e0c68bc195cb83d4c0cc117c2f45c66",
"content_id": "c00f51dcc0f760376ff1e8c798c5876fac016cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 29,
"path": "/number_guessing_game.py",
"repo_name": "caylee-annett/ICS3U-Unit3-03-Python-number_guessing_game",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n# Created by: Caylee Annett\n# Created on: April 2021\n# This program is a game where the user tries to guess a randomly\n# generated number\n\n\nimport random\n\n\ndef main():\n # This function tells the user if their guess is correct\n\n # Input\n guessed_number = int(input(\"Guess what the number between 0 and 10 is: \"))\n print(\"\")\n\n # Process & Output\n correct_number = random.randint(0, 10)\n if guessed_number == correct_number:\n print(\"You guessed it!\")\n else:\n print(\"Incorrect! The number was {}.\".format(correct_number))\n print(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
whigg/ThicknessSensitivity | https://github.com/whigg/ThicknessSensitivity | a2457ea057099070f58564baff126aa018a34327 | 6c0bf92a694a977bf36235acf784c070b2cd408e | 81159080982664d2266d79a6e3e5bb506c71d5f5 | refs/heads/master | 2020-03-08T16:36:02.756808 | 2018-04-04T21:12:54 | 2018-04-04T21:12:54 | 128,243,923 | 0 | 1 | MIT | 2018-04-05T17:55:35 | 2018-04-04T21:13:05 | 2018-04-04T21:13:03 | null | [
{
"alpha_fraction": 0.5591939687728882,
"alphanum_fraction": 0.5810243487358093,
"avg_line_length": 30.342105865478516,
"blob_id": "c74d6b59cf57e9ecd95975deac91226c79762a45",
"content_id": "6f22ffc8b6a98466399d78121a05b9c159770cf2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3573,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 114,
"path": "/Scripts/read_MonthlyOutput.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript reads in monthly data from WACCM4 experiments (CIT,HIT,FIT)\n \nNotes\n-----\n Author : Zachary Labe\n Date : 13 August 2017\n \nUsage\n-----\n readExperi(directory,varid,experi,level)\n\"\"\"\n\ndef readExperi(directory,varid,experi,level):\n \"\"\"\n Function reads monthly data from WACCM4 simulations\n\n Parameters\n ----------\n directory : string\n working directory for stored WACCM4 experiments (remote server)\n varid : string\n variable name to read\n experi : string\n experiment name (CIT or HIT or FIT)\n level : string\n Height of variable (surface or profile)\n \n\n Returns\n -------\n lat : 1d numpy array\n latitudes\n lon : 1d numpy array\n longitudes\n time : 1d numpy array\n standard time (days since 1870-1-1, 00:00:00)\n var : 4d numpy array or 5d numpy array \n [year,month,lat,lon] or [year,month,level,lat,lon]\n\n Usage\n -----\n lat,lon,time,lev,var = readExperi(directory,varid,experi,level)\n \"\"\"\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if varid == 'EGR' and level == 'surface':\n filename = totaldirectory + varid + '_500_850.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n time = data.variables['time'][:]\n lev = 'surface'\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n time = data.variables['time'][:]\n lev = data.variables['level'][:]\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0]),int(lon.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n elif varid == 'SWE':\n var = var*1000. # Meters to Millimeters \n print('Completed: Changed units (m to mm)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var\n\n### Test function -- no need to use \n#directory = '/surtsey/zlabe/simu/'\n#varid = 'T2M'\n##varid = 'TEMP'\n#experi = 'HIT'\n#level = 'surface'\n# \n#lat,lon,time,lev,var = readExperi(directory,varid,experi,level)\n"
},
{
"alpha_fraction": 0.5795851945877075,
"alphanum_fraction": 0.6214392781257629,
"avg_line_length": 33.94759750366211,
"blob_id": "1e9260ce1b750b56a7e5708e00494a0e5ed42094",
"content_id": "a9413a8a7ee17ece064c9887c56fc2a46dc99a6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8004,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 229,
"path": "/Scripts/plot_U.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot zonal wind between HIT and FIT experiments. These are \nsea ice uhickness perturbation experiments using WACCM4.\n\nNotes\n-----\n Auuhor : Zachary Labe\n Date : 14 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/TestPerturb/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting temperature - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Call function for zonal wind data\nlat,lon,time,lev,uh = MO.readExperi(directorydata,'U','HIT','profile')\nlat,lon,time,lev,uf = MO.readExperi(directorydata,'U','FIT','profile')\n\n#### Separate per periods (ON,DJ,FM)\nuh_on = np.nanmean(uh[:,9:11,:,:,:],axis=1)\nuf_on = np.nanmean(uf[:,9:11,:,:,:],axis=1)\n\nuh_dj,uf_dj = UT.calcDecJan(uh,uf,lat,lon,'profile',lev.shape[0])\n\nuh_fm = np.nanmean(uh[:,1:3,:,:,:],axis=1)\nuf_fm = np.nanmean(uf[:,1:3,:,:,:],axis=1)\n\n#### Calculate period differenceds\ndiff_on = np.nanmean((uf_on-uh_on),axis=0)\ndiff_dj = np.nanmean((uf_dj-uh_dj),axis=0)\ndiff_fm = np.nanmean((uf_fm-uh_fm),axis=0)\n\n### Calculate zonal mean\nzdiff_on = np.nanmean((diff_on),axis=2)\nzdiff_dj = np.nanmean((diff_dj),axis=2)\nzdiff_fm = np.nanmean((diff_fm),axis=2)\n\n## Calculate climo\nzclimo_on = np.apply_over_axes(np.nanmean,uh_on,(0,3)).squeeze()\nzclimo_dj = np.apply_over_axes(np.nanmean,uh_dj,(0,3)).squeeze()\nzclimo_fm = np.apply_over_axes(np.nanmean,uh_fm,(0,3)).squeeze()\n\n\n### Calculate significance\nstat_on,pvalue_on = UT.calc_indttest(np.nanmean(uh_on,axis=3),\n np.nanmean(uf_on,axis=3))\nstat_dj,pvalue_dj = UT.calc_indttest(np.nanmean(uh_dj,axis=3),\n np.nanmean(uf_dj,axis=3))\nstat_fm,pvalue_fm = UT.calc_indttest(np.nanmean(uh_fm,axis=3),\n np.nanmean(uf_fm,axis=3))\n\n###########################################################################\n###########################################################################\n###########################################################################\n#### Plot U\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Set limits for contours and colorbars\nlimit = np.arange(-3,3.1,0.1)\nbarlim = np.arange(-3,4,1)\nzscale = np.array([1000,700,500,300,200,\n 100,50,30,10])\nlatq,levq = np.meshgrid(lat,lev)\n\nfig = plt.figure()\nax1 = plt.subplot(131)\n\nax1.spines['top'].set_color('dimgrey')\nax1.spines['right'].set_color('dimgrey')\nax1.spines['bottom'].set_color('dimgrey')\nax1.spines['left'].set_color('dimgrey')\nax1.spines['left'].set_linewidth(2)\nax1.spines['bottom'].set_linewidth(2)\nax1.spines['right'].set_linewidth(2)\nax1.spines['top'].set_linewidth(2)\nax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax1.xaxis.set_ticks_position('bottom')\nax1.yaxis.set_ticks_position('left')\n\n\ncs = 
plt.contourf(lat,lev,zdiff_on,limit,extend='both')\n#cs1 = plt.scatter(latq,levq,pvalue_on,color='k',marker='.',alpha=0.9,\n# edgecolor='k',linewidth=0.7)\ncs2 = plt.contour(lat,lev,zclimo_on,np.arange(-20,101,5),\n linewidths=0.6,colors='dimgrey')\nplt.contourf(latq,levq,pvalue_on,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\ncmap = ncm.cmap('temp_diff_18lev') \ncs.set_cmap(cmap) \n\nax1.annotate(r'\\textbf{ON}',\n xy=(0, 0),xytext=(0.33,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\n############################################################################\nax2 = plt.subplot(132)\n\nax2.spines['top'].set_color('dimgrey')\nax2.spines['right'].set_color('dimgrey')\nax2.spines['bottom'].set_color('dimgrey')\nax2.spines['left'].set_color('dimgrey')\nax2.spines['left'].set_linewidth(2)\nax2.spines['bottom'].set_linewidth(2)\nax2.spines['right'].set_linewidth(2)\nax2.spines['top'].set_linewidth(2)\nax2.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax2.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax2.xaxis.set_ticks_position('bottom')\nax2.yaxis.set_ticks_position('left')\n\ncs = plt.contourf(lat,lev,zdiff_dj,limit,extend='both')\n#cs1 = plt.scatter(latq,levq,pvalue_dj,color='k',marker='.',alpha=0.9,\n# edgecolor='k',linewidth=0.7)\ncs2 = plt.contour(lat,lev,zclimo_dj,np.arange(-20,101,5),\n linewidths=0.6,colors='dimgrey')\nplt.contourf(latq,levq,pvalue_dj,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\nax2.annotate(r'\\textbf{DJ}',\n xy=(0, 0),xytext=(0.35,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncmap = ncm.cmap('temp_diff_18lev') \ncs.set_cmap(cmap) \n\n###########################################################################\nax3 = plt.subplot(133)\n\nax3.spines['top'].set_color('dimgrey')\nax3.spines['right'].set_color('dimgrey')\nax3.spines['bottom'].set_color('dimgrey')\nax3.spines['left'].set_color('dimgrey')\nax3.spines['left'].set_linewidth(2)\nax3.spines['bottom'].set_linewidth(2)\nax3.spines['right'].set_linewidth(2)\nax3.spines['top'].set_linewidth(2)\nax3.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax3.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax3.xaxis.set_ticks_position('bottom')\nax3.yaxis.set_ticks_position('left')\n\ncs = plt.contourf(lat,lev,zdiff_fm,limit,extend='both')\n#cs1 = plt.scatter(latq,levq,pvalue_fm,color='k',marker='.',alpha=0.9,\n# edgecolor='k',linewidth=0.7)\ncs2 = plt.contour(lat,lev,zclimo_fm,np.arange(-20,101,5),\n linewidths=0.6,colors='dimgrey')\nplt.contourf(latq,levq,pvalue_fm,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\ncmap = ncm.cmap('temp_diff_18lev') 
\ncs.set_cmap(cmap) \n\nax3.annotate(r'\\textbf{FM}',\n xy=(0, 0),xytext=(0.35,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncbar_ax = fig.add_axes([0.312,0.1,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\ncbar.set_label(r'\\textbf{m/s}',fontsize=11,color='dimgray')\ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim))) \ncbar.ax.tick_params(axis='x', size=.01)\n\nplt.subplots_adjust(wspace=0.3)\nplt.subplots_adjust(bottom=0.21)\n\nplt.savefig(directoryfigure + 'U_diff_FIT-HIT.png',dpi=300)\nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.4340369403362274,
"alphanum_fraction": 0.47704485058784485,
"avg_line_length": 30.59166717529297,
"blob_id": "d93895a5ae2304f5ac48003c630eeed9eeafb92a",
"content_id": "0b603047ceafaa18b44853a76b48a83c0492efb8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3790,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 120,
"path": "/Scripts/read_var_LENS.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript reads LENS data for selected variables\n \nNotes\n-----\n Author : Zachary Labe\n Date : 28 November 2016\n \nUsage\n-----\n lats,lons,var = readLENS(directory,varq)\n\"\"\"\n \ndef readLENSEnsemble(directory,varq):\n \"\"\"\n Function reads LENS ensembles netCDF4 data array\n\n Parameters\n ----------\n directory : string\n working directory for stored PIOMAS files\n varq : string\n variable from LENS\n\n Returns\n -------\n lats : 1d array\n latitudes\n lons : 1d array\n longitudes\n varq : 5d array [ens,year,month,lat,lon]\n selected variable\n\n Usage\n -----\n lats,lons,var = readLENS(directory,varq)\n \"\"\"\n \n print('\\n>>> Using readLENS function!')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ens = ['02','03','04','05','06','07','08','09'] + \\\n list(map(str,np.arange(10,36,1))) + list(map(str,np.arange(101,106,1)))\n \n ### Modify directory\n directory = directory + '%s/' % (varq)\n \n if varq == 'SST':\n varn = np.empty((len(ens),75*12,384,320)) # 96 for all\n for i in range(len(ens)):\n if int(ens[i]) > 33:\n filename = '%s_2006_2100_0%s.nc' % (varq,ens[i])\n \n data = Dataset(directory + filename)\n lats = data.variables['ULAT'][:]\n lons = data.variables['ULONG'][:]\n varn[i,:,:,:] = np.squeeze(data.variables['%s' % varq][:-240,:,:]) # -2080\n data.close()\n else:\n filename = '%s_2006_2080_0%s.nc' % (varq,ens[i])\n \n data = Dataset(directory + filename)\n lats = data.variables['ULAT'][:]\n lons = data.variables['ULONG'][:]\n varn[i,:,:,:] = np.squeeze(data.variables['%s' % varq][:,:,:]) # -2080\n data.close()\n \n if int(ens[i]) > 100:\n filename = '%s_2006_2100_%s.nc' % (varq,ens[i])\n \n data = Dataset(directory + filename)\n lats = data.variables['ULAT'][:]\n lons = data.variables['ULONG'][:]\n varn[i,:,:,:] = np.squeeze(data.variables['%s' % varq][:-240,:,:]) # -2080\n data.close()\n \n print('Completed: Read LENS Ensemble #%s - %s!' % (ens[i],varq))\n \n else:\n varn = np.empty((len(ens),161*12,96,144)) # 96 for all\n for i in range(len(ens)):\n filename = '%s_0%s_1920_2080.nc' % (varq,ens[i])\n \n if int(ens[i]) > 100:\n filename = '%s_%s_1920_2100.nc' % (varq,ens[i])\n \n print(directory +filename)\n \n data = Dataset(directory + filename)\n lats = data.variables['latitude'][:]\n lons = data.variables['longitude'][:]\n varn[i,:,:,:] = data.variables['%s' % varq][:-240,:,:] # -2080\n data.close()\n \n print('Completed: Read LENS Ensemble #%s - %s!' % (ens[i],varq))\n \n var = np.reshape(varn,(len(ens),int(varn.shape[1]/12),12,\n int(lats.shape[0]),int(lons.shape[0])))\n var = np.squeeze(np.asarray(var))\n \n ### Modify Units\n if varq == 'SLP':\n var = var/100. #Pa to hPa\n elif varq == 'T2M' or varq == 'T':\n var = var - 273.15 #K to C\n elif varq == 'SIT':\n var[np.where(var < 0)] = np.nan\n var[np.where(var > 12)] = np.nan\n \n ### Missing values \n var[np.where(var <= -9999)] = np.nan\n \n print('*Completed: Read %s data!' % varq)\n \n return var,lats,lons\n \n#var,lats,lons = readLENSEnsemble('/home/zlabe/Surtsey3/CESM_large_ensemble/','SST')"
},
{
"alpha_fraction": 0.5988175868988037,
"alphanum_fraction": 0.6167652010917664,
"avg_line_length": 37.495933532714844,
"blob_id": "23d3785afbb3cb6ef8583d9ea09ca9436d897b45",
"content_id": "123338f90c83143bab1c3fd6aef843a5680da47d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4736,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 123,
"path": "/Scripts/calc_RNET_SICgridcells.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot figure 2 in manuscript for dynamical responses to sea ice loss in WACCM4\nexperiments [FIT-HIT, FIC-CIT, FICT-HIT]. Current variables include T2M and\nRNET. Time period includes December through February [DJF].\n\nNotes\n-----\n Author : Zachary Labe\n Date : 4 February 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\nimport matplotlib.pyplot as plt\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Fig 2 - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Define constants\nrunnames = [r'HIT',r'FIT',r'HIT2',r'FICT2',r'FICT']\nexperiments = ['FIT--HIT','FICT2--HIT2','FICT--HIT']\n\n### Read in SIC data\nlat,lon,time,lev,sic = MO.readExperi(directorydata,'SIC','HIT','surface')\n\n### Find where ice is < 15% (values are 0 to 100 in sic array)\nsicq = sic[5,:,:,:].copy()\nsicq[np.where(sicq < 10)] = 0.0\nsicq[np.where((sicq >= 10) & (sicq <= 100))] = 1.\nsicn = np.append(sicq[8:],sicq[:3],axis=0)\n\n###############################################################################\n###############################################################################\n###############################################################################\n# Function to read surface heat flux data\ndef readFlux(varnames):\n \"\"\"\n Read in heat flux data for selected variables and calculate differences\n between experiments\n \"\"\"\n for v in range(len(varnames)):\n ### Call function for surface temperature data from reach run\n lat,lon,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n lat,lon,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon,time,lev,varfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FICT','surface')\n \n ### Compare experiments\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Compute comparisons for experiments - take ensemble average\n diff_FITHIT = np.nanmean(varfit - varhit,axis=0)*-1\n diff_FICCIT = np.nanmean(varfic - varcit,axis=0)*-1\n diff_FICTHIT = np.nanmean(varfict - varhit,axis=0)*-1\n diffruns = [diff_FITHIT,diff_FICCIT,diff_FICTHIT]\n \n return diffruns,runs,lat,lon\n\n### Call function to read data for selected variable\ndiffruns_rnet,runs_rnet,lat,lon = readFlux(['RNET'])\n\ndifftotal_FITHITq = diffruns_rnet[0] + diffruns_rnet[0]\ndifftotal_FICCITq = diffruns_rnet[1] + diffruns_rnet[1]\ndifftotal_FICTHITq = diffruns_rnet[2] + diffruns_rnet[2]\n\ndifftotal_FITHIT = np.append(difftotal_FITHITq[8:],difftotal_FITHITq[:3],axis=0)\ndifftotal_FICCIT = np.append(difftotal_FICCITq[8:],difftotal_FICCITq[:3],axis=0)\ndifftotal_FICTHIT = np.append(difftotal_FICTHITq[8:],difftotal_FICTHITq[:3],axis=0)\ndifftotallhsh = 
[difftotal_FITHIT,difftotal_FICCIT,difftotal_FICTHIT]\n\n### Take average above 40N\nlatq = np.where(lat > 40)[0]\nlatslice = lat[latq]\nlon2,lat2 = np.meshgrid(lon,latslice)\n\n### Mask out values not over SIC grid cells\nrnetvals = []\nfor i in range(len(difftotallhsh)):\n rnetvalsq = difftotallhsh[i] * sicn\n rnetvalsq[np.where(rnetvalsq == 0.0)] = np.nan\n rnetvalsq = rnetvalsq[:,latq,:]\n \n rnetvals.append(rnetvalsq)\n \n### Calculated weighted average \nweightedrnet = np.empty((len(rnetvals),sicn.shape[0]))\nfor i in range(len(rnetvals)):\n weightedrnet[i,:] = UT.calc_weightedAve(rnetvals[i],lat2)\n \n### Create files for rnet\nnp.savetxt(directorydata2 + 'weightedsic_rnets.txt',weightedrnet.transpose(),\n delimiter=',',header=' '.join(experiments)+'\\n',\n footer='\\n File contains net surface energy flux response' \\\n '\\n which are weighted above 40N for SIC cells >10% \\n' \\\n ' in all months of the year',newline='\\n\\n')\n\nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.5711379051208496,
"alphanum_fraction": 0.5933635830879211,
"avg_line_length": 34.30386734008789,
"blob_id": "ef5627875875ee6d977b8002d7eb35af3422d1a2",
"content_id": "139351c89f349d852888fb9a21383592c273e55c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6389,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 181,
"path": "/Scripts/calc_SITSIC_ratiomean.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCompute ratio (%) between SIT and SIC responses\n\nNotes\n-----\n Author : Zachary Labe\n Date : 16 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport read_MonthlyOutput as MO\nimport cmocean\nimport scipy.stats as sts\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting SIT-SIC ratio - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nmonths = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR']\nvarnames = ['U10','Z30','U300','Z500','SLP','T2M','RNET']\n\nmeanratioficcit = []\nmeanratiofithit = []\nfor v in range(len(varnames)):\n ### Call function for surface temperature data from reach run\n lat,lon,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n latq = np.where(lat>40)[0]\n latqq = lat[latq]\n \n lonnew,latnew=np.meshgrid(lon,latqq)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT',r'FICT',r'FIC']\n experiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FICT--FIC}']\n runs = [varhit,varfit,varfic,varcit]\n \n ### Separate per months\n varmo_fit = np.append(varfit[:,9:,:,:],varfit[:,0:3,:,:],\n axis=1)\n varmo_hit = np.append(varhit[:,9:,:,:],varhit[:,0:3,:,:],\n axis=1)\n varmo_fic = np.append(varfic[:,9:,:,:],varfic[:,0:3,:,:],\n axis=1)\n varmo_cit = np.append(varcit[:,9:,:,:],varcit[:,0:3,:,:],\n axis=1)\n \n ### Calculate differences [FIT-HIT and FICT - FIT]\n diff_fithit = np.nanmean(varmo_fit - varmo_hit,axis=0)\n diff_ficcit = np.nanmean(varmo_fic - varmo_cit,axis=0)\n \n def calc_iceRatio(varx,vary):\n \"\"\"\n Compute relative % difference\n \"\"\"\n print('\\n>>> Using calc_iceRatio function!')\n \n diff = varx-vary\n percchange = (diff/vary)*100.0\n \n print('*Completed: Finished calc_iceRatio function!')\n return percchange\n \n meanfithit = UT.calc_weightedAve(diff_fithit[:,latq,:],latnew)\n meanficcit = UT.calc_weightedAve(diff_ficcit[:,latq,:],latnew)\n \n meanratiofithit.append(meanfithit)\n meanratioficcit.append(meanficcit)\nmeanratiofithit = np.asarray(meanratiofithit)\nmeanratioficcit = np.asarray(meanratioficcit)\n\n### Calculate ratio\nratio = calc_iceRatio(meanratiofithit,meanratioficcit)\n \n#### Save file\nnp.savetxt(directorydata2 + 'meanratio.txt',ratio.transpose(),delimiter=',',\n fmt='%3.2f',header=' '.join(varnames)+'\\n',\n footer='\\n File contains pearsonr correlation coefficients' \\\n '\\n between FIT-HIT and FIC-CIT to get the relative \\n' \\\n ' contributions of SIT and SIC [monthly, 
OCT-MAR]',newline='\\n\\n')\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot Figure\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nfig = plt.figure()\nax = plt.subplot(111)\n\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_color('none')\nax.spines['left'].set_color('none')\nax.get_xaxis().set_tick_params(direction='out', width=0,length=0,\n color='w')\n\nplt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='on', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='on')\nplt.tick_params(\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left='off', # ticks along the bottom edge are off\n right='off', # ticks along the top edge are off\n labelleft='on')\n\ncs = plt.pcolormesh(ratio,shading='faceted',edgecolor='w',\n linewidth=0.3,vmin=-300,vmax=300)\n\nfor i in range(ratio.shape[0]):\n for j in range(ratio.shape[1]):\n plt.text(j+0.5,i+0.5,r'\\textbf{%3.1f}' % ratio[i,j],fontsize=6,\n color='r',va='center',ha='center')\n\ncs.set_cmap(cmocean.cm.balance)\n\nylabels = [r'\\textbf{U10}',r'\\textbf{Z30}',r'\\textbf{U300}',r'\\textbf{Z500}',\n r'\\textbf{SLP}',r'\\textbf{T2M}',r'\\textbf{RNET}']\nplt.yticks(np.arange(0.5,7.5,1),ylabels,ha='right',color='dimgrey',\n va='center')\nyax = ax.get_yaxis()\nyax.set_tick_params(pad=0.7)\n\nxlabels = [r'\\textbf{OCT}',r'\\textbf{NOV}',r'\\textbf{DEC}',\n r'\\textbf{JAN}',r'\\textbf{FEB}',r'\\textbf{MAR}']\nplt.xticks(np.arange(0.5,6.5,1),xlabels,ha='center',color='dimgrey',\n va='center')\nxax = ax.get_xaxis()\nxax.set_tick_params(pad=8)\nplt.xlim([0,6])\n\ncbar = plt.colorbar(cs,orientation='horizontal',aspect=50)\nticks = np.arange(-300,301,300)\nlabels = list(map(str,np.arange(-300,301,300)))\ncbar.set_ticks(ticks)\ncbar.set_ticklabels(labels)\ncbar.ax.tick_params(axis='x', size=.001)\ncbar.outline.set_edgecolor('dimgrey')\ncbar.set_label(r'\\textbf{Ratio [\\%]}',\n color='dimgrey',labelpad=3,fontsize=12)\n\nplt.subplots_adjust(top=0.8)\n\nplt.savefig(directoryfigure + 'SITSIC_ratio_mesh.png',dpi=300)"
},
{
"alpha_fraction": 0.5785356760025024,
"alphanum_fraction": 0.5932415723800659,
"avg_line_length": 29.730770111083984,
"blob_id": "1518084e43fc0a3ca0b02ee83c682ec2071e9096",
"content_id": "cdb22c0d70a26515af8e4a8e93e3a808a0c3a233",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3196,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 104,
"path": "/Scripts/read_MeanMonthlyOutput.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript reads in MEAN monthly data from WACCM4 experiments (CIT,HIT,FIT)\n \nNotes\n-----\n Author : Zachary Labe\n Date : 13 August 2017\n \nUsage\n-----\n readMeanExperi(directory,varid,experi,level)\n\"\"\"\n\ndef readMeanExperi(directory,varid,experi,level):\n \"\"\"\n Function reads monthly data from WACCM4 simulations\n\n Parameters\n ----------\n directory : string\n working directory for stored WACCM4 experiments (remote server)\n varid : string\n variable name to read\n experi : string\n experiment name (CIT or HIT or FIT or FIC or FICT)\n level : string\n Height of variable (surface or profile)\n \n\n Returns\n -------\n lat : 1d numpy array\n latitudes\n lon : 1d numpy array\n longitudes\n time : 1d numpy array\n standard time (days since 1870-1-1, 00:00:00)\n var : 2d numpy array or 3d numpy array \n [year,month] or [year,month,level]\n\n Usage\n -----\n lat,lon,time,lev,var = readMeanExperi(directory,varid,experi,level)\n \"\"\"\n print('\\n>>> Using readMeanExperi function!')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_mean.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_mean.nc'\n \n ### Read in Data\n if level == 'surface': # 1d variables\n data = Dataset(filename,'r')\n time = data.variables['time'][:]\n lev = 'surface'\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'profile': # 2d variables\n data = Dataset(filename,'r')\n time = data.variables['time'][:]\n lev = data.variables['level'][:]\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 2d variables\n var = np.reshape(varq,((varq.shape[0]//12),months))\n elif level == 'profile': # 3d variables\n var = np.reshape(varq,((varq.shape[0]//12),months,int(lev.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var\n\n#### Test function -- no need to use \n#directory = '/surtsey/zlabe/simu/'\n#varid = 'LHFLX'\n#experi = 'FIT'\n#level = 'surface'\n#lat,lon,time,lev,var = readMeanExperi(directory,varid,experi,level)\n"
},
{
"alpha_fraction": 0.5655408501625061,
"alphanum_fraction": 0.5991668105125427,
"avg_line_length": 37.843929290771484,
"blob_id": "f0213f5b16bb150c837e3e08d0fc4ab15f71c9cb",
"content_id": "4490c4164376561f489ce69a446db732e4edb1e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6721,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 173,
"path": "/Scripts/plot_EPflux_QBO.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot temperature profile difference between HIT and FIT experiments. \nThese are sea ice thickness perturbation experiments using WACCM4.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 14 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyLatOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/QBO_epflux/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting EP flux for QBO %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\nqbophase = 'neg'\n\n### Call function for vertical temperature data\nlat,lon,time,lev,epy_h = MO.readExperi(directorydata,'EPY','HIT','profile')\nlat,lon,time,lev,epz_h = MO.readExperi(directorydata,'EPZ','HIT','profile')\nlat,lon,time,lev,div_h = MO.readExperi(directorydata,'DEPF','HIT','profile')\n\nlat,lon,time,lev,epy_f = MO.readExperi(directorydata,'EPY','FIT','profile')\nlat,lon,time,lev,epz_f = MO.readExperi(directorydata,'EPZ','FIT','profile')\nlat,lon,time,lev,div_f = MO.readExperi(directorydata,'DEPF','FIT','profile')\n\n### Separate per month\nepy_moh = np.append(epy_h[:,9:,:,:],epy_h[:,:3,:,:],axis=1)\nepz_moh = np.append(epz_h[:,9:,:,:],epz_h[:,:3,:,:],axis=1)\ndiv_moh = np.append(div_h[:,9:,:,:],div_h[:,:3,:,:],axis=1)\n\nepy_mof = np.append(epy_f[:,9:,:,:],epy_f[:,:3,:,:],axis=1)\nepz_mof = np.append(epz_f[:,9:,:,:],epz_f[:,:3,:,:],axis=1)\ndiv_mof = np.append(div_f[:,9:,:,:],div_f[:,:3,:,:],axis=1)\n\n### Read in QBO phases \nfilenamefitp = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % 'pos'\nfilenamefitno = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % 'non'\nfilenamefitn = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % 'neg'\npos_fit = np.genfromtxt(filenamefitp,unpack=True,usecols=[0],dtype='int')\nnon_fit = np.genfromtxt(filenamefitno,unpack=True,usecols=[0],dtype='int')\nneg_fit = np.genfromtxt(filenamefitn,unpack=True,usecols=[0],dtype='int')\n\n### Calculate differences\nif qbophase == 'pos':\n diff_epy = np.nanmean(epy_mof[pos_fit] - epy_moh[pos_fit],axis=0)\n diff_epz = np.nanmean(epz_mof[pos_fit] - epz_moh[pos_fit],axis=0)\n diff_div = np.nanmean(div_mof[pos_fit] - div_moh[pos_fit],axis=0)/30.\nelif qbophase == 'non':\n diff_epy = np.nanmean(epy_mof[non_fit] - epy_moh[non_fit],axis=0)\n diff_epz = np.nanmean(epz_mof[non_fit] - epz_moh[non_fit],axis=0)\n diff_div = np.nanmean(div_mof[non_fit] - div_moh[non_fit],axis=0)/30.\nelif qbophase == 'neg':\n diff_epy = np.nanmean(epy_mof[neg_fit] - epy_moh[neg_fit],axis=0)\n diff_epz = np.nanmean(epz_mof[neg_fit] - epz_moh[neg_fit],axis=0)\n diff_div = np.nanmean(div_mof[neg_fit] - div_moh[neg_fit],axis=0)/30.\n\n##### Calculate significance\n#stat_on,pvalue_on = UT.calc_indttest(np.nanmean(th_on,axis=3),\n# np.nanmean(tf_on,axis=3))\n#stat_dj,pvalue_dj = UT.calc_indttest(np.nanmean(th_dj,axis=3),\n# np.nanmean(tf_dj,axis=3))\n#stat_fm,pvalue_fm = UT.calc_indttest(np.nanmean(th_fm,axis=3),\n# 
np.nanmean(tf_fm,axis=3))\n\n###########################################################################\n###########################################################################\n###########################################################################\n#### Plot U\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Set limits for contours and colorbars\nlimit = np.arange(-2,2.1,0.1)\nbarlim = np.arange(-2,3,1)\n \nzscale = np.array([1000,700,500,300,200,\n 100,50,30,10])\nlatq,levq = np.meshgrid(lat,lev)\n\nfig = plt.figure()\nfor i in range(6):\n ax1 = plt.subplot(2,3,i+1)\n \n clmq = i\n\n ax1.spines['top'].set_color('dimgrey')\n ax1.spines['right'].set_color('dimgrey')\n ax1.spines['bottom'].set_color('dimgrey')\n ax1.spines['left'].set_color('dimgrey')\n ax1.spines['left'].set_linewidth(2)\n ax1.spines['bottom'].set_linewidth(2)\n ax1.spines['right'].set_linewidth(2)\n ax1.spines['top'].set_linewidth(2)\n ax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\n ax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \n ax1.xaxis.set_ticks_position('bottom')\n ax1.yaxis.set_ticks_position('left')\n \n cs = plt.contourf(lat,lev,diff_div[i],limit,extend='both')\n cs1 = plt.quiver(lat[::4],lev,diff_epy[i][:,::4],diff_epz[i][:,::4],\n pivot='mid',color='k',units='width',\n scale=0.8e7,width=0.007)\n if i == 5:\n plt.quiverkey(cs1,0.34,-0.3,0.8e6,r'\\textbf{0.8$\\times$10$^{6}$}',\n coordinates='axes',labelpos='E')\n\n# plt.contourf(latq,levq,pruns[i],colors='None',hatches=['////'],\n# linewidth=5) \n \n plt.gca().invert_yaxis()\n plt.yscale('log',nonposy='clip')\n \n plt.xticks(np.arange(0,96,30),map(str,np.arange(0,91,30)),fontsize=7)\n plt.yticks(zscale,map(str,zscale),ha='right',fontsize=7)\n plt.minorticks_off()\n \n plt.xlim([0,90])\n plt.ylim([1000,10])\n \n cmap = ncm.cmap('temp_diff_18lev') \n cs.set_cmap(cmap) \n\n labelmonths = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR']\n ax1.annotate(r'\\textbf{%s}' % labelmonths[i],\n xy=(0, 0),xytext=(0.5,1.08),xycoords='axes fraction',\n fontsize=17,color='dimgrey',rotation=0,\n ha='center',va='center')\n\ncbar_ax = fig.add_axes([0.312,0.1,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\n\ncbar.set_label(r'\\textbf{m/s/day}',fontsize=11,color='dimgray',labelpad=1)\n \ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim))) \ncbar.ax.tick_params(axis='x', size=.01)\n \nplt.annotate(r'\\textbf{FIT--HIT}',\n xy=(0, 0),xytext=(0.045,0.535),xycoords='figure fraction',\n fontsize=17,color='k',rotation=90,\n ha='center',va='center') \n\nplt.subplots_adjust(hspace=0.33)\nplt.subplots_adjust(bottom=0.18)\nplt.subplots_adjust(wspace=0.3)\n\nplt.savefig(directoryfigure + 'ep_flux_FIT-HIT_QBO_%s.png' % qbophase,dpi=300)\nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.6153337955474854,
"alphanum_fraction": 0.653668224811554,
"avg_line_length": 29.57575798034668,
"blob_id": "88d8d1824689254466ee55ac304b005559eed751",
"content_id": "171b77518c7cb37c9e4c98fa3f24461a2dd6e589",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3026,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 99,
"path": "/Scripts/plot_ArcticSystemsWorkshop_Fig2.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot for NCAR Arctic Systems workshop poster. Graph is DJF sea ice volume\nfrom PIOMAS over the satellite era.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 4 April 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\nimport cmocean\nimport itertools\n\n### Directory and time\ndirectorydata = '/home/zlabe/Documents/Projects/Tests/SIV_animate/Data/' \ndirectoryfigure = '/home/zlabe/Desktop/' \n\nnow = datetime.datetime.now()\ncurrentmn = str(now.month-1)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\nyears = np.arange(1979,2018,1)\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Poster Figure 2 - %s----' % titletime)\n\n### Read data\nyears,j,f,d = np.genfromtxt(directorydata + 'monthly_piomas.txt',\n unpack=True,delimiter='',usecols=[0,1,2,12])\n\nsiv = (j[1:] + f[1:] + d[:-1])/3\n\n### Plot Figure\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\nfig = plt.figure()\nax = plt.subplot()\n\nadjust_spines(ax, ['left', 'bottom'])\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['left'].set_color('dimgrey')\nax.spines['bottom'].set_color('dimgrey')\nax.spines['left'].set_linewidth(2)\nax.spines['bottom'].set_linewidth(2)\nax.tick_params('both',length=4,width=2,which='major',color='dimgrey',pad=1)\n\nax.yaxis.grid(zorder=1,color='darkgrey',alpha=1,linewidth=0.4)\n\nplt.plot(years[1:],siv,color=cmocean.cm.balance(0.78),linewidth=3.5,marker='o',markersize=7,\n label=r'\\textbf{PIOMAS v2.1 [Zhang and Rothrock, 2003]}')\n\nplt.xticks(np.arange(1980,2021,10),list(map(str,np.arange(1980,2021,10))),\n fontsize=13,color='dimgrey')\nplt.yticks(np.arange(14,29,2),list(map(str,np.arange(14,29,2))),fontsize=13,\n color='dimgrey')\n\nplt.ylabel(r'\\textbf{VOLUME [$\\times$1000 km$^{3}$]}',\n color='k',fontsize=16)\nplt.title(r'\\textbf{DEC-FEB : ARCTIC SEA ICE}',color='K',fontsize=27)\n\nle = plt.legend(shadow=False,fontsize=8,loc='upper center',\n bbox_to_anchor=(0.27, 0.07),fancybox=True,frameon=False,ncol=1)\nfor text in le.get_texts():\n text.set_color('dimgrey') \n\nplt.xlim([1980,2020])\nplt.ylim([14,28])\n\nplt.savefig(directoryfigure + 'PosterFig2.png',dpi=1000)"
},
{
"alpha_fraction": 0.44670847058296204,
"alphanum_fraction": 0.7770019769668579,
"avg_line_length": 26.197673797607422,
"blob_id": "2f885ec0cd6eafdcda2dd0ef346127d9335ecc1a",
"content_id": "196a6478e90e66ae7d707188a619df53d292b031",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 7018,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 258,
"path": "/requirements.txt",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "# This file may be used to create an environment using:\n# $ conda create --name <env> --file <this file>\n# platform: linux-64\n_license=1.1=py36_1\nalabaster=0.7.10=py36h306e16b_0\nanaconda=custom=py36hbbc8b67_0\nanaconda-client=1.6.9=py36_0\nanaconda-navigator=1.6.2=py36_0\nanaconda-project=0.8.2=py36h44fb852_0\naospy=0.2=py36_0\nasn1crypto=0.24.0=py36_0\nastroid=1.6.1=py36_0\nastropy=2.0.3=py36h14c3975_0\nattrs=17.4.0=py36_0\nbabel=2.5.3=py36_0\nbackports=1.0=py36hfa02d7e_1\nbackports.shutil_get_terminal_size=1.0.0=py36hfea85ff_2\nbasemap=1.1.0=py36_2\nbeautifulsoup4=4.6.0=py36h49b8c8c_1\nbitarray=0.8.1=py36h14c3975_1\nbkcharts=0.2=py36h735825a_0\nblaze=0.11.3=py36h4e06776_0\nbleach=2.1.2=py36_0\nbokeh=0.12.13=py36h2f9c1c0_0\nboto=2.48.0=py36h6e4cd66_1\nbottleneck=1.2.1=py36haac1ea0_0\nbzip2=1.0.6=h9a117a8_4\nca-certificates=2017.08.26=h1d4fec5_0\ncairo=1.14.12=h77bcde2_0\ncertifi=2018.1.18=py36_0\ncffi=1.11.4=py36h9745a5d_0\nchardet=3.0.4=py36h0f667ec_1\nclick=6.7=py36h5253387_0\ncloudpickle=0.5.2=py36_1\nclyent=1.2.2=py36h7e57e65_1\ncmocean=1.1=py_0\ncolorama=0.3.9=py36h489cec4_0\nconda=4.4.11=py36_0\nconda-env=2.6.0=h36134e3_1\ncontextlib2=0.5.5=py36h6c84a62_0\ncryptography=2.1.4=py36hd09be54_0\ncurl=7.49.0=1\ncycler=0.10.0=py36h93f1223_0\ncython=0.27.3=py36h1860423_0\ncytoolz=0.9.0=py36h14c3975_0\ndask=0.16.1=py36_0\ndask-core=0.16.1=py36_0\ndatashape=0.5.4=py36h3ad6b5c_0\ndbus=1.12.2=hc3f9b76_1\ndecorator=4.2.1=py36_0\ndistributed=1.20.2=py36_0\ndocutils=0.14=py36hb0f60f5_0\nentrypoints=0.2.3=py36h1aec115_2\neofs=1.2.0=py36_0\net_xmlfile=1.0.1=py36hd6bccc3_0\nexpat=2.2.5=he0dffb1_0\nfastcache=1.0.2=py36h14c3975_2\nfilelock=2.0.13=py36h646ffb5_0\nflask=0.12.2=py36hb24657c_0\nflask-cors=3.0.3=py36h2d857d3_0\nfontconfig=2.12.4=h88586e7_1\nfreetype=2.8=hab7d2ae_1\ngeos=3.5.1=1\nget_terminal_size=1.0.0=haa9412d_0\ngevent=1.2.2=py36h2fe25dc_0\nglib=2.53.6=h5d9569c_2\nglob2=0.6=py36he249c77_0\ngmp=6.1.2=h6c8ec71_1\ngmpy2=2.0.8=py36hc8893dd_2\ngraphite2=1.3.10=hc526e54_0\ngreenlet=0.4.12=py36h2d503a6_0\ngst-plugins-base=1.12.4=h33fb286_0\ngstreamer=1.12.4=hb53b477_0\nh5py=2.7.0=np113py36_0\nharfbuzz=1.7.4=hc5b324e_0\nhdf4=4.2.13=h3ca952b_2\nhdf5=1.8.17=2\nheapdict=1.0.0=py36_2\nhtml5lib=1.0.1=py36h2f9c1c0_0\nicu=58.2=h9c2bf20_1\nidna=2.6=py36h82fb2a8_1\nimageio=2.2.0=py36he555465_0\nimagesize=0.7.1=py36h52d8127_0\nintel-openmp=2018.0.0=hc7b2577_8\nipykernel=4.8.0=py36_0\nipython=6.2.1=py36h88c514a_1\nipython_genutils=0.2.0=py36hb52b0d5_0\nipywidgets=7.1.1=py36_0\nisort=4.2.15=py36had401c0_0\nitsdangerous=0.24=py36h93cc618_1\njbig=2.1=hdba287a_0\njdcal=1.3=py36h4c697fb_0\njedi=0.11.1=py36_0\njinja2=2.10=py36ha16c418_0\njpeg=9b=h024ee3a_2\njsonschema=2.6.0=py36h006f8b5_0\njupyter=1.0.0=py36_4\njupyter_client=5.2.2=py36_0\njupyter_console=5.2.0=py36he59e554_1\njupyter_core=4.4.0=py36h7c827e3_0\njupyterlab=0.31.5=py36_0\njupyterlab_launcher=0.10.2=py36_0\nlazy-object-proxy=1.3.1=py36h10fcdad_0\nlibcurl=7.58.0=h1ad7b7a_0\nlibedit=3.1=heed3624_0\nlibffi=3.2.1=1\nlibgcc=4.8.5=2\nlibgcc-ng=7.2.0=h7cc24e2_2\nlibgfortran=3.0.0=1\nlibgfortran-ng=7.2.0=h9f7466a_2\nlibiconv=1.14=0\nlibnetcdf=4.4.1=1\nlibpng=1.6.34=hb9fc6fc_0\nlibsodium=1.0.15=hf101ebd_0\nlibssh2=1.8.0=h9cfc8f7_4\nlibstdcxx-ng=7.2.0=h7a57d05_2\nlibtiff=4.0.9=h28f6b97_0\nlibtool=2.4.6=h544aabb_3\nlibxcb=1.12=hcd93eb1_4\nlibxml2=2.9.7=h26e45fe_0\nlibxslt=1.1.32=h1312cb7_0\nllvmlite=0.21.0=py36ha241eea_0\nlocket=0.2.0=py36h787c0ad_1\nlxml=4.1.1=py36hf71bdeb_1\nlzo=2.10=h49e0be7_2\nmarkupsafe=1.0=py36hd9260cd_1\nmatplotlib=2.1.2=py
36h0e671d2_0\nmccabe=0.6.1=py36h5ad9710_1\nmetpy=0.6.1=py36_0\nmistune=0.8.3=py36_0\nmkl=2018.0.1=h19d6760_4\nmkl-service=1.1.2=py36h17a0993_4\nmpc=1.0.3=hec55b23_5\nmpfr=3.1.5=h11a74b3_2\nmpmath=1.0.0=py36hfeacd6b_2\nmsgpack-python=0.5.1=py36h6bb024c_0\nmultipledispatch=0.4.9=py36h41da3fb_0\nnavigator-updater=0.1.0=py36_0\nnbconvert=5.3.1=py36hb41ffb7_0\nnbformat=4.4.0=py36h31c9010_0\nncurses=6.0=h9df7e31_2\nnetcdf4=1.2.4=np113py36_0\nnetworkx=2.1=py36_0\nnltk=3.2.5=py36h7532b22_0\nnose=1.3.7=py36hcdf7029_2\nnotebook=5.4.0=py36_0\nnumba=0.36.2=np113py36h7a10136_0\nnumexpr=2.6.4=py36hc4a3f9a_0\nnumpy=1.13.3=py36ha266831_3\nnumpydoc=0.7.0=py36h18f165f_0\nodo=0.5.1=py36h90ed295_0\nolefile=0.45.1=py36_0\nopenpyxl=2.4.10=py36_0\nopenssl=1.0.2n=hb7f436b_0\npackaging=16.8=py36ha668100_1\npandas=0.22.0=py36hf484d3e_0\npandoc=1.19.2.1=hea2e7c5_1\npandocfilters=1.4.2=py36ha6701b7_1\npango=1.41.0=hd475d92_0\nparso=0.1.1=py36h35f843b_0\npartd=0.3.8=py36h36fd896_0\npatchelf=0.9=hf79760b_2\npath.py=10.5=py36h55ceabb_0\npathlib2=2.3.0=py36h49efa8e_0\npatsy=0.5.0=py36_0\npcre=8.41=hc27e229_1\npep8=1.7.1=py36_0\npexpect=4.3.1=py36_0\npickleshare=0.7.4=py36h63277f8_0\npillow=5.0.0=py36h3deb7b8_0\npint=0.8.1=py36_0\npip=9.0.1=py36h6c6f9ce_4\npixman=0.34.0=hceecf20_3\npkginfo=1.4.1=py36h215d178_1\npluggy=0.6.0=py36hb689045_0\nply=3.10=py36hed35086_0\nprompt_toolkit=1.0.15=py36h17d85b1_0\npsutil=5.4.3=py36h14c3975_0\nptyprocess=0.5.2=py36h69acd42_0\npy=1.5.2=py36h29bf505_0\npycodestyle=2.3.1=py36hf609f19_0\npycosat=0.6.3=py36h0a5515d_0\npycparser=2.18=py36hf9f622e_1\npycrypto=2.6.1=py36h14c3975_7\npycurl=7.43.0=py36_0\npyflakes=1.6.0=py36h7bd6a15_0\npygments=2.2.0=py36h0d3125c_0\npylint=1.8.2=py36_0\npyodbc=4.0.22=py36hf484d3e_0\npyopenssl=17.5.0=py36h20ba746_0\npyparsing=2.2.0=py36hee85983_1\npyproj=1.9.5.1=py36_0\npyqt=5.6.0=py36h0386399_5\npyshp=1.2.12=py_0\npysocks=1.6.7=py36hd97a5b1_1\npytables=3.4.2=np113py36_0\npytest=3.3.2=py36_0\npython=3.6.4=hc3d631a_1\npython-dateutil=2.6.1=py36h88d3b88_1\npytz=2017.3=py36h63b9c63_0\npywavelets=0.5.2=py36he602eb0_0\npyyaml=3.12=py36hafb9ca4_1\npyzmq=16.0.3=py36he2533c7_0\nqt=5.6.2=h974d657_12\nqtawesome=0.4.4=py36h609ed8c_0\nqtconsole=4.3.1=py36h8f73b5b_0\nqtpy=1.3.1=py36h3691cc8_0\nreadline=7.0=hac23ff0_3\nrequests=2.18.4=py36he2e5f8d_1\nrope=0.10.7=py36h147e2ec_0\nruamel_yaml=0.15.35=py36h14c3975_1\nscikit-image=0.13.1=py36h14c3975_1\nscikit-learn=0.19.1=py36h7aa7ec6_0\nscipy=1.0.0=py36hbf646e7_0\nseaborn=0.8.1=py36hfad7ec4_0\nsend2trash=1.4.2=py36_0\nsetuptools=38.4.0=py36_0\nsimplegeneric=0.8.1=py36_2\nsingledispatch=3.4.0.3=py36h7a266c3_0\nsip=4.18.1=py36h51ed4ed_2\nsix=1.11.0=py36h372c433_1\nsnowballstemmer=1.2.1=py36h6febd40_0\nsortedcollections=0.5.3=py36h3c761f9_0\nsortedcontainers=1.5.9=py36_0\nsphinx=1.6.6=py36_0\nsphinxcontrib=1.0=py36h6d0f590_1\nsphinxcontrib-websupport=1.0.1=py36hb5cb234_1\nspyder=3.2.8=py36_0\nsqlalchemy=1.2.1=py36h14c3975_0\nsqlite=3.22.0=h1bed415_0\nstatsmodels=0.8.0=py36h8533d0b_0\nsympy=1.1.1=py36hc6d1c1c_0\ntblib=1.3.2=py36h34cf8b6_0\nterminado=0.8.1=py36_1\ntestpath=0.3.1=py36h8cadb63_0\ntk=8.6.7=h5979e9b_1\ntoolz=0.9.0=py36_0\ntornado=4.5.3=py36_0\ntraitlets=4.3.2=py36h674d592_0\ntyping=3.6.2=py36h7da032a_0\nunicodecsv=0.14.1=py36ha668878_0\nunixodbc=2.3.4=hc36303a_1\nurllib3=1.22=py36hbe7ace6_0\nwcwidth=0.1.7=py36hdf4376a_0\nwebencodings=0.5.1=py36h800622e_1\nwerkzeug=0.14.1=py36_0\nwheel=0.30.0=py36hfd4bba0_1\nwidgetsnbextension=3.1.0=py36_0\nwrapt=1.10.11=py36h28b7045_0\nxarray=0.9.6=py36_0\nxlrd=1.1.0=py36h1db9f0c_1\nxlsxwr
iter=1.0.2=py36h3de1aca_0\nxlwt=1.3.0=py36h7b00a1f_0\nxz=5.2.3=h2bcbf08_1\nyaml=0.1.7=had09818_2\nzeromq=4.2.2=hbedb6e5_2\nzict=0.1.3=py36h3a3bf81_0\nzlib=1.2.11=hfbfcf68_1\n\n"
},
{
"alpha_fraction": 0.5720632076263428,
"alphanum_fraction": 0.6046397089958191,
"avg_line_length": 33.076271057128906,
"blob_id": "16cf65bdb930dfa06262e3cc6471019d1038e509",
"content_id": "4b869243fdc3ae1cdd1f8c0395921ad7d8d248e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4052,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 118,
"path": "/Scripts/plot_TEMP_daily.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot temperature comparisons between HIT and FIT experiments. These are \nsea ice thickness perturbation experiments using WACCM4. This script is\nfor DAILY data.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 6 September 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nclcmaps as ncm\nimport datetime\nimport read_DailyOutput as DO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Daily Temperature Profile - %s----' % titletime)\n\n#### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Call function for temperature profile data for polar cap\nlat,lon,time,lev,TEMP_h = DO.readMeanExperi(directorydata,'TEMP',\n 'HIT','profile')\nlat,lon,time,lev,TEMP_f = DO.readMeanExperi(directorydata,'TEMP',\n 'FIT','profile')\n \n#### Calculate significance\nstat,pvalue = UT.calc_indttest(TEMP_h,TEMP_f)\n \n### Calculate ensemble mean\nTEMP_diff= np.nanmean(TEMP_f-TEMP_h,axis=0) \n\n############################################################################\n############################################################################\n############################################################################\n##### Plot temperature profile\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Set limits for contours and colorbars\nlimit = np.arange(-3,3.1,0.2)\nbarlim = np.arange(-3,4,1)\nzscale = np.array([1000,700,500,300,200,\n 100,50,30,10])\ntimeq = np.arange(0,212,1)\ntimeqq,levq = np.meshgrid(timeq,lev)\n\nfig = plt.figure()\nax1 = plt.subplot(111)\n\nax1.spines['top'].set_color('dimgrey')\nax1.spines['right'].set_color('dimgrey')\nax1.spines['bottom'].set_color('dimgrey')\nax1.spines['left'].set_color('dimgrey')\nax1.spines['left'].set_linewidth(2)\nax1.spines['bottom'].set_linewidth(2)\nax1.spines['right'].set_linewidth(2)\nax1.spines['top'].set_linewidth(2)\nax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax1.xaxis.set_ticks_position('bottom')\nax1.yaxis.set_ticks_position('left')\n\n\ncs = plt.contourf(timeq,lev,TEMP_diff.transpose(),limit,extend='both')\n \nplt.contourf(timeqq,levq,pvalue.transpose(),colors='None',hatches=['////'],\n linewidth=5)\n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\nplt.ylabel(r'\\textbf{Pressure (hPa)}',color='dimgrey',fontsize=15,\n labelpad=1)\n\nxlabels = [r'Sep',r'Oct',r'Nov',r'Dec',r'Jan',r'Feb',r'Mar',r'Apr'] \nplt.xticks(np.arange(0,212,30),xlabels,fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\nplt.xlim([30,210])\nplt.ylim([1000,10])\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\ncbar_ax = fig.add_axes([0.312,0.1,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n 
extend='max',extendfrac=0.07,drawedges=False)\ncbar.outline.set_edgecolor('dimgrey')\ncbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=11,color='dimgray')\ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim)))\ncbar.ax.tick_params(axis='x', size=.01)\n\nplt.subplots_adjust(wspace=0.3)\nplt.subplots_adjust(bottom=0.21)\n\nplt.savefig(directoryfigure + 'TEMP_daily_diff_FIT-HIT.png',dpi=300)\nprint('Completed: Script done!') "
},
{
"alpha_fraction": 0.5919262170791626,
"alphanum_fraction": 0.6417406797409058,
"avg_line_length": 33.20769119262695,
"blob_id": "28f7bb4a986f2fbf6083a2ca1479f4e660b83ff5",
"content_id": "2647e742056c4aa7b7a06cbf45e5488039cab7ea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8893,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 260,
"path": "/Scripts/plot_forcings_LENS.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nForcing files for SITperturb from LENS (1979-2005; SST, SIC, SIT)\n(2060-2080; SIT)\n\nNotes\n-----\n Reference : Kay et al. [2014]\n Author : Zachary Labe\n Date : 17 July 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as c\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_var_LENS as LV\n#import read_SeaIceThick_LENS as lens\n\n### Define directories\ndirectorydata = '/home/zlabe/surt/LENS/ForcingPerturb/' \ndirectoryfigure = '/home/zlabe/Desktop/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting forcing files - %s----' % titletime)\n\nensembles = ['02','03','04','05','06','07','08','09'] + \\\n list(map(str,np.arange(10,36,1))) + list(map(str,np.arange(101,106,1)))\n\n### Alott time series\nyear1 = 2006\nyear2 = 2080\nyears = np.arange(year1,year2+1,1)\n\ndata = Dataset(directorydata + 'SST-SIT_lens_1976-2005.nc')\nlons = data.variables['lon'][:]\nlats = data.variables['lat'][:]\nsith = data.variables['ice_thick'][:,:,:]\ndata.close()\n\ndata = Dataset(directorydata + 'SST-SIT_lens_2051-2080.nc')\nlons = data.variables['lon'][:]\nlats = data.variables['lat'][:]\nsitf = data.variables['ice_thick'][:,:,:]\ndata.close()\n\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nsith[np.where(sith == 0)] = np.nan \nvarh = (sith[0] + sith[1] + sith[-1])/3.\n\nsitf[np.where(sitf == 0)] = np.nan \nvarf = (sitf[0] + sitf[1] + sitf[-1])/3.\n\nvarh[np.where(varh == 2.)] = np.nan\nvarhtemp = varh.copy()\nvarhtemp[np.where(varhtemp > 0)] = 1.\n\nvarf = varf*varhtemp\n\nvarf[np.where(varf == 2.)] = np.nan\n\nvarc = varh.copy()\nvarc[np.where(varc >= 0)] = 2.\n\n### Set limits for contours and colorbars\nlimsit = np.arange(0,7.1,0.1)\nbarlimsit = np.arange(0,8,1)\nlimdiff = np.arange(-3,3.1,0.5)\nbarlimdiff = np.arange(-3,4,1)\n\nfig = plt.figure()\nax = plt.subplot(121)\n\nm1 = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l')\n\nvar, lons_cyclic = addcyclic(varh, lons)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lats)\nx, y = m1(lon2d, lat2d)\n \nm1.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7) \nm1.drawcoastlines(color='darkgrey',linewidth=0.1)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m1.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m1.contourf(x,y,var,limsit,extend='max')\ncs1 = m1.contour(x,y,var,barlimsit,linewidths=0.1,colors='darkgrey',\n linestyles='-')\n\nm1.fillcontinents(color='dimgray')\n\ndef colormapSIT():\n cmap1 = plt.get_cmap('BuPu')\n cmap2 = plt.get_cmap('RdPu_r')\n cmap3 = plt.get_cmap('gist_heat_r')\n cmaplist1 = [cmap1(i) for i in range(30,cmap1.N-10)]\n cmaplist2 = [cmap2(i) for i in range(15,cmap2.N)]\n cmaplist3 = [cmap3(i) for i in range(cmap2.N-15)]\n cms_sit = c.ListedColormap(cmaplist1 + cmaplist2 + cmaplist3)\n return cms_sit\n \ncmap = colormapSIT() 
\ncs.set_cmap('cubehelix')\n\n###########################################################################\n\nax = plt.subplot(122)\n\nm2 = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l')\n\nvar, lons_cyclic = addcyclic(varc, lons)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lats)\nx, y = m2(lon2d, lat2d)\n \nm2.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7) \nm2.drawcoastlines(color='darkgrey',linewidth=0.1)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m2.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m2.contourf(x,y,var,limsit,extend='max')\ncs1 = m2.contour(x,y,var,barlimsit,linewidths=0.1,colors='darkgrey',\n linestyles='-')\n\nm2.fillcontinents(color='dimgray')\n\ncmap = colormapSIT() \ncs.set_cmap('cubehelix') \n\ncbar_ax = fig.add_axes([0.312,0.15,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\n\ncbar.set_label(r'\\textbf{Thickness [m]}',fontsize=11,color='dimgray')\ncbar.set_ticks(barlimsit)\ncbar.set_ticklabels(list(map(str,barlimsit)))\ncbar.ax.tick_params(axis='x', size=.01)\n\nplt.text(-0.15,23,r'\\textbf{HIT}',color='dimgray',fontsize=30)\nplt.text(0.93,23,r'\\textbf{CIT}',color='dimgray',fontsize=30)\n#plt.text(1.05,21.8,r'\\textbf{CONSTANT}',color='dimgray',fontsize=10)\nplt.text(-0.75,13.5,r'\\textbf{DJF}',color='dimgray',fontsize=30,\n rotation=90)\n\nplt.savefig(directoryfigure + 'sit_comp.png',dpi=300)\n\n###########################################################################\n###########################################################################\n###########################################################################\nfig = plt.figure()\nax = plt.subplot(121)\n\nm3 = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l')\n\nvar, lons_cyclic = addcyclic(varh, lons)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lats)\nx, y = m3(lon2d, lat2d)\n\nm3.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7) \nm3.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\nm3.drawcoastlines(color='darkgrey',linewidth=0.1)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m3.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m3.contourf(x,y,var,limsit,extend='max')\ncs1 = m3.contour(x,y,var,barlimsit,linewidths=0.1,colors='darkgrey',\n linestyles='-')\n \ncmap = colormapSIT() \ncs.set_cmap('cubehelix')\n\nm3.fillcontinents(color='dimgray')\n\ncbar = m3.colorbar(cs,location='bottom',pad = 0.2,extend='max',\n drawedges=False)\nticks = barlimsit\nlabels = list(map(str,barlimsit))\ncbar.set_ticks(ticks)\ncbar.set_ticklabels(labels)\ncbar.set_label(r'\\textbf{Thickness [m]}',fontsize=11,color='dimgray')\ncbar.ax.tick_params(axis='x', size=.01)\n\n###########################################################################\ndiffsit = varc - varh\n\nax = plt.subplot(122)\n\nm4 = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l')\n\nvar, lons_cyclic = 
addcyclic(diffsit, lons)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lats)\nx, y = m4(lon2d, lat2d)\n \nm4.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7) \nm4.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\nm4.drawcoastlines(color='darkgrey',linewidth=0.1)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.1,color='k',fontsize=6)\n#m4.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m4.contourf(x,y,var,limdiff,extend='both')\ncs1 = m4.contour(x,y,var,barlimdiff,linewidths=0.1,colors='darkgrey',\n linestyles='-')\n\ncmap = ncm.cmap('amwg_blueyellowred') \ncs.set_cmap(cmap) \ncbar = m4.colorbar(cs,location='bottom',pad = 0.2,extend='max',\n drawedges=False)\nticks = barlimdiff\nlabels = list(map(str,barlimdiff))\ncbar.set_ticks(ticks)\ncbar.set_ticklabels(labels)\ncbar.set_label(r'\\textbf{Difference [m]}',fontsize=11,color='dimgray')\ncbar.ax.tick_params(axis='x', size=.01)\n\nm4.fillcontinents(color='dimgray')\n\nplt.annotate(r'\\textbf{HIT}',xy=(-0.82,1.1),\n xycoords='axes fraction',color='dimgray',fontsize=30,alpha=1) \nplt.annotate(r'\\textbf{CIT--HIT}',xy=(0.2,1.1),\n xycoords='axes fraction',color='dimgray',fontsize=30,alpha=1) \nplt.annotate(r'\\textbf{DJF}',xy=(-1.45,0.55),\n xycoords='axes fraction',color='dimgray',fontsize=30,alpha=1,\n rotation=90) \n\nplt.savefig(directoryfigure + 'sit_diff.png',dpi=300)\n\nprint('Completed: Script done!')"
},
{
"alpha_fraction": 0.5792063474655151,
"alphanum_fraction": 0.6187827587127686,
"avg_line_length": 36.69355010986328,
"blob_id": "3a04666d7b84d429c0d856076c9c635d24a1ad8e",
"content_id": "bd75dd42c7fdb06ffadd403aaf67971c1b2b11bf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9349,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 248,
"path": "/Scripts/plot_T2M.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot temperature comparisons between HIT and FIT experiments. These are \nsea ice thickness perturbation experiments using WACCM4.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 13 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/TestPerturb/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting temperature - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Call function for surface temperature data\nlat,lon,time,lev,tash = MO.readExperi(directorydata,'T2M','HIT','surface')\nlat,lon,time,lev,tasf = MO.readExperi(directorydata,'T2M','FIT','surface')\n\n### Separate per periods (ON,DJ,FM)\ntash_on = np.nanmean(tash[:,9:11,:,:],axis=1)\ntasf_on = np.nanmean(tasf[:,9:11,:,:],axis=1)\n\ntash_dj,tasf_dj = UT.calcDecJan(tash,tasf,lat,lon,'surface',1)\n\ntash_fm = np.nanmean(tash[:,1:3,:,:],axis=1)\ntasf_fm = np.nanmean(tasf[:,1:3,:,:],axis=1)\n\n### Calculate period differenceds\ndiff_on = np.nanmean((tasf_on-tash_on),axis=0)\ndiff_dj = np.nanmean((tasf_dj-tash_dj),axis=0)\ndiff_fm = np.nanmean((tasf_fm-tash_fm),axis=0)\ndiff_onq = tasf_on-np.nanmean(tash_on,axis=0)\ndiff_djq = tasf_dj-np.nanmean(tash_dj,axis=0)\ndiff_fmq = tasf_fm-np.nanmean(tash_fm,axis=0)\n\n### Calculate significance\nstat_on,pvalue_on = UT.calc_indttest(tash_on,tasf_on)\nstat_dj,pvalue_dj = UT.calc_indttest(tash_dj,tasf_dj)\nstat_fm,pvalue_fm = UT.calc_indttest(tash_fm,tasf_fm)\n\n###########################################################################\n###########################################################################\n###########################################################################\n### Plot surface temperature\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Set limits for contours and colorbars\nlimit = np.arange(-10,10.1,0.5)\nbarlim = np.arange(-10,11,5)\n\nfig = plt.figure()\nax1 = plt.subplot(131)\n\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(diff_on, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvalue_onq,lons_cyclic = addcyclic(pvalue_on, lon)\npvalue_onq,lons_cyclic = shiftgrid(180.,pvalue_onq,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\nm.drawcoastlines(color='dimgray',linewidth=0.8)\nparallels = np.arange(-90,90,45)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m.contourf(x,y,var,limit,extend='both')\n#cs1 = 
ax1.scatter(x,y,pvalue_onq,color='k',marker='.',alpha=0.5,\n# edgecolor='k',linewidth=0.2)\ncs1 = m.contourf(x,y,pvalue_onq,colors='None',hatches=['....'],\n linewidths=0.4)\n\nax1.annotate(r'\\textbf{ON}',\n xy=(0, 0),xytext=(0.35,1.05),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\n###########################################################################\n\nax2 = plt.subplot(132)\n\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(diff_dj, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvalue_djq,lons_cyclic = addcyclic(pvalue_dj, lon)\npvalue_djq,lons_cyclic = shiftgrid(180.,pvalue_djq,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\nm.drawcoastlines(color='dimgray',linewidth=0.8)\nparallels = np.arange(-90,90,45)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m.contourf(x,y,var,limit,extend='both')\n#cs1 = ax2.scatter(x,y,pvalue_djq,color='k',marker='.',alpha=0.5,\n# edgecolor='k',linewidth=0.2)\ncs1 = m.contourf(x,y,pvalue_djq,colors='None',hatches=['....'],\n linewidths=0.4)\n\nax2.annotate(r'\\textbf{DJ}',\n xy=(0, 0),xytext=(0.35,1.05),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\n###########################################################################\n\nax3 = plt.subplot(133)\n\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(diff_fm, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvalue_fmq,lons_cyclic = addcyclic(pvalue_fm, lon)\npvalue_fmq,lons_cyclic = shiftgrid(180.,pvalue_fmq,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\nm.drawcoastlines(color='dimgray',linewidth=0.8)\nparallels = np.arange(-90,90,45)\nmeridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.6,color='dimgray',fontsize=6)\n#m.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n\ncs = m.contourf(x,y,var,limit,extend='both')\n#cs1 = ax3.scatter(x,y,pvalue_fmq,color='k',marker='.',alpha=0.5,\n# edgecolor='k',linewidth=0.2)\ncs1 = m.contourf(x,y,pvalue_fmq,colors='None',hatches=['....'],\n linewidths=3)\n\nax3.annotate(r'\\textbf{FM}',\n xy=(0, 0),xytext=(0.35,1.05),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\ncbar_ax = fig.add_axes([0.312,0.23,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\ncbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=11,color='dimgray')\ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim)))\ncbar.ax.tick_params(axis='x', 
size=.01)\ncbar.outline.set_edgecolor('dimgrey')\n\nplt.subplots_adjust(wspace=0.01)\n\nplt.savefig(directoryfigure + 'T2M_diff_FIT-HIT.png',dpi=300)\n\n###########################################################################\n###########################################################################\n###########################################################################\n#for i in xrange(diff_onq.shape[0]):\n# ax3 = plt.subplot(7,6,i+1)\n# \n# m = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n# area_thresh=10000.)\n# \n# var, lons_cyclic = addcyclic(diff_onq[i], lon)\n# var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n# lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n# x, y = m(lon2d, lat2d)\n# \n## pvalue_fmq,lons_cyclic = addcyclic(pvalue_fm, lon)\n## pvalue_fmq,lons_cyclic = shiftgrid(180.,pvalue_fmq,lons_cyclic,start=False)\n# \n# m.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\n# m.drawcoastlines(color='dimgray',linewidth=0.2)\n# parallels = np.arange(-90,90,45)\n# meridians = np.arange(-180,180,60)\n# #m.drawparallels(parallels,labels=[True,True,True,True],\n# # linewidth=0.6,color='dimgray',fontsize=6)\n# #m.drawmeridians(meridians,labels=[True,True,True,True],\n# # linewidth=0.6,color='dimgray',fontsize=6)\n# #m.drawlsmask(land_color='dimgray',ocean_color='mintcream')\n# \n# cs = m.contourf(x,y,var,limit,extend='both')\n## cs1 = ax3.scatter(x,y,pvalue_fmq,color='k',marker='.',alpha=0.5,\n## edgecolor='k',linewidth=0.2)\n# \n# cmap = ncm.cmap('NCV_blu_red') \n# cs.set_cmap(cmap) \n#\n#cbar_ax = fig.add_axes([0.312,0.07,0.4,0.03]) \n#cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n# extend='max',extendfrac=0.07,drawedges=False)\n#cbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=11,color='dimgray')\n#cbar.set_ticks(barlim)\n#cbar.set_ticklabels(map(str,barlim)) \n#cbar.ax.tick_params(axis='x', size=.01)\n#\n#plt.subplots_adjust(wspace=0.00)\n#plt.subplots_adjust(hspace=0)\n#\n#plt.savefig(directoryfigure + 't2m_FIT-HIT.png',dpi=300)\nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.5065458416938782,
"alphanum_fraction": 0.5427995920181274,
"avg_line_length": 41.33333206176758,
"blob_id": "0cb63dfb4adea9264c9864ec9d0af5cb39b9a69e",
"content_id": "948a9956713aea04f7bc04032d9444f7579e3b08",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10923,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 258,
"path": "/Scripts/plot_allExperiments_var_ON.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot temperature comparisons between SIT and SIC modeling experiments using \nWACCM4. Subplot includes FIT, HIT, CIT, FIC, FICT\n\nNotes\n-----\n Author : Zachary Labe\n Date : 13 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\nimport cmocean\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting ON variable data - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nvarnames = ['Z500','Z50','Z30','SLP','T2M','U10','RNET','P','THICK','U300',\n 'SWE']\nfor v in range(len(varnames)):\n ### Call function for surface temperature data from reach run\n lat,lon,time,lev,tashit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon,time,lev,tasfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon,time,lev,tascit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n lat,lon,time,lev,tasfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon,time,lev,tasfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FICT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT',r'CIT',r'FIC',r'FICT']\n experiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FIT--CIT}',\n r'\\textbf{HIT--CIT}',r'\\textbf{FIC--CIT}',\n r'\\textbf{FICT--FIT}',r'\\textbf{FICT--HIT}']\n runs = [tashit,tasfit,tascit,tasfic,tasfict]\n \n ### Separate per periods (ON,DJ,FM)\n tas_on = np.empty((5,tashit.shape[0],tashit.shape[2],tashit.shape[3]))\n tas_dj = np.empty((5,tashit.shape[0]-1,tashit.shape[2],tashit.shape[3]))\n tas_fm = np.empty((5,tashit.shape[0],tashit.shape[2],tashit.shape[3]))\n for i in range(len(runs)):\n tas_on[i] = np.nanmean(runs[i][:,9:11,:,:],axis=1) \n tas_dj[i],tas_dj[i] = UT.calcDecJan(runs[i],runs[i],lat,lon,'surface',1) \n tas_fm[i] = np.nanmean(runs[i][:,1:3,:,:],axis=1)\n \n ### Compute climatology \n climofit = np.nanmean(tas_dj[0],axis=0)\n climohit = np.nanmean(tas_dj[1],axis=0)\n climocit = np.nanmean(tas_dj[2],axis=0)\n climofic = np.nanmean(tas_dj[3],axis=0)\n climofict = np.nanmean(tas_dj[4],axis=0)\n \n climo = [climohit,climocit,climocit,climocit,climofit,climohit]\n \n ### Compute comparisons for FM - taken ensemble average\n diff_FITHIT = np.nanmean(tas_on[1] - tas_on[0],axis=0)\n diff_FITCIT = np.nanmean(tas_on[1] - tas_on[2],axis=0)\n diff_HITCIT = np.nanmean(tas_on[0] - tas_on[2],axis=0)\n diff_FICCIT = np.nanmean(tas_on[3] - tas_on[2],axis=0)\n diff_FICTFIT = np.nanmean(tas_on[4] - tas_on[1],axis=0)\n diff_FICTHIT = np.nanmean(tas_on[4] - tas_on[0],axis=0)\n diffruns_on = np.asarray([diff_FITHIT,diff_FITCIT,diff_HITCIT,diff_FICCIT,\n diff_FICTFIT,diff_FICTHIT])\n \n ### Calculate significance for FM\n stat_FITHIT,pvalue_FITHIT = UT.calc_indttest(tas_on[1],tas_on[0])\n stat_FITCIT,pvalue_FITCIT = 
UT.calc_indttest(tas_on[1],tas_on[2])\n stat_HITCIT,pvalue_HITCIT = UT.calc_indttest(tas_on[0],tas_on[2])\n stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(tas_on[3],tas_on[2])\n stat_FICTFIT,pvalue_FICTFIT = UT.calc_indttest(tas_on[4],tas_on[1])\n stat_FICTHIT,pvalue_FICTHIT = UT.calc_indttest(tas_on[4],tas_on[0])\n pruns_on = np.asarray([pvalue_FITHIT,pvalue_FITCIT,pvalue_HITCIT,pvalue_FICCIT,\n pvalue_FICTFIT,pvalue_FICTHIT])\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Plot various variables for ON\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n ### Set limits for contours and colorbars\n if varnames[v] == 'T2M':\n limit = np.arange(-10,10.1,0.5)\n barlim = np.arange(-10,11,5)\n elif varnames[v] == 'Z500':\n limit = np.arange(-60,60.1,1)\n barlim = np.arange(-60,61,30) \n elif varnames[v] == 'Z50':\n limit = np.arange(-60,60.1,1)\n barlim = np.arange(-60,61,30) \n elif varnames[v] == 'Z30':\n limit = np.arange(-60,60.1,1)\n barlim = np.arange(-60,61,30) \n elif varnames[v] == 'SLP':\n limit = np.arange(-6,6.1,0.5)\n barlim = np.arange(-6,7,3)\n elif varnames[v] == 'U10' or varnames[v] == 'U300':\n limit = np.arange(-10,10.1,1)\n barlim = np.arange(-10,11,5)\n elif varnames[v] == 'RNET': \n limit = np.arange(-50,50.1,1)\n barlim = np.arange(-50,51,25)\n elif varnames[v] == 'P':\n limit = np.arange(-2,2.1,0.05)\n barlim = np.arange(-2,3,1) \n elif varnames[v] == 'THICK':\n limit = np.arange(-60,60.1,3)\n barlim = np.arange(-60,61,30)\n elif varnames[v] == 'SWE':\n limit = np.arange(-25,25.1,1)\n barlim = np.arange(-25,26,25)\n \n fig = plt.figure()\n for i in range(len(diffruns_on)):\n var = diffruns_on[i]\n pvar = pruns_on[i]\n \n if varnames[v] == 'RNET':\n var = var*-1.\n \n ax1 = plt.subplot(2,3,i+1)\n m = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n climoq,lons_cyclic = addcyclic(climo[i], lon)\n climoq,lons_cyclic = shiftgrid(180.,climoq,lons_cyclic,start=False)\n \n m.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n cs = m.contourf(x,y,var,limit,extend='both')\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'],\n linewidths=0.4)\n \n if varnames[v] == 'T2M':\n cmap = ncm.cmap('NCV_blu_red') \n cs.set_cmap(cmap) \n elif varnames[v] == 'Z500':\n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n elif varnames[v] == 'Z50':\n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n elif varnames[v] == 'Z30':\n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n elif varnames[v] == 'SLP':\n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n elif varnames[v] == 'U10' or varnames[v] == 'U300':\n cmap = ncm.cmap('temp_diff_18lev') \n cs.set_cmap(cmap) \n elif varnames[v] == 'RNET':\n cmap = ncm.cmap('NCV_blu_red') \n cs.set_cmap(cmap) \n elif varnames[v] == 'P':\n cmap = ncm.cmap('precip4_diff_19lev') \n cs.set_cmap(cmap) \n elif varnames[v] == 'THICK':\n cmap = ncm.cmap('NCV_blu_red') \n cs.set_cmap(cmap)\n elif varnames[v] == 
'SWE':\n            cmap = cmocean.cm.balance\n            cs.set_cmap(cmap)\n    \n        if varnames[v] == 'Z30': # the interval is 250 m \n            cs2 = m.contour(x,y,climoq,np.arange(21900,23500,250),\n                            colors='k',linewidths=1.5,zorder=10)   \n        if varnames[v] == 'RNET':\n            m.drawcoastlines(color='darkgray',linewidth=0.3)\n            m.fillcontinents(color='dimgrey')\n        else:\n            m.drawcoastlines(color='dimgray',linewidth=0.8)\n                \n        ### Add experiment text to subplot\n        if i >= 4:\n            ax1.annotate(r'%s' % experiments[i],xy=(0,0),xytext=(0.88,0.885),\n                         textcoords='axes fraction',color='k',\n                         fontsize=11,rotation=319,ha='center',va='center')\n        else:\n            ax1.annotate(r'%s' % experiments[i],xy=(0,0),xytext=(0.865,0.90),\n                         textcoords='axes fraction',color='k',fontsize=11,\n                         rotation=320,ha='center',va='center')\n    \n    ###########################################################################\n    cbar_ax = fig.add_axes([0.312,0.1,0.4,0.03])                \n    cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n                        extend='max',extendfrac=0.07,drawedges=False)\n    if varnames[v] == 'T2M':\n        cbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'Z500':\n        cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'Z50':\n        cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'Z30':\n        cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'SLP':\n        cbar.set_label(r'\\textbf{hPa}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'U10' or varnames[v] == 'U300':\n        cbar.set_label(r'\\textbf{m/s}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'RNET':\n        cbar.set_label(r'\\textbf{W/m$^{\\bf{2}}$}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'P':\n        cbar.set_label(r'\\textbf{mm/day}',fontsize=11,color='dimgray')  \n    elif varnames[v] == 'THICK':\n        cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray') \n    elif varnames[v] == 'SWE':\n        cbar.set_label(r'\\textbf{mm}',fontsize=11,color='dimgray')\n\n    cbar.set_ticks(barlim)\n    cbar.set_ticklabels(list(map(str,barlim)))\n    cbar.ax.tick_params(axis='x', size=.01)\n    cbar.outline.set_edgecolor('dimgrey')\n    \n    plt.subplots_adjust(wspace=0.01)\n    plt.subplots_adjust(hspace=0.01)\n    plt.subplots_adjust(bottom=0.15)\n    \n    plt.savefig(directoryfigure + 'allExperiments_ON_%s.png' % varnames[v],\n                dpi=300)\n\nprint('Completed: Script done!')\n\n"
},
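The script above stipples significance using UT.calc_indttest from the repo's calc_Utilities module, which is not shown in this snapshot. A minimal sketch of that kind of grid-point test, assuming two (ensemble, lat, lon) arrays and a NaN-masking convention for insignificant points (the helper's actual masking convention may differ):

import numpy as np
import scipy.stats as sts

def indttest_sketch(x, y, alpha=0.05):
    # Two-sample t-test at every grid point along the ensemble axis
    stat, pvalue = sts.ttest_ind(x, y, axis=0, nan_policy='omit')
    pvalue = np.asarray(pvalue, dtype=float)
    pvalue[pvalue >= alpha] = np.nan   # keep only significant points for hatching
    return stat, pvalue

x = np.random.randn(100, 96, 144)      # placeholder ensembles
y = np.random.randn(100, 96, 144) + 0.3
stat, p = indttest_sketch(x, y)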
{
"alpha_fraction": 0.7847682237625122,
"alphanum_fraction": 0.7880794405937195,
"avg_line_length": 42.28571319580078,
"blob_id": "b9832a09ed00524c4e49e639d7d530d3e7c23fba",
"content_id": "6af629cfad8f21df8b71b4a280eeb86d4e65394f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 7,
"path": "/Scripts/__init__.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "'''\nThe Python repository \"ThicknessSensitivity\" contains the scripts, data, \nfigures, and text for Arctic sea ice thickness perturbation experiments \nusing WACCM4. We are interested in the role of sea ice thickness anomalies\non the wintertime large-scale atmospheric flow.\nAuthor: Zachary Labe ([email protected])\n'''"
},
{
"alpha_fraction": 0.5571955442428589,
"alphanum_fraction": 0.5810388922691345,
"avg_line_length": 29.90350914001465,
"blob_id": "28a7d417e80bb00897db1b255f9326b2a799885b",
"content_id": "44a1b5c757944b088805fb1c684f3a00f2092ae1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3523,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 114,
"path": "/Scripts/read_MonthlyLatOutput.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript reads in monthly latitude data from WACCM4 experiments \n(CIT,HIT,FIT,FICT,FIT)\n \nNotes\n-----\n Author : Zachary Labe\n Date : 13 August 2017\n \nUsage\n-----\n readExperi(directory,varid,experi,level)\n\"\"\"\n\ndef readExperi(directory,varid,experi,level):\n \"\"\"\n Function reads monthly latitude data from WACCM4 simulations\n\n Parameters\n ----------\n directory : string\n working directory for stored WACCM4 experiments (remote server)\n varid : string\n variable name to read\n experi : string\n experiment name (CIT or HIT or FIT or FIC or FICT)\n level : string\n Height of variable (surface or profile)\n \n\n Returns\n -------\n lat : 1d numpy array\n latitudes\n lon : 1d numpy array\n longitudes\n time : 1d numpy array\n standard time (days since 1870-1-1, 00:00:00)\n var : 3d numpy array or 4d numpy array \n [year,month,lat] or [year,month,level,lat]\n\n Usage\n -----\n lat,lon,time,lev,var = readExperi(directory,varid,experi,level)\n \"\"\"\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var\n\n### Test function -- no need to use \n#directory = '/surtsey/zlabe/simu/'\n#varid = 'EPZ'\n#experi = 'HIT'\n#level = 'profile'\n# \n#lat,lon,time,lev,var = readExperi(directory,varid,experi,level)\n"
},
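readExperi above splits the flat monthly record dimension back into (year, month, ...) blocks with a single np.reshape. A self-contained sketch of that step, with made-up shapes:

import numpy as np

nyears, nlat = 101, 96
varq = np.random.randn(nyears * 12, nlat)         # (total months, lat)
var = np.reshape(varq, (nyears, 12, nlat))        # (year, month, lat)
assert np.allclose(var[3, 7], varq[3 * 12 + 7])   # year 4, month 8 round-trips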
{
"alpha_fraction": 0.51667320728302,
"alphanum_fraction": 0.5421425700187683,
"avg_line_length": 36.51970291137695,
"blob_id": "20063e25bac72f8f9cae1d87a7702a0e23d531c9",
"content_id": "a09629ec98791a68903e4019cb3fa17bc2ba140e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15234,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 406,
"path": "/Scripts/plot_Fig2.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot figure 2 in manuscript for dynamical responses to sea ice loss in WACCM4\nexperiments [FIT-HIT, FIC-CIT, FICT-HIT]. Current variables include T2M and\nRNET. Time period includes December through February [DJF].\n\nNotes\n-----\n Author : Zachary Labe\n Date : 4 February 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom numpy import ma\nimport matplotlib.pyplot as plt\nfrom matplotlib import cbook\nfrom matplotlib.colors import Normalize\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport read_MeanMonthlyOutput as DM\nimport calc_Utilities as UT\nimport cmocean\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Fig 2 - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Define constants\nvarnames = ['T2M']\nexperiments = [r'\\textbf{$\\Delta$SIT}',r'\\textbf{$\\Delta$SIC}',\n r'\\textbf{$\\Delta$NET}']\nrunnames = [r'HIT',r'FIT',r'HIT2',r'FICT2',r'FICT']\n\n### Functions to read temperature\ndef readTemp(varnames):\n \"\"\"\n Read in temperature data for selected variables and calculate differences\n between experiments\n \"\"\"\n for v in range(len(varnames)):\n ### Call function for T2M data from reach run\n lat,lon,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT',\n 'surface')\n lat,lon,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT',\n 'surface')\n lat,lon,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT',\n 'surface')\n lat,lon,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC',\n 'surface')\n lat,lon,time,lev,varfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FICT',\n 'surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n ### Concatonate runs\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Separate per periods (DJF)\n var_djf = np.empty((5,varhit.shape[0]-1,varhit.shape[2],varhit.shape[3]))\n for i in range(len(runs)):\n var_djf[i],var_djf[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,lon,\n 'surface',1) \n \n ### Compute comparisons for FM - taken ensemble average\n diff_FITHIT = np.nanmean(var_djf[1] - var_djf[0],axis=0)\n diff_FICCIT = np.nanmean(var_djf[3] - var_djf[2],axis=0)\n diff_FICTHIT = np.nanmean(var_djf[4] - var_djf[0],axis=0)\n diffruns_djf = [diff_FITHIT,diff_FICCIT,diff_FICTHIT]\n \n ### Calculate significance for FM\n stat_FITHIT,pvalue_FITHIT = UT.calc_indttest(var_djf[1],var_djf[0])\n stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(var_djf[3],var_djf[2])\n stat_FICTHIT,pvalue_FICTHIT = UT.calc_indttest(var_djf[4],var_djf[0])\n pruns_djf = [pvalue_FITHIT,pvalue_FICCIT,pvalue_FICTHIT]\n \n return 
diffruns_djf,pruns_djf,lat,lon\n\n###############################################################################\n###############################################################################\n###############################################################################\n# Function to read surface heat flux data\ndef readFlux(varnames):\n \"\"\"\n Read in heat flux data for selected variables and calculate differences\n between experiments\n \"\"\"\n lat,lon,time,lev,varhit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'HIT','surface')\n lat,lon,time,lev,varfit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FIT','surface')\n lat,lon,time,lev,varcit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'CIT','surface')\n lat,lon,time,lev,varfic = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FIC','surface')\n lat,lon,time,lev,varfict = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FICT','surface')\n \n ### Compare experiments\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Compute comparisons for experiments - take ensemble average\n diff_FITHIT = np.nanmean(varfit - varhit,axis=0)\n diff_FICCIT = np.nanmean(varfic - varcit,axis=0)\n diff_FICTHIT = np.nanmean(varfict - varhit,axis=0)\n diffruns = [diff_FITHIT,diff_FICCIT,diff_FICTHIT]\n \n return diffruns,runs,lat,lon\n\n###########################################################################\n###########################################################################\n###########################################################################\n# Read data for net surface energy budget\ndifftotallhshq = np.genfromtxt(directorydata2+'weightedsic_SHLH.txt',\n skip_header=2,delimiter=',')\ndifftotallhsh = difftotallhshq.transpose()\ntemps,ptemps,lat,lon = readTemp(varnames)\n\n### Create 2d array of latitude and longitude\nlon2,lat2 = np.meshgrid(lon,lat)\n \n###########################################################################\n###########################################################################\n###########################################################################\n#### Plot surface temperature and rnet\n\n### Functions to center colormap on 0 - white \nclass MidPointNorm(Normalize): \n def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False):\n Normalize.__init__(self,vmin, vmax, clip)\n self.midpoint = midpoint\n\n def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n\n self.autoscale_None(result)\n vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n if not (vmin < midpoint < vmax):\n raise ValueError(\"midpoint must be between maxvalue and minvalue.\") \n elif vmin == vmax:\n result.fill(0) # Or should it be all masked? 
Or 0.5?\n elif vmin > vmax:\n raise ValueError(\"maxvalue must be bigger than minvalue\")\n else:\n vmin = float(vmin)\n vmax = float(vmax)\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n\n # ma division is very slow; we can take a shortcut\n resdat = result.data\n\n #First scale to -1 to 1 range, than to from 0 to 1.\n resdat -= midpoint \n resdat[resdat>0] /= abs(vmax - midpoint) \n resdat[resdat<0] /= abs(vmin - midpoint)\n\n resdat /= 2.\n resdat += 0.5\n result = ma.array(resdat, mask=result.mask, copy=False) \n\n if is_scalar:\n result = result[0] \n return result\n\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n\n if cbook.iterable(value):\n val = ma.asarray(value)\n val = 2 * (val-0.5) \n val[val>0] *= abs(vmax - midpoint)\n val[val<0] *= abs(vmin - midpoint)\n val += midpoint\n return val\n else:\n val = 2 * (val - 0.5)\n if val < 0: \n return val*abs(vmin-midpoint) + midpoint\n else:\n return val*abs(vmax-midpoint) + midpoint\nnorm = MidPointNorm(midpoint=0)\n\n### Begin plot\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \nfig = plt.figure()\n\n###############################################################################\n### Set limits\nvar = temps[0]\npvar = ptemps[0]\n \nlimit = np.arange(-5,15.1,0.25)\nbarlim = np.arange(-5,16,5)\n \nax1 = plt.subplot(2,2,1)\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(var, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvar,lons_cyclic = addcyclic(pvar, lon)\npvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgrey',linewidth=0.7)\n\ncs = m.contourf(x,y,var,limit,extend='max',norm=norm)\ncs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nm.drawcoastlines(color='dimgrey',linewidth=0.7)\n\nax1.annotate(r'%s' % experiments[0],xy=(0,0),xytext=(0.1,0.90),\n textcoords='axes fraction',color='k',fontsize=16,\n rotation=45,ha='center',va='center')\nax1.annotate(r'\\textbf{[%s]}' % 'a',xy=(0,0),\n xytext=(0.89,0.9),xycoords='axes fraction',\n color='dimgrey',fontsize=7)\n\n###############################################################################\n### Set limits\nvar = temps[1]\npvar = ptemps[1]\n \nlimit = np.arange(-5,15.1,0.25)\nbarlim = np.arange(-5,16,5)\n \nax1 = plt.subplot(2,2,2)\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(var, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvar,lons_cyclic = addcyclic(pvar, lon)\npvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgrey',linewidth=0.7)\n\ncs = m.contourf(x,y,var,limit,extend='max',norm=norm)\ncs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nm.drawcoastlines(color='dimgrey',linewidth=0.7)\n\nax1.annotate(r'%s' % experiments[1],xy=(0,0),xytext=(0.1,0.90),\n textcoords='axes fraction',color='k',fontsize=16,\n 
rotation=45,ha='center',va='center')\nax1.annotate(r'\\textbf{[%s]}' % 'b',xy=(0,0),\n xytext=(0.89,0.9),xycoords='axes fraction',\n color='dimgrey',fontsize=7)\n\n###############################################################################\n### Set limits\nvar = temps[2]\npvar = ptemps[2]\n \nlimit = np.arange(-5,15.1,0.25)\nbarlim = np.arange(-5,16,5)\n \nax1 = plt.subplot(2,2,3)\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.)\n\nvar, lons_cyclic = addcyclic(var, lon)\nvar, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\nlon2d, lat2d = np.meshgrid(lons_cyclic, lat)\nx, y = m(lon2d, lat2d)\n\npvar,lons_cyclic = addcyclic(pvar, lon)\npvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n \nm.drawmapboundary(fill_color='white',color='dimgrey',linewidth=0.7)\n\ncs = m.contourf(x,y,var,limit,extend='max',norm=norm)\ncs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nm.drawcoastlines(color='dimgrey',linewidth=0.7)\n\nax1.annotate(r'%s' % experiments[2],xy=(0,0),xytext=(0.08,0.88),\n textcoords='axes fraction',color='k',fontsize=16,\n rotation=45,ha='center',va='center')\nax1.annotate(r'\\textbf{[W/m${^{2}}$]}',xy=(0,0),xytext=(1.18,1),\n textcoords='axes fraction',color='dimgrey',fontsize=7,\n rotation=0,ha='center',va='center')\nax1.annotate(r'\\textbf{[%s]}' % 'c',xy=(0,0),\n xytext=(0.89,0.9),xycoords='axes fraction',\n color='dimgrey',fontsize=7)\n\n###############################################################################\nax = plt.axes([.543, .183, .24, .31]) \n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 2))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\nadjust_spines(ax, ['left', 'bottom'])\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['left'].set_color('dimgrey')\nax.spines['bottom'].set_color('dimgrey')\nax.spines['left'].set_linewidth(2)\nax.spines['bottom'].set_linewidth(2)\nax.tick_params('both',length=4,width=2,which='major',color='dimgrey',pad=1)\n\ncolor=iter(cmocean.cm.matter(np.linspace(0.3,1,len(difftotallhsh))))\nfor i in range(len(difftotallhsh)):\n c=next(color)\n plt.plot(difftotallhsh[i],linewidth=2,color=c,alpha=1,\n label = r'\\textbf{%s}' % experiments[i],linestyle='-',\n marker='o',markersize=4)\n\nplt.legend(shadow=False,fontsize=5,loc='lower left',\n fancybox=True,frameon=True,ncol=3,bbox_to_anchor=(0.05, 0.13),\n labelspacing=0.2,columnspacing=1,handletextpad=0.4,\n edgecolor='dimgrey')\n\nplt.yticks(np.arange(0,126,25),list(map(str,np.arange(0,126,25))),fontsize=6)\nplt.ylim([0,100])\n\nxlabels = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR',r'APR']\nplt.xticks(np.arange(0,7,1),xlabels,fontsize=6)\nplt.xlim([0,6])\n\nax.annotate(r'\\textbf{[%s]}' % 'd',xy=(0,0),\n xytext=(0.89,0.9),xycoords='axes fraction',\n color='dimgrey',fontsize=7)\n \ncbar_ax = fig.add_axes([0.31,0.09,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\n\ncbar.set_label(r'\\textbf{[T2M]$^\\circ$C}',\n fontsize=13,color='dimgrey',labelpad=2)\n \ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim))) \ncbar.ax.tick_params(axis='x', 
size=.001,labelsize=8,pad=2.1)\ncbar.outline.set_edgecolor('dimgrey')\n \nfig.subplots_adjust(wspace=-0.4,hspace=0,bottom=0.15)\n \nplt.savefig(directoryfigure + 'Fig2.png',dpi=600)\n \nprint('Completed: Script done!')\n\n"
},
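plot_Fig2.py centers its colormap on zero with the hand-rolled MidPointNorm class even though the contour limits run from -5 to +15. Recent matplotlib releases ship an equivalent normalizer, TwoSlopeNorm (assuming matplotlib >= 3.2; this is an alternative, not what the script itself uses):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm

data = np.random.uniform(-5, 15, size=(50, 50))   # asymmetric placeholder field
norm = TwoSlopeNorm(vmin=-5, vcenter=0, vmax=15)  # white point pinned at zero
plt.pcolormesh(data, cmap='RdBu_r', norm=norm)
plt.colorbar()
plt.savefig('twoslope_demo.png', dpi=150)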
{
"alpha_fraction": 0.5191680788993835,
"alphanum_fraction": 0.5464845299720764,
"avg_line_length": 39.26250076293945,
"blob_id": "305d700067629d4d69d733d3417fe6ad02013e9e",
"content_id": "d247517a33a6964b495510219428a642eb9524b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6443,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 160,
"path": "/Scripts/plot_exampleExperiments_DJF.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlots DJF for climatological wave number X for WACCM4 experiments\n\nNotes\n-----\n Author : Zachary Labe\n Date : 12 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Climo Wave - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nvarnames = ['T2M','RNET']\nfor v in range(len(varnames)):\n ### Call function for T2M data from reach run\n lat,lon1,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon1,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon1,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n lat,lon1,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon1,time,lev,varfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FICT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon1,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT',r'CIT',r'FIC',r'FICT']\n experiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FIC--CIT}',\n r'\\textbf{FICT--FIT}',r'\\textbf{FICT--HIT}']\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Separate per periods (DJF)\n var_djf = np.empty((5,varhit.shape[0]-1,varhit.shape[2],varhit.shape[3]))\n for i in range(len(runs)):\n var_djf[i],var_djf[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,\n lon1,'surface',1) \n \n ### Compute comparisons for FM - taken ensemble average\n diff_FITHIT = np.nanmean(var_djf[1] - var_djf[0],axis=0)\n diff_FICCIT = np.nanmean(var_djf[3] - var_djf[2],axis=0)\n diff_FICTTFIT = np.nanmean(var_djf[4] - var_djf[1],axis=0)\n diff_FICTHIT = np.nanmean(var_djf[4] - var_djf[0],axis=0)\n diffruns_djf = [diff_FITHIT,diff_FICCIT,diff_FICTTFIT,diff_FICTHIT]\n \n ### Calculate significance for FM\n stat_FITHIT,pvalue_FITHIT = UT.calc_indttest(var_djf[1],var_djf[0])\n stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(var_djf[3],var_djf[2])\n stat_FICTFIT,pvalue_FICTFIT = UT.calc_indttest(var_djf[4],var_djf[1])\n stat_FICTHIT,pvalue_FICTHIT = UT.calc_indttest(var_djf[4],var_djf[0])\n pruns_djf = [pvalue_FITHIT,pvalue_FICCIT,pvalue_FICTFIT,pvalue_FICTHIT]\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n #### Plot T2M\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n fig = plt.figure()\n for i in range(len(experiments)):\n var = diffruns_djf[i]\n pvar = pruns_djf[i]\n \n if varnames[v] == 'RNET':\n var = var*-1.\n \n if varnames[v] == 'T2M':\n limit = np.arange(-10,10.1,0.5)\n barlim = np.arange(-10,11,5)\n elif varnames[v] == 'RNET': \n limit = 
np.arange(-50,50.1,1)\n barlim = np.arange(-50,51,25)\n \n ax1 = plt.subplot(1,4,i+1)\n m = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n \n m.drawmapboundary(fill_color='white',color='dimgrey',linewidth=0.7)\n \n cs = m.contourf(x,y,var,limit,extend='both')\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n \n if varnames[v] == 'T2M' or varnames[v] == 'RNET':\n cmap = ncm.cmap('NCV_blu_red') \n cs.set_cmap(cmap) \n \n if varnames[v] == 'RNET':\n m.drawcoastlines(color='darkgrey',linewidth=0.3)\n m.fillcontinents(color='dimgrey')\n else:\n m.drawcoastlines(color='dimgrey',linewidth=0.8)\n \n ### Add experiment text to subplot\n alph = [r'A',r'B',r'C',r'D']\n ax1.annotate(r'%s' % experiments[i],xy=(0,0),xytext=(0.865,0.90),\n textcoords='axes fraction',color='k',fontsize=11,\n rotation=320,ha='center',va='center')\n ax1.annotate(r'\\textbf{%s}' % alph[i],xy=(0,0),xytext=(0.5,1.2),\n textcoords='axes fraction',color='dimgrey',fontsize=31,\n rotation=0,ha='center',va='center')\n \n cbar_ax = fig.add_axes([0.312,0.23,0.4,0.03]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\n \n if varnames[v] == 'T2M':\n cbar.set_label(r'\\textbf{2-m Temperature [$^\\circ$C]}',\n fontsize=11,color='dimgrey')\n elif varnames[v] == 'RNET':\n cbar.set_label(r'\\textbf{W/m$^{\\bf{2}}$}',\n fontsize=11,color='dimgrey') \n \n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim))) \n cbar.ax.tick_params(axis='x', size=.001)\n cbar.outline.set_edgecolor('dimgrey')\n \n plt.subplots_adjust(wspace=0.01)\n \n plt.savefig(directoryfigure + '%s_SITexperiments.png' % varnames[v],\n dpi=300)\n \nprint('Completed: Script done!')\n\n"
},
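UT.calcDecJanFeb, used above, pairs December of one year with January/February of the next, which is why var_djf holds one fewer year than the input. A sketch of that seasonal bridging, assuming a plain (year, month, lat, lon) array and an unweighted three-month mean (the actual utility may weight months by their lengths):

import numpy as np

nyears, nlat, nlon = 101, 96, 144
var = np.random.randn(nyears, 12, nlat, nlon)   # (year, month, lat, lon)

dec = var[:-1, 11]                 # Dec of years 0..N-2
jan = var[1:, 0]                   # Jan of years 1..N-1
feb = var[1:, 1]                   # Feb of years 1..N-1
var_djf = (dec + jan + feb) / 3.   # (N-1, lat, lon) DJF seasons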
{
"alpha_fraction": 0.5764316916465759,
"alphanum_fraction": 0.5956711173057556,
"avg_line_length": 36.8011360168457,
"blob_id": "32f8688770bc47135ed93cb966abd4a1dc78dda5",
"content_id": "a8e1767f0ec3e0581aa05fcad0484742b5ebd93b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6653,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 176,
"path": "/Scripts/plot_allExperiments_var_MeanMonthly.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot comparisons between WACCM4 sea ice experiments. These are \nsea ice thickness and concentration perturbation experiments. This script is\nfor MEAN MONTHLY data for all variables.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 6 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport read_MeanMonthlyOutput as DM\nimport cmocean\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/MeanMonthly/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Mean Monthly Data - %s----' % titletime)\n\n#### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Add parameters\nvarnames = ['LHFLX','SHFLX','FLNS']\nrunnames = [r'HIT',r'FIT',r'CIT',r'FIC',r'FICT']\nexperiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FIT--CIT}',\n r'\\textbf{HIT--CIT}',r'\\textbf{FIC--CIT}',\n r'\\textbf{FICT--FIT}',r'\\textbf{FICT--HIT}']\n\n### Call functions for mean monthly data for polar cap\ndef readData(varnames):\n \"\"\"\n Read in data for selected variables and calculate differences\n between experiments\n \"\"\"\n\n lat,lon,time,lev,varhit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'HIT','surface')\n lat,lon,time,lev,varfit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FIT','surface')\n lat,lon,time,lev,varcit = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'CIT','surface')\n lat,lon,time,lev,varfic = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FIC','surface')\n lat,lon,time,lev,varfict = DM.readMeanExperi(directorydata,\n '%s' % varnames,\n 'FICT','surface')\n \n ### Compare experiments\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Compute comparisons for experiments - take ensemble average\n diff_FITHIT = np.nanmean(varfit - varhit,axis=0)\n diff_FITCIT = np.nanmean(varfit - varcit,axis=0)\n diff_HITCIT = np.nanmean(varhit - varcit,axis=0)\n diff_FICCIT = np.nanmean(varfic - varcit,axis=0)\n diff_FICTFIT = np.nanmean(varfict - varfit,axis=0)\n diff_FICTHIT = np.nanmean(varfict - varhit,axis=0)\n diffruns = [diff_FITHIT,diff_FITCIT,diff_HITCIT,diff_FICCIT,\n diff_FICTFIT,diff_FICTHIT]\n \n return diffruns,runs,lat,lon\n\n### Call function to read data for selected variable\ndiffruns_lh,runs_lh,lat,lon = readData('LHFLX')\ndiffruns_sh,runs_sh,lat,lon = readData('SHFLX')\ndiffruns_long,runs_long,lat,lon = readData('FLNS')\ndiffruns_rnet,runs_rnet,lat,lon = readData('RNET')\n\n### Create 2d array of latitude and longitude\nlon2,lat2 = np.meshgrid(lon,lat)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot Figure\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n 
ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \ntotal_hitq = np.nanmean(runs_lh[0],axis=0) + np.nanmean(runs_sh[0],axis=0)\ntotal_fitq = np.nanmean(runs_lh[1],axis=0) + np.nanmean(runs_sh[1],axis=0) \ntotal_citq = np.nanmean(runs_lh[2],axis=0) + np.nanmean(runs_sh[2],axis=0) \ntotal_ficq = np.nanmean(runs_lh[3],axis=0) + np.nanmean(runs_sh[3],axis=0) \ntotal_fictq = np.nanmean(runs_lh[4],axis=0) + np.nanmean(runs_sh[4],axis=0) \n\ntotal_hit = np.append(total_hitq[8:],total_hitq[:3])\ntotal_fit = np.append(total_fitq[8:],total_fitq[:3])\ntotal_cit = np.append(total_citq[8:],total_citq[:3])\ntotal_fic = np.append(total_ficq[8:],total_ficq[:3])\ntotal_fict = np.append(total_fictq[8:],total_fictq[:3])\n\nlong_hitq = np.nanmean(runs_long[0],axis=0)\nlong_fitq = np.nanmean(runs_long[1],axis=0)\nlong_citq = np.nanmean(runs_long[2],axis=0)\nlong_ficq = np.nanmean(runs_long[3],axis=0)\nlong_fictq = np.nanmean(runs_long[4],axis=0)\n\nlong_hit = np.append(long_hitq[8:],long_hitq[:3])\nlong_fit = np.append(long_fitq[8:],long_fitq[:3])\nlong_cit = np.append(long_citq[8:],long_citq[:3])\nlong_fic = np.append(long_ficq[8:],long_ficq[:3])\nlong_fict = np.append(long_fictq[8:],long_fictq[:3])\n\ntotallhsh = [total_hit,total_fit,total_cit,total_fic,total_fict]\nlongg = [long_hit,long_fit,long_cit,long_fic,long_fict]\n \nfig = plt.figure()\nax = plt.subplot(111)\n\nadjust_spines(ax, ['left', 'bottom'])\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['left'].set_color('darkgrey')\nax.spines['bottom'].set_color('darkgrey')\nax.spines['left'].set_linewidth(2)\nax.spines['bottom'].set_linewidth(2)\nax.tick_params('both',length=4,width=2,which='major',color='darkgrey')\n\ncolor=iter(cmocean.cm.thermal(np.linspace(0.1,0.9,len(totallhsh))))\nfor i in range(len(runnames)):\n c=next(color)\n plt.plot(totallhsh[i],linewidth=2.5,color=c,alpha=1,\n label = r'\\textbf{%s}' % runnames[i],linestyle='-')\n \ncolor=iter(cmocean.cm.thermal(np.linspace(0.1,0.9,len(totallhsh))))\nfor i in range(len(runnames)):\n c=next(color)\n plt.plot(longg[i],linewidth=1.5,color=c,alpha=1,linestyle='--')\n\nplt.legend(shadow=False,fontsize=9,loc='lower center',\n fancybox=True,frameon=False,ncol=5)\nplt.ylabel(r'\\textbf{Fluxes [W/m${^{2}}$]}',color='k',fontsize=13)\n\nplt.yticks(np.arange(25,56,5),list(map(str,np.arange(25,56,5))))\nplt.ylim([25,55])\n\nxlabels = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR',r'APR']\nplt.xticks(np.arange(0,7,1),xlabels)\nplt.xlim([0,6])\n\nplt.savefig(directoryfigure + 'test.png',dpi=300)\n"
},
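The repeated np.append(x[8:], x[:3]) idiom in the script above rotates a 12-element monthly series so the cold-season months sit contiguously for the October-April x-axis. In isolation:

import numpy as np

monthly = np.arange(12)                           # one value per calendar month
cold_season = np.append(monthly[8:], monthly[:3])
print(cold_season)                                # [ 8  9 10 11  0  1  2]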
{
"alpha_fraction": 0.5811224579811096,
"alphanum_fraction": 0.6211734414100647,
"avg_line_length": 33.99553680419922,
"blob_id": "e9bf83481918b00e6823f2221df63cb39db48565",
"content_id": "40c6fd169afabbd848649c0ee147492488e72c15",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7840,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 224,
"path": "/Scripts/plot_TEMP.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot temperature profile difference between HIT and FIT experiments. \nThese are sea ice thickness perturbation experiments using WACCM4.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 14 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as c\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting temperature - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Call function for vertical temperature data\nlat,lon,time,lev,th = MO.readExperi(directorydata,'TEMP','HIT','profile')\nlat,lon,time,lev,tf = MO.readExperi(directorydata,'TEMP','FIT','profile')\n\n### Separate per periods (ON,DJ,FM)\nth_on = np.nanmean(th[:,9:11,:,:,:],axis=1)\ntf_on = np.nanmean(tf[:,9:11,:,:,:],axis=1)\n\nth_dj,tf_dj = UT.calcDecJan(th,tf,lat,lon,'profile',lev.shape[0])\n\nth_fm = np.nanmean(th[:,1:3,:,:,:],axis=1)\ntf_fm = np.nanmean(tf[:,1:3,:,:,:],axis=1)\n\n#### Calculate period differenceds\ndiff_on = np.nanmean((tf_on-th_on),axis=0)\ndiff_dj = np.nanmean((tf_dj-th_dj),axis=0)\ndiff_fm = np.nanmean((tf_fm-th_fm),axis=0)\n\n#### Calculate significance\nstat_on,pvalue_on = UT.calc_indttest(np.nanmean(th_on,axis=3),\n np.nanmean(tf_on,axis=3))\nstat_dj,pvalue_dj = UT.calc_indttest(np.nanmean(th_dj,axis=3),\n np.nanmean(tf_dj,axis=3))\nstat_fm,pvalue_fm = UT.calc_indttest(np.nanmean(th_fm,axis=3),\n np.nanmean(tf_fm,axis=3))\n\n### Calculate zonal mean\nzdiff_on = np.nanmean((diff_on),axis=2)\nzdiff_dj = np.nanmean((diff_dj),axis=2)\nzdiff_fm = np.nanmean((diff_fm),axis=2)\n\n############################################################################\n############################################################################\n############################################################################\n##### Plot temperature profile\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Set limits for contours and colorbars\nlimit = np.arange(-4,4.1,0.1)\nbarlim = np.arange(-4,5,1)\nzscale = np.array([1000,700,500,300,200,\n 100,50,30,10])\nlatq,levq = np.meshgrid(lat,lev)\n\nfig = plt.figure()\nax1 = plt.subplot(131)\n\nax1.spines['top'].set_color('dimgrey')\nax1.spines['right'].set_color('dimgrey')\nax1.spines['bottom'].set_color('dimgrey')\nax1.spines['left'].set_color('dimgrey')\nax1.spines['left'].set_linewidth(2)\nax1.spines['bottom'].set_linewidth(2)\nax1.spines['right'].set_linewidth(2)\nax1.spines['top'].set_linewidth(2)\nax1.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax1.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax1.xaxis.set_ticks_position('bottom')\nax1.yaxis.set_ticks_position('left')\n\n\ncs = plt.contourf(lat,lev,zdiff_on,limit,extend='both')\ncs2 = plt.contour(lat,lev,zdiff_on,np.arange(0,1,1),\n linewidths=0.6,colors='dimgrey')\n#cs1 = 
plt.scatter(latq,levq,pvalue_on,color='k',marker='.',alpha=0.7,\n# edgecolor='k',linewidth=0.5)\nplt.contourf(latq,levq,pvalue_on,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nax1.annotate(r'\\textbf{ON}',\n xy=(0, 0),xytext=(0.34,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\n###########################################################################\nax2 = plt.subplot(132)\n\nax2.spines['top'].set_color('dimgrey')\nax2.spines['right'].set_color('dimgrey')\nax2.spines['bottom'].set_color('dimgrey')\nax2.spines['left'].set_color('dimgrey')\nax2.spines['left'].set_linewidth(2)\nax2.spines['bottom'].set_linewidth(2)\nax2.spines['right'].set_linewidth(2)\nax2.spines['top'].set_linewidth(2)\nax2.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax2.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax2.xaxis.set_ticks_position('bottom')\nax2.yaxis.set_ticks_position('left')\n\ncs = plt.contourf(lat,lev,zdiff_dj,limit,extend='both')\ncs2 = plt.contour(lat,lev,zdiff_dj,np.arange(0,1,1),\n linewidths=0.6,colors='dimgrey')\n#cs1 = plt.scatter(latq,levq,pvalue_dj,color='k',marker='.',alpha=0.7,\n# edgecolor='k',linewidth=0.5)\nplt.contourf(latq,levq,pvalue_dj,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nax2.annotate(r'\\textbf{DJ}',\n xy=(0, 0),xytext=(0.35,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\n###########################################################################\nax3 = plt.subplot(133)\n\nax3.spines['top'].set_color('dimgrey')\nax3.spines['right'].set_color('dimgrey')\nax3.spines['bottom'].set_color('dimgrey')\nax3.spines['left'].set_color('dimgrey')\nax3.spines['left'].set_linewidth(2)\nax3.spines['bottom'].set_linewidth(2)\nax3.spines['right'].set_linewidth(2)\nax3.spines['top'].set_linewidth(2)\nax3.tick_params(axis='y',direction='out',which='major',pad=3,\n width=2,color='dimgrey')\nax3.tick_params(axis='x',direction='out',which='major',pad=3,\n width=2,color='dimgrey') \nax3.xaxis.set_ticks_position('bottom')\nax3.yaxis.set_ticks_position('left')\n\ncs = plt.contourf(lat,lev,zdiff_fm,limit,extend='both')\ncs2 = plt.contour(lat,lev,zdiff_fm,np.arange(0,1,1),\n linewidths=0.6,colors='dimgrey')\n#cs1 = plt.scatter(latq,levq,pvalue_fm,color='k',marker='.',alpha=0.7,\n# edgecolor='k',linewidth=0.5)\nplt.contourf(latq,levq,pvalue_fm,colors='None',hatches=['////'],\n linewidth=5) \n\nplt.gca().invert_yaxis()\nplt.yscale('log',nonposy='clip')\n\nplt.xlim([0,90])\nplt.ylim([1000,10])\nplt.xticks(np.arange(0,96,15),map(str,np.arange(0,91,15)),fontsize=8)\nplt.yticks(zscale,map(str,zscale),ha='right',fontsize=8)\nplt.minorticks_off()\n\ncmap = ncm.cmap('NCV_blu_red') \ncs.set_cmap(cmap) \n\nax3.annotate(r'\\textbf{FM}',\n xy=(0, 0),xytext=(0.35,1.02),xycoords='axes fraction',\n fontsize=25,color='dimgrey',rotation=0)\n\ncbar_ax = 
fig.add_axes([0.312,0.1,0.4,0.03]) \ncbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\ncbar.set_label(r'\\textbf{$^\\circ$C}',fontsize=11,color='dimgray')\ncbar.set_ticks(barlim)\ncbar.set_ticklabels(list(map(str,barlim))) \ncbar.ax.tick_params(axis='x', size=.01)\n\nplt.subplots_adjust(wspace=0.3)\nplt.subplots_adjust(bottom=0.21)\n\nplt.savefig(directoryfigure + 'T_diff_FIT-HIT.png',dpi=300)\nprint('Completed: Script done!')\n\n"
},
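Each panel of plot_TEMP.py is a latitude-pressure cross section: contour the zonal mean, then invert the y-axis and make it logarithmic so pressure decreases upward. A stripped-down sketch with placeholder data (newer matplotlib drops the nonposy keyword used in the script):

import numpy as np
import matplotlib.pyplot as plt

lat = np.linspace(0, 90, 48)
lev = np.array([1000, 700, 500, 300, 200, 100, 50, 30, 10])
field = np.random.randn(lev.size, lat.size)       # (level, lat) placeholder

plt.contourf(lat, lev, field, np.arange(-4, 4.1, 0.5), extend='both')
plt.gca().invert_yaxis()                          # surface at the bottom
plt.yscale('log')                                 # stretch the stratosphere
plt.yticks(lev, list(map(str, lev)))
plt.minorticks_off()
plt.savefig('profile_demo.png', dpi=150)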
{
"alpha_fraction": 0.5542359352111816,
"alphanum_fraction": 0.5865663886070251,
"avg_line_length": 32.684444427490234,
"blob_id": "851d0953c496c8308c63cca817aafb278b983d91",
"content_id": "9f5bb439a4a302056b908abc376ed2b4a6442d8b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7578,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 225,
"path": "/Scripts/calc_NAOindex.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript calculates NAO index - currently a TEST script\n\nNotes\n-----\n Author : Zachary Labe\n Date : 6 September 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_DailyOutput as DO\nimport calc_Utilities as UT\nfrom eofs.standard import Eof\nimport scipy.stats as sts\nimport cmocean\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/nao/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Calculating NAO Index - %s----' % titletime)\n\n#### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Call function for Z500 data (daily)\nlat,lon,time,lev,z500_h = DO.readMeanExperi(directorydata,'Z500',\n 'FIC','surface')\nlat,lon,time,lev,z500_f = DO.readMeanExperi(directorydata,'Z500',\n 'FICT','surface')\n \n##### Calculate significance\n#stat,pvalue = UT.calc_indttest(TEMP_h,TEMP_f)\n \n### Calculate ensemble mean\nz5_diffq = z500_f-z500_h\n\n### Calculate for DJFM\nz5_diff = z5_diffq[:,91:,:,:] \n\n#### Slice over (20-90N) and (90W-40E)\nlatq = np.where((lat>=20) & (lat<=90))[0]\nlatnao = lat[latq]\n\nlonnew = np.mod(lon, 360.0) - 180.0\nlonq = np.where((lonnew>=-90) & (lonnew<=40))[0]\nlonnao = lonnew[lonq]\n\nz5_diffn = z5_diff[:,:,latq,:]\nz5_diffnao = z5_diffn[:,:,:,lonq]\n\nz5n_h = np.nanmean(z500_h[:,91:,latq,:],axis=0)\nz5nao_h = z5n_h[:,:,lonq]\n\n### Calculate NAO\n# Create an EOF solver to do the EOF analysis. Square-root of cosine of\n# latitude weights are applied before the computation of EOFs.\ncoslat = np.cos(np.deg2rad(latnao)).clip(0., 1.)\nwgts = np.sqrt(coslat)[..., np.newaxis]\nsolver = Eof(z5nao_h, weights=wgts)\n\n# Retrieve the leading EOF, expressed as the covariance between the leading PC\n# time series and the input SLP anomalies at each grid point.\neof1 = solver.eofsAsCovariance(neofs=1).squeeze()\npc1 = solver.pcs(npcs=1, pcscaling=1).squeeze()\n\n### Calculate NAO index\ndef NAOIndex(anomz5,eofpattern,members):\n \"\"\"\n Calculate NAO index by regressing Z500 onto the EOF1 pattern\n \"\"\"\n print('\\n>>> Using NAO Index function!') \n \n if members == True:\n nao = np.empty((anomz5.shape[0],anomz5.shape[1]))\n for i in range(anomz5.shape[0]):\n print('Regressing ensemble ---> %s!' 
% (i+1))\n for j in range(anomz5.shape[1]):\n varx = np.ravel(anomz5[i,j,:,:])\n vary = np.ravel(eofpattern[:,:])\n mask = np.isfinite(varx) & np.isfinite(vary) \n \n nao[i,j],intercept,r,p_value,std_err = sts.stats.linregress(\n varx[mask],\n vary[mask]) \n elif members == False: \n nao = np.empty((anomz5.shape[0]))\n for i in range(anomz5.shape[0]):\n varx = np.ravel(anomz5[i,:,:])\n vary = np.ravel(eofpattern[:,:])\n mask = np.isfinite(varx) & np.isfinite(vary) \n \n nao[i],intercept,r,p_value,std_err = sts.stats.linregress(\n varx[mask],\n vary[mask]) \n print('Completed: Regressed ensemble mean!')\n else:\n ValueError('Please select [True] or [False] for averageing!')\n \n print('*Completed: finished with NAO function!')\n return nao\n \n### Calculate NAO index\nnaoindex = NAOIndex(np.nanmean(z5_diffnao,axis=0),eof1,False)\npc1 = (naoindex-np.mean(naoindex))/np.std(naoindex)\n#pc1 = np.nanmean(pc1,axis=0)\n \n#### Plot figure\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nfig = plt.figure()\nax = plt.subplot(111)\n \nvarf = eof1[:,:] \n\nm = Basemap(projection='ortho',lon_0=-20,lat_0=60,resolution='l',\n area_thresh=10000.)\nm.drawmapboundary(fill_color='white')\nm.drawcoastlines(color='dimgray',linewidth=0.8)\nparallels = np.arange(50,90,10)\nmeridians = np.arange(-180,180,30)\nm.drawparallels(parallels,labels=[False,False,False,False],\n linewidth=0,color='k',fontsize=6)\nm.drawmeridians(meridians,labels=[False,False,False,False],\n linewidth=0,color='k',fontsize=6)\nm.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\n\n# Make the plot continuous\nbarlim = np.arange(-8,9,4)\nvalues = np.arange(-8,8.1,0.25)\n\nlon2,lat2 = np.meshgrid(lonnao,latnao)\n\ncs = m.contourf(lon2,lat2,varf,45,\n extend='both',latlon=True)\ncs1 = m.contour(lon2,lat2,varf,\n linewidths=0.1,colors='darkgrey',\n linestyles='-',latlon=True)\n \ncmap = cmocean.cm.balance \ncs.set_cmap(cmap) \n\ncbar = m.colorbar(cs,drawedges=True,location='right',pad = 0.55) \n#cbar.set_ticks(barlim)\n#cbar.set_ticklabels(list(map(str,barlim))) \ncbar.ax.tick_params(labelsize=8) \n\nplt.savefig(directoryfigure + 'testeof1_FIC.png',dpi=300)\n\n############################################################################\n############################################################################\n############################################################################\n#### Plot NAO index\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\nfig = plt.figure()\nax = plt.subplot(111)\n\nadjust_spines(ax, ['left', 'bottom'])\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['left'].set_color('darkgrey')\nax.spines['bottom'].set_color('darkgrey')\nax.spines['left'].set_linewidth(2)\nax.spines['bottom'].set_linewidth(2)\nax.tick_params('both',length=4,width=2,which='major',color='darkgrey')\n\nax.yaxis.grid(zorder=1,color='darkgrey',alpha=0.35)\n\nzeroline = [0]*122\n\npc1_masked = np.ma.masked_less_equal(pc1, 0)\n\nplt.bar(np.arange(len(pc1)),pc1,color='tomato',edgecolor='tomato',zorder=9) 
\nplt.bar(np.arange(len(pc1)),pc1_masked.filled(np.nan),color='dodgerblue',\n edgecolor='dodgerblue',zorder=10)\n\n#plt.plot(pc1,linewidth=2.5,color='dodgerblue',alpha=1,\n# linestyle='-')\n\n#plt.legend(shadow=False,fontsize=9,loc='lower center',\n# fancybox=True,frameon=False,ncol=5)\nplt.ylabel(r'\\textbf{NAO Index (Z500)}',color='dimgrey',fontsize=13)\n\nplt.yticks(np.arange(-5,6,1),list(map(str,np.arange(-5,6,1))),fontsize=9)\nplt.ylim([-3,3])\n\nxlabels = [r'Dec',r'Jan',r'Feb',r'Mar',r'Apr'] \nplt.xticks(np.arange(0,121,30),xlabels,fontsize=9)\nplt.xlim([0,120])\n\nplt.savefig(directoryfigure + 'NAO_Index_FICT-FIC.png',dpi=300)"
},
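The NAOIndex function above builds the index by regressing each flattened Z500 anomaly map onto the flattened EOF1 pattern and standardizing the slopes. The core of that projection step, sketched with random stand-ins for the anomaly fields and the EOF:

import numpy as np
import scipy.stats as sts

ntime, nlat, nlon = 120, 36, 53
anom = np.random.randn(ntime, nlat, nlon)   # daily Z500 anomaly maps
eof1 = np.random.randn(nlat, nlon)          # leading EOF pattern

nao = np.empty(ntime)
for t in range(ntime):
    nao[t] = sts.linregress(anom[t].ravel(), eof1.ravel())[0]  # slope only
nao = (nao - nao.mean()) / nao.std()        # standardized index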
{
"alpha_fraction": 0.5934853553771973,
"alphanum_fraction": 0.6133550405502319,
"avg_line_length": 37.61006164550781,
"blob_id": "8739361b439bdd40f6b6363abf77bf9a9e72f520",
"content_id": "a95cb18afbb5d470fdc54bdb9233681e8f3b4eb6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6140,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 159,
"path": "/Scripts/calc_SHLH_SICgridcells.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCalculate weighted turbulent heat flux (sensible + latent) for areas\nover the control with a minimum of 10% sea ice concentration.\n\nNotes\n-----\n Author : Zachary Labe\n Date : 9 February 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport cmocean\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Calculate weighted turbulent fluxes - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\n### Define constants\nrunnames = [r'HIT',r'FIT',r'HIT2',r'FICT2',r'FICT']\nexperiments = ['FIT--HIT','FICT2--HIT2','FICT--HIT']\n\n### Read in SIC data\nlat,lon,time,lev,sic = MO.readExperi(directorydata,'SIC','HIT','surface')\n\n### Find where ice is < 10% (values are 0 to 100 in sic array)\nsicq = sic[5,:,:,:].copy()\nsicq[np.where(sicq < 10)] = 0.0\nsicq[np.where((sicq >= 10) & (sicq <= 100))] = 1.\nsicn = np.append(sicq[8:],sicq[:3],axis=0)\n\n###############################################################################\n###############################################################################\n###############################################################################\n# Function to read surface heat flux data\ndef readFlux():\n \"\"\"\n Read in heat flux data for selected variables and calculate differences\n between experiments\n \"\"\"\n ### Call function for latent heat flux\n lat,lon,time,lev,lhhit = MO.readExperi(directorydata,\n 'LHFLX','HIT','surface')\n lat,lon,time,lev,lhfit = MO.readExperi(directorydata,\n 'LHFLX','FIT','surface')\n lat,lon,time,lev,lhcit = MO.readExperi(directorydata,\n 'LHFLX','CIT','surface')\n lat,lon,time,lev,lhfic = MO.readExperi(directorydata,\n 'LHFLX','FIC','surface')\n lat,lon,time,lev,lhfict = MO.readExperi(directorydata,\n 'LHFLX','FICT','surface')\n ### Call function for sensible heat flux\n lat,lon,time,lev,shhit = MO.readExperi(directorydata,\n 'SHFLX','HIT','surface')\n lat,lon,time,lev,shfit = MO.readExperi(directorydata,\n 'SHFLX','FIT','surface')\n lat,lon,time,lev,shcit = MO.readExperi(directorydata,\n 'SHFLX','CIT','surface')\n lat,lon,time,lev,shfic = MO.readExperi(directorydata,\n 'SHFLX','FIC','surface')\n lat,lon,time,lev,shfict = MO.readExperi(directorydata,\n 'SHFLX','FICT','surface')\n ### Calculate turbulent heat fluxes\n varhit = lhhit + shhit\n varfit = lhfit + shfit\n varcit = lhcit + shcit\n varfic = lhfic + shfic\n varfict = lhfict + shfict\n \n ### Compare experiments\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Compute comparisons for experiments - take ensemble average\n diff_FITHIT = np.nanmean(varfit - varhit,axis=0)\n diff_FICCIT = np.nanmean(varfic - varcit,axis=0)\n diff_FICTHIT = np.nanmean(varfict - varhit,axis=0)\n diffruns = [diff_FITHIT,diff_FICCIT,diff_FICTHIT]\n \n return diffruns,runs,lat,lon\n\n### Call function to read data for selected variable\ndiffruns_rnet,runs_rnet,lat,lon = 
readFlux()\n\ndifftotal_FITHITq = diffruns_rnet[0] + diffruns_rnet[0]\ndifftotal_FICCITq = diffruns_rnet[1] + diffruns_rnet[1]\ndifftotal_FICTHITq = diffruns_rnet[2] + diffruns_rnet[2]\n\ndifftotal_FITHIT = np.append(difftotal_FITHITq[8:],difftotal_FITHITq[:3],axis=0)\ndifftotal_FICCIT = np.append(difftotal_FICCITq[8:],difftotal_FICCITq[:3],axis=0)\ndifftotal_FICTHIT = np.append(difftotal_FICTHITq[8:],difftotal_FICTHITq[:3],axis=0)\ndifftotallhsh = [difftotal_FITHIT,difftotal_FICCIT,difftotal_FICTHIT]\n\n#### Take average above 30N\nlatq = np.where(lat > 30)[0]\nlatslice = lat[latq]\nlon2,lat2 = np.meshgrid(lon,latslice)\n\n### Mask out values not over SIC grid cells\nrnetvals = []\nfor i in range(len(difftotallhsh)):\n rnetvalsq = difftotallhsh[i] * sicn\n rnetvalsq[np.where(rnetvalsq == 0.0)] = np.nan\n rnetvalsq = rnetvalsq[:,latq,:]\n \n rnetvals.append(rnetvalsq)\n \n### Calculated weighted average \nweightedrnet = np.empty((len(rnetvals),sicn.shape[0]))\nfor i in range(len(rnetvals)):\n weightedrnet[i,:] = UT.calc_weightedAve(rnetvals[i],lat2)\n \n#### Create files for rnet\nnp.savetxt(directorydata2 + 'weightedsic_SHLH.txt',weightedrnet.transpose(),\n delimiter=',',header=' '.join(experiments)+'\\n',\n footer='\\n File contains net turbulet energy flux response' \\\n '\\n which are weighted above 30N for SIC cells >10% \\n' \\\n ' in all months of the year',newline='\\n\\n')\n\nprint('Completed: Script done!')\n\nfig = plt.figure()\nax = plt.subplot(111)\nm = Basemap(projection='ortho',lon_0=300,lat_0=90,resolution='l') \n#var = sicn[4,latq,:]\na=rnetvals[2]\nvar = a[-1]\nm.drawmapboundary(fill_color='white')\nm.drawcoastlines(color='dimgrey',linewidth=0.3)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\nm.drawparallels(parallels,labels=[True,True,True,True],\n linewidth=0.3,color='k',fontsize=6)\nm.drawmeridians(meridians,labels=[True,True,True,True],\n linewidth=0.3,color='k',fontsize=6)\ncs = m.contourf(lon2,lat2,var,55,latlon=True,extend='both') \ncs.set_cmap(cmocean.cm.balance)\ncbar = plt.colorbar(cs,extend='both') \nplt.savefig(directoryfigure + 'test_sic.png',dpi=300)\n\n"
},
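UT.calc_weightedAve is not included in this snapshot; a plausible sketch of what the weighted-averaging step needs to do here is below, weighting each cell by cos(latitude) and skipping the NaNs left by the >10% sea-ice mask (function name and exact behavior are assumptions, not the utility's actual code):

import numpy as np

def weighted_ave_sketch(var, lat2):
    # var: (time, lat, lon) with NaNs outside the mask; lat2: (lat, lon)
    w = np.cos(np.deg2rad(lat2))
    num = np.nansum(var * w, axis=(1, 2))
    # count weights only where var actually has data
    den = np.nansum(np.where(np.isnan(var), np.nan, w), axis=(1, 2))
    return num / den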
{
"alpha_fraction": 0.6112014651298523,
"alphanum_fraction": 0.6511291861534119,
"avg_line_length": 29.585636138916016,
"blob_id": "1b7349a775b252d52d3271c9cc59648affaf4631",
"content_id": "15c2ebdaa7f6c78bff39654228336ce182d713b3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5535,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 181,
"path": "/Scripts/calc_forcings_LENS.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nForcing files for SITperturb from LENS (1979-2005; SST, SIC, SIT)\n(2060-2080; SIT)\n\nNotes\n-----\n Reference : Kay et al. [2014]\n Author : Zachary Labe\n Date : 11 July 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as c\nfrom mpl_toolkits.basemap import Basemap\nimport nclcmaps as ncm\nimport datetime\nimport read_var_LENS as LV\nimport read_SeaIceThick_LENS as lens\n\n### Define directories\ndirectorydata1 = '/surtsey/zlabe/LENS/'\ndirectorydata2 = '/surtsey/'\ndirectorydata3 = '/surtsey/zlabe/LENS/ForcingPerturb/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Calculate forcing files - %s----' % titletime)\n\nensembles = ['02','03','04','05','06','07','08','09'] + \\\n list(map(str,np.arange(10,36,1)))+ list(map(str,np.arange(101,106,1)))\n\n### Alott time series\nyear1 = 2006\nyear2 = 2080\nyears = np.arange(year1,year2+1,1)\n \n### Read in functions\nsst,lats,lons = LV.readLENSEnsemble(directorydata1,'SST') # until 2080\n#sic,lats,lons = LV.readLENSEnsemble(directorydata1,'SIC') # until 2080\n#sit,lats,lons = lens.readLENSEnsemble(directorydata2,'None','historical')\n#sit,lats,lons = lens.readLENSEnsemble(directorydata2,'None','rcp85')\n\n### Pick years\nyearmin = 2051\nyearmax = 2080\nyearq = np.where((years >= yearmin) & (years <= yearmax))[0]\n\n### Average composite for years\nsstn = np.nanmean(sst[:,yearq,:,:,:],axis=1)\n#sicn = np.nanmean(sic[:,yearq,:,:,:],axis=1)\n#sitn = np.mean(sit[:,yearq,:,:,:],axis=1)\n\n#sst = None\n#sic = None\n#sit = None\n\nprint('\\n Completed: Average over years %s - %s!' % (yearmin,yearmax))\n\n### Average over ensembles\nsst_ens = np.nanmean(sstn,axis=0)\n#sic_ens = np.nanmean(sicn,axis=0)\n#sit_ens = np.mean(sitn,axis=0)\n\ndel sstn\n#del sicn\n#del sitn\n\n#sit_ens[np.where(sit_ens > 12)] = np.nan \n\nprint('Completed: Average over all ensembles!')\n\ndef netcdfLENS(lats,lons,var,varqq,directory):\n print('\\n>>> Using netcdf4LENS function!')\n \n name = 'lens_comp_%s_20512080.nc' % varqq\n filename = directory + name\n ncfile = Dataset(filename,'w',format='NETCDF4')\n ncfile.description = 'LENS %s interpolated on 1x1 grid' % varqq\n \n ### Dimensions\n ncfile.createDimension('months',var.shape[0])\n ncfile.createDimension('lat',var.shape[1])\n ncfile.createDimension('lon',var.shape[2])\n \n ### Variables\n months = ncfile.createVariable('months','f4',('months'))\n latitude = ncfile.createVariable('lat','f4',('lat'))\n longitude = ncfile.createVariable('lon','f4',('lon'))\n varns = ncfile.createVariable(varqq,'f4',('months','lat','lon'))\n \n ### Units\n if varqq == 'sst':\n varns.units = 'K'\n elif varqq == 'sic':\n varns.units = 'fraction'\n elif varqq == 'sit':\n varns.units = 'm'\n ncfile.title = 'LENS %s' % varqq\n ncfile.instituion = 'Dept. ESS at University of California, Irvine'\n ncfile.source = 'NCAR LENS'\n ncfile.references = 'Kay et al. 
[2013]'\n \n ### Data\n months[:] = np.arange(1,12+1,1)\n latitude[:] = lats\n longitude[:] = lons\n varns[:] = var\n \n ncfile.close()\n print('*Completed: Created netCDF4 File!')\n\nnetcdfLENS(lats,lons,sst_ens,'sst',directorydata3)\n#netcdfLENS(lats,lons,sic_ens,'sic',directorydata3)\n#netcdfLENS(lats,lons,sit_ens,'sit',directorydata3)\n\n#data = Dataset(directorydata3 + 'lens_comp_sit_20512080.nc')\n#lons = data.variables['lon'][:]\n#lats = data.variables['lat'][:]\n#sit = data.variables['sit'][:]\n#data.close()\n#\n#plt.rc('text',usetex=True)\n#plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n#\n#fig = plt.figure()\n#ax = plt.subplot(111)\n#\n#m = Basemap(projection='robin',lon_0=0,resolution='l')\n#m = Basemap(projection='npstere',boundinglat=67,lon_0=270,resolution='l',round =True)\n#\n#sit[np.where(sit == 0)] = np.nan \n#var = sit[9]\n#\n#lons2,lats2 = np.meshgrid(lons,lats)\n# \n#m.drawmapboundary(fill_color='white')\n#m.drawcoastlines(color='dimgrey',linewidth=0.3)\n#parallels = np.arange(-90,90,30)\n#meridians = np.arange(-180,180,60)\n#m.drawparallels(parallels,labels=[True,True,True,True],\n# linewidth=0.3,color='k',fontsize=6)\n#m.drawmeridians(meridians,labels=[True,True,True,True],\n# linewidth=0.3,color='k',fontsize=6)\n#m.drawlsmask(land_color='darkgrey',ocean_color='mintcream')\n#\n#cs = m.contourf(lons2,lats2,var,80,latlon=True,extend='both')\n#cs1 = m.contour(lons2,lats2,var,50,linewidths=0.2,colors='darkgrey',\n# linestyles='-',latlon=True)\n#\n#def colormapSIT():\n# cmap1 = plt.get_cmap('BuPu')\n# cmap2 = plt.get_cmap('RdPu_r')\n# cmap3 = plt.get_cmap('gist_heat_r')\n# cmaplist1 = [cmap1(i) for i in xrange(30,cmap1.N-10)]\n# cmaplist2 = [cmap2(i) for i in xrange(15,cmap2.N)]\n# cmaplist3 = [cmap3(i) for i in xrange(cmap2.N-15)]\n# cms_sit = c.ListedColormap(cmaplist1 + cmaplist2 + cmaplist3)\n# return cms_sit\n# \n#cmap = ncm.cmap('GMT_ocean') \n##cmap = colormapSIT() \n#cs.set_cmap(cmap)\n#\n#cbar = plt.colorbar(cs,extend='both') \n#cbar.set_label(r'\\textbf{SIT}') \n##ticks = np.arange(0,8,1)\n##cbar.set_ticks(ticks)\n##cbar.set_ticklabels(map(str,ticks)) \n#\n#plt.savefig(directoryfigure + 'test_sit.png',dpi=300)\n#\n#print 'Completed: Script done!'"
},
{
"alpha_fraction": 0.6067472100257874,
"alphanum_fraction": 0.6324269771575928,
"avg_line_length": 27.385713577270508,
"blob_id": "b05d67595119e96c8d238ae752e053ae192870bf",
"content_id": "6c509fb8920a9c5ea7e125c43aa6c64caf88db50",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1986,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 70,
"path": "/Scripts/plot_forcings_testplot.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlot test files of the forcings\n\nNotes\n-----\n Reference : Kay et al. [2014]\n Author : Zachary Labe\n Date : 1 February 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport nclcmaps as ncm\nimport datetime\n\n### Define directories\ndirectorydata = '/surtsey/ypeings/'\ndirectoryfigure = '/home/zlabe/Desktop/testseaice/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Calculate forcing file SIT constant - %s----' % titletime)\n\n### Read in data \ndata = Dataset(directorydata + 'SST-SIC-SIT_lens_2051-2080_polar.nc')\nlon = data.variables['lon'][:]\nlat = data.variables['lat'][:]\nsit = data.variables['ice_thick'][:]\ndata.close()\n\nlons,lats = np.meshgrid(lon,lat)\n\n#### Test Data\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nfor i in range(sit.shape[0]):\n fig = plt.figure()\n ax = plt.subplot(111)\n \n m = Basemap(projection='ortho',lon_0=300,lat_0=90,resolution='l')\n \n var = sit[i]\n \n m.drawmapboundary(fill_color='white')\n m.drawcoastlines(color='darkgrey',linewidth=0.3)\n \n cs = m.contourf(lons,lats,var,np.arange(0,5.1,0.1),latlon=True,extend='max')\n cs1 = m.contour(lons,lats,lats,np.arange(66.6,67.6,1),linewidths=1,colors='r',\n linestyles='--',latlon=True)\n \n cs.set_cmap('cubehelix')\n m.fillcontinents(color='dimgrey')\n \n cbar = plt.colorbar(cs,extend='both') \n \n cbar.set_label(r'\\textbf{SIT (m)}') \n ticks = np.arange(0,6,1)\n cbar.set_ticks(ticks)\n cbar.set_ticklabels(list(map(str,ticks))) \n \n plt.savefig(directoryfigure + 'polar_testplot_%s.png' % i,dpi=300)"
},
{
"alpha_fraction": 0.5819818377494812,
"alphanum_fraction": 0.6043485999107361,
"avg_line_length": 38.104530334472656,
"blob_id": "b8e96059c81bf20cc79212062f8a6968f1be4049",
"content_id": "96e040cc24aa1270200c10e30b4f2713b2f688a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11222,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 287,
"path": "/Scripts/calc_SITSIC_ratio.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCompute ratio (%) between SIT and SIC responses\n\nNotes\n-----\n Author : Zachary Labe\n Date : 15 February 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport read_MonthlyOutput as MO\nimport cmocean\nimport scipy.stats as sts\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting SIT-SIC ratio - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nmonths = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR']\nvarnames = ['U10','Z30','U300','Z500','SLP','T2M','RNET']\n#varnames = ['SLP']\n\nratiovar = []\nfor v in range(len(varnames)):\n ### Call function for surface temperature data from reach run\n lat,lon,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT',r'FIC',r'CIT']\n experiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FIC--CIT}']\n runs = [varhit,varfit,varfic,varcit]\n \n ### Separate per 2 month periods\n varmo_on = np.empty((4,varhit.shape[0],varhit.shape[2],varhit.shape[3]))\n varmo_dj = np.empty((4,varhit.shape[0]-1,varhit.shape[2],varhit.shape[3]))\n varmo_fm = np.empty((4,varhit.shape[0],varhit.shape[2],varhit.shape[3]))\n for i in range(len(runs)):\n varmo_on[i] = np.nanmean(runs[i][:,9:11,:,:],axis=1) \n varmo_dj[i],varmo_dj[i] = UT.calcDecJan(runs[i],runs[i],lat,lon,'surface',1) \n varmo_fm[i] = np.nanmean(runs[i][:,1:3,:,:],axis=1)\n \n ### Calculate differences [FIT-HIT and FICT - FIT]\n diff_fithit_on = np.nanmean(varmo_on[1] - varmo_on[0],axis=0)\n diff_ficcit_on = np.nanmean(varmo_on[2] - varmo_on[3],axis=0)\n \n diff_fithit_dj = np.nanmean(varmo_dj[1] - varmo_dj[0],axis=0)\n diff_ficcit_dj = np.nanmean(varmo_dj[2] - varmo_dj[3],axis=0)\n \n diff_fithit_fm = np.nanmean(varmo_fm[1] - varmo_fm[0],axis=0)\n diff_ficcit_fm = np.nanmean(varmo_fm[2] - varmo_fm[3],axis=0)\n \n ### Calculate significance \n stat_FITHITon,pvalue_FITHITon = UT.calc_indttest(varmo_on[1],varmo_on[0])\n stat_FICCITon,pvalue_FICCITon = UT.calc_indttest(varmo_on[2],varmo_on[3])\n\n stat_FITHITdj,pvalue_FITHITdj = UT.calc_indttest(varmo_dj[1],varmo_dj[0])\n stat_FICCITdj,pvalue_FICCITdj = UT.calc_indttest(varmo_dj[2],varmo_dj[3])\n\n stat_FITHITfm,pvalue_FITHITfm = UT.calc_indttest(varmo_fm[1],varmo_fm[0])\n stat_FICCITfm,pvalue_FICCITfm = UT.calc_indttest(varmo_fm[2],varmo_fm[3])\n \n ### Create mask of significant values\n pvalue_FITHITon[np.where(np.isnan(pvalue_FITHITon))] = 0.0\n pvalue_FICCITon[np.where(np.isnan(pvalue_FICCITon))] = 0.0\n\n pvalue_FITHITdj[np.where(np.isnan(pvalue_FITHITdj))] = 0.0\n 
pvalue_FICCITdj[np.where(np.isnan(pvalue_FICCITdj))] = 0.0\n\n pvalue_FITHITfm[np.where(np.isnan(pvalue_FITHITfm))] = 0.0\n pvalue_FICCITfm[np.where(np.isnan(pvalue_FICCITfm))] = 0.0\n \n pvalue_FITHIT = [pvalue_FITHITon,pvalue_FITHITdj,pvalue_FITHITfm]\n pvalue_FICCIT = [pvalue_FICCITon,pvalue_FICCITdj,pvalue_FICCITfm]\n \n ### Create mask of shared significant values\n mask = np.asarray(pvalue_FITHIT) * np.asarray(pvalue_FICCIT)\n \n ### Slice out lats below 40\n latq = np.where(lat>40)[0]\n latqq = lat[latq]\n \n ### Create 2nd meshgrid with lats > 40N\n lonnew,latnew=np.meshgrid(lon,latqq)\n \n ### Create mask for ON, DJ, FM\n mask = mask[:,latq,:]\n \n ### Keep only values significant in both SIT and SIC responses\n# diff_fithit_onq = diff_fithit_on[latq,:] * mask[0,:,:]\n# diff_fithit_djq = diff_fithit_dj[latq,:] * mask[1,:,:]\n# diff_fithit_fmq = diff_fithit_fm[latq,:] * mask[2,:,:]\n# \n# diff_ficcit_onq = diff_ficcit_on[latq,:] * mask[0,:,:]\n# diff_ficcit_djq = diff_ficcit_dj[latq,:] * mask[1,:,:]\n# diff_ficcit_fmq = diff_ficcit_fm[latq,:] * mask[2,:,:]\n \n diff_fithit_onq = diff_fithit_on[latq,:] * pvalue_FITHITon[latq,:]\n diff_fithit_djq = diff_fithit_dj[latq,:] * pvalue_FITHITdj[latq,:]\n diff_fithit_fmq = diff_fithit_fm[latq,:] * pvalue_FITHITfm[latq,:]\n \n diff_ficcit_onq = diff_ficcit_on[latq,:] * pvalue_FICCITon[latq,:]\n diff_ficcit_djq = diff_ficcit_dj[latq,:] * pvalue_FICCITdj[latq,:]\n diff_ficcit_fmq = diff_ficcit_fm[latq,:] * pvalue_FICCITfm[latq,:]\n \n ### Change 0 to nan as to no affect the averaging\n# diff_fithit_onq[np.where(diff_fithit_onq == 0.0)] = np.nan\n# diff_fithit_djq[np.where(diff_fithit_djq == 0.0)] = np.nan\n# diff_fithit_fmq[np.where(diff_fithit_fmq == 0.0)] = np.nan\n# \n# diff_ficcit_onq[np.where(diff_ficcit_onq == 0.0)] = np.nan\n# diff_ficcit_djq[np.where(diff_ficcit_djq == 0.0)] = np.nan\n# diff_ficcit_fmq[np.where(diff_ficcit_fmq == 0.0)] = np.nan\n \n fithit = [diff_fithit_onq,diff_fithit_djq,diff_fithit_fmq]\n ficcit = [diff_ficcit_onq,diff_ficcit_djq,diff_ficcit_fmq]\n \n def calc_iceRatio(varx,vary,maske,up,down):\n \"\"\"\n Compute relative % difference\n \"\"\"\n print('\\n>>> Using calc_iceRatio function!')\n \n ### Mask extremes\n if maske == True:\n print('MASKING EXTREMES!')\n \n varxup = np.nanpercentile(varx,up)\n varxdo = np.nanpercentile(varx,down)\n \n varyup = np.nanpercentile(vary,up)\n varydo = np.nanpercentile(vary,down)\n \n print(varxup,varxdo)\n print(varyup,varydo)\n \n varx[np.where((varx >= varxup) | (varx <= varxdo))] = np.nan\n vary[np.where((vary >= varyup) | (vary <= varydo))] = np.nan\n \n percchange = (abs(varx)/abs(vary)) * 100.\n \n ### Test if real values\n if np.isnan(percchange).all() == True:\n percchange[np.where(np.isnan(percchange))] = 0.0\n if percchange > 500:\n percchange = 0.0\n \n print('*Completed: Finished calc_iceRatio function!')\n return percchange,varx,vary\n \n fithitave = np.empty((3))\n ficcitave = np.empty((3))\n for i in range(len(fithit)):\n# fithit[i][np.where(fithit[0] == 0.0)] = np.nan\n# ficcit[i][np.where(ficcit[0] == 0.0)] = np.nan\n fithitave[i] = UT.calc_weightedAve(abs(fithit[i]),latnew)\n ficcitave[i] = UT.calc_weightedAve(abs(ficcit[i]),latnew)\n\n ratio = []\n for i in range(len(fithit)):\n percchangeq,varx,vary = calc_iceRatio(fithitave[i],ficcitave[i],False,95,5)\n \n ratio.append(percchangeq)\n ratiovar.append(ratio)\nmeanratiovar = np.asarray(ratiovar).squeeze()\n#ratiovar[np.where(np.isnan(ratiovar))] = 0.0\n#meanratiovar = 
UT.calc_weightedAve(ratiovar[:,:,:,:],latnew)\n\nvaryy = abs(fithit[0])\nfig = plt.figure()\nax = plt.subplot(111)\nm = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',\n area_thresh=10000.) \nm.drawmapboundary(fill_color='white')\nm.drawcoastlines(color='dimgrey',linewidth=0.3)\nparallels = np.arange(-90,90,30)\nm.drawparallels(parallels,labels=[True,True,True,True],\n linewidth=0.3,color='k',fontsize=6)\ncs = m.contourf(lonnew,latnew,varyy[:,:],55,latlon=True,extend='both') \ncs.set_cmap(cmocean.cm.thermal)\ncbar = plt.colorbar(cs,extend='both') \nplt.savefig(directoryfigure + 'test_ratio.png',dpi=300)\n \n#### Save file\nnp.savetxt(directorydata2 + 'sicsitratio.txt',np.round(meanratiovar.transpose(),1),delimiter=',',\n fmt='%3.1f',header=' '.join(varnames)+'\\n',\n footer='\\n File contains ratio values of relative contributions' \\\n '\\n between FIT-HIT and FIC-CIT to get the relative \\n' \\\n ' contributions of SIT and SIC [bimonth, ON,DJ,FM]',newline='\\n\\n')\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot Figure\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nfig = plt.figure()\nax = plt.subplot(111)\n\nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_color('none')\nax.spines['left'].set_color('none')\nax.get_xaxis().set_tick_params(direction='out', width=0,length=0,\n color='w')\n\nplt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='on', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='on')\nplt.tick_params(\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left='off', # ticks along the bottom edge are off\n right='off', # ticks along the top edge are off\n labelleft='on')\n\ncs = plt.pcolormesh(meanratiovar,shading='faceted',edgecolor='w',\n linewidth=0.3,vmin=0,vmax=50)\n\nfor i in range(meanratiovar.shape[0]):\n for j in range(meanratiovar.shape[1]):\n plt.text(j+0.5,i+0.5,r'\\textbf{%3.1f}' % meanratiovar[i,j],fontsize=6,\n color='r',va='center',ha='center')\n\ncs.set_cmap(cmocean.cm.tempo)\n\nylabels = [r'\\textbf{U10}',r'\\textbf{Z30}',r'\\textbf{U300}',r'\\textbf{Z500}',\n r'\\textbf{SLP}',r'\\textbf{T2M}',r'\\textbf{RNET}']\nplt.yticks(np.arange(0.5,7.5,1),ylabels,ha='right',color='dimgrey',\n va='center')\nyax = ax.get_yaxis()\nyax.set_tick_params(pad=0.7)\n\nxlabels = [r'\\textbf{ON}',r'\\textbf{DJ}',r'\\textbf{FM}']\nplt.xticks(np.arange(0.5,4.5,1),xlabels,ha='center',color='dimgrey',\n va='center')\nxax = ax.get_xaxis()\nxax.set_tick_params(pad=8)\nplt.xlim([0,3])\n\ncbar = plt.colorbar(cs,orientation='horizontal',aspect=50)\nticks = np.arange(0,51,50)\nlabels = list(map(str,np.arange(0,51,50)))\ncbar.set_ticks(ticks)\ncbar.set_ticklabels(labels)\ncbar.ax.tick_params(axis='x', size=.001)\ncbar.outline.set_edgecolor('dimgrey')\ncbar.set_label(r'\\textbf{Ratio [\\%]}',\n color='dimgrey',labelpad=3,fontsize=12)\n\nplt.subplots_adjust(top=0.8)\n\nplt.savefig(directoryfigure + 'SITSIC_ratio_mesh.png',dpi=300)"
},
{
"alpha_fraction": 0.6364425420761108,
"alphanum_fraction": 0.6737527251243591,
"avg_line_length": 27.469135284423828,
"blob_id": "30462b263cb5737dc0224f047254b48ef639eb5a",
"content_id": "1d51149215dd71e9285f89d436af1843bf9986c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2305,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 81,
"path": "/Scripts/calc_forcings_SITc_LENS.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCreate forcing file of constant sea ice thickness at 2 m\n\nNotes\n-----\n Reference : Kay et al. [2014]\n Author : Zachary Labe\n Date : 16 August 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport nclcmaps as ncm\nimport datetime\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/LENS/ForcingPerturb/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Calculate forcing file SIT constant - %s----' % titletime)\n\n### Set all values to 2 m\n### Used NCO by:\n# ncap2 -s 'where(ice_thick>0) ice_thick=2;' SST-SIT_lens_CTL.nc test.nc\n# ncap2 -s 'where(ice_thick>0) ice_thick=2;' SST-SIC-SIT_lens_2051-2080_FICT.nc SST-SIC-SIT_lens_2051-2080_FIC.nc\n\n### Read in data \ndata = Dataset(directorydata + 'SST-SIC-SIT_lens_2051-2080_FIC.nc')\nlon = data.variables['lon'][:]\nlat = data.variables['lat'][:]\nsit = data.variables['ice_thick'][:]\ndata.close()\n\nlons,lats = np.meshgrid(lon,lat)\n\n#### Test Data\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\nfig = plt.figure()\nax = plt.subplot(111)\n\nm = Basemap(projection='ortho',lon_0=300,lat_0=90,resolution='l')\n \nvar = sit[2]\n \nm.drawmapboundary(fill_color='white')\nm.drawcoastlines(color='dimgrey',linewidth=0.3)\nparallels = np.arange(-90,90,30)\nmeridians = np.arange(-180,180,60)\nm.drawparallels(parallels,labels=[True,True,True,True],\n linewidth=0.3,color='k',fontsize=6)\nm.drawmeridians(meridians,labels=[True,True,True,True],\n linewidth=0.3,color='k',fontsize=6)\n\ncs = m.contourf(lons,lats,var,np.arange(0,3,0.1),latlon=True,extend='both')\n\nm.fillcontinents(color='dimgrey')\n \ncs.set_cmap('cubehelix')\n\ncbar = plt.colorbar(cs,extend='both') \n\ncbar.set_label(r'\\textbf{SIT (m)}') \nticks = np.arange(0,5,1)\ncbar.set_ticks(ticks)\ncbar.set_ticklabels(list(map(str,ticks))) \n\nplt.savefig(directoryfigure + 'test_sitconstant.png',dpi=300)\n\nprint('Completed: Script done!')"
},
{
"alpha_fraction": 0.5213649868965149,
"alphanum_fraction": 0.555934727191925,
"avg_line_length": 37.732757568359375,
"blob_id": "bed8218ec12f734143335436295253aae59c4dfe",
"content_id": "ef4790b7e5b3094749e4fdced6d395d982c9df40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13480,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 348,
"path": "/Scripts/plot_ClimoWavex_FIT.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlots DJF for climatological wave number X for WACCM4 experiments\n\nNotes\n-----\n Author : Zachary Labe\n Date : 12 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Climo Wave - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nvarnames = ['Z300']\nqbophase = ['pos','non','neg']\nfor v in range(len(varnames)):\n ### Call function for geopotential height data from reach run\n lat,lon1,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon1,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon1,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT']\n experiments = [r'\\textbf{FIT--HIT}']\n runs = [varhit,varfit]\n \n ### Read in QBO phases \n filenamefitp = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[0]\n filenamefitno = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[1]\n filenamefitn = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[2]\n pos_fit = np.genfromtxt(filenamefitp,unpack=True,usecols=[0],dtype='int')\n non_fit = np.genfromtxt(filenamefitno,unpack=True,usecols=[0],dtype='int')\n neg_fit = np.genfromtxt(filenamefitn,unpack=True,usecols=[0],dtype='int')\n \n ### Separate per periods (Feb,Mar)\n varh_f = runs[0][:,1,:,:]\n# varh_f = varh_f[pos_fit,:,:]\n varh_m = runs[0][:,2,:,:]\n# varh_m = varh_m[pos_fit,:,:]\n \n filenamehitp = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[0]\n filenamehitno = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[1]\n filenamehitn = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[2]\n pos_hit = np.genfromtxt(filenamehitp,unpack=True,usecols=[0],dtype='int')\n non_hit = np.genfromtxt(filenamehitno,unpack=True,usecols=[0],dtype='int')\n neg_hit = np.genfromtxt(filenamehitn,unpack=True,usecols=[0],dtype='int')\n \n varf_f = runs[1][:,1,:,:]\n# varf_f = varf_f[pos_hit,:,:]\n varf_m = runs[1][:,2,:,:]\n# varf_m = varf_m[pos_hit,:,:]\n \n ### Compute comparisons for FM - taken ensemble average\n diff_feb = np.nanmean(varf_f - varh_f,axis=0)\n diff_mar = np.nanmean(varf_m - varh_m,axis=0)\n \n ### Calculate significance for FM\n stat_feb,pvalue_feb = UT.calc_indttest(varfit[:,1,:,:],varhit[:,1,:,:])\n stat_mar,pvalue_mar = UT.calc_indttest(varfit[:,1,:,:],varhit[:,1,:,:])\n \n ### Read in wave number \n lat,lon,time,lev,waveh= MO.readExperi(directorydata,\n '%sxwave1' % varnames[v],'HIT',\n 'surface')\n# lat,lon,time,lev,wavef = MO.readExperi(directorydata,\n# '%sxwave1' % varnames[v],'FIT',\n# 'surface')\n \n climowaveh_feb = np.nanmean(waveh[:,1,:,:],axis=0)\n climowaveh_mar = np.nanmean(waveh[:,2,:,:],axis=0)\n \n ###########################################################################\n 
###########################################################################\n ###########################################################################\n #### Plot climatological wave\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n fig = plt.figure()\n ax = plt.subplot(121)\n \n var = diff_feb\n pvar = pvalue_feb\n climo = climowaveh_feb\n \n limit = np.arange(-60,61,5)\n barlim = np.arange(-60,61,30)\n \n m = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n lon2c,lat2c = np.meshgrid(lon, lat)\n \n m.drawmapboundary(fill_color='white',color='w',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.65)\n \n cs = m.contourf(x,y,var,limit,extend='both',alpha=0.7,antiliased=True)\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n cs2 = m.contour(lon2c,lat2c,climo,np.arange(-200,201,50),\n colors='k',linewidths=1.5,latlon=True,zorder=10)\n \n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n \n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n ### Add experiment text to subplot\n ax.annotate(r'\\textbf{FEB}',xy=(0,0),xytext=(0.5,1.1),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=0,ha='center',va='center')\n ax.annotate(r'\\textbf{FIT--HIT}',xy=(0,0),xytext=(-0.1,0.5),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=90,ha='center',va='center')\n \n ###########################################################################\n ax = plt.subplot(122)\n \n var = diff_mar\n pvar = pvalue_mar\n climo = climowaveh_mar\n \n limit = np.arange(-60,61,5)\n barlim = np.arange(-60,61,30)\n \n m = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n lon2c,lat2c = np.meshgrid(lon, lat)\n \n m.drawmapboundary(fill_color='white',color='w',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.65)\n \n cs = m.contourf(x,y,var,limit,extend='both',alpha=0.7,antiliased=True)\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n cs2 = m.contour(lon2c,lat2c,climo,np.arange(-200,201,50),\n colors='k',linewidths=1.5,latlon=True,zorder=10)\n \n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n \n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n ### Add text\n ax.annotate(r'\\textbf{MAR}',xy=(0,0),xytext=(0.5,1.1),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=0,ha='center',va='center')\n \n cbar_ax = fig.add_axes([0.312,0.13,0.4,0.03]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=True)\n cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')\n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim))) \n cbar.ax.tick_params(axis='x', size=.01)\n cbar.outline.set_edgecolor('dimgray')\n \n plt.subplots_adjust(wspace=0.01)\n \n plt.savefig(directoryfigure + '%s_climowave1.png' % varnames[v],\n dpi=300)\n 
\n###############################################################################\n###############################################################################\n###############################################################################\nvarnames = ['Z30']\nfor v in range(len(varnames)):\n ### Call function for geopotential height data from reach run\n lat,lon1,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon1,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon1,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT']\n experiments = [r'\\textbf{FIT--HIT}']\n runs = [varhit,varfit]\n \n ### Separate per periods (Feb,Mar)\n varh_f = runs[0][:,1,:,:]\n varh_m = runs[0][:,2,:,:]\n \n varf_f = runs[1][:,1,:,:]\n varf_m = runs[1][:,2,:,:]\n \n ### Compute comparisons for FM - taken ensemble average\n diff_feb = np.nanmean(varf_f - varh_f,axis=0)\n diff_mar = np.nanmean(varf_m - varh_m,axis=0)\n \n ### Calculate significance for FM\n stat_feb,pvalue_feb = UT.calc_indttest(varfit[:,1,:,:],varhit[:,1,:,:])\n stat_mar,pvalue_mar = UT.calc_indttest(varfit[:,2,:,:],varhit[:,2,:,:])\n \n ### Calculate climatology \n climowaveh_feb = np.nanmean(varhit[:,1,:,:],axis=0)\n climowaveh_mar = np.nanmean(varhit[:,2,:,:],axis=0)\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n #### Plot Z30\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n fig = plt.figure()\n ax = plt.subplot(121)\n \n var = diff_feb\n pvar = pvalue_feb\n climo = climowaveh_feb\n \n limit = np.arange(-100,101,5)\n barlim = np.arange(-100,101,50)\n \n m = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n climo,lons_cyclic = addcyclic(climowaveh_feb, lon1)\n climo,lons_cyclic = shiftgrid(180.,climo,lons_cyclic,start=False)\n \n m.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.65)\n \n cs = m.contourf(x,y,var,limit,extend='both',alpha=1)\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n cs2 = m.contour(x,y,climo,np.arange(21900,23500,250),\n colors='k',linewidths=1.5,zorder=10)\n \n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n \n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n ### Add experiment text to subplot\n ax.annotate(r'\\textbf{FEB}',xy=(0,0),xytext=(0.5,1.1),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=0,ha='center',va='center')\n ax.annotate(r'\\textbf{FIT--HIT}',xy=(0,0),xytext=(-0.1,0.5),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=90,ha='center',va='center')\n \n ###########################################################################\n ax = plt.subplot(122)\n \n var = diff_mar\n pvar = pvalue_mar\n climo = climowaveh_mar\n \n limit = np.arange(-125,126,5)\n barlim = np.arange(-125,126,125)\n \n m = 
Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n climo,lons_cyclic = addcyclic(climowaveh_mar, lon1)\n climo,lons_cyclic = shiftgrid(180.,climo,lons_cyclic,start=False)\n \n m.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.65)\n \n cs = m.contourf(x,y,var,limit,extend='both',alpha=1)\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n cs2 = m.contour(x,y,climo,np.arange(21900,23500,250),\n colors='k',linewidths=1.5,zorder=10)\n \n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n \n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n ### Add text\n ax.annotate(r'\\textbf{MAR}',xy=(0,0),xytext=(0.5,1.1),\n textcoords='axes fraction',color='dimgray',fontsize=23,\n rotation=0,ha='center',va='center')\n \n cbar_ax = fig.add_axes([0.312,0.13,0.4,0.03]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=True)\n cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')\n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim))) \n cbar.ax.tick_params(axis='x', size=.01)\n cbar.outline.set_edgecolor('dimgray')\n \n plt.subplots_adjust(wspace=0.01)\n \n plt.savefig(directoryfigure + '%s_FIT-HIT.png' % varnames[v],\n dpi=300) \n \nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.5334808230400085,
"alphanum_fraction": 0.5591445565223694,
"avg_line_length": 40.845680236816406,
"blob_id": "675a654a459140fae625768c49d52533a266a39c",
"content_id": "e9dbe1cf209f82ac3a9873943038271646ac29d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6780,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 162,
"path": "/Scripts/plot_ClimoWavex.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlots DJF for climatological wave number X for WACCM4 experiments\n\nNotes\n-----\n Author : Zachary Labe\n Date : 12 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport datetime\nimport read_MonthlyOutput as MO\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Climo Wave - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nvarnames = ['Z300']\nfor v in range(len(varnames)):\n ### Call function for geopotential height data from reach run\n lat,lon1,time,lev,varhit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon1,time,lev,varfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon1,time,lev,varcit = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n lat,lon1,time,lev,varfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon1,time,lev,varfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FICT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon1,lat)\n \n ### Concatonate runs\n runnames = [r'HIT',r'FIT',r'CIT',r'FIC',r'FICT']\n experiments = [r'\\textbf{FIT--HIT}',r'\\textbf{FIC--CIT}',\n r'\\textbf{FICT--HIT}']\n runs = [varhit,varfit,varcit,varfic,varfict]\n \n ### Separate per periods (DJF)\n var_djf = np.empty((5,varhit.shape[0]-1,varhit.shape[2],varhit.shape[3]))\n for i in range(len(runs)):\n var_djf[i],var_djf[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,\n lon1,'surface',1) \n \n ### Compute comparisons for FM - taken ensemble average\n diff_FITHIT = np.nanmean(var_djf[1] - var_djf[0],axis=0)\n diff_FICCIT = np.nanmean(var_djf[3] - var_djf[2],axis=0)\n diff_FICTHIT = np.nanmean(var_djf[4] - var_djf[0],axis=0)\n diffruns_djf = [diff_FITHIT,diff_FICCIT,diff_FICTHIT]\n \n ### Calculate significance for FM\n stat_FITHIT,pvalue_FITHIT = UT.calc_indttest(var_djf[1],var_djf[0])\n stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(var_djf[3],var_djf[2])\n stat_FICTHIT,pvalue_FICTHIT = UT.calc_indttest(var_djf[4],var_djf[0])\n pruns_djf = [pvalue_FITHIT,pvalue_FICCIT,pvalue_FICTHIT]\n \n ### Read in wave number \n lat,lon,time,lev,wavef = MO.readExperi(directorydata,\n '%sxwave1' % varnames[v],'FIT',\n 'surface')\n lat,lon,time,lev,wavefc = MO.readExperi(directorydata,\n '%sxwave1' % varnames[v],'FIC',\n 'surface')\n lat,lon,time,lev,wavefict = MO.readExperi(directorydata,\n '%sxwave1' % varnames[v],'FICT',\n 'surface')\n \n wavef_djf,wavef_djf = UT.calcDecJanFeb(wavef,wavef,lat,lon,'surface',1) \n wavefc_djf,wavefc_djf = UT.calcDecJanFeb(wavefc,wavefc,lat,lon,'surface',1) \n wavefict_djf,wavefict_djf = UT.calcDecJanFeb(wavefict,wavefict,lat,lon,'surface',1) \n \n climowavef = np.nanmean(wavef_djf,axis=0)\n climowavefc = np.nanmean(wavefc_djf,axis=0)\n climowavefict = np.nanmean(wavefict_djf,axis=0)\n wavelist = [climowavef,climowavefc,climowavefict]\n \n 
###########################################################################\n ###########################################################################\n ###########################################################################\n #### Plot U\n plt.rc('text',usetex=True)\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n \n fig = plt.figure()\n for i in range(len(experiments)):\n var = diffruns_djf[i]\n pvar = pruns_djf[i]\n climo = wavelist[i]\n \n limit = np.arange(-60,61,5)\n barlim = np.arange(-60,61,30)\n \n ax1 = plt.subplot(1,3,i+1)\n m = Basemap(projection='ortho',lon_0=0,lat_0=90,resolution='l',\n area_thresh=10000.)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat)\n x, y = m(lon2d, lat2d)\n \n pvar,lons_cyclic = addcyclic(pvar, lon1)\n pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)\n# climo,lons_cyclic = addcyclic(climo,lon)\n# climo,lons_cyclic = shiftgrid(180.,climo,lons_cyclic,start=False)\n# lon2dc, lat2dc = np.meshgrid(lons_cyclic, lat)\n lon2c,lat2c = np.meshgrid(lon, lat)\n \n m.drawmapboundary(fill_color='white',color='w',linewidth=0.7)\n m.drawcoastlines(color='dimgray',linewidth=0.65)\n \n cs = m.contourf(x,y,var,limit,extend='both',alpha=0.7,antiliased=True)\n cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'])\n cs2 = m.contour(lon2c,lat2c,climo,np.arange(-200,201,60),\n colors='k',linewidths=1.5,latlon=True,zorder=10)\n \n cmap = ncm.cmap('nrl_sirkes') \n cs.set_cmap(cmap) \n \n m.drawcoastlines(color='dimgray',linewidth=0.8)\n \n ### Add experiment text to subplot\n ax1.annotate(r'%s' % experiments[i],xy=(0,0),xytext=(0.865,0.90),\n textcoords='axes fraction',color='k',fontsize=11,\n rotation=320,ha='center',va='center')\n \n cbar_ax = fig.add_axes([0.312,0.23,0.4,0.03]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=True)\n cbar.set_label(r'\\textbf{m}',fontsize=11,color='dimgray')\n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim))) \n cbar.ax.tick_params(axis='x', size=.01)\n \n plt.subplots_adjust(wspace=0.01)\n \n plt.savefig(directoryfigure + '%s_climowave1.png' % varnames[v],\n dpi=300)\n \nprint('Completed: Script done!')\n\n"
},
{
"alpha_fraction": 0.5872753858566284,
"alphanum_fraction": 0.6033025979995728,
"avg_line_length": 40.34538269042969,
"blob_id": "00f2b7e22202a5c842c648709e015e9814e90c7c",
"content_id": "a38aa3fcd43b3c9b0d6afd5e4516b2f74f31b8b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10295,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 249,
"path": "/Scripts/plot_SpatialCorrelations_QBO.py",
"repo_name": "whigg/ThicknessSensitivity",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPlots OCT-MAR spatial correlations -- test script so far!\n\nNotes\n-----\n Author : Zachary Labe\n Date : 16 November 2017\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport read_MonthlyOutput as MO\nimport cmocean\nimport scipy.stats as sts\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport nclcmaps as ncm\nimport calc_Utilities as UT\n\n### Define directories\ndirectorydata = '/surtsey/zlabe/simu/'\ndirectorydata2 = '/home/zlabe/Documents/Research/SITperturb/Data/'\ndirectoryfigure = '/home/zlabe/Desktop/'\n#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting spatial correlations - %s----' % titletime)\n\n### Alott time series\nyear1 = 1900\nyear2 = 2000\nyears = np.arange(year1,year2+1,1)\n\nmonths = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR']\nvarnames = ['U10','Z30','Z500','SLP','T2M','THICK']\nqbophase = ['pos','non','neg']\ncorrvarpos = []\ncorrvarnon = []\ncorrvarneg = []\nfor v in range(len(varnames)):\n ### Call function for surface temperature data from reach run\n lat,lon,time,lev,tashit = MO.readExperi(directorydata,\n '%s' % varnames[v],'HIT','surface')\n lat,lon,time,lev,tasfit = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIT','surface')\n lat,lon,time,lev,tasfict = MO.readExperi(directorydata,\n '%s' % varnames[v],'FIC','surface')\n lat,lon,time,lev,tasfic = MO.readExperi(directorydata,\n '%s' % varnames[v],'CIT','surface')\n \n ### Create 2d array of latitude and longitude\n lon2,lat2 = np.meshgrid(lon,lat)\n \n ### Read in QBO phases \n filenamefitp = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[0]\n filenamefitno = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[1]\n filenamefitn = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[2]\n pos_fit = np.genfromtxt(filenamefitp,unpack=True,usecols=[0],dtype='int')\n non_fit = np.genfromtxt(filenamefitno,unpack=True,usecols=[0],dtype='int')\n neg_fit = np.genfromtxt(filenamefitn,unpack=True,usecols=[0],dtype='int')\n \n filenamehitp = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[0]\n filenamehitno = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[1]\n filenamehitn = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[2]\n pos_hit = np.genfromtxt(filenamehitp,unpack=True,usecols=[0],dtype='int')\n non_hit = np.genfromtxt(filenamehitno,unpack=True,usecols=[0],dtype='int')\n neg_hit = np.genfromtxt(filenamehitn,unpack=True,usecols=[0],dtype='int')\n \n filenameficp = directorydata + 'FIC/monthly/QBO_%s_FIC.txt' % qbophase[0]\n filenameficno = directorydata + 'FIC/monthly/QBO_%s_FIC.txt' % qbophase[1]\n filenameficn = directorydata + 'FIC/monthly/QBO_%s_FIC.txt' % qbophase[2]\n pos_fic = np.genfromtxt(filenameficp,unpack=True,usecols=[0],dtype='int')\n non_fic = np.genfromtxt(filenameficno,unpack=True,usecols=[0],dtype='int')\n neg_fic = np.genfromtxt(filenameficn,unpack=True,usecols=[0],dtype='int')\n \n filenamefictp = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[0]\n filenamefictno = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[1]\n filenamefictn = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[2]\n pos_fict = 
np.genfromtxt(filenamefictp,unpack=True,usecols=[0],dtype='int')\n non_fict = np.genfromtxt(filenamefictno,unpack=True,usecols=[0],dtype='int')\n neg_fict = np.genfromtxt(filenamefictn,unpack=True,usecols=[0],dtype='int')\n \n ### Separate per months\n varmo_fit = np.append(tasfit[:,9:,:,:],tasfit[:,0:3,:,:],\n axis=1)\n varmo_hit = np.append(tashit[:,9:,:,:],tashit[:,0:3,:,:],\n axis=1)\n varmo_fict = np.append(tasfict[:,9:,:,:],tasfict[:,0:3,:,:],\n axis=1)\n varmo_fic = np.append(tasfic[:,9:,:,:],tasfic[:,0:3,:,:],\n axis=1)\n \n ### Composite by QBO phase \n tas_mofitpos = varmo_fit[pos_fit,:,:,:]\n tas_mohitpos = varmo_hit[pos_hit,:,:,:]\n tas_moficpos = varmo_fic[pos_fic,:,:,:]\n tas_mofictpos = varmo_fict[pos_fict,:,:,:]\n \n tas_mofitnon = varmo_fit[non_fit,:,:,:]\n tas_mohitnon = varmo_hit[non_hit,:,:,:]\n tas_moficnon = varmo_fic[non_fic,:,:,:]\n tas_mofictnon = varmo_fict[non_fict,:,:,:]\n \n tas_mofitneg = varmo_fit[neg_fit,:,:,:]\n tas_mohitneg = varmo_hit[neg_hit,:,:,:]\n tas_moficneg = varmo_fic[neg_fic,:,:,:]\n tas_mofictneg = varmo_fict[neg_fict,:,:,:]\n \n ### Calculate differences [FIT-HIT and FICT - FIT]\n fithitpos = np.nanmean(tas_mofitpos - tas_mohitpos,axis=0)\n fithitnon = np.nanmean(tas_mofitnon - tas_mohitnon,axis=0)\n fithitneg = np.nanmean(tas_mofitneg - tas_mohitneg,axis=0)\n \n fictficpos = np.nanmean(tas_mofictpos - tas_moficpos,axis=0)\n fictficnon = np.nanmean(tas_mofictnon - tas_moficnon,axis=0)\n fictficneg = np.nanmean(tas_mofictneg - tas_moficneg,axis=0)\n \n corrsposn = []\n for i in range(fithitpos.shape[0]):\n corrsqpos = UT.calc_spatialCorr(fithitpos[i],fictficpos[i],lat,lon,'yes')\n corrsposn.append(corrsqpos)\n corrvarpos.append(corrsposn)\n \n corrsnonn = []\n for i in range(fithitnon.shape[0]):\n corrsqnon = UT.calc_spatialCorr(fithitnon[i],fictficnon[i],lat,lon,'yes')\n corrsnonn.append(corrsqnon)\n corrvarnon.append(corrsnonn)\n \n corrsnegn = []\n for i in range(fithitneg.shape[0]):\n corrsqneg = UT.calc_spatialCorr(fithitneg[i],fictficneg[i],lat,lon,'yes')\n corrsnegn.append(corrsqneg)\n corrvarneg.append(corrsnegn)\n\ncorrvarpos = np.asarray(corrvarpos)\ncorrvarnon = np.asarray(corrvarnon)\ncorrvarneg = np.asarray(corrvarneg)\n\ncorrvar = [corrvarpos,corrvarnon,corrvarneg]\n\n#### Save file\nnp.savetxt(directorydata2 + 'patterncorr_qbo_pos.txt',\n corrvarpos.transpose(),delimiter=',',\n fmt='%3.2f',header=' '.join(varnames)+'\\n',\n footer='\\n File contains pearsonr correlation coefficients' \\\n '\\n between FIT-HIT and FICT-FIT to get the relative \\n' \\\n ' contributions of SIT and SIC [monthly, OCT-MAR]. \\n' \\\n 'This is for composites of QBO-W',newline='\\n\\n')\nnp.savetxt(directorydata2 + 'patterncorr_qbo_neg.txt',\n corrvarneg.transpose(),delimiter=',',\n fmt='%3.2f',header=' '.join(varnames)+'\\n',\n footer='\\n File contains pearsonr correlation coefficients' \\\n '\\n between FIT-HIT and FICT-FIT to get the relative \\n' \\\n ' contributions of SIT and SIC [monthly, OCT-MAR]. \\n' \\\n 'This is for composites of QBO-E',newline='\\n\\n')\nnp.savetxt(directorydata2 + 'patterncorr_qbo_non.txt',\n corrvarnon.transpose(),delimiter=',',\n fmt='%3.2f',header=' '.join(varnames)+'\\n',\n footer='\\n File contains pearsonr correlation coefficients' \\\n '\\n between FIT-HIT and FICT-FIT to get the relative \\n' \\\n ' contributions of SIT and SIC [monthly, OCT-MAR]. 
\\n' \\\n 'This is for composites of QBO-Neutral',newline='\\n\\n')\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot Figure\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\nfig = plt.figure()\nfor r in range(len(corrvar)):\n ax = plt.subplot(3,1,r+1)\n \n adjust_spines(ax, ['left', 'bottom'])\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('dimgrey')\n ax.spines['bottom'].set_color('dimgrey')\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)\n ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')\n \n if r < 2:\n ax.spines['bottom'].set_color('w')\n ax.tick_params('x',length=0,width=0,which='major',color='w')\n \n plt.plot([0]*len(corrvar[r]),linewidth=2,color='dimgrey',linestyle='--')\n \n color=iter(ncm.cmap('MPL_gnuplot2')(np.linspace(0,0.8,len(corrvar[r]))))\n for i in range(len(corrvar[r])):\n c=next(color)\n plt.plot(corrvar[r][i],linewidth=1.5,color=c,alpha=1,\n label = r'\\textbf{%s}' % varnames[i],linestyle='-',\n marker='o',markersize=3)\n \n if r == 1:\n plt.ylabel(r'\\textbf{Pattern Correlation [R]}',color='dimgrey',\n fontsize=13)\n if r == 2:\n plt.legend(shadow=False,fontsize=9,loc='lower center',\n fancybox=True,frameon=False,ncol=3,\n bbox_to_anchor=(0.5,-1.04))\n \n qbophaseq = [r'QBO-W',r'QBO-N',r'QBO-E']\n ax.annotate(r'\\textbf{%s}' % qbophaseq[r],xy=(0,0),xytext=(1.03,0.5),\n textcoords='axes fraction',color='dimgray',\n fontsize=14,rotation=270,ha='center',va='center')\n \n plt.yticks(np.arange(-1,1.1,0.5),list(map(str,np.arange(-1,1.1,0.5))),\n fontsize=8)\n plt.ylim([-1,1])\n \n xlabels = [r'OCT',r'NOV',r'DEC',r'JAN',r'FEB',r'MAR',r'APR']\n plt.xticks(np.arange(0,6,1),xlabels,fontsize=8)\n plt.xlim([0,5])\n \n if r<2:\n ax.tick_params(labelbottom='off') \n \n ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.3)\n \n plt.subplots_adjust(bottom=0.2,hspace=0.25)\n\nplt.savefig(directoryfigure + 'patterncorrs_monthly_qbo.png',dpi=300)\n"
}
] | 28 |
gutioliveira/DesignPatterns | https://github.com/gutioliveira/DesignPatterns | cd5f61366e94293bda2e5b8626351f8bb5d7eb40 | 0ec1ce554470e9efa938f4a689ffce68a7ef3cfc | 8cd15688c548c72a2e9dcc4f3c77de49fb4ceb90 | refs/heads/master | 2021-01-20T18:52:20.836762 | 2016-09-18T19:09:25 | 2016-09-18T19:09:25 | 60,041,881 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6254545450210571,
"alphanum_fraction": 0.6254545450210571,
"avg_line_length": 17.33333396911621,
"blob_id": "da1ff5bb26c035f8eadd53136a5d42803be9ff83",
"content_id": "82c03010ea1dfac50ac667b4efe0142f0648d9ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/MethodFactory/src/ship/ShipFactory.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package ship;\n\npublic class ShipFactory {\n\t\n\tpublic EnemyShip makeShip(String type){\n\t\t\n\t\tif ( type.equals(\"u\") || type.equals(\"U\") )\n\t\t\treturn new UFOShip(\"UFO\");\n\t\telse if ( type.equals(\"r\") || type.equals(\"R\") )\n\t\t\treturn new RocketShip(\"Rocket\");\n\t\t\n\t\treturn null;\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6635802388191223,
"alphanum_fraction": 0.6882715821266174,
"avg_line_length": 22.071428298950195,
"blob_id": "fe857d0069ca5e6ddae81fef4ad5c38f367e9877",
"content_id": "92c7caa2f5b126b68acfb0e02f30ee4bd0c15b60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 14,
"path": "/Adapter/src/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class App {\n\t\n\tpublic static void main(String[] args){\n\t\t\n\t\tImagemTarget imagem = new SDLSurfaceAdapter();\n\t\timagem.carregarImagem(\"background.png\");\n\t\timagem.desenharImagem(0, 0, 5, 5);\n\t\t\n\t\t\n\t\timagem = new OpenGLImageAdapter();\n\t\timagem.carregarImagem(\"background.png\");\n\t\timagem.desenharImagem(0, 0, 5, 5);\n\t}\t\n}\n"
},
{
"alpha_fraction": 0.644859790802002,
"alphanum_fraction": 0.6542056202888489,
"avg_line_length": 10.94444465637207,
"blob_id": "d5fcebf54b3d70d8ceb50ee83c98a64318f0f7fb",
"content_id": "5fca36cc6900e73afef5f7628b7d8acc6611b1b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 18,
"path": "/Interpreter-Python/roman/two_digits.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from interpreter import *\n\nclass TwoDigits(Interpreter):\n\n\tdef one(self):\n\t\treturn \"X\"\n\n\tdef four(self):\n\t\treturn \"XL\"\n\n\tdef five(self):\n\t\treturn \"L\"\n\n\tdef nine(self):\n\t\treturn \"XC\"\n\n\tdef weight(self):\n\t\treturn 10;"
},
{
"alpha_fraction": 0.6966666579246521,
"alphanum_fraction": 0.6966666579246521,
"avg_line_length": 17.6875,
"blob_id": "149d47c5c637c48e3d462d2ed0139c38213ae028",
"content_id": "cf3478cb40aec0ddf1447b534e8c3bcf00e3844e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 16,
"path": "/Decorator/src/PizzaDecorator.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic abstract class PizzaDecorator extends Pizza{\n\t\n\tprivate Pizza pizza;\n\t\n\tpublic PizzaDecorator(Pizza pizza){\n\t\tthis.pizza = pizza;\n\t}\n\t\n\tpublic String getDescription(){\n\t\treturn pizza.getDescription() + \" + \" + description;\n\t}\n\t\n\tpublic double getCost(){\n\t\treturn pizza.getCost() + cost;\n\t}\n}\n"
},
{
"alpha_fraction": 0.7135922312736511,
"alphanum_fraction": 0.7135922312736511,
"avg_line_length": 17.636363983154297,
"blob_id": "78af6b34798f787ebf0fd0f7b0a96edaeaa8f22a",
"content_id": "0a705747fdff4126646e20c70e66ebf5cd3ae7c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 11,
"path": "/State/src/Winter.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class Winter implements Season{\n\n\t@Override\n\tpublic void nextSeason(SeasonContext seasonContext) {\n\t\t\n\t\tSystem.out.println(\"We are on winter... \");\n\t\t\n\t\tseasonContext.setSeason(new Summer());\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.64838707447052,
"alphanum_fraction": 0.6516128778457642,
"avg_line_length": 16.27777862548828,
"blob_id": "dc79e7337d1fa62794471a6590249c62b9b8d938",
"content_id": "697f605056fb1f6565bb2df28c95a2e560060a69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 18,
"path": "/Interpreter-Python/roman/context.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "class Context():\n\t_input = \"\"\n\t_output = 0\n\n\tdef __init__(self, input_context):\n\t\tself._input = input_context\n\n\tdef get_input(self):\n\t\treturn self._input\n\n\tdef set_input(self, _input):\n\t\tself._input = _input\n\n\tdef get_output(self):\n\t\treturn self._output\n\n\tdef set_output(self,_output):\n\t\tself._output = _output"
},
{
"alpha_fraction": 0.7388888597488403,
"alphanum_fraction": 0.7388888597488403,
"avg_line_length": 17,
"blob_id": "cd66d066e09fc45ed5467828ddbaf58ebdd50ef1",
"content_id": "15e2f6f12d0bb77672e1e01cea0a6fb9384a7c26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 10,
"path": "/Command/src/dota/PureDamage.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package dota;\n\npublic class PureDamage implements Damage {\n\n\t@Override\n\tpublic double attack(Hero hero, double damage) {\n\t\t// TODO Auto-generated method stub\n\t\treturn damage;\n\t}\n}\n"
},
{
"alpha_fraction": 0.7412140369415283,
"alphanum_fraction": 0.7412140369415283,
"avg_line_length": 17.41176414489746,
"blob_id": "186384e10791add8f1ade0120464f01111291901",
"content_id": "8754189a21c73df3beb3d4f33bedbad8943d9226",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 17,
"path": "/Command/src/loja/Loja.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package loja;\n\npublic class Loja {\n\t\n\tString nomeDaLoja;\n\t\n\tpublic Loja(String nomeDaLoja){\n\t\tthis.nomeDaLoja = nomeDaLoja;\n\t}\n\t\n\tpublic void executarCompra(double valor, PagamentoCommand pagamento){\n\t\t\n\t\tCompra compra = new Compra(nomeDaLoja);\n\t\tcompra.setValor(valor);\n\t\tpagamento.processarCompra(compra);\n\t}\n}\n"
},
{
"alpha_fraction": 0.6232557892799377,
"alphanum_fraction": 0.6418604850769043,
"avg_line_length": 11,
"blob_id": "853511eff870c76ed9b051c2164594868db611c6",
"content_id": "c266950dec0e6f259037967f096e3778b1825eed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 18,
"path": "/Interpreter-Python/roman/four_digits.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from interpreter import *\n\nclass FourDigits(Interpreter):\n\n\tdef one(self):\n\t\treturn \"M\"\n\n\tdef four(self):\n\t\treturn \" \"\n\n\tdef five(self):\n\t\treturn \" \"\n\n\tdef nine(self):\n\t\treturn \" \"\n\n\tdef weight(self):\n\t\treturn 1000;"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 12.375,
"blob_id": "5663954c02d5ae82348cec50704d605f0005efca",
"content_id": "1ffc806a8decd01d197d17225ad2d335be4e6fc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 8,
"path": "/Decorator/src/PizzaDoce.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class PizzaDoce extends Pizza {\n\t\n\tpublic PizzaDoce(){\n\t\tdescription = \"Doce \";\n\t\tcost = 5.0;\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6371841430664062,
"alphanum_fraction": 0.6371841430664062,
"avg_line_length": 16.870967864990234,
"blob_id": "252c611e6e6a1939a15fdd4c7fee9ccff8b74b2c",
"content_id": "90b7d2ade7cc225af97b361bfbff3e28636dc64d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 31,
"path": "/MethodFactory/src/ship/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package ship;\n\nimport java.util.Scanner;\n\npublic class App {\n\n\tpublic static void main(String[] args) {\n\t\t// TODO Auto-generated method stub\n\t\t\n\t\tScanner input = new Scanner(System.in);\n\t\t\n\t\tShipFactory shipFactory = new ShipFactory();\n\t\t\n\t\tEnemyShip enemyShip;\n\t\n\t\tdo{\n\t\t\t\n\t\t\tSystem.out.println(\"Which type of rocket do you want in your team? (U/R)\");\n\t\t\tString in = input.nextLine();\n\t\t\tenemyShip = shipFactory.makeShip(in);\n\t\t\t\n\t\t\tif ( enemyShip != null ){\n\t\t\t\t\n\t\t\t\tenemyShip.atack();\n\t\t\t\tenemyShip.move();\n\t\t\t}\n\t\t}while ( input.hasNext() );\n\t\t\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.7301136255264282,
"alphanum_fraction": 0.7301136255264282,
"avg_line_length": 18.55555534362793,
"blob_id": "96bb2ad199bd5e0c40fe3646d39f185f760b027b",
"content_id": "360c335431fe2b4e441b904514f082ff92b432f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/Decorator/src/drinks/CoquetelDecorator.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package drinks;\n\npublic abstract class CoquetelDecorator extends Coquetel {\n\t\n\tprotected Coquetel coquetel;\n\t\n\tpublic CoquetelDecorator(Coquetel coquetel){\n\t\tthis.coquetel = coquetel;\n\t}\n\t\n\tpublic String getDescription(){\n\t\treturn coquetel.getDescription() + \" + \" + description;\n\t}\n\t\n\tpublic double getCost(){\n\t\treturn coquetel.getCost() + cost;\n\t}\n}\n"
},
{
"alpha_fraction": 0.6398104429244995,
"alphanum_fraction": 0.6682464480400085,
"avg_line_length": 13.551724433898926,
"blob_id": "50dfdf7f1496e2e53ec8a739b9ebc179dc150994",
"content_id": "43d265a4f3361c108817ff125405760762a85e43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 29,
"path": "/State/src/mario/MarioState.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package mario;\n\npublic abstract class MarioState {\n\t\n\tpublic MarioState pegarCogumelo(){\n\t\t\n\t\tSystem.out.println(\"Ganhou 1000 pontos\");\n\t\t\n\t\treturn this;\n\t}\n\t\n\tpublic MarioState pegarPena(){\n\t\t\n\t\tSystem.out.println(\"Ganhou 1000 pontos\");\n\t\t\n\t\treturn this;\n\t}\n\t\n\tpublic MarioState pegarFlor(){\n\t\t\n\t\tSystem.out.println(\"Ganhou 1000 pontos\");\n\t\t\n\t\treturn this;\n\t}\n\t\n\tpublic void pular(){\n\t\tSystem.out.println(\"Pulou!\");\n\t}\n}\n"
},
{
"alpha_fraction": 0.6871165633201599,
"alphanum_fraction": 0.6871165633201599,
"avg_line_length": 13.818181991577148,
"blob_id": "c6c3da58fcbce5c0f3dc8408d53bc66b53ff5a48",
"content_id": "37c4280c39c647ab1e8708a13f36160112524b70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 11,
"path": "/State/src/mario/MarioFlor.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package mario;\n\npublic class MarioFlor extends MarioState{\n\t\n\tpublic MarioState pegarPena(){\n\t\t\n\t\tSystem.out.println(\"Mario voador!\");\n\t\t\n\t\treturn new MarioPena();\n\t}\n}\n"
},
{
"alpha_fraction": 0.7051070928573608,
"alphanum_fraction": 0.7051070928573608,
"avg_line_length": 14.973684310913086,
"blob_id": "fc4a1680a7e97c4c382bfe769e799d12e2391c19",
"content_id": "24e086efbb659f8abbb1ac4fe64c419f16a876e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 607,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 38,
"path": "/Observer/src/DadosSubject.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "import java.util.ArrayList;\n\npublic class DadosSubject {\n\t\n\tprotected ArrayList<DadosObserver> observers;\n\tprotected Dados dados;\n\t\n\tpublic DadosSubject(){\n\t\tobservers = new ArrayList<DadosObserver>();\n\t}\n\t\n\tpublic void attach(DadosObserver dadosObserver){\n\t\tobservers.add(dadosObserver);\n\t}\n\t\n\tpublic void detach(int index){\n\t\tobservers.remove(index);\n\t}\n\t\n\tpublic void setState(Dados dados){\n\t\t\n\t\tthis.dados = dados;\n\t\tnotifyObservers();\n\t}\n\n\tprivate void notifyObservers() {\n\t\tfor ( DadosObserver observer : observers ){\n\t\t\tobserver.update();\n\t\t}\n\t}\n\t\n\tpublic Dados getState(){\n\t\treturn dados;\n\t}\n\t\n\t\n\n}\n"
},
{
"alpha_fraction": 0.7241379022598267,
"alphanum_fraction": 0.7448275685310364,
"avg_line_length": 19.571428298950195,
"blob_id": "61739152c56e5b73d201b11df6d1c22ca9ef1d3f",
"content_id": "649e8fbf37268fa3efd005dd0794616e74264a07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 7,
"path": "/Strategy/src/ManagerSalary.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class ManagerSalary implements Taxes {\n\t\n\tpublic double calculateSalaryWithTaxes(double salary) {\n\t\t\n\t\treturn salary - salary*0.20;\n\t}\n}\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6589861512184143,
"avg_line_length": 11.11111068725586,
"blob_id": "91ac72861a35b116664aa5a9a40906d2ddc21019",
"content_id": "50991ae51d919b7dded9307899e1f89560c0a347",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 18,
"path": "/Interpreter-Python/roman/three_digits.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from interpreter import *\n\nclass ThreeDigits(Interpreter):\n\n\tdef one(self):\n\t\treturn \"C\"\n\n\tdef four(self):\n\t\treturn \"CD\"\n\n\tdef five(self):\n\t\treturn \"D\"\n\n\tdef nine(self):\n\t\treturn \"CM\"\n\n\tdef weight(self):\n\t\treturn 100;"
},
{
"alpha_fraction": 0.6700507402420044,
"alphanum_fraction": 0.6700507402420044,
"avg_line_length": 14.076923370361328,
"blob_id": "9d9ae1eef0131336090dc678f9ea94db593b5f80",
"content_id": "1d143fbb7c9d00b27cc649482187fcbb105ba945",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 13,
"path": "/Observer/src/Dados.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class Dados {\n\n\tprotected double valorA;\n\tprotected double valorB;\n\tprotected double valorC;\n\t\n\tpublic Dados(double a, double b, double c){\n\t\tvalorA = a;\n\t\tvalorB = b;\n\t\tvalorC = c;\n\t}\n\t\n}\n"
},
{
"alpha_fraction": 0.5992366671562195,
"alphanum_fraction": 0.6221374273300171,
"avg_line_length": 15.3125,
"blob_id": "1cb62ea52ec05eb90446824d39feb03255200c18",
"content_id": "c95f304c8b3d5ae2e4a6e3e98488a96679e9e3a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 16,
"path": "/Singleton/src/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class App {\n\t\n\tstatic Fragment fragment;\n\t\n\tpublic static void main(String[] args) {\n\t\t\n\t\tfor ( int i = 0; i < 10000; i++ )\n\t\t\tonClick();\n\t}\n\t\n\t// calls a fragment on the screen\n\tpublic static void onClick(){\n\t\t\n\t\tfragment = Fragment.getInstance();\n\t}\n}\n"
},
{
"alpha_fraction": 0.7377777695655823,
"alphanum_fraction": 0.7511110901832581,
"avg_line_length": 19.454545974731445,
"blob_id": "c007ffea5393ab2ec80f8e2dc9b7224b619bf4a0",
"content_id": "45cd79ac768c9d1d06577bdda9b6166924b1f463",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 11,
"path": "/Command/src/dota/MagicalDamage.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package dota;\n\npublic class MagicalDamage implements Damage {\n\n\t@Override\n\tpublic double attack(Hero hero, double damage) {\n\t\t// TODO Auto-generated method stub\n\t\treturn damage - hero.getMagicalResistence()*damage/100;\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6951219439506531,
"alphanum_fraction": 0.6951219439506531,
"avg_line_length": 24.153846740722656,
"blob_id": "4110dc37f4b02b03d40e15fe341d865d2c9aec72",
"content_id": "43f4b179b7cd9e7a5ef851cecc37fe0a90d64cd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 13,
"path": "/Observer/src/DadosBarras.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class DadosBarras extends DadosObserver {\n\t\n\tpublic DadosBarras(DadosSubject dados){\n\t\tsuper(dados);\n\t}\n\n\tpublic void update(){\n\t\t\n\t\tSystem.out.println(\"altura a = \"+dados.getState().valorA);\n\t\tSystem.out.println(\"altura b = \"+dados.getState().valorB);\n\t\tSystem.out.println(\"altura c = \"+dados.getState().valorC);\n\t}\t\n}\n"
},
{
"alpha_fraction": 0.5502958297729492,
"alphanum_fraction": 0.6360946893692017,
"avg_line_length": 21.53333282470703,
"blob_id": "df8e86499bfd24f6226f5dc42cc8ab6e1e78e666",
"content_id": "cd2c0b0e3ab15a1dab8943bf9ce9728c0d9b201b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 338,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 15,
"path": "/Command/src/dota/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package dota;\n\npublic class App {\n\t\n\tpublic static void main(String[] args){\n\t\t\n\t\tHero axe = new Hero(\"Axe\", 100.0, 20.0, 25.0);\n\t\tHero mirana = new Hero(\"Mirana\", 100.0, 20.0, 25.0);\n\t\t\n\t\taxe.attack(mirana, new PhysicalDamage(), 50.0);\n\t\taxe.attack(mirana, new MagicalDamage(), 50.0);\n\t\taxe.attack(mirana, new PureDamage(), 50.0);\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.7056451439857483,
"alphanum_fraction": 0.7056451439857483,
"avg_line_length": 12.052631378173828,
"blob_id": "2e6efd15786c3cd8a05ae7735b2ad2e7d71a7150",
"content_id": "ffcf5a13d77c517224841da3bbd0962cdd2721e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 19,
"path": "/Command/src/loja/Compra.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package loja;\n\npublic class Compra {\n\t\n\tint idNotafiscal;\n\tString nomeDaLoja;\n\tdouble valorTotal;\n\t\n\tpublic Compra(String nomeDaLoja){\n\t\t\n\t\tthis.nomeDaLoja = nomeDaLoja;\n\t}\n\t\n\tpublic void setValor(double valor){\n\t\t\n\t\tthis.valorTotal = valor;\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6431924700737,
"alphanum_fraction": 0.6478873491287231,
"avg_line_length": 10.833333015441895,
"blob_id": "b5b7d6776d6088c1634a0c92a643398a81bc251c",
"content_id": "d96d569d823a23dc071eebce22ad61d48a5eedf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 18,
"path": "/Interpreter-Python/roman/one_digit.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from interpreter import *\n\nclass OneDigit(Interpreter):\n\n\tdef one(self):\n\t\treturn \"I\"\n\n\tdef four(self):\n\t\treturn \"IV\"\n\n\tdef five(self):\n\t\treturn \"V\"\n\n\tdef nine(self):\n\t\treturn \"IX\"\n\n\tdef weight(self):\n\t\treturn 1;\n"
},
{
"alpha_fraction": 0.7366548180580139,
"alphanum_fraction": 0.7366548180580139,
"avg_line_length": 19.14285659790039,
"blob_id": "94294e5e19796abbe735585c1a7fbec4e2f2ee50",
"content_id": "b9bbe2147c2d6fdb70d157988ed2c197fc6d6b0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 14,
"path": "/Interpreter-Python/roman/main.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from one_digit import *\nfrom two_digits import *\nfrom three_digits import *\nfrom four_digits import *\nfrom context import *\n\ndigits = [FourDigits(),ThreeDigits(),TwoDigits(),OneDigit()]\n\ncontext = Context(\"VI\")\n\nfor d in digits:\n\td.to_interpret(context)\n\nprint context.get_output()"
},
{
"alpha_fraction": 0.704649031162262,
"alphanum_fraction": 0.7128532528877258,
"avg_line_length": 23.399999618530273,
"blob_id": "ca0a5dbac5d44874048dd695deda36c844a530f5",
"content_id": "08c49c62f67ea0bef62da5075944e9123bd73b44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1097,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 45,
"path": "/Interpreter-Python/roman/interpreter.py",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "from abc import ABCMeta, abstractmethod\nfrom context import *\n\nclass Interpreter():\n\t__metaclass__ = ABCMeta\n\n\tdef to_interpret(self, context):\n\n\t\tif len(context.get_input()) == 0:\n\t\t\treturn\n\n\t\tif context.get_input().startswith(self.nine()):\n\t\t\tself.add_value_output(context, 9)\n\t\t\tself.consume_digits(context, 2)\n\t\telif context.get_input().startswith(self.four()):\n\t\t\tself.add_value_output(context, 4)\n\t\t\tself.consume_digits(context, 2)\n\t\telif context.get_input().startswith(self.five()):\n\t\t\tself.add_value_output(context, 5)\n\t\t\tself.consume_digits(context, 1)\n\n\t\twhile context.get_input().startswith(self.one()):\n\t\t\tself.add_value_output(context, 1)\n\t\t\tself.consume_digits(context, 1)\n\n\tdef consume_digits(self, context, digits):\n\t\tcontext.set_input(context.get_input()[digits:])\n\n\tdef add_value_output(self, context, number):\n\t\tcontext.set_output(context.get_output() + number * self.weight())\n\n\t@abstractmethod\n\tdef weight(self): pass\n\n\t@abstractmethod\n\tdef one(self): pass\n\n\t@abstractmethod\n\tdef four(self): pass\n\n\t@abstractmethod\n\tdef five(self): pass\n\n\t@abstractmethod\n\tdef nine(self): pass"
},
{
"alpha_fraction": 0.6343283653259277,
"alphanum_fraction": 0.646766185760498,
"avg_line_length": 16.478260040283203,
"blob_id": "01bdbffdc6fafbce1d61ce72919b0db54d8ec599",
"content_id": "71f223f11ea0bda7bbba40a08675ee71b0df7c67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 402,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 23,
"path": "/MethodFactory/src/ship/UFOShip.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package ship;\n\npublic class UFOShip extends EnemyShip{\n\t\n\tpublic UFOShip(String name){\n\t\tthis.name = name;\n\t\thp = 100;\n\t\tspeed = 60;\n\t}\n\t\n\t@Override\n\tpublic void atack() {\n\t\t// TODO Auto-generated method stub\n\t\tSystem.out.println(name + \" has attacked !\");\n\t}\n\n\t@Override\n\tpublic void move() {\n\t\t// TODO Auto-generated method stub\n\t\tSystem.out.println(name + \" has moved at \" + speed + \" km/h\");\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6643598675727844,
"alphanum_fraction": 0.6643598675727844,
"avg_line_length": 14.210526466369629,
"blob_id": "8dffb89556741a33dc6bd102839f36cda45735b0",
"content_id": "eb1da041fd9c814e484c9ae1c0ac4b4613213157",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 19,
"path": "/Adapter/src/robot/EnemyTarget.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "package robot;\n\npublic abstract class EnemyTarget {\n\n\tpublic void fireWeapon(){\n\t\t\n\t\tSystem.out.println(\"Fires weeapon!\");\n\t}\n\t\n\tpublic void driveFowards(){\n\t\t\t\n\t\tSystem.out.println(\"is driving\");\n\t}\n\t\n\tpublic void assignDriver(String s){\n\t\t\n\t\tSystem.out.println(s + \" is driving!\");\n\t}\n}\n"
},
{
"alpha_fraction": 0.7112299203872681,
"alphanum_fraction": 0.7112299203872681,
"avg_line_length": 13.307692527770996,
"blob_id": "21629b69556a476fb435fe511bfc4149b3a60f92",
"content_id": "607d6a6ba63126002aaefc1fb2a05673cba5be90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 13,
"path": "/Decorator/src/Pizza.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class Pizza {\n\n\tprotected String description;\n\tprotected double cost;\n\t\n\tpublic String getDescription() {\n\t\treturn description;\n\t}\n\n\tpublic double getCost() {\n\t\treturn cost;\n\t}\n}\n"
},
{
"alpha_fraction": 0.6728187799453735,
"alphanum_fraction": 0.708053708076477,
"avg_line_length": 23.79166603088379,
"blob_id": "b55b8382097b8b54962308f9b15e8328c3be72eb",
"content_id": "ead5d6a3b6fbc1f09659c721104a2214cfc87654",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 24,
"path": "/Observer/src/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class App {\n\n\tpublic static void main(String[] args) {\n\t\t\n\t\tDadosSubject dadosSubject = new DadosSubject();\n\t\t\n\t\tDadosObserver d1 = new DadosBarras(dadosSubject);\n\t\tDadosObserver d2 = new DadosTabela(dadosSubject);\n\t\tDadosObserver d3 = new DadosPercent(dadosSubject);\n\t\t\n\t\tdadosSubject.attach(d1);\n\t\tdadosSubject.attach(d2);\n\t\tdadosSubject.attach(d3);\n\t\t\n\t\tDados valores = new Dados(10.0, 20.0, 30.0);\n\t\t\n//\t\tdadosSubject.attach(new DadosPercent());\n//\t\tdadosSubject.attach(new DadosTabela());\n\t\t\n\t\tdadosSubject.setState(valores);\n\t\tdadosSubject.setState(new Dados(5.0, 5.0, 5.0));\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.7174887657165527,
"alphanum_fraction": 0.726457417011261,
"avg_line_length": 21.200000762939453,
"blob_id": "8b717117d5174c36f0f319b8052f2ec94254b430",
"content_id": "e2f92255659e38af45c9843d2d7a7a0f2dea5549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 10,
"path": "/Facade/src/App.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class App {\n\n\tpublic static void main(String[] args) {\n\t\t\n\t\tSistemasFacade sistemas = new SistemasFacade();\n\t\tsistemas.reproduzirAudio(\"MetalGearSolid V-Nuclear.mp3\");\n\t\tsistemas.reproduzirVideo(\"Intro.mp4\");\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.675000011920929,
"avg_line_length": 16,
"blob_id": "cb9a1c1d085d6135fb99c1441290f81270a2a585",
"content_id": "ad9b379e9d731f7e301e5e7fe7c08fd2b4e5272d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/Decorator/src/PizzaEspecial.java",
"repo_name": "gutioliveira/DesignPatterns",
"src_encoding": "UTF-8",
"text": "\npublic class PizzaEspecial extends Pizza {\n\t\n\tpublic PizzaEspecial(){\n\t\tdescription = \"Especial \";\n\t\tcost = 10.0;\n\t}\n}\n"
}
] | 32 |
Pantheona/ourModel11-10 | https://github.com/Pantheona/ourModel11-10 | cb3cb25ef75fc443212eddcd386d2043d31686db | 85f30ccb17d3f15085bc3d93a31706e9f7039666 | 14b06c1aecf10ebdbfd68788c3cf651e494ed50c | refs/heads/master | 2023-01-25T01:38:19.518793 | 2020-11-15T13:17:41 | 2020-11-15T13:17:41 | 311,674,662 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.550067126750946,
"alphanum_fraction": 0.5653691291809082,
"avg_line_length": 37.41237258911133,
"blob_id": "ee653d65c378e25fc63399b3d66e8cce1ada33e4",
"content_id": "8a5a18c61373762ba70226c7cb4756112d70abfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3725,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 97,
"path": "/utils/TripletLoss.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\n\nclass TripletLoss(nn.Module):\n \"\"\"Triplet loss with hard positive/negative mining.\n Reference:\n Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.\n Args:\n margin (float): margin for triplet.\n \"\"\"\n\n def __init__(self, margin=0.3, mutual_flag=False):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n self.mutual = mutual_flag\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: feature matrix with shape (batch_size, feat_dim)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n n = inputs.size(0)\n # print(inputs.shape, targets.shape)\n # inputs = 1. * inputs / (torch.norm(inputs, 2, dim=-1, keepdim=True).expand_as(inputs) + 1e-12)\n # Compute pairwise distance, replace by the official when merged\n dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)\n dist = dist + dist.t()\n dist.addmm_(1, -2, inputs, inputs.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n # For each anchor, find the hardest positive and negative\n mask = targets.expand(n, n).eq(targets.expand(n, n).t())\n dist_ap, dist_an = [], []\n for i in range(n):\n dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))\n dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))\n dist_ap = torch.cat(dist_ap)\n dist_an = torch.cat(dist_an)\n # Compute ranking hinge loss\n # print('\\n', dist_ap[0], dist_an[0])\n y = torch.ones_like(dist_an)\n loss = self.ranking_loss(dist_an, dist_ap, y)\n loss_ap = sum(dist_ap) / len(dist_ap)\n if self.mutual:\n return loss, dist\n return loss\n\n\nclass ContrastiveLoss(nn.Module):\n def __init__(self, margin=0.3, mutual_flag=False):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: feature matrix with shape (batch_size, feat_dim)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n n = inputs.size(0)\n # print(inputs.shape, targets.shape)\n # inputs = 1. * inputs / (torch.norm(inputs, 2, dim=-1, keepdim=True).expand_as(inputs) + 1e-12)\n # Compute pairwise distance, replace by the official when merged\n dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)\n dist = dist + dist.t()\n dist.addmm_(1, -2, inputs, inputs.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n # For each anchor, find the hardest positive and negative\n mask = targets.expand(n, n).eq(targets.expand(n, n).t())\n dist_ap, dist_an = [], []\n\n\n for i in range(n):\n for j in range(i+1, n):\n if mask[i][j] == 1:\n dist_ap.append(dist[i][j].unsqueeze(0))\n elif mask[i][j] == 0:\n dist_an.append(dist[i][j].unsqueeze(0))\n\n # dist_ap = torch.cat(dist_ap)\n # dist_an = torch.cat(dist_an)\n # Compute ranking hinge loss\n # print('\\n', dist_ap[0], dist_an[0])\n\n margin = self.margin\n if len(dist_ap) != 0:\n loss_ap = sum(dist_ap) / len(dist_ap)\n else:\n loss_ap = 0\n margin *= 2\n loss_an = sum(dist_an) / len(dist_an)\n loss = max(0, loss_ap - loss_an + margin)\n\n return loss"
},
{
"alpha_fraction": 0.5313174724578857,
"alphanum_fraction": 0.53995680809021,
"avg_line_length": 28.870967864990234,
"blob_id": "32fb30d18ac33833baf4437910ffba90f92fa4ec",
"content_id": "a74e7d6f623d5caeee5cbea782c899bd744f906b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 31,
"path": "/cot.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import os\nimport shutil\n\npath = \"D:\\\\re-ID-dataset\\\\LTCC_ReID\"\nclothcot = 0\nclothdict = {}\n\ndef copyfile(category):\n global clothcot\n global clothdict\n category_path = path + \"\\\\\" + category\n for filename in os.listdir(category_path):\n splits = filename.split('_')\n ID = splits[0]\n cloth = splits[1]\n camera = splits[2][1:]\n useless = splits[3]\n if ID + 'c' + cloth in clothdict.keys():\n cloth = clothdict[ID + 'c' + cloth]\n else:\n clothdict[ID + 'c' + cloth] = clothcot\n cloth = clothdict[ID + 'c' + cloth]\n clothcot += 1\n\n newname = '0' + ID + '_' + 'c' + camera + 's' + str(cloth) + '_' + useless\n frompath = category_path + '\\\\' + filename\n topath = path + \"\\\\bounding_box_\" + category + \"\\\\\" + newname\n print(frompath, topath)\n shutil.copyfile(frompath, topath)\n\ncopyfile(\"train\")\n"
},
{
"alpha_fraction": 0.4621366262435913,
"alphanum_fraction": 0.48663344979286194,
"avg_line_length": 37.5048942565918,
"blob_id": "0b55254f2acea454e122f535ce33e586d48b5159",
"content_id": "c0996ed16b5838e2e9304756fd6ea285c2931b6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19776,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 511,
"path": "/main.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom tqdm import tqdm\nimport torch\nimport torchvision.utils as vutils\nfrom torch.optim import lr_scheduler\nfrom matplotlib import pyplot as plt\nfrom opt import opt\nfrom data import Data\nfrom network import Model\n# from PCB import Model\nfrom loss import Loss\nfrom utils.extract_feature import extract_feature\nfrom utils.metrics import mean_ap, cmc, re_ranking\n\nclass Main():\n def __init__(self, model, loss, data):\n if opt.stage == 1 or opt.stage == 0:\n self.train_loader = data.train_loader\n else:\n self.train_loader = data.train_loader_woEr\n self.test_loader = data.test_loader\n self.query_loader = data.query_loader\n self.testset = data.testset\n self.queryset = data.queryset\n\n self.model = model.to(opt.device)\n self.loss = loss\n self.data = data\n\n self.x = []\n self.y = [[], [], [], [], [], [], []]\n self.errors = [[], []] # IDError/ClothError\n\n self.scheduler = lr_scheduler.MultiStepLR(loss.optimizer, milestones=opt.lr_scheduler, gamma=0.1)\n self.scheduler_D = lr_scheduler.MultiStepLR(loss.optimizer_D, milestones=opt.lr_scheduler, gamma=0.1)\n self.scheduler_DC = lr_scheduler.MultiStepLR(loss.optimizer_DC, milestones=opt.lr_scheduler, gamma=0.1)\n\n def train(self, epoch):\n\n self.scheduler.step()\n self.scheduler_D.step()\n self.scheduler_DC.step()\n self.model.train()\n\n self.x.append(epoch)\n self.y2batch = [[], [], [], [], [], [], []]\n self.errorcnt = [[0, 0], [0, 0]]\n\n for batch, (rgb, cloth, labels) in enumerate(self.train_loader):\n if rgb.size()[0] != opt.batchid * opt.batchimage: continue\n rgb = rgb.to(opt.device)\n labels = labels.to(opt.device)\n cloth = cloth.to(opt.device)\n\n if opt.stage == 0:\n self.loss.optimizer.zero_grad()\n loss, loss_values, errors = self.loss(rgb, labels, cloth, batch, epoch)\n loss.backward()\n self.loss.optimizer.step()\n\n elif opt.stage == 1:\n self.loss.optimizer.zero_grad()\n self.loss.optimizer_DC.zero_grad()\n loss, loss_values, errors = self.loss(rgb, labels, cloth, batch, epoch)\n loss.backward()\n self.loss.optimizer.step()\n\n elif (opt.stage == 2) or (opt.stage == 3):\n self.loss.optimizer_D.zero_grad()\n self.loss.optimizer.zero_grad()\n self.loss.optimizer_DC.zero_grad()\n loss, loss_values, errors = self.loss(rgb, labels, cloth, batch, epoch)\n loss.backward()\n self.loss.optimizer.step()\n\n # 不用GAN的stage3\n elif opt.stage == 4:\n self.loss.optimizer.zero_grad()\n loss, loss_values, errors = self.loss(rgb, labels, cloth, batch, epoch)\n loss.backward()\n self.loss.optimizer.step()\n\n for i in range(len(loss_values)):\n self.y2batch[i].append(loss_values[i])\n for i in range(len(errors)):\n self.errorcnt[i][0] += errors[i][0]\n self.errorcnt[i][1] += errors[i][1]\n for i in range(len(loss_values)):\n self.y[i].append(sum(self.y2batch[i]) / len(self.y2batch[i]))\n for i in range(len(self.errorcnt)):\n self.errors[i].append((self.errorcnt[i][1] - self.errorcnt[i][0]) / self.errorcnt[i][1])\n\n def save_model(self, save_path):\n torch.save({\n 'model_C': self.model.C.state_dict(),\n 'model_G': self.model.G.state_dict(),\n 'model_D': self.model.D.state_dict(),\n 'model_DC': self.model.DC.state_dict(),\n 'optimizer': self.loss.optimizer.state_dict(),\n 'optimizer_D': self.loss.optimizer_D.state_dict(),\n 'optimizer_DC': self.loss.optimizer_DC.state_dict()\n }, save_path)\n\n def load_model(self, load_path, last_epoch):\n checkpoint = torch.load(load_path)\n pretrained_dict = checkpoint['model_C']\n model_dict = 
self.model.C.state_dict()\n state_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}\n model_dict.update(state_dict)\n self.model.C.load_state_dict(model_dict, strict=False)\n if opt.stage != 1:\n pretrained_dict = checkpoint['model_DC']\n model_dict = self.model.DC.state_dict()\n state_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}\n model_dict.update(state_dict)\n self.model.DC.load_state_dict(model_dict, strict=False)\n if opt.stage == 3:\n self.model.G.load_state_dict(checkpoint['model_G'])\n self.model.D.load_state_dict(checkpoint['model_D'])\n self.loss.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n\n self.scheduler.last_epoch = last_epoch\n self.scheduler_D.last_epoch = last_epoch\n\n def evaluate(self, save_path, epoch=0):\n\n self.model.eval()\n\n print('extract features, this may take a few minutes')\n qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()\n gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()\n\n def rank(dist):\n r = cmc(dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras,\n self.queryset.clothes, self.testset.clothes,\n separate_camera_set=False,\n single_gallery_shot=False,\n first_match_break=True,\n cloth_changing_settings=False)\n m_ap = mean_ap(\n dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras,\n self.queryset.clothes, self.testset.clothes,\n cloth_changing_settings=False)\n\n return r, m_ap\n\n # ######################### re rank##########################\n # q_g_dist = np.dot(qf, np.transpose(gf))\n # q_q_dist = np.dot(qf, np.transpose(qf))\n # g_g_dist = np.dot(gf, np.transpose(gf))\n # dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)\n\n # r, m_ap = rank(dist)\n\n # print('[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'\n # .format(m_ap, r[0], r[2], r[4], r[9]))\n\n # #########################no re rank##########################\n dist = cdist(qf, gf)\n # from utils.combine_feature import combine_feature\n # dist = combine_feature(self.model, self.query_loader, self.test_loader)\n\n r, m_ap = rank(dist)\n\n print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'\n .format(m_ap, r[0], r[2], r[4], r[9], r[19]))\n\n with open(save_path, 'a') as f:\n f.write(\n '[Without Re-Ranking] epoch: {:} mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\\n'\n .format(epoch, m_ap, r[0], r[2], r[4], r[9], r[19]))\n\n\n def evaluate_multi_test(self, data, qf, save_path, test, epoch=0):\n\n self.model.eval()\n\n gf = extract_feature(self.model, tqdm(data.test_loader)).numpy()\n\n def rank(dist):\n r = cmc(dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras,\n self.queryset.clothes, self.testset.clothes,\n separate_camera_set=False,\n single_gallery_shot=False,\n first_match_break=True,\n cloth_changing_settings=False)\n m_ap = mean_ap(\n dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras,\n self.queryset.clothes, self.testset.clothes,\n cloth_changing_settings=False)\n\n return r, m_ap\n\n # ######################### re rank##########################\n # q_g_dist = np.dot(qf, np.transpose(gf))\n # q_q_dist = np.dot(qf, np.transpose(qf))\n # g_g_dist = np.dot(gf, np.transpose(gf))\n # dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)\n\n # r, m_ap = rank(dist)\n\n # print('[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} 
rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'\n # .format(m_ap, r[0], r[2], r[4], r[9]))\n\n # #########################no re rank##########################\n dist = cdist(qf, gf)\n # from utils.combine_feature import combine_feature\n # dist = combine_feature(self.model, self.query_loader, self.test_loader)\n\n r, m_ap = rank(dist)\n\n print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'\n .format(m_ap, r[0], r[2], r[4], r[9], r[19]))\n\n with open(save_path, 'a') as f:\n f.write(\n '[Without Re-Ranking] epoch: {:} test_set: {:} mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\\n'\n .format(epoch, test, m_ap, r[0], r[2], r[4], r[9], r[19]))\n return r[0], r[2], r[4], r[9], r[19]\n\n def multi_test(self, save_path, epoch=0):\n print('start evaluate')\n self.model.eval()\n qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()\n rank1 = []\n rank3 = []\n rank5 = []\n rank10 = []\n rank20 = []\n for i in range(11):\n data = Data(test=i, dataset=\"prcc\")\n print('start evaluate', i)\n r1, r3, r5, r10, r20 = self.evaluate_multi_test(data, qf, save_path, i, epoch)\n rank1.append(r1)\n rank3.append(r3)\n rank5.append(r5)\n rank10.append(r10)\n rank20.append(r20)\n r1 = np.mean(rank1)\n r3 = np.mean(rank3)\n r5 = np.mean(rank5)\n r10 = np.mean(rank10)\n r20 = np.mean(rank20)\n\n print('[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\\n'\n .format(r1, r3, r5, r10, r20))\n\n with open(opt.save_path + opt.name + '_accr.txt', 'a') as f:\n f.write(\n '[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\\n'\n .format(r1, r3, r5, r10, r20))\n return r1\n\ndef start():\n data = Data(dataset=\"prcc\")\n model = Model()\n loss = Loss(model)\n main = Main(model, loss, data)\n\n if opt.mode == 'train':\n os.makedirs(opt.save_path, exist_ok=True)\n\n if opt.stage == 0:\n opt.start = 0\n opt.epoch = 150\n\n if opt.stage == 1:\n main.load_model(opt.save_path + '/isgan_stage0_latest.pt', 0)\n opt.start = 0\n opt.epoch = 300\n\n if opt.stage == 2:\n main.load_model(opt.save_path + '/isgan_stage1_latest.pt', 0)\n opt.start = 0\n opt.epoch = 200\n\n # ours stage3\n # 注意开始epoch标注的是300 为了适应lr降低 具体开始epoch视stage1 load的epoch而定\n if opt.stage == 3:\n main.load_model(opt.save_path + '/isgan_stage2_latest.pt', 300)\n opt.start = 300\n opt.epoch = 400\n\n # 不用GAN的stage3\n # 注意开始epoch标注的是300 为了适应lr降低 具体开始epoch视stage1 load的epoch而定\n if opt.stage == 4:\n main.load_model(opt.save_path + '/isgan_stage0_latest.pt', 300)\n opt.start = 300\n opt.epoch = 400\n\n for epoch in range(opt.start + 1, opt.epoch + 1):\n\n print('\\nepoch', epoch)\n main.train(epoch)\n\n if opt.stage == 0:\n rgb_CE_fig = plt.figure()\n plt.ylabel(\"RgbCE\")\n plt.xlim(0, 150)\n plt.ylim(0, 7)\n plt.plot(main.x, main.y[0])\n rgb_CE_fig.savefig(\"./weights/Stage0_rgbCEloss.jpg\")\n plt.close()\n\n ID_Error = plt.figure()\n plt.ylabel(\"ID Error\")\n plt.xlim(0, 150)\n plt.ylim(0, 1)\n plt.plot(main.x, main.errors[0])\n ID_Error.savefig(\"./weights/Stage0_IDError.jpg\")\n plt.close()\n\n if opt.stage == 1:\n\n cloth_CE_fig = plt.figure()\n plt.ylabel(\"Cloth_CE\")\n plt.xlim(0, 300)\n plt.ylim(0, 7)\n plt.plot(main.x, main.y[0])\n cloth_CE_fig.savefig(\"./weights/Stage1_cloth_CE_loss.jpg\")\n plt.close()\n\n Cloth_Error = plt.figure()\n plt.ylabel(\"Cloth_Error\")\n plt.xlim(0, 300)\n plt.ylim(0, 1)\n plt.plot(main.x, main.errors[1])\n Cloth_Error.savefig(\"./weights/Stage1_ClothError.jpg\")\n 
plt.close()\n\n # D_I_fig = plt.figure()\n # plt.ylabel(\"D_I\")\n # plt.xlim(0, 300)\n # plt.plot(main.x, main.y[1])\n # D_I_fig.savefig(\"./weights/Stage1_D_I_loss.jpg\")\n # plt.close()\n #\n # D_C_fig = plt.figure()\n # plt.ylabel(\"D_C\")\n # plt.xlim(0, 300)\n # plt.plot(main.x, main.y[2])\n # D_C_fig.savefig(\"./weights/Stage1_D_C_loss.jpg\")\n # plt.close()\n\n if opt.stage == 2:\n\n D_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[0])\n plt.ylabel(\"D_Loss\")\n plt.xlim(0, 200)\n D_Loss_fig.savefig(\"./weights/Stage2_D_Loss.jpg\")\n plt.close()\n\n G_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[1])\n plt.ylabel(\"G_Loss\")\n plt.xlim(0, 200)\n G_Loss_fig.savefig(\"./weights/Stage2_G_Loss.jpg\")\n plt.close()\n\n KL_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[2])\n plt.ylabel(\"KL_Loss\")\n plt.xlim(0, 200)\n KL_Loss_fig.savefig(\"./weights/Stage2_KL_Loss.jpg\")\n plt.close()\n\n D_I_fig = plt.figure()\n plt.ylabel(\"D_I\")\n plt.xlim(0, 300)\n plt.plot(main.x, main.y[3])\n D_I_fig.savefig(\"./weights/Stage2_D_I_loss.jpg\")\n plt.close()\n\n D_C_fig = plt.figure()\n plt.ylabel(\"D_C\")\n plt.xlim(0, 300)\n plt.plot(main.x, main.y[4])\n D_C_fig.savefig(\"./weights/Stage2_D_C_loss.jpg\")\n plt.close()\n\n\n if opt.stage == 3:\n\n rgb_CE_fig = plt.figure()\n plt.ylabel(\"RgbCE\")\n plt.xlim(300, 400)\n plt.ylim(0, 7)\n plt.plot(main.x, main.y[0])\n rgb_CE_fig.savefig(\"./weights/Stage3_rgbCE_loss.jpg\")\n plt.close()\n\n ID_Error = plt.figure()\n plt.ylabel(\"ID Error\")\n plt.xlim(300, 400)\n plt.ylim(0, 1)\n plt.plot(main.x, main.errors[0])\n ID_Error.savefig(\"./weights/Stage3_IDError.jpg\")\n plt.close()\n \n Cloth_CE_fig = plt.figure()\n plt.ylabel(\"ClothCE\")\n plt.xlim(300, 400)\n plt.ylim(0, 7)\n plt.plot(main.x, main.y[1])\n Cloth_CE_fig.savefig(\"./weights/Stage3_ClothCE_loss.jpg\")\n plt.close()\n\n Cloth_Error = plt.figure()\n plt.ylabel(\"Cloth Error\")\n plt.xlim(300, 400)\n plt.ylim(0, 1)\n plt.plot(main.x, main.errors[1])\n Cloth_Error.savefig(\"./weights/Stage3_ClothError,jpg\")\n plt.close()\n \n D_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[2])\n plt.ylabel(\"D_Loss\")\n plt.xlim(300, 400)\n D_Loss_fig.savefig(\"./weights/Stage3_D_Loss.jpg\")\n plt.close()\n\n G_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[3])\n plt.ylabel(\"G_Loss\")\n plt.xlim(300, 400)\n G_Loss_fig.savefig(\"./weights/Stage3_G_Loss.jpg\")\n plt.close()\n\n D_I_fig = plt.figure()\n plt.ylabel(\"D_I\")\n plt.xlim(300, 400)\n plt.plot(main.x, main.y[4])\n D_I_fig.savefig(\"./weights/Stage3_D_I_loss.jpg\")\n plt.close()\n\n D_C_fig = plt.figure()\n plt.ylabel(\"D_C\")\n plt.xlim(300, 400)\n plt.plot(main.x, main.y[5])\n D_C_fig.savefig(\"./weights/Stage3_D_C_loss.jpg\")\n plt.close()\n\n KL_Loss_fig = plt.figure()\n plt.plot(main.x, main.y[6])\n plt.ylabel(\"KL_Loss\")\n plt.xlim(300, 400)\n KL_Loss_fig.savefig(\"./weights/Stage3_KL_Loss.jpg\")\n plt.close()\n\n if opt.stage == 4:\n rgb_CE_fig = plt.figure()\n plt.ylabel(\"RgbCE\")\n plt.xlim(300, 400)\n plt.ylim(0, 7)\n plt.plot(main.x, main.y[0])\n rgb_CE_fig.savefig(\"./weights/Stage4_rgbCEloss.jpg\")\n plt.close()\n\n ID_Error = plt.figure()\n plt.ylabel(\"ID Error\")\n plt.xlim(300, 400)\n plt.ylim(0, 1)\n plt.plot(main.x, main.errors[0])\n ID_Error.savefig(\"./weights/Stage4_IDError.jpg\")\n plt.close()\n\n if opt.stage == 0 and epoch % 50 == 0:\n os.makedirs(opt.save_path, exist_ok=True)\n weight_save_path = opt.save_path + opt.name + \\\n '_stage{}_latest.pt'.format(opt.stage)\n if os.path.exists(weight_save_path):\n 
os.remove(weight_save_path)\n main.save_model(weight_save_path)\n main.evaluate(opt.save_path + opt.name + '_accr.txt', epoch)\n elif opt.stage == 1 or opt.stage == 2 and epoch % 100 == 0:\n weight_save_path = opt.save_path + opt.name + \\\n '_stage{}_latest.pt'.format(opt.stage)\n if os.path.exists(weight_save_path):\n os.remove(weight_save_path)\n main.save_model(weight_save_path)\n elif (opt.stage == 3 or opt.stage == 4) and epoch % 25 == 0:\n weight_save_path = opt.save_path + opt.name + \\\n '_stage{}_latest.pt'.format(opt.stage)\n if os.path.exists(weight_save_path):\n os.remove(weight_save_path)\n main.save_model(weight_save_path)\n main.evaluate(opt.save_path + opt.name + '_accr.txt', epoch)\n\n if opt.mode == 'evaluate':\n print('start evaluate')\n main.load_model(opt.weight, 0)\n main.evaluate(opt.save_path + opt.name + '_accr.txt')\n\n\nif __name__ == '__main__':\n opt.mode = 'train'\n opt.stage = 0\n start()\n # opt.stage = 1\n # start()\n # opt.batchimage = 2\n # opt.stage = 2\n # start()\n # opt.stage = 3\n # start()\n opt.stage = 4\n start()\n"
},
{
"alpha_fraction": 0.5454891324043274,
"alphanum_fraction": 0.5576703548431396,
"avg_line_length": 30.2738094329834,
"blob_id": "7b649b27bc681811490b12b2b6f6c699f382a990",
"content_id": "8fb2768fcc2b1179293c156d2b01cd3c38d92ee9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2627,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 84,
"path": "/utils/RandomSampler.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import random\nimport collections\nfrom torch.utils.data import sampler\nimport torch\n\nclass RandomSampler(sampler.Sampler):\n def __init__(self, data_source, batch_id, batch_image):\n super(RandomSampler, self).__init__(data_source)\n\n self.data_source = data_source\n self.batch_image = batch_image\n self.batch_id = batch_id\n\n self._id2index = collections.defaultdict(list)\n self.cam_index = collections.defaultdict()\n self._id2cam = collections.defaultdict(list)\n\n for idx, path in enumerate(data_source.imgs):\n _id = data_source.id(path)\n cam = data_source.camera(path)\n self._id2index[_id].append(idx)\n self.cam_index[idx] = cam\n\n for key, value in self._id2index.items():\n for i in range(len(value)):\n self._id2cam[key].append(self.cam_index[value[i]])\n\n\n\n def __iter__(self):\n unique_ids = self.data_source.unique_ids\n\n random.shuffle(unique_ids)\n\n imgs = []\n for _id in unique_ids:\n imgs.extend(self._sample(self._id2index[_id], self._id2cam[_id], _id, self.batch_image))\n\n return iter(imgs)\n\n def __len__(self):\n return len(self._id2index) * self.batch_image\n\n @staticmethod\n def _sample(population, camera, id, k):\n\n if len(population) < k:\n population = population * k\n\n '''\n sample = []\n cam_max = [0]\n\n for i in range(len(camera)):\n if camera[i] == 1:\n cam_max.append(i)\n break\n for i in range(len(camera)):\n if camera[i] == 2:\n cam_max.append(i)\n break\n cam_max.append(len(camera))\n\n if id == 220:\n cam_max = [cam_max[0], cam_max[1] // 2, cam_max[1], cam_max[2]]\n\n\n for i in range(k):\n\n # # selected_index = int(torch.randint(low=cam_max[i], high=cam_max[i+1], size=(1,)).item())\n selected_index = int(torch.randint(low=cam_max[i // 2], high=cam_max[i // 2 + 1], size=(1,)).item())\n sample.append(population[selected_index])\n\n return sample\n '''\n return random.sample(population, k)\n\n # sample = []\n # index =int(torch.randint(low=0, high=3, size=(1, )))\n # selected_index=int(torch.randint(low=cam_max[index], high=cam_max[index+1], size=(1, )))\n # sample.append(population[selected_index])\n # selected_index = int(torch.randint(low=cam_max[index], high=cam_max[index + 1], size=(1,)))\n # sample.append(population[selected_index])\n # return sample\n"
},
{
"alpha_fraction": 0.5020509958267212,
"alphanum_fraction": 0.5561500787734985,
"avg_line_length": 47.339080810546875,
"blob_id": "01850ab7a999f5bc03fa2d2434104fc5dc35dab2",
"content_id": "e0e3d7059b36910b65e021bbe4c0c2d960bba813",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16821,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 348,
"path": "/isgan_network.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import copy\nimport functools\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torchvision.models.resnet import resnet50, Bottleneck\nfrom opt import opt\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.C = Encoder()\n self.G = Generator()\n self.D = Discriminator()\n \ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight.data, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight.data, mode='fan_out')\n nn.init.constant_(m.bias.data, 0.)\n elif classname.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, mean=1., std=0.02)\n nn.init.constant_(m.bias.data, 0.0)\n \ndef init_weights(net):\n net.apply(weights_init_normal)\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n \n resnet = resnet50(pretrained=True)\n \n self.backbone = nn.Sequential(\n resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,\n resnet.layer1, resnet.layer2, resnet.layer3[0],) # conv4_1\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n \n res_g_conv5 = resnet.layer4\n res_p_conv5 = nn.Sequential(\n Bottleneck(1024, 512, downsample=nn.Sequential(\n nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512))\n res_p_conv5.load_state_dict(resnet.layer4.state_dict())\n \n self.p0_id = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5))\n self.p1_id = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n self.p2_id = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n \n self.maxpool_zg_p0 = nn.MaxPool2d(kernel_size=(12, 4))\n self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(24, 8))\n self.maxpool_zp1 = nn.MaxPool2d(kernel_size=(12, 8))\n self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(24, 8))\n self.maxpool_zp2 = nn.MaxPool2d(kernel_size=(8, 8))\n \n self.reduction_zg_p0_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_zg_p1_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_z0_p1_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_z1_p1_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_zg_p2_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_z0_p2_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_z1_p2_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n self.reduction_z2_p2_id = nn.Sequential(\n nn.Conv2d(2048, opt.feat_id, 1, bias=False), nn.BatchNorm2d(opt.feat_id))\n \n self.fc_fg_p0_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_fg_p1_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_f0_p1_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_f1_p1_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_fg_p2_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_f0_p2_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_f1_p2_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n self.fc_f2_p2_id = nn.Linear(opt.feat_id, int(opt.num_cls))\n \n self.p0_nid = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5))\n self.p1_nid = 
nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n self.p2_nid = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n \n self.avgpool_zg_p0 = nn.AvgPool2d(kernel_size=(12, 4))\n self.avgpool_zg_p1 = nn.AvgPool2d(kernel_size=(24, 8))\n self.avgpool_zp1 = nn.AvgPool2d(kernel_size=(12, 8))\n self.avgpool_zg_p2 = nn.AvgPool2d(kernel_size=(24, 8))\n self.avgpool_zp2 = nn.AvgPool2d(kernel_size=(8, 8))\n \n self.fc_zg_p0_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_zg_p0_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_zg_p1_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_zg_p1_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z0_p1_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z0_p1_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z1_p1_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z1_p1_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_zg_p2_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_zg_p2_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z0_p2_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z0_p2_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z1_p2_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z1_p2_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z2_p2_nid_mu = nn.Linear(2048, int(opt.feat_nid))\n self.fc_z2_p2_nid_lv = nn.Linear(2048, int(opt.feat_nid))\n \n id_dict2 = self.get_modules(self.id_dict2())\n for i in range(np.size(id_dict2)):\n init_weights(id_dict2[i])\n \n def reparameterization(self, mu, lv):\n std = torch.exp(lv / 2)\n sampled_z = torch.FloatTensor(np.random.normal(0, 1, mu.size())).to(opt.device)\n return sampled_z * std + mu\n \n def id_dict1(self):\n return ['p0_id', 'p1_id', 'p2_id']\n def id_dict2(self):\n return ['reduction_zg_p0_id', 'reduction_zg_p1_id', 'reduction_zg_p2_id', \n 'reduction_z0_p1_id', 'reduction_z1_p1_id', \n 'reduction_z0_p2_id', 'reduction_z1_p2_id', 'reduction_z2_p2_id',\n 'fc_fg_p0_id', 'fc_fg_p1_id', 'fc_fg_p2_id',\n 'fc_f0_p1_id', 'fc_f1_p1_id',\n 'fc_f0_p2_id', 'fc_f1_p2_id', 'fc_f2_p2_id']\n def nid_dict1(self):\n return ['p0_nid', 'p1_nid', 'p2_nid']\n def nid_dict2(self):\n return ['fc_zg_p0_nid_mu', 'fc_zg_p0_nid_lv', \n 'fc_zg_p1_nid_mu', 'fc_zg_p1_nid_lv', 'fc_zg_p2_nid_mu', 'fc_zg_p2_nid_lv',\n 'fc_z0_p1_nid_mu', 'fc_z0_p1_nid_lv', 'fc_z1_p1_nid_mu', 'fc_z1_p1_nid_lv',\n 'fc_z0_p2_nid_mu', 'fc_z0_p2_nid_lv', 'fc_z1_p2_nid_mu', 'fc_z1_p2_nid_lv', \n 'fc_z2_p2_nid_mu', 'fc_z2_p2_nid_lv']\n \n def get_modules(self, list):\n modules = []\n for name, module in self.named_children():\n if name in list:\n modules.append(module)\n return modules\n \n def forward(self, x):\n x = self.backbone(x)\n ##################################### identity-related #########################################\n p0_id = self.p0_id(x)\n p1_id = self.p1_id(x)\n p2_id = self.p2_id(x)\n \n zg_p0_id = self.maxpool_zg_p0(p0_id)\n zg_p1_id = self.maxpool_zg_p1(p1_id)\n zp1_id = self.maxpool_zp1(p1_id)\n z0_p1_id = zp1_id[:, :, 0:1, :]\n z1_p1_id = zp1_id[:, :, 1:2, :]\n zg_p2_id = self.maxpool_zg_p2(p2_id)\n zp2_id = self.maxpool_zp2(p2_id)\n z0_p2_id = zp2_id[:, :, 0:1, :]\n z1_p2_id = zp2_id[:, :, 1:2, :]\n z2_p2_id = zp2_id[:, :, 2:3, :]\n \n fg_p0_id = self.reduction_zg_p0_id(zg_p0_id).squeeze(dim=3).squeeze(dim=2)\n fg_p1_id = self.reduction_zg_p1_id(zg_p1_id).squeeze(dim=3).squeeze(dim=2)\n f0_p1_id = self.reduction_z0_p1_id(z0_p1_id).squeeze(dim=3).squeeze(dim=2)\n f1_p1_id = self.reduction_z1_p1_id(z1_p1_id).squeeze(dim=3).squeeze(dim=2)\n 
fg_p2_id = self.reduction_zg_p2_id(zg_p2_id).squeeze(dim=3).squeeze(dim=2)\n f0_p2_id = self.reduction_z0_p2_id(z0_p2_id).squeeze(dim=3).squeeze(dim=2)\n f1_p2_id = self.reduction_z1_p2_id(z1_p2_id).squeeze(dim=3).squeeze(dim=2)\n f2_p2_id = self.reduction_z2_p2_id(z2_p2_id).squeeze(dim=3).squeeze(dim=2)\n \n lg_p0 = self.fc_fg_p0_id(fg_p0_id)\n lg_p1 = self.fc_fg_p1_id(fg_p1_id)\n l0_p1 = self.fc_f0_p1_id(f0_p1_id)\n l1_p1 = self.fc_f1_p1_id(f1_p1_id)\n lg_p2 = self.fc_fg_p2_id(fg_p2_id)\n l0_p2 = self.fc_f0_p2_id(f0_p2_id)\n l1_p2 = self.fc_f1_p2_id(f1_p2_id)\n l2_p2 = self.fc_f2_p2_id(f2_p2_id)\n\n ###################################### identity-unrelated ########################################\n p0_nid = self.p0_nid(x)\n p1_nid = self.p1_nid(x)\n p2_nid = self.p2_nid(x)\n\n zg_p0_nid = self.avgpool_zg_p0(p0_nid)\n zg_p1_nid = self.avgpool_zg_p1(p1_nid)\n zp1_nid = self.avgpool_zp1(p1_nid)\n z0_p1_nid = zp1_nid[:, :, 0:1, :]\n z1_p1_nid = zp1_nid[:, :, 1:2, :]\n zg_p2_nid = self.avgpool_zg_p2(p2_nid)\n zp2_nid = self.avgpool_zp2(p2_nid)\n z0_p2_nid = zp2_nid[:, :, 0:1, :]\n z1_p2_nid = zp2_nid[:, :, 1:2, :]\n z2_p2_nid = zp2_nid[:, :, 2:3, :]\n \n fc_zg_p0_nid_mu = self.fc_zg_p0_nid_mu(zg_p0_nid.squeeze(dim=3).squeeze(dim=2))\n fc_zg_p0_nid_lv = self.fc_zg_p0_nid_lv(zg_p0_nid.squeeze(dim=3).squeeze(dim=2))\n fc_zg_p1_nid_mu = self.fc_zg_p1_nid_mu(zg_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_zg_p1_nid_lv = self.fc_zg_p1_nid_lv(zg_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z0_p1_nid_mu = self.fc_z0_p1_nid_mu(z0_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z0_p1_nid_lv = self.fc_z0_p1_nid_lv(z0_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z1_p1_nid_mu = self.fc_z1_p1_nid_mu(z1_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z1_p1_nid_lv = self.fc_z1_p1_nid_lv(z1_p1_nid.squeeze(dim=3).squeeze(dim=2))\n fc_zg_p2_nid_mu = self.fc_zg_p2_nid_mu(zg_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_zg_p2_nid_lv = self.fc_zg_p2_nid_lv(zg_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z0_p2_nid_mu = self.fc_z0_p2_nid_mu(z0_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z0_p2_nid_lv = self.fc_z0_p2_nid_lv(z0_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z1_p2_nid_mu = self.fc_z1_p2_nid_mu(z1_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z1_p2_nid_lv = self.fc_z1_p2_nid_lv(z1_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z2_p2_nid_mu = self.fc_z2_p2_nid_mu(z2_p2_nid.squeeze(dim=3).squeeze(dim=2))\n fc_z2_p2_nid_lv = self.fc_z2_p2_nid_lv(z2_p2_nid.squeeze(dim=3).squeeze(dim=2))\n \n fc_zg_p0_nid = self.reparameterization(fc_zg_p0_nid_mu, fc_zg_p0_nid_lv)\n fc_zg_p1_nid = self.reparameterization(fc_zg_p1_nid_mu, fc_zg_p1_nid_lv)\n fc_z0_p1_nid = self.reparameterization(fc_z0_p1_nid_mu, fc_z0_p1_nid_lv)\n fc_z1_p1_nid = self.reparameterization(fc_z1_p1_nid_mu, fc_z1_p1_nid_lv)\n fc_zg_p2_nid = self.reparameterization(fc_zg_p2_nid_mu, fc_zg_p2_nid_lv)\n fc_z0_p2_nid = self.reparameterization(fc_z0_p2_nid_mu, fc_z0_p2_nid_lv)\n fc_z1_p2_nid = self.reparameterization(fc_z1_p2_nid_mu, fc_z1_p2_nid_lv)\n fc_z2_p2_nid = self.reparameterization(fc_z2_p2_nid_mu, fc_z2_p2_nid_lv)\n \n list_mu = [fc_zg_p0_nid_mu, fc_zg_p1_nid_mu, fc_z0_p1_nid_mu, fc_z1_p1_nid_mu, \n fc_zg_p2_nid_mu, fc_z0_p2_nid_mu, fc_z1_p2_nid_mu, fc_z2_p2_nid_mu]\n list_lv = [fc_zg_p0_nid_lv, fc_zg_p1_nid_lv, fc_z0_p1_nid_lv, fc_z1_p1_nid_lv, \n fc_zg_p2_nid_lv, fc_z0_p2_nid_lv, fc_z1_p2_nid_lv, fc_z2_p2_nid_lv]\n \n id = torch.cat(\n [fg_p0_id, fg_p1_id, f0_p1_id, f1_p1_id, fg_p2_id, f0_p2_id, f1_p2_id, f2_p2_id], dim=1)\n nid = torch.cat(\n [fc_zg_p0_nid, fc_zg_p1_nid, 
fc_z0_p1_nid, fc_z1_p1_nid, \n fc_zg_p2_nid, fc_z0_p2_nid, fc_z1_p2_nid, fc_z2_p2_nid], dim=1)\n \n return id, lg_p0, lg_p1, l0_p1, l1_p1, lg_p2, l0_p2, l1_p2, l2_p2, list_mu, list_lv, nid\n \nclass Generator(nn.Module):\n def __init__(self, output_dim=3):\n super(Generator, self).__init__()\n \n self.G_fc = nn.Sequential(\n nn.Linear(opt.feat_id*8 + opt.feat_nid*8 + opt.feat_niz + opt.num_cls, opt.feat_G*8),\n nn.BatchNorm1d(opt.feat_G*8),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout))\n \n self.G_deconv = nn.Sequential(\n # 1st block\n nn.ConvTranspose2d(opt.feat_G*8, opt.feat_G*8, kernel_size=(6,2),bias=False),\n nn.BatchNorm2d(opt.feat_G*8),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout),\n # 2nd block\n nn.ConvTranspose2d(opt.feat_G*8, opt.feat_G*8, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(opt.feat_G*8),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout),\n # 3rd block\n nn.ConvTranspose2d(opt.feat_G*8, opt.feat_G*8, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(opt.feat_G*8),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout),\n # 4th block\n nn.ConvTranspose2d(opt.feat_G*8, opt.feat_G*4, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(opt.feat_G*4),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout),\n # 5th block\n nn.ConvTranspose2d(opt.feat_G*4, opt.feat_G*2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(opt.feat_G*2),\n nn.LeakyReLU(0.2, True),\n nn.Dropout(opt.dropout),\n # 6th block\n nn.ConvTranspose2d(opt.feat_G*2, opt.feat_G*1, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(opt.feat_G*1),\n nn.LeakyReLU(0.2, True),\n # 7th block\n nn.ConvTranspose2d(opt.feat_G*1, 3, kernel_size=4, stride=2, padding=1, bias=False),\n nn.Tanh())\n init_weights(self.G_fc)\n init_weights(self.G_deconv)\n\n def forward(self, inputs, labels):\n x = torch.cat([inputs, labels], 1)\n x = self.G_fc(x).view(-1, opt.feat_G*8, 1, 1)\n x = self.G_deconv(x)\n return x\n \nclass Discriminator(nn.Module):\n def __init__(self, norm_layer=nn.InstanceNorm2d):\n super(Discriminator, self).__init__()\n \n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n \n n_layers = 5\n kw = 4\n padw = 1\n backbone = [\n nn.Tanh(),\n nn.Conv2d(3, opt.feat_D, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n backbone += [\n nn.Conv2d(\n opt.feat_D*nf_mult_prev, opt.feat_D*nf_mult, kernel_size=kw, \n stride=2, padding=padw, bias=use_bias),\n norm_layer(opt.feat_D * nf_mult),\n nn.LeakyReLU(0.2, True),]\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n\n image_D = [\n nn.Conv2d(\n opt.feat_D*nf_mult_prev, opt.feat_D*nf_mult, kernel_size=kw, \n stride=1, padding=padw, bias=use_bias),\n norm_layer(opt.feat_D*nf_mult),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(opt.feat_D*nf_mult, 1, kernel_size=kw, stride=1, padding=padw),\n nn.Sigmoid()]\n \n label_D1 = [\n nn.Conv2d(\n opt.feat_D*nf_mult_prev, opt.feat_D*nf_mult, kernel_size=kw, \n stride=1, padding=padw, bias=use_bias),\n norm_layer(opt.feat_D*nf_mult),]\n self.avgp = nn.AvgPool2d(kernel_size=(11, 3))\n label_D2 = [nn.Linear(opt.feat_D*nf_mult, int(opt.num_cls))]\n \n self.backbone = nn.Sequential(*backbone)\n self.image_D = nn.Sequential(*image_D)\n self.label_D1 = nn.Sequential(*label_D1)\n self.label_D2 = 
nn.Sequential(*label_D2)\n\n def forward(self, input):\n backbone = self.backbone(input)\n image_D = self.image_D(backbone)\n label_D1 = self.label_D1(backbone)\n avgp = self.avgp(label_D1).squeeze(dim=3).squeeze(dim=2)\n label_D2 = self.label_D2(avgp)\n return image_D, label_D2"
},
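The encoder in the record above calls self.reparameterization(mu, lv) on every part-level statistic before concatenating the identity-unrelated features. A minimal sketch of that standard VAE-style step, assuming the usual mean/log-variance convention implied by the _mu/_lv layer names (this helper is an illustration, not the repository's exact method):

import torch

def reparameterization(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # z = mu + sigma * eps with eps ~ N(0, I); the sample stays
    # differentiable with respect to mu and logvar.
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std

Under this convention, the KL term that appears in loss.py further down, 0.5 * (mu ** 2 + exp(lv) - lv - 1), is exactly the closed-form divergence between N(mu, exp(lv)) and the standard normal prior.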
{
"alpha_fraction": 0.5534263253211975,
"alphanum_fraction": 0.5675730109214783,
"avg_line_length": 36.19029998779297,
"blob_id": "e2b13563f82d76295e0747ed45cde63e3a5037ce",
"content_id": "afe1b05316fdd02671452edb4dc2a1526186b9ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9967,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 268,
"path": "/data.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "from torchvision import transforms\nfrom torch.utils.data import dataset, dataloader\nfrom torchvision.datasets.folder import default_loader\nfrom utils.RandomErasing import RandomErasing\nfrom utils.RandomSampler import RandomSampler\nfrom utils.Sampler import Sampler\nfrom opt import opt\nimport os\nimport re\nimport random\nfrom PIL import Image\n\nclass Data():\n def __init__(self, dataset=\"prcc\", test=None):\n rgb_transform = transforms.Compose([\n transforms.Resize((384, 128), interpolation=3)\n ])\n gray_transform = transforms.Compose([\n transforms.Resize((384, 128), interpolation=3),\n transforms.Grayscale(3)\n ])\n test_transform = transforms.Compose([\n transforms.Resize((384, 128), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n process_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])\n ])\n woEr_process_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n if dataset == \"prcc\":\n self.trainset = prcc(data_path=opt.data_path, dtype=\"train\",\n rgb_trans=rgb_transform, gray_trans=gray_transform,\n process_trans=process_transform)\n self.trainset_woEr = prcc(data_path=opt.data_path, dtype=\"train\",\n rgb_trans=rgb_transform, gray_trans=gray_transform,\n process_trans=woEr_process_transform)\n if test is None:\n self.testset = prcc(data_path=opt.data_path, dtype=\"test\", test_trans=test_transform)\n else:\n self.testset = prcc(data_path=opt.data_path + '/' + str(test),\n dtype=\"test\", test_trans=test_transform)\n self.queryset = prcc(data_path=opt.data_path, dtype=\"query\", test_trans=test_transform)\n\n elif dataset == \"ltcc\":\n self.trainset = ltcc(data_path=opt.data_path, dtype=\"train\",\n rgb_trans=rgb_transform, gray_trans=gray_transform,\n process_trans=process_transform)\n self.trainset_woEr = ltcc(data_path=opt.data_path, dtype=\"train\",\n rgb_trans=rgb_transform, gray_trans=gray_transform,\n process_trans=woEr_process_transform)\n self.testset = ltcc(data_path=opt.data_path, dtype=\"test\", test_trans=test_transform)\n self.queryset = ltcc(data_path=opt.data_path, dtype=\"query\", test_trans=test_transform)\n\n self.train_loader = dataloader.DataLoader(\n self.trainset, \n sampler=RandomSampler(self.trainset, batch_id=opt.batchid,batch_image=opt.batchimage),\n batch_size=opt.batchid * opt.batchimage, num_workers=0, pin_memory=True)\n self.train_loader_woEr = dataloader.DataLoader(\n self.trainset_woEr,\n sampler=RandomSampler(self.trainset_woEr, batch_id=opt.batchid, batch_image=opt.batchimage),\n batch_size=opt.batchid * opt.batchimage, num_workers=0, pin_memory=True)\n\n self.test_loader = dataloader.DataLoader(\n self.testset, batch_size=opt.batchtest, num_workers=0, pin_memory=True)\n self.query_loader = dataloader.DataLoader(\n self.queryset, batch_size=opt.batchtest, num_workers=0, pin_memory=True)\n\nclass prcc(dataset.Dataset):\n def __init__(self, data_path, dtype, rgb_trans = None, gray_trans = None, process_trans = None, test_trans = None):\n\n self.rgb_transform = rgb_trans\n self.gray_transform = gray_trans\n self.process_transform = process_trans\n self.test_transform = test_trans\n self.loader = default_loader\n self.data_path = data_path\n self.dtype = 
dtype\n\n if self.dtype == 'train':\n self.data_path += '/bounding_box_train'\n elif self.dtype == 'test':\n self.data_path += '/bounding_box_test'\n else:\n self.data_path += '/query'\n\n self.imgs = [path for path in self.list_pictures(self.data_path) if self.id(path) != -1]\n\n self._id2label = {_id: idx for idx, _id in enumerate(self.unique_ids)}\n self._path2cloth = {path: self.cloth(path) for path in self.imgs}\n\n def __getitem__(self, index):\n path = self.imgs[index]\n labels = self._id2label[self.id(path)]\n cloth = self._path2cloth[path]\n img = self.loader(path)\n if self.dtype == \"train\":\n rgb = self.rgb_transform(img)\n rgb = self.process_transform(rgb)\n return rgb, cloth, labels\n else:\n if self.test_transform is not None:\n rgb = self.test_transform(img)\n return rgb, labels\n\n\n def __len__(self):\n return len(self.imgs)\n\n @staticmethod\n def id(file_path):\n \"\"\"\n :param file_path: unix style file path\n :return: person id\n \"\"\"\n return int(file_path.split('/')[-1].split('_')[0])\n\n def cloth(self, file_path):\n c = int(file_path.split(\"/\")[-1].split('_')[1][1])\n if c == 0 or c == 1:\n return self._id2label[self.id(file_path)] * 2\n elif c == 2:\n return self._id2label[self.id(file_path)] * 2 + 1\n\n @staticmethod\n def camera(file_path):\n \"\"\"\n :param file_path: unix style file path\n :return: camera id\n \"\"\"\n return int(file_path.split('/')[-1].split('_')[1][1])\n\n @property\n def ids(self):\n \"\"\"\n :return: person id list corresponding to dataset image paths\n \"\"\"\n return [self.id(path) for path in self.imgs]\n\n @property\n def unique_ids(self):\n \"\"\"\n :return: unique person ids in ascending order\n \"\"\"\n return sorted(set(self.ids))\n\n @property\n def clothes(self):\n return None\n\n @property\n def cameras(self):\n \"\"\"\n :return: camera id list corresponding to dataset image paths\n \"\"\"\n return [self.camera(path) for path in self.imgs]\n\n @staticmethod\n def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm|npy'):\n assert os.path.isdir(directory), 'dataset is not exists!{}'.format(directory)\n\n return sorted([os.path.join(root, f)\n for root, _, files in os.walk(directory) for f in files\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)])\n\nclass ltcc(dataset.Dataset):\n def __init__(self, data_path, dtype, rgb_trans=None, gray_trans=None, process_trans=None, test_trans=None):\n\n self.rgb_transform = rgb_trans\n self.gray_transform = gray_trans\n self.process_transform = process_trans\n self.test_transform = test_trans\n self.loader = default_loader\n self.data_path = data_path\n self.dtype = dtype\n\n if self.dtype == 'train':\n self.data_path += '/bounding_box_train'\n elif self.dtype == 'test':\n self.data_path += '/bounding_box_test'\n else:\n self.data_path += '/query'\n\n self.imgs = [path for path in self.list_pictures(self.data_path) if self.id(path) != -1]\n\n self._id2label = {_id: idx for idx, _id in enumerate(self.unique_ids)}\n self._path2cloth = {path: self.cloth(path) for path in self.imgs}\n\n\n def __getitem__(self, index):\n path = self.imgs[index]\n labels = self._id2label[self.id(path)]\n cloth = self._path2cloth[path]\n img = self.loader(path)\n if self.dtype == \"train\":\n # if self.rgb_transform is not None:\n rgb = self.rgb_transform(img)\n rgb = self.process_transform(rgb)\n return rgb, cloth, labels\n else:\n if self.test_transform is not None:\n rgb = self.test_transform(img)\n return rgb, labels\n\n\n def __len__(self):\n return len(self.imgs)\n\n @staticmethod\n def 
cloth(file_path):\n return int(file_path.split('/')[-1].split('_')[1].split('s')[-1])\n\n @staticmethod\n def id(file_path):\n \"\"\"\n :param file_path: unix style file path\n :return: person id\n \"\"\"\n return int(file_path.split('/')[-1].split('_')[0])\n\n @staticmethod\n def camera(file_path):\n \"\"\"\n :param file_path: unix style file path\n :return: camera id\n \"\"\"\n return int(file_path.split('/')[-1].split('_')[1].split('s')[0][1:])\n\n @property\n def ids(self):\n \"\"\"\n :return: person id list corresponding to dataset image paths\n \"\"\"\n return [self.id(path) for path in self.imgs]\n\n @property\n def unique_ids(self):\n \"\"\"\n :return: unique person ids in ascending order\n \"\"\"\n return sorted(set(self.ids))\n\n @property\n def clothes(self):\n return [self.cloth(path) for path in self.imgs]\n\n @property\n def cameras(self):\n \"\"\"\n :return: camera id list corresponding to dataset image paths\n \"\"\"\n return [self.camera(path) for path in self.imgs]\n\n @staticmethod\n def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm|npy'):\n assert os.path.isdir(directory), 'dataset is not exists!{}'.format(directory)\n\n return sorted([os.path.join(root, f)\n for root, _, files in os.walk(directory) for f in files\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)])\n"
},
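The prcc loader above derives person, camera, and cloth labels purely from file names. A quick check of those parsing rules on a made-up path (the file name is hypothetical, chosen only to exercise the split('_') conventions in the class):

path = '/data/prcc/bounding_box_train/0012_c2_000451.jpg'
person_id = int(path.split('/')[-1].split('_')[0])       # -> 12
camera_id = int(path.split('/')[-1].split('_')[1][1])    # -> 2

In prcc.cloth, camera codes 0 and 1 map to one cloth label (2 * the remapped person label) and code 2 to the next (2 * label + 1), which encodes the same-clothes versus changed-clothes split of the PRCC protocol.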
{
"alpha_fraction": 0.5506144165992737,
"alphanum_fraction": 0.6067876219749451,
"avg_line_length": 41.75,
"blob_id": "fff1e3ee60b5db66983505001e2eba6b6a48f42b",
"content_id": "4a1bf071c2f39ab2adfc45b574ef427786cdc936",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1709,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 40,
"path": "/utils/tensor2img.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport torch\n\ndef tensor2im(image_tensor, imtype=np.uint8, normalize=True):\n if isinstance(image_tensor, list):\n image_numpy = []\n for i in range(len(image_tensor)):\n image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))\n return image_numpy\n image_numpy = image_tensor.cpu().float().detach().numpy()\n if normalize:\n image_numpy[0] = (image_numpy[0] * 0.229 + 0.485) * 255\n image_numpy[1] = (image_numpy[1] * 0.224 + 0.456) * 255\n image_numpy[2] = (image_numpy[2] * 0.225 + 0.406) * 255\n image_numpy = np.transpose(image_numpy, (1, 2, 0))\n else:\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n image_numpy = np.clip(image_numpy, 0, 255)\n if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:\n image_numpy = image_numpy[:,:,0]\n return image_numpy.astype(imtype)\n\n\ndef tensor2im2(image_tensor, imtype=np.uint8, normalize=True):\n if isinstance(image_tensor, list):\n image_numpy = []\n for i in range(len(image_tensor)):\n image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))\n return image_numpy\n image_numpy = image_tensor.cpu().float().detach().numpy()\n if normalize:\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n else:\n image_numpy = image_numpy * 255.0\n image_numpy = np.clip(image_numpy, 0, 255)\n if image_numpy.shape[0] == 23:\n image_numpy = np.concatenate([image_numpy[3:6, :, :], image_numpy[0:3, :, :]], axis=1)\n if image_numpy.shape[0] == 1 or image_numpy.shape[0] > 3:\n image_numpy = image_numpy[0, :,:]\n return image_numpy.astype(imtype)"
},
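Typical use of tensor2im above is turning a normalized CHW tensor back into a savable image; a small sketch, assuming the function is imported from utils.tensor2img as loss.py does, with a random tensor standing in for a network output:

import torch
from PIL import Image
from utils.tensor2img import tensor2im

chw = torch.randn(3, 384, 128)   # stand-in for a normalized model output
Image.fromarray(tensor2im(chw)).save('preview.jpg')

Note the split of responsibilities: tensor2im with normalize=True undoes the per-channel ImageNet normalization used by the data pipeline, while tensor2im2 instead maps from [-1, 1], matching the generator's Tanh output.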
{
"alpha_fraction": 0.5280911326408386,
"alphanum_fraction": 0.539372980594635,
"avg_line_length": 43.319095611572266,
"blob_id": "f5ecfc570796f335d15960a99f5dbab7e4d40989",
"content_id": "6b91e39d0a506657d8baefab1d46fef98220bb17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17639,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 398,
"path": "/loss.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "from torch.nn import CrossEntropyLoss, BCELoss, L1Loss, Tanh\nfrom torch.nn.modules import loss\nfrom utils.get_optimizer import get_optimizer\nfrom utils.TripletLoss import TripletLoss\nimport torch\nfrom torch.distributions import normal\nimport numpy as np\nimport copy\nfrom utils.tensor2img import tensor2im\nfrom opt import opt\n\nfrom torchvision import transforms\nfrom PIL import Image\n\nclass Loss(loss._Loss):\n def __init__(self, model):\n super(Loss, self).__init__()\n self.batch_size = opt.batchid * opt.batchimage\n self.num_gran = 8\n self.tanh = Tanh()\n self.l1_loss = L1Loss()\n self.bce_loss = BCELoss()\n self.cross_entropy_loss = CrossEntropyLoss()\n\n self.model = model\n self.optimizer, self.optimizer_D, self.optimizer_DC = get_optimizer(model)\n\n def get_positive_pairs(self):\n idx = []\n for i in range(self.batch_size):\n r = i\n while r == i:\n r = int(torch.randint(\n low=opt.batchimage * (i // opt.batchimage), high=opt.batchimage * (i // opt.batchimage + 1),\n size=(1,)).item())\n idx.append(r)\n return idx\n\n def region_wise_shuffle(self, id, ps_idx):\n sep_id = id.clone()\n idx = torch.tensor([0] * (self.num_gran))\n while (torch.sum(idx) == 0) and (torch.sum(idx) == self.num_gran):\n idx = torch.randint(high=2, size=(self.num_gran,))\n\n for i in range(self.num_gran):\n if idx[i]:\n sep_id[:, opt.feat_id * i:opt.feat_id * (i + 1)] = id[ps_idx][:, opt.feat_id * i:opt.feat_id * (i + 1)]\n return sep_id\n\n def get_noise(self):\n return torch.randn(self.batch_size, opt.feat_niz, device=opt.device)\n\n def make_onehot(self, label):\n onehot_vec = torch.zeros(self.batch_size, opt.num_cls)\n for i in range(label.size()[0]):\n onehot_vec[i, label[i]] = 1\n return onehot_vec\n\n def set_parameter(self, m, train=True):\n if train:\n for param in m.parameters():\n param.requires_grad = True\n m.apply(self.set_bn_to_train)\n else:\n for param in m.parameters():\n param.requires_grad = False\n m.apply(self.set_bn_to_eval)\n\n def set_bn_to_eval(self, m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm2d') != -1:\n m.eval()\n\n def set_bn_to_train(self, m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm2d') != -1:\n m.train()\n\n def set_model(self, batch=None):\n self.model.C.zero_grad()\n self.model.G.zero_grad()\n self.model.D.zero_grad()\n\n if opt.stage == 0:\n self.set_parameter(self.model.C, train=True)\n self.set_parameter(self.model.G, train=False)\n self.set_parameter(self.model.D, train=False)\n self.set_parameter(self.model.DC, train=False)\n\n elif opt.stage == 1:\n self.set_parameter(self.model.C, train=False)\n cloth_dict1 = self.model.C.get_modules(self.model.C.cloth_dict1())\n cloth_dict2 = self.model.C.get_modules(self.model.C.cloth_dict2())\n for i in range(np.shape(cloth_dict1)[0]):\n self.set_parameter(cloth_dict1[i], train=True)\n for i in range(np.shape(cloth_dict2)[0]):\n self.set_parameter(cloth_dict2[i], train=True)\n self.set_parameter(self.model.G, train=False)\n self.set_parameter(self.model.D, train=False)\n self.set_parameter(self.model.DC, train=False)\n\n elif opt.stage == 2:\n self.set_parameter(self.model.C, train=False)\n nid_dict1 = self.model.C.get_modules(self.model.C.nid_dict1())\n nid_dict2 = self.model.C.get_modules(self.model.C.nid_dict2())\n # cloth_dict1 = self.model.C.get_modules(self.model.C.cloth_dict1())\n # cloth_dict2 = self.model.C.get_modules(self.model.C.cloth_dict2())\n for i in range(np.shape(nid_dict1)[0]):\n self.set_parameter(nid_dict1[i], train=True)\n for i in 
range(np.shape(nid_dict2)[0]):\n self.set_parameter(nid_dict2[i], train=True)\n # for i in range(np.shape(cloth_dict1)[0]):\n # self.set_parameter(cloth_dict1[i], train=True)\n # for i in range(np.shape(cloth_dict2)[0]):\n # self.set_parameter(cloth_dict2[i], train=True)\n self.set_parameter(self.model.G, train=True)\n self.set_parameter(self.model.D, train=True)\n self.set_parameter(self.model.DC, train=True)\n\n\n\n def id_related_loss(self, labels, outputs):\n CrossEntropy_Loss = [self.cross_entropy_loss(output, labels) for output in outputs[1:1 + self.num_gran]]\n return sum(CrossEntropy_Loss) / len(CrossEntropy_Loss)\n\n def cloth_related_loss(self, labels, outputs):\n CrossEntropy_Loss = [self.cross_entropy_loss(output, labels) for output in outputs[-6]]\n return sum(CrossEntropy_Loss) / len(CrossEntropy_Loss)\n\n def rgb_gray_distance(self, rgb_outputs, gray_outputs):\n return torch.sum(torch.pairwise_distance(rgb_outputs[0], gray_outputs[0]))\n\n def KL_loss(self, outputs):\n list_mu = outputs[-3]\n list_lv = outputs[-2]\n loss_KL = 0.\n for i in range(np.size(list_mu)):\n loss_KL += torch.sum(0.5 * (list_mu[i] ** 2 + torch.exp(list_lv[i]) - list_lv[i] - 1))\n return loss_KL / np.size(list_mu)\n\n\n\n def GAN_loss(self, inputs, outputs, labels, cloth_labels, epoch):\n id = outputs[0]\n nid = outputs[-1]\n cloth = outputs[-4]\n one_hot_labels = self.make_onehot(labels).to(opt.device)\n\n # Auto Encoder\n auto_G_in = torch.cat((id, cloth, nid, self.get_noise()), dim=1)\n auto_G_out = self.model.G.forward(auto_G_in, one_hot_labels)\n\n # Positive Shuffle\n ps_idx = self.get_positive_pairs()\n ps_G_in = torch.cat((id[ps_idx], cloth, nid, self.get_noise()), dim=1)\n ps_G_out = self.model.G.forward(ps_G_in, one_hot_labels)\n\n s_ps_G_in = torch.cat((id, cloth[ps_idx], nid, self.get_noise()), dim=1)\n s_ps_G_out = self.model.G.forward(s_ps_G_in, one_hot_labels)\n\n s2_ps_G_in = torch.cat((id[ps_idx], cloth[ps_idx], nid, self.get_noise()), dim=1)\n s2_ps_G_out = self.model.G.forward(s2_ps_G_in, one_hot_labels)\n\n # Negative Shuffle\n neg_idx = ps_idx[::-1]\n neg_G_in = torch.cat((id, cloth[neg_idx], nid[neg_idx], self.get_noise()), dim=1)\n neg_G_out = self.model.G.forward(neg_G_in, one_hot_labels)\n\n ############################################## D_loss ############################################\n D_real = self.model.D(inputs)\n REAL_LABEL = torch.FloatTensor(D_real.size()).uniform_(0.7, 1.0).to(opt.device)\n D_real_loss = self.bce_loss(D_real, REAL_LABEL)\n\n auto_D_fake = self.model.D(auto_G_out.detach())\n FAKE_LABEL = torch.FloatTensor(auto_D_fake.size()).uniform_(0.0, 0.3).to(opt.device)\n auto_D_fake_loss = self.bce_loss(auto_D_fake, FAKE_LABEL)\n\n ps_D_fake = self.model.D(ps_G_out.detach())\n FAKE_LABEL = torch.FloatTensor(ps_D_fake.size()).uniform_(0.0, 0.3).to(opt.device)\n ps_D_fake_loss = self.bce_loss(ps_D_fake, FAKE_LABEL)\n\n s_ps_D_fake = self.model.D(s_ps_G_out.detach())\n FAKE_LABEL = torch.FloatTensor(s_ps_D_fake.size()).uniform_(0.0, 0.3).to(opt.device)\n s_ps_D_fake_loss = self.bce_loss(s_ps_D_fake, FAKE_LABEL)\n\n s2_ps_D_fake = self.model.D(s2_ps_G_out.detach())\n FAKE_LABEL = torch.FloatTensor(s2_ps_D_fake.size()).uniform_(0.0, 0.3).to(opt.device)\n s2_ps_D_fake_loss = self.bce_loss(s2_ps_D_fake, FAKE_LABEL)\n\n neg_D_fake = self.model.D(neg_G_out.detach())\n FAKE_LABEL = torch.FloatTensor(neg_D_fake.size()).uniform_(0.0, 0.3).to(opt.device)\n neg_D_fake_loss = self.bce_loss(neg_D_fake, FAKE_LABEL)\n\n D_loss = (D_real_loss + auto_D_fake_loss + ps_D_fake_loss + 
s_ps_D_fake_loss + s2_ps_D_fake_loss + neg_D_fake_loss)\n D_loss.backward()\n self.optimizer_D.step()\n\n ############################################## G_loss ##############################################\n auto_D_fake = self.model.D(auto_G_out)\n auto_I_fake, auto_C_fake = self.model.DC(auto_G_out)\n REAL_LABEL = torch.ones_like(auto_D_fake)\n auto_D_fake_loss = self.bce_loss(auto_D_fake, REAL_LABEL)\n auto_I_fake_loss = self.cross_entropy_loss(auto_I_fake, labels)\n auto_C_fake_loss = self.cross_entropy_loss(auto_C_fake, cloth_labels)\n # auto_cls_loss = self.cross_entropy_loss(auto_G_cls, labels)\n\n ps_D_fake = self.model.D(ps_G_out)\n ps_I_fake, ps_C_fake = self.model.DC(ps_G_out)\n REAL_LABEL = torch.ones_like(ps_D_fake)\n ps_D_fake_loss = self.bce_loss(ps_D_fake, REAL_LABEL)\n ps_I_fake_loss = self.cross_entropy_loss(ps_I_fake, labels)\n ps_C_fake_loss = self.cross_entropy_loss(ps_C_fake, cloth_labels)\n # ps_cls_loss = self.cross_entropy_loss(ps_G_cls, labels)\n\n s_ps_D_fake = self.model.D(s_ps_G_out)\n s_ps_I_fake, s_ps_C_fake = self.model.DC(s_ps_G_out)\n REAL_LABEL = torch.ones_like(s_ps_D_fake)\n s_ps_D_fake_loss = self.bce_loss(s_ps_D_fake, REAL_LABEL)\n s_ps_I_fake_loss = self.cross_entropy_loss(s_ps_I_fake, labels)\n s_ps_C_fake_loss = self.cross_entropy_loss(s_ps_C_fake, cloth_labels[ps_idx])\n\n s2_ps_D_fake = self.model.D(s2_ps_G_out)\n s2_ps_I_fake, s2_ps_C_fake = self.model.DC(s2_ps_G_out)\n REAL_LABEL = torch.ones_like(s2_ps_D_fake)\n s2_ps_D_fake_loss = self.bce_loss(s2_ps_D_fake, REAL_LABEL)\n s2_ps_I_fake_loss = self.cross_entropy_loss(s2_ps_I_fake, labels)\n s2_ps_C_fake_loss = self.cross_entropy_loss(s2_ps_C_fake, cloth_labels[ps_idx])\n\n neg_D_fake = self.model.D(neg_G_out)\n neg_I_fake, neg_C_fake = self.model.DC(neg_G_out)\n REAL_LABEL = torch.ones_like(neg_D_fake)\n neg_D_fake_loss = self.bce_loss(neg_D_fake, REAL_LABEL)\n neg_I_fake_loss = self.cross_entropy_loss(neg_I_fake, labels)\n neg_C_fake_loss = self.cross_entropy_loss(neg_C_fake, cloth_labels[neg_idx])\n\n auto_imgr_loss = self.l1_loss(auto_G_out, self.tanh(inputs))\n ps_imgr_loss = self.l1_loss(ps_G_out, self.tanh(inputs))\n s_ps_imgr_loss = self.l1_loss(s_ps_G_out, s2_ps_G_out)\n\n '''\n img = tensor2im(auto_G_out[0])\n img = Image.fromarray(img)\n img.save('test_image' + '/aaa' + '.jpg')\n img = tensor2im(inputs[0])\n img = Image.fromarray(img)\n img.save('test_image' + '/anchor' + '.jpg')\n '''\n\n if epoch > 50:\n G_loss = (auto_D_fake_loss + ps_D_fake_loss + s_ps_D_fake_loss + s2_ps_D_fake_loss + neg_D_fake_loss) + \\\n (auto_C_fake_loss + ps_C_fake_loss + s_ps_C_fake_loss + s2_ps_C_fake_loss + neg_C_fake_loss) * 2 + \\\n (auto_I_fake_loss + ps_I_fake_loss + s_ps_I_fake_loss + s2_ps_I_fake_loss + neg_I_fake_loss) * 2 + \\\n (auto_imgr_loss + ps_imgr_loss + s_ps_imgr_loss) * 10\n else:\n G_loss = (auto_D_fake_loss + ps_D_fake_loss + s_ps_D_fake_loss + s2_ps_D_fake_loss + neg_D_fake_loss) + \\\n (auto_imgr_loss + ps_imgr_loss + s_ps_imgr_loss) * 10\n\n ############################################################################################\n return D_loss, G_loss\n\n def forward(self, rgb, labels, cloth_labels, batch, epoch):\n self.set_model(batch)\n rgb_outputs = self.model.C(rgb)\n\n if opt.stage == 0:\n Rgb_CE = self.id_related_loss(labels, rgb_outputs)\n IDcnt = 0\n IDtotal = opt.batchid * opt.batchimage * self.num_gran\n for classifyprobabilities in rgb_outputs[1:1 + self.num_gran]:\n for i in range(opt.batchid * opt.batchimage):\n class_ = torch.argmax(classifyprobabilities[i])\n 
label = labels[i]\n if class_ == label:\n IDcnt += 1\n\n loss_sum = Rgb_CE\n\n print('\\rRgb_CE:%.2f' % (\n Rgb_CE.data.cpu().numpy()\n ), end=' ')\n return loss_sum, [Rgb_CE.data.cpu().numpy()], [[IDcnt, IDtotal], [1, 1]]\n\n elif opt.stage == 1:\n # D_outputs_id, D_outputs_cloth = self.model.DC(rgb)\n\n Cloth_CE = self.cloth_related_loss(cloth_labels, rgb_outputs)\n Clothcnt = 0\n Clothtotal = opt.batchid * opt.batchimage * 3\n for classifyprobabilities in rgb_outputs[-6]:\n for i in range(opt.batchid * opt.batchimage):\n class_ = torch.argmax(classifyprobabilities[i])\n cloth_label = cloth_labels[i]\n if class_ == cloth_label:\n Clothcnt += 1\n\n # D_I = self.cross_entropy_loss(D_outputs_id, labels)\n # D_C = self.cross_entropy_loss(D_outputs_cloth, cloth_labels)\n # DC_loss = D_I + D_C\n # DC_loss.backward()\n # self.optimizer_DC.step()\n loss_sum = Cloth_CE\n\n print('\\rCloth_CE:%.2f' % (\n Cloth_CE.data.cpu().numpy()\n ), end=' ')\n return loss_sum, \\\n [Cloth_CE.data.cpu().numpy()], \\\n [[1, 1], [Clothcnt, Clothtotal]]\n\n elif opt.stage == 2:\n D_outputs_id, D_outputs_cloth = self.model.DC(rgb)\n D_I = self.cross_entropy_loss(D_outputs_id, labels)\n D_C = self.cross_entropy_loss(D_outputs_cloth, cloth_labels)\n DC_loss = D_I + D_C\n DC_loss.backward()\n self.optimizer_DC.step()\n\n D_loss, G_loss = self.GAN_loss(rgb, rgb_outputs, labels, cloth_labels, epoch)\n KL_loss = self.KL_loss(rgb_outputs)\n\n loss_sum = G_loss + KL_loss / 500\n\n print('\\rD_loss:%.2f G_loss:%.2f KL:%.2f D_I:%.2f D_C:%.2f' % (\n D_loss.data.cpu().numpy(),\n G_loss.data.cpu().numpy(),\n KL_loss.data.cpu().numpy(),\n D_I.data.cpu().numpy(),\n D_C.data.cpu().numpy()), end=' ')\n return loss_sum, \\\n [D_loss.data.cpu().numpy(), G_loss.data.cpu().numpy(), KL_loss.data.cpu().numpy(), D_I.data.cpu().numpy(), D_C.data.cpu().numpy()], \\\n [[1, 1], [1, 1]]\n\n elif opt.stage == 3:\n\n D_outputs_id, D_outputs_cloth = self.model.DC(rgb)\n D_I = self.cross_entropy_loss(D_outputs_id, labels)\n D_C = self.cross_entropy_loss(D_outputs_cloth, cloth_labels)\n DC_loss = D_I + D_C\n DC_loss.backward()\n self.optimizer_DC.step()\n\n Rgb_CE = self.id_related_loss(labels, rgb_outputs)\n IDcnt = 0\n IDtotal = opt.batchid * opt.batchimage * self.num_gran\n for classifyprobabilities in rgb_outputs[1:1 + self.num_gran]:\n for i in range(opt.batchid * opt.batchimage):\n class_ = torch.argmax(classifyprobabilities[i])\n label = labels[i]\n if class_ == label:\n IDcnt += 1\n\n cloth_loss = self.cloth_related_loss(cloth_labels, rgb_outputs)\n Clothcnt = 0\n Clothtotal = opt.batchid * opt.batchimage * 3\n for classifyprobabilities in rgb_outputs[-6]:\n for i in range(opt.batchid * opt.batchimage):\n class_ = torch.argmax(classifyprobabilities[i])\n cloth_label = cloth_labels[i]\n if class_ == cloth_label:\n Clothcnt += 1\n\n D_loss, G_loss = self.GAN_loss(rgb, rgb_outputs, labels, cloth_labels, epoch)\n KL_loss = self.KL_loss(rgb_outputs)\n\n loss_sum = (Rgb_CE) * 20 + cloth_loss * 10 + G_loss / 2 + KL_loss / 100\n\n print('\\rRgb_CE:%.2f Cloth:%.2f D_loss:%.2f G_loss:%.2f D_I:%.2f D_C:%.2f '\n ' KL:%.2f' % (\n Rgb_CE.data.cpu().numpy(),\n cloth_loss.data.cpu().numpy(),\n D_loss.data.cpu().numpy(),\n G_loss.data.cpu().numpy(),\n D_I.data.cpu().numpy(),\n D_C.data.cpu().numpy(),\n KL_loss.data.cpu().numpy()), end=' ')\n return loss_sum, \\\n [Rgb_CE.data.cpu().numpy(), cloth_loss.data.cpu().numpy(), D_loss.data.cpu().numpy(),\n G_loss.data.cpu().numpy(), D_I.data.cpu().numpy(), D_C.data.cpu().numpy(),\n KL_loss.data.cpu().numpy()], 
\\\n [[IDcnt, IDtotal], [Clothcnt, Clothtotal]]\n\n if opt.stage == 4:\n Rgb_CE = self.id_related_loss(labels, rgb_outputs)\n IDcnt = 0\n IDtotal = opt.batchid * opt.batchimage * self.num_gran\n for classifyprobabilities in rgb_outputs[1:1 + self.num_gran]:\n for i in range(opt.batchid * opt.batchimage):\n class_ = torch.argmax(classifyprobabilities[i])\n label = labels[i]\n if class_ == label:\n IDcnt += 1\n\n loss_sum = Rgb_CE\n\n print('\\rRgb_CE:%.2f' % (\n Rgb_CE.data.cpu().numpy()\n ), end=' ')\n return loss_sum, [Rgb_CE.data.cpu().numpy()], [[IDcnt, IDtotal], [1, 1]]\n"
},
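One detail worth isolating from the loss record above: in region_wise_shuffle the mask is apparently meant to be redrawn until it is neither all-zero nor all-one, but written with `and` the redraw condition can never hold, so the initial all-zero mask is kept. A rejection loop that actually enforces a mixed mask would read as follows (a sketch under that reading of the intent, not the repository's code):

import torch

def draw_region_mask(num_gran: int = 8) -> torch.Tensor:
    # Redraw until at least one region is swapped and at least one is kept.
    idx = torch.zeros(num_gran, dtype=torch.long)
    while torch.sum(idx) == 0 or torch.sum(idx) == num_gran:
        idx = torch.randint(high=2, size=(num_gran,))
    return idx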
{
"alpha_fraction": 0.5764362215995789,
"alphanum_fraction": 0.6071080565452576,
"avg_line_length": 49.121952056884766,
"blob_id": "20ae5ffd566fd1ea6e4fda3b5d05b4b977d00f0c",
"content_id": "4910b73f8c887974e6532af7790c3bad7685c2f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2054,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 41,
"path": "/utils/get_optimizer.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "from torch.optim import Adam, SGD\nimport numpy as np\n\nfrom opt import opt\n\n\ndef get_optimizer(model):\n if (opt.stage == 1) or (opt.stage == 2) or opt.stage == 0:\n param_groups = [{'params': model.C.parameters(), 'lr_mult': 1.0},\n {'params': model.G.parameters(), 'lr_mult': 1.0}]\n optimizer = Adam(param_groups, lr=opt.lr, weight_decay=5e-4, amsgrad=True)\n\n optimizer_D = SGD(model.D.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4)\n optimizer_DC = SGD(model.DC.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4)\n\n if opt.stage == 3 or opt.stage == 4:\n id_dict1 = model.C.get_modules(model.C.id_dict1())\n id_dict2 = model.C.get_modules(model.C.id_dict2())\n id_modules = id_dict1 + id_dict2\n nid_dict1 = model.C.get_modules(model.C.nid_dict1())\n nid_dict2 = model.C.get_modules(model.C.nid_dict2())\n nid_modules = nid_dict1 + nid_dict2\n cloth_dict1 = model.C.get_modules(model.C.cloth_dict1())\n cloth_dict2 = model.C.get_modules(model.C.cloth_dict2())\n cloth_modules = cloth_dict1 + cloth_dict2\n\n param_groups = []\n param_groups = [{'params': model.C.backbone.parameters(), 'lr_mult': 1.0},\n {'params': model.G.parameters(), 'lr_mult': 0.01}]\n for i in range(np.shape(id_modules)[0]):\n param_groups.append({'params': id_modules[i].parameters(), 'lr_mult': 1.0})\n for i in range(np.shape(nid_modules)[0]):\n param_groups.append({'params': nid_modules[i].parameters(), 'lr_mult': 0.01})\n for i in range(np.shape(cloth_modules)[0]):\n param_groups.append({'params': cloth_modules[i].parameters(), 'lr_mult': 0.01})\n\n optimizer = Adam(param_groups, lr=opt.lr, weight_decay=5e-4, amsgrad=True)\n optimizer_D = SGD(model.D.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4)\n optimizer_DC = SGD(model.DC.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4)\n\n return optimizer, optimizer_D, optimizer_DC"
},
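A point worth noting about get_optimizer above: Adam and SGD simply carry unrecognized param-group keys along, so the 'lr_mult' entries have no effect unless the training loop applies them itself. A sketch of the usual pattern (assumed usage, not shown anywhere in this dump):

def apply_lr_mult(optimizer, base_lr):
    # Scale each group's learning rate by its multiplier, defaulting to 1.0
    # for groups that do not declare one.
    for group in optimizer.param_groups:
        group['lr'] = base_lr * group.get('lr_mult', 1.0)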
{
"alpha_fraction": 0.4481586515903473,
"alphanum_fraction": 0.4739376902580261,
"avg_line_length": 30.238937377929688,
"blob_id": "3581b00fcca057a48f37d23b69b6aa1133def198",
"content_id": "b317bdcb4af8b4acdf7e0b4d26a68a3a84a69484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3530,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 113,
"path": "/opt.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import torch\nimport argparse\n\nparser = argparse.ArgumentParser(description='reid')\n\nparser.add_argument('--data_path',\n default=\"/home/ubuntu/reid/0630/dataset/prcc-shuffle/dataset5/\",\n help='path of Market-1501-v15.09.15')\n\nparser.add_argument('--mode',\n default='train', choices=['train', 'evaluate', 'vis'],\n help='train or evaluate ')\n\nparser.add_argument('--query_image',\n default='0001_c1s1_001051_00.jpg',\n help='path to the image you want to query')\n\nparser.add_argument('--weight',\n default='weights/isgan_stage0_100.pt',\n help='load weights ')\n\nparser.add_argument('--epoch',\n default=400,\n type=int,\n help='number of epoch to train')\n\nparser.add_argument('--lr',\n default=2e-4, #2e-4\n help='initial learning_rate')\n\nparser.add_argument('--lr_scheduler',\n default=[300],\n help='MultiStepLR,decay the learning rate')\n\nparser.add_argument(\"--batchid\",\n default=2,\n type=int,\n help='the batch for id')\n\nparser.add_argument(\"--batchimage\",\n default=6,\n type=int,\n help='the batch of per id')\n\nparser.add_argument(\"--batchtest\",\n default=3,\n type=int,\n help='the batch size for test')\n\nparser.add_argument(\"--device\",\n default=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n help='cuda is available?')\n\nparser.add_argument(\"--num_cls\",\n default=150, # Prcc 150 LTCC 77\n type=int,\n help='# of classes')\n\nparser.add_argument(\"--num_cloths\",\n default=300, # Prcc 300 LTCC 256\n type=int,\n help='# of clothes')\n\nparser.add_argument(\"--feat_id\",\n default=128, #--> 2048\n type=int,\n help='size of id features')\n\nparser.add_argument(\"--feat_nid\",\n default=64, #--> 512\n type=int,\n help='size of id features')\n\nparser.add_argument(\"--feat_niz\",\n default=128,\n type=int,\n help='size of id features')\n\nparser.add_argument(\"--feat_G\",\n default=64,\n type=int,\n help='size of Generator')\n\nparser.add_argument(\"--feat_D\",\n default=32,\n type=int,\n help='size of Discriminator')\n\nparser.add_argument(\"--dropout\",\n default=0.2,\n help='probaility of dropout')\n\nparser.add_argument(\"--stage\",\n default=1,\n type=int,\n help='# of training stage')\n\nparser.add_argument(\"--save_path\",\n default='weights',\n help='the path for saving weights')\n\nparser.add_argument(\"--name\",\n default='/isgan',\n help='the additional path to identify')\n\nparser.add_argument(\"--start\",\n default=0,\n type=int,\n help='start epoch')\n\nparser.add_argument(\"--stage2_weight_path\", default='/model_stage2_200.pt')\n\nopt = parser.parse_args()\n"
},
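Because opt.py above calls parser.parse_args() at module level, the flags are read from sys.argv the first time `opt` is imported; any script or test that imports it must therefore set argv first. A minimal illustration:

import sys

sys.argv = ['main.py', '--stage', '0', '--mode', 'train']  # must precede the import
from opt import opt

print(opt.stage, opt.mode, opt.device)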
{
"alpha_fraction": 0.5064102411270142,
"alphanum_fraction": 0.5320512652397156,
"avg_line_length": 31.842105865478516,
"blob_id": "c1925a1b1e6ecb6a80d8989f3bd6e29e2faafcd4",
"content_id": "b9bb1cb427e5e84fc6c82a01f460edd2edf85a9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 19,
"path": "/utils/extract_feature.py",
"repo_name": "Pantheona/ourModel11-10",
"src_encoding": "UTF-8",
"text": "import torch\n\ndef extract_feature(model, loader):\n features = torch.FloatTensor()\n\n for (rgb, labels) in loader:\n ff = torch.FloatTensor(rgb.size(0), 1024).zero_()\n for i in range(2):\n if i == 1:\n rgb = rgb.index_select(3, torch.arange(rgb.size(3) - 1, -1, -1).long())\n input_img = rgb.to('cuda')\n outputs = model.C(input_img)\n f = outputs[0].data.cpu()\n ff = ff + f\n\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n features = torch.cat((features, ff), 0)\n return features\n"
}
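The index_select call in extract_feature above reverses the width axis to get a horizontal flip of the batch; an equivalent and more direct form, shown here only for reference (same result, different API):

import torch

rgb = torch.randn(4, 3, 384, 128)
flipped_a = rgb.index_select(3, torch.arange(rgb.size(3) - 1, -1, -1).long())
flipped_b = torch.flip(rgb, dims=[3])
assert torch.equal(flipped_a, flipped_b)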
] | 11 |
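Each row of this dump pairs repo-level columns with a nested files list whose "text" field holds the escaped source. Assuming the rows are re-serialized one JSON object per line with the field names shown (the file name below is hypothetical), iterating them is straightforward:

import json

with open('repos.jsonl') as fh:
    for line in fh:
        repo = json.loads(line)
        for f in repo['files']:
            print(repo['repo_name'], f['path'], f['language'], f['length_bytes'])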
IsmaelEzequiel/Brincando-com-DjangoRest | https://github.com/IsmaelEzequiel/Brincando-com-DjangoRest | 25911756220af31947c9f0405662f9787b7b54b7 | 18b330e94b6ac9a9b707a6e69de6731b5a4beabe | 92eb43545ffb56e5353f6e5ebd2f40470a5c355a | refs/heads/master | 2016-08-12T21:02:32.786119 | 2016-02-24T18:59:44 | 2016-02-24T18:59:44 | 52,466,031 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7293666005134583,
"alphanum_fraction": 0.7293666005134583,
"avg_line_length": 29.647058486938477,
"blob_id": "f10ef692a53f77675ddce0984ce71211e83bfab8",
"content_id": "b35ad32a68dc3968607ad921bdad3ac8b0471f66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 521,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 17,
"path": "/api/api/urls.py",
"repo_name": "IsmaelEzequiel/Brincando-com-DjangoRest",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework import routers\nfrom qui import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'group', views.GroupViewSet)\nrouter.register(r'form', views.UserModelViewSet)\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home_page),\n url(r'^api/', include(router.urls)),\n url(r'api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n]\n"
},
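The DefaultRouter in the urls.py record above generates the standard list/detail routes for each registered ViewSet. An illustrative summary of what those registrations expose once mounted under r'^api/':

# Routes produced by the registrations above (illustrative, not exhaustive):
#   /api/users/         GET (list), POST (create)        -> UserViewSet
#   /api/users/{pk}/    GET, PUT, PATCH, DELETE          -> UserViewSet
#   /api/group/  and /api/group/{pk}/                    -> GroupViewSet
#   /api/form/   and /api/form/{pk}/                     -> UserModelViewSet
#   /api/               browsable API root listing the registered endpoints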
{
"alpha_fraction": 0.7283950448036194,
"alphanum_fraction": 0.7283950448036194,
"avg_line_length": 15.199999809265137,
"blob_id": "5e6e55c08e843a0bfef2666d6d30823ae3b9f002",
"content_id": "af9c5f1a95bb8d763451cd12d1582a22dd4e5473",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/api/qui/apps.py",
"repo_name": "IsmaelEzequiel/Brincando-com-DjangoRest",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass QuiConfig(AppConfig):\n name = 'qui'\n"
},
{
"alpha_fraction": 0.7293233275413513,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 25.600000381469727,
"blob_id": "7f1463de4cf8b8bb396150602007e76dbe3011ae",
"content_id": "853c674968d6ddea7a58ccd2d44ad420dbe9153b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 10,
"path": "/api/qui/admin.py",
"repo_name": "IsmaelEzequiel/Brincando-com-DjangoRest",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import UserModel\nfrom .forms import UserForm\n\n# Register your models here.\[email protected](UserModel)\nclass UserModel(admin.ModelAdmin):\n list_display = ['__str__', 'name', 'password', 'comment']\n form = UserForm\n"
},
{
"alpha_fraction": 0.7005025148391724,
"alphanum_fraction": 0.7005025148391724,
"avg_line_length": 26.63888931274414,
"blob_id": "23b522066c0120041ca8d2b9ffdfa04a860f644d",
"content_id": "ff084ad529048e361a6cdb96d81ed90f25ae3e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 995,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 36,
"path": "/api/qui/views.py",
"repo_name": "IsmaelEzequiel/Brincando-com-DjangoRest",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\nfrom qui.serializers import UserSerializer, GroupSerializer, UserModelSerializer\n\nfrom .forms import UserForm\nfrom .models import UserModel\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\nclass UserModelViewSet(viewsets.ModelViewSet):\n queryset = UserModel.objects.all().order_by('-created')\n serializer_class = UserModelSerializer\n\ndef home_page(request):\n template = 'index.html'\n\n user = UserForm(request.POST or None)\n context = {\n 'user_forms': user\n }\n\n if user.is_valid():\n user.save()\n context = {\n 'user_forms': 'Thank you!'\n }\n\n return render(request, template, context)\n"
}
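home_page in the views.py record above relies on the `UserForm(request.POST or None)` idiom: on GET the form is unbound, is_valid() returns False, and the empty form renders; only on POST does validation and save run. The binding behaviour in isolation (UserForm lives in the repo's forms.py, which is referenced but not included in this dump, so the data dict here is hypothetical):

from qui.forms import UserForm  # assumed import path, per views.py

form = UserForm(None)               # GET: unbound
assert not form.is_bound and not form.is_valid()

form = UserForm({'name': 'Ana'})    # POST: bound; is_valid() runs validation
if form.is_valid():
    form.save()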
] | 4 |