repo_name (stringlengths 5-114) | repo_url (stringlengths 24-133) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | directory_id (stringlengths 40) | branch_name (stringclasses, 209 values) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k-683M, ⌀) | star_events_count (int64, 0-22.6k) | fork_events_count (int64, 0-4.15k) | gha_license_id (stringclasses, 17 values) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (stringclasses, 115 values) | files (listlengths 1-13.2k) | num_files (int64, 1-13.2k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
diegosantosmartinss/Streamlit | https://github.com/diegosantosmartinss/Streamlit | f3c9a68288bf5a7a4fb631be5c28807ef0d4d34c | 8079a4637b0879b6d806522d4a87a3352688e24d | 4386c3483b1e79675846e59422e98e9cb55b6979 | refs/heads/main | 2023-05-02T20:23:49.719383 | 2021-05-19T16:58:56 | 2021-05-19T16:58:56 | 365,890,808 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6074270606040955,
"alphanum_fraction": 0.6366047859191895,
"avg_line_length": 18.6849308013916,
"blob_id": "37c5cb67aa8f8b25d08d195e0058e0744c526357",
"content_id": "128b2556d58d142658405613d30ca3a34d19cdb2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1509,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 73,
"path": "/first-app.py",
"repo_name": "diegosantosmartinss/Streamlit",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\r\nimport streamlit as st \r\nimport time \r\nimport altair as alt\r\nimport numpy as np \r\nimport pandas as pd \r\n#Use magic \r\n\r\n\"\"\"\r\n #My first app\r\n Here's our first attempt at using data to create a table:\r\n\"\"\"\r\n\r\ndf = pd.DataFrame({\r\n\r\n 'first column': [1, 2,3,4],\r\n 'second column':[10,20,30,40]\r\n})\r\n\r\ndf\r\n\"\"\"\r\nGráfico de Linha\r\n\"\"\"\r\nchart_data = pd.DataFrame(\r\n np.random.randn(20,3),\r\n columns = ['a','b','c'])\r\n\r\nst.line_chart(chart_data)\r\n\r\n\"\"\"\r\nTrace um mapa\r\n\"\"\"\r\nmap_data = pd.DataFrame(\r\n np.random.randn(1000,2)/[50,50]+[37.76, -122.4],\r\n columns=['lat', 'lon'])\r\n\r\nst.map(map_data)\r\n\r\nif st.checkbox('Show dataframe'):\r\n chart_data = pd.DataFrame(\r\n np.random.randn(20,3),\r\n columns=['a', 'b','c'])\r\nchart_data\r\n\r\n#Use a selectbox for options \r\n\r\noption = st.sidebar.selectbox(\r\n 'Which number do you like best?',\r\n df['first column'])\r\n'You selected: ', option\r\n\r\nleft_column, right_column = st.beta_columns(2)\r\npressed = left_column.button('Press me?')\r\nif pressed:\r\n right_column.write(\"Woohoo!\")\r\n\r\nexpander = st.beta_expander(\"FAQ\")\r\nexpander.write(\"Here you could put in some really, really long explanations...\")\r\n\r\n#Mostrar progresso\r\n'Starting a long computation ...'\r\n\r\n#Add a placeholder \r\nlatest_iteration = st.empty()\r\nbar = st.progress(0)\r\n\r\nfor i in range(100):\r\n #Update the progress bar with each iteration \r\n latest_iteration.text(f'Iteration{i+1}')\r\n bar.progress(i+1)\r\n time.sleep(0.1)\r\n\r\n '...and now we\\'re done!'"
},
{
"alpha_fraction": 0.7083333134651184,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 6.333333492279053,
"blob_id": "734ae66aa687b947d374884f0affeeb4b99913de",
"content_id": "2646ada496b7c3cd7ccfb81059078fee3d842326",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 24,
"license_type": "permissive",
"max_line_length": 7,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "diegosantosmartinss/Streamlit",
"src_encoding": "UTF-8",
"text": "altair \r\nnumpy \r\npandas "
},
{
"alpha_fraction": 0.8454545736312866,
"alphanum_fraction": 0.8454545736312866,
"avg_line_length": 53.75,
"blob_id": "5f4f12f3de6573d2decb431f4835a993856fe817",
"content_id": "32ff20ea8cd3db585d667303a415546709ed7a28",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 225,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 4,
"path": "/README.md",
"repo_name": "diegosantosmartinss/Streamlit",
"src_encoding": "UTF-8",
"text": "# Streamlit\nAplicativo de dados em linguagem de programação Python utilizando biblioteca Streamlit\n\nO aplicativo está disponível neste endereço:https://share.streamlit.io/diegosantosmartinss/streamlit/main/first-app.py \n"
}
] | 3 |
caschne/quondam-link | https://github.com/caschne/quondam-link | 200d8750ea750883a53c3c05173552c98de10d00 | 2c77c585211f50d1f3b744a443853f85d6a0f980 | b14887767ede3ba25396acf38a1f548d03d3121a | refs/heads/master | 2019-01-23T01:59:09.208697 | 2017-09-11T03:34:12 | 2017-09-11T03:34:12 | 97,521,395 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7194322943687439,
"alphanum_fraction": 0.7194322943687439,
"avg_line_length": 28.54838752746582,
"blob_id": "21321722774103615c9e7223dee50303ad2a851b",
"content_id": "f727193724148189acb9c8278d082dc88cb914b5",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 31,
"path": "/src/core/views.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.views import LoginView , LogoutView\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import FormView\nfrom core.forms import ContactForm\nfrom blog.models import Post\n# Create your views here.\n\nclass Home(TemplateView):\n\t\ttemplate_name = \"home.html\"\n\n\t\tdef get_context_data(self, *args, **kwargs):\n\t\t\tcontext = super(Home , self).get_context_data(**kwargs)\n\t\t\tcontext['object'] = Post.objects.publish().order_by('-created_at')\n\t\t\treturn context\n\nclass AboutMe(TemplateView):\n\t\ttemplate_name\t\t\t\t= \"about.html\"\n\nclass ContactMe(FormView):\n\t\ttemplate_name\t\t\t\t= \"contact.html\"\n\t\tform_class\t\t\t\t\t= ContactForm\n\nclass Login(LoginView):\n template_name = \"authenticate/login.html\"\n\nclass Logout(LogoutView):\n next_page = \"core:home\"\n\nclass Disclaimer(TemplateView):\n template_name = \"disclaimer.html\"\n"
},
{
"alpha_fraction": 0.621052622795105,
"alphanum_fraction": 0.621052622795105,
"avg_line_length": 42.846153259277344,
"blob_id": "b0f7c5baa3bf823997c8a23328dead3b29059096",
"content_id": "09e5e5c9cd2d9dcfd918cb995be2f8465bcd6f8d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 570,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 13,
"path": "/src/blog/urls.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\nfrom blog.views import CreatePost , EditPost , CreateCategory , CreateTag , PostDetails , Blog\n\n\nurlpatterns = [\n\t\turl(r'^$' , Blog.as_view() , name=\"list\") ,\n url(r'^create/$' , CreatePost.as_view() , name=\"create\" ) ,\n\t\turl(r'^categories/create/$', CreateCategory.as_view() , name=\"category-create\") ,\n\t\turl(r'^tags/create/$' , CreateTag.as_view() , name=\"tag-create\") ,\n\t\turl(r'^(?P<slug>[\\w\\d-]+)/edit/$' , EditPost.as_view() , name=\"edit\") ,\n\t\turl(r'^(?P<slug>[\\w\\d-]+)/details/$' , PostDetails.as_view() , name=\"details\" ) ,\n]\n"
},
{
"alpha_fraction": 0.6359447240829468,
"alphanum_fraction": 0.644173800945282,
"avg_line_length": 38.97368240356445,
"blob_id": "0195b0beb6a563de6fde0dfb873d47bab57e8207",
"content_id": "012e71fbf6711f486112d365cf457ba0a9e56ba0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3038,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 76,
"path": "/src/pages/models.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.db.models.signals import pre_save\nfrom pages.utils import make_generated_slug, LAYOUT_DISPLAY , ADS\n# model QuerySets\nclass PageQuerySet(models.query.QuerySet):\n\tdef active(self):\n\t\treturn self.filter(active=True)\n\n\tdef featured_qs(self):\n\t\treturn self.filter(featured=True)\n\nclass AWeberQuerySet(models.query.QuerySet):\n\tpass\n# model Managers\nclass PageManager(models.Manager):\n\tdef get_queryset(self,*args,**kwargs):\n\t\treturn PageQuerySet(self.model , using=self._db)\n\n\tdef active_products(self , *args, **kwargs):\n\t\treturn self.get_queryset().active()\n\n\tdef featured(self , *args, **kwargs):\n\t\treturn self.get_queryset().featured_qs()\n\nclass AWeberManager(models.Manager):\n\tpass\n# Create your models here.\nclass Page(models.Model):\n product = models.CharField(max_length=200)\n product_description = models.CharField(max_length=20 , choices=ADS , default=\"ad-one\")\n btn_title = models.CharField(max_length=200)\n btn_title_url = models.URLField(default=\"\" , blank=True, null=True)\n display = models.CharField(max_length=20 ,\n choices=LAYOUT_DISPLAY , default=\"cliff\")\n content = models.TextField(default=\"\", blank=True, null=True )\n slug = models.SlugField(unique=True , blank=True)\n active = models.BooleanField(default=True)\n featured = models.BooleanField(default=False)\n aweber = models.ForeignKey('AWeberList' , related_name=\"list\", null=True,\n\t\t blank=True)\n affiliate_link = models.URLField(default=\"\", blank=True, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = PageManager()\n\n def __str__(self):\n return self.product\n\n def save(self, *args, **kwargs):\n if self.featured:\n qs = Page.objects.featured().exclude(pk=self.pk)\n if qs.exists:\n qs.update(featured=False)\n super(Page, self).save(*args, **kwargs)\n\nclass AWeberList(models.Model):\n\tlist_name\t\t\t\t\t\t\t\t\t= models.CharField(max_length=200)\n\tredirect_url\t\t\t\t\t\t\t= models.URLField()\n\trequired\t\t\t\t\t\t\t\t\t= models.CharField(max_length=5)\n\tmeta_message\t\t\t\t\t\t\t= models.TextField(default=\"\" , blank=True , null=True)\n\tweb_form_id\t\t\t\t\t\t\t\t= models.CharField(max_length=20)\n\tmeta_adtracking\t\t\t\t\t\t= models.CharField(max_length=200, blank=True, null=True)\n\tmeta_forward_vars\t\t\t\t\t= models.CharField(max_length=200, blank=True, null=True)\n\tmeta_tooltip\t\t\t\t\t\t\t= models.CharField(max_length=200, blank=True, null=True)\n\n\tdef __str__(self):\n\t\treturn self.list_name\n\tclass Meta:\n\t\tverbose_name = \"List\"\n\t\tverbose_name_plural = 'Lists'\n\ndef pre_save_slug_reciever(sender, instance, *args, **kwargs):\n\tif not instance.slug:\n\t\tinstance.slug = make_generated_slug(instance)\n\npre_save.connect(pre_save_slug_reciever , sender=Page)\n"
},
{
"alpha_fraction": 0.6207430362701416,
"alphanum_fraction": 0.6207430362701416,
"avg_line_length": 42.06666564941406,
"blob_id": "b217e35976a6c020cbe6f1b9e4cc28dae2b4016b",
"content_id": "d47863306f5a7ce26f007c7154abd8c32a3068bb",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 646,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 15,
"path": "/src/blog/forms.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom blog.models import Post, Category, Tag\n\nclass CreatePostForm(forms.ModelForm):\n\t\tclass Meta:\n\t\t\t\tmodel = Post\n\t\t\t\texclude = ['published' ,'slug']\n\t\t\t\twidgets = { 'title': forms.TextInput(attrs={'class':'form-control' ,}) ,\n\t\t\t\t\t\t\t\t\t\t'author': forms.Select(attrs={'class':'form-control' ,}) ,\n\t\t\t\t\t\t\t\t\t\t'post_text': forms.Textarea(attrs={'class':'form-control' ,}) ,\n\t\t\t\t\t\t\t\t\t\t'featured_image': forms.FileInput(attrs={'class':'form-control-file'}) ,\n\t\t\t\t\t\t\t\t\t\t'category': forms.Select(attrs={'class':'form-control' ,}) ,\n\t\t\t\t\t\t\t\t\t\t'tags': forms.CheckboxSelectMultiple(attrs={'class':'form-check-input' ,}) ,\n\n\t\t\t\t}\n"
},
{
"alpha_fraction": 0.7694588303565979,
"alphanum_fraction": 0.7746478915214539,
"avg_line_length": 121.63636016845703,
"blob_id": "def86709aa90d12ec5e5cfd4e53cc21f7bc584f9",
"content_id": "8b697b9dd7007bd9f81427c856740064fdb29c20",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1349,
"license_type": "permissive",
"max_line_length": 541,
"num_lines": 11,
"path": "/src/core/templates/disclaimer.html",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "{% extends 'layout/base.html' %}\n{% block title %} Disclaimer {{ block.super }} {% endblock %}\n{% block content %}\n<h2 class=\"page-header\">Disclaimer</h2>\n<p>This policy is valid from September 5th 2017</p>\n<p>This blog is a personal blog written and edited by me. For questions about this blog, please contact Curtis at [email protected].</p>\n<p>This blog accepts forms of cash advertising, sponsorship, paid insertions or other forms of compensation.</p>\n<p>The compensation received will never influence the content, topics or posts made in this blog. All advertising is in the form of advertisements generated by a third party ad network. Those advertisements will be identified as paid advertisements.</p>\n<p>The owner(s) of this blog is not compensated to provide opinion on products, services, websites and various other topics. The views and opinions expressed on this blog are purely the blog owners. If we claim or appear to be experts on a certain topic or product or service area, we will only endorse products or services that we believe, based on our expertise, are worthy of such endorsement. Any product claim, statistic, quote or other representation about a product or service should be verified with the manufacturer or provider.</p>\n<p>This blog does not contain any content which might present a conflict of interest.</p>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.4137931168079376,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 13.5,
"blob_id": "b154763ba1cc6c470466e983e6ef7dc87e33550b",
"content_id": "7d122bbabfa146e3ca4130e998dfebe8901ca23c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "Django==1.11.4\nolefile==0.44\nPillow==4.2.1\npip==9.0.1\npsycopg2==2.7.1\npytz==2017.2\nsetuptools==36.2.0\nwheel==0.29.0\n"
},
{
"alpha_fraction": 0.7470588088035583,
"alphanum_fraction": 0.7470588088035583,
"avg_line_length": 20.25,
"blob_id": "e59520dc2a013ce1d9592c9a51b7440f8824f05f",
"content_id": "ccde11361141b9a6349749103e5c8062df10a191",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 8,
"path": "/src/core/templatetags/sidebar.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django import template\nfrom blog.models import Post\nregister = template.Library()\n\[email protected]_tag\ndef get_posts():\n\treturn { 'posts': Post.objects.all()\n\t}\n"
},
{
"alpha_fraction": 0.6446866393089294,
"alphanum_fraction": 0.6474114656448364,
"avg_line_length": 30.63793182373047,
"blob_id": "6b1e723b92987007c2fe9db4c71c8014d8e376cf",
"content_id": "cd89ef27c69a27beff0d76e77a00263d7fb1680c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1835,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 58,
"path": "/src/blog/models.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.db import models\nfrom blog.utils import make_new_slug\n\nclass PostQuerySet(models.query.QuerySet):\n\tdef published(self):\n\t\treturn self.filter(published=True)\n\nclass PostManager(models.Manager):\n\tdef get_queryset(self, *args, **kwargs):\n\t\treturn PostQuerySet(self.model , using=self._db)\n\n\tdef publish(self , *args, **kwargs):\n\t\treturn self.get_queryset().published()\n\n# Create your models here.\nclass Post(models.Model):\n\n title = models.CharField(max_length=255)\n author = models.ForeignKey(User , related_name=\"author\" )\n post_text = models.TextField(default=\"\")\n featured_image = models.ImageField(blank=True, null=True)\n published = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n slug = models.SlugField(unique=True , blank=True )\n category = models.ForeignKey('Category' , on_delete=models.SET_NULL , blank=True, null=True )\n tags = models.ManyToManyField('Tag' , blank=True, )\n objects = PostManager()\n\n def __str__(self):\n return self.title\n\n def save(self, *args , **kwargs):\n self.slug = make_new_slug(self, self.title)\n super(Post , self).save(*args, **kwargs)\n\nclass Label(models.Model):\n\tlabel\t\t\t\t\t\t\t\t\t= models.CharField(max_length=45)\n\tslug\t\t\t\t\t\t\t\t\t= models.SlugField(unique=True , blank=True)\n\n\tdef __str__(self):\n\t\treturn self.label\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = make_new_slug(self , self.label)\n\t\tsuper(Label, self).save(*args,**kwargs)\n\n\tclass Meta:\n\t\tabstract = True\n\nclass Category(Label):\n\n\tclass Meta:\n\t\tverbose_name_plural = 'Categories'\n\nclass Tag(Label):\n\tpass\n"
},
{
"alpha_fraction": 0.525612473487854,
"alphanum_fraction": 0.5300667881965637,
"avg_line_length": 31.071428298950195,
"blob_id": "c2ac44649be24a1ddd4aed62fb2a112f5806d193",
"content_id": "7505dca54862908a62fb7d9aaac65b61db2ea47b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 449,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 14,
"path": "/src/blog/utils.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.utils.text import slugify\n\ndef make_new_slug(instance, slug=None):\n num = 1\n Klass = instance.__class__\n new_slug = slugify(slug)\n qs_exists = Klass.objects.filter(slug=new_slug).exists()\n if slug is not None:\n slug = new_slug\n if qs_exists:\n num += 1\n slug = '%s-%s' % (new_slug, num)\n make_new_slug(instance , slug=slug)\n return slug\n"
},
{
"alpha_fraction": 0.6232678294181824,
"alphanum_fraction": 0.6490492820739746,
"avg_line_length": 37.787498474121094,
"blob_id": "46d510cc16316982bfbc5da2655b2f111188b339",
"content_id": "1010835b582da1179f09399914843e9013dec205",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3103,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 80,
"path": "/src/pages/utils.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "import random\nimport string\n\ndef random_string_generator(size=7, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\ndef make_generated_slug(instance, new_slug=None):\n\n\tif new_slug is not None:\n\t\tslug = new_slug\n\telse:\n\t\tslug = random_string_generator()\n\n\tKlass = instance.__class__\n\tqs_exists = Klass.objects.filter(slug=slug).exists()\n\tif qs_exists:\n\t\tslug = random_string_generator()\n\treturn slug\n\n\treturn slug\n\n\n\nLAYOUT_DISPLAY = [\n ('cliff' , 'Optin One') ,\n ('sunset' , 'Optin Two') ,\n ('beach' , 'Optin Three') ,\n ('car' , 'Optin Four') ,\n ('island' , 'Optin Five') ,\n ('city' , 'Optin Six') ,\n ('cars' , 'Optin Seven') ,\n ('trees' , 'Optin Eight') ,\n ('dl-one' , 'Download One') ,\n ('dl-two' , 'Download Two ') ,\n ('dl-three' , 'Download Three') ,\n ('dl-four' , 'Download Four') ,\n]\n\nADS \t\t\t\t\t\t= [\n\t('ad-one' , 'UNDERGROUND $55/day System' +\n\t' START NOW!\\n 2 Step System Makes You $55/day On AUTOPILOT!') ,\n\t('ad-two' , 'Start Banking TODAY' +\n\t' FREE Newbie Course Shows How To Earn Over $121.27 Per Day Online.') ,\n\t('ad-three' , 'Make Money FAST' +\n\t' Earn $50 In 8 Minutes Working Online From Home. START FREE NOW!') ,\n\t('ad-four' , 'How To Make $283.03 Now' +\n\t' All Done For You. Newbies Are Easily Banking $283.03 Per Day!') ,\n\t('ad-five' , 'Fastest Way to Make Earn' +\n\t' No Other System Earns Daily Income This Fast. FREE VIDEO!') ,\n\t('ad-six' , 'Earn $3000 A Month' +\n\t' See How You Can Earn $3,000 Online In As Little As 30 Days.') ,\n\t('ad-seven' , '2 STEP RESULTS' +\n\t' World\\'s Easiest Blueprint For Making Money Online On AUTOPILOT!') ,\n\t('ad-eight' , 'I Made $892 In 7 Days' +\n\t' FREE VIDEO Shows You How I Made $892 Using This One Crazy Trick!') ,\n\t('ad-nine' , 'Earn Money FAST' +\n\t' Make $497.16 Per Week Even If You\\'re A Newbie. START FREE NOW!') ,\n\t('ad-ten' , 'GRANDPA Made $550.15' +\n\t' 80-Year-Old Grandpa Made $550.15 His First Week. FREE VIDEO!') ,\n\t('ad-eleven' , 'FREE System makes $58/Day' +\n\t' FREE DOWNLOAD! Complete guide to making $58/Day Online on AUTOPILOT.') ,\n\t('ad-twelve' , 'Couch CASH Method' +\n\t' With A Computer And An Internet Connection You Can Make $283.08/Day.') ,\n\t('ad-thirtteen' , 'Make Money Online FAST!' +\n\t' Make 3k In 7 Days With This FREE 2 Step System. Grab Your Free Spot!') ,\n\t('ad-fourteen' , 'Earn a REAL income online' +\n\t' Simple System Allows Any Newbie To Earn An Income Online FAST.') ,\n\t('ad-fifthteen' , 'Get Paid TODAY' +\n\t' Powerful $500+/day NO COST Traffic System. START FREE NOW!') ,\n\t('ad-sixteen' , 'Make Money Online FREE' +\n\t' Clever UNDERGROUND $45+/Day Method Anyone Can Do. START FREE NOW!') ,\n\t('ad-seventeen' , 'Make Money Online NOW' +\n\t' Quiet Way To Profit Daily From Home Revealed. Click Here To START.') ,\n\t('ad-eightteen' , 'Copy, Paste PROFIT' +\n\t' Stupid Simple $3k/Month Copy & Paste System Anyone Can Do. See Here!') ,\n\t('ad-nineteen' , 'Fast CASH Blueprint' +\n\t' OVERNIGHT $147/day plan. See The Step By Step Blueprint Here!') ,\n\t('ad-twenty' , 'Fast CASH System' +\n\t' Fresh Off The Street Newbies Are Making Daily Cash. FREE VIDEO HERE!') ,\n]\n"
},
{
"alpha_fraction": 0.7784653306007385,
"alphanum_fraction": 0.7784653306007385,
"avg_line_length": 30.076923370361328,
"blob_id": "b508ced91c297efd04bf6b3513d3fd708a49a261",
"content_id": "f026c2f231cd47c01e271f9b24716c0ce0387b7e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 808,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 26,
"path": "/src/pages/views.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.views.generic import DetailView , TemplateView\nfrom django.views.generic.list import ListView\nfrom pages.models import Page\n# Create your views here.\n\nclass ProductSqueezePage(DetailView):\n\tmodel\t\t\t\t\t= Page\n\ttemplate_name = 'product_detail.html'\n\nclass ProductDownloadPage(TemplateView):\n\ttemplate_name = 'product_download.html'\n\n\nclass ProductListPage(LoginRequiredMixin, ListView):\n\ttemplate_name = 'product_list.html'\n\tmodel\t\t\t\t\t= Page\n\nclass ProductHomePage(TemplateView):\n\ttemplate_name = 'product_detail.html'\n\n\tdef get_context_data(self, *args, **kwargs):\n\t\tcontext = super(ProductHomePage ,self).get_context_data(**kwargs)\n\t\tcontext['object'] = Page.objects.featured().first()\n\t\treturn context\n"
},
{
"alpha_fraction": 0.7444444298744202,
"alphanum_fraction": 0.7444444298744202,
"avg_line_length": 43.5,
"blob_id": "bb2feb61a2c6a2c5ffc11b53902f0aaa342393d8",
"content_id": "3f7433cbf30c059948db978d13cec2f6f0f40935",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 2,
"path": "/README.md",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "# quondam-link\nA django project for a little a website I don't know what it will be yet. \n"
},
{
"alpha_fraction": 0.8159509301185608,
"alphanum_fraction": 0.8159509301185608,
"avg_line_length": 31.600000381469727,
"blob_id": "daa459a788c304f207c838d17d4968e8b9558e22",
"content_id": "5fca74b491cc2c89f7ce940a727d5c84aeeb2f6b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 163,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 5,
"path": "/src/pages/admin.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom pages.models import Page , AWeberList\n# Register your models here.\nadmin.site.register(Page)\nadmin.site.register(AWeberList)\n"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 14,
"blob_id": "3e6a89ea7bb8c7fe0bb72f75e190509c6aaaf7ef",
"content_id": "589694b510e5caf6a278915ace61a0d942d77ded",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 60,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 4,
"path": "/src/quondom_links/settings/productions.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from quondom_links.settings.base import *\n\n\n# Override Here\n"
},
{
"alpha_fraction": 0.6734693646430969,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 48,
"blob_id": "43a04bf7dc63640faac5fa72971300cffb687406",
"content_id": "f1bd3cda7f66843afc185c1ab5f3d63c15ce6c4d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 441,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 9,
"path": "/src/pages/urls.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom pages.views import ProductSqueezePage , ProductListPage , ProductHomePage ,ProductDownloadPage\n\nurlpatterns = [\n\turl(r'^$' , ProductHomePage.as_view() , name=\"home\") ,\n\turl(r'^list/$' , ProductListPage.as_view() , name='list') ,\n\turl(r'^(?P<slug>[\\w\\d]+)/$' , ProductSqueezePage.as_view() , name=\"product\") ,\n\turl(r'^(?P<slug>[\\w\\d]+)/download/$' , ProductDownloadPage.as_view() , name=\"download\") ,\n]\n"
},
{
"alpha_fraction": 0.6387394070625305,
"alphanum_fraction": 0.6387394070625305,
"avg_line_length": 33.23684310913086,
"blob_id": "737ffd330bfd2eaab6385c3b0051c2a6ca1d1418",
"content_id": "8a20d290a0875ec1acb6f501e2880850f2a927b8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1301,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 38,
"path": "/src/blog/views.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.views.generic.edit import CreateView , UpdateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.urls import reverse_lazy\n\nfrom blog.forms import CreatePostForm\nfrom blog.models import Post , Category , Tag\n# Create your views here.\n\nclass CreatePost(LoginRequiredMixin, CreateView):\n model = Post\n template_name = \"create_post.html\"\n form_class = CreatePostForm\nclass Blog(ListView):\n\t\tmodel \t\t\t\t\t\t\t\t\t\t\t\t\t= Post\n\t\ttemplate_name\t\t\t\t\t\t\t\t\t\t= \"blog_list.html\"\n\t\t\nclass EditPost(LoginRequiredMixin , UpdateView):\n\t\tmodel\t\t\t\t\t\t\t\t\t\t\t\t\t\t= Post\n\t\ttemplate_name\t\t\t\t\t\t\t\t\t\t= \"edit_post.html\"\n\t\tform_class\t\t\t\t\t\t\t\t\t\t\t= CreatePostForm\n\nclass PostDetails(DetailView):\n\t\tmodel\t\t\t\t\t\t\t\t\t\t\t\t\t\t= Post\n\t\ttemplate_name\t\t\t\t\t\t\t\t\t\t= \"detail_post.html\"\n\nclass CreateCategory(LoginRequiredMixin , CreateView):\n\t\tmodel \t\t\t\t\t\t\t\t\t\t\t\t\t= Category\n\t\ttemplate_name\t\t\t\t\t\t\t\t\t\t= 'create_label.html'\n\t\tfields\t\t\t\t\t\t\t\t\t\t\t\t\t= [ 'label' ]\n\n\nclass CreateTag(LoginRequiredMixin , CreateView):\n\t\tmodel \t\t\t\t\t\t\t\t\t\t\t\t\t= Tag\n\t\ttemplate_name\t\t\t\t\t\t\t\t\t\t= 'create_label.html'\n\t\tfields\t\t\t\t\t\t\t\t\t\t\t\t\t= [ 'label' ]\n"
},
{
"alpha_fraction": 0.6098562479019165,
"alphanum_fraction": 0.6098562479019165,
"avg_line_length": 39.58333206176758,
"blob_id": "f1b99c9b91411bd8fac2bfa1ce301a9f3d0c12d2",
"content_id": "a69436fd8517056f2088e87ea85777b1327d0bc3",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 12,
"path": "/src/core/urls.py",
"repo_name": "caschne/quondam-link",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom core.views import Home , Login , Logout , Disclaimer , AboutMe , ContactMe\n\n\nurlpatterns = [\n url(r'^$', Home.as_view() , name=\"home\"),\n\t\turl(r'^about-me/$' , AboutMe.as_view() , name=\"aboutme\") ,\n\t\turl(r'^contact/$' , ContactMe.as_view() , name=\"contact\") ,\n url(r'^login/$' , Login.as_view() , name=\"login\") ,\n url(r'^logout/$' , Logout.as_view(), name=\"logout\") ,\n url(r'^disclaimer/$' , Disclaimer.as_view() , name=\"disclaimer\") ,\n]\n"
}
] | 17 |
Jnfuhriman/rickRollingLogin | https://github.com/Jnfuhriman/rickRollingLogin | 65983cdbb141e3a5e587d5b64b8ad3356e77d69b | 3c4c857965d888e7cedf40cab0fab06290fcc85e | f49ebbf5d6d1afb0a37b77bcb15463f6000ffc6d | refs/heads/master | 2022-11-30T02:02:51.283427 | 2020-08-12T04:49:36 | 2020-08-12T04:49:36 | 286,919,003 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7719298005104065,
"alphanum_fraction": 0.7719298005104065,
"avg_line_length": 27.5,
"blob_id": "b6a4c191bdce512abd817f4340dd97e7b10343b2",
"content_id": "c97ec843b645ca363e2a28857929880546238d6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Jnfuhriman/rickRollingLogin",
"src_encoding": "UTF-8",
"text": "# rickRollingLogin\nFail the login and get Rick Rolled...\n"
},
{
"alpha_fraction": 0.5113236308097839,
"alphanum_fraction": 0.5153497457504272,
"avg_line_length": 27.25,
"blob_id": "e1591fdb1449a8e68068a621fdb3add3a8691f64",
"content_id": "d3cb1c00c9ec487e79f40b410e48d6b27f92e400",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1987,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 68,
"path": "/terminalLogin.py",
"repo_name": "Jnfuhriman/rickRollingLogin",
"src_encoding": "UTF-8",
"text": "import webbrowser\r\n\r\ndef fail():\r\n site = \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\"\r\n webbrowser.open_new_tab(site)\r\n\r\ndef addUser(cred):\r\n while True:\r\n newUser = input(\"Enter a unique username: \")\r\n if newUser in cred:\r\n print('That username already exists\\n')\r\n else:\r\n break\r\n while True:\r\n newPass = input(\"Enter a password: \")\r\n verifyPass = input(\"Re-enter password to verify: \")\r\n if newPass == verifyPass:\r\n break\r\n else: print('Passwords must match')\r\n createUser(newUser, newPass)\r\n\r\ndef createUser(user, password):\r\n f = open('userPasswords.txt','a')\r\n userPass = user + \":\" + password\r\n f.write(\"\\n\")\r\n f.write(userPass)\r\n f.close()\r\n print(\"Successfully added user: \", user)\r\n print(\"Logging out now\")\r\n main()\r\n\r\ndef loggedIn(user, cred):\r\n if user == 'ad':\r\n print(\"Welcome admin\\n\")\r\n adminChoice = input('Would you like to add a user?')\r\n if adminChoice == 'yes':\r\n addUser(cred)\r\n else: \r\n print(\"Welcome \", user)\r\n\r\n\r\ndef main():\r\n attempts = 3\r\n while attempts > 0:\r\n username = input(\"Username: \")\r\n password = input(\"Password: \")\r\n f = open('userPasswords.txt', \"r\")\r\n credDict = {}\r\n while True:\r\n line = f.readline()\r\n if not line:\r\n break\r\n else:\r\n tempLine = line.split(':')\r\n credDict[tempLine[0]] = tempLine[1].rstrip()\r\n if username in credDict and password == credDict[username]:\r\n loggedIn(username, credDict)\r\n break\r\n else:\r\n attempts = attempts - 1\r\n if attempts == 0:\r\n print(\"No more attempts, exiting program\")\r\n fail()\r\n break\r\n else:\r\n print(\"invalid credentials, attempts remaining: \", attempts, \"\\n\")\r\n f.close()\r\nmain()"
}
] | 2 |
Joaongm/Projeto-RunAway | https://github.com/Joaongm/Projeto-RunAway | daea138c60340dd3e0e6b30c0eb61fe86783d62f | 73b478715f1a977fa686e67aafde66a71ceecfe8 | 9c1a28a0e3951fc320389b44cf0bf43463818aa6 | refs/heads/main | 2023-05-31T21:21:08.589702 | 2021-05-29T19:24:14 | 2021-05-29T19:24:14 | 370,804,584 | 0 | 0 | MIT | 2021-05-25T19:21:36 | 2021-05-25T19:19:02 | 2021-05-25T19:19:00 | null | [
{
"alpha_fraction": 0.4877192974090576,
"alphanum_fraction": 0.4964912235736847,
"avg_line_length": 19.77777862548828,
"blob_id": "ab0d614959fa81127896baf4dc98ab6fa735bba6",
"content_id": "b0fdf7d138e009dda6128a6fa412dd70e0d4a9aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 570,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 27,
"path": "/TIMER.py",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "import time\n\nclass Timer:\n def __init__(self, value = 0):\n self.start = time.time()\n self.set(value)\n\n def start(self, args):\n self.timer = Timer(10)\n\n def update(self):\n print(self.timer.get())\n\n \n def set(self, value : float):\n self.start = time.time() - value\n\n\n def get(self) -> float:\n return time.time() - self.start \n \n def __str__(self):\n value = self.get()\n sec = int(value)\n min = int(sec/60)\n sec=int(value)\n return str(min) +'m '+str(sec)+'s ' \n "
},
{
"alpha_fraction": 0.4037165641784668,
"alphanum_fraction": 0.41382479667663574,
"avg_line_length": 49.74611282348633,
"blob_id": "f0d4fd4d8c5b3906c3ca7cfb958444075665ff03",
"content_id": "c7cc4306e26216bbde775dadfc3064699bd29e87",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9875,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 193,
"path": "/segundaSala.py",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "from personagem import Personagem\nfrom relogio import Relógio\nfrom funcoes import Funções\nfrom time import sleep\nfrom CORES import cores\nimport os\nimport pygame\nclass salaBranca(Funções, Personagem):\n def __init__(self, *args, genero, **kwargs):\n self.cadeira = True\n self.abajur = True\n self.quadro = True\n self.luminaria = True\n self.estatua = True\n self.escolha = 0\n self.objeto = 0\n super().__init__(*args, genero = genero, **kwargs)\n\n def acao(self):\n relogio = Relógio(30, self.nome, self.altura, self.atributo)\n \n numeroSala = False\n \n print(' '*20, end='')\n print('', '__________________________')\n print(' '*20, end='')\n print('|', ' '*24, '|')\n print(' '*20, end='')\n print('|', ' '*7, ' SALA 2 ', ' '*7, '|')\n print(' '*20, end='')\n print('|__________________________|')\n print('\\n'*2)\n \n sala = 'Após a sua primeira vitória, você segue para uma sala branca'\n \n fraseSala = f\"Lembre-se que acima da porta a um {cores['red']}relógio {cores['limpa']}marcando {relogio.minutos} minutos,\\n\"\n while True:\n \n \n print('[1] - cadeira\\n[2] - espelho\\n[3] - quadro\\n[4] - baú\\n[5] - estatua\\n[6] - candelabro\\n[7] - estojo de remédios\\n')\n self.objeto = input('Qual objeto deseja interagir escolha?: ')\n\n if self.objeto == '1':\n print('Você escolheu a cadeira...')\n print(\n '[1] - Vasculhar cadeira\\n[2] - Jogar cadeira na parede\\n[3] - Sentar para descansar')\n self.escolha = int(input('>>> '))\n if self.escolha == 1:\n ler = input(\n 'Você encontrou uma carta grudada na cadeira, deseja ler?[sim/não]: ').upper()\n if ler == 'SIM':\n print('***Mensagem sobre a vida***')\n print(\n 'Começamos bem, porém ainda não encontramos uma forma de sair daqui, vamos continuar')\n else:\n print('Ok, vamos continuar')\n elif self.escolha == 2:\n print(\n \"Você quebrou a cadeira na parede, mas um pedaço de farpa machucou seu braço\")\n print('**Decrementa tempo**')\n ler = input(\n 'Você percebeu que nos pedaços de madeira quebrados tem uma carta, deseja ler?').upper()\n if ler == 'SIM':\n print(\n 'Você está um pouco \"sonzo\", e esta perdendo tempo lendo essa carta, vá enfaixar seu braço')\n print('Caso contrário perderá mais tempo que o normal')\n print('**decrementa tempo**')\n print()\n print(\n '[1] - cadeira\\n[2] - abajur\\n[3] - quadro\\n[4] - luminaria\\n[5] - estatua\\n[6] - candelabro\\n[7] - estojo de remédios\\n')\n recuperarVida = int(input('>>> '))\n if recuperarVida == 7:\n print(\n 'Boa escolha, dentro do estojo de remédios temos analgésicos e gaze para o seu machucado')\n print(\"Pode continuar sem nenhum problema\")\n # Decrementa normalmente\n else:\n # Decrementa 5 minutos a cada 10 minutos passados\n continue\n\n elif self.escolha == 3:\n print('Que situação maluca, descansar é sempre bom!')\n print('Você conseguiu enxergar o reflexo de alguma coisa')\n print('O que deseja fazer?')\n\n print(\n '[1] - cadeira\\n[2] - espelho\\n[3] - quadro\\n[4] - luminaria\\n[5] - estatua\\n[6] - candelabro\\n[7] - estojo de remédios\\n')\n self.objeto = input('Qual objeto deseja interagir escolha?: ')\n if self.objeto == '2':\n print('O que você viu no espelho?')\n print(\n '[1] - Dinossauro\\n[2] - guarda - roupa\\n[3] - Estatua')\n visualizar = int(input('>>> '))\n if visualizar != 3:\n print(\n 'Você está muito cansado, é impossível isso estar na sala, descanse 5 minutos')\n # Decrementa 5 mintuos\n else:\n print(\n 'A sala realmente tem uma estatua, o que quer fazer?')\n print('''\n [1] - Quebrar a estatua \n [2] - Girar a estatua\n [3] - Não 
fazer nada\n ''')\n estatua = int(input('>>> '))\n if estatua == 1:\n print(\n 'Seu emocional está quebrado... Você está quebrando objetos como se não ouvesse consequencias')\n # Decrementa 1 minuto\n elif estatua == 2: # =================\\\\FIM //============================\n print(\n 'Boa escolha, você girou a estatua com delicadeza, isso possibilitou a porta de saida abrir')\n break\n elif estatua == 3:\n continue\n\n elif self.objeto == '2':\n print('O que você viu no espelho?')\n print('[1] - Seu próprio reflexo\\n[2] - Qudaro \\n[3] - Violão')\n visualizar = int(input('>>> '))\n\n if visualizar == 1:\n print('''\n Você é uma pessoa muito bonita, porém \n já tem problemas o suficiente para ficar se olhando no espelho\n ''')\n # Decrementa 5 minutos\n elif visualizar == 2:\n print('''\n É um quadro lindo, deseja chegar mais perto?\n ''')\n quadro = input(' ').upper()\n\n if quadro == 'SIM':\n print(\n 'O quadro da sala é a Dama com Arminho de Leonardo da Vince')\n print('Que linda peça!')\n print('''\n [1] - Vasculhar o quadro\n [2] - Admirar o quadro\n [3] - Rasgar o quadro''')\n acao = input('O que deseja fazer? ')\n\n if acao == 1:\n print('Você não encontrou nada...')\n # Decrementa 5\n elif acao == 2:\n print(\n 'Essa realmente é uma obra muito bonita, entendo você querer adimirá-la')\n print('Porem seu tempo está correndo')\n # Decrementa 5 minutos\n elif acao == 3:\n print(\n 'Que loucura rasgar um quadro tão lindo quanto esse...Da Vince acaba de se revirar no tumulo')\n print(\n 'Porem situações desesperadas pedem medidas desesperadas')\n print(\n 'Você encontrou dentrou do quadro uma chave... O que deseja fazer?')\n chave = True\n print(\n '[1] - cadeira\\n[2] - espelho\\n[3] - quadro\\n[4] - luminaria\\n[5] - estatua\\n[6] - candelabro\\n[7] - estojo de remédios\\n')\n self.objeto = input(\n 'Qual objeto deseja interagir escolha?: ')\n if self.objeto == '5' and chave == True:\n print(\n 'Abrir o baú com a chave foi uma ótima escolha, nele se encontra uma foto')\n print(\n 'Nela se encontra Rodin, ao lado de uma de suas obras mais famósas')\n print(\n 'Porque alguém guardaria essa foto em um baú?')\n\n print(\n '[1] - cadeira\\n[2] - espelho\\n[3] - quadro\\n[4] - luminaria\\n[5] - estatua\\n[6] - candelabro\\n[7] - estojo de remédios\\n')\n self.objeto = input(\n 'Qual objeto deseja interagir escolha?: ')\n if self.objeto == '6' and chave == True:\n print(\n 'Que bela estátua, é uma répica da obra \"O Pensador de Agusto Rodin\"')\n print('''\n [1] - Quebrar a estatua \n [2] - Girar a estatua\n [3] - Não fazer nada\n ''')\n estatua = int(\n input('O que deseja fazer com ela? '))\n if estatua == 2:\n print(\n 'Boa escolha, você girou a estatua com delicadeza, isso possibilitou a porta de saida abrir')\n break\n else:\n print(\n 'Nada de diferente aconteceu, talvez fosse apenas uma impressão sua...')\n"
},
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 68,
"blob_id": "45df3e5fc68d1b6f8d97119e18c1422d026f399b",
"content_id": "d5d48634b9d832d3c0fecc76327b584d9a939ff1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 145,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "# Projeto-RunAway\nEsse projeto foi desenvolvido para a conclusão do módulo de lógica de programação da instituição de ensino Blue Edtech.\n"
},
{
"alpha_fraction": 0.37270602583885193,
"alphanum_fraction": 0.3835470974445343,
"avg_line_length": 41.509037017822266,
"blob_id": "ad00e0b4c045feac7dfd47ceaf00127313a3d25e",
"content_id": "76d388a6f7cb9305b6300cfa9d1d4045179ab93c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14207,
"license_type": "permissive",
"max_line_length": 229,
"num_lines": 332,
"path": "/salas.py",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "from personagem import Personagem\nfrom relogio import Relógio\nfrom funcoes import Funções\nfrom time import sleep\nfrom CORES import cores\nimport os\nimport pygame\n\n\nclass salaVermelha(Funções, Personagem):\n\n def __init__(self, *args, genero, **kwargs):\n self.escrivaninha = False\n self.estante = False\n self.armario = False\n self.guitarra = True\n self.mesaBilhar = False\n self.chave = False\n super().__init__(*args, genero=genero, **kwargs)\n\n def acao(self):\n\n relogio = Relógio(30, self.nome, self.altura, self.atributo)\n\n numeroSala = False\n\n print(' '*20, end='')\n print('', '__________________________')\n print(' '*20, end='')\n print('|', ' '*24, '|')\n print(' '*20, end='')\n print('|', ' '*7, ' SALA 1 ', ' '*7, '|')\n print(' '*20, end='')\n print('|__________________________|')\n print('\\n'*2)\n\n # tecla.play(-1)\n sala = \"Você acorda em uma sala com paredes vermelhas!\\n\\n\"\n # func.animation(sala)\n\n fraseSala1 = f\"Você consegue ver uma porta e em cima da porta, um {cores['red']}relógio {cores['limpa']}marcando {relogio.minutos} minutos,\\n\"\n # func.animation(fraseSala1)\n fraseSala2 = \"consegue ver uma escrivaninha, um toca disco, um armário, uma guitarra e uma mesa de bilhar...\\n\\n\"\n # func.animation(fraseSala2)\n # tecla.stop()\n sleep(1)\n\n pygame.init()\n tecla = pygame.mixer.Sound('teclado.ogg')\n while True:\n\n if numeroSala == True:\n\n tecla.play()\n print(' '*20, end='')\n print('', '__________________________')\n print(' '*20, end='')\n print('|', ' '*24, '|')\n print(' '*20, end='')\n print('|', ' '*7, ' SALA 1 ', ' '*7, '|')\n print(' '*20, end='')\n print('|__________________________|')\n print('\\n'*2)\n\n tecla.play(-1)\n contador = f\"Restam {relogio.minutos} minutos para escapar da sala!\\n\\n\"\n self.animation(contador)\n tecla.stop()\n\n numeroSala = True\n\n print(\"O que você deseja fazer?\\n\\n\",\n \"[1] - Abrir a porta\\n\",\n \"[2] - Vasculhar a sala\\n\",\n \"[3] - Quebrar porta\\n\")\n\n opcao2 = int(input(\"»» \"))\n print()\n\n if opcao2 == 1:\n\n relogio.corretempo(4)\n print(\"A porta está trancada...\")\n\n if self.chave == False:\n print(\"Você não tem a chave! Vasculhe a sala para encontra-la\")\n print()\n else:\n deseja = input(\"Você deseja usar a chave[sim/não]? \")\n\n if deseja == \"sim\":\n print(\n \"Parabéns!! 
Você abriu a porta e avançou para a próxima sala!\")\n relogio.minutos += 30\n input(\"Aperte enter para prosseguir...\")\n break\n elif deseja == \"nao\":\n print(\"Você achou melhor guardar a chave.\")\n\n sleep(3)\n os.system('clear')\n\n elif opcao2 == 2:\n\n sleep(1)\n print(\"Qual item deseja vasculhar?\\n\\n\",\n \"[1] - Escrivaninha\\n\",\n \"[2] - Toca disco\\n\",\n \"[3] - Armário\\n\",\n \"[4] - Guitarra\\n\",\n \"[5] - Mesa de bilhar\\n\")\n\n sleep(1)\n print()\n escolha = int(input('>> '))\n print()\n\n if escolha == 1:\n\n print(\"Você deseja: \\n\\n\",\n \"[1] - Vasculhar Escrivaninha\\n\",\n \"[2] - Olhar Escrivaninha\\n\",\n \"[3] - Empurrar Escrivaninha\\n\",\n \"[4] - Subir na Escrivaninha\\n\",)\n\n opcEscrivaninha = int(input(\">> \"))\n\n if opcEscrivaninha == 1:\n relogio.corretempo(4)\n print()\n print(\"Você não encontrou nada, {}só tem lixo!{}\".format(cores['red'], cores['limpa']))\n sleep(3)\n os.system('clear')\n elif opcEscrivaninha == 2:\n relogio.corretempo(4)\n print()\n print(\"É uma escrivaninha bonita e resistente!\")\n sleep(3)\n os.system('clear')\n elif opcEscrivaninha == 3:\n relogio.corretempo(4)\n\n if self.atributo == 'Força':\n print()\n print(\"Você empurrou a escrivaninha até a parede.\")\n sleep(3)\n escrivaninhanaparede = True\n os.system('clear')\n else:\n print(\"Você não tem força para empurrar a escrivaninha!\")\n print()\n sleep(3)\n os.system('clear')\n elif opcEscrivaninha == 4:\n\n if escrivaninhanaparede == True:\n print()\n print(\n 'Você pode ver uma pequena {}saída{} de ventilação próxima ao teto.'.format(cores['red'],cores['limpa']))\n sleep(5)\n relogio.corretempo(4)\n os.system('clear')\n if self.altura >= 1.8:\n print('Você alcança a saída, deseja subir?')\n janelinha=int(input('1-S / 0-N >>> '))\n if janelinha == 0:\n break\n elif janelinha == 1:\n print('Você pode se pendurar na janela, mas não tem {}força{} para passar para o outro lado, na parede do lado oposto está escrita a seguinte mensagem:'.format(cores['amarelo'],cores['limpa']))\n print('{}O conhecimento liberta!{}'.format(cores['amarelo'],cores['limpa']))\n\n else:\n print()\n print('O chão parece mais distante')\n sleep(5)\n relogio.corretempo(4)\n os.system('clear')\n\n elif escolha == 2:\n print('Você deseja:\\n\\n',\n '[1] - Vasculhar toca disco\\n',\n '[2] - Olhar toca disco\\n',\n '[3] - Tocar o disco\\n\\n')\n\n escolhaEstante = int(input('>> '))\n\n if escolhaEstante == 1:\n print()\n relogio.corretempo(4)\n print(\n 'Você descobriu a modelo do {}toca{} disco: Toca disco vinil air LP ion IT55'.format(cores['azul'],cores['limpa']))\n sleep(3)\n os.system('clear')\n elif escolhaEstante == 2:\n print()\n print(\n 'Econtrei um vinil,esta em ótimo estado, sera que o toca disco funciona? 
')\n sleep(3)\n relogio.corretempo(4)\n os.system('clear')\n elif escolhaEstante == 3:\n relogio.corretempo(4)\n if self.atributo == 'Inteligência':\n print(\n '**While my {}guitar{} gently weeps tem um lindo {}solo{}!'.format(cores['azul'],cores['limpa'],cores['amarelo',cores['limpa']]))\n else:\n print('O toca disco não funciona!')\n\n os.system('clear')\n elif escolha == 3:\n print('Você deseja:\\n\\n',\n '[1] - Abrir o armário\\n',\n '[2] - Olhar o armário\\n',\n '[3] - Empurrar o armário\\n\\n')\n\n escolhaArm = int(input('>> '))\n\n if escolhaArm == 1:\n print()\n print(\n 'Dentro do armário vc encontra um bilhete escrito: \"Pare de perder {}tempo!{}'.format(cores['red'],cores['limpa']))\n sleep(3)\n relogio.corretempo(4)\n os.system('clear')\n elif escolhaArm == 2:\n print()\n print('Você está {}tentando{} aprender marcenaria?'.format(cores['red'], cores['limpa']))\n sleep(5)\n relogio.corretempo(4)\n os.system('clear')\n elif escolhaArm == 3:\n if self.atributo == 'Força':\n print()\n print(\"Você derrubou o Armário.\")\n print('E encontrou uma dica: {}HOJE É DIA DE ROCK BEBÊ!!!{}'.format(\n cores['azul'], cores['limpa']))\n sleep(3)\n relogio.corretempo(4)\n os.system('clear')\n\n else:\n print()\n if ima == True:\n print(\"Você conseguiu pegar a {}chave{} com o {}imã!{}\".format(cores['red'],cores['limpa'],cores['azul'],cores['limpa']))\n self.chave = True\n else:\n print(\"Você não tem {}força{} para empurrar o Armário!\".format(cores['amarelo'],cores['limpa']))\n sleep(3)\n relogio.corretempo(4)\n os.system('clear')\n elif escolha == 4:\n print('É uma linda lespaul sunburn Stevie Ray signature 2001\\n')\n\n print('O que deseja:\\n\\n',\n '[1] - Limpar a Guitarra\\n',\n '[2] - Quebrar a Guitarra\\n',\n '[3] - Tocar a Guitarra\\n\\n ')\n\n escolhaGuitarra = int(input('>> '))\n\n if escolhaGuitarra == 1:\n print()\n print('Por que isso é importante?')\n sleep(3)\n relogio.corretempo(4)\n os.system('clear')\n elif escolhaGuitarra == 2:\n print()\n relogio.corretempo(8)\n print(\n 'Você quebrou a guitarra e a {}chave{} caiu embaixo do armário, procure algo para pegá-la, voce perdeu {}8 minutos{}'.format(cores['red'],cores['limpa'],cores['red'],cores['limpa']))\n guitarQuebrada = True\n sleep(3)\n os.system('clear')\n elif escolhaGuitarra == 3:\n print()\n relogio.corretempo(4)\n if guitarQuebrada == True:\n print(\n \"Você tentou tocar uma guitarra quebrada, e perdeu {}10 minutos{}\".format(cores['red'],cores['limpa']))\n else:\n print(\n 'Que música linda! 
Os deuses do rock estão satisfeitos...')\n sleep(.5)\n print('.')\n sleep(.5)\n print('.')\n sleep(.5)\n print('.')\n sleep(.5)\n print('.')\n sleep(.5)\n print('.')\n print('Uma chave caiu em sua cabeça!')\n sleep(5)\n self.chave = True\n os.system('clear')\n elif escolha == 5:\n print()\n print('Tá com saudade do boteco né minha filha?\\n')\n print('Oq deseja fazer:\\n\\n',\n '[1] - Jogar bilhar\\n',\n '[2] - Olhar em baixo da mesa\\n\\n')\n\n opcMesa = int(input('>> '))\n\n if opcMesa == 1:\n print('Você decidiu jogar bilhar e perdeu {}5 minutos{}'.format(cores['red'],cores['limpa']))\n relogio.corretempo(5)\n sleep(5)\n elif opcMesa == 2:\n print(\"Você encontrou um {}imã{}, agora consegue atrair metal\".format(cores['azul'],cores['limpa']))\n relogio.corretempo(4)\n sleep(5)\n ima = True\n os.system('clear')\n\n elif opcao2 == 3:\n if self.atributo == 'Força':\n print()\n print(\"A porta é de madeira e você conseguiu quebra-la!\")\n print(\"Parabéns, você é {}forte{} o suficiente para a próxima sala!\".format(cores['amarelo'],cores['limpa']))\n sleep(5)\n relogio.minutos += 30\n os.system('clear')\n break\n else:\n print('Você é frac{} demais pra isso'.format(\n self.generos()))\n sleep(5)\n relogio.corretempo(4)\n os.system('clear')\n"
},
{
"alpha_fraction": 0.5312820672988892,
"alphanum_fraction": 0.5415384769439697,
"avg_line_length": 22.214284896850586,
"blob_id": "1381d354cda8f324cd296f2a1acca65e34eea7bd",
"content_id": "6ec927cb767637ff8573ec9d057ba09723988289",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 42,
"path": "/relogio.py",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "from personagem import Personagem\n\n\nclass Relógio(Personagem):\n def __init__(self, minutos, nome, altura, atributo):\n self.minutos = minutos\n super().__init__(nome, altura, atributo)\n\n\n def corretempo(self, minutos):\n\n if self.atributo == 'Velocidade':\n total = minutos // 2\n self.minutos -= total\n else:\n self.minutos -= minutos\n\n def __str__(self):\n return f'{self.minutos:02d}'\n\n\n# Parte de teste\n\"\"\"while fimdotempo == False:\n print(\"---\")\n print(\"Ainda restam \"+str(relogio)+\" minutos\")\n print(\"\")\n print(\"Ações:\")\n print(\"1 - Tentar\")\n \n print(\"0 - Sair do jogo\")\n opcao = input(\"Escolha sua ação:\")\n if(opcao == \"1\") and (relogio.minutos > 0):\n relogio.corretempo(5)\n if relogio.minutos <= 0:\n fimdotempo = True \n print('ACABOU O TEMPO')\n break\n \n elif(opcao == '0'):\n print('Tente amanhã!')\n break\n \"\"\"\n"
},
{
"alpha_fraction": 0.5927654504776001,
"alphanum_fraction": 0.6161026954650879,
"avg_line_length": 24.205883026123047,
"blob_id": "26a54733ba7f5a82e7fae2ce798e0ea05f6ba576",
"content_id": "19fa66f41df012c673727cdb361d6e86f4d50a12",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1735,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 68,
"path": "/main.py",
"repo_name": "Joaongm/Projeto-RunAway",
"src_encoding": "UTF-8",
"text": "from relogio import Relógio\nfrom personagem import Personagem\nfrom salas import salaVermelha\nfrom segundaSala import salaBranca\nfrom funcoes import Funções\nfrom CORES import cores\nfrom time import sleep\nimport pygame\nimport sys\nimport os\n\n\npygame.init()\ntecla = pygame.mixer.Sound('teclado.ogg')\npygame.mixer.music.load('trilhasuspensa.ogg')\npygame.mixer.music.set_volume(0.1)\npygame.mixer.music.play(-1)\n\nif (__name__ == \"__main__\"):\n\n print(\"{:^60}\\n\\n\".format(\"RunAway\"))\n print(\"{:^60}\\n\\n\".format(\"[CADASTRO]\"))\n\n nome = input(\"Digite seu nome: \")\n alt = float(input(\"Digite sua altura: \"))\n gen = input(\"Digite seu gênero: \").lower()\n os.system('clear')\n\n func = Funções(genero=gen)\n\n tecla.play(-1)\n print(' '*25, end='')\n bemVindo = f\"Bem vind{func.generos()} {nome}\\n\\n\"\n func.animation(bemVindo)\n print(' '*20, end='')\n atributo = '»»»» Escolha um \\033[33matributo\\033[m ««««\\n\\n\\n\\n'\n func.animation(atributo)\n tecla.stop()\n\n sleep(1)\n print(\"Qual \\033[33matributo\\033[m você escolhe? \\n\\n\",\n '[1] - Força\\n',\n '[2] - Velocidade\\n',\n '[3] - Inteligência\\n',\n '[4] - Sorte\\n\\n')\n sleep(.5)\n\n opcao = int(input('»» '))\n print()\n\n if opcao == 1:\n atributo = \"Força\"\n elif opcao == 2:\n atributo = \"Velocidade\"\n elif opcao == 3:\n atributo = 'Inteligência'\n elif opcao == 4:\n atributo = \"Sorte\"\n\n personagem = Personagem(nome, alt, atributo)\n\n salas = salaVermelha(nome=nome, altura=alt, atributo = atributo, genero=gen)\n sala2 = salaBranca(nome=nome, altura=alt, atributo=atributo, genero=gen)\n personagem.escolhaAtributo()\n\n salas.acao()\n\n sala2.acao()\n"
}
] | 6 |
speakstone/meanshift | https://github.com/speakstone/meanshift | 7b63eac5301486fb6d28713840561483e05fd6d2 | 0110c82446603cfe0d50950af2ea9fb5e90bf3b1 | a0d34ec237c484747bb94442e1c59d1fa82e7b6f | refs/heads/master | 2021-05-08T23:01:54.932939 | 2018-05-18T08:14:25 | 2018-05-18T08:14:25 | 119,693,440 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5292758345603943,
"alphanum_fraction": 0.5755007863044739,
"avg_line_length": 33.157894134521484,
"blob_id": "c6d1c4a2467b47387d6145c852411b0c94a960ae",
"content_id": "f05bb7beb75a78a201978ae689c73aa69b1b1fea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3894,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 114,
"path": "/utils.py",
"repo_name": "speakstone/meanshift",
"src_encoding": "UTF-8",
"text": "utils.py\nimport scipy.misc, numpy as np, os, sys\nimport cv2 as cv\n\ndef save_img(out_path, img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n scipy.misc.imsave(out_path, img)\n\ndef scale_img(style_path, style_scale):\n scale = float(style_scale)\n o0, o1, o2 = scipy.misc.imread(style_path, mode='RGB').shape\n scale = float(style_scale)\n new_shape = (int(o0 * scale), int(o1 * scale), o2)\n style_target = _get_img(style_path, img_size=new_shape)\n return style_target\n\ndef get_img(src, img_size=False):\n img = scipy.misc.imread(src, mode='RGB') # misc.imresize(, (256, 256, 3))\n if not (len(img.shape) == 3 and img.shape[2] == 3):\n img = np.dstack((img,img,img))\n if img_size != False:\n img = scipy.misc.imresize(img, img_size)\n return img\n\ndef exists(p, msg):\n assert os.path.exists(p), msg\n\ndef list_files(in_path):\n files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n files.extend(filenames)\n break\n\n return files\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\ndef gray2rgb(gray):\n w, h = gray.shape\n rgb = np.empty((w, h, 3), dtype=np.float32)\n rgb[:, :, 2] = rgb[:, :, 1] = rgb[:, :, 0] = gray\n return rgb\n\n\ndef rgb2luv(image):\n img = image.transpose(2,0,1).reshape(3,-1)\n luv = np.array([[.299, .587, .114],[-.147, -.288, .436],[.615, -.515, -.1]]).dot(img).reshape((3,image.shape[0],image.shape[1]))\n return luv.transpose(1,2,0)\ndef luv2rgb(image):\n img = image.transpose(2,0,1).reshape(3,-1)\n rgb = np.array([[1, 0, 1.139],[1, -.395, -.580],[1, 2.03, 0]]).dot(img).reshape((3,image.shape[0],image.shape[1]))\n return rgb.transpose(1,2,0)\n\n\ndef histogram(content, styles):\n new_styles=[]\n for i in range(len(styles)):\n #content_sub = cv.resize(content, (256,256), interpolation=cv.INTER_CUBIC)/1.0\n #style_sub = cv.resize(style, (256,256), interpolation=cv.INTER_CUBIC)/1.0\n style = styles[i]\n content_sub = content\n style_sub = styles[i]\n mean_c = np.zeros((3))/1.0\n mean_s = np.zeros((3))/1.0\n conv_c = np.zeros((3,3))/1.0\n conv_s = np.zeros((3,3))/1.0\n for i in range (0,3):\n \tmean_c[i] = np.mean(content_sub[:,:,i])\n \tmean_s[i] = np.mean(style_sub[:,:,i])\n for i in range (0,3):\n \tfor j in range (0,3):\n \t\tconv_c[i,j] = np.mean((content_sub[:,:,i]-mean_c[i])*(content_sub[:,:,j]-mean_c[j]))\n \t\tconv_s[i,j] = np.mean((style_sub[:,:,i]-mean_s[i])*(style_sub[:,:,j]-mean_s[j]))\n eig_c, vec_c = np.linalg.eig(conv_c)\n\n eig_s, vec_s = np.linalg.eig(conv_s)\n\n if (False == np.all(eig_c>0.0001) or False == np.all(eig_s>0.0001)):\n new_styles.append(style.copy())\n continue\n\n sqrt_conv_c = np.dot(np.dot(vec_c, np.diag(eig_c**0.5)), vec_c.transpose())\n\n sqrt_conv_s_inv = np.dot(np.dot(vec_s, np.diag(eig_s**-0.5)), vec_s.transpose())\n\n A_chol = np.dot(sqrt_conv_c, sqrt_conv_s_inv)\n\n b_chol = mean_c - np.dot(A_chol, mean_s)\n\n new_style = style.copy()\n\n new_style_size = new_style.shape[0]*new_style.shape[1]\n\n new_style_shape = [new_style.shape[0],new_style.shape[1]]\n\n new_style_newshape = np.zeros((3,new_style_size))/1.0\n\n new_style_newshape[0,:] = new_style[:,:,0].flatten()\n\n new_style_newshape[1,:] = new_style[:,:,1].flatten()\n\n new_style_newshape[2,:] = new_style[:,:,2].flatten()\n\n new_style_newshape = np.dot(A_chol, new_style_newshape)+b_chol.repeat(new_style_size).reshape(3,new_style_size)\n\n new_style[:,:,0] = new_style_newshape[0,:].reshape(new_style_shape)\n\n new_style[:,:,1] = new_style_newshape[1,:].reshape(new_style_shape)\n\n new_style[:,:,2] = 
new_style_newshape[2,:].reshape(new_style_shape)\n new_styles.append(new_style)\n return np.array(new_styles)\n"
},
{
"alpha_fraction": 0.568024754524231,
"alphanum_fraction": 0.5817431807518005,
"avg_line_length": 35.14559555053711,
"blob_id": "cf6d1a16aabee027b603d92695917ebc21404fd5",
"content_id": "37e3b8b61fd8a306409c0b78738d11b6bf03002d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9805,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 261,
"path": "/meanshift/stdafx.cpp",
"repo_name": "speakstone/meanshift",
"src_encoding": "UTF-8",
"text": "// stdafx.cpp : 只包括标准包含文件的源文件\r\n// meanshift.pch 将作为预编译头\r\n// stdafx.obj 将包含预编译类型信息\r\n\r\n#include \"stdafx.h\"\r\n\r\n// TODO: 在 STDAFX.H 中\r\n// 引用任何所需的附加头文件,而不是在此文件中引用\r\ntransform.py\r\nimport tensorflow as tf, pdb\r\n\r\nWEIGHTS_INIT_STDEV = .1\r\n\r\ndef net(image):\r\n conv1 = _conv_layer(image, 32, 9, 1)\r\n conv2 = _conv_layer(conv1, 64, 3, 2)\r\n conv3 = _conv_layer(conv2, 128, 3, 2)\r\n resid1 = _residual_block(conv3, 3)\r\n resid2 = _residual_block(resid1, 3)\r\n resid3 = _residual_block(resid2, 3)\r\n resid4 = _residual_block(resid3, 3)\r\n resid5 = _residual_block(resid4, 3)\r\n conv_t1 = _conv_tranpose_layer(resid5, 64, 3, 2)\r\n conv_t2 = _conv_tranpose_layer(conv_t1, 32, 3, 2)\r\n conv_t3 = _conv_layer(conv_t2, 3, 9, 1, relu=False)\r\n preds = tf.nn.tanh(conv_t3) * 150 + 255./2\r\n return preds\r\n\r\ndef _conv_layer(net, num_filters, filter_size, strides, relu=True):\r\n weights_init = _conv_init_vars(net, num_filters, filter_size)\r\n strides_shape = [1, strides, strides, 1]\r\n net = tf.nn.conv2d(net, weights_init, strides_shape, padding='SAME')\r\n net = _instance_norm(net)\r\n if relu:\r\n net = tf.nn.relu(net)\r\n\r\n return net\r\n\r\ndef _conv_tranpose_layer(net, num_filters, filter_size, strides):\r\n weights_init = _conv_init_vars(net, num_filters, filter_size, transpose=True)\r\n\r\n batch_size, rows, cols, in_channels = [i.value for i in net.get_shape()]\r\n new_rows, new_cols = int(rows * strides), int(cols * strides)\r\n # new_shape = #tf.pack([tf.shape(net)[0], new_rows, new_cols, num_filters])\r\n\r\n new_shape = [batch_size, new_rows, new_cols, num_filters]\r\n tf_shape = tf.stack(new_shape)\r\n strides_shape = [1,strides,strides,1]\r\n\r\n net = tf.nn.conv2d_transpose(net, weights_init, tf_shape, strides_shape, padding='SAME')\r\n net = _instance_norm(net)\r\n return tf.nn.relu(net)\r\n\r\ndef _residual_block(net, filter_size=3):\r\n tmp = _conv_layer(net, 128, filter_size, 1)\r\n return net + _conv_layer(tmp, 128, filter_size, 1, relu=False)\r\n\r\ndef _instance_norm(net, train=True):\r\n batch, rows, cols, channels = [i.value for i in net.get_shape()]\r\n var_shape = [channels]\r\n mu, sigma_sq = tf.nn.moments(net, [1,2], keep_dims=True)\r\n shift = tf.Variable(tf.zeros(var_shape))\r\n scale = tf.Variable(tf.ones(var_shape))\r\n epsilon = 1e-3\r\n normalized = (net-mu)/(sigma_sq + epsilon)**(.5)\r\n return scale * normalized + shift\r\n\r\ndef _conv_init_vars(net, out_channels, filter_size, transpose=False):\r\n _, rows, cols, in_channels = [i.value for i in net.get_shape()]\r\n if not transpose:\r\n weights_shape = [filter_size, filter_size, in_channels, out_channels]\r\n else:\r\n weights_shape = [filter_size, filter_size, out_channels, in_channels]\r\n\r\n weights_init = tf.Variable(tf.truncated_normal(weights_shape, stddev=WEIGHTS_INIT_STDEV, seed=1), dtype=tf.float32)\r\n return weights_init\r\n\r\n\r\n\r\n\r\n\r\nstyle_color.py\r\nfrom __future__ import print_function\r\nimport sys, os, pdb\r\nsys.path.insert(0, 'color_src')\r\nimport numpy as np, scipy.misc\r\nfrom optimize import optimize\r\nfrom argparse import ArgumentParser\r\nfrom utils import save_img, get_img, exists, list_files, rgb2gray, gray2rgb\r\nimport evaluate\r\n\r\nCONTENT_WEIGHT = 7.5e0\r\nSTYLE_WEIGHT = 1e2\r\nTV_WEIGHT = 2e2\r\n\r\nLEARNING_RATE = 1e-3\r\nNUM_EPOCHS = 2\r\nCHECKPOINT_DIR = 'checkpoints'\r\nCHECKPOINT_ITERATIONS = 2000\r\nVGG_PATH = '/opt/data1/lilei/vgg-19/imagenet-vgg-verydeep-19.mat'\r\nTRAIN_PATH = '/opt/data1/train2014/'\r\nBATCH_SIZE = 
2\r\nDEVICE = '/gpu:0'\r\nFRAC_GPU = 1\r\n\r\ndef build_parser():\r\n parser = ArgumentParser()\r\n parser.add_argument('--checkpoint-dir', type=str,\r\n dest='checkpoint_dir', help='dir to save checkpoint in',\r\n metavar='CHECKPOINT_DIR', required=True)\r\n\r\n parser.add_argument('--style', type=str,\r\n dest='style', help='style image path',\r\n metavar='STYLE', required=True)\r\n\r\n parser.add_argument('--color', type=str,\r\n dest='color', help='color image path',\r\n metavar='COLOR', required=True)\r\n\r\n parser.add_argument('--train-path', type=str,\r\n dest='train_path', help='path to training images folder',\r\n metavar='TRAIN_PATH', default=TRAIN_PATH)\r\n\r\n parser.add_argument('--test', type=str,\r\n dest='test', help='test image path',\r\n metavar='TEST', default=False)\r\n\r\n parser.add_argument('--test-dir', type=str,\r\n dest='test_dir', help='test image save dir',\r\n metavar='TEST_DIR', default=False)\r\n\r\n parser.add_argument('--slow', dest='slow', action='store_true',\r\n help='gatys\\' approach (for debugging, not supported)',\r\n default=False)\r\n\r\n parser.add_argument('--epochs', type=int,\r\n dest='epochs', help='num epochs',\r\n metavar='EPOCHS', default=NUM_EPOCHS)\r\n\r\n parser.add_argument('--batch-size', type=int,\r\n dest='batch_size', help='batch size',\r\n metavar='BATCH_SIZE', default=BATCH_SIZE)\r\n\r\n parser.add_argument('--checkpoint-iterations', type=int,\r\n dest='checkpoint_iterations', help='checkpoint frequency',\r\n metavar='CHECKPOINT_ITERATIONS',\r\n default=CHECKPOINT_ITERATIONS)\r\n\r\n parser.add_argument('--vgg-path', type=str,\r\n dest='vgg_path',\r\n help='path to VGG19 network (default %(default)s)',\r\n metavar='VGG_PATH', default=VGG_PATH)\r\n\r\n parser.add_argument('--content-weight', type=float,\r\n dest='content_weight',\r\n help='content weight (default %(default)s)',\r\n metavar='CONTENT_WEIGHT', default=CONTENT_WEIGHT)\r\n\r\n parser.add_argument('--style-weight', type=float,\r\n dest='style_weight',\r\n help='style weight (default %(default)s)',\r\n metavar='STYLE_WEIGHT', default=STYLE_WEIGHT)\r\n\r\n parser.add_argument('--tv-weight', type=float,\r\n dest='tv_weight',\r\n help='total variation regularization weight (default %(default)s)',\r\n metavar='TV_WEIGHT', default=TV_WEIGHT)\r\n\r\n parser.add_argument('--learning-rate', type=float,\r\n dest='learning_rate',\r\n help='learning rate (default %(default)s)',\r\n metavar='LEARNING_RATE', default=LEARNING_RATE)\r\n\r\n return parser\r\n\r\ndef check_opts(opts):\r\n exists(opts.checkpoint_dir, \"checkpoint dir not found!\")\r\n exists(opts.style, \"style path not found!\")\r\n exists(opts.train_path, \"train path not found!\")\r\n if opts.test or opts.test_dir:\r\n exists(opts.test, \"test img not found!\")\r\n exists(opts.test_dir, \"test directory not found!\")\r\n exists(opts.vgg_path, \"vgg network data not found!\")\r\n assert opts.epochs > 0\r\n assert opts.batch_size > 0\r\n assert opts.checkpoint_iterations > 0\r\n assert os.path.exists(opts.vgg_path)\r\n assert opts.content_weight >= 0\r\n assert opts.style_weight >= 0\r\n assert opts.tv_weight >= 0\r\n assert opts.learning_rate >= 0\r\n\r\ndef _get_files(img_dir):\r\n files = list_files(img_dir)\r\n return [os.path.join(img_dir,x) for x in files]\r\n\r\n\r\ndef main():\r\n parser = build_parser()\r\n options = parser.parse_args()\r\n check_opts(options)\r\n\r\n style_target = get_img(options.style)\r\n color_target = get_img(options.color)\r\n\r\n\r\n # change style_target to gray image\r\n 
styled_grayscale = rgb2gray(style_target)\r\n style_target = gray2rgb(styled_grayscale)\r\n\r\n if not options.slow:\r\n content_targets = _get_files(options.train_path)\r\n elif options.test:\r\n content_targets = [options.test]\r\n\r\n kwargs = {\r\n \"slow\":options.slow,\r\n \"epochs\":options.epochs,\r\n \"print_iterations\":options.checkpoint_iterations,\r\n \"batch_size\":options.batch_size,\r\n \"save_path\":os.path.join(options.checkpoint_dir,'fns.ckpt'),\r\n \"learning_rate\":options.learning_rate\r\n }\r\n\r\n if options.slow:\r\n if options.epochs < 10:\r\n kwargs['epochs'] = 1000\r\n if options.learning_rate < 1:\r\n kwargs['learning_rate'] = 1e1\r\n\r\n args = [\r\n content_targets,\r\n color_target,\r\n style_target,\r\n options.content_weight,\r\n options.style_weight,\r\n options.tv_weight,\r\n options.vgg_path\r\n ]\r\n\r\n for preds, losses, i, epoch in optimize(*args, **kwargs):\r\n style_loss, content_loss, tv_loss, color_loss, loss = losses\r\n\r\n print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))\r\n to_print = (style_loss, content_loss, tv_loss, color_loss)\r\n print('style: %s, content:%s, tv: %s, color: %s' % to_print)\r\n if options.test:\r\n assert options.test_dir != False\r\n preds_path = '%s/%s_%s.png' % (options.test_dir,epoch,i)\r\n if not options.slow:\r\n ckpt_dir = os.path.dirname(options.checkpoint_dir)\r\n evaluate.ffwd_to_img(options.test,preds_path,\r\n options.checkpoint_dir)\r\n else:\r\n save_img(preds_path, img)\r\n ckpt_dir = options.checkpoint_dir\r\n cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir\r\n print(\"Training complete. For evaluation:\\n `%s`\" % cmd_text)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
}
] | 2 |
cliffxuan/mew | https://github.com/cliffxuan/mew | 60a7dcbc316a495b509e8bffb88e3059a33d25e1 | 43e4a39d75abe0d529d0ebe6cb075d0f64513ff6 | 351ac669b5cf07ee0d9bddd89b7290e103d411e4 | refs/heads/master | 2022-01-03T07:22:24.056058 | 2021-12-14T21:47:51 | 2021-12-14T21:47:51 | 165,946,224 | 2 | 0 | null | 2019-01-16T00:39:56 | 2021-11-22T12:10:31 | 2021-11-22T12:11:07 | Python | [
{
"alpha_fraction": 0.5614035129547119,
"alphanum_fraction": 0.5693780183792114,
"avg_line_length": 18,
"blob_id": "9325bf22cfd9e76ef94780ca5011ff9e401ac441",
"content_id": "d0fc32eb3db04ed8382046f40e751616c5a6f218",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 627,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 33,
"path": "/mew/convert_case.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport re\n\nCAP_RE = re.compile(\"(.)([A-Z])\")\n\n\ndef to_pascal_case(name):\n \"\"\"\n Turn a snake_case name into PascalCase.\n \"\"\"\n return name.title().replace(\"_\", \"\")\n\n\ndef from_pascal_case(name):\n \"\"\"\n Turn a PascalCase name into snake_case.\n \"\"\"\n return CAP_RE.sub(r\"\\1_\\2\", name).lower()\n\n\ndef to_camel_case(name):\n \"\"\"\n Turn a snake_case name into camelCase.\n \"\"\"\n output = name.title().replace(\"_\", \"\")\n return output[0].lower() + output[1:]\n\n\ndef from_camel_case(name):\n \"\"\"\n Turn a camelCase name into snake_case.\n \"\"\"\n return from_pascal_case(name)\n"
},
{
"alpha_fraction": 0.5923076868057251,
"alphanum_fraction": 0.6192307472229004,
"avg_line_length": 31.5,
"blob_id": "c7b4b20b468dfb2a7a34c472e02c6ba58ce696b8",
"content_id": "d8001dc52c1cdb2d8ed604ff1d29bb1ab0965e0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 8,
"path": "/mew/__version__.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "__title__ = \"mew\"\n__description__ = \"Python dataclass serializer/deserializer\"\n__url__ = \"http://github.com/cliffxuan/mew\"\n__version__ = \"0.1.4\"\n__author__ = \"Cliff Xuan\"\n__author_email__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2020 Cliff Xuan\"\n"
},
{
"alpha_fraction": 0.6741230487823486,
"alphanum_fraction": 0.6779359579086304,
"avg_line_length": 21.739885330200195,
"blob_id": "6d3fc0b79aa5a04608d2991cba35faf920a7f62b",
"content_id": "7e1dcf56a49f8bb14ebddf5dc5686a9c83588872",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3934,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 173,
"path": "/tests/test_serializable.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport datetime as dt\nimport typing\nimport uuid\nfrom dataclasses import dataclass, field\nfrom enum import Enum\n\nimport pytest\nfrom hypothesis import given, note, settings\nfrom hypothesis import strategies as st\nfrom hypothesis.extra.dateutil import timezones\n\nfrom mew import NotSupported, serializable, to_pascal_case, from_pascal_case\n\n\ndef test_not_supported():\n\n with pytest.raises(NotSupported) as error:\n\n @serializable\n @dataclass\n class Foo:\n name: typing.Callable\n\n assert error.value.types == [typing.Callable]\n\n\n# TODO generate types?\n# IDEA use schema.org?\nclass GeoPosition(typing.NamedTuple):\n latitude: float\n longitude: float\n\n\nclass Subject(Enum):\n MATHMATICS = 0\n ENGLISH = 1\n COMPUTING = 2\n\n\n@serializable\n@dataclass\nclass Address:\n id: uuid.UUID\n door_number: int\n house_name: typing.Optional[str]\n street_name: str\n geo_position: GeoPosition\n\n\n@serializable\n@dataclass\nclass Teacher:\n id: uuid.UUID\n name: str\n address: Address\n\n\n@serializable\n@dataclass\nclass Lecture:\n id: uuid.UUID\n name: int\n timestamp: dt.datetime\n teacher: Teacher\n subject: Subject\n\n\n@serializable\n@dataclass\nclass Student:\n id: uuid.UUID\n name: str\n address: Address\n lectures: typing.List[Lecture] = field(default_factory=list)\n\n\[email protected]\ndef geo_positions(\n draw,\n latitude=st.floats(min_value=-90, max_value=90),\n longitude=st.floats(min_value=0, max_value=180),\n) -> GeoPosition:\n return GeoPosition(draw(latitude), draw(longitude))\n\n\[email protected]\ndef address(\n draw,\n id=st.uuids(),\n door_number=st.integers(),\n house_name=st.one_of(st.text() | st.none()),\n street_name=st.text(),\n geo_position=geo_positions(),\n) -> Address:\n return Address(\n draw(id),\n draw(door_number),\n draw(house_name),\n draw(street_name),\n draw(geo_position),\n )\n\n\[email protected]\ndef teacher(draw, id=st.uuids(), name=st.text(), address=address()) -> Teacher:\n return Teacher(draw(id), draw(name), draw(address))\n\n\[email protected]\ndef lecture(\n draw,\n id=st.uuids(),\n name=st.text(),\n timestamp=st.datetimes(timezones=timezones()),\n teacher=teacher(),\n subject=st.sampled_from(Subject),\n) -> Lecture:\n return Lecture(draw(id), draw(name), draw(timestamp), draw(teacher), draw(subject))\n\n\[email protected]\ndef student(\n draw, id=st.uuids(), name=st.text(), address=address(), lecture=lecture()\n) -> Student:\n lectures = draw(st.lists(lecture, max_size=5))\n return Student(draw(id), draw(name), draw(address), lectures)\n\n\n@given(address())\ndef test_address(address):\n blob = address.dumps(convert_key=to_pascal_case)\n # make sure all keys are included and with the correct case\n assert \"DoorNumber\" in blob\n assert \"HouseName\" in blob\n assert \"StreetName\" in blob\n assert address == Address.loads(blob, convert_key=from_pascal_case)\n\n\n@given(teacher())\ndef test_teacher(teacher):\n blob = teacher.dumps(convert_key=to_pascal_case)\n # parent keys\n assert \"Id\" in blob\n assert \"Name\" in blob\n assert \"Address\" in blob\n # child keys\n assert \"DoorNumber\" in blob\n assert \"HouseName\" in blob\n assert \"StreetName\" in blob\n assert teacher == Teacher.loads(teacher.dumps(), convert_key=from_pascal_case)\n\n\n@given(lecture())\ndef test_lecture(lecture):\n blob = lecture.dumps(convert_key=to_pascal_case)\n # parent keys\n assert \"Id\" in blob\n assert \"Name\" in blob\n assert \"Address\" in blob\n # child keys\n assert \"DoorNumber\" in blob\n assert \"HouseName\" in 
blob\n assert \"StreetName\" in blob\n assert lecture == Lecture.loads(blob, convert_key=from_pascal_case)\n\n\n@given(student())\n@settings(max_examples=10)\ndef test_student(student):\n blob = student.dumps()\n note(f\"student: {student}\")\n assert student == Student.loads(blob, convert_key=from_pascal_case)\n"
},
{
"alpha_fraction": 0.6025640964508057,
"alphanum_fraction": 0.6057692170143127,
"avg_line_length": 19.799999237060547,
"blob_id": "32c5504090d1463365c2fa4179f6b3d4809e80ba",
"content_id": "521cbf68308f00ef0b688b8ad007b0b1e38aa17a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/tests/test_version.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport datetime as dt\nimport re\n\nfrom mew import __version__\n\n\ndef test_version():\n # no double digit\n assert re.match(r\"\\d\\.\\d\\.\\d\", __version__.__version__)\n\n\ndef test_copyright():\n year = dt.date.today().year\n assert f\"Copyright {year} Cliff Xuan\" == __version__.__copyright__\n"
},
{
"alpha_fraction": 0.6545105576515198,
"alphanum_fraction": 0.6698656678199768,
"avg_line_length": 26.421052932739258,
"blob_id": "2896888d27b34cb35916958eadba39488c2757d5",
"content_id": "4e6d34866f532be0e814d0774d8aa3c8f8a7fd4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 521,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 19,
"path": "/Pipfile",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "[[source]]\nname = \"pypi\"\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\n\n[dev-packages]\nhypothesis = \"*\"\npython-dateutil = \"*\"\npytest = \"*\"\npytest-cov = \"*\"\nflake8 = \"*\"\npdbpp = \"*\"\n\n[packages]\nmew = {editable = true,path = \".\"}\nbackports-datetime-fromisoformat = {markers = \"python_version<'3.7'\",version = \"*\"}\n# use fork until this RP is approved:\n# https://github.com/ericvsmith/dataclasses/pull/141\ndataclasses = {markers = \"python_version<'3.7'\",file = \"https://github.com/cliffxuan/dataclasses/tarball/master\"}\n"
},
{
"alpha_fraction": 0.5498027801513672,
"alphanum_fraction": 0.5527613162994385,
"avg_line_length": 28.823530197143555,
"blob_id": "b71db3a1a4d6a6bd06059939302b245723cba555",
"content_id": "b83b7a64fc2174cb0a5078fa438950304da0ea83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2028,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 68,
"path": "/mew/decorator.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom dataclasses import is_dataclass\nimport enum\nimport typing\n\nfrom mew.serializer import dumps, loads, SCALAR_TYPES\n\n\nclass NotSupported(Exception):\n \"\"\"not supported\"\"\"\n\n def __init__(self, types, message):\n self.types = types\n self.message = message\n\n\ndef is_namedtuple(t):\n b = t.__bases__\n if len(b) != 1 or b[0] != tuple:\n return False\n f = getattr(t, \"_fields\", None)\n if not isinstance(f, tuple):\n return False\n return all(type(n) == str for n in f)\n\n\ndef find_unsupported(t: typing.Any) -> typing.List[typing.Any]:\n if t in SCALAR_TYPES:\n return []\n if isinstance(t, type): # if it's a class\n if issubclass(t, enum.Enum):\n return []\n if is_dataclass(t):\n return [\n v.type\n for v in t.__dataclass_fields__.values() # type: ignore\n if find_unsupported(v.type)\n ]\n if is_namedtuple(t) and hasattr(t, \"_field_types\"):\n # yes: class Point(typing.NamedTuple):\n # x: int\n # y: int\n # no: Point = namedtuple('Point', ['x', 'y'])\n return [\n v for v in t._field_types.values() # type: ignore\n if find_unsupported(v)\n ]\n if hasattr(t, \"__origin__\"): # if it's a type in typing module\n origin = t.__origin__\n if origin == typing.Union:\n return [arg for arg in t.__args__ if find_unsupported(arg)]\n if origin in (list, typing.List): # typing.List for python3.6\n return find_unsupported(t.__args__[0])\n # TODO covert more of typing.XXX\n return [t]\n\n\ndef serializable(t):\n \"\"\"adds dumps() and loads() to the class\"\"\"\n unsupported_types = find_unsupported(t)\n if unsupported_types:\n raise NotSupported(unsupported_types, f\"unsupported type {unsupported_types}\")\n t.dumps = dumps\n t.loads = classmethod(loads)\n return t\n"
},
{
"alpha_fraction": 0.6168830990791321,
"alphanum_fraction": 0.6258116960525513,
"avg_line_length": 29.04878044128418,
"blob_id": "d5a1d1e7ac6d9ced98217d27c77e42533e69c88a",
"content_id": "a0bedb6add08c481debf30bbafbe737896519c33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1232,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 41,
"path": "/tests/test_convert_case.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport pytest\nfrom mew.convert_case import (\n to_pascal_case,\n from_pascal_case,\n to_camel_case,\n from_camel_case,\n)\n\n\[email protected](\n \"pascal, snake\",\n [\n (\"PascalCase\", \"pascal_case\"),\n (\"PascalPascalCase\", \"pascal_pascal_case\"),\n (\"Pascal2Pascal2Case\", \"pascal2_pascal2_case\"),\n (\"HttpResponseCode\", \"http_response_code\"),\n (\"HttpResponseCodeXyz\", \"http_response_code_xyz\")\n # these cannot be converted\n # ('HTTPResponseCode', 'http_response_code'),\n # ('HTTPResponseCodeXYZ', 'http_response_code_xyz')\n ],\n)\ndef test_pascal_and_snake_case_conversion(pascal, snake):\n assert pascal == to_pascal_case(snake)\n assert snake == from_pascal_case(pascal)\n\n\[email protected](\n \"camel, snake\",\n [\n (\"camelCase\", \"camel_case\"),\n (\"camelCamelCase\", \"camel_camel_case\"),\n (\"camel2Camel2Case\", \"camel2_camel2_case\"),\n (\"getHttpResponseCode\", \"get_http_response_code\"),\n (\"get2HttpResponseCode\", \"get2_http_response_code\"),\n ],\n)\ndef test_camel_and_snake_case_conversion(camel, snake):\n assert camel == to_camel_case(snake)\n assert snake == from_camel_case(camel)\n"
},
{
"alpha_fraction": 0.7412587404251099,
"alphanum_fraction": 0.7832167744636536,
"avg_line_length": 70.5,
"blob_id": "95b8cf6a10ebd0847b09c2ae17dfabd37a6bdb8d",
"content_id": "db31e8c8e21073ce5ba4177e5fbf06179b33287f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 2,
"path": "/mew/__init__.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "from mew.convert_case import to_pascal_case, from_pascal_case # noqa: F401\nfrom mew.decorator import serializable, NotSupported # noqa: F401\n"
},
{
"alpha_fraction": 0.6708776354789734,
"alphanum_fraction": 0.6801861524581909,
"avg_line_length": 32.42222213745117,
"blob_id": "27bf78299e87fab491d6197d8fc2f31a393359aa",
"content_id": "d04bc4971566377a3e70f7544fe4fc32d07a1d54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1504,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 45,
"path": "/README.md",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "Mew: python dataclass serializer/deserializer\n=============================================\n[![Build Status](https://travis-ci.org/cliffxuan/mew.svg?branch=master)](https://travis-ci.org/cliffxuan/mew)\n[![Python Version Support](https://img.shields.io/pypi/pyversions/mew.svg)](https://img.shields.io/pypi/pyversions/mew.svg)\n[![PyPI Version](https://badge.fury.io/py/mew.svg)](https://badge.fury.io/py/mew)\n[![Coverage](https://img.shields.io/codeclimate/coverage/cliffxuan/mew.svg?style=flat)](https://codeclimate.com/github/cliffxuan/mew)\n[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)\n![image](https://raw.githubusercontent.com/cliffxuan/mew/master/mew.jpg)\n\n``` {.sourceCode .python}\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List\n\nimport mew\n\n\nclass Type(Enum):\n normal = 'normal'\n electric = 'electric'\n fire = 'fire'\n fighting = 'fighting'\n water = 'water'\n psychic = 'psychic'\n\n\[email protected]\n@dataclass\nclass Pokemon:\n name: str\n pokedex: int\n type: Type\n abilities: List[str]\n\n\n>>> pikachu = Pokemon('Pikachu', 25, Type.electric, ['static', 'lightning rod'])\n\n>>> pikachu\nPokemon(name='Pikachu', pokedex=25, type=<Type.electric: 'electric'>, abilities=['static', 'lightning rod'])\n\n>>> pikachu.dumps()\n'{\"name\": \"Pikachu\", \"pokedex\": 25, \"type\": \"electric\", \"abilities\": [\"static\", \"lightning rod\"]}'\n\n>>> assert pikachu == Pokemon.loads(pikachu.dumps())\n```\n"
},
{
"alpha_fraction": 0.5977244973182678,
"alphanum_fraction": 0.602329671382904,
"avg_line_length": 25.75,
"blob_id": "58b27c13f1674a195ac0c2bdbeabad066f37ec10",
"content_id": "4363d5e1711cdc40006999a755cc10fb88932250",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7383,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 276,
"path": "/mew/serializer.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport datetime as dt\nimport enum\nimport json\nimport sys\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Collection, Mapping\nfrom typing import Any, Callable, Dict, List, Union\nfrom dataclasses import asdict, is_dataclass\n\nimport yaml\n\nif sys.version_info < (3, 7):\n # fromisoformat() is for python version >= 3.7\n from backports.datetime_fromisoformat import MonkeyPatch\n\n MonkeyPatch.patch_fromisoformat()\n\n\nNoneType = type(None)\nSUPPORTED_FORMATS = (\"json\", \"yaml\")\n\nPRIMITIVE_TYPES = (str, int, float, bool, type(None))\nSCALAR_TYPES = (uuid.UUID, dt.date, dt.datetime, dt.time, *PRIMITIVE_TYPES)\n\n\nclass ScalarTypeSerializer(ABC):\n @property\n @classmethod\n @abstractmethod\n def type(cls):\n ...\n\n @staticmethod\n @abstractmethod\n def serialize(obj: Any) -> Any:\n ...\n\n @staticmethod\n @abstractmethod\n def deserialize(value: Any) -> Any:\n ...\n\n\nclass StringSerializer(ScalarTypeSerializer):\n\n type = str\n\n @staticmethod\n def serialize(obj: str) -> str:\n return obj\n\n @staticmethod\n def deserialize(value: str) -> str:\n return value\n\n\nclass IntSerializer(ScalarTypeSerializer):\n\n type = int\n\n @staticmethod\n def serialize(obj: int) -> int:\n return obj\n\n @staticmethod\n def deserialize(value: int) -> int:\n return value\n\n\nclass FloatSerializer(ScalarTypeSerializer):\n\n type = float\n\n @staticmethod\n def serialize(obj: float) -> float:\n return obj\n\n @staticmethod\n def deserialize(value: float) -> float:\n return value\n\n\nclass BooleanSerializer(ScalarTypeSerializer):\n\n type = bool\n\n @staticmethod\n def serialize(obj: bool) -> bool:\n return obj\n\n @staticmethod\n def deserialize(value: bool) -> bool:\n return value\n\n\nclass NoneTypeSerializer:\n\n type = type(None)\n\n @staticmethod\n def serialize(obj: NoneType) -> NoneType:\n return obj\n\n @staticmethod\n def deserialize(value: NoneType) -> NoneType:\n return value\n\n\nclass UUIDSerializer(ScalarTypeSerializer):\n type = uuid.UUID\n\n @staticmethod\n def serialize(obj: uuid.UUID) -> str:\n # Format as \"10fb6968-6b54-44c8-9365-e0b3934ae156\".\n return str(obj)\n\n @staticmethod\n def deserialize(value: str) -> uuid.UUID:\n return uuid.UUID(value)\n\n\nclass DateTimeSerializer(ScalarTypeSerializer):\n type = dt.datetime\n\n @staticmethod\n def serialize(obj: dt.datetime) -> str:\n # Format datetimes as ISO-8601 w/ timezone.\n return obj.isoformat()\n\n @staticmethod\n def deserialize(value: str) -> dt.datetime:\n return dt.datetime.fromisoformat(value)\n\n\nclass DateSerializer(ScalarTypeSerializer):\n\n type = dt.date\n\n @staticmethod\n def serialize(obj: dt.date) -> str:\n # Format date as YYYY-MM-DD\n return obj.isoformat()\n\n @staticmethod\n def deserialize(value: str) -> dt.date:\n return dt.date.fromisoformat(value)\n\n\nclass TimeSerializer(ScalarTypeSerializer):\n\n type = dt.time\n\n @staticmethod\n def serialize(obj: dt.time) -> str:\n # Format date as HH:MM:SS\n return obj.isoformat()\n\n @staticmethod\n def deserialize(value: str) -> dt.time:\n return dt.time.fromisoformat(value)\n\n\nclass MultiTypeSerializer:\n\n default_serializers = (\n StringSerializer,\n IntSerializer,\n FloatSerializer,\n BooleanSerializer,\n NoneTypeSerializer,\n UUIDSerializer,\n DateTimeSerializer,\n DateSerializer,\n TimeSerializer,\n )\n\n def __init__(self):\n self.serializers: Dict[type, ScalarTypeSerializer] = {\n serializer.type: serializer for serializer in self.default_serializers\n }\n\n def serialize(self, 
obj: Any, convert_key: Callable = lambda x: x) -> Any:\n for t in self.serializers:\n if isinstance(obj, t):\n return self.serializers[t].serialize(obj)\n\n # Serialise enums to their value.\n if isinstance(obj, enum.Enum):\n return obj.value\n\n # Recursively serialise dictionaries.\n if isinstance(obj, Mapping):\n # TODO this is assuming k is a string\n return {\n convert_key(k): self.serialize(v, convert_key) for k, v in obj.items()\n }\n\n # Convert dataclass instance as dict\n if is_dataclass(obj) and not isinstance(obj, type):\n return self.serialize(asdict(obj), convert_key)\n\n # Recursively serialise stuff that looks like a list or set.\n if isinstance(obj, Collection) and not isinstance(obj, str):\n return [self.serialize(i, convert_key) for i in obj]\n\n # TODO raise exception instead?\n return obj\n\n def deserialize(\n self, t: Any, value: Any, convert_key: Callable = lambda x: x\n ) -> Any:\n if t in self.serializers:\n return self.serializers[t].deserialize(value)\n if is_dataclass(t):\n return t(\n **{\n convert_key(k): self.deserialize(\n t.__dataclass_fields__[convert_key(k)].type, v, convert_key\n )\n for k, v in value.items()\n }\n )\n\n if isinstance(t, type):\n if issubclass(t, enum.Enum):\n for item in t: # type: ignore\n if item.value == value:\n return item\n raise Exception(f\"cannot find an item in Enum {t} with value {value}\")\n if issubclass(t, tuple):\n return t(*value)\n\n # TODO deal with List without args, e.g. foo: List = ['a', 'b']\n if hasattr(t, \"__origin__\") and hasattr(t, \"__args__\"):\n origin = t.__origin__\n args = t.__args__\n if origin in (list, List):\n return [self.deserialize(args[0], item, convert_key) for item in value]\n if origin == Union:\n for arg in args:\n try:\n return self.deserialize(arg, value, convert_key)\n except TypeError:\n pass\n raise Exception(f\"cannot convert type {t} for value {value}\")\n # TODO convert more of type.XXX\n raise Exception(f\"unsupported type {t} for value {value}\")\n\n\n# TODO validate\n# types may not match\ndef dumps(o, *, format: str = \"json\", convert_key: Callable = lambda x: x, **kwargs):\n # serialized = serialize(o, convert_key=convert_key)\n serialized = MultiTypeSerializer().serialize(o, convert_key=convert_key)\n if format == \"json\":\n return json.dumps(serialized, **kwargs)\n if format == \"yaml\":\n return yaml.dump(serialized, **kwargs)\n raise ValueError(\n f'supported formats are {\",\".join(SUPPORTED_FORMATS)}.' f\" found {format}\"\n )\n\n\ndef loads(t, data, format: str = \"json\", convert_key: Callable = lambda x: x):\n if format == \"json\":\n data_asdict = json.loads(data)\n elif format == \"yaml\":\n data_asdict = yaml.load(data)\n else:\n raise ValueError(\n f'supported formats are {\",\".join(SUPPORTED_FORMATS)}.' f\" found {format}\"\n )\n # return deserialize(t, data_asdict, convert_key)\n return MultiTypeSerializer().deserialize(t, data_asdict, convert_key)\n"
},
{
"alpha_fraction": 0.717185378074646,
"alphanum_fraction": 0.7185385823249817,
"avg_line_length": 27.423076629638672,
"blob_id": "45de099d4510276909d3e70dd2ab5575e4ec4893",
"content_id": "43a28312fc60bd2a700fe7cf930fcb2bb804c620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 739,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 26,
"path": "/tests/test_serializer.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport typing\n\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom mew.serializer import dumps, loads, StringSerializer, IntSerializer\n\n\n@given(st.text())\ndef test_string_serializer(text):\n assert StringSerializer.serialize(text) == text\n assert StringSerializer.deserialize(text) == text\n\n\n@given(st.integers())\ndef test_int_serializer(integer):\n assert IntSerializer.serialize(integer) == integer\n assert IntSerializer.deserialize(integer) == integer\n\n\ndef test_dumps_yaml():\n origin = ['foo', 'bar']\n result = dumps(origin, format='yaml', default_flow_style=False)\n assert result == '- foo\\n- bar\\n'\n assert loads(typing.List[str], result, format='yaml') == origin\n"
},
{
"alpha_fraction": 0.6536585092544556,
"alphanum_fraction": 0.6585366129875183,
"avg_line_length": 16.08333396911621,
"blob_id": "5da74bbffc4b5e1bb994cb026b39ddffb8da8dd8",
"content_id": "2fa978a5369d68f83772ba3734839a6576d5b4e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 36,
"path": "/tests/pokemon.py",
"repo_name": "cliffxuan/mew",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List\n\nimport mew\n\n\nclass Type(Enum):\n normal = \"normal\"\n electric = \"electric\"\n fire = \"fire\"\n fighting = \"fighting\"\n water = \"water\"\n psychic = \"psychic\"\n\n\[email protected]\n@dataclass\nclass Pokemon:\n name: str\n pokedex: int\n type: Type\n abilities: List[str]\n\n\npikachu = Pokemon(\n name=\"Pikachu\",\n pokedex=25,\n type=Type.electric,\n abilities=[\"static\", \"lightning rod\"],\n)\n\nblob = pikachu.dumps()\npikachu_from_blob = Pokemon.loads(blob)\nassert pikachu == pikachu_from_blob\n"
}
] | 12 |
arnuschky/sjcl | https://github.com/arnuschky/sjcl | 8ced88ae6e92e099cc0df1aca3b7c9ad74137f8b | 33ef77670e22f28a328f9093705f2e6292498252 | d29316c185a541417ad2981511c267f0c03f5f9e | refs/heads/master | 2021-01-18T19:54:51.235106 | 2015-04-23T13:10:42 | 2015-04-23T13:10:42 | 34,407,178 | 1 | 0 | null | 2015-04-22T18:09:43 | 2015-03-16T05:49:23 | 2014-05-28T12:30:38 | null | [
{
"alpha_fraction": 0.4202611744403839,
"alphanum_fraction": 0.45904234051704407,
"avg_line_length": 26.172042846679688,
"blob_id": "d46232bcdde93c75b25b4bb25e2b07fe77af2c3c",
"content_id": "3b08476ddddd9aaec0f1ef2bdbc2d52892aed4d3",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2527,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 93,
"path": "/tests/simple.py",
"repo_name": "arnuschky/sjcl",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sjcl import SJCL\nimport unittest\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_encrypt_decrypt(self):\n message = \"secret message to encrypt\"\n cyphertext = SJCL().encrypt(message, \"shared_secret\")\n self.assertEqual(\n SJCL().decrypt(cyphertext, \"shared_secret\"),\n message\n )\n\n def test_decrypt_128(self):\n cyphertext = {\n 'ks': 128,\n 'cipher': 'aes',\n 'mode': 'ccm',\n 'v': 1,\n 'adata': '',\n 'iv': 'fR4fZKbjsZOrzDyjCYdEQw==',\n 'salt': '5IiimlH8JvY=',\n 'ts': 64,\n 'iter': 1000,\n 'ct': 'V8BYrUdurq1/Qx/EX8EBliKDKa6XB93dZ6QOFSelw77Q'\n }\n self.assertEqual(\n SJCL().decrypt(cyphertext, \"shared_secret\"),\n \"secret message to encrypt\"\n )\n\n def test_decrypt_192(self):\n cyphertext = {\n 'ks': 192,\n 'cipher': 'aes',\n 'mode': 'ccm',\n 'v': 1,\n 'adata': '',\n 'iv': '3NCuY8Ev/Fbuf+2WqoQCDg==',\n 'salt': 'QL3iSh2PnVI=',\n 'ts': 64,\n 'iter': 1000,\n 'ct': '4/BcukcCJHgQXQA3QhJ3RTykynj3g1do49+BIW2Nge0S'\n }\n self.assertEqual(\n SJCL().decrypt(cyphertext, \"shared_secret\"),\n \"secret message to encrypt\"\n )\n\n def test_decrypt_256(self):\n cyphertext = {\n 'ks': 256,\n 'cipher': 'aes',\n 'mode': 'ccm',\n 'v': 1,\n 'adata': '',\n 'iv': 'bgEVvR8Hw9kY2UF0RcWUcQ==',\n 'salt': 'QL3iSh2PnVI=',\n 'ts': 64,\n 'iter': 1000,\n 'ct': 'lIFzbDGF9aflXHrZfZIF4+zN7r3nCUtSf8R5ztGM0nH0'\n }\n self.assertEqual(\n SJCL().decrypt(cyphertext, \"shared_secret\"),\n \"secret message to encrypt\"\n )\n\n def test_decrypt_nopad(self):\n cyphertext = {\n 'ks': 128,\n 'cipher': 'aes',\n 'mode': 'ccm',\n 'v': 1,\n 'adata': '',\n 'iv': 'fR4fZKbjsZOrzDyjCYdEQw',\n 'salt': '5IiimlH8JvY',\n 'ts': 64,\n 'iter': 1000,\n 'ct': 'V8BYrUdurq1/Qx/EX8EBliKDKa6XB93dZ6QOFSelw77Q'\n }\n self.assertEqual(\n SJCL().decrypt(cyphertext, \"shared_secret\"),\n \"secret message to encrypt\"\n )\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.7173076868057251,
"alphanum_fraction": 0.7173076868057251,
"avg_line_length": 24.950000762939453,
"blob_id": "fe4a9c8b0374d7cfc26ba16585a5be57e4eb8220",
"content_id": "a8dd63b78d0f765b4c79901ca767505f6162ab0b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 520,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 20,
"path": "/README.md",
"repo_name": "arnuschky/sjcl",
"src_encoding": "UTF-8",
"text": "===========\nPython-SJCL\n===========\n\nDecrypt and encrypt messages compatible to the \"Stanford Javascript Crypto\nLibrary (SJCL)\" message format.\n\nThis module was created while programming and testing the encrypted\nblog platform on cryptedblog.com which is based on sjcl.\n\nTypical usage often looks like this:\n\n #!/usr/bin/env python\n\n from sjcl import SJCL\n\n cyphertext = SJCL().encrypt(\"secret message to encrypt\", \"shared_secret\")\n\n print(cyphertext)\n print(SJCL().decrypt(cyphertext, \"shared_secret\"))\n\n"
},
{
"alpha_fraction": 0.5672268867492676,
"alphanum_fraction": 0.6008403301239014,
"avg_line_length": 22.799999237060547,
"blob_id": "957990e0c068cec071d5baa43d74e16d10442f3a",
"content_id": "e54f4c6ae8559bbb1eab63cf2762334ebdc4b88f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 238,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 10,
"path": "/sjcl/__init__.py",
"repo_name": "arnuschky/sjcl",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom .sjcl import SJCL\n\n__author__ = \"Ulf Bartel <[email protected]>\"\n__version__ = \"0.1.3\"\n__copyright__ = \"Copyright (c) 2014 Ulf Bartel\"\n__license__ = \"New-style BSD\"\n"
},
{
"alpha_fraction": 0.5056307911872864,
"alphanum_fraction": 0.511343240737915,
"avg_line_length": 32.298912048339844,
"blob_id": "18862073d731bdee606a362a8a33fca4a7ce51d3",
"content_id": "9f56f4f638735972f930223f6cf4e9b78a51fa36",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6127,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 184,
"path": "/examples/backup_cryptedblog.py",
"repo_name": "arnuschky/sjcl",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExample for sjcl python module:\n\nThis module downloads all content of a blog from www.cyptedblog.com via the\nJSON api, decrypts the SJCL encrpted posts/comments/images and saves both, the\nencrypted and decrypted data to disk.\n\nThis module may also be used as a backup solution for cryptedblog.\n\"\"\"\n\nimport os\nimport requests\nimport json\nfrom sjcl import SJCL\nimport base64\n\nURL_API = '%(server)s/api/blog/v1.0'\nURL_POST = URL_API + '/blog/%(blog)s/posts?offset=%(offset)d&limit=%(limit)d'\nURL_COMMENT = URL_API + '/blog/%(blog)s/posts/%(post)s/comments?offset=%(offset)d&limit=%(limit)d'\nURL_IMAGES = URL_API + '/images/%(image)s'\n\n\ndef write_file(fname_parts, content):\n \"\"\" write a file and create all needed directories \"\"\"\n\n fname_parts = [str(part) for part in fname_parts]\n # try to create the directory\n if len(fname_parts) > 1:\n try:\n os.makedirs(os.path.join(*fname_parts[:-1]))\n except OSError:\n pass\n # write file\n fhandle = open(os.path.join(*fname_parts), \"w\")\n fhandle.write(content)\n fhandle.close()\n\n\nclass Cryptedblog():\n \"\"\" This module downloads all content of a blog from www.cyptedblog.com via the\n JSON api, decrypts the SJCL encrpted posts/comments/images and saves both, the\n encrypted and decrypted data to disk.\n\n This module may also be used as a backup solution for cryptedblog.\n \"\"\"\n\n def __init__(self, server, blog, shared_secret, limit=5):\n self.server = server\n self.blog = blog\n self.shared_secret = shared_secret\n self.limit = limit # how many posts/comments to fetch with one request\n self.sjcl = SJCL()\n\n def download_images(self, fname_parts, key_image):\n response = requests.get(URL_IMAGES % {\n \"server\": self.server,\n \"image\": key_image\n })\n data = response.json()\n # special case: images are server from blobstrore and do normally not\n # contain a \"success\" field !!!\n if not data.get(\"success\", True):\n raise Exception(\"error image\")\n write_file(\n fname_parts + [\"images-encrypted.json\"],\n json.dumps(data, indent=4, sort_keys=True)\n )\n\n images = json.loads(\n self.sjcl.decrypt(\n data,\n self.shared_secret\n )\n )\n for (counter, image) in enumerate(images):\n write_file(\n fname_parts + [\"image_%02d.json\" % counter],\n json.dumps(image, indent=4, sort_keys=True)\n )\n # data uri of inline images starts like \"data:image/jpeg;base64,\"\n file_type = image[\"data\"].split(\"image/\")[1].split(\";\")[0]\n binary_data = base64.b64decode(image[\"data\"].split(\"base64,\")[1])\n write_file(\n fname_parts + [\"image_%02d.%s\" % (counter, file_type)],\n binary_data\n )\n\n def download_comments(self, key_post):\n offset = 0\n while True:\n\n response = requests.get(URL_COMMENT % {\n \"server\": self.server,\n \"blog\": self.blog,\n \"post\": key_post,\n \"offset\": offset,\n \"limit\": self.limit\n })\n data = response.json()\n if not data.get(\"success\", False):\n raise Exception(\"error comment\")\n\n for comment in data[\"data\"]:\n key_comment = comment[\"comment\"]\n fname_parts = [self.blog, \"posts\", key_post, \"comments\", key_comment]\n\n # write original data\n write_file(\n fname_parts + [\"comment.json\"],\n json.dumps(comment, indent=4, sort_keys=True)\n )\n # decrypt and write again\n comment[\"comment-decrypted\"] = json.loads(\n self.sjcl.decrypt(\n comment[\"comment-encrypted\"],\n self.shared_secret\n )\n )\n write_file(\n fname_parts + [\"comment-decrypted.json\"],\n json.dumps(comment, indent=4, 
sort_keys=True)\n )\n # get images\n if \"images\" in comment:\n self.download_images(fname_parts + [\"images\"], comment[\"images\"])\n\n offset += 1\n\n if len(data[\"data\"]) == 0:\n break\n\n def download_posts(self):\n offset = 0\n while True:\n response = requests.get(URL_POST % {\n \"server\": self.server,\n \"blog\": self.blog,\n \"offset\": offset,\n \"limit\": self.limit\n })\n data = response.json()\n for post in data[\"data\"]:\n key_post = post[\"post\"]\n fname_parts = [self.blog, \"posts\", key_post]\n\n # write original data\n write_file(\n fname_parts + [\"post.json\"],\n json.dumps(post, indent=4, sort_keys=True)\n )\n\n # decrypt and write again\n post[\"post-decrypted\"] = json.loads(\n self.sjcl.decrypt(\n post[\"post-encrypted\"],\n self.shared_secret\n )\n )\n write_file(\n fname_parts + [\"post_decrypted.json\"],\n json.dumps(post, indent=4, sort_keys=True)\n )\n\n # get images\n if \"images\" in post:\n self.download_images(fname_parts + [\"images\"], post[\"images\"])\n\n self.download_comments(key_post)\n offset += 1\n\n # break if there are no further posts\n if len(data[\"data\"]) == 0:\n break\n\nif __name__ == \"__main__\":\n cr = Cryptedblog(\n server=\"http://www.cryptedblog.com\",\n blog=\"test-blog\",\n shared_secret=\"YourSharedSecret\",\n )\n cr.download_posts()\n"
},
{
"alpha_fraction": 0.7894737124443054,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 37,
"blob_id": "220acd040509f4889e85a468ebbec96780f79e39",
"content_id": "63eb02cb93b5ce72adcc59a0145c37766de43f41",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 38,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "arnuschky/sjcl",
"src_encoding": "UTF-8",
"text": "git+https://github.com/dlitz/pycrypto\n"
}
] | 5 |
deanmcniven/ansible-intellij-idea | https://github.com/deanmcniven/ansible-intellij-idea | 3c9cbf70f52b294f57d259b64359a8034b7301fb | 1d0dacac008ccd5972a8c9e9423c85c4dcb5db57 | 4bdb51734a08d5260711c2560f42ba984aa97eb6 | refs/heads/master | 2023-04-07T11:04:13.898078 | 2023-04-02T23:54:42 | 2023-04-02T23:54:42 | 153,362,803 | 0 | 3 | MIT | 2018-10-16T22:32:29 | 2018-11-28T22:05:32 | 2020-04-07T05:12:30 | Python | [
{
"alpha_fraction": 0.8058252334594727,
"alphanum_fraction": 0.8058252334594727,
"avg_line_length": 33.33333206176758,
"blob_id": "2c4b25c87686d1df80e1b97e9e4e0325c46768ea",
"content_id": "7c0f3c95e2da8528c81be249456d8b05c3bdb553",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 206,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 6,
"path": "/README.md",
"repo_name": "deanmcniven/ansible-intellij-idea",
"src_encoding": "UTF-8",
"text": "# intellij-idea\n\nAnsible role that installs IntelliJ Idea\n\nThis role will install the Community Edition by default.\nTo install the Ultimate Edition, set the `intellij_license` variable to your license key.\n"
},
{
"alpha_fraction": 0.6612903475761414,
"alphanum_fraction": 0.6866359710693359,
"avg_line_length": 20.700000762939453,
"blob_id": "a6e7f0b52b3493fea4285d656defce9b658bcec5",
"content_id": "400c995e8c163ff5c2fdd556e5d9bdd021bcd79b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 20,
"path": "/files/license-file-generator.py",
"repo_name": "deanmcniven/ansible-intellij-idea",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport binascii\nimport codecs\nimport os\nimport sys\n\nif len(sys.argv) != 3:\n print(\"ERROR: Invalid number of parameters!\")\n print(\"Usage: \", sys.argv[0], \" <license string> <output file>\")\n sys.exit(1)\n\nlicensestring=sys.argv[1]\noutputfile=sys.argv[2]\n\nwith open(outputfile, 'wb') as f:\n f.write(binascii.unhexlify('FFFF'))\n f.write(licensestring.encode('utf-16-le'))\n\nos.chmod(outputfile, 0o644)\n"
}
] | 2 |
louis-cai/bitcoin-ahr999-HODL | https://github.com/louis-cai/bitcoin-ahr999-HODL | 24b3fd14801c200801d627504a22bcc210eb7875 | 5200961ed25d77b4703994e94fe70bd744e48407 | 09ef06e7c038de80b0639bfce0d425fdd958868b | refs/heads/master | 2023-07-15T10:36:30.531062 | 2021-08-19T05:01:36 | 2021-08-19T05:01:36 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5814151763916016,
"alphanum_fraction": 0.6760443449020386,
"avg_line_length": 29.86842155456543,
"blob_id": "c15c01f4a5e64a4bd74b3bf874025a14bc886c4d",
"content_id": "d03ce8db3b441e9d750eeba837519dbcfe6d463e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1779,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 38,
"path": "/ahr999.py",
"repo_name": "louis-cai/bitcoin-ahr999-HODL",
"src_encoding": "UTF-8",
"text": "import requests, json, time, math\nfrom scipy import stats\nimport csv\n\n'''\nahr999囤币指标:\n计算方式:ahr999指标 =(比特币价格/200日定投成本)*(比特币价格/指数增长估值)。\n其中指数成长估值为币价和币龄的拟合结果,本指数拟合方法为每月对历史数据进行拟合。\n\n指标说明:该指标由微博用户ahr999创建,辅助比特币定投用户结合择机策略做出投资决策。 \n该指标隐含了比特币短期定投的收益率及比特币价格与预期估值的偏离度。 \n从长期来看,比特币价格与区块高度呈现出一定的正相关,同时借助定投方式的优势,短期定投成本大都位于比特币价格之下。 \n因此,当比特币价格同时低于短期定投成本和预期估值时增大投资额,能增大用户收益的概率。 \n根据指标回测,当指标低于0.45时适合抄底,在0.45和1.2区间内适合定投BTC,高于该区间意味着错过最佳定投时期。\n'''\n\ndef ahr999():\n geomean = stats.gmean([8112.13, 7479.35, 7575, 7450])\n # api地址\n url = 'https://api.coincap.io/v2/candles?exchange=huobi&interval=d1&baseId=bitcoin"eId=tether&start=1559520000000&end=1584275033726'\n\n # 网络请求\n r = requests.get(url)\n jsonstr = r.json()\n\n data = jsonstr['data']\n lows = []\n for item in data: # 打印出所有的keys\n lows.append((float(item['low'])))\n geomean = stats.gmean(lows)\n day = (item['period'] / 1000 - 1230940800) / (24 * 60 * 60)\n coinPrice = 10 ** (5.84 * math.log(day, 10) - 17.01)\n ahr999 = (float(item['low']) / geomean) * (float(item['low']) / coinPrice)\n print(item, ahr999, day, coinPrice, geomean)\n\n\nif __name__ == '__main__':\n ahr999()\n"
},
{
"alpha_fraction": 0.5760086178779602,
"alphanum_fraction": 0.7615273594856262,
"avg_line_length": 52.38461685180664,
"blob_id": "8d0a597c6e963b0ee245537807dae1a332b3f58f",
"content_id": "42ad68f7a5d4c02cf3ac0f707a71b9053455decc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4796,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 52,
"path": "/README.md",
"repo_name": "louis-cai/bitcoin-ahr999-HODL",
"src_encoding": "UTF-8",
"text": "# 比特币-九神-囤币指南\n\n [ @ahr999 ](https://weibo.com/ahr999)[《囤比特币》](http://ahr999.com/xubtc.htm)分享九神ahr999在过去几年囤比特币过程中的思考和经验。写它的目的不是宣传比特币,而是帮助那些已经准备囤比特币的人。是否看好比特币很大程度上与价值观有关,无意改变任何人的价值观。(整理[@玛雅cndx](http://jdoge.com/))\n\n#\n**新文:**\n\n* 《[囤比特币:ahr999指数](https://weibo.com/ttarticle/p/show?id=2309404441088189399138)》。可以[BtcIE.com/ahr999 ](https://btcie.com/ahr999/)查询新定义的ahr999指数。\n* 《[寻找合适的购买时机(20190804更新)](https://weibo.com/ttarticle/p/show?id=2309404401520245342246)》囤币党也是要抄底的。对(中长期)行情精准的判断,也是囤币党的必备技能。\n\n#\n**序章:**\n\n* 《[知之非难,行之不易](https://weibo.com/ttarticle/p/show?id=2309404290257041409981)》为什么要写这个系列。\n\n#\n**入门:**\n\n* 《[比特币与理想主义](https://weibo.com/ttarticle/p/show?id=2309404283412763544904)》我们在参与一场社会实验,它存在失败的可能性,但是我们无怨无悔。\n* 《[下车太早只因愿景太小](https://weibo.com/ttarticle/p/show?id=2309404286329633561927)》这场实验的目标很大,如果一切顺利,比特币的价格可能会在20年后涨到1.6亿人民币。\n* 《[囤比特币:你离财富自由还有多远?](https://weibo.com/ttarticle/p/show?id=2309404287022729712573)》我们没有其他能耐,只能靠囤积比特币,并耐心地等待属于自己的财富自由。\n* 《[囤比特币:冲动、孤独、无聊与矛盾](https://weibo.com/ttarticle/p/show?id=2309404287827880877926)》虽然会经历冲动、孤独、无聊和矛盾等心理考验,但是我们已经做好准备囤币。\n* 《[囤比特币:手握私钥的快感](https://weibo.com/ttarticle/p/show?id=2309404289198575222102)》虽然掌握私钥有点麻烦,但是我们仍然准备自己对自己负责。\n* 《[囤比特币:如何管理私钥?](https://weibo.com/ttarticle/p/show?id=2309404289950832033282)》管理私钥其实并没有想象的那么麻烦,但我们需要把握好几个原则。\n\n#\n**进阶:**\n\n* 《[囤比特币:基本价格模型](https://weibo.com/ttarticle/p/show?id=2309404290588110395875)》囤币是比特币一切价值得来源,长期囤币者关心的去中心化和安全性是比特币首先要保证的特性。每次产量减半,囤币需求不变,但比特币供应减少,价格必涨。\n* 《[囤比特币:寻找合适的购买时机](https://weibo.com/ttarticle/p/show?id=2309404292613674022595)》我们都希望使用有限的投入获得更多比特币,那么何时是合适的买点呢?【[币应用BtcIE.com](http://btcie.com)】\n* 《[囤比特币:唯有比特币](https://weibo.com/ttarticle/p/show?id=2309404294325361104197)》除了比特币,我不持有任何其它数字币。但是,我也不反对任何人持有任何币,哪怕是传销币。\n* 《[囤比特币:不要跟着感觉走](https://weibo.com/ttarticle/p/show?id=2309404294599689565825)》囤比特币其实是反复决策的结果,别人觉得简单是因为只看到结果,而看不到决策的过程。\n* 《[囤比特币:币本位思维](https://weibo.com/ttarticle/p/show?id=2309404294635697610801)》比特币创造了一个全新的世界。在这个世界里,只有一个标准——比特币。【[币本位USD/XBT](http://btcie.com/btc)】\n* 《[囤比特币:心中无币](https://weibo.com/ttarticle/p/show?id=2309404295149122413875)》当理解上升到一定程度,我们不再需要关注任何比特币相关的信息。\n\n#\n**贡献:**\n\n* 《[囤比特币:打造强节点](https://weibo.com/ttarticle/p/show?id=2309404297578786198023)》成就最好的自己就是对比特币最大的贡献!\n* 《[囤比特币:运行全节点](https://weibo.com/ttarticle/p/show?id=2309404297617780650574)》私钥决定比特币所有权,全节点捍卫比特币规则。\n\n#\n**终章:**\n\n* 《[不忘初心](https://weibo.com/ttarticle/p/show?id=2309404297653562298410)》系列的最后的一文。\n\n#\n**故事:**\n\n* 《[四年一个轮回,不光有世界杯,还有比特币](https://weibo.com/ttarticle/p/show?id=2309404265822628505977)》有时候,时间能改变一切。有时候,时间什么也改变不了。\n* 《[上一轮熊市](https://weibo.com/ttarticle/p/show?id=2309404282406097046246)》从8000元到900元,我们都经历了些什么?\n* 《[牛市起点的故事](https://weibo.com/ttarticle/p/show?id=2309404284267738876518)》2015年11月,牛市起点,19个人,19个故事。\n"
}
] | 2 |
mrbungie/AIND-Recognizer | https://github.com/mrbungie/AIND-Recognizer | 1fe9d2bf9daec5c673fa5791208b029bab331e98 | 85ae1be55f3b5592c977a82beee5392d6efd26ad | d7fbde2762af4e0c8d74bb6996eec882771eeb33 | refs/heads/master | 2021-01-19T18:25:43.346739 | 2017-08-24T22:59:27 | 2017-08-24T22:59:27 | 101,136,220 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5846602320671082,
"alphanum_fraction": 0.5928459167480469,
"avg_line_length": 44.04545593261719,
"blob_id": "e3d4ddfdb0c00f18294bf3ae0843486de62127bf",
"content_id": "95edd2fa40f1853a50981176f7e2da9bc34457c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8918,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 198,
"path": "/my_model_selectors.py",
"repo_name": "mrbungie/AIND-Recognizer",
"src_encoding": "UTF-8",
"text": "import math\nimport statistics\nimport warnings\n\nimport numpy as np\nfrom hmmlearn.hmm import GaussianHMM\nfrom sklearn.model_selection import KFold\nfrom asl_utils import combine_sequences\n\n\nclass ModelSelector(object):\n '''\n base class for model selection (strategy design pattern)\n '''\n\n def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,\n n_constant=3,\n min_n_components=2, max_n_components=10,\n random_state=14, verbose=False):\n self.words = all_word_sequences\n self.hwords = all_word_Xlengths\n self.sequences = all_word_sequences[this_word]\n self.X, self.lengths = all_word_Xlengths[this_word]\n self.this_word = this_word\n self.n_constant = n_constant\n self.min_n_components = min_n_components\n self.max_n_components = max_n_components\n self.random_state = random_state\n self.verbose = verbose\n\n def select(self):\n raise NotImplementedError\n\n def base_model(self, num_states):\n # with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n # warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n try:\n hmm_model = GaussianHMM(n_components=num_states, covariance_type=\"diag\", n_iter=1000,\n random_state=self.random_state, verbose=False).fit(self.X, self.lengths)\n if self.verbose:\n print(\"model created for {} with {} states\".format(self.this_word, num_states))\n return hmm_model\n except:\n if self.verbose:\n print(\"failure on {} with {} states\".format(self.this_word, num_states))\n return None\n\n\nclass SelectorConstant(ModelSelector):\n \"\"\" select the model with value self.n_constant\n\n \"\"\"\n\n def select(self):\n \"\"\" select based on n_constant value\n\n :return: GaussianHMM object\n \"\"\"\n best_num_components = self.n_constant\n return self.base_model(best_num_components)\n\n\nclass SelectorBIC(ModelSelector):\n \"\"\" select the model with the lowest Bayesian Information Criterion(BIC) score\n\n http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf\n Bayesian information criteria: BIC = -2 * logL + p * logN\n \"\"\"\n\n def select(self):\n \"\"\" select the best model for self.this_word based on\n BIC score for n between self.min_n_components and self.max_n_components\n\n :return: GaussianHMM object\n \"\"\"\n best_bic = float('Inf') # to start the selector\n best_model = None # just in case we don't find any\n # for each num_component to test\n for num_components in range(self.min_n_components, self.max_n_components+1):\n try:\n # we train the model and get its log-likelihood\n current_model = self.base_model(num_components)\n logL = current_model.score(self.X, self.lengths)\n # number of parameters according to https://discussions.udacity.com/t/number-of-parameters-bic-calculation/233235/15\n bic = -2 * logL + (num_components ** 2 + 2 * num_components * current_model.n_features - 1 ) * np.log(len(self.sequences))\n # shall it be better than the best_bic yet, it becomes the best_bic and we select this model as the best_model\n if bic < best_bic:\n best_bic = bic\n best_model = current_model\n except:\n # copied from the function above (base_model)\n if self.verbose:\n print(\"failure on {} with {} states, continuing\".format(self.this_word, num_components))\n pass\n # we return the best_model\n return best_model\n\nclass SelectorDIC(ModelSelector):\n ''' select best model based on Discriminative Information Criterion\n\n Biem, Alain. 
\"A model selection criterion for classification: Application to hmm topology optimization.\"\n Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf\n DIC = log(P(X(i)) - 1/(M-1)SUM(log(P(X(all but i))\n '''\n\n def select(self):\n best_dic = float('-Inf') # to start the selector\n best_model = None # just in case we find no model\n\n other_words = [value for key, value in self.hwords.items() if key != self.this_word] # \n for num_components in range(self.min_n_components, self.max_n_components+1):\n try:\n # we train for the current word and we get the logL\n current_model = self.base_model(num_components)\n Xi_logL = current_model.score(self.X, self.lengths)\n sum_other_Xi_logL = float(0)\n # we score every other class, calculating logL for competing words\n for word in other_words:\n sum_other_Xi_logL += current_model.score(word[0], word[1])\n dic = Xi_logL - (1/len(other_words))*sum_other_Xi_logL\n # according to the paper, if the model presents a greater criterion value, it's a better model \n # shall it be better than the best_dic yet, it becomes the best_dic and we select this model as the best_model\n if dic > best_dic:\n best_dic = dic\n best_model = current_model\n except:\n # copied from the function above (base_model)\n if self.verbose:\n print(\"failure on {} with {} states, continuing\".format(self.this_word, num_components))\n continue\n # we return the best_model\n return best_model\n\n\nclass SelectorCV(ModelSelector):\n ''' select best model based on average log Likelihood of cross-validation folds\n\n '''\n \n \n def select(self):\n best_logLavg = float('-Inf') # to start the selector\n best_model = None # just in case we don't find a model\n best_num_components = None # just in case we don't find a model\n\n def cv_loop(num_components):\n \"\"\" CV loop helper function \"\"\"\n logLs = []\n # I thought I needed to do something like this (as it was failing for FISH) but I confirmed it using the forums: https://discussions.udacity.com/t/selectorcv-fails-to-train-fish/338796\n split_method = KFold(n_splits=min(3,len(self.sequences))) \n # for each fold\n for cv_train_idx, cv_test_idx in split_method.split(self.sequences):\n try:\n # we get X and lengths for both train and test set\n X_train, lengths_train = combine_sequences(cv_train_idx, self.sequences)\n X_test, lengths_test = combine_sequences(cv_test_idx, self.sequences)\n # we train the model\n current_model = GaussianHMM(n_components=num_components, covariance_type=\"diag\", n_iter=1000,\n random_state=self.random_state, verbose=False).fit(X_train, lengths_train)\n # and we append the logL to our list\n logLs.append(current_model.score(X_test, lengths_test))\n except:\n # copied from the function above (base_model)\n if self.verbose:\n print(\"failure on {} with {} states, continuing\".format(self.this_word, num_components))\n continue\n # if we found at least one logL we return the average\n if len(logLs) > 0:\n return (sum(logLs)/len(logLs))\n else:\n return float('-Inf')\n\n for num_components in range(self.min_n_components, self.max_n_components+1):\n if len(self.sequences) > 1:\n # in case CV is possible (>1 sequences) we do the cv loop\n logLavg = cv_loop(num_components)\n else:\n # if <1 sequences, we train using all the data (no cv possible)\n logLavg = float('-Inf')\n try:\n current_model = self.base_model(num_components)\n logLavg = current_model.score(self.X, 
self.lengths)\n except:\n pass\n\n # we compare the current logLavg with the best one yet, if it's better, we assign it as the best logLavg and set\n # the number of components as the best yet\n if logLavg > best_logLavg:\n best_logLavg = logLavg\n best_num_components = num_components\n\n # if we found the best number of components, we create a new model\n if best_num_components is not None:\n best_model = self.base_model(best_num_components)\n\n return best_model"
}
] | 1 |
damasosanchezarenas/SquareRootPy | https://github.com/damasosanchezarenas/SquareRootPy | e27997e9ec7a38a463155bc44a3adcf29cb727bb | ac15e7b91bcd6011f0c9dc15a37a2c845304a098 | f39bc6e18e6c80ce8c02ab6d8cf95773187ea731 | refs/heads/master | 2022-10-27T02:00:12.495254 | 2020-06-09T22:09:08 | 2020-06-09T22:09:08 | 271,118,198 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8062015771865845,
"alphanum_fraction": 0.8100775480270386,
"avg_line_length": 128,
"blob_id": "1d02f2ae29dbb23355deb58742e92cf48794f28b",
"content_id": "b923b10ea788f2136a331cb8c0c98586a602c5fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 242,
"num_lines": 2,
"path": "/README.md",
"repo_name": "damasosanchezarenas/SquareRootPy",
"src_encoding": "UTF-8",
"text": "# SquareRootPy\nThis program calculates the square root of a number. You can choose 3 types of algorithms: exhaustive enumeration, solution approach and binary search. If the integer to search does not have an exact square root, the program says the closest.\n"
},
{
"alpha_fraction": 0.5901249051094055,
"alphanum_fraction": 0.6091611981391907,
"avg_line_length": 22.34722137451172,
"blob_id": "4d17a6150a182833ce874299f7aeee2d5976c9f8",
"content_id": "58e1fe9b1898f867d4168819b8f9254337d7f78c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1681,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 72,
"path": "/SquareRoot.py",
"repo_name": "damasosanchezarenas/SquareRootPy",
"src_encoding": "UTF-8",
"text": "def exhaustiveEnumeration(goal):\n result=0\n while result**2 < goal:\n result+=1\n\n if result**2 == goal :\n print(f\"The square root of {goal} is {result}\")\n else :\n print(\"The exact square root of this number does not exist\")\n\n\n##exhaustiveEnumeration(goal)\n\ndef solutionsApproach(goal):\n epsilon=0.01\n step=epsilon**2\n response=0\n\n while abs(response**2 - goal) >= epsilon and epsilon < goal:\n response+=step\n \n if abs(response**2 - goal) >= epsilon:\n print(f\"The square root of {goal} is {result}\")\n else:\n print(f\"The aproximate square root is {response}\")\n\n##solutionsApproach(goal)\n\n\ndef binarySearch(goal):\n epsilon=0.01\n min_value=0.0\n max_value= max(1.0, goal)\n response = (max_value + min_value) /2\n\n while abs(response**2 - goal) >= epsilon:\n if response**2 <goal:\n min_value=response\n else : \n max_value=response\n\n response = (max_value + min_value) /2\n\n print(f\"The square root of {goal} is {response}\")\n\n##binarySearch(goal)\n\npresentation = \"Welcome to the program to take out the square root of a whole number :) \\n\"\nprint(presentation.center(10,\"-\"))\n\ngoal = int(input(\"Please enter the number of which you wish to calculate the square root: \"))\n\nprint(\"\"\"Which option do you prefer?\"\n [1]. Exhaustive Enumeration\n [2]. Solutions Approach\n [3]. Binary Search\n [4]. Exit\"\"\")\n\n\noption = int(input(\": \"))\n\nif option==1: \n exhaustiveEnumeration(goal)\n\nelif option==2: \n solutionsApproach(goal)\n\nelif option==3: \n binarySearch(goal)\n\nelif option==4: \n print (\"Bye Bye :)\")\n"
}
] | 2 |
tdhock/max-generalized-auc | https://github.com/tdhock/max-generalized-auc | adc3f71b9421dbad8b2b2f73f7fbf8ac70a3fd23 | 1ccd52717deb1aacb398fab7b2cc7bad4b3f17c2 | 22771c6f84bee93c7ec677986ccb81d77b9d079f | refs/heads/master | 2023-05-10T15:07:43.884559 | 2023-05-09T13:36:50 | 2023-05-09T13:36:50 | 189,482,937 | 1 | 2 | null | 2019-05-30T21:09:52 | 2021-06-26T18:53:37 | 2021-07-07T13:06:26 | R | [
{
"alpha_fraction": 0.574195384979248,
"alphanum_fraction": 0.5835944414138794,
"avg_line_length": 37.79558181762695,
"blob_id": "288b9c8691690c9735ed8f39e915b401a19ed3a5",
"content_id": "0ea4bae6622bc62624974d49ce6e8d7d80b80679",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7022,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 181,
"path": "/figure-DNA-Sonar-subtrain-valid-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "data(package=\"mlbench\")\ndata(Sonar, package=\"mlbench\")\ndata(DNA, package=\"mlbench\")\ndata.list <- list(\n Sonar=list(\n input.mat=as.matrix(Sonar[,1:60]),\n output.vec=ifelse(Sonar$Class==\"R\", 1, -1)),\n DNA=list(\n input.mat=ifelse(as.matrix(DNA[,1:180])==0, 0, 1),\n output.vec=ifelse(DNA$Class==\"n\", -1, 1)))\nN <- 10\nset.seed(1)\nrand.pred.vec <- rnorm(N)\nsubtrain.output.vec <- rep(c(-1, 1), l=N)\nsubtrain.diff.count.dt <- aum::aum_diffs_binary(subtrain.output.vec, denominator=\"count\")\nsubtrain.diff.rate.dt <- aum::aum_diffs_binary(subtrain.output.vec, denominator=\"rate\")\nlibrary(data.table)\nPairsDT <- function(output.vec){\n is.positive <- output.vec == 1\n data.table(expand.grid(\n positive=which(is.positive),\n negative=which(!is.positive)))\n}\nsubtrain.pairs.dt <- PairsDT(subtrain.output.vec)\nmargin <- 1\n## Note: for efficiency subtrain labels are assumed to be pre-computed\n## in the enclosing environment, once before the optimization starts.\nAUM <- function(pred.vec, diff.dt){\n L <- aum::aum(diff.dt, pred.vec)\n d <- L$derivative_mat\n non.diff <- abs(d[,1] - d[,2]) > 1e-6\n if(any(non.diff)){\n cat(sprintf(\"%d non-diff points\\n\", sum(non.diff)))\n print(d[non.diff, ])\n }\n with(L, list(gradient=derivative_mat[,1], loss=aum))\n}\nloss.list <- list(\n logistic=function(pred.vec, output.vec=subtrain.output.vec, ...){\n N <- length(pred.vec)\n list(\n gradient=-output.vec/(1+exp(output.vec*pred.vec))/N,\n loss=sum(log(1+exp(-output.vec*pred.vec)))/N)\n },\n aum.count=function(pred.vec, diff.count.dt=subtrain.diff.count.dt, ...){\n AUM(pred.vec, diff.count.dt)\n },\n aum.rate=function(pred.vec, diff.rate.dt=subtrain.diff.rate.dt, ...){\n AUM(pred.vec, diff.rate.dt)\n },\n squared.hinge.all.pairs=function(pred.vec, pairs.dt=subtrain.pairs.dt, ...){\n pairs.dt[, diff := pred.vec[positive]-pred.vec[negative]-margin]\n pairs.dt[, diff.clipped := ifelse(diff<0, diff, 0)]\n pairs.tall <- data.table::melt(\n pairs.dt,\n measure.vars=c(\"positive\", \"negative\"),\n value.name=\"pred.i\",\n variable.name=\"label\")\n ## d/dx (x - y - m)^2 = x - y - m\n ## d/dy (x - y - m)^2 = -(x - y - m)\n pairs.tall[, grad.sign := ifelse(label==\"positive\", 1, -1)]\n N.pairs <- nrow(pairs.dt)\n grad.dt <- pairs.tall[, .(\n gradient=sum(grad.sign*diff.clipped)\n ), keyby=pred.i]\n list(gradient=grad.dt$gradient/N.pairs, loss=sum(pairs.dt$diff.clipped^2)/N.pairs)\n }\n)\nresult.list <- list()\nfor(loss.name in names(loss.list)){\n fun <- loss.list[[loss.name]]\n result.list[[loss.name]] <- fun(rand.pred.vec)\n}\nstr(result.list)\nsapply(result.list, \"[[\", \"gradient\")\nout.loss.list <- list()\nfor(data.name in names(data.list)){\n input.output.list <- data.list[[data.name]]\n input.mat <- input.output.list[[\"input.mat\"]]\n full.input.mat <- scale(input.mat)\n full.output.vec <- input.output.list[[\"output.vec\"]]\n stopifnot(full.output.vec %in% c(-1, 1))\n set.seed(1)\n n.folds <- 4\n unique.folds <- 1:n.folds\n fold.vec <- sample(rep(unique.folds, l=length(full.output.vec)))\n for(validation.fold in unique.folds){\n is.set.list <- list(\n validation=fold.vec == validation.fold,\n subtrain=fold.vec != validation.fold)\n set.data.list <- list()\n for(set.name in names(is.set.list)){\n is.set <- is.set.list[[set.name]]\n output.vec <- full.output.vec[is.set]\n set.data.list[[set.name]] <- list(\n output.vec=output.vec,\n input.mat=full.input.mat[is.set,],\n diff.rate.dt=aum::aum_diffs_binary(output.vec, denominator=\"rate\"),\n 
diff.count.dt=aum::aum_diffs_binary(output.vec, denominator=\"count\"),\n pairs.dt=PairsDT(output.vec))\n }\n X.mat <- set.data.list$subtrain$input.mat\n for(loss.name in names(loss.list)){\n loss.grad.fun <- loss.list[[loss.name]]\n step.candidates <- 10^seq(-2, 2, by=0.25)\n for(step.size in step.candidates){\n set.seed(1)\n weight.vec <- rnorm(ncol(X.mat))\n done <- FALSE\n iteration <- 0\n prev.set.loss.vec <- rep(1e10, 2)\n while(!done){\n iteration <- iteration+1\n loss.for.weight <- function(w, set.data=set.data.list$subtrain){\n pred <- set.data$input.mat %*% w\n set.data$pred.vec <- pred\n out <- do.call(loss.grad.fun, set.data)\n out$pred <- pred\n out\n }\n loss.before.step <- loss.for.weight(weight.vec)\n direction <- -t(X.mat) %*% loss.before.step[[\"gradient\"]]\n loss.for.step <- function(step.size){\n new.weight <- weight.vec + step.size * direction\n out <- loss.for.weight(new.weight)\n out$new.weight <- new.weight\n out$step.size <- step.size\n out\n }\n loss.after.step <- loss.for.step(step.size)\n weight.vec <- loss.after.step[[\"new.weight\"]]\n set.loss.vec <- numeric()\n for(set.name in names(set.data.list)){\n set.data <- set.data.list[[set.name]]\n set.loss <- loss.for.weight(weight.vec, set.data)\n set.loss.vec[[set.name]] <- set.loss[[\"loss\"]]\n roc.df <- WeightedROC::WeightedROC(set.loss[[\"pred\"]], set.data[[\"output.vec\"]])\n auc <- WeightedROC::WeightedAUC(roc.df)\n out.dt <- data.table(\n data.name, validation.fold, loss.name, step.size, iteration, set.name,\n auc,\n loss.value=set.loss$loss)\n for(aum.type in c(\"count\", \"rate\")){\n diff.name <- paste0(\"diff.\", aum.type, \".dt\")\n aum.list <- aum::aum(set.data[[diff.name]], set.loss[[\"pred\"]])\n out.col <- paste0(\"aum.\", aum.type)\n out.dt[[out.col]] <- aum.list[[\"aum\"]]\n }\n out.loss.list[[paste(\n data.name, validation.fold, loss.name, step.size, iteration, set.name\n )]] <- out.dt\n }#for(set.name\n diff.set.loss.vec <- set.loss.vec - prev.set.loss.vec\n max.inc.iterations <- 10\n valid.increasing.iterations <- if(!is.finite(diff.set.loss.vec[[\"validation\"]])){\n max.inc.iterations\n }else if(0 <= diff.set.loss.vec[[\"validation\"]]){\n cat(sprintf(\n \"data=%s fold=%d loss=%s step=%f it=%d non-dec-iterations=%d\\n\",\n data.name, validation.fold, loss.name, step.size, iteration,\n valid.increasing.iterations))\n valid.increasing.iterations+1\n }else{\n 0\n }\n if(\n max.inc.iterations <= valid.increasing.iterations\n || loss.after.step$step.size == 0\n || 1000 < iteration\n ){\n done <- TRUE\n }\n prev.set.loss.vec <- set.loss.vec\n }#while(!done\n }#for(step.size\n }#for(loss.name\n }#for(validation.fold\n}\nout.loss <- do.call(rbind, out.loss.list)\ndata.table::fwrite(out.loss, \"figure-DNA-Sonar-subtrain-valid-data.csv\")\nsystem(\"gzip figure-DNA-Sonar-subtrain-valid-data.csv\")\n"
},
{
"alpha_fraction": 0.6172136664390564,
"alphanum_fraction": 0.6402544975280762,
"avg_line_length": 40.58634948730469,
"blob_id": "051b3f888041bdc8af76bcf3e52ff4a88e01579e",
"content_id": "5bb49b9d22faef27e6fcdef5d9b3ef4173ccff9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 29862,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 718,
"path": "/figure-jadon-timings.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\nlibrary(batchtools)\nlibrary(aum)\nlibrary(dplyr)\nlibrary(purrr)\n\n(testFold.vec <- Sys.glob(\"../neuroblastoma-data/data/*/cv/*/testFolds/*\"))\ntestFold.path <- \"../neuroblastoma-data/data/H3K27ac-H3K4me3_TDHAM_BP/cv/equal_labels/testFolds/3\"\nseed <- 1\ninit.name=\"IntervalRegressionCV\"\naum.type=\"count\"\nOneBatch <- function(testFold.path, aum.type, init.name, seed){\n library(data.table)\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n ## replace positive fn at end with 0 to avoid AUM=Inf.\n data.list$evaluation[, `:=`(\n min.fn=min(fn),\n max.fp=max(fp),\n min.lambda = exp(min.log.lambda),\n example=sequenceID\n ), by=sequenceID]\n bad <- data.list$evaluation[min.log.lambda == -Inf & min.fn < fn]\n if(nrow(bad)){\n print(bad)\n }\n data.list$evaluation[min.log.lambda == -Inf & 0 < fn]\n ## code below not necessary since this does not happen in our real\n ## data sets, but it could theoretically in some data.\n data.list$aum.input <- data.table(data.list$evaluation)[, `:=`(\n possible.fn=possible.fn-min.fn,\n fn=fn-min.fn,\n possible.fp=max.fp\n ), by=sequenceID]\n ## read folds. \n folds.dt <- data.table::fread(folds.csv)\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := rep(\n c(\"subtrain\", \"validation\"), l=.N)]\n folds.dt[, table(fold, set)]\n X.all <- scale(data.list$inputs[, -1])#rm seqID.\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.vec <- folds.dt[rownames(X.finite), set, on=\"sequenceID\"]\n seqs.list <- list()\n diffs.list <- list()\n aum.vec.list <- list()\n for(s in unique(folds.dt$set)){\n seqs.set <- folds.dt[s==set, sequenceID]\n seqs.list[[s]] <- seqs.set\n seqs.diff <- aum::aum_diffs_penalty(\n data.list$evaluation,\n seqs.set,\n denominator=aum.type)\n diffs.list[[s]] <- seqs.diff\n }\n totals <- colSums(diffs.list$subtrain[, .(fp_diff, fn_diff)])\n X.subtrain <- X.finite[set.vec==\"subtrain\",]\n neg.t.X.subtrain <- -t(X.subtrain)\n seqs.train <- with(seqs.list, c(subtrain, validation))\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n N.param <- ncol(X.finite)+1\n init.param <- structure(\n rep(0, N.param),\n names=c(\"(Intercept)\",colnames(X.finite)))\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ])\n some.param <- fit[[\"param.mat\"]]\n init.param[names(some.param)] <- some.param\n init.param\n },\n zero=function(){\n init.param+rnorm(N.param)\n }\n )\n iteration.dt.list <- list()\n considered.dt.list <- list()\n obj.sign.list <- list(aum=1)#list(auc=-1, aum=1)\n for(seed in seeds)for(init.name in names(init.fun.list)){\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n for(algo in 
c(\"grid\",\"exactL\",\"exactQ\",\"hybrid\"))for(objective in names(obj.sign.list)){\n start.time <- microbenchmark::get_nanotime()\n computeROC <- function(w, i, set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=-as.numeric(pred.pen.vec))\n is.set <- set.vec==set\n set.dt <- pred.dt[is.set]\n L <- penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n alist <- aum_auc(diffs.list[[set]], pred.pen.vec[ seqs.list[[set]], ])\n L$aum.diffs <- alist$aum\n L$auc.diffs <- alist$auc\n L\n }\n aum_auc <- function(diffs.dt, pred.vec){\n aum.list <- aum::aum(diffs.dt, pred.vec)\n before.dt <- data.table(aum.list$total_error, key=\"thresh\")[, `:=`(\n TPR_before=1-fn_before/-totals[[\"fn_diff\"]],\n FPR_before=fp_before/totals[[\"fp_diff\"]])]\n aum.list$auc <- before.dt[, .(\n FPR=c(FPR_before, 1),\n TPR=c(TPR_before, 1)\n )][, sum((FPR[-1]-FPR[-.N])*(TPR[-1]+TPR[-.N])/2)]\n aum.list\n }\n obj.sign <- obj.sign.list[[objective]]\n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n prev.obj <- Inf*obj.sign\n step.number <- 0\n elapsed.time <- 0\n max.iterations <- if (algo == \"exactQ\") {\n nrow(diffs.list$subtrain) * (nrow(diffs.list$subtrain)) - 1 / 2\n } else {\n nrow(diffs.list$subtrain)\n }\n while({\n summary.dt.list <- list()\n for(set in names(seqs.list)){\n set.PL <- computeROC(weight.vec, intercept, set)\n summary.dt.list[[set]] <- with(set.PL, data.table(\n set,\n thresholds[threshold==\"predicted\"],\n auc, aum, auc.diffs, aum.diffs))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n current.time <- microbenchmark::get_nanotime() - start.time\n iteration.dt.list[[paste(\n seed, init.name, algo, step.number, objective\n )]] <- data.table(\n seed, init.name, algo, step.number, objective, elapsed.time, time=current.time, summary.dt)\n new.obj <- summary.dt.list$subtrain[[paste0(objective,\".diffs\")]]\n improvement <- obj.sign*(prev.obj-new.obj)\n cat(sprintf(\n \"seed=%d init=%s algo=%s step=%d %s %f->%f\\n\",\n seed, init.name, algo, step.number, objective, prev.obj, new.obj))\n 1e-5 < improvement\n }){\n ##while(step.number<2){\n pred.vec <- X.subtrain %*% weight.vec\n aum.list <- aum::aum(diffs.list$subtrain, pred.vec)\n pred.grad.vec <- rowMeans(aum.list$derivative_mat)\n direction.vec <- neg.t.X.subtrain %*% pred.grad.vec\n take.step <- function(s){\n weight.vec+s*direction.vec\n }\n ptm <- proc.time() # timer\n grid.result <- NULL\n exact.result <- NULL\n if (algo == \"grid\") {\n step.grid <- 10^seq(-9, 0)\n grid.dt <- data.table(step.size=step.grid)[, {\n step.weight <- take.step(step.size)\n grid.aum <- aum_auc(diffs.list$subtrain, X.subtrain %*% step.weight)\n with(grid.aum, data.table(auc, aum))\n }, by=step.size]\n grid.result <- grid.dt[, .(search=\"grid\", step.size, auc, aum)]\n } else if (algo == \"exactL\" || algo == \"exactQ\") {\n LS=aum::aum_line_search(\n diffs.list$subtrain, X.subtrain, weight.vec, maxIterations=max.iterations)\n exact.result <- LS$line_search_result[, .(search=\"exact\", step.size, auc, aum)]\n } else if (algo == \"hybrid\") {\n LS=aum::aum_line_search(\n diffs.list$subtrain, X.subtrain, weight.vec, maxIterations=max.iterations)\n exact.result <- LS$line_search_result[, .(search=\"exact\", step.size, auc, aum)]\n search.result <- data.table(LS$line_search_result)\n search.result[, kink := .I/.N]\n best.row <- search.result[which.min(aum)] \n if (best.row$kink == 1) {\n # if kink == 1, we have chosen the very last step size we looked at.\n # 
run a grid search where we're at to find a larger step.size\n steps.list <- list()\n for (s in 10^seq(1,5)) {\n step.size <- best.row$step.size * s\n step.weight <- take.step(step.size)\n step.aum <- aum_auc(diffs.list$subtrain, X.subtrain %*% step.weight)\n if (step.aum$aum < best.row$aum) { # TODO AUC check\n step.result <- data.table(search=\"grid\", step.size, auc=step.aum$auc, aum=step.aum$aum)\n steps.list[[paste(s)]] <- step.result\n } else {\n break\n }\n }\n if (length(steps.list) > 0) {\n grid.result <- rbindlist(steps.list)\n }\n }\n }\n elapsed.time <- (proc.time() - ptm)[[\"elapsed\"]] # timer end\n steps.considered <- rbind(\n exact.result,\n grid.result\n )[, step.prop := seq(1, .N)/.N, by=search][]\n #considered.dt.list[[paste(\n # seed, init.name, algo, objective, step.number\n #)]] <- data.table(\n # seed, init.name, algo, objective, step.number, steps.considered)\n best.step <- steps.considered[which.min(obj.sign*get(objective))]\n weight.vec <- take.step(best.step$step.size)\n new.aum <- aum::aum(diffs.list$subtrain, X.subtrain %*% weight.vec)\n err.thresh <- data.table(\n new.aum$total_error,key=\"thresh\"\n )[, err_before := fp_before+fn_before][, .(\n thresh=c(thresh[1]-1,thresh[-1]-diff(thresh)/2,thresh[.N]+1),\n err=c(err_before,sum(diffs.list$subtrain$fp_diff))\n )]\n intercept <- err.thresh[which.min(err), thresh]\n step.number <- step.number+1\n prev.obj <- new.obj\n }#step.number\n }#algo/objective\n }#seed/init.name\n list(\n sets=data.table(\n do.call(rbind, iteration.dt.list),\n testFold.path, data.name, cv.type, test.fold))\n #steps=data.table(\n # rbindlist(considered.dt.list),\n # data.name, cv.type, test.fold))\n}\n\n# Set of Jobs to run\n# aum.type is FPR/FNR or FP/FN count\n# init.name is the starting weight matrix\nargs.dt <- data.table::CJ(\n testFold.path=datasets.by.size$name[1:181],#testFold.vec,\n aum.type=c(\"rate\"),#,\"count\")\n init.name=c(\"zero\"),#, \"IntervalRegressionCV\")\n seed=c(1, 2, 3, 4)\n)\n\n## Run on SLURM.\nregistry.dir <- \"figure-line-grid-search-interactive-registry\"\nregistry.dir <- \"figure-line-grid-search-interactive-registry-6\"#4 datasets w hybrid A,B (1.2mb)\nregistry.dir <- \"figure-line-grid-search-interactive-registry-7\"#23 datasets (1.6mb)\nregistry.dir <- \"figure-line-grid-search-interactive-registry-8\"#70 datasets (20mb)\nregistry.dir <- \"figure-line-grid-search-interactive-registry-9\"#109 datasets (28mb)\nregistry.dir <- \"figure-line-grid-search-interactive-registry-10\"#[1:109] datasets w/ init.name=c(\"zero\", \"IntervalRegressionCV\")\nregistry.dir <- \"figure-line-grid-search-interactive-registry-11\"#[1:181]\nregistry.dir <- \"figure-line-grid-search-interactive-registry-12\"# new hybridC\nregistry.dir <- \"figure-line-grid-search-interactive-registry-13\"# better params for hybridB (it's like hybridC now but searches more grid points)\nregistry.dir <- \"figure-line-grid-search-interactive-registry-15\"# testing hybridD\nregistry.dir <- \"figure-line-grid-search-interactive-registry-16\"# maybe final run?\nregistry.dir <- \"figure-line-grid-search-interactive-registry-17\"# many-seeds\n\nif (FALSE) {\n reg=batchtools::loadRegistry(registry.dir, writeable = TRUE)\n #batchtools::clearRegistry(reg)\n batchtools::getStatus(reg=reg)\n batchtools::findExpired(reg=reg)\n status.dt <- batchtools::getJobStatus(reg=reg)\n status.dt[!is.na(error)]\n status.dt[!is.na(done)]\n}\n\n#analyze.\nif(FALSE){\n done.ids <- status.dt[is.na(error), job.id]\n for(done.i in done.ids){\n job.id <- done.ids[[done.i]]\n 
args.row <- args.dt[job.id]\n ls.dir <- file.path(args.row$testFold.path, \"line_search\", \"sets\")\n dir.create(ls.dir, showWarnings = FALSE, recursive = TRUE)\n ls.csv <- file.path(ls.dir, paste0(args.row$aum.type, \".csv\"))\n if(!file.exists(ls.csv)){\n cat(sprintf(\"%4d / %4d %s\\n\", done.i, length(done.ids), ls.csv))\n res <- batchtools::loadResult(job.id)\n best.steps <- res$steps[\n , .SD[which.min(aum)], by=.(\n seed,init.name,algo,objective,step.number\n )][,.(seed,init.name,algo,objective,step.number=step.number+1,search)]\n join.dt <- best.steps[res$sets, on=.(\n seed,init.name,algo,objective,step.number\n )]\n join.dt[is.na(search), table(step.number)]\n fwrite(join.dt, ls.csv)\n } \n }\n}\n\n# args.dt[5:8] CTCF_TDH_ENCODE 2407\n# args.dt[13:16] H3K27ac_TDH_some 798\n# args.dt[17:20] H3K27me3_RL_cancer 592\n# args.dt[21:24] H3K27me3_TDH_some 296\n# args.dt[29:32] H3K36me3_TDH_ENCODE 377\n# args.dt[37:40] H3K36me3_TDH_other 554\n# args.dt[c(13:16,21:24,29:32,37:40,17:20)]\n\nif(FALSE){\n unlink(registry.dir, recursive=TRUE)\n}\n# CREATE REGISTRY AND RUN JOBS\nreg <- batchtools::makeRegistry(file.dir=registry.dir)\n#parallel.job.count <- 8\nreg$cluster.functions <- makeClusterFunctionsMulticore()\n# sample random test folds\n#batchtools::batchMap(OneBatch, args=args.dt[sample(1:nrow(args.dt), 5)], reg=reg)\nbatchtools::batchMap(OneBatch, args=args.dt, reg=reg)\njob.table <- batchtools::getJobTable(reg=reg)\nchunks <- data.frame(job.table,chunk=job.table$job.id)\n#chunks <- data.frame(job.table,chunk=(job.table$job.id%%parallel.job.count)+1)\n#chunks <- data.frame(job.table, chunk=1)\noptions(batchtools.verbose=TRUE, batchtools.progress=TRUE)\nrunJobs <- function() {\n ptm <- proc.time()\n batchtools::submitJobs(chunks, resources=list(\n walltime = 24 * 60 * 60,#seconds\n memory = 5000,#megabytes per cpu\n #max.concurrent.jobs = parallel.job.count,\n ncpus=1, #>1 for multicore/parallel jobs.\n ntasks=1, #>1 for MPI jobs.\n chunks.as.arrayjobs=FALSE), reg=reg)\n total.time <- (proc.time() - ptm)[[\"elapsed\"]]\n total.time\n}\ncat(sprintf(\"Finished running jobs in %f minutes\\n\", runJobs() / 60))\n\nbatchtools::getStatus(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\n\nggplot()+\n facet_grid(init.name + objective ~ ., labeller=label_both, scales=\"free\")+\n geom_point(aes(\n auc, algo),\n data=compare.obj.dt)+\n scale_x_continuous(\n \"Best validation AUC\")\n\n\n# jadon tests\n\n# load the first test and graph auc by each algorithm\nif(FALSE) {\n result.one <- loadResult(1, reg)\n ggplot(result.one$sets[objective==\"aum\"]) +\n geom_line(aes(x=step.number, y=aum, color=algo, linetype=as.factor(set))) +\n facet_grid(init.name ~ algo, scales=\"free\") +\n scale_x_log10() +\n scale_y_log10()\n}\n\n# load all results and build one big data table\nresult.sets.list <- list()\nstatus.dt <- batchtools::getJobStatus(reg=reg)\ncompleted.jobs <- status.dt[is.na(error)]$job.id\nfor (result.id in completed.jobs) {\n # ensure this job is done\n if (!is.na(status.dt[job.id==result.id]$done)) {\n r <- batchtools::loadResult(result.id, reg)\n r$sets[,result.id:=result.id]\n result.sets.list[[result.id]] <- r$sets\n r <- NULL\n }\n}\nresult.sets <- do.call(rbind, result.sets.list)\nresult.sets.list <- NULL\n\nalgo.time.by.dataset <- data.table(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo, testFold.path) %>%\n reframe(total.time = 
sum(elapsed.time)))\ncolnames(algo.time.by.dataset)[colnames(algo.time.by.dataset) == \"testFold.path\"] <- \"name\"\nresults.with.dataset.size <- merge(algo.time.by.dataset, datasets.by.size, on=.(name))\n\nalgo.time.by.dataset.with.inits <- data.table(result.sets[objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo, testFold.path, init.name) %>%\n reframe(total.time = sum(elapsed.time)))\ncolnames(algo.time.by.dataset.with.inits)[colnames(algo.time.by.dataset.with.inits) == \"testFold.path\"] <- \"name\"\nresults.with.dataset.size.and.init <- merge(algo.time.by.dataset.with.inits, datasets.by.size, on=.(name))\n\n\n# name for the folder for the images below to go in\nexperiment.name <- \"many-seeds\"\ndir.create(file.path(experiment.name))\n\n\n# palette for everything below\ncbPalette <- c(\"#E69F00\", \"#56B4E9\", \"#009E73\", \"#DA72B2\", \"#D55E00\", \"#F2D0A4\")\n\n# plot elapsed time per step of gradient descent for each algo/dataset\nggplot(result.sets[objective==\"aum\"][set==\"validation\"]) +\n geom_point(aes(x=step.number, y=elapsed.time, color=algo, linetype=as.factor(set))) +\n facet_grid(init.name ~ result.id, scale=\"free\") +\n #scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"elapsed.time.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"],\n aes(x=step.number, y=elapsed.time, color=algo, linetype=as.factor(set))) +\n geom_point() +\n geom_smooth() +\n facet_wrap(vars(result.id), scale=\"free\") +\n #facet_grid(init.name, scale=\"free\") +\n #scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"elapsed.time2.png\"), width=1920*3, height=1080*3, units=\"px\")\n\n# histogram version of the above\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"]) +\n geom_histogram(aes(x=elapsed.time, fill=algo), binwidth = 0.05, color=\"black\") +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Time per step of Gradient Descent\") +\n ylab(\"Count\") +\n xlab(\"Time (seconds)\")\nggsave(paste(sep=\"/\", experiment.name, \"elapsed.time3.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"]) +\n geom_boxplot(aes(x=algo, y=elapsed.time, fill=algo),color=\"black\") +\n #scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Time per step of Gradient Descent\") +\n ylab(\"Time (seconds)\") +\n xlab(\"Algorithm\")\nggsave(paste(sep=\"/\", experiment.name, \"elapsed.time4.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"]) +\n geom_violin(aes(x=algo, y=elapsed.time, fill=algo),color=\"black\") +\n #scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Time per step of Gradient Descent\") +\n ylab(\"Time (seconds)\") +\n xlab(\"Algorithm\")\nggsave(paste(sep=\"/\", experiment.name, \"elapsed.time5.png\"), width=1920*3, height=1080*3, units=\"px\")\n\n\n# plot aum for each dataset\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"]) +\n 
geom_line(aes(x=step.number, y=aum, color=algo, linetype=as.factor(set))) +\n facet_wrap(vars(result.id), scale=\"free\") +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"aum.png\"), width=1920*3, height=1080*3, units=\"px\")\n\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"]) +\n geom_line(aes(x=step.number, y=auc, color=algo, linetype=as.factor(set))) +\n facet_wrap(vars(result.id), scale=\"free\") +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"auc.png\"), width=1920*3, height=1080*3, units=\"px\")\n\n# total time by algo (bar chart)\nresult.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo) %>%\n summarize(total.time = sum(elapsed.time)) %>%\n ggplot() +\n geom_col(aes(x=algo, y=total.time, fill=algo)) +\n #scale_y_log10() +\n geom_text(aes(x=algo, y=total.time, label=round(total.time,digits=1)), position=position_dodge(width=0.9), vjust=-0.25) +\n facet_grid(. ~ result.id) +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"total.time.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"][result.id %in% c(1:8, 16:19)]) +\n geom_line(aes(x=step.number, y=aum, color=algo),size=1.1) +\n #geom_smooth(aes(x=time, y=aum, color=algo, fill=algo)) +\n facet_grid(data.name ~ test.fold, scale=\"free\", labeller = label_both) +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Validation AUM over steps of gradient descent for select datasets\") +\n #xlab(\"timestamp\") +\n ylab(\"AUM\")\nggsave(paste(sep=\"/\", experiment.name, \"aum.over.time.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"][result.id %in% c(1:8, 16:19)]) +\n geom_line(aes(x=step.number, y=aum, color=algo),size=1.1) +\n #geom_smooth(aes(x=time, y=aum, color=algo, fill=algo)) +\n facet_grid(data.name ~ test.fold, scale=\"free\", labeller = label_both) +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Validation AUM over steps of gradient descent for select datasets\") +\n xlab(\"step number\") +\n ylab(\"AUM\")\nggsave(paste(sep=\"/\", experiment.name, \"aum.over.time2.png\"), width=1920*2, height=1080*2, units=\"px\")\n\nggplot(result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"][result.id %in% c(1:8, 16:19)]) +\n geom_line(aes(x=step.number, y=auc, color=algo),size=1.1) +\n #geom_smooth(aes(x=time, y=aum, color=algo, fill=algo)) +\n facet_grid(data.name ~ test.fold, scale=\"free\", labeller = label_both) +\n scale_x_log10() +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ggtitle(\"Validation AUC over steps of gradient descent for select datasets\") +\n xlab(\"step number\") +\n ylab(\"AUC\")\nggsave(paste(sep=\"/\", experiment.name, \"auc.over.time2.png\"), width=1920*2, height=1080*2, units=\"px\")\n\n\n# boxplot total time by algo\nresult.sets[objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo, init.name) %>%\n reframe(total.time = sum(elapsed.time)) %>%\n ggplot() +\n #geom_violin(aes(x=algo, y=total.time, fill=algo)) +\n 
geom_boxplot(aes(x=algo, y=total.time, fill=algo)) +\n scale_y_log10() +\n #geom_text(aes(x=algo, y=total.time, label=round(total.time,digits=3)), position=position_dodge(width=0.9), vjust=-0.25) +\n facet_grid(. ~ init.name) +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) + \n xlab(\"Algorithm\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Total time across datasets\")\nggsave(paste(sep=\"/\", experiment.name, \"total.time.across.datasets.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nresult.sets[objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo, init.name) %>%\n reframe(total.time = sum(elapsed.time)) %>%\n ggplot() +\n geom_violin(aes(x=algo, y=total.time, fill=algo)) +\n #geom_boxplot(aes(x=algo, y=total.time, fill=algo)) +\n scale_y_log10() +\n #geom_text(aes(x=algo, y=total.time, label=round(total.time,digits=3)), position=position_dodge(width=0.9), vjust=-0.25) +\n facet_grid(. ~ init.name) +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) + \n xlab(\"Algorithm\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Total time across datasets\")\nggsave(paste(sep=\"/\", experiment.name, \"total.time.across.datasets2.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nresult.sets[objective==\"aum\"][init.name==\"zero\"][set==\"validation\"] %>%\n group_by(result.id, algo, init.name) %>%\n reframe(total.time = sum(elapsed.time)) %>%\n ggplot() +\n #geom_violin(aes(x=algo, y=total.time, fill=algo)) +\n geom_boxplot(aes(x=algo, y=total.time, fill=algo)) +\n scale_y_log10() +\n #geom_text(aes(x=algo, y=total.time, label=round(total.time,digits=3)), position=position_dodge(width=0.9), vjust=-0.25) +\n #facet_grid(. ~ init.name) +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) + \n xlab(\"Algorithm\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Total time across datasets\")\nggsave(paste(sep=\"/\", experiment.name, \"total.time.across.datasets3.png\"), width=1920*2, height=1080*2, units=\"px\")\n\n\nresults.with.dataset.size %>%\n ggplot() +\n geom_line(aes(x=size, y=total.time, color=algo)) +\n scale_y_log10() +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"size.affects.time.png\"), width=1920*2, height=1080*2, units=\"px\")\n\nresults.with.dataset.size %>%\n #group_by(algo, s=signif(size, 3)) %>%\n #reframe(t=mean(total.time)) %>%\n ggplot(aes(x=size, y=total.time, color=algo, fill=algo)) +\n geom_point() +\n geom_smooth(level=0.70,span=0.6) +\n scale_y_log10() +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n xlab(\"Size of Dataset (bytes)\") +\n ylab(\"Total time (seconds)\")\nggtitle(\"Dataset size vs. Algorithm time\")\nggsave(paste(sep=\"/\", experiment.name, \"size.affects.time2.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nresults.with.dataset.size.and.init %>%\n #group_by(algo, s=signif(size, 3)) %>%\n #reframe(t=mean(total.time)) %>%\n ggplot(aes(x=size, y=total.time, color=algo, fill=algo)) +\n geom_point(size=0.5) +\n geom_smooth(level=0.95)+#geom_smooth(level=0.70,span=0.6) +\n scale_y_log10() +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n facet_grid(init.name ~ .) +\n xlab(\"Size of Dataset (bytes)\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Dataset size vs. 
Algorithm time\")\nggsave(paste(sep=\"/\", experiment.name, \"size.affects.time3.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nresults.with.dataset.size %>%\n #group_by(algo, s=signif(size, 3)) %>%\n #reframe(t=mean(total.time)) %>%\n ggplot(aes(x=observations, y=total.time, color=algo, fill=algo)) +\n geom_point() +\n geom_smooth(span=1) +\n scale_y_log10() +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n xlab(\"# of observations\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Dataset size vs. Algorithm time\")\nggsave(paste(sep=\"/\", experiment.name, \"size.affects.time4.png\"), width=1920*2, height=1080*2, units=\"px\")\n\nrfac <- 2\nresults.with.dataset.size.and.init[, N := 10^(round(log10(size)*rfac)/rfac)]\n\nresults.with.dataset.size.and.init[,data.table(min=min(total.time),max=max(total.time),mean=mean(total.time)), by=.(algo, N)] %>%\n #group_by(algo, s=signif(size, 3)) %>%\n #reframe(t=mean(total.time)) %>%\n ggplot() +\n geom_ribbon(aes(x=N, ymin=min, ymax=max, fill=algo), alpha=0.10) +\n geom_line(aes(x=N, y=mean, color=algo), size=0.8) +\n geom_point(aes(x=N, y=max, color=algo), size=0.7, alpha=0.10) +\n geom_point(aes(x=N, y=min, color=algo), size=0.7, alpha=0.10) +\n geom_point(aes(x=N, y=mean, color=algo), size=1.2) +\n #geom_smooth(level=0.95)+#geom_smooth(level=0.70,span=0.6) +\n scale_y_log10() +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n #facet_grid(init.name ~ .) +\n xlab(\"B = number of breakpoints in error functions\") +\n ylab(\"Total time (seconds)\") + theme_bw()\n#ggtitle(\"Dataset size vs. Algorithm time\")\nggsave(paste(sep=\"/\", experiment.name, \"size.affects.time5.png\"), width=1920*0.75, height=1080*0.75, units=\"px\")\n\nresult.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo) %>%\n summarize(total.time = sum(elapsed.time)) %>%\n ggplot() +\n geom_col(aes(x=algo, y=total.time, fill=algo)) +\n #scale_y_log10() +\n geom_text(aes(x=algo, y=total.time, label=round(total.time,digits=1)), position=position_dodge(width=0.9), vjust=-0.25) +\n facet_grid(. 
~ result.id) +\n scale_colour_manual(values=cbPalette)\nggsave(paste(sep=\"/\", experiment.name, \"total.time.png\"), width=1920*3, height=1080*3, units=\"px\")\n\nresult.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"] %>%\n group_by(result.id, algo) %>%\n summarize(total.time = sum(elapsed.time)) %>%\n ggplot() +\n geom_vline(data=result.sets[init.name==\"zero\"][objective==\"aum\"][set==\"validation\"] %>%\n group_by(data.name) %>%\n summarize(total.time = sum(elapsed.time), max.id = max(result.id)),\n aes(xintercept=max.id+0.5),color=\"grey\") +\n geom_point(aes(x=result.id, y=total.time, color=algo),size=1.3) +\n #geom_smooth(aes(x=result.id, y=total.time, color=algo,fill=algo)) +\n #geom_col(aes(x=result.id, y=total.time, fill=algo)) +\n #geom_segment(aes(x=result.id,xend=result.id,y=0,yend=total.time,color=algo)) +\n scale_y_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n xlab(\"Fold/Job ID\") +\n ylab(\"Total time (seconds)\") +\n ggtitle(\"Total time for every dataset fold\")\nggsave(paste(sep=\"/\", experiment.name, \"total.time2.png\"), width=1920*1.5, height=1080*1.5, units=\"px\")\n\nselected.datasets <- c(\"H3K9me3_TDH_BP\", \"ATAC_JV_adipose\", \"H3K27ac-H3K4me3_TDHAM_BP\", \"detailed\")\nselected.datasets <- c(\"H3K9me3_TDH_BP\", \"detailed\")\n\n(dataset.labels <- map(selected.datasets, function(x) {\n size <- mean(datasets.by.size[data.name==x]$observations)\n rounded.size <- round(10^(round(log10(size)*2)/2))\n paste0(x, \" (n≃\",rounded.size,\")\")\n paste0(x, \" (n=\",size,\")\")\n paste0(\"B=\",round(size))\n}))\n\n\nresult.sets[data.name %in% selected.datasets] %>%\n group_by(result.id, algo, data.name, seed) %>%\n reframe(total.time = sum(elapsed.time)) %>%\n ggplot() +\n geom_boxplot(aes(x=total.time, y=factor(algo, levels=c(\"hybrid\",\"exactL\",\"exactQ\",\"grid\")), fill=algo), show.legend = FALSE) +\n facet_wrap(.~factor(data.name, levels=selected.datasets, labels=dataset.labels), scales = \"free\") +\n scale_x_log10() +\n scale_colour_manual(values=cbPalette) +\n scale_fill_manual(values=cbPalette) +\n ylab(\"Line Search Algorithm\") +\n xlab(\"Total time (seconds)\") +\n theme_bw()\n#ggtitle(\"Time for selected datasets\")\nggsave(paste(sep=\"/\", experiment.name, \"boxplot.datasets.png\"), width=1920*0.75, height=1080*0.75, units=\"px\")\n\n"
},
{
"alpha_fraction": 0.6480447053909302,
"alphanum_fraction": 0.6620111465454102,
"avg_line_length": 24.571428298950195,
"blob_id": "3a46fe5f519e9a986dcd9cd0f0982d1cef18049b",
"content_id": "77f25bfac98c65dab13051b9d8091f379a62c78a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 14,
"path": "/figure-binary-test-auc.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nresult.list <- readRDS(\"figure-binary-test-auc-data.rds\")\n\ntest.loss <- do.call(rbind, result.list)\ngg <- ggplot()+\n geom_point(aes(\n auc, loss.name),\n data=test.loss)+\n facet_grid(select ~ ., labeller=label_both)+\n xlab(\"Test AUC\")\npng(\"figure-binary-test-auc.png\", width=4, height=4, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.5894508361816406,
"alphanum_fraction": 0.608573853969574,
"avg_line_length": 30.72222137451172,
"blob_id": "f3f64d2261a8971fcc0eca81629771fe40157559",
"content_id": "c6e86ff4172288f4d1be2e3da75e738b0f4596dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 14276,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 450,
"path": "/figure-curveAlignment.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "### Write down what package versions work with your R code, and\n### attempt to download and load those packages. The first argument is\n### the version of R that you used, e.g. \"3.0.2\" and then the rest of\n### the arguments are package versions. For\n### CRAN/Bioconductor/R-Forge/etc packages, write\n### e.g. RColorBrewer=\"1.0.5\" and if RColorBrewer is not installed\n### then we use install.packages to get the most recent version, and\n### warn if the installed version is not the indicated version. For\n### GitHub packages, write \"user/repo@commit\"\n### e.g. \"tdhock/animint@f877163cd181f390de3ef9a38bb8bdd0396d08a4\" and\n### we use install_github to get it, if necessary.\nworks_with_R <- function(Rvers,...){\n local.lib <- file.path(getwd(), \"library\")\n dir.create(local.lib, showWarnings=FALSE, recursive=TRUE)\n .libPaths(c(local.lib, .libPaths()))\n pkg_ok_have <- function(pkg,ok,have){\n stopifnot(is.character(ok))\n if(!as.character(have) %in% ok){\n warning(\"works with \",pkg,\" version \",\n paste(ok,collapse=\" or \"),\n \", have \",have)\n }\n }\n pkg_ok_have(\"R\",Rvers,getRversion())\n pkg.vers <- list(...)\n for(pkg.i in seq_along(pkg.vers)){\n vers <- pkg.vers[[pkg.i]]\n pkg <- if(is.null(names(pkg.vers))){\n \"\"\n }else{\n names(pkg.vers)[[pkg.i]]\n }\n if(pkg == \"\"){# Then it is from GitHub.\n ## suppressWarnings is quieter than quiet.\n if(!suppressWarnings(require(requireGitHub))){\n ## If requireGitHub is not available, then install it using\n ## devtools.\n if(!suppressWarnings(require(devtools))){\n install.packages(\"devtools\")\n require(devtools)\n }\n install_github(\"tdhock/requireGitHub\")\n require(requireGitHub)\n }\n print(search())\n requireGitHub(vers)\n }else{# it is from a CRAN-like repos.\n if(!suppressWarnings(require(pkg, character.only=TRUE))){\n install.packages(pkg)\n }\n pkg_ok_have(pkg, vers, packageVersion(pkg))\n library(pkg, character.only=TRUE)\n }\n }\n}\noptions(repos=c(\n \"http://www.bioconductor.org/packages/release/bioc\",\n ##\"http://r-forge.r-project.org\",\n \"http://cloud.r-project.org\",\n \"http://cran.r-project.org\"))\nworks_with_R(\n \"4.1.0\",\n data.table=\"1.14.0\",\n future=\"1.21.0\",\n future.apply=\"1.7.0\",\n RJSONIO=\"1.3.1.4\",\n R.utils=\"2.10.1\",\n \"tdhock/penaltyLearning@4e14a0b0e022d919884277d68b8e47bd158459f3\",\n jointseg=\"1.0.2\",\n gridExtra=\"2.3\",\n neuroblastoma=\"1.0\",\n tikzDevice=\"0.12.3.1\",\n microbenchmark=\"1.4.7\",\n animint2=\"1.0\")\n\ncurveAlignment <- readRDS(\"curveAlignment.rds\")\nAUC.dt.list <- list()\nroc.dt.list <- list()\nerr.dt.list <- list()\nroc.segs.list <- list()\nroc.win.err.list <- list()\noff.by <- 0.2\noffset.prob <- curveAlignment$problems$prob.dir[2]\nfor(offset in seq(-5, 5, by=off.by)){\n print(offset)\n pred.dt <- data.table(\n curveAlignment$problems, pred.log.lambda=10+c(0, offset))\n pred.eval <- curveAlignment$evaluation[pred.dt, on=list(prob.dir)]\n pred.eval[, possible.fn := possible.tp]\n roc <- penaltyLearning::ROChange(\n pred.eval, pred.dt, \"prob.dir\")\n ## compute derivative of Area under min(FP, FN).\n thresh.dt <- pred.eval[order(-min.log.lambda), {\n fp.diff <- diff(fp)\n fp.change <- fp.diff != 0\n fn.diff <- diff(fn)\n fn.change <- fn.diff != 0\n fp.dt <- if(any(fp.change))data.table(\n log.lambda=min.log.lambda[c(fp.change, FALSE)],\n fp=as.numeric(fp.diff[fp.change]),\n fn=0)\n fn.dt <- if(any(fn.change))data.table(\n log.lambda=min.log.lambda[c(fn.change, FALSE)],\n fp=0,\n fn=as.numeric(fn.diff[fn.change]))\n 
##browser(expr=sample.id==\"McGill0322\")\n rbind(fp.dt, fn.dt)\n }, by=.(prob.dir)]\n pred.with.thresh <- thresh.dt[pred.dt, on=.(prob.dir), nomatch=0L]\n pred.with.thresh[, thresh := log.lambda - pred.log.lambda]\n first.dt <- pred.eval[max.log.lambda==Inf]\n thresh.ord <- pred.with.thresh[order(-thresh), .(\n prob.dir=c(NA, prob.dir),\n min.thresh=c(-Inf, log.lambda),\n max.thresh=c(log.lambda, Inf),\n fp = cumsum(c(sum(first.dt$fp), fp)),\n fn = cumsum(c(sum(first.dt$fn), fn)),\n change=c(0, ifelse(fp==0, fn, fp))\n )]\n thresh.ord[, min.fp.fn := ifelse(fp<fn, fp, fn)]\n thresh.ord[, min.change := c(NA, diff(min.fp.fn))]\n prob.deriv <- thresh.ord[min.change==change, .(\n deriv=-sum(change)\n ), by=.(prob.dir)]\n offset.deriv <- prob.deriv[offset.prob, on=.(prob.dir)]\n ## save info for display.\n pred.eval[, min.thresh := min.log.lambda-pred.log.lambda]\n pred.eval[, max.thresh := max.log.lambda-pred.log.lambda]\n pred.eval[, piece := 1:.N]\n err.dt.list[[paste(offset)]] <- data.table(offset, pred.eval)\n roc$roc[, thresh := (min.thresh+max.thresh)/2]\n pred.some.cols <- pred.dt[, list(id=1, prob.dir, pred.log.lambda)]\n roc.off.id <- data.table(offset, id=1, roc$roc)\n roc.off <- roc.off.id[pred.some.cols, on=list(\n id), allow.cartesian=TRUE]\n roc.off[, log.lambda := thresh + pred.log.lambda]\n roc.segs.list[[paste(offset)]] <-\n curveAlignment$segments[roc.off, nomatch=0L, allow.cartesian=TRUE, on=list(\n prob.dir,\n min.log.lambda<=log.lambda,\n max.log.lambda>=log.lambda)]\n roc.win.err.list[[paste(offset)]] <-\n curveAlignment$errors[roc.off, nomatch=0L, on=list(\n prob.dir,\n min.log.lambda<=log.lambda,\n max.log.lambda>=log.lambda)]\n off.min <- roc$roc[errors==min(errors)]\n roc$roc[, min.fp.fn := ifelse(fp<fn, fp, fn)]\n roc$roc[, width.thresh := max.thresh-min.thresh]\n roc$roc[, min.change.after := c(diff(min.fp.fn), NA)]\n min.positive <- roc$roc[0<min.fp.fn]\n bad <- min.positive[width.thresh==Inf]\n if(nrow(bad)){\n print(bad)\n stop(\"infinite AUM\")\n }\n AUM <- min.positive[, {\n sum(min.fp.fn*width.thresh)\n }]\n AUC.dt.list[[paste(offset)]] <- with(roc, data.table(\n AUC=auc, AUM, offset,\n AUM.deriv=offset.deriv$deriv,\n min.errors=off.min$errors[1],\n n.min=nrow(off.min),\n thresholds[threshold==\"min.error\"]))\n roc.dt.list[[paste(offset)]] <- data.table(\n offset, roc$roc, piece=1:nrow(roc$roc), prob.dir=\"Total\")\n}\nAUC.dt <- do.call(rbind, AUC.dt.list)\nroc.dt <- do.call(rbind, roc.dt.list)\nroc.segs <- do.call(rbind, roc.segs.list)\nerr.dt <- do.call(rbind, err.dt.list)\nroc.win.err.dt <- do.call(rbind, roc.win.err.list)\n\nggplot()+\n geom_point(aes(\n offset, AUC),\n data=AUC.dt)\n\ncommon.names <- intersect(names(roc.dt), names(err.dt))\nboth.dt <- rbind(\n err.dt[, common.names, with=FALSE],\n roc.dt[, common.names, with=FALSE])\nboth.dt[, `min(fp,fn)` := pmin(fp, fn)]\nerr.dt.tall <- melt(\n both.dt,\n variable.name=\"error.type\",\n measure.vars=c(\"fp\", \"fn\", \"errors\", \"min(fp,fn)\"))\nid2show <- function(seqID)gsub(\n \"ATAC_JV_adipose/samples/AC1/|/problems/chrX-37148256-49242997\", \"\",\n seqID)\nroc.segs[, sample := id2show(prob.dir)]\ncurveAlignment$labels[, sample := id2show(prob.dir)]\nroc.win.err.dt[, sample := id2show(prob.dir)]\ncurveAlignment$profiles[, sample := id2show(prob.dir)]\nerr.dt.tall[, sample := id2show(prob.dir)]\nAUC.dt[, thresh := (min.thresh+max.thresh)/2]\nroc.dt[, Errors := ifelse(errors==min(errors), \"Min\", \"More\"), by=list(offset)]\nAUC.dt[, max.correct := as.numeric(labels-min.errors)]\nAUC.dt[, opt.models 
:= as.numeric(n.min)]\nAUC.tall <- melt(\n AUC.dt,\n measure.vars=c(\"AUM\", \"AUM.deriv\", \"AUC\", \"min.errors\", \"opt.models\"))\nmin.err <- roc.dt[Errors==\"Min\"]\nmin.err[, piece := 1:.N, by=list(offset)]\nroc.size <- 5\nroc.peaks <- roc.segs[status==\"peak\"]\ntext.y <- c(\n \"offset\"=200,\n \"thresh\"=175,\n \"fp\"=150,\n \"fn\"=125)\ntext.dt <- melt(\n roc.dt,\n id.vars=c(\"offset\", \"thresh\"),\n measure.vars=names(text.y))\ntext.dt[, y := text.y[variable] ]\ntext.dt[, digits := ifelse(variable %in% c(\"fp\", \"fn\"), 0, 1)]\ntext.dt[, value.num := round(value, digits)]\ntext.dt[, value.str := paste(value.num)]\nerr.dt.tall[, value.i := cumsum(\n c(FALSE, diff(value) != 0)\n), by=list(sample, offset, error.type)]\nerr.dt.segs <- err.dt.tall[, list(\n min.thresh=min(min.thresh),\n max.thresh=max(max.thresh),\n value=value[1]\n), by=list(sample, offset, error.type, value.i)]\nmin.tallrects <- data.table(\n err.dt.segs[error.type==\"errors\", {\n .SD[value==min(value)]\n }, by=list(sample, offset)],\n Errors=\"Min\")\nchunk_vars <- \"offset\"\nchunk_vars <- character()\nann.colors <- c(\n noPeaks=\"#f6f4bf\",\n peakStart=\"#ffafaf\",\n peakEnd=\"#ff4c4c\",\n peaks=\"#a445ee\")\nerr.dt.show <- err.dt.tall[\n error.type %in% c(\"fp\",\"fn\") |\n (error.type==\"min(fp,fn)\" & sample==\"Total\")]\narea.show <- err.dt.show[error.type==\"min(fp,fn)\"]\nAUM.text <- area.show[, .SD[value>0][1], by=offset][AUC.dt, .(\n offset, sample, min.thresh, AUM), on=\"offset\"]\nanimint(\n title=\"Changepoint detection ROC curve alignment problem\",\n ##first=list(offset=0.5),\n out.dir=\"figure-curveAlignment\",\n duration=list(offset=250),\n time=list(variable=\"offset\", ms=250),\n profiles=ggplot()+\n ylab(\"Number of aligned DNA sequence reads (coverage)\")+\n ggtitle(\n \"Noisy coverage data, labels, and predicted model\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n theme_animint(width=1300)+\n facet_grid(sample ~ window, scales=\"free\", labeller=label_both)+\n geom_text(aes(\n 43447, y,\n key=paste(offset, thresh, variable),\n label=paste0(\n variable, \"=\", value.str)),\n hjust=0,\n showSelected=c(\"offset\", \"thresh\"),\n data=data.table(\n text.dt,\n window=1,\n sample=\"MSC83\"))+\n geom_tallrect(aes(\n xmin=labelStart/1e3, xmax=labelEnd/1e3, fill=annotation),\n data=curveAlignment$labels,\n alpha=0.5,\n color=\"grey\")+\n scale_linetype_manual(\n \"Error type\",\n values=c(\n correct=0,\n \"false negative\"=3,\n \"false positive\"=1))+\n geom_tallrect(aes(\n xmin=chromStart/1e3, xmax=chromEnd/1e3,\n key=paste(chromStart, chromEnd),\n linetype=status),\n data=roc.win.err.dt,\n chunk_vars=chunk_vars,\n showSelected=c(\"offset\", \"thresh\"),\n fill=NA,\n size=2,\n color=\"black\")+\n scale_fill_manual(values=ann.colors)+\n geom_step(aes(\n chromStart/1e3, coverage),\n data=curveAlignment$profiles,\n color=\"grey50\")+\n geom_segment(aes(\n segStart/1e3, mean,\n key=paste(segStart, segEnd),\n xend=segEnd/1e3, yend=mean),\n color=\"green\",\n alpha=0.7,\n chunk_vars=chunk_vars,\n showSelected=c(\"offset\", \"thresh\"),\n data=roc.segs)+\n geom_segment(aes(\n segStart/1e3, 0,\n key=paste(segStart, segEnd),\n xend=segEnd/1e3, yend=0),\n color=\"deepskyblue\",\n showSelected=c(\"offset\", \"thresh\"),\n chunk_vars=chunk_vars,\n size=3,\n alpha=0.7,\n data=roc.peaks)+\n geom_point(aes(\n segStart/1e3, 0,\n key=paste(segStart, segEnd)),\n color=\"deepskyblue\",\n showSelected=c(\"offset\", \"thresh\"),\n chunk_vars=chunk_vars,\n size=4,\n fill=\"white\",\n alpha=0.7,\n 
data=roc.peaks)+\n scale_x_continuous(\n \"Position on chrX (kb = kilo bases, reference genome hg19)\",\n breaks=seq(4e4, 5e4, by=5)),\n metrics=ggplot()+\n ggtitle(\n \"AUC, select offset\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(variable ~ ., scales=\"free\")+\n geom_blank(aes(\n x, y),\n data=data.table(\n x=0, y=c(3.4, 1.6),\n variable=\"min.errors\"))+\n xlab(\"Offset = Difference between predicted values of samples\")+\n geom_point(aes(\n offset, value),\n fill=NA,\n data=AUC.tall)+\n ylab(\"\")+\n make_tallrect(AUC.dt, \"offset\"),\n error=ggplot()+\n ggtitle(\n \"Error curves, select threshold\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(sample ~ ., scales=\"free\")+\n scale_color_manual(values=c(\n fp=\"red\",\n fn=\"deepskyblue\",\n \"min(fp,fn)\"=\"black\"))+\n scale_size_manual(values=c(\n fp=5,\n fn=3,\n \"min(fp,fn)\"=1))+\n xlab(\"Prediction threshold\")+\n scale_y_continuous(\n \"Number of incorrectly predicted labels\",\n breaks=seq(0, 20, by=2))+\n geom_tallrect(aes(\n xmin=min.thresh,\n xmax=max.thresh,\n key=min.thresh),\n showSelected=c(\"offset\", \"Errors\"),\n color=\"grey50\",\n alpha=0.5,\n data=min.tallrects)+\n geom_text(aes(\n min.thresh, labels*0.9, key=1, label=paste0(\n \"Min Error=\", errors)),\n showSelected=c(\"offset\", \"Errors\"),\n hjust=0,\n color=\"grey50\",\n data=data.table(\n AUC.dt,\n Errors=\"Min\",\n sample=\"Total\"))+\n geom_rect(aes(\n xmin=min.thresh, ymin=0,\n key=piece,\n xmax=max.thresh, ymax=value),\n chunk_vars=chunk_vars,\n showSelected=\"offset\",\n fill=\"black\",\n data=area.show)+\n geom_segment(aes(\n min.thresh, value,\n key=paste(piece, error.type),\n color=error.type,\n size=error.type,\n xend=max.thresh, yend=value),\n chunk_vars=chunk_vars,\n showSelected=\"offset\",\n data=err.dt.show)+\n geom_text(aes(\n min.thresh, 0.5, key=1,\n label=sprintf(\"AUM=%.1f\", AUM)),\n showSelected=\"offset\",\n hjust=1,\n data=AUM.text)+\n geom_tallrect(aes(\n xmin=min.thresh, xmax=max.thresh,\n tooltip=sprintf(\n \"%.1f<thresh<%.1f FP=%d FN=%d\",\n min.thresh, max.thresh, fp, fn),\n key=paste(offset, thresh)),\n showSelected=\"offset\",\n clickSelects=\"thresh\",\n alpha=0.5,\n data=roc.dt),\n roc=ggplot()+\n ggtitle(\n \"ROC curves, select threshold\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_path(aes(\n FPR, TPR, key=paste(offset, thresh)),\n showSelected=\"offset\",\n data=roc.dt)+\n geom_point(aes(\n FPR, TPR, fill=Errors,\n tooltip=sprintf(\n \"%.1f<thresh<%.1f FP=%d FN=%d\",\n min.thresh, max.thresh, fp, fn),\n key=paste(offset, thresh)),\n showSelected=\"offset\",\n clickSelects=\"thresh\",\n size=roc.size,\n alpha=0.7,\n data=roc.dt)+\n scale_fill_manual(values=c(\n Min=\"black\",\n More=\"white\"))+\n coord_equal()+\n geom_text(aes(\n 0.75, 0.25, key=1, label=sprintf(\n \"AUC=%.2f\", AUC)),\n showSelected=\"offset\",\n data=AUC.dt)+\n geom_abline(aes(\n slope=slope, intercept=intercept),\n color=\"grey\",\n data=data.table(slope=1, intercept=0))\n)\n\n"
},
{
"alpha_fraction": 0.5869508981704712,
"alphanum_fraction": 0.6134650111198425,
"avg_line_length": 34.953269958496094,
"blob_id": "71a247bbdcbea05aa2146504953d9c5b07ddf060",
"content_id": "fd5fd98194bd3bed3302877bb8d68c8a858b31b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3847,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 107,
"path": "/figure-logistic-weights.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nzip.X.list <- list()\nzip.y.list <- list()\nfor(set in c(\"train\", \"test\")){\n f <- sprintf(\"zip.%s.gz\", set)\n if(!file.exists(f)){\n u <- paste0(\"https://web.stanford.edu/~hastie/ElemStatLearn/datasets/\", f)\n download.file(u, f)\n }\n zip.dt <- data.table::fread(f)\n y.vec <- zip.dt[[1]]\n is.01 <- y.vec %in% 0:1\n y01.dt <- data.table(label=y.vec[is.01])\n y01.dt[, cum := 1:.N, by=label]\n max.dt <- y01.dt[, .(max=max(cum)), by=label]\n keep <- y01.dt$cum <= min(max.dt[[\"max\"]])\n zip.y.list[[set]] <- y01.dt[keep, label]\n zip.X.list[[set]] <- as.matrix(zip.dt[is.01, -1, with=FALSE][keep,])\n}\n(y.tab <- sapply(zip.y.list, table))\n\ntrain.set.list <- list(\n full=list(X=zip.X.list[[\"train\"]], y=zip.y.list[[\"train\"]]))\nsome.props <- c(0.01, 0.05)\nprop.pos.vec <- sort(unique(c(some.props, 1-some.props, 0.5)))\n##want p/(p + n) = 0.05 => 0.05*(p+n) = p => 0.05p + 0.05n = p => 0.05n = 0.95p => p = 0.05 / 0.95n\nmin.prop.pos <- min(prop.pos.vec)\nmin.n.pos <- as.integer(min.prop.pos/(1-min.prop.pos) * y.tab[\"0\", \"train\"])\nmin.total <- min.n.pos + y.tab[\"0\", \"train\"]\nc(min.n.pos, y.tab[\"0\", \"train\"])/min.total\nN.obs <- 1000\ntrain.y.dt <- data.table(label=zip.y.list[[\"train\"]])\ntrain.y.dt[, i := 1:.N]\ntest.y <- zip.y.list[[\"test\"]]\nresult.dt.list <- list()\nfor(prop.pos in prop.pos.vec){\n prop.dt <- rbind(\n data.table(prop=prop.pos, label=1),\n data.table(prop=1-prop.pos, label=0))\n prop.dt[, class.N := as.integer(N.obs*prop) ]\n prop.dt[, weight := 1/class.N]\n for(seed in 1:3){\n cat(sprintf(\"prop=%f seed=%d\\n\", prop.pos, seed))\n set.seed(seed)\n index.dt <- prop.dt[train.y.dt, on=\"label\"][, .(\n i=.SD[sample(1:.N), i[1:class.N] ]\n ), by=.(label, weight, class.N)]\n seed.i <- index.dt[[\"i\"]]\n seed.y <- zip.y.list[[\"train\"]][seed.i]\n seed.X <- zip.X.list[[\"train\"]][seed.i,]\n weight.list <- list(\n identity=rep(1, length(seed.y)),\n balanced=index.dt[[\"weight\"]])\n for(weight.name in names(weight.list)){\n weight.vec <- weight.list[[weight.name]]\n fit <- glmnet::cv.glmnet(seed.X, seed.y, weight.vec, family=\"binomial\")\n seed.pred <- predict(fit, zip.X.list[[\"test\"]])\n roc.df <- WeightedROC::WeightedROC(seed.pred, test.y)\n seed.pred.class <- ifelse(0<seed.pred, 1, 0)\n accuracy <- mean(seed.pred.class == test.y)\n auc <- WeightedROC::WeightedAUC(roc.df)\n result.dt.list[[paste(prop.pos, seed, weight.name)]] <- data.table(\n prop.pos, seed, weight.name, accuracy, auc)\n }\n }\n}\n(result.dt <- do.call(rbind, result.dt.list))\n\nresult.tall <- melt(result.dt, measure.vars=c(\"accuracy\", \"auc\"))\nresult.tall[, percent.positive.labels := factor(prop.pos*100)]\nggplot()+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_point(aes(\n percent.positive.labels, value, color=weight.name),\n data=result.tall)\n\nresult.stats <- result.tall[, .(\n max=max(value),\n q75=quantile(value, 0.75),\n median=median(value),\n q25=quantile(value, 0.25),\n min=min(value),\n seeds=.N\n), by=.(variable, prop.pos, percent.positive.labels, weight.name)]\ngg <- ggplot()+\n ggtitle(paste0(\n \"cv.glmnet run on data sets with same number of observations, N=\",\n nrow(seed.X),\n \"\\nand with different proportions of positive labels\"))+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_ribbon(aes(\n prop.pos, ymin=min, ymax=max, fill=weight.name),\n alpha=0.5,\n data=result.stats)+\n geom_line(aes(\n prop.pos, median, color=weight.name),\n data=result.stats)+\n 
scale_x_continuous(\n \"Proportion positive labels in train set\",\n breaks=unique(result.stats[[\"prop.pos\"]]))+\n ylab(\"Accuracy or AUC of predictions\non a test set of 50% positive\nand 50% negative labels\")\npng(\"figure-logistic-weights.png\", width=10, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.636820912361145,
"alphanum_fraction": 0.6488933563232422,
"avg_line_length": 26.61111068725586,
"blob_id": "40d7586b734d875bb03f8b6be747ed65d5cc5305",
"content_id": "0b96d6e8740c4fa3ddc452375e393f568da8ac97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 994,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 36,
"path": "/figure-flearn-complex.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata.list <- list()\nfor(data.type in c(\"possible_errors\", \"errors\")){\n f <- paste0(\n \"../feature-learning-benchmark/labeled_problems_\", data.type, \".csv\")\n data.list[[data.type]] <- fread(f)\n}\n\n\nprob.dir.vec <- data.list$errors[min.log.penalty==-Inf & 0<fn, prob.dir]\nsome.err <- data.list$errors[prob.dir.vec, on=list(prob.dir)]\nsome.err[, list(min.fn=min(fn)), by=list(prob.dir)]\nerr.sizes <- c(\n fp=2,\n fn=1.5,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=names(err.colors))\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(prob.dir ~ ., scales=\"free\")+\n geom_segment(aes(\n min.log.penalty, value,\n xend=max.log.penalty, yend=value,\n color=variable, size=variable),\n data=some.err.tall)+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)+\n scale_y_continuous(limits=c(-2, NA),breaks=seq(0, 20, by=4))\n"
},
{
"alpha_fraction": 0.6263269782066345,
"alphanum_fraction": 0.6492568850517273,
"avg_line_length": 29.584415435791016,
"blob_id": "e344de96f667d00155ef601e919ef44cc9fbef15",
"content_id": "52de4bf76712884be5da1cc01b5d1c227279a0d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2355,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 77,
"path": "/figure-aum-grad-speed-binary.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nN.vec <- as.integer(10^seq(1, 6, by=0.5))\nmax.N <- max(N.vec)\nall.labels.vec <- rep(c(-1,1), l=max.N)\nall.diffs.dt <- aum::aum_diffs_binary(all.labels.vec)\nset.seed(1)\nall.pred.vec <- rnorm(max.N)\ntiming.dt.list <- list()\nfor(N in N.vec){\n print(N)\n N.pred.vec <- all.pred.vec[1:N]\n N.diffs.dt <- all.diffs.dt[1:N]\n N.labels.vec <- sort(all.labels.vec[1:N])\n order.list <- list(sorted=sort(N.pred.vec), unsorted=N.pred.vec)\n for(prediction.order in names(order.list)){\n order.pred.vec <- order.list[[prediction.order]]\n timing.df <- microbenchmark::microbenchmark(logistic.grad={\n -N.labels.vec/(1+exp(N.labels.vec*order.pred.vec))\n }, logistic.loss={\n log(1+exp(-N.labels.vec*order.pred.vec))\n }, sort={\n sort(order.pred.vec)\n }, aum={\n aum::aum(N.diffs.dt, order.pred.vec)\n }, times=10)\n timing.dt.list[[paste(N, prediction.order)]] <- with(timing.df, data.table(\n N, prediction.order, seconds=time/1e9, algorithm=expr))\n }\n}\n(timing.dt <- do.call(rbind, timing.dt.list))\n\ntiming.stats <- timing.dt[, .(\n max=max(seconds),\n median=median(seconds),\n min=min(seconds),\n times=.N\n), by=.(N, prediction.order, algorithm)]\ngg <- ggplot()+\n facet_grid(. ~ prediction.order, labeller=label_both)+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=algorithm),\n alpha=0.5,\n data=timing.stats)+\n geom_line(aes(\n N, median, color=algorithm),\n data=timing.stats)+\n scale_x_log10(\n \"Number of predictions\",\n limits=c(10, max.N*10))+\n scale_y_log10(\n \"Computation time in seconds,\nmedian line, min/max band, 10 timings\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-aum-grad-speed-binary-algos.png\", width=10, height=3, res=200, units=\"in\")\nprint(dl)\ndev.off()\n\ngg <- ggplot()+\n facet_grid(. ~ algorithm, labeller=label_both)+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=prediction.order),\n alpha=0.5,\n data=timing.stats)+\n geom_line(aes(\n N, median, color=prediction.order),\n data=timing.stats)+\n scale_x_log10(\n \"Number of predictions\",\n limits=c(10, max.N*10))+\n scale_y_log10(\n \"Computation time in seconds,\nmedian line, min/max band, 10 timings\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-aum-grad-speed-binary.png\", width=10, height=3, res=200, units=\"in\")\nprint(dl)\ndev.off()\n"
},
{
"alpha_fraction": 0.6019544005393982,
"alphanum_fraction": 0.6208469271659851,
"avg_line_length": 25.929824829101562,
"blob_id": "bad9910a42f2a411155e065e0c07855f78edfdf1",
"content_id": "5166aa337a2153d03af241fc8a32117ffff07558",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1535,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 57,
"path": "/figure-binary-class.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nd <- function(min.log.lambda, fp, fn){\n data.table(min.log.lambda, fp, fn)\n}\nprofile <- function(..., possible.fp, possible.fn, errors, labels){\n dt <- do.call(rbind, list(...))\n if(missing(possible.fp))possible.fp <- max(dt$fp)\n if(missing(possible.fn))possible.fn <- max(dt$fn)\n errors <- dt[, fp+fn]\n if(missing(labels))labels <- max(errors)\n dt[, data.table(\n min.log.lambda,\n max.log.lambda=c(min.log.lambda[-1], Inf),\n fp, fn, errors, possible.fp, possible.fn, labels)]\n}\nprofile.list <- list(\n negative=profile(\n d(-Inf, 0, 0),\n d(0, 1, 0)),\n positive=profile(\n d(-Inf, 0, 1),\n d(0, 0, 0)))\nprofile.wide <- data.table(\n label=names(profile.list)\n)[, profile.list[[label]], by=label]\nerr.sizes <- c(\n fp=3,\n fn=2)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\")\nprofile.tall <- data.table::melt(\n profile.wide,\n measure=c(\"fp\", \"fn\"))\nleg <- \"Error type\"\ngg <- ggplot()+\n facet_grid(. ~ label, labeller=label_both)+\n ## theme_bw()+\n ## theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_segment(aes(\n min.log.lambda, value,\n color=variable, size=variable,\n xend=max.log.lambda, yend=value),\n data=profile.tall)+\n scale_y_continuous(\n \"Label errors\",\n breaks=c(0,1),\n limits=c(-0.2, 1.2))+\n scale_color_manual(leg, values=err.colors)+\n scale_size_manual(leg, values=err.sizes)+\n scale_x_continuous(\n \"Predicted value f(x)\",\n limits=c(-1.8, 1.8))\npng(\"figure-binary-class.png\", width=4, height=2, res=200, units=\"in\")\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.592467188835144,
"alphanum_fraction": 0.6058682203292847,
"avg_line_length": 28.537500381469727,
"blob_id": "b8d4da808e75181f2556e7fdcdf271c449cc5508",
"content_id": "396cfcbf2f85cf1cc7eed185d14ebda8f5c595f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7089,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 240,
"path": "/figure-auc-improved-interactive.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(animint2)\nauc.improved <- readRDS(\"auc.improved.rds\")\n\nauc.improved[, set.fold := paste0(set.name, \"/\", fold)]\nroc.dt.list <- list()\nfor(test.fold.i in 1:nrow(auc.improved)){\n one.fold <- auc.improved[test.fold.i]\n roc.dt.list[[test.fold.i]] <- one.fold[, data.table(\n set.fold, pred.name, roc[[1]])]\n}\n(roc.dt <- do.call(rbind, roc.dt.list))\nroc.dt[, fn0 := fn-min(fn), by=.(set.fold)]\nroc.dt[, min.fp.fn := ifelse(fp<fn0, fp, fn0)]\nroc.dt[, width := max.thresh-min.thresh]\nroc.dt[, area := ifelse(min.fp.fn==0, 0, min.fp.fn*width)]\n(aum.dt <- roc.dt[, .(\n aum=sum(area)\n), by=.(set.fold, pred.name)][order(aum)])\naum.dt[, log.aum := log10(aum+1)]\naum.wide <- dcast(aum.dt, set.fold ~ pred.name, value.var=\"log.aum\")\naum.wide[, status := ifelse(\n initial==improved, \"same\", ifelse(\n initial>improved, \"better\", \"worse\"))]\n\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_point(aes(\n auc, set.name, color=pred.name),\n data=auc.improved)\n\nauc.improved[, accuracy.percent := 100-error.percent]\nauc.tall <- melt(auc.improved, measure.vars=c(\"accuracy.percent\", \"auc\"))\nauc.stats <- auc.tall[, .(\n mean=mean(value),\n sd=sd(value)\n), by=.(set.name, variable, pred.name)]\nauc.only <- auc.stats[variable==\"auc\" & pred.name==\"improved\"][order(mean)]\nset.levs <- auc.only$set.name\nauc.stats[, set.fac := factor(set.name, set.levs)]\nauc.tall[, set.fac := factor(set.name, set.levs)]\nauc.only.wide <- dcast(\n auc.stats[variable==\"auc\"], set.name ~ pred.name, value.var=\"mean\")\nauc.only.wide[, diff := improved - initial]\nauc.only.wide[order(diff)]\n\nroc.wide <- dcast(\n auc.improved,\n set.name + fold + set.fold ~ pred.name,\n value.var=c(\"auc\", \"accuracy.percent\"))\nroc.wide[, auc_status := ifelse(\n auc_initial==auc_improved, \"same\", ifelse(\n auc_initial<auc_improved, \"better\", \"worse\"))]\nroc.wide[auc_initial>auc_improved]\nroc.wide[, accuracy.percent_status := ifelse(\n accuracy.percent_initial==accuracy.percent_improved, \"same\", ifelse(\n accuracy.percent_initial<accuracy.percent_improved, \"better\", \"worse\"))]\n\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nroc.dt[, seg.i := 1:.N, by=.(set.fold, pred.name)]\nroc.dt[, mid.thresh := (min.thresh+max.thresh)/2]\nroc.tall <- melt(\n roc.dt,\n measure.vars=names(err.sizes))\nstatus.colors <- c(\n same=\"black\",\n better=\"blue\",\n worse=\"red\")\ntallrect.dt <- data.table(\n mid.thresh=seq(-10, 5, by=0.2))\nroc.dots <- roc.dt[tallrect.dt, .(\n set.fold, pred.name, mid.thresh=i.mid.thresh, FPR, TPR\n), on=.(\n min.thresh<mid.thresh, max.thresh>mid.thresh)]\nanimint(\n title=\"Minimizing area under min(FP,FN)\",\n out.dir=\"figure-auc-improved-interactive\",\n ## ggplot()+\n ## ggtitle(\"Data sets ordered by mean improved AUC\")+\n ## guides(size=\"none\", color=\"none\")+\n ## theme_bw()+\n ## theme(panel.margin=grid::unit(0, \"lines\"))+\n ## theme_animint(width=800)+\n ## facet_grid(. 
~ variable, scales=\"free\")+\n ## geom_segment(aes(\n ## mean+sd, set.fac,\n ## color=pred.name, size=pred.name,\n ## xend=mean-sd, yend=set.fac),\n ## data=auc.stats)+\n ## scale_size_manual(values=c(improved=2, initial=3))+\n ## geom_point(aes(\n ## mean, set.fac,\n ## color=pred.name),\n ## shape=21,\n ## size=3,\n ## fill=\"white\",\n ## data=auc.stats)+\n ## geom_point(aes(\n ## value, set.fac,\n ## color=pred.name),\n ## clickSelects=\"set.fold\",\n ## alpha=0.6,\n ## size=4,\n ## data=auc.tall)+\n ## xlab(\"\")+\n ## ylab(\"Data set\"),\n ggplot()+\n ggtitle(\"FP/FN curves\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n theme_animint(update_axes=\"y\")+\n facet_grid(pred.name ~ .)+\n xlab(\"Prediction threshold\")+\n ylab(\"Incorrectly predicted labels\")+\n geom_vline(aes(\n xintercept=(min.thresh+max.thresh)/2,\n key=1),\n data=auc.improved,\n color=\"grey\",\n showSelected=\"set.fold\")+\n geom_line(aes(\n mid.thresh, value,\n key=variable, group=variable,\n color=variable, size=variable),\n data=roc.tall,\n showSelected=\"set.fold\")+\n geom_polygon(aes(\n mid.thresh, min.fp.fn, key=1),\n color=\"grey\",\n data=roc.dt,\n size=0,\n showSelected=\"set.fold\")+\n make_tallrect(tallrect.dt, \"mid.thresh\")+\n ## geom_tallrect(aes(\n ## xmin=min.thresh, xmax=max.thresh),\n ## data=roc.dt,\n ## showSelected=\"set.fold\",\n ## clickSelects=\"mid.thresh\",\n ## alpha=0.5)+\n scale_size_manual(values=err.sizes)+\n scale_color_manual(values=err.colors),\n selector.types=list(\n variable=\"multiple\"),\n ggplot()+\n ggtitle(\"ROC curves\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_path(aes(\n FPR, TPR,\n key=pred.name,\n color=pred.name, group=pred.name),\n data=roc.dt,\n showSelected=\"set.fold\")+\n geom_point(aes(\n FPR, TPR, color=pred.name, key=pred.name),\n fill=\"white\",\n data=auc.improved,\n showSelected=\"set.fold\")+\n geom_point(aes(\n FPR, TPR, color=pred.name, key=pred.name),\n data=roc.dots,\n showSelected=c(\"set.fold\", \"mid.thresh\"),\n size=4,\n alpha=0.5)+\n geom_point(aes(\n FPR, TPR, color=pred.name, key=paste(pred.name, mid.thresh)),\n data=roc.dots,\n showSelected=\"set.fold\",\n clickSelects=\"mid.thresh\",\n size=4,\n alpha=0.5),\n ggplot()+\n ggtitle(\"Percent correctly predicted labels\")+\n theme_bw()+\n theme_animint(width=300)+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_abline(slope=1, intercept=0, color=\"grey\")+\n scale_color_manual(values=status.colors)+\n guides(color=\"none\")+\n coord_equal()+\n geom_point(aes(\n accuracy.percent_initial, accuracy.percent_improved,\n key=set.fold,\n color=accuracy.percent_status),\n clickSelects=\"set.fold\",\n alpha=0.6,\n size=4,\n data=roc.wide),\n ggplot()+\n ggtitle(\"Log[Area under Min(FP,FN) + 1]\")+\n theme_bw()+\n theme_animint(width=300)+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_abline(slope=1, intercept=0, color=\"grey\")+\n guides(color=\"none\")+\n scale_color_manual(\"Status\",values=status.colors)+\n geom_point(aes(\n initial, improved,\n key=set.fold,\n color=status),\n clickSelects=\"set.fold\",\n size=4,\n alpha=0.6,\n data=aum.wide)+\n coord_equal(),\n ggplot()+\n ggtitle(\"Area under the ROC curve\")+\n theme_bw()+\n theme_animint(width=300)+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_abline(slope=1, intercept=0, color=\"grey\")+\n scale_color_manual(\"Status\",values=status.colors)+\n geom_point(aes(\n auc_initial, auc_improved,\n key=set.fold,\n color=auc_status),\n clickSelects=\"set.fold\",\n size=4,\n 
alpha=0.6,\n data=roc.wide)+\n coord_equal(),\n duration=list(\n set.fold=500,\n mid.thresh=500),\n time=list(\n ms=500,\n variable=\"mid.thresh\"),\n first=list(\n set.fold=\"H3K27me3_RL_cancer/2\",\n mid.thresh=-5\n )\n)\n"
},
{
"alpha_fraction": 0.6352154612541199,
"alphanum_fraction": 0.650074303150177,
"avg_line_length": 28.58241844177246,
"blob_id": "d7f7e4864031da312ecde1120f1c3ddcff8e7aa3",
"content_id": "2cbf83e1fd2efa492d17a4888e8029a250be528d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2692,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 91,
"path": "/figure-aum-neural-networks-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "param.dt <- data.table::CJ(\n loss_name=c(\"logistic\",\"balanced\",\"AUM\",\"AUM_rate\"),\n seed=1:4,\n lr=10^seq(-4, 2),\n data_name=c(\"MNIST\", \"FashionMNIST\"),\n batch_size=1000)\nMyFun <- function(loss_name, seed, lr, data_name, batch_size){\n cmd <- paste(\n \"python figure-aum-neural-networks-data.py\",\n loss_name, seed, lr, data_name, batch_size)\n status <- system(cmd)\n if(status != 0){\n stop(\"error code \", status)\n }\n}\nunlink(\"registry\",recursive=TRUE)\nreg <- batchtools::makeRegistry(\"registry\")\nbatchtools::batchMap(\n MyFun,\n args=param.dt,\n reg=reg)\njob.table <- batchtools::getJobTable(reg=reg)\nchunks <- data.frame(job.table, chunk=1)\nbatchtools::submitJobs(chunks, resources=list(\n walltime = 1*24*60*60,#seconds\n memory = 8000,#megabytes per cpu\n ncpus=1, #>1 for multicore/parallel jobs.\n ntasks=1, #>1 for MPI jobs.\n chunks.as.arrayjobs=TRUE), reg=reg)\nreg <- batchtools::loadRegistry(\"registry\")\nbatchtools::getStatus(reg=reg)\njt <- batchtools::getJobTable(reg=reg)\njt[!is.na(error)]\n\n\nif(!file.exists(\"figure-aum-neural-networks-data\")){\n download.file(\n \"https://rcdata.nau.edu/genomic-ml/figure-aum-neural-networks-data.tgz\",\n \"figure-aum-neural-networks-data.tgz\")\n system(\"tar xf figure-aum-neural-networks-data.tgz\")\n}\n\"figure-aum-neural-networks-data/AUM/1/1e-06/FashionMNIST/1000/steps.csv\"\n(steps.csv.vec <- Sys.glob(\n \"figure-aum-neural-networks-data/*/*/*/*/*/steps.csv\"))\nsystem(paste(\"wc -l\", paste(steps.csv.vec, collapse=\" \")))\nunlink(grep(\"_count\", steps.csv.vec, value=TRUE))\nlibrary(data.table)\nsteps.dt <- data.table(steps.csv=steps.csv.vec)[, {\n steps <- fread(\n steps.csv, colClasses=list(\n integer=c(\"epoch\",\"step\"),\n numeric=\"out_value\",\n character=c(\"set_name\",\"out_name\")))\n meta <- nc::capture_first_vec(\n steps.csv,\n \"figure-aum-neural-networks-data/\",\n loss=\".*?\",\n \"/\",\n seed=\".*?\",\n \"/\",\n lr=\".*?\",\n \"/\",\n data_set=\".*?\",\n \"/\",\n batch_size=\".*?\",\n \"/steps.csv\")\n data.table(meta, steps)\n}, by=steps.csv]\n\nlibrary(ggplot2)\nsteps.dt[, iteration := epoch*(1+max(step))+step]\nsteps.dt[, step.size := as.numeric(lr)]\none <- steps.dt[\n data_set==\"MNIST\" & out_name==\"AUC\" & set_name==\"validation\"]\nggplot()+\n facet_grid(step.size ~ seed, labeller=label_both)+\n geom_line(aes(\n iteration/(1+max(step)), out_value, color=loss),\n data=one)+\n scale_x_continuous(\"epoch\")\n\nggplot()+\n facet_grid(step.size ~ seed, labeller=label_both)+\n geom_line(aes(\n iteration, out_value, color=loss),\n data=one[epoch < 10])\n\nout.names <- setdiff(names(steps.dt), \"steps.csv\")\ndata.table::fwrite(\n steps.dt[,out.names,with=FALSE],\n \"figure-aum-neural-networks-data.csv\")\n"
},
{
"alpha_fraction": 0.5432509183883667,
"alphanum_fraction": 0.5712307095527649,
"avg_line_length": 26.44866943359375,
"blob_id": "549a34fcde4c698dd24f34d84550aa7588400b01",
"content_id": "5fca613ac2fef72fbc31be917aab9c486967d19f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 14439,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 526,
"path": "/figure-more-than-one.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nd <- function(min.log.lambda, fp, fn){\n data.table(min.log.lambda, fp, fn)\n}\nprofile <- function(..., possible.fp, possible.fn, errors, labels){\n dt <- do.call(rbind, list(...))\n if(missing(possible.fp))possible.fp <- max(dt$fp)\n if(missing(possible.fn))possible.fn <- max(dt$fn)\n errors <- dt[, fp+fn]\n if(missing(labels))labels <- max(errors)\n dt[, data.table(\n min.log.lambda,\n max.log.lambda=c(min.log.lambda[-1], Inf),\n fp, fn, errors, possible.fp, possible.fn, labels)]\n}\nprofile.list <- list(\n best=profile(\n d(-Inf, 0, 10),\n d(1, 0, 4),\n d(1.5,0,2),\n d(2, 0, 0),\n d(2.5,3,0),\n d(3, 7, 0),\n d(4, 10, 0)),\n bad=profile(\n d(-Inf, 0, 10),\n d(1,1,9),\n d(2,2,8),\n d(3,3,7),\n d(4,4,6),\n d(5,5,5),\n d(6, 10, 0)),\n less=profile(\n d(-Inf, 0, 10),\n d(2, 1, 1),\n d(4, 10, 0)),\n good=profile(\n d(-Inf, 0, 10),\n d(0.5,1,8),\n d(1, 1, 4),\n d(2, 1, 1),\n d(2.5,3,1),\n d(3, 8, 1),\n d(4, 10, 0)),\n ok=profile(\n d(-Inf, 0, 10),\n d(2, 0, 8),\n d(5, 4, 5),\n d(7, 4, 3),\n d(8, 7, 3),\n d(9, 7, 0),\n d(10, 10, 0)),\n more=profile(\n d(-Inf, 0, 10),\n d(2, 8/3, 8/3),\n d(5, 10, 8/3),\n d(7, 10, 25/3),\n d(8, 5/3, 25/3),\n d(9, 5/3, 8/3),\n d(10, 10, 0)))\npred.dt <- data.table(problem=1, pred.log.lambda=0)\nroc.dt.list <- list()\nauc.dt.list <- list()\nfor(profile.i in seq_along(profile.list)){\n p <- data.table(profile.list[[profile.i]], problem=1)\n roc.list <- penaltyLearning::ROChange(p, pred.dt, problem.vars=\"problem\")\n model <- names(profile.list)[[profile.i]]\n roc.dt.list[[profile.i]] <- data.table(model, roc.list$roc)\n auc.dt.list[[profile.i]] <- with(roc.list, data.table(model, auc, aum))\n}\nroc.dt <- do.call(rbind, roc.dt.list)\nauc.dt <- do.call(rbind, auc.dt.list)\nroc.dt[, aum := min.fp.fn*(max.thresh-min.thresh)]\nroc.dt[, AUM := sum(ifelse(is.finite(aum), aum, 0)), by=model]\nroc.dt[, `:=`(FP=fp, FN=fn, `min(FP,FN)`=min.fp.fn)]\nfp.fn.dt <- data.table::melt(roc.dt, measure.vars=c(\"FP\", \"FN\", \"min(FP,FN)\"))\nerr.sizes <- c(\n FP=3,\n FN=2,\n \"min(FP,FN)\"=1)\nerr.colors <- c(\n FP=\"red\",\n FN=\"deepskyblue\",\n \"min(FP,FN)\"=\"black\")\nggplot()+\n facet_grid(model ~ ., labeller=label_both)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=value),\n color=\"grey\",\n fill=\"grey\",\n data=fp.fn.dt[variable==\"min(FP,FN)\"])+\n geom_segment(aes(\n min.thresh, value,\n color=variable, size=variable,\n xend=max.thresh, yend=value),\n data=fp.fn.dt)+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)+\n scale_x_continuous(\"Constant added to predictions\")\n\nggplot()+\n geom_path(aes(\n FPR, TPR, color=model, size=model, group=model),\n data=roc.dt)\n\npoly.dt <- roc.dt[, {\n right <- .SD[-.N]\n left <- .SD[-1]\n zero <- rep(0, nrow(left))\n i <- 1:nrow(left)\n m <- left$model\n area <- ifelse(left$FPR < right$FPR, \"negative\", \"positive\")\n data.table(\n FPR=c(left$FPR, right$FPR, right$FPR, left$FPR),\n TPR=c(left$TPR, right$TPR, zero, zero),\n area=rep(area, 4),\n seg=rep(i, 4))\n}, by=model]\nggplot()+\n theme_bw()+\n scale_fill_manual(values=c(positive=\"black\", negative=\"red\"))+\n geom_polygon(aes(\n FPR, TPR, group=paste(seg, model), fill=area),\n alpha=0.2,\n data=poly.dt)+\n geom_path(aes(\n FPR, TPR),\n data=roc.dt)+\n geom_point(aes(\n FPR, TPR),\n data=roc.dt)+\n facet_grid(. 
~ model, labeller=label_both)+\n coord_equal()+\n geom_text(aes(\n 0.5, 0.5, label=sprintf(\"auc=%.2f\", auc)),\n data=auc.dt)\n\nmodel.ord <- c(\"best\",\"good\",\"ok\",\"bad\")\nauc.dt[, Model := factor(model, model.ord)]\nauc.dt[, AUC := round(auc, 2)]\nroc.join <- auc.dt[roc.dt, on=\"model\"]\npoly.join <- auc.dt[poly.dt, on=\"model\"]\nggplot()+\n theme_bw()+\n scale_fill_manual(values=c(positive=\"black\", negative=\"red\"))+\n geom_polygon(aes(\n FPR, TPR, group=paste(seg, model), fill=area),\n alpha=0.2,\n data=poly.join)+\n geom_path(aes(\n FPR, TPR),\n data=roc.join)+\n facet_grid(. ~ AUC + model, labeller=label_both)+\n coord_equal()+\n scale_x_continuous(\n \"False Positive Rate\",\n breaks = seq(0, 1, by=0.5))+\n scale_y_continuous(\n \"True Positive Rate\",\n breaks = seq(0, 1, by=0.5))\n\nsome <- function(dt)dt[model %in% model.ord]\ngg <- ggplot()+\n theme_bw()+\n scale_fill_manual(values=c(positive=\"black\", negative=\"red\"))+\n geom_polygon(aes(\n FPR, TPR, group=paste(seg, model)),\n alpha=0.2,\n data=some(poly.join))+\n geom_path(aes(\n FPR, TPR),\n data=some(roc.join))+\n facet_grid(.~Model+AUC, labeller=label_both)+\n coord_equal()+\n scale_x_continuous(\n \"False Positive Rate\",\n labels=c(\"0\",\"0.5\",\"1\"),\n breaks = seq(0, 1, by=0.5))+\n scale_y_continuous(\n \"True Positive Rate\",\n breaks = seq(0, 1, by=0.5),\n labels=c(\"0\",\"0.5\",\"1\"))\npng(\"figure-more-than-one-binary.png\", width=5, height=2, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nfp.fn.dt[, Model := factor(model, model.ord)]\nleg <- \"Error type\"\ngg <- ggplot()+\n facet_grid(. ~ Model + AUM, labeller=label_both)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=value),\n color=\"grey\",\n fill=\"grey\",\n data=some(fp.fn.dt[variable==\"min(FP,FN)\"]))+\n geom_segment(aes(\n min.thresh, value,\n color=variable, size=variable,\n xend=max.thresh, yend=value),\n data=some(fp.fn.dt))+\n scale_color_manual(leg, values=err.colors)+\n scale_size_manual(leg, values=err.sizes)+\n scale_x_continuous(\"Constant added to predictions\")+\n scale_y_continuous(\"Label errors\")\npng(\n \"figure-more-than-one-binary-aum.png\", \n width=6, height=2, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nroc.join[, min.FPR.FNR := pmin(FPR,1-TPR)]\nroc.join[, `sum(min)` := sum(min.FPR.FNR), by=Model]\ngg <- ggplot()+\n theme_bw()+\n scale_fill_gradient2(\n \"min(FPR,FNR)\",\n midpoint=0.25,\n low=\"blue\",\n mid=\"white\",\n high=\"red\")+\n geom_path(aes(\n FPR, TPR),\n data=some(roc.join))+\n geom_point(aes(\n FPR, TPR, fill=min.FPR.FNR),\n shape=21,\n data=some(roc.join))+\n facet_grid(.~Model+`sum(min)`, labeller=label_both)+\n coord_equal()+\n scale_x_continuous(\n \"False Positive Rate\",\n labels=c(\"0\",\"0.5\",\"1\"),\n breaks = seq(0, 1, by=0.5))+\n scale_y_continuous(\n \"True Positive Rate\",\n breaks = seq(0, 1, by=0.5),\n labels=c(\"0\",\"0.5\",\"1\"))\nprint(gg)\npng(\n \"figure-more-than-one-binary-dots.png\", \n width=6, height=2, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nerr.sizes <- c(\n FP=3,\n FN=2,\n \"min(FP,FN)\"=1)\nerr.colors <- c(\n FP=\"red\",\n FN=\"deepskyblue\",\n \"min(FP,FN)\"=\"black\")\n\nggplot()+\n\nbinary.list <- list(\n \"1\"=profile(\n d(-Inf, 0, 1),\n d(0, 0, 0)),\n \"0\"=profile(\n d(-Inf, 0, 0),\n d(0, 1, 0)))\nleg <- \"Error type\"\nlabel.tall.dt.list <- list()\nfor(label in names(binary.list)){\n label.dt <- binary.list[[label]]\n label.tall <- melt(label.dt, 
measure=c(\"fp\",\"fn\"))\n label.tall[, Variable := toupper(variable)]\n label.tall.dt.list[[label]] <- data.table(label, label.tall)\n ggplot()+\n scale_color_manual(leg, values=err.colors)+\n scale_size_manual(leg, values=err.sizes)+\n scale_y_continuous(\"Label Errors\")+\n scale_x_continuous(\n \"Predicted score\")+\n geom_segment(aes(\n min.log.lambda, value,\n color=Variable,\n size=Variable,\n xend=max.log.lambda, yend=value),\n data=label.tall)\n}\nlabel.tall.dt <- do.call(rbind, label.tall.dt.list)\nlab.info <- rbind(\n data.table(label=\"0\", hjust=1.1),\n data.table(label=\"1\", hjust=-0.1))\nlabel.non.zero <- label.tall.dt[value>0][lab.info, on=\"label\"]\nlabel.non.zero[, x := ifelse(label==\"0\", min.log.lambda, max.log.lambda)]\ngg <- ggplot()+\n scale_color_manual(leg, values=err.colors)+\n scale_size_manual(leg, values=err.sizes)+\n scale_y_continuous(\n \"Label Errors\",\n breaks=0:1)+\n scale_x_continuous(\n \"Predicted score f(x)\",\n limits=c(-2,2))+\n geom_segment(aes(\n min.log.lambda, value,\n color=Variable,\n size=Variable,\n xend=max.log.lambda, yend=value),\n data=label.tall.dt)+\n geom_text(aes(\n x, 1, hjust=hjust, label=Variable, color=Variable),\n vjust=1, \n data=label.non.zero)+\n facet_grid(label ~ ., labeller=label_both)\npng(\n \"figure-more-than-one-binary-errors.png\",\n width=3, height=2.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nfor(m in names(profile.list)){\n p.roc <- roc.dt[model==m]\n p.auc <- auc.dt[model==m]\n p.poly <- poly.dt[model==m]\n p.fp.fn <- fp.fn.dt[model==m]\n p.fp.fn[, Variable := ifelse(\n variable==\"min.fp.fn\", \"min(FP,FN)\", toupper(paste(variable)))]\n p.rect <- p.fp.fn[Variable==\"min(FP,FN)\"]\n p.best <- p.roc[errors==min(errors)]\n best.color <- \"green\"\n p.roc[, q := .I]\n p.roc[, hjust := 0]\n p.roc[, vjust := 1.2]\n p.roc[q==6, `:=`(hjust=1, vjust=-0.4)]\n g <- ggplot()+\n theme_bw()+\n scale_fill_manual(values=c(positive=\"black\", negative=\"red\"))+\n geom_polygon(aes(\n FPR, TPR, group=paste(seg, model), fill=area),\n alpha=0.2,\n data=p.poly)+\n geom_path(aes(\n FPR, TPR),\n data=p.roc)+\n geom_text(aes(\n FPR+0.01, TPR, label=paste0(\"q=\",q), hjust=hjust, vjust=vjust),\n size=3,\n data=p.roc)+\n geom_point(aes(\n FPR, TPR),\n data=p.roc)+\n coord_equal(xlim=c(0,1.1), ylim=c(-0.05, 1))+\n scale_y_continuous(\"True Positive Rate\", breaks=c(0,0.5,1))+\n scale_x_continuous(\"False Positive Rate\", breaks=c(0,0.5,1))+\n geom_text(aes(\n 0.5, 0.5, label=sprintf(\"AUC=%.2f\", auc)),\n data=p.auc)+\n theme(legend.position=\"none\")\n ##if(all(p.poly$area==\"positive\"))g <- g+theme(legend.position=\"none\")\n g.aum <- ggplot()+\n theme_bw()+\n theme(\n panel.grid.minor=element_blank(),\n panel.spacing=grid::unit(0, \"lines\"))+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=value),\n color=\"grey\",\n alpha=0.75,\n fill=\"grey\",\n data=p.rect)+\n geom_text(aes(\n 6, 4, label=sprintf(\"AUM=%.0f\", aum)),\n data=p.auc)+\n geom_segment(aes(\n min.thresh, value,\n color=Variable, size=Variable,\n xend=max.thresh, yend=value),\n data=p.fp.fn)+\n geom_text(aes(\n ifelse(\n min.thresh == -Inf, max.thresh-1, ifelse(\n max.thresh == Inf, min.thresh+1, (min.thresh+max.thresh)/2)),\n -4, label=paste0(\"q=\",q)),\n vjust=0,\n size=2.5,\n data=p.roc)+\n scale_color_manual(leg, values=err.colors)+\n scale_size_manual(leg, values=err.sizes)+\n scale_y_continuous(\"Label Errors\")+\n scale_x_continuous(\n \"Constant added to predicted values\",\n limits=c(0, 12),\n breaks=p.roc[[\"min.thresh\"]])\n 
g.list <- list(auc=g, aum=g.aum)\n for(plot.type in names(g.list)){\n out.png <- sprintf(\n \"figure-more-than-one-%s-%s.png\",\n m, plot.type)\n png(\n out.png,\n width=if(plot.type==\"auc\")2.5 else 4.5,\n height=if(plot.type==\"auc\")2.5 else 2, units=\"in\", res=200)\n print(g.list[[plot.type]])\n dev.off()\n f.tex <- sub(\"png\", \"tex\", out.png)\n tikz(f.tex, width=3, height=3, standAlone = TRUE)\n print(g.list[[plot.type]])\n dev.off()\n system(paste(\"pdflatex\", f.tex))\n }\n g <- ggplot()+\n theme_bw()+\n scale_fill_manual(values=c(positive=\"black\", negative=\"red\"))+\n geom_polygon(aes(\n FPR, TPR, group=paste(seg, model), fill=area),\n alpha=0.2,\n data=p.poly)+\n geom_path(aes(\n FPR, TPR),\n data=p.roc)+\n geom_text(aes(\n FPR+0.01, TPR, label=sprintf(\"q=%d\",q), hjust=hjust, vjust=vjust),\n size=3,\n data=p.roc)+\n geom_point(aes(\n FPR, TPR),\n size=0.5,\n data=p.roc)+\n coord_equal(\n xlim=c(0,1.1), ylim=c(-0.05, 1))+\n scale_y_continuous(\"True Positive Rate\", breaks=c(0,0.5,1))+\n scale_x_continuous(\"False Positive Rate\", breaks=c(0,0.5,1))+\n geom_text(aes(\n 0.5, 0.5, label=sprintf(\"AUC=%.2f\", auc)),\n data=p.auc)+\n theme(legend.position=\"none\")\n limits.vec <- c(0, 12)\n type.breaks <- c(\n FP=\"$\\\\text{FPT}_{\\\\mathbf{\\\\hat{y}}}(c)$\",\n FN=\"$\\\\text{FNT}_{\\\\mathbf{\\\\hat{y}}}(c)$\",\n \"min(FP,FN)\"=\"$M_{\\\\mathbf{\\\\hat{y}}}(c)$\")\n g.aum <- ggplot()+\n geom_vline(aes(\n xintercept=max.thresh),\n data=p.roc,\n color=\"grey\")+\n theme_bw()+\n theme(\n panel.grid.minor=element_blank(),\n panel.spacing=grid::unit(0, \"lines\"))+\n coord_cartesian(expand=FALSE)+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=value),\n color=\"grey\",\n alpha=0.75,\n fill=\"grey\",\n data=p.rect)+\n geom_text(aes(\n 6, 1.5, label=sprintf(\"AUM=%.0f\", aum)),\n data=p.auc)+\n geom_segment(aes(\n min.thresh, value,\n color=Variable, size=Variable,\n xend=max.thresh, yend=value),\n data=p.fp.fn)+\n geom_text(aes(\n ifelse(\n min.thresh == -Inf, max.thresh-1, ifelse(\n max.thresh == Inf, min.thresh+1, (min.thresh+max.thresh)/2)),\n ifelse(q %% 2, -2, -1),\n label=sprintf(\"q=%d\",q)),\n vjust=-0.5,\n size=3,\n data=p.roc)+\n scale_color_manual(leg, values=err.colors, breaks=names(type.breaks), labels=type.breaks)+\n scale_size_manual(leg, values=err.sizes, breaks=names(type.breaks), labels=type.breaks)+\n scale_x_continuous(\n \"Constant $c$ added to predicted values\",\n breaks=seq(0, 12, by=2),\n limits=limits.vec)+\n scale_y_continuous(\n \"Total label errors over all\n$n$ labeled training examples\",\nbreaks=seq(0, 10, by=2),\nlimits=c(NA, 11))\n g.list <- list(\n auc=g,\n \"aum-nomath\"=g.aum,\n aum=g.aum+\n geom_text(aes(\n thresh, 5.5,\n vjust=fcase(\n thresh %in% c(7,Inf), -0.5,\n default=1.2),\n label=sprintf(\n \"$\\\\tau(\\\\mathbf{\\\\hat{y}})_{%d}=%s$\",\n q, ifelse(\n is.finite(thresh),\n paste(thresh),\n paste(\n ifelse(thresh<0, \"-\", \"\"),\n \"\\\\infty\"))\n )),\n angle=90,\n data=p.roc[, data.table(\n q=c(0, .I),\n thresh=c(-Inf, max.thresh))])\n )\n for(plot.type in names(g.list)){\n f.tex <- sprintf(\n \"figure-more-than-one-%s-%s.tex\",\n m, plot.type)\n s <- 0.8\n tikz(\n f.tex,\n width=if(plot.type!=\"auc\")5*s else 3*s,\n height=3*s,\n standAlone = TRUE)\n print(g.list[[plot.type]])\n dev.off()\n system(paste(\"pdflatex\", f.tex))\n }\n}\n\n"
},
{
"alpha_fraction": 0.5893003940582275,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 26,
"blob_id": "21629fd1d513b738bdcdd14e40c81ae10cb4f372",
"content_id": "97afa87298430cb0b4cde79ac0350322e1154905",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2430,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 90,
"path": "/figure-aum-train-both.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\nabbrev.vec <- c(\n \"prediction vector\"=\"optimized\",\n \"linear model weights\"=\"train\")\nboth.list <- list()\nfor(optimization.variable in names(abbrev.vec)){\n fname.rds <- paste0(\n \"figure-aum-\", abbrev.vec[[optimization.variable]], \"-data.rds\")\n result.list <- readRDS(fname.rds)\n for(data.type in c(\"roc\", \"auc\")){\n dt <- result.list[[data.type]]\n dt[, model := sub(\"improved\", \"optimized\", pred.name)]\n both.list[[data.type]][[optimization.variable]] <- data.table(\n optimization.variable=factor(optimization.variable, names(abbrev.vec)),\n dt)\n }\n}\nfor(data.type in names(both.list)){\n both.list[[data.type]] <- do.call(rbind, both.list[[data.type]])\n}\nboth.list$auc[, `:=`(x=c(0.25), y=ifelse(model==\"initial\", 0.75, 0.5))]\ngg <- ggplot()+\n facet_grid(. ~ optimization.variable, labeller=label_both)+\n geom_path(aes(\n FPR, TPR, color=model),\n data=both.list$roc)+\n geom_point(aes(\n FPR, TPR, color=model),\n fill=\"white\",\n shape=21,\n data=both.list$auc)+\n geom_segment(aes(\n x, y,\n xend=FPR, yend=TPR,\n color=model),\n size=0.25,\n data=both.list$auc)+\n geom_label(aes(\n x, y, color=model,\n label=sprintf(\n \"%s AUM=%.2f\\nerrors=%d AUC=%.2f\",\n model, aum, errors, auc)),\n size=3,\n hjust=0,\n vjust=1,\n data=both.list$auc)+\n coord_equal()+\n guides(color=\"none\")+\n theme(panel.spacing=grid::unit(0.5, \"cm\"))\npng(\"figure-aum-train-both.png\", width=5.8, height=3.3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\none <- function(DT)DT[optimization.variable==\"prediction vector\"]\ngg <- ggplot()+\n geom_path(aes(\n FPR, TPR, color=model),\n data=one(both.list$roc))+\n geom_point(aes(\n FPR, TPR, color=model),\n fill=\"white\",\n shape=21,\n data=one(both.list$auc))+\n geom_segment(aes(\n x, y,\n xend=FPR, yend=TPR,\n color=model),\n size=0.25,\n data=one(both.list$auc))+\n geom_label(aes(\n x, y, color=model,\n label=sprintf(\n \"%s AUM=%.2f\\nerrors=%d AUC=%.2f\",\n model, aum, errors, auc)),\n size=3,\n hjust=0,\n vjust=1,\n data=one(both.list$auc))+\n coord_equal()+\n guides(color=\"none\")+\n theme(panel.spacing=grid::unit(0.5, \"cm\"))+\n scale_x_continuous(\n \"False positive rate\",\n breaks=c(0,0.5,1))+\n scale_y_continuous(\n \"True positive rate\",\n breaks=c(0,0.5,1))\npng(\"figure-aum-train-pred-only.png\", width=3, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.6208231449127197,
"alphanum_fraction": 0.6389329433441162,
"avg_line_length": 33.30857849121094,
"blob_id": "221b256428c31da2e333ed8f34d2afb3153dad8e",
"content_id": "d678fe4688c39d1f77357d4d61ba5be81ce8db74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 32800,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 956,
"path": "/figure-line-search-complexity-compare.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\nsetDTthreads(1)\n## > mb[per.set, on=list(set)][order(labels)]\n## megabytes set labels\n## 1: 554 H3K36me3_TDH_other 200\n## 2: 377 H3K36me3_TDH_ENCODE 338\n## 3: 375 H3K4me3_TDH_ENCODE 525\n## 4: 592 H3K27me3_RL_cancer 570\n## 5: 798 H3K27ac_TDH_some 627\n## 6: 906 H3K36me3_TDH_immune 630\n## 7: 296 H3K27me3_TDH_some 696\n## 8: 2407 CTCF_TDH_ENCODE 1378\n## 9: 3223 H3K4me1_TDH_BP 1584\n## 10: 5871 H3K36me3_AM_immune 1743\n## 11: 6407 ATAC_JV_adipose 3241\n## 12: 3017 H3K4me3_PGP_immune 3780\n## 13: 2902 H3K4me3_TDH_immune 3807\n## 14: 5421 H3K27ac-H3K4me3_TDHAM_BP 15961\n(testFold.vec <- Sys.glob(\"../neuroblastoma-data/data/*/cv/*/testFolds/*\"))\ntestFold.path <- \"../neuroblastoma-data/data/H3K27ac-H3K4me3_TDHAM_BP/cv/equal_labels/testFolds/3\"\nseed <- 1\ninit.name=\"zero\"\naum.type=\"count\"\nOneBatch <- function(testFold.path, aum.type){\n library(data.table)\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n ## replace positive fn at end with 0 to avoid AUM=Inf.\n data.list$evaluation[, `:=`(\n min.fn=min(fn),\n max.fp=max(fp),\n min.lambda = exp(min.log.lambda),\n example=sequenceID\n ), by=sequenceID]\n bad <- data.list$evaluation[min.log.lambda == -Inf & min.fn < fn]\n if(nrow(bad)){\n print(bad)\n }\n data.list$evaluation[min.log.lambda == -Inf & 0 < fn]\n ## code below not necessary since this does not happen in our real\n ## data sets, but it could theoretically in some data.\n data.list$aum.input <- data.table(data.list$evaluation)[, `:=`(\n possible.fn=possible.fn-min.fn,\n fn=fn-min.fn,\n possible.fp=max.fp\n ), by=sequenceID]\n ## read folds. 
\n folds.dt <- data.table::fread(folds.csv)\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := rep(\n c(\"subtrain\", \"validation\"), l=.N)]\n folds.dt[, table(fold, set)]\n X.all <- scale(data.list$inputs[, -1])#rm seqID.\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.vec <- folds.dt[rownames(X.finite), set, on=\"sequenceID\"]\n seqs.list <- list()\n diffs.list <- list()\n aum.vec.list <- list()\n for(s in unique(folds.dt$set)){\n seqs.set <- folds.dt[s==set, sequenceID]\n seqs.list[[s]] <- seqs.set\n seqs.diff <- aum::aum_diffs_penalty(\n data.list$evaluation,\n seqs.set,\n denominator=aum.type)\n diffs.list[[s]] <- seqs.diff\n }\n n.subtrain.diffs <- nrow(diffs.list$subtrain)\n totals <- colSums(diffs.list$subtrain[, .(fp_diff, fn_diff)])\n X.subtrain <- X.finite[set.vec==\"subtrain\",]\n neg.t.X.subtrain <- -t(X.subtrain)\n seqs.train <- with(seqs.list, c(subtrain, validation))\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n N.param <- ncol(X.finite)+1\n init.param <- structure(\n rep(0, N.param),\n names=c(\"(Intercept)\",colnames(X.finite)))\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ])\n some.param <- fit[[\"param.mat\"]]\n init.param[rownames(some.param)] <- some.param\n init.param\n },\n near.zero=function(){\n init.param+rnorm(N.param)\n }\n )\n loss.dt.list <- list()\n time.dt.list <- list()\n iterations.dt.list <- list()\n Breaks <- nrow(diffs.list$subtrain)\n maxIterations.list <- list(\n grid=NULL,\n min.aum=\"min.aum\",\n exactL=Breaks,\n exactQ=Breaks*(Breaks-1)/2)\n for(seed in 1:4)for(init.name in c(\"near.zero\",\"IntervalRegressionCV\")){\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n for(maxIterations.name in names(maxIterations.list)){\n maxIterations <- maxIterations.list[[maxIterations.name]]\n computeROC <- function(w, i, set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=-as.numeric(pred.pen.vec))\n is.set <- set.vec==set\n set.dt <- pred.dt[is.set]\n L <- penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n alist <- aum_auc(diffs.list[[set]], pred.pen.vec[ seqs.list[[set]], ])\n L$aum.diffs <- alist$aum\n L$auc.diffs <- alist$auc\n L\n }\n aum_auc <- function(diffs.dt, pred.vec){\n aum.list <- aum::aum(diffs.dt, pred.vec)\n before.dt <- data.table(aum.list$total_error, key=\"thresh\")[, `:=`(\n TPR_before=1-fn_before/-totals[[\"fn_diff\"]],\n FPR_before=fp_before/totals[[\"fp_diff\"]])]\n aum.list$auc <- before.dt[, .(\n FPR=c(FPR_before, 1),\n TPR=c(TPR_before, 1)\n )][, sum((FPR[-1]-FPR[-.N])*(TPR[-1]+TPR[-.N])/2)]\n aum.list\n }\n obj.sign <- 1\n objective <- \"aum\"\n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n prev.obj <- Inf*obj.sign\n step.number <- 0\n elapsed.seconds <- system.time({\n while({\n summary.dt.list <- list()\n for(set in names(seqs.list)){\n set.PL <- computeROC(weight.vec, intercept, set)\n summary.dt.list[[set]] <- with(set.PL, data.table(\n set,\n thresholds[threshold==\"predicted\"],\n auc, aum, auc.diffs, aum.diffs))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n loss.dt.list[[paste(\n seed, init.name, step.number, maxIterations.name\n 
)]] <- data.table(\n seed, init.name, step.number, maxIterations.name, summary.dt)\n new.obj <- summary.dt.list$subtrain[[paste0(objective,\".diffs\")]]\n improvement <- obj.sign*(prev.obj-new.obj)\n cat(sprintf(\n \"seed=%d init=%s step=%d %s %f->%f\\n\",\n seed, init.name, step.number, maxIterations.name, prev.obj, new.obj))\n ##step.number < 2 &&\n 1e-5 < improvement\n }){\n pred.vec <- X.subtrain %*% weight.vec\n aum.list <- aum::aum(diffs.list$subtrain, pred.vec)\n pred.grad.vec <- rowMeans(aum.list$derivative_mat)\n direction.vec <- neg.t.X.subtrain %*% pred.grad.vec\n take.step <- function(s){\n weight.vec+s*direction.vec\n }\n candidate.dt <- if(identical(maxIterations.name, \"grid\")){\n step.grid <- 10^seq(-9, 0)\n iterations <- NA_integer_\n data.table(step.size=step.grid)[, {\n step.weight <- take.step(step.size)\n grid.aum <- aum_auc(diffs.list$subtrain, X.subtrain %*% step.weight)\n with(grid.aum, data.table(auc, aum))\n }, by=step.size]\n }else{\n LS=aum::aum_line_search(\n diffs.list$subtrain, X.subtrain, weight.vec, maxIterations=maxIterations)\n iterations <- LS[[\"line_search_result\"]][\n , if(identical(maxIterations,\"min.aum\"))q.size else .N]\n LS$line_search_result\n }\n best.step.size <- candidate.dt[which.min(aum), step.size]\n weight.vec <- take.step(best.step.size)\n new.aum <- aum::aum(diffs.list$subtrain, X.subtrain %*% weight.vec)\n err.thresh <- data.table(\n new.aum$total_error,key=\"thresh\"\n )[, err_before := fp_before+fn_before][, .(\n thresh=c(thresh[1]-1,thresh[-1]-diff(thresh)/2,thresh[.N]+1),\n err=c(err_before,sum(diffs.list$subtrain$fp_diff))\n )]\n intercept <- err.thresh[which.min(err), thresh]\n step.number <- step.number+1\n iterations.dt.list[[paste(\n seed, init.name, step.number, maxIterations.name\n )]] <- data.table(\n seed, init.name, step.number, maxIterations.name, iterations)\n prev.obj <- new.obj\n }#step.number\n })[[\"elapsed\"]]\n time.dt.list[[paste(\n seed, init.name, maxIterations.name\n )]] <- data.table(\n seed, init.name, maxIterations.name, elapsed.seconds)\n }#maxIterations\n }#seed/init.name\n with_meta <- function(L){\n data.table(data.name, cv.type, test.fold, rbindlist(L))\n }\n list(\n sets=with_meta(loss.dt.list),\n iterations=with_meta(iterations.dt.list),\n time=with_meta(time.dt.list))\n}\nargs.dt <- data.table::CJ(\n testFold.path=testFold.vec,\n aum.type=c(\"rate\",\"count\")\n)\n\n## Run on SLURM.\nregistry.dir <- \"figure-line-search-complexity-compare-grid\"\nif(FALSE){\n unlink(registry.dir, recursive=TRUE)\n}\nreg <- batchtools::makeRegistry(registry.dir)\nbatchtools::batchMap(OneBatch, args=args.dt, reg=reg)\nbatchtools::testJob(4, reg=reg)\njob.table <- batchtools::getJobTable(reg=reg)\nchunks <- data.frame(job.table, chunk=1)\nbatchtools::submitJobs(chunks, resources=list(\n walltime = 24*60*60,#seconds\n memory = 32000,#megabytes per cpu\n ncpus=1, #>1 for multicore/parallel jobs.\n ntasks=1, #>1 for MPI jobs.\n chunks.as.arrayjobs=TRUE), reg=reg)\n\nbatchtools::getStatus(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\nargs.dt[21]\n\n## Run locally.\nfor(args.i in 1:nrow(args.dt)){\n args.row <- args.dt[args.i]\n cache.rds <- args.row[, file.path(testFold.path, paste0(aum.type, \".rds\"))]\n all.it.list[[args.i]] <- if(file.exists(cache.rds)){\n readRDS(cache.rds)\n }else{\n cat(sprintf(\"%4d / %4d\\n\", args.i, length(args.dt)))\n print(args.row)\n iteration.list <- do.call(OneBatch, args.row)\n saveRDS(iteration.list, cache.rds)\n 
}\n}\n\n## analyze.\ncache.vec <- Sys.glob(file.path(\n \"../neuroblastoma-data/data/*/cv/*/testFolds/*\",\n cache.name))\nfor(cache.i in seq_along(cache.vec)){\n cache.rds <- cache.vec[[cache.i]]\n L <- readRDS(cache.rds)\n algo.cols <- c(\"seed\",\"init.name\",\"algo\")\n step.cols <- c(algo.cols,\"step.number\")\n best.steps <- L$steps[, .SD[which.min(aum)], by=step.cols][,c(step.cols,\"search\"),with=FALSE]\n join.dt <- L$sets[set != \"test\"][best.steps, on=step.cols]\n min.dt <- join.dt[set==\"validation\", .SD[which.min(aum)], by=.(seed,init.name,set)]\n\n ggplot()+\n geom_line(aes(\n step.number, aum, color=algo),\n data=join.dt)+\n geom_point(aes(\n step.number, aum, color=algo),\n shape=1,\n data=join.dt[search==\"exact\"])+\n geom_point(aes(\n step.number, aum, color=algo),\n data=min.dt)+\n facet_wrap(~seed+ init.name + set,scales=\"free\")\n \n}\n\n#analyze 2\ntype.csv.vec <- Sys.glob(file.path(testFold.vec, \"line_search\",\"complexity\", \"*.csv\"))\ntotal.dt.list <- list()\nfor(type.csv.i in seq_along(type.csv.vec)){\n type.csv <- type.csv.vec[[type.csv.i]]\n aum.type <- sub(\".csv\",\"\",basename(type.csv))\n type.dt <- fread(type.csv)\n type.total.dt <- type.dt[, .(\n aum.type, steps=.N, sum.iterations=sum(q.size), mean.iterations=mean(q.size)\n ), by=.(\n data.name, cv.type, test.fold, seed, init.name, maxIterations, n.subtrain.diffs\n )]\n total.dt.list[[type.csv]] <- type.total.dt\n}\ntotal.dt <- rbindlist(total.dt.list)\nfwrite(total.dt, \"figure-line-search-complexity.csv\")\nrfac <- 10\ntotal.dt[, N:= 10^(round(log10(n.subtrain.diffs)*rfac)/rfac)]\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n iterations=mean(sum.iterations),\n min=min(sum.iterations),\n max=max(sum.iterations)\n), by=.(expr.name=paste(aum.type, init.name), N)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"iterations\", fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=meas,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n iterations=mean(sum.iterations),\n min=min(sum.iterations),\n max=max(sum.iterations)\n), by=.(expr.name=paste(init.name), N)])\nmy_funs <- list(\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"iterations\", fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=meas,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n 
size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n sum.iterations=mean(sum.iterations),\n mean.iterations=mean(mean.iterations)\n), by=.(expr.name=paste(aum.type, init.name), N)]\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(\n L, unit.col.vec=c(\"sum.iterations\", \"mean.iterations\"), fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\ntotal.wide <- dcast(\n total.dt,\n N ~ .,\n value.var=c(\"sum.iterations\", \"steps\"),\n fun.aggregate = list(median, min, max)\n)[, expr.name := \"line.search\"]\nL <- list(measurements=total.wide)\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=c(\"sum.iterations_median\", \"steps_median\"), fun.list=my_funs)\naddUnit <- function(DT)DT[, Unit := sub(\"_median\", \"\", unit)]\nmeas <- addUnit(best[[\"measurements\"]])\nref.dt <- addUnit(best[[\"references\"]])\nref.color <- \"violet\"\nemp.color <- \"black\"\nribbon.dt <- nc::capture_melt_multiple(total.wide, Unit=\".*\", \"_\", column=\"min|max\")\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(Unit ~ ., scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=ribbon.dt,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\n\nvalue.var <- c(\"sum.iterations\")\nunit.col.vec <- paste0(value.var,\"_median\")\ntotal.wide <- dcast(\n total.dt[maxIterations==\"min.aum\"],\n N + init.name ~ .,\n value.var=value.var,\n fun.aggregate = list(median, min, max, length)\n)[, expr.name := init.name]\nL <- list(measurements=total.wide)\nmy_funs <- list(\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=unit.col.vec, fun.list=my_funs)\naddUnit <- function(DT)DT[, Unit := sub(\"_median\", \"\", unit)]\nmeas <- addUnit(best[[\"measurements\"]])\nref.dt <- 
addUnit(best[[\"references\"]])\nref.color <- \"violet\"\nemp.color <- \"black\"\nribbon.dt <- nc::capture_melt_multiple(total.wide, Unit=\".*\", \"_\", column=\"min|max\")\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(Unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=ribbon.dt,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\ndl <- if(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\npng('figure-line-search-complexity.png', width=8, height=4, units=\"in\", res=200)\nprint(dl)\ndev.off()\n\n\n#analyze.\nreg=batchtools::loadRegistry(registry.dir)\n\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\ndone.ids <- status.dt[is.na(error) & !is.na(done), job.id]\ndata.list <- list()\nfor(done.i in seq_along(done.ids)){\n job.id <- done.ids[[done.i]]\n args.row <- args.dt[job.id]\n res <- batchtools::loadResult(job.id)\n for(data.type in names(res)){\n data.list[[data.type]][[done.i]] <- args.row[, .(aum.type, res[[data.type]])]\n }\n}\nfor(data.type in names(data.list)){\n data.dt <- rbindlist(data.list[[data.type]])\n data.csv <- sprintf(\"figure-line-search-complexity-compare-%s.csv\", data.type)\n data.table::fwrite(data.dt, data.csv)\n}\n\ndata.csv.vec <- Sys.glob(\"figure-line-search-complexity-compare-*.csv\")\ndt.list <- list()\nfor(data.csv in data.csv.vec){\n data.type <- gsub(\".csv|.*-\", \"\", data.csv)\n dt.list[[data.type]] <- data.table::fread(data.csv)\n}\nq25 <- function(x)quantile(x,0.25)\nq75 <- function(x)quantile(x,0.75)\n\ncomp.dt <- data.table::fread(\"figure-line-search-complexity.csv\")\nsubtrain.sizes <- unique(comp.dt[, .(data.name, cv.type, test.fold, n.subtrain.diffs)])\nrfac <- 5\niterations.tall <- dt.list[[\"iterations\"]][, .(\n steps=.N,\n iterations=sum(iterations)\n),\nby=.(aum.type, data.name, cv.type, test.fold, seed, maxIterations.name)\n][\n subtrain.sizes, on=.(data.name, cv.type, test.fold), nomatch=0L\n][, B := as.integer(10^(round(log10(n.subtrain.diffs)*rfac)/rfac))][]\niterations.wide <- dcast(\n iterations.tall[!is.na(iterations)],\n B + maxIterations.name ~ .,\n list(median, min, max, q25, q75),\n value.var=\"iterations\")\nL <- list(measurements=iterations.wide[, data.table(\n iterations=iterations_median,\n N=B,\n expr.name=maxIterations.name)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"iterations\", fun.list=my_funs)\nbest$ref[, maxIterations.name := expr.name]\nlibrary(ggplot2)\nref.color <- \"red\"\ngg <- ggplot()+\n facet_grid(. 
~ maxIterations.name, labeller=label_both)+\n geom_ribbon(aes(\n B, ymin=iterations_q25, ymax=iterations_q75),\n alpha=0.5,\n data=iterations.wide)+\n geom_line(aes(\n B, iterations_median),\n data=iterations.wide)+\n geom_line(aes(\n N, reference, group=fun.name),\n color=ref.color,\n data=best$ref)+\n directlabels::geom_dl(aes(\n N, reference, group=fun.name, label=fun.name),\n color=ref.color,\n method=\"bottom.polygons\",\n data=best$ref)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\")+\n scale_y_log10(\n \"Total line search iterations\\nuntil loss stops decreasing\\nmedian and quartiles\")\npng(\"figure-line-search-complexity-compare-iterations-refs.png\", width=9, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nsteps.wide <- dcast(\n iterations.tall,\n B + maxIterations.name ~ .,\n list(median, min, max, q25, q75),\n value.var=\"steps\")\nL <- list(measurements=steps.wide[, data.table(\n steps=steps_median,\n N=B,\n expr.name=maxIterations.name)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"\\\\log N\"=function(N)log10(log(N)))\nbest <- atime::references_best(L, unit.col.vec=\"steps\", fun.list=my_funs)\nbest$ref[, maxIterations.name := expr.name]\nref.color <- \"red\"\ngg <- ggplot()+\n facet_grid(. ~ maxIterations.name, labeller=label_both)+\n geom_ribbon(aes(\n B, ymin=steps_q25, ymax=steps_q75),\n alpha=0.5,\n data=steps.wide)+\n geom_line(aes(\n B, steps_median),\n data=steps.wide)+\n geom_line(aes(\n N, reference, group=fun.name),\n color=ref.color,\n data=best$ref)+\n directlabels::geom_dl(aes(\n N, reference, group=fun.name, label=fun.name),\n color=ref.color,\n method=\"bottom.polygons\",\n data=best$ref)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\")+\n scale_y_log10(\n \"Gradient descent steps\\nuntil loss stops decreasing\\nmedian and quartiles\")\npng(\"figure-line-search-complexity-compare-steps-refs.png\", width=9, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\n\nseconds.tall <- dt.list[[\"time\"]][, seconds := elapsed.seconds][\n subtrain.sizes, on=.(data.name, cv.type, test.fold), nomatch=0L\n][, B := as.integer(10^(round(log10(n.subtrain.diffs)*rfac)/rfac))][]\nseconds.wide <- dcast(\n seconds.tall,\n B + maxIterations.name ~ .,\n list(median, min, max, q25, q75),\n value.var=\"seconds\")\nL <- list(measurements=seconds.wide[, data.table(\n seconds=seconds_median,\n N=B,\n expr.name=maxIterations.name)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"seconds\", fun.list=my_funs)\nbest$ref[, maxIterations.name := expr.name]\nlibrary(ggplot2)\nref.color <- \"red\"\ngg <- ggplot()+\n facet_grid(. 
~ maxIterations.name, labeller=label_both)+\n geom_ribbon(aes(\n B, ymin=seconds_q25, ymax=seconds_q75),\n alpha=0.5,\n data=seconds.wide)+\n geom_line(aes(\n B, seconds_median),\n data=seconds.wide)+\n geom_line(aes(\n N, reference, group=fun.name),\n color=ref.color,\n data=best$ref)+\n directlabels::geom_dl(aes(\n N, reference, group=fun.name, label=fun.name),\n color=ref.color,\n method=\"bottom.polygons\",\n data=best$ref)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\")+\n scale_y_log10(\n \"Total line search seconds\\nuntil loss stops decreasing\\nmedian and quartiles\")\npng(\"figure-line-search-complexity-compare-seconds-refs.png\", width=9, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n geom_line(aes(\n B, seconds_median, color=maxIterations.name),\n data=seconds.wide)+\n geom_ribbon(aes(\n B, ymin=seconds_q25, ymax=seconds_q75, fill=maxIterations.name),\n alpha=0.5,\n data=seconds.wide)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\",\n limits=c(NA,8000))+\n scale_y_log10(\n \"Total line search seconds\\nuntil loss stops decreasing\\nmedian and quartiles\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-line-search-complexity-compare-seconds.png\", width=4.5, height=3, units=\"in\", res=200)\nprint(dl)\ndev.off()\n\nonly.valid <- dt.list$sets[set==\"validation\"]\nstep0.valid <- only.valid[step.number==0]\nmax.valid.auc <- only.valid[\n , .SD[which.max(auc), .(auc)],\n by=.(aum.type, data.name, cv.type, test.fold, seed, init.name, maxIterations.name)]\nmax.valid.wide <- dcast(\n max.valid.auc,\n aum.type+data.name+cv.type+test.fold+seed+init.name ~ maxIterations.name,\n value.var=\"auc\")\nmax.valid.wide[, hist(min.aum-exactL)]\nmax.valid.wide[, table(min.aum>exactL)]\nmax.valid.wide[, summary(min.aum-exactL)]\nmax.valid.wide[, summary(min.aum-exactQ)]\nmax.valid.wide[, summary(exactQ-exactL)]\nwide.p <- max.valid.wide[, {\n tryCatch({\n L <- t.test(min.aum, grid, alternative=\"two.sided\", paired=TRUE)\n with(L, data.table(p.value, estimate))\n }, error=function(e){\n NULL\n })\n}, by=.(aum.type, data.name, cv.type, test.fold, init.name)][order(p.value)]\nwide.p[order(estimate)]\nwide.p[init.name==\"IntervalRegressionCV\"][order(estimate)]\n\n\nggplot()+\n geom_boxplot(aes(\n auc, maxIterations.name),\n data=max.valid.auc)\n\nmax.valid.folds <- dcast(\n max.valid.auc,\n aum.type+data.name+cv.type+test.fold+init.name ~ maxIterations.name,\n list(mean, length, min, max),\n value.var=\"auc\")\nmax.valid.folds[auc_min_exactL < auc_max_exactL]\nmax.valid.folds[auc_min_exactQ < auc_max_exactQ]\nmax.valid.folds[auc_min_min.aum < auc_max_min.aum]\npdt <- max.valid.folds[init.name==\"IntervalRegressionCV\", {\n L <- t.test(auc_mean_grid, auc_mean_min.aum, alternative=\"two.sided\", paired=TRUE)\n with(L, data.table(p.value, estimate))\n}, by=.(aum.type, data.name, cv.type)][order(p.value)]\npdt[order(estimate)]\nselect.dt <- rbind(\n data.table(aum.type=\"rate\", data.name=\"CTCF_TDH_ENCODE\", cv.type=\"equal_labels\"),\n data.table(aum.type=\"rate\", data.name=\"systematic\", cv.type=\"R-3.6.0-sequenceID\"))\nselect.auc <- max.valid.auc[select.dt, on=.(aum.type, data.name, cv.type)]\n\nggplot()+\n geom_point(aes(\n auc, maxIterations.name),\n data=select.auc)+\n facet_grid(test.fold ~ aum.type+data.name+cv.type, labeller=label_both, scales=\"free\")\n\nselect.dt <- rbind(\n ##data.table(aum.type=\"rate\", data.name=\"CTCF_TDH_ENCODE\", cv.type=\"equal_labels\"),\n 
##data.table(aum.type=\"count\", data.name=\"H3K27ac-H3K4me3_TDHAM_BP\", cv.type=\"equal_labels\"),\n ##data.table(aum.type=\"count\", data.name=\"systematic\", cv.type=\"profileID\"),\n ##data.table(aum.type=\"count\", data.name=\"systematic\", cv.type=\"R-3.6.0-profileID\"),\n ##data.table(aum.type=\"rate\", data.name=\"systematic\", cv.type=\"R-3.6.0-chrom\"),\n ##data.table(aum.type=\"rate\", data.name=\"H3K36me3_TDH_ENCODE\", cv.type=\"equal_labels\"),\n ##data.table(aum.type=\"rate\", data.name=\"systematic\", cv.type=\"chrom\"),\n ##data.table(aum.type=\"rate\", data.name=\"systematic\", cv.type=\"sequenceID\"),\n NULL)\nselect.auc <- max.valid.auc[select.dt, on=.(aum.type, data.name, cv.type)]\nggplot()+\n geom_point(aes(\n auc, maxIterations.name),\n shape=1,\n data=select.auc)+\n facet_grid(init.name ~ test.fold, labeller=label_both, scales=\"free\")\n\nsome.folds <- rbind(\n data.table(data.name=\"detailed\", cv.type=\"chrom\", test.fold=1, aum.type=\"count\", init.name=\"IntervalRegressionCV\"),\n data.table(data.name=\"H3K4me3_PGP_immune\", cv.type=\"equal_labels\", test.fold=4, aum.type=\"count\", init.name=\"IntervalRegressionCV\"),\n data.table(data.name=\"H3K4me3_TDH_immune\", cv.type=\"equal_labels\", test.fold=4, aum.type=\"rate\", init.name=\"IntervalRegressionCV\"),\n data.table(data.name=\"systematic\", cv.type=\"profileSize\", test.fold=4, aum.type=\"rate\", init.name=\"IntervalRegressionCV\"))\n##some.folds <- rbind(wide.p[init.name==\"near.zero\" & 0<estimate][order(p.value)][1:5])\nselect.auc <- max.valid.auc[some.folds, on=.(aum.type, data.name, cv.type, test.fold, init.name)]\nselect.wide <- dcast(\n select.auc,\n data.name +cv.type+test.fold+aum.type+init.name+maxIterations.name ~ .,\n list(mean, sd),\n value.var=\"auc\")\nggplot()+\n geom_point(aes(\n auc_mean, maxIterations.name),\n shape=1,\n data=select.wide)+\n geom_segment(aes(\n auc_mean-auc_sd, maxIterations.name,\n xend=auc_mean+auc_sd, yend=maxIterations.name),\n data=select.wide)+\n facet_grid(\n . 
~ data.name + cv.type + test.fold + aum.type,\n labeller=label_both, scales=\"free\")+\n scale_x_continuous(\n \"Max validation AUC\")\n\nsome.folds <- rbind(\n ##data.table(data.name=\"detailed\", cv.type=\"chrom\", test.fold=1, aum.type=\"count\", init.name=\"IntervalRegressionCV\"),\n data.table(data.name=\"H3K4me3_PGP_immune\", cv.type=\"equal_labels\", test.fold=4, aum.type=\"count\", init.name=\"IntervalRegressionCV\"),\n ##data.table(data.name=\"H3K4me3_TDH_immune\", cv.type=\"equal_labels\", test.fold=4, aum.type=\"rate\", init.name=\"IntervalRegressionCV\"),\n ##data.table(data.name=\"systematic\", cv.type=\"profileSize\", test.fold=4, aum.type=\"rate\", init.name=\"IntervalRegressionCV\"))\n NULL)\n max.valid.auc[some.folds, on=.(aum.type, data.name, cv.type, init.name)]\nselect.auc <- max.valid.auc[data.name==\"H3K4me3_PGP_immune\" & cv.type==\"equal_labels\" & aum.type==\"count\" & init.name==\"IntervalRegressionCV\"]\nselect.auc <- max.valid.auc[data.name==\"systematic\" & cv.type==\"profileSize\" & aum.type==\"rate\" & init.name==\"IntervalRegressionCV\"]\ndisp.names <- c(\n initial=\"initial\",\n min.aum=\"first min(proposed)\",\n grid=\"grid\",\n exactQ=\"quadratic(proposed)\",\n exactL=\"linear(proposed)\")\none_set <- function(DT){\n DT[\n data.name==\"H3K4me3_TDH_immune\" & cv.type==\"equal_labels\" &\n aum.type==\"rate\" & init.name==\"IntervalRegressionCV\"\n ][, display.name := factor(disp.names[maxIterations.name], rev(disp.names))]\n}\nselect.auc <- rbind(\n one_set(max.valid.auc),\n one_set(data.table(\n step0.valid\n )[maxIterations.name==\"grid\"][, maxIterations.name := \"initial\"])[, .(\n aum.type, data.name, cv.type, test.fold, seed, init.name,\n maxIterations.name, auc, display.name)])\nselect.wide <- dcast(\n select.auc,\n data.name +cv.type+test.fold+aum.type+init.name+display.name ~ .,\n list(mean, sd),\n value.var=\"auc\")\nonly.initial <- select.wide[\n display.name==\"initial\"\n][, .(test.fold, initial.validation.AUC=sprintf(\"%.4f\", auc_mean))]\nnot.initial <- select.wide[\n display.name!=\"initial\"\n][only.initial, on=\"test.fold\"]\ngg <- ggplot()+\n theme(\n axis.text.x=element_text(angle=30, hjust=1))+\n geom_point(aes(\n auc_mean, display.name),\n shape=1,\n data=not.initial)+\n geom_segment(aes(\n auc_mean-auc_sd, display.name,\n xend=auc_mean+auc_sd, yend=display.name),\n data=not.initial)+\n facet_grid(\n . ~ test.fold + initial.validation.AUC,\n labeller=label_both, scales=\"free\")+\n scale_x_continuous(\n \"Max validation AUC, mean +/- SD over 4 random initializations\",\n labels=function(x)sprintf(\"%.4f\", x))+\n scale_y_discrete(\n \"Line search type\")\npng(\"figure-line-search-complexity-compare-H3K4me3_TDH_immune-equal_labels-rate-IntervalRegressionCV.png\", width=8, height=2, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n theme(\n axis.text.x=element_text(angle=30, hjust=1))+\n geom_point(aes(\n auc_mean, display.name),\n shape=1,\n data=select.wide)+\n geom_segment(aes(\n auc_mean-auc_sd, display.name,\n xend=auc_mean+auc_sd, yend=display.name),\n data=select.wide)+\n facet_grid(\n . 
~ test.fold,\n labeller=label_both, scales=\"free\")+\n scale_x_continuous(\n \"Max validation AUC, mean +/- SD over 4 random initializations\",\n labels=function(x)sprintf(\"%.4f\", x))+\n scale_y_discrete(\n \"Line search type\")\npng(\"figure-line-search-complexity-compare-H3K4me3_TDH_immune-equal_labels-rate-IntervalRegressionCV-initial.png\", width=8, height=1.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nset.times <- one_set(dt.list[[\"time\"]])\nwide.times <- dcast(\n set.times,\n data.name +cv.type+test.fold+aum.type+init.name+display.name ~ .,\n list(mean, sd),\n value.var=\"elapsed.seconds\")\ngg <- ggplot()+\n theme(\n axis.text.x=element_text(angle=30, hjust=1))+\n geom_point(aes(\n elapsed.seconds_mean, display.name),\n shape=1,\n data=wide.times)+\n geom_segment(aes(\n elapsed.seconds_mean-elapsed.seconds_sd, display.name,\n xend=elapsed.seconds_mean+elapsed.seconds_sd, yend=display.name),\n data=wide.times)+\n facet_grid(\n . ~ test.fold,\n labeller=label_both, scales=\"free\")+\n scale_x_continuous(\n \"Computation time (seconds), mean +/- SD over 4 random initializations\")+\n scale_y_discrete(\n \"Line search type\")\npng(\"figure-line-search-complexity-compare-H3K4me3_TDH_immune-equal_labels-rate-IntervalRegressionCV-seconds.png\", width=8, height=1.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6051361560821533,
"alphanum_fraction": 0.6137285232543945,
"avg_line_length": 32.40967559814453,
"blob_id": "56174ab5b8a0330a15319ca4e6e41fd372c45074",
"content_id": "9f3167adb189ee0dad238435878145bd63ba3873",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 10358,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 310,
"path": "/figure-linear-model-test.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\n\nauc.improved <- readRDS(\"../feature-learning-benchmark/auc.improved.rds\")\nroc.dt.list <- list()\nfor(test.fold.i in 1:nrow(auc.improved)){\n one.fold <- auc.improved[test.fold.i]\n roc.dt.list[[test.fold.i]] <- one.fold[, data.table(\n data.name=set.name, test.fold=fold, pred.name,\n rows=.N,\n roc[[1]])]\n}\n(roc.dt <- do.call(rbind, roc.dt.list))\nroc.dt[, fn0 := fn-min(fn), by=.(data.name, test.fold, pred.name)]\nroc.dt[, min.fp.fn := ifelse(fp<fn0, fp, fn0)]\nroc.dt[, width := max.thresh-min.thresh]\nroc.dt[, area := ifelse(min.fp.fn==0, 0, min.fp.fn*width)]\n(aum.dt <- roc.dt[, .(\n aum=sum(area)\n), keyby=.(data.name, test.fold, pred.name)])\nbest.aum <- aum.dt[, .SD[which.min(aum), .(best.aum=aum)], by=.(data.name, test.fold)]\n\n\ntestFold.vec <- Sys.glob(\"../neuroblastoma-data/data/*/cv/*/testFolds/*\")\n\nOneFold <- function(testFold.path){\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n ## replace positive fp/fn at end with 0 to avoid AUM=Inf.\n data.list[[\"evaluation\"]][min.log.lambda==-Inf & 0<fn, fn := 0]\n data.list[[\"evaluation\"]][max.log.lambda==Inf & 0<fp, fp := 0]\n ## read folds. \n folds.dt <- data.table::fread(folds.csv)\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := rep(\n c(\"subtrain\", \"validation\"), l=.N)]\n seqs.train <- folds.dt[[\"sequenceID\"]]\n X.all <- scale(data.list$inputs[, -1])\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.list <- list()\n for(s in unique(folds.dt$set)){\n set.list[[s]] <- rownames(X.finite) %in% folds.dt[s==set, sequenceID]\n }\n X.list <- lapply(set.list, function(i)X.finite[i, ])\n neg.t.X.subtrain <- -t(X.list[[\"subtrain\"]])\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ]) \n fit[[\"param.mat\"]]\n },\n zero=function(){\n N.param <- ncol(X.finite)+1\n rep(0, N.param)+rnorm(N.param)\n }\n )\n iteration.dt.list <- list()\n for(seed in 1:4)for(init.name in names(init.fun.list)){\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n computeAUM <- function(w, i, is.set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=as.numeric(pred.pen.vec))\n set.dt <- pred.dt[is.set]\n penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n }\n for(iteration in 1:50){\n summary.dt.list <- list()\n set.roc.list <- list()\n for(set in names(set.list)){\n set.roc.list[[set]] <- computeAUM(weight.vec, intercept, set.list[[set]])\n summary.dt.list[[set]] <- with(set.roc.list[[set]], data.table(\n set,\n 
thresholds[threshold==\"predicted\"],\n auc,\n aum))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n iteration.dt.list[[paste(seed, init.name, iteration)]] <- data.table(\n seed, init.name, iteration, summary.dt)\n cat(sprintf(\n \"it=%d seed=%d init=%s\\n\",\n iteration, seed, init.name))\n g.dt <- set.roc.list[[\"subtrain\"]][[\"aum.grad\"]]\n ## If aum.grad has some problems with no changes in error then\n ## they may be missing.\n g.vec <- rep(0, ncol(neg.t.X.subtrain))\n names(g.vec) <- colnames(neg.t.X.subtrain)\n g.vec[\n g.dt[[\"sequenceID\"]]\n ] <- g.dt[[\"lo\"]]\n direction.vec <- neg.t.X.subtrain %*% g.vec\n take.step <- function(s){\n weight.vec + s*direction.vec\n }\n set.aum.list <- list()\n for(step.size in 10^seq(-10, 0, by=0.5)){\n new.weight.vec <- take.step(step.size)\n for(set in \"subtrain\"){\n set.roc <- computeAUM(new.weight.vec, 0, set.list[[set]])\n set.aum.list[[paste(step.size, set)]] <- data.table(\n step.size, set, aum=set.roc$aum,\n intercept=set.roc$thresholds[\n threshold==\"min.error\", (max.thresh+min.thresh)/2])\n }\n }\n set.aum <- do.call(rbind, set.aum.list)\n best.dt <- set.aum[, .SD[min(aum)==aum], by=set]\n ggplot()+\n geom_line(aes(\n step.size, aum),\n data=set.aum)+\n geom_point(aes(\n step.size, aum),\n data=best.dt)+\n geom_text(aes(\n step.size, aum, label=aum),\n vjust=0,\n data=best.dt)+\n scale_x_log10()+\n scale_y_log10()+\n facet_grid(set ~ ., scales=\"free\")\n weight.vec <- take.step(best.dt[[\"step.size\"]])\n intercept <- best.dt[[\"intercept\"]]\n }#iteration\n }#seed/init.name\n data.table(\n do.call(rbind, iteration.dt.list),\n data.name, cv.type, test.fold)\n}\n\nall.it.list <- list()\nfor(testFold.i in seq_along(testFold.vec)){\n fdir <- testFold.vec[testFold.i]\n out.csv <- file.path(fdir, \"linear-model-aum.csv\")\n all.it.list[[testFold.i]] <- if(file.exists(out.csv)){\n data.table::fread(out.csv)\n }else{\n cat(sprintf(\"%4d / %4d %s\\n\", testFold.i, length(testFold.vec), fdir))\n iteration.dt <- OneFold(fdir)\n data.table::fwrite(iteration.dt, out.csv)\n iteration.dt\n }\n}\nall.it <- do.call(rbind, all.it.list)\n\nsubtrain.it <- all.it[set==\"subtrain\"]\nsubtrain.it[, diff := c(NA, diff(aum)), by=.(init.name, data.name, test.fold, seed)]\nsubtrain.it[, .(init.name, data.name, test.fold, iteration, aum, diff)]\nsubtrain.it[diff>1e-6]\ngg <- ggplot()+\n ggtitle(\"check if train AUM decreases\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_line(aes(\n iteration, aum,\n group=paste(seed, init.name)),\n data=subtrain.it)+\n facet_grid(init.name + data.name + test.fold ~ ., scales=\"free\", labeller=label_both)\npng(\n \"figure-linear-model-test-aum-train-decreases.png\",\n width=4, height=35, res=100, units=\"in\")\nprint(gg)\ndev.off()\n\nvalidation.it <- all.it[set==\"validation\"]\nggplot()+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n scale_y_log10()+\n geom_line(aes(\n iteration, aum, color=init.name,\n group=paste(seed, init.name)),\n data=validation.it)+\n geom_point(aes(\n iteration, aum, color=init.name,\n group=paste(seed, init.name)),\n data=validation.it[\n ,\n .SD[which.min(aum)],\n by=.(data.name, test.fold, init.name, seed)])+\n facet_grid(data.name + test.fold ~ ., scales=\"free\", labeller=label_both)\n\nvalid.best.ids <- all.it[\n set==\"validation\",\n .SD[which.min(aum), .(iteration)],\n by=.(data.name, test.fold, init.name, seed)]\ntest.best.ids <- all.it[\n set==\"test\",\n .SD[which.min(aum), .(iteration)],\n by=.(data.name, test.fold, init.name, 
seed)]\n\n## model selection.\ntest.it1 <- all.it[set==\"test\" & iteration==1]\ntest.selected <- all.it[set==\"test\"][valid.best.ids, on=names(valid.best.ids)]\ntest.best <- all.it[set==\"test\"][test.best.ids, on=names(test.best.ids)]\n\n## compare with best predictions (no linear model).\nbest.compare <- best.aum[\n test.best,\n .(data.name, test.fold, init.name, seed, aum, best.aum),\n on=.(data.name, test.fold)]\nbest.compare[, aum.diff := aum-best.aum]\nggplot()+\n geom_point(aes(\n aum.diff, init.name),\n shape=1,\n data=best.compare)+\n facet_grid(. ~ data.name + test.fold, scales=\"free\", labeller=label_both)+\n theme_bw()+\n scale_x_log10()+\n theme(panel.spacing=grid::unit(0, \"lines\"))\nbest.compare[, .(\n min.diff=min(aum.diff),\n max.diff=max(aum.diff)\n), by=.(data.name, test.fold, init.name)]\n\nbest.pred <- best.aum[\n unique(test.best[, .(data.name, test.fold)]),\n on=.(data.name, test.fold)]\ntest.show <- rbind(\n data.table(iterations=\"initial\", test.it1),\n data.table(iterations=\"best.linear\", test.best),\n data.table(iterations=\"selected\", test.selected))\nifac <- function(x)factor(\n x, c(\"initial\", \"selected\", \"best.linear\", \"best.pred\"))\ntest.show[, Iterations := ifac(iterations)]\nbest.pred[, Iterations := ifac(\"best.pred\")]\ngg <- ggplot()+\n ggtitle(\"Test AUM, selected=min valid aum, best=min test aum, max it=50\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_point(aes(\n aum, Iterations, color=factor(test.fold)),\n shape=1,\n data=test.show)+\n scale_y_discrete(drop=FALSE)+\n geom_point(aes(\n best.aum, Iterations, color=factor(test.fold)),\n shape=1,\n data=best.pred)+\n facet_grid(init.name ~ data.name, scales=\"free\", labeller=label_both)\npng(\n \"figure-linear-model-test-compare-init.png\",\n width=8, height=6, res=100, units=\"in\")\nprint(gg)\ndev.off()\nb\ntest.show[, neg.auc := -auc]\ntest.show.tall <- melt(\n test.show[init.name==\"IntervalRegressionCV\"],\n measure.vars=c(\"neg.auc\", \"error.percent\", \"aum\"))\ntest.iCV <- dcast(\n test.show.tall,\n data.name + test.fold + variable + seed ~ iterations)\ngg <- ggplot()+\n ggtitle(\"Test set metrics, init=IntervalRegressionCV, selected worse than initial\")+\n geom_point(aes(\n initial, selected, color=factor(seed)),\n shape=1,\n data=test.iCV)+\n geom_abline()+\n facet_wrap(~ data.name + variable, scales=\"free\", labeller=label_both, ncol=3)\npng(\n \"figure-linear-model-test-initial-selected.png\",\n width=10, height=6, res=100, units=\"in\")\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n ggtitle(\"Test set metrics, init=IntervalRegressionCV, best about the same as initial\")+\n geom_point(aes(\n initial, best.linear, color=factor(seed)),\n shape=1,\n data=test.iCV)+\n geom_abline()+\n facet_wrap(~ data.name + variable, scales=\"free\", labeller=label_both, ncol=3)\npng(\n \"figure-linear-model-test-initial-best.png\",\n width=10, height=6, res=100, units=\"in\")\nprint(gg)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6328257322311401,
"alphanum_fraction": 0.6441060304641724,
"avg_line_length": 30.93693733215332,
"blob_id": "a0b4363cc5c557fc2127ff3c565d501961de0311",
"content_id": "1566ebdad58b38f2954ec206a50d145ab99fcda9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3546,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 111,
"path": "/neuroblastomaProcessed.combinations.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\n\ncounts <- neuroblastomaProcessed$errors[, {\n diff.tab <- table(factor(diff(errors), c(\"-1\", \"0\", \"1\")))\n L <- as.list(diff.tab)\n size <- max.log.lambda-min.log.lambda\n for(fun.name in c(\"min\", \"max\")){\n fun <- get(fun.name)\n L[[paste0(fun.name, \".size\")]] <- min(size[errors==fun(errors)])\n }\n L$mean.size <- with(L, (min.size+max.size)/2)\n L\n}, by=list(profile.id, chromosome)]\ntwo.changes <- counts[1 < `-1` | 1 < `1`]\ntwo.changes <- counts[order(-`-1`, -`1`, -mean.size)][profile.id != 481][1:8]\ntwo.changes[, panel := paste0(\n ifelse(`-1`==2, \"p\", \"n\"), #positive or negative label\n profile.id, \".\", chromosome)]\nsome.err <- neuroblastomaProcessed$errors[two.changes, on=list(\n profile.id, chromosome)]\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=names(err.colors))\nggplot()+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(profile.id + chromosome ~ .)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=variable, size=variable),\n data=some.err.tall)+\n scale_y_continuous(\n \"errors\",\n breaks=c(0,1),\n limits=c(-0.2, 1.2))+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)\n\nsome.err.tall[, value.i := cumsum(\n c(FALSE, diff(value) != 0)\n), by=list(panel, profile.id, chromosome, variable)]\nsegs.err.tall <- some.err.tall[, list(\n min.log.lambda=min(min.log.lambda),\n max.log.lambda=max(max.log.lambda),\n value=value[1]\n), by=list(panel, profile.id, chromosome, variable, value.i)]\nsegs.min.tall <- segs.err.tall[, {\n .SD[value==min(value)]\n}, by=list(panel, profile.id, chromosome, variable)]\nsegs.min.err <- segs.min.tall[variable==\"errors\"]\nsegs.min.err[, mid.log.lambda := (min.log.lambda+max.log.lambda)/2]\n(expand.args <- sapply(two.changes$panel, function(L){\n c(\"finite\", \"infinite\")\n}, USE.NAMES=TRUE, simplify=FALSE))\ncombos.wide <- data.table(do.call(expand.grid, expand.args))\ncombos.wide[, combo.i := 1:.N]\ncombos.tall <- melt(\n combos.wide,\n id.vars=\"combo.i\",\n variable.name=\"panel\",\n value.name=\"interval\")\n\nauc.dt.list <- list()\nsize.vec <- 10^seq(-4, 2)\n##size.vec <- 1e-4\npred.dt.list <- list()\nfor(size in size.vec){\n print(size)\n segs.min.err[, pred.log.lambda := ifelse(\n min.log.lambda == -Inf, max.log.lambda-size, ifelse(\n max.log.lambda == Inf, min.log.lambda+size, mid.log.lambda))]\n segs.min.err[, interval := ifelse(\n is.finite(mid.log.lambda), \"finite\", \"infinite\")]\n pred.dt <- segs.min.err[combos.tall, on=list(panel, interval)]\n pred.dt.list[[paste(size)]] <- pred.dt\n auc.dt.list[[paste(size)]] <- pred.dt[, {\n L <- penaltyLearning::ROChange(\n some.err, .SD, c(\"panel\"))\n L$roc[, min.fp.fn := ifelse(fp<fn, fp, fn)]\n L$roc[, width.thresh := max.thresh-min.thresh]\n aub <- L$roc[!(width.thresh==Inf & min.fp.fn==0), {\n sum(min.fp.fn*width.thresh)\n }]\n with(L, data.table(\n auc, aub, size, roc=list(list(roc)),\n n.finite=sum(interval==\"finite\"),\n thresholds[threshold==\"predicted\"]))\n }, by=list(combo.i)]\n}\n\nneuroblastomaProcessed.combinations <- list(\n problems=two.changes,\n segs.min.err=segs.min.err,\n some.err=some.err,\n combos=combos.tall,\n pred=do.call(rbind, pred.dt.list),\n auc=do.call(rbind, auc.dt.list))\nsaveRDS(\n neuroblastomaProcessed.combinations,\n 
\"neuroblastomaProcessed.combinations.rds\")\n\n"
},
{
"alpha_fraction": 0.5997107028961182,
"alphanum_fraction": 0.6063235998153687,
"avg_line_length": 35.1119384765625,
"blob_id": "98518386300f61771172ea32a7238fb90ffd542c",
"content_id": "86875b8ad0ac8654fb49761bb40d0d9f4dcb173c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4839,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 134,
"path": "/auc.improved.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nfolds.dt <- fread(\"../feature-learning-benchmark/labeled_problems_folds.csv\")\naddMeta <- function(dt){\n dt[, set.name := sub(\"/.*\", \"\", prob.dir)]\n dt[, problem := sub(\".*/\", \"\", prob.dir)]\n dt[folds.dt, on=list(set.name, problem)]\n}\nerrors.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_errors.csv\"))\npossible.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_possible_errors.csv\"))\n\nauc.improved.list <- list()\n\n## compute derivative of Area under min(FP, FN).\nfold.possible <- unique(folds.dt[, .(set.name, fold)])\ni.possible <- 1:nrow(fold.possible)\nN.possible <- paste(i.possible, \"improved\")\ni.todo <- i.possible[!N.possible %in% names(auc.improved.list)]\nbiggest.step <- 0.1\nfor(i in seq_along(i.todo)){\n test.fold.i <- i.todo[[i]]\n cat(sprintf(\"%4d / %4d test folds TODO=%d\\n\", i, length(i.todo), test.fold.i))\n test.fold.info <- fold.possible[test.fold.i]\n test.fold.errors <- errors.dt[test.fold.info, on=.(set.name, fold)]\n test.fold.errors[, min.log.lambda := min.log.penalty]\n test.fold.errors[, max.log.lambda := max.log.penalty]\n test.fold.errors[, seg.i := cumsum(\n c(1, diff(fp)!=0 | diff(fn) != 0)), by=.(prob.dir)]\n possible.errors <- possible.dt[test.fold.errors, on=list(\n set.name, fold, prob.dir)][, possible.fn := possible.tp]\n possible.segs <- possible.errors[, .(\n min.log.lambda=min(min.log.lambda),\n max.log.lambda=max(max.log.lambda)\n ), by=.(\n prob.dir, seg.i, fp, fn, errors, possible.fp, possible.fn, labels\n )][, `:=`(\n min.lambda = exp(min.log.lambda),\n example=prob.dir\n )]\n ## Check for non-zero at end of err fun.\n possible.segs[min.log.lambda == -Inf & fn > 0]\n possible.segs[min.log.lambda == Inf & fp > 0]\n test.fold.targets <- penaltyLearning::targetIntervals(\n possible.segs, \"prob.dir\")\n prob.ord <- test.fold.targets$prob.dir\n aum.diffs <- aum::aum_diffs_penalty(possible.segs, prob.ord)\n min.err.pred.dt <- test.fold.targets[, data.table(\n prob.dir,\n pred.log.lambda=fcase(\n min.log.lambda>-Inf & max.log.lambda==Inf, min.log.lambda+1, \n min.log.lambda==-Inf & max.log.lambda<Inf, max.log.lambda-1,\n min.log.lambda>-Inf & max.log.lambda<Inf, (min.log.lambda+max.log.lambda)/2,\n min.log.lambda==-Inf & max.log.lambda==Inf, 0)\n )]\n getROC <- function(p){\n L <- penaltyLearning::ROChange(possible.segs, p, \"prob.dir\")\n non.smooth <- L$aum.grad[lo != hi]\n if(nrow(non.smooth))print(non.smooth)\n L\n }\n getAUM <- function(pred.vec){\n L <- aum::aum(aum.diffs, pred.vec)\n L$grad <- with(L, ifelse(\n derivative_mat[,1] == derivative_mat[,2],\n (derivative_mat[,1]+derivative_mat[,2])/2,\n 0))\n L\n }\n possible.segs[prob.dir==prob.ord[1], .(fp,fn,min.log.lambda)]\n aum.diffs[example==0]\n init.list <- list(\n min.error=-min.err.pred.dt$pred.log.lambda,\n zero=rep(0, nrow(min.err.pred.dt)))\n for(initialization in names(init.list)){\n current.pred <- initial.pred <- init.list[[initialization]]\n step.number <- 1\n step.size <- 1\n ##roc.list <- getROC(pred.dt)\n aum.list <- getAUM(current.pred)\n ##data.table(pred.dt, current.pred)[, pred.log.lambda-current.pred]\n improvement <- Inf\n while(1e-3 < improvement){\n ## these depend on predictions:\n while({\n ## step.dt <- pred.dt[roc.list$aum.grad, .(\n ## prob.dir,\n ## pred.log.lambda = pred.log.lambda-step.size*ifelse(\n ## sign(lo)==sign(hi), (lo+hi)/2, 0)\n ## ), on=.(prob.dir)]\n ## step.list <- getROC(step.dt)\n step.pred <- current.pred - step.size*aum.list$grad\n step.list <- 
getAUM(step.pred)\n aum.list$aum < step.list$aum\n }){\n step.size <- step.size/2\n }\n improvement <- aum.list$aum-step.list$aum\n cat(sprintf(\n \"step=%d size=%e aum=%f->%f diff=%f\\n\",\n step.number,\n step.size,\n aum.list$aum,\n step.list$aum,\n improvement))\n ## pred.dt <- step.dt\n current.pred <- step.pred\n aum.list <- step.list\n step.number <- step.number + 1\n step.size <- step.size*2\n }\n pred.list <- list(\n initial=initial.pred,\n improved=current.pred)\n for(pred.name in names(pred.list)){\n pred <- data.table(\n prob.dir=prob.ord, \n pred.log.lambda=-pred.list[[pred.name]])\n L <- penaltyLearning::ROChange(possible.segs, pred, \"prob.dir\")\n print(L$aum)\n auc.improved.list[[paste(test.fold.i, improvement, pred.name)]] <- \n with(L, data.table(\n test.fold.i,\n test.fold.info,\n initialization,\n pred.name,\n roc=list(roc),\n thresholds[threshold==\"min.error\"],\n auc, aum))\n }\n }\n}\n(auc.improved <- do.call(rbind, auc.improved.list))\n\nsaveRDS(auc.improved, \"auc.improved.rds\")\n"
},
{
"alpha_fraction": 0.6112269163131714,
"alphanum_fraction": 0.6374329924583435,
"avg_line_length": 30.679244995117188,
"blob_id": "d5cee5f6d46bc5efce2af3b02793f5f75d4f2bb3",
"content_id": "a7bc9b02e99c2cbf7b8816c1cf778727b12818ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6716,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 212,
"path": "/figure-fn-not-monotonic.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata(neuroblastoma, package=\"neuroblastoma\")\nnb.dt <- data.table(neuroblastoma$profiles)\np4.2 <- nb.dt[profile.id==4 & chromosome==2]\np4.2[, i := 1:.N]\nlabel.dt <- data.table(\n problem=1, min=20, max=80, annotation=\"1breakpoint\", label=\"one change\")\nmax.segments <- 20\nfit <- jointseg::Fpsn(p4.2$logratio, max.segments)\nsegs.dt.list <- list()\nmodels.dt.list <- list()\nfor(segments in 1:max.segments){\n end <- fit$t.est[segments, 1:segments]\n start <- c(0L, end[-length(end)])+1L\n start.end <- data.table(start, end)\n segs.dt.list[[paste(segments)]] <- data.table(\n segments,\n p4.2[start.end, .(start, end, mean=mean(logratio)), on=.(i >= start, i <= end), by=.EACHI])\n models.dt.list[[paste(segments)]] <- data.table(\n segments,\n loss=fit$J.est[segments])\n}\n##models.dt <- do.call(rbind, models.dt.list)\nmodels.dt <- data.table(problem=1, segments=1:max.segments, loss=fit$J.est)\nsegs.dt <- do.call(rbind, segs.dt.list)[, .(problem=1, segments, start, end, mean)]\nchange.dt <- segs.dt[1 < start][, change := start-0.5][]\nselected.dt <- penaltyLearning::modelSelection(models.dt, \"loss\", \"segments\")\nerr.list <- penaltyLearning::labelError(\n selected.dt, label.dt, change.dt,\n problem.vars=\"problem\", change.var=\"change\", model.vars=\"segments\")\n\nerr.list$model.errors[, diff.fp := c(diff(fp), NA)]\nerr.list$model.errors[, diff.fn := c(diff(fn), NA)]\ndiff.dt <- err.list$model.errors[diff.fp!=0 | diff.fn!=0, .(\n pred.log.lambda=max.log.lambda,\n diff.fp,\n diff.fn\n)]\nerr.tall <- data.table::melt(err.list$model.errors, measure=c(\"fp\", \"fn\"))\nerr.sizes <- c(\n fp=3,\n fn=2)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n correct=\"grey50\")\nerr.colors[c(\"false positive\", \"false negative\")] <- err.colors[c(\"fp\", \"fn\")]\nlab <- \"Error type\"\nsome.segs <- c(2:4, 15)\nvline.dt <- err.list$model.errors[segments %in% some.segs]\nvline.dt[, x := -(min.log.lambda+max.log.lambda)/2]\ngg <- ggplot()+\n facet_grid(panel ~ ., scales=\"free\")+\n theme_bw()+\n ##theme(panel.spacing=grid::unit(0, \"lines\"))+\n ylab(\"\")+\n geom_vline(aes(\n xintercept=x),\n color=\"grey50\",\n data=vline.dt)+\n geom_segment(aes(\n -min.log.lambda, value,\n color=variable, size=variable,\n xend=-max.log.lambda, yend=value),\n data=data.table(panel=\"Label errors\", err.tall))+\n geom_blank(aes(\n x, y),\n data=data.table(x=0, y=c(-0.4,1.4)))+\n geom_segment(aes(\n -min.log.lambda, segments,\n xend=-max.log.lambda, yend=segments),\n size=1,\n data=data.table(panel=\"Segments\", err.list$model.errors))+\n scale_color_manual(lab, values=err.colors)+\n scale_size_manual(lab, values=err.sizes)+\n scale_x_continuous(\n \"Predicted value, f(x) = -log(penalty)\",\n limits=c(-2, 4))\npng(\"figure-fn-not-monotonic-error.png\", 3.3, 2, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nmodel.color <- \"violet\"\nstandAlone <- TRUE\nsuffix <- if(standAlone)\"-standAlone\" else \"\"\nno.ext <- paste0(\"figure-fn-not-monotonic-error\", suffix)\nf.tex <- paste0(no.ext, \".tex\")\ntikz(f.tex, width=3, height=3, standAlone = standAlone)\nleft.lines <- 4\nother.lines <- 1\nax.label.offset <- 1.5\npar(mar=c(0,left.lines,other.lines,other.lines), cex.axis=1.5)\nlayout(cbind(c(rep(1,1),rep(2,2))))\nxrange <- c(-2, 4)\nxsize <- diff(xrange)\ndiff.dt[, x := c(0, -1, -1.5, -2)]\ndiff.dt[, y := -.I/2]\ndiff.dt[, pos := c(4,4,4,4)]\nplot(xrange, range(err.tall[[\"segments\"]]), type=\"n\", yaxt=\"n\", xaxt=\"n\",ylab=\"\",xlab=\"\")\nvline.dt[, abline(v=x, 
col=model.color)]\nvline.dt[, text(x, 19, segments, pos=2, offset=0.2, col=model.color)]\nmysegs <- function(x0, x1, y, ...)segments(\n ifelse(x0==-Inf, xrange[1]-xsize, x0), y,\n ifelse(x1==Inf, xrange[2]+xsize, x1), y,\n lend=1,\n ...)\nerr.list$model.errors[, mysegs(-max.log.lambda, -min.log.lambda, segments, lwd=3)]\naxis(2,c(1,10,20),las=1)\nmtext(\"Segments\", 2, left.lines-ax.label.offset)\nbottom.lines <- 4\npar(mar=c(bottom.lines,left.lines,0,other.lines))\nplot(xrange, c(min(diff.dt[[\"y\"]]), 1.4), type=\"n\", yaxt=\"n\", xaxt=\"n\",xlab=\"\",ylab=\"\")\naxis(2,c(0,1),las=1)\nerr.tall[, mysegs(\n -max.log.lambda, -min.log.lambda, value,\n lwd=err.sizes[paste(variable)]*4,\n col=err.colors[paste(variable)])]\ndiff.dt[, text(x, y, sprintf(\n \"$(v=%.3f,\\\\Delta\\\\text{FP}=%d,\\\\Delta\\\\text{FN}=%d)$\",\n -pred.log.lambda, -diff.fp, -diff.fn),\n cex=1, pos=pos, offset=-0.5)]\nleg.dt <- data.table(\n variable=c(\"fp\",\"fn\"),\n x=c(2.5,-0.5))\nvline.dt[, segments(x, 0, x, 2, col=model.color)]\nleg.dt[, text(x, 0.9, toupper(variable), col=err.colors[paste(variable)], cex=1.5)]\ndiff.dt[, segments(-pred.log.lambda, y+0.3, -pred.log.lambda, 0)]\nmtext(\"Label errors\", 2, left.lines-ax.label.offset)\naxis(1)\nmtext(\"Predicted value, $f(\\\\mathbf x_i) = -\\\\log \\\\hat \\\\lambda_i$\", 1,bottom.lines-ax.label.offset)\ndev.off()\nif(standAlone)system(paste(\"pdflatex\", no.ext))\n\nsome.segs.dt <- data.table(segments=some.segs)\nshow.labels <- err.list$label.errors[some.segs.dt, on=\"segments\"]\nshow.segs <- segs.dt[show.labels, on=\"segments\"]\nshow.change <- change.dt[show.labels, on=\"segments\"]\ngg <- ggplot()+\n theme_bw()+\n geom_point(aes(\n i, logratio),\n data=p4.2)+\n geom_segment(aes(\n start-0.5, mean,\n xend=end+0.5, yend=mean),\n color=model.color,\n size=1,\n data=show.segs)+\n geom_vline(aes(\n xintercept=change),\n data=show.change,\n size=1,\n color=model.color)+\n facet_grid(segments ~ ., labeller=label_both)+\n scale_x_continuous(\"Data sequence index\")+\n scale_y_continuous(\"Data value\")+\n theme(legend.position=\"none\")\npng(\"figure-fn-not-monotonic-no-labels.png\", 5, 3.8, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n theme_bw()+\n geom_rect(aes(\n xmin=min, xmax=max,\n ymin=-Inf, ymax=Inf,\n fill=label),\n alpha=0.5,\n data=label.dt)+\n geom_rect(aes(\n xmin=min, xmax=max,\n ymin=-Inf, ymax=Inf,\n color=status),\n fill=NA,\n size=2,\n data=show.labels)+\n scale_color_manual(values=err.colors)+\n scale_fill_manual(values=c(\"one change\"=\"grey50\"))+\n scale_linetype_manual(\n \"Error type\",\n values=c(\n correct=0,\n \"false negative\"=3,\n \"false positive\"=1))+\n geom_point(aes(\n i, logratio),\n data=p4.2)+\n geom_segment(aes(\n start-0.5, mean,\n xend=end+0.5, yend=mean),\n color=model.color,\n size=1,\n data=show.segs)+\n geom_vline(aes(\n xintercept=change),\n data=show.change,\n size=1,\n color=model.color)+\n facet_grid(segments ~ ., labeller=label_both)+\n scale_x_continuous(\"Data sequence index\")+\n scale_y_continuous(\"Data value\")+\n geom_label(aes(\n (min+max)/2, -0.5,\n label=status,\n color=status),\n size=3,\n data=show.labels)+\n theme(legend.position=\"none\")\npng(\"figure-fn-not-monotonic.png\", 5, 3.8, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.633904755115509,
"alphanum_fraction": 0.6510476469993591,
"avg_line_length": 28.16666603088379,
"blob_id": "e31c2a35a7358756a6d8ffbbd20978ffa8e099d4",
"content_id": "75ce1a02788e3e241ee5795d601db2fe7e5c17ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2625,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 90,
"path": "/figure-aum-margin-loss.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\nerror.dt <- data.table::fread(\n \"../feature-learning-benchmark/labeled_problems_errors.csv\")\nerror.dt[, min.lambda := exp(min.log.penalty)]\nlevs <- unique(error.dt$prob.dir)\nerror.dt[, efac := factor(prob.dir, levs)]\nerror.dt[, example := as.integer(efac)-1L]\nerror.dt[, run := cumsum(c(1, diff(errors)!=0)), by=example]\ndiff.dt <- aum::aum_diffs_penalty(error.dt)\nrun.dt <- error.dt[, .(\n max=max(max.log.penalty),\n min=min(min.log.penalty)\n), by=.(example,errors,run)]\nmin.dt <- run.dt[, .SD[errors==min(errors)], by=example]\nbig.dt <- min.dt[, .SD[size==max(size)][1], by=example]\nbad <- big.dt[, .(count=.N), by=example][count>1]\nrun.dt[bad, on=\"example\"]\nsome.ids <- big.dt[order(size)][c(1,10,100,500,1000), example]\nsome.vline <- big.dt[example %in% some.ids]\nsome.diffs <- diff.dt[example %in% some.ids]\nsome.err <- aum::aum_errors(some.diffs)\nsome.tall <- melt(\n some.err,\n measure.vars = c(\"fp\",\"fn\"),\n variable.name=\"error_type\",\n value.name=\"label_errors\")\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nggplot()+\n geom_vline(aes(\n xintercept=log.penalty),\n data=some.vline)+\n geom_segment(aes(\n min.pred, label_errors,\n color=error_type, size=error_type,\n xend=max.pred, yend=label_errors),\n data=some.tall)+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)+\n facet_grid(example ~ ., scales=\"free\")\n\ndiff.tall <- nc::capture_melt_single(\n diff.dt,\n error_type=\"fp|fn\",\n \"_diff\",\n value.name=\"diff\")[diff != 0]\ndiff.tall[, `:=`(\n next.diff = c(diff[-1],NA), \n next.pred = c(pred[-1],NA)\n), by=.(example, error_type)]\nsmall.pred.diff <- diff.tall[\n !is.na(next.diff) & next.diff>0 & diff<0 & next.pred-pred<1]\nsmall.pred.diff[-diff < next.diff]\nsmall.pred.diff[-diff > next.diff]\nsmall.pred.diff[error_type==\"fn\"][order(next.diff+diff)]\nsmall.pred.diff[error_type==\"fn\"][order(next.diff-diff)]\nsome.ids <- c(291,778,4808,4483)\nsome.diffs <- diff.dt[example %in% some.ids]\nsome.err <- aum::aum_errors(some.diffs)\n\nsome.tall <- melt(\n some.err,\n measure.vars = c(\"fp\",\"fn\"),\n variable.name=\"error_type\",\n value.name=\"label_errors\")\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nggplot()+\n geom_segment(aes(\n min.pred, label_errors,\n color=error_type, size=error_type,\n xend=max.pred, yend=label_errors),\n data=some.tall)+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)+\n facet_grid(example ~ ., scales=\"free\")\n## adaptive margin size?\n"
},
{
"alpha_fraction": 0.6026490330696106,
"alphanum_fraction": 0.6297698020935059,
"avg_line_length": 28.082569122314453,
"blob_id": "d4021ba83437e28eec0dc69ffc9abeff7e84eee4",
"content_id": "cbc76e18853b3edab2f4fea0036904495409918c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3171,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 109,
"path": "/figure-aub-convexity.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\n\ncounts <- neuroblastomaProcessed$errors[, {\n diff.tab <- table(factor(diff(errors), c(\"-1\", \"0\", \"1\")))\n L <- as.list(diff.tab)\n size <- max.log.lambda-min.log.lambda\n for(fun.name in c(\"min\", \"max\")){\n fun <- get(fun.name)\n L[[paste0(fun.name, \".size\")]] <- min(size[errors==fun(errors)])\n }\n L$mean.size <- with(L, (min.size+max.size)/2)\n L\n}, by=list(profile.id, chromosome)]\ntwo.changes <- counts[1 < `-1` | 1 < `1`]\ntwo.changes <- counts[order(-`-1`, -`1`, -mean.size)][profile.id != 481][2:3]\ntwo.changes[, panel := paste0(\n ifelse(`-1`==2, \"p\", \"n\"), #positive or negative label\n profile.id, \".\", chromosome)]\nsome.err <- neuroblastomaProcessed$errors[two.changes, on=list(\n profile.id, chromosome)]\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=names(err.colors))\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(profile.id + chromosome ~ .)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=variable, size=variable),\n data=some.err.tall)+\n scale_y_continuous(\n \"errors\",\n breaks=c(0,1),\n limits=c(-0.2, 1.2))+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)\n\ngrid.by <- 0.05\npred.wide <- data.table(expand.grid(\n p4.2=seq(0, 3, by=grid.by),\n n513.3=seq(-5, -2, by=grid.by)))\nmvars <- paste(names(pred.wide))\npred.wide[, combo.i := 1:.N]\npred.tall <- melt(\n pred.wide,\n id.vars=\"combo.i\",\n measure.vars=mvars,\n variable.name=\"panel\",\n value.name=\"pred.log.lambda\")[two.changes, nomatch=0L, on=list(panel)]\naub.dt <- pred.tall[order(combo.i)][, {\n L <- penaltyLearning::ROChange(some.err, .SD, \"panel\")\n roc.dt <- data.table(L$roc)\n roc.dt[, min.fp.fn := ifelse(fp<fn, fp, fn)]\n roc.dt[, width.thresh := max.thresh-min.thresh]\n aub <- roc.dt[!(width.thresh==Inf & min.fp.fn==0), sum(min.fp.fn*width.thresh)]\n pred.errors <- L$thresholds[threshold==\"predicted\", errors]\n min.errors <- L$thresholds[threshold==\"min.error\", errors]\n data.table(aub, auc=L$auc, pred.errors, min.errors)\n}, by=list(combo.i)]\n\nmetrics.tall <- melt(\n aub.dt,\n measure.vars=c(\"aub\", \"auc\", \"pred.errors\", \"min.errors\")\n)[pred.wide, on=list(combo.i)]\n\ngg <- ggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(. ~ variable)+\n geom_tile(aes(\n p4.2, n513.3, fill=value),\n data=metrics.tall)+\n scale_fill_gradient2(low=\"blue\", high=\"red\")+\n coord_equal()\npng(\"figure-aub-convexity-heatmap.png\", 6, 6, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\np0 <- metrics.tall[p4.2==2]\nvline.dt <- p0[variable==\"aub\" & value==0, list(n513.3)]\ngg <- ggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(. ~ variable)+\n geom_vline(aes(\n xintercept=n513.3),\n color=\"grey\",\n size=2,\n data=vline.dt)+\n geom_point(aes(\n n513.3, value),\n shape=1,\n data=p0)+\n xlab(\"predicted log(penalty)\")\npng(\"figure-aub-convexity.png\", 10, 2, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6047552227973938,
"alphanum_fraction": 0.6238694787025452,
"avg_line_length": 25.947237014770508,
"blob_id": "901bedbeb802baf10e2a9ba028ae9694ffe4c0e8",
"content_id": "476d43c270c5c6df26731056bba6a2de412de9b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 10725,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 398,
"path": "/figure-compare-hinge-loss.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\n\nloss.wide <- data.table::fread(\"figure-compare-hinge-loss-data.csv\")\nloss.tall <- melt(\n loss.wide,\n measure.vars = c(\n \"AUC\", \"AUM\", \"AUM.margin\", \"logistic.loss\",\n grep(\"pairwise.*_\", names(loss.wide), value=TRUE),\n \"squared.hinge\", \"hinge.loss\"),\n variable.name=\"loss.name\",\n value.name=\"loss.value\")\nnormalize <- function(x)(x-min(x))/(max(x)-min(x))\nloss.tall[, loss.norm := normalize(loss.value), by=loss.name]\nrect.dt <- data.table(\n xmin=0, xmax=Inf,\n ymin=-Inf, ymax=0)\nlevs <- c(\"AUM\", \"AUM.margin\", \"hinge.loss\")\nloss.contour <- loss.tall[loss.name %in% levs]\nloss.contour[, loss.fac := factor(loss.name, levs)]\nshow.breaks <- seq(1, 7, by=1)\ngg <- ggplot()+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.value),\n data=loss.contour)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=loss.contour)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. ~ loss.fac)+\n scale_fill_gradient(\n \"Loss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Real-valued prediction for positive label\")+\n ylab(\"Real-valued prediction for negative label\")\npng(\n \"figure-compare-hinge-loss-contours.png\", \n width=8, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\nloss.line <- loss.tall[pos+neg==0 & grepl(\"pairwise\",loss.name)]\nloss.line[, loss := sub(\"pred.prob\",\"last.act=sigmoid\",sub(\"pred.real\",\"last.act=none\",sub(\"pairwise[.]\",\"\",sub(\"_\",\"\\n\",loss.name))))]\ngg <- ggplot()+\n ggtitle(\"Comparing pairwise loss functions\")+\n geom_line(aes(\n pos, loss.value, color=loss),\n size=1,\n data=loss.line)+\n scale_x_continuous(\n \"Predicted score yhat for positive example\",\n breaks=seq(-3, 3, by=1))+\n coord_cartesian(xlim=c(-5, 3),ylim=c(0,9))+\n scale_y_continuous(\n \"Pairwise loss after last activation (a), Loss[a(yhat)-a(-yhat)]\",\n limits=c(0,10),\n breaks=seq(0,8,by=2))\ndl <- directlabels::direct.label(gg, \"left.polygons\")\npng(\n \"figure-compare-hinge-loss-pairwise-line.png\", \n width=5, height=5, res=200, units=\"in\")\nprint(dl)\ndev.off()\n\nlog.aum <- loss.tall[loss.name %in% c(\"AUM\",\"logistic.loss\")]\ngg <- ggplot()+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.value),\n data=log.aum)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=log.aum)+\n theme_bw()+\n 
theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. ~ loss.name)+\n scale_fill_gradient(\n \"Loss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Predicted score for positive label\")+\n ylab(\"Predicted score for negative label\")\npng(\n \"figure-compare-hinge-loss-contours-logistic.png\", \n width=5.5, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\nlog.aum <- loss.tall[loss.name %in% c(\n \"pairwise.squared.hinge_pred.real\",\n \"pairwise.squared.hinge_pred.prob\")]\nlog.aum[, last.layer.output := sub(\".*[.]\", \"\", loss.name)]\nshow.breaks <- seq(0, 3.5, by=0.5)\ngg <- ggplot()+\n ggtitle(\"Pairwise squared hinge loss functions\")+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.norm),\n data=log.aum)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=log.aum)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. ~ last.layer.output, labeller=label_both)+\n scale_fill_gradient(\n \"Relative\\nloss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Predicted score for positive label\")+\n ylab(\"Predicted score for negative label\")\npng(\n \"figure-compare-hinge-loss-squared-pairwise-relative.png\", \n width=5.5, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\nshow.breaks <- seq(0, 3.5, by=0.5)\ngg <- ggplot()+\n ggtitle(\"Pairwise squared hinge loss functions\")+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.value),\n data=log.aum)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=log.aum)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. 
~ last.layer.output, labeller=label_both)+\n scale_fill_gradient(\n \"Loss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Predicted score for positive label\")+\n ylab(\"Predicted score for negative label\")\npng(\n \"figure-compare-hinge-loss-squared-pairwise.png\", \n width=5.5, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\nlog.aum <- loss.tall[loss.name %in% c(\n \"pairwise.hinge.loss_pred.real\",\n \"pairwise.hinge.loss_pred.prob\")]\nlog.aum[, last.layer.output := sub(\".*[.]\", \"\", loss.name)]\nshow.breaks <- seq(0, 1.75, by=0.25)\ngg <- ggplot()+\n ggtitle(\"Pairwise linear hinge loss functions\")+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.norm),\n data=log.aum)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=log.aum)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. ~ last.layer.output, labeller=label_both)+\n scale_fill_gradient(\n \"Relative\\nloss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Predicted score for positive label\")+\n ylab(\"Predicted score for negative label\")\npng(\n \"figure-compare-hinge-loss-hinge-pairwise-relative.png\", \n width=5.5, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\nshow.breaks <- seq(0, 1.75, by=0.25)\ngg <- ggplot()+\n ggtitle(\"Pairwise linear hinge loss functions\")+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n geom_tile(aes(\n pos, neg, fill=loss.value),\n data=log.aum)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-Inf,y=-Inf,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=Inf,y=-Inf,label=\"correct \\nlabels \"),\n hjust=1,\n vjust=-0.2)+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n metR::geom_contour2(aes(\n pos, neg, z=loss.value, label=stat(level)),\n breaks=show.breaks,\n color=\"blue\",\n size=1,\n data=log.aum)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. 
~ last.layer.output, labeller=label_both)+\n scale_fill_gradient(\n \"Loss\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_abline(aes(\n intercept=intercept, slope=slope),\n color=\"grey\",\n data=data.table(intercept=0, slope=1))+\n xlab(\"Predicted score for positive label\")+\n ylab(\"Predicted score for negative label\")\npng(\n \"figure-compare-hinge-loss-hinge-pairwise.png\", \n width=5.5, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n geom_tile(aes(\n pos, neg, fill=loss.norm),\n data=loss.tall[loss.name %in% c(\"AUC\", \"AUM\", \"logistic.loss\")])+\n geom_rect(aes(\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax),\n fill=NA,\n color=\"black\",\n data=rect.dt)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(. ~ loss.name)+\n geom_abline(\n slope=1, intercept=0, color=\"grey\")+\n scale_fill_gradient(\n \"Relative\\nvalues\",\n low=\"white\",\n high=\"red\")+\n coord_equal()+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=-3,y=-3,label=\" correct rank\"),\n color=\"grey50\",\n vjust=1.3,\n angle=45,\n hjust=0)+\n geom_text(aes(\n x,y,label=label),\n data=data.table(x=0,y=0,label=\"correct \\nlabels \"),\n hjust=-0.1,\n vjust=1.1)+\n xlab(\"Real-valued prediction for positive label\")+\n ylab(\"Real-valued prediction\\nfor negative label\")\npng(\"figure-compare-hinge-loss.png\", width=7, height=2.6, res=200, units=\"in\")\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.6485437154769897,
"alphanum_fraction": 0.6699029207229614,
"avg_line_length": 38.61538314819336,
"blob_id": "c9f38e3a58af2abb1ffcdc08c394c7199c966615",
"content_id": "9ef106856e6962d703c7ac0dc80e4b974b5e6148",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 13,
"path": "/figure-DNA-Sonar-subtrain-valid.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "f.csv.gz <- \"figure-DNA-Sonar-subtrain-valid-data.csv.gz\"\nif(!file.exists(f.csv.gz)){\n u <- paste0(\"http://ml.nau.edu/data/\", f.csv.gz)\n download.file(u, f)\n}\nloss.dt <- data.table::fread(\"figure-DNA-Sonar-subtrain-valid-data.csv.gz\")\n\n(selected.dt <- loss.dt[\n set.name==\"validation\",\n .SD[which.max(auc), .(iteration, step.size)],\n by=.(data.name, validation.fold, loss.name)])\n#=> Some step sizes selected include 100 and 0.01, which means seq(2,\n#-2) is not broad enough, so we should try -3, 3 next time.\n"
},
{
"alpha_fraction": 0.629141092300415,
"alphanum_fraction": 0.6466257572174072,
"avg_line_length": 29.605634689331055,
"blob_id": "9767385c6a6e323466cf3198bc148251089010d4",
"content_id": "2e238d0e300951c019cf29fdee6293ee7e3b7cb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6520,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 213,
"path": "/figure-aum-convexity.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\n\ne <- function(label, profile.id, chromosome){\n data.table(label, profile.id=factor(profile.id), chromosome=factor(chromosome))\n}\nselect.dt <- rbind(\n e(\"positive\", 4, 2),\n e(\"negative\", 513, 3))\nsome.err <- neuroblastomaProcessed$errors[select.dt, .(\n fp, fn, possible.fp, possible.fn,\n min.log.lambda=-max.log.lambda,\n max.log.lambda=-min.log.lambda,\n errors, labels,\n label\n), on=list(profile.id, chromosome)]\nerr.sizes <- c(\n \"min(FP,FN)\"=1,\n FP=3,\n FN=2)\nerr.colors <- c(\n \"min(FP,FN)\"=\"black\",\n FP=\"red\",\n FN=\"deepskyblue\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=c(\"fp\",\"fn\"),\n variable.name=\"var.lower\")\nsome.err.tall[, variable := toupper(var.lower)]\nleg <- \"Error type\"\nsome.err.tall[, Label := paste0(\"\\n\", label)]\ngg.err <- ggplot()+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n facet_grid(Label ~ ., labeller=label_both)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=variable, size=variable),\n data=some.err.tall)+\n scale_y_continuous(\n \"Label errors\",\n breaks=c(0,1),\n limits=c(-0.2, 1.2))+\n scale_color_manual(leg,values=err.colors)+\n scale_size_manual(leg,values=err.sizes)+\n scale_x_continuous(\n \"Predicted value f(x)\",\n breaks=seq(-2, 6, by=2))+\n coord_cartesian(xlim=c(-3, 5))\npng(\"figure-aum-convexity-profiles.png\", 3.5, 2, units=\"in\", res=200)\nprint(gg.err)\ndev.off()\n\ndmin <- 3.5\ndmax <- 7.5\nsome.err[, fp.diff := c(NA, diff(fp)), by=label]\nsome.err[, fn.diff := c(NA, diff(fn)), by=label]\nsome.diff <- some.err[fp.diff != 0 | fn.diff != 0, .(\n id=1, label, fp.diff, fn.diff, pred.log.lambda=min.log.lambda)]\nsome.diff[, fp.cum := cumsum(fp.diff), by=label]\nsome.diff[, fn.cum := rev(cumsum(rev(-fn.diff))), by=label]\ndlist <- split(some.diff, some.diff[[\"label\"]])\ngrid.dt <- with(dlist, positive[negative, on=\"id\", allow.cartesian=TRUE])\ngrid.dt[, negative := i.pred.log.lambda]\ngrid.dt[, positive := pred.log.lambda]\ngrid.dt[, pred.diff := negative - positive]\ngrid.sorted <- grid.dt[order(pred.diff), .(\n pred.diff, fn=fn.cum, fp=i.fp.cum)]\ngrid.sorted[, min.fp.fn := pmin(fp,fn)]\nborder.pred <- grid.dt[\n dmin < pred.diff & pred.diff < dmax]\ngrid.pred <- data.table(\n pred.diff=seq(dmin, dmax, by=0.02))\ngrid.pred[, positive := 0]\ngrid.pred[, negative := pred.diff]\nboth.pred <- rbind(\n border.pred[, .(positive, negative, pred.diff, differentiable=FALSE)],\n grid.pred[, .(positive, negative, pred.diff, differentiable=TRUE)])\n##positive=0, negative=pred.diff.\npred.tall <- melt(\n both.pred,\n measure.vars=select.dt$label,\n variable.name=\"label\",\n value.name=\"pred.log.lambda\")[select.dt, nomatch=0L, on=\"label\"]\nmetrics.wide <- pred.tall[order(pred.diff)][, {\n L <- penaltyLearning::ROChange(some.err, .SD, \"label\")\n with(L, data.table(\n aum, auc,\n SM=L$roc[min.thresh < max.thresh, sum(min.fp.fn)],\n roc=list(roc)))\n}, by=list(pred.diff, differentiable)]\nmetrics.wide[auc==max(auc)] #max auc => aum>0.\n\npred.diff.vec <- c(4.5, 5, 5.14)\nvline.dt <- rbind(\n data.table(pred=pred.diff.vec, Label=\"\\nnegative\"),\n data.table(pred=0, Label=\"\\npositive\"))\nvline.dt[, pred.value := pred-1]\ngg.err+\n geom_vline(aes(\n xintercept=pred.value),\n data=vline.dt)\n## TODO three or more slides showing alignment.\n\nshow.roc.dt.list <- list()\nfor(pdiff in pred.diff.vec){\n select.dt <- data.table(pred.diff=pdiff)\n pdiff.metrics 
<- metrics.wide[select.dt, on=\"pred.diff\", roll=\"nearest\"]\n pdiff.roc <- pdiff.metrics[[\"roc\"]][[1]]\n show.roc.dt.list[[paste(pdiff)]] <- data.table(\n pred.diff=pdiff,\n pdiff.metrics[, .(AUC=auc, AUM=round(aum,3), SM)],\n pdiff.roc)\n}\n(show.roc.dt <- do.call(rbind, show.roc.dt.list))\nshow.roc.dt[, min.fp.fn := pmin(fp, fn)]\nshow.roc.tall <- melt(\n show.roc.dt,\n measure=c(\"fp\",\"fn\",\"min.fp.fn\"),\n variable.name=\"lower.var\")\nshow.roc.tall[, variable := ifelse(\n lower.var==\"min.fp.fn\", \"min(FP,FN)\", toupper(lower.var))]\ngg <- ggplot()+\n theme_bw()+\n theme(panel.grid.minor=element_blank())+\n facet_grid(pred.diff + AUC + AUM ~ ., labeller=label_both)+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=min.fp.fn),\n fill=\"grey\",\n color=NA,\n alpha=0.5,\n show.roc.dt)+\n geom_segment(aes(\n min.thresh, value,\n xend=max.thresh, yend=value,\n color=variable, size=variable),\n data=show.roc.tall)+\n scale_y_continuous(\n \"Label errors\",\n breaks=c(0,1))+\n scale_color_manual(leg,values=err.colors)+\n scale_size_manual(leg,values=err.sizes)+\n geom_blank(aes(\n x, y),\n data=data.table(x=0, y=c(-0.4,1.4)))+\n scale_x_continuous(\n \"Constant added to predicted values\")\npng(\"figure-aum-convexity-thresholds.png\", 5, 3.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nmetrics.tall <- melt(\n metrics.wide,\n measure.vars=c(\"aum\", \"auc\", \"SM\"),\n variable.name=\"var.lower\"\n)\nmetrics.tall[, variable := toupper(var.lower)]\ngg <- ggplot()+\n theme(panel.spacing=grid::unit(1, \"lines\"))+\n theme(text=element_text(size = 15))+\n ##theme(legend.position=c(0.8, 0.15))+\n theme(legend.position=\"bottom\")+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n geom_point(aes(\n pred.diff, value, fill=differentiable),\n size=1,\n shape=21,\n data=metrics.tall[order(-differentiable)])+\n xlab(\"Prediction difference, f(negative) - f(positive)\")+\n coord_cartesian(xlim=c(4,7))+\n scale_y_continuous(\"\", breaks=seq(0, 3, by=1))\ngg\n\ngg.emph <- gg+\n theme_bw()+\n geom_vline(aes(\n xintercept=pred.diff),\n color=\"grey50\",\n data=data.table(pred.diff=pred.diff.vec))\npng(\"figure-aum-convexity-emph.png\", 5, 3, units=\"in\", res=200)\nprint(gg.emph)\ndev.off()\n\nmetrics.no.SM <- metrics.tall[variable != \"SM\"]\ngg.no.SM <- ggplot()+\n theme(panel.spacing=grid::unit(1, \"lines\"))+\n theme(text=element_text(size = 15))+\n theme(legend.position=\"bottom\")+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n geom_point(aes(\n pred.diff, value, fill=differentiable),\n size=1,\n shape=21,\n data=metrics.no.SM[order(-differentiable)])+\n xlab(\"Prediction difference, f(negative) - f(positive)\")+\n coord_cartesian(xlim=c(4,7))+\n scale_y_continuous(\"\", breaks=seq(0, 3, by=1))\npng(\"figure-aum-convexity-no-SM.png\", 4.2, 3, units=\"in\", res=200)\nprint(gg.no.SM)\ndev.off()\n\npng(\"figure-aum-convexity.png\", 4.2, 3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.5952108502388,
"alphanum_fraction": 0.6161020994186401,
"avg_line_length": 31.67914390563965,
"blob_id": "9d68850b7bce76c629a1dfe44c929404d8705f4d",
"content_id": "43f4d553f60c55a12a2dae594c0f87c29d3adaa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 18333,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 561,
"path": "/figure-line-search-complexity.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\nsetDTthreads(1)\ncache.name <- \"figure-line-search-complexity-cache.rds\"\nif(FALSE){\n unlink(file.path(testFold.vec, cache.name), recursive=TRUE)\n}\n\n##TODO add for loop over line search set, subtrain or validation?\n\n## > mb[per.set, on=list(set)][order(labels)]\n## megabytes set labels\n## 1: 554 H3K36me3_TDH_other 200\n## 2: 377 H3K36me3_TDH_ENCODE 338\n## 3: 375 H3K4me3_TDH_ENCODE 525\n## 4: 592 H3K27me3_RL_cancer 570\n## 5: 798 H3K27ac_TDH_some 627\n## 6: 906 H3K36me3_TDH_immune 630\n## 7: 296 H3K27me3_TDH_some 696\n## 8: 2407 CTCF_TDH_ENCODE 1378\n## 9: 3223 H3K4me1_TDH_BP 1584\n## 10: 5871 H3K36me3_AM_immune 1743\n## 11: 6407 ATAC_JV_adipose 3241\n## 12: 3017 H3K4me3_PGP_immune 3780\n## 13: 2902 H3K4me3_TDH_immune 3807\n## 14: 5421 H3K27ac-H3K4me3_TDHAM_BP 15961\n(testFold.vec <- Sys.glob(\"../neuroblastoma-data/data/*/cv/*/testFolds/*\"))\ntestFold.path <- \"../neuroblastoma-data/data/H3K27ac-H3K4me3_TDHAM_BP/cv/equal_labels/testFolds/3\"\nseed <- 1\ninit.name=\"zero\"\naum.type=\"count\"\nOneBatch <- function(testFold.path, aum.type){\n library(data.table)\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n ## replace positive fn at end with 0 to avoid AUM=Inf.\n data.list$evaluation[, `:=`(\n min.fn=min(fn),\n max.fp=max(fp),\n min.lambda = exp(min.log.lambda),\n example=sequenceID\n ), by=sequenceID]\n bad <- data.list$evaluation[min.log.lambda == -Inf & min.fn < fn]\n if(nrow(bad)){\n print(bad)\n }\n data.list$evaluation[min.log.lambda == -Inf & 0 < fn]\n ## code below not necessary since this does not happen in our real\n ## data sets, but it could theoretically in some data.\n data.list$aum.input <- data.table(data.list$evaluation)[, `:=`(\n possible.fn=possible.fn-min.fn,\n fn=fn-min.fn,\n possible.fp=max.fp\n ), by=sequenceID]\n ## read folds. 
\n folds.dt <- data.table::fread(folds.csv)\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := rep(\n c(\"subtrain\", \"validation\"), l=.N)]\n folds.dt[, table(fold, set)]\n X.all <- scale(data.list$inputs[, -1])#rm seqID.\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.vec <- folds.dt[rownames(X.finite), set, on=\"sequenceID\"]\n seqs.list <- list()\n diffs.list <- list()\n aum.vec.list <- list()\n for(s in unique(folds.dt$set)){\n seqs.set <- folds.dt[s==set, sequenceID]\n seqs.list[[s]] <- seqs.set\n seqs.diff <- aum::aum_diffs_penalty(\n data.list$evaluation,\n seqs.set,\n denominator=aum.type)\n diffs.list[[s]] <- seqs.diff\n }\n n.subtrain.diffs <- nrow(diffs.list$subtrain)\n totals <- colSums(diffs.list$subtrain[, .(fp_diff, fn_diff)])\n X.subtrain <- X.finite[set.vec==\"subtrain\",]\n neg.t.X.subtrain <- -t(X.subtrain)\n seqs.train <- with(seqs.list, c(subtrain, validation))\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n N.param <- ncol(X.finite)+1\n init.param <- structure(\n rep(0, N.param),\n names=c(\"(Intercept)\",colnames(X.finite)))\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ])\n some.param <- fit[[\"param.mat\"]]\n init.param[names(some.param)] <- some.param\n init.param\n },\n zero=function(){\n init.param+rnorm(N.param)\n }\n )\n iteration.dt.list <- list()\n considered.dt.list <- list()\n obj.sign.list <- list(max.auc=-1, min.aum=1)\n for(seed in 1:4)for(init.name in names(init.fun.list)){\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n for(maxIterations in names(obj.sign.list)){\n objective <- sub(\".*[.]\", \"\", maxIterations)\n computeROC <- function(w, i, set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=-as.numeric(pred.pen.vec))\n is.set <- set.vec==set\n set.dt <- pred.dt[is.set]\n L <- penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n alist <- aum_auc(diffs.list[[set]], pred.pen.vec[ seqs.list[[set]], ])\n L$aum.diffs <- alist$aum\n L$auc.diffs <- alist$auc\n L\n }\n aum_auc <- function(diffs.dt, pred.vec){\n aum.list <- aum::aum(diffs.dt, pred.vec)\n before.dt <- data.table(aum.list$total_error, key=\"thresh\")[, `:=`(\n TPR_before=1-fn_before/-totals[[\"fn_diff\"]],\n FPR_before=fp_before/totals[[\"fp_diff\"]])]\n aum.list$auc <- before.dt[, .(\n FPR=c(FPR_before, 1),\n TPR=c(TPR_before, 1)\n )][, sum((FPR[-1]-FPR[-.N])*(TPR[-1]+TPR[-.N])/2)]\n aum.list\n }\n obj.sign <- obj.sign.list[[maxIterations]]\n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n prev.obj <- Inf*obj.sign\n step.number <- 0\n while({\n summary.dt.list <- list()\n for(set in names(seqs.list)){\n set.PL <- computeROC(weight.vec, intercept, set)\n summary.dt.list[[set]] <- with(set.PL, data.table(\n set,\n thresholds[threshold==\"predicted\"],\n auc, aum, auc.diffs, aum.diffs))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n iteration.dt.list[[paste(\n seed, init.name, step.number, maxIterations\n )]] <- data.table(\n seed, init.name, step.number, maxIterations, summary.dt)\n new.obj <- summary.dt.list$subtrain[[paste0(objective,\".diffs\")]]\n improvement <- obj.sign*(prev.obj-new.obj)\n 
cat(sprintf(\n \"seed=%d init=%s step=%d %s %f->%f\\n\",\n seed, init.name, step.number, maxIterations, prev.obj, new.obj))\n ##step.number < 2 &&\n 1e-5 < improvement\n }){\n LS=aum::aum_line_search(diffs.list$subtrain, X.subtrain, weight.vec, maxIterations=maxIterations)\n pred.vec <- X.subtrain %*% weight.vec\n aum.list <- aum::aum(diffs.list$subtrain, pred.vec)\n pred.grad.vec <- rowMeans(aum.list$derivative_mat)\n direction.vec <- neg.t.X.subtrain %*% pred.grad.vec\n step.grid <- 10^seq(-9, 0)\n take.step <- function(s){\n weight.vec+s*direction.vec\n }\n grid.dt <- data.table(step.size=step.grid)[, {\n step.weight <- take.step(step.size)\n grid.aum <- aum_auc(diffs.list$subtrain, X.subtrain %*% step.weight)\n with(grid.aum, data.table(auc, aum))\n }, by=step.size]\n considered.dt.list[[paste(\n seed, init.name, maxIterations, step.number\n )]] <- data.table(\n seed, init.name, maxIterations, step.number, n.subtrain.diffs, LS$line_search_result)\n weight.vec <- take.step(LS$line_search_result[[\"step.size\"]])\n new.aum <- aum::aum(diffs.list$subtrain, X.subtrain %*% weight.vec)\n err.thresh <- data.table(\n new.aum$total_error,key=\"thresh\"\n )[, err_before := fp_before+fn_before][, .(\n thresh=c(thresh[1]-1,thresh[-1]-diff(thresh)/2,thresh[.N]+1),\n err=c(err_before,sum(diffs.list$subtrain$fp_diff))\n )]\n intercept <- err.thresh[which.min(err), thresh]\n step.number <- step.number+1\n prev.obj <- new.obj\n }#step.number\n }#maxIterations\n }#seed/init.name\n list(\n sets=data.table(\n do.call(rbind, iteration.dt.list),\n data.name, cv.type, test.fold),\n steps=data.table(\n rbindlist(considered.dt.list),\n data.name, cv.type, test.fold))\n}\n\nargs.dt <- data.table::CJ(\n testFold.path=testFold.vec,\n aum.type=c(\"rate\",\"count\")\n)\n\n## Run on SLURM.\nregistry.dir <- \"figure-line-search-complexity\"\nreg=batchtools::loadRegistry(registry.dir)\nbatchtools::getStatus(reg=reg)\nbatchtools::findExpired(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\n#analyze.\ndone.ids <- status.dt[is.na(error) & !is.na(done), job.id]\nfor(done.i in seq_along(done.ids)){\n job.id <- done.ids[[done.i]]\n args.row <- args.dt[job.id]\n ls.dir <- file.path(args.row$testFold.path, \"line_search\", \"complexity\")\n dir.create(ls.dir, showWarnings = FALSE, recursive = TRUE)\n ls.csv <- file.path(ls.dir, paste0(args.row$aum.type, \".csv\"))\n if(!file.exists(ls.csv)){\n cat(sprintf(\"%4d / %4d %s\\n\", done.i, length(done.ids), ls.csv))\n res <- batchtools::loadResult(job.id)\n fwrite(res$steps, ls.csv)\n } \n}\n\nif(FALSE){\n unlink(registry.dir, recursive=TRUE)\n}\nreg <- batchtools::makeRegistry(registry.dir)\nbatchtools::batchMap(OneBatch, args=args.dt, reg=reg)\njob.table <- batchtools::getJobTable(reg=reg)\nchunks <- data.frame(job.table, chunk=1)\nbatchtools::submitJobs(chunks, resources=list(\n walltime = 24*60*60,#seconds\n memory = 32000,#megabytes per cpu\n ncpus=1, #>1 for multicore/parallel jobs.\n ntasks=1, #>1 for MPI jobs.\n chunks.as.arrayjobs=TRUE), reg=reg)\n\nbatchtools::getStatus(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\nbatchtools::testJob(4, reg=reg)\nargs.dt[21]\n\n## Run locally.\nfor(args.i in 1:nrow(args.dt)){\n args.row <- args.dt[args.i]\n cache.rds <- args.row[, file.path(testFold.path, paste0(aum.type, \".rds\"))]\n all.it.list[[args.i]] <- if(file.exists(cache.rds)){\n readRDS(cache.rds)\n }else{\n cat(sprintf(\"%4d / %4d\\n\", args.i, 
length(args.dt)))\n print(args.row)\n iteration.list <- do.call(OneBatch, args.row)\n saveRDS(iteration.list, cache.rds)\n }\n}\n\n## analyze.\ncache.vec <- Sys.glob(file.path(\n \"../neuroblastoma-data/data/*/cv/*/testFolds/*\",\n cache.name))\nfor(cache.i in seq_along(cache.vec)){\n cache.rds <- cache.vec[[cache.i]]\n L <- readRDS(cache.rds)\n algo.cols <- c(\"seed\",\"init.name\",\"algo\")\n step.cols <- c(algo.cols,\"step.number\")\n best.steps <- L$steps[, .SD[which.min(aum)], by=step.cols][,c(step.cols,\"search\"),with=FALSE]\n join.dt <- L$sets[set != \"test\"][best.steps, on=step.cols]\n min.dt <- join.dt[set==\"validation\", .SD[which.min(aum)], by=.(seed,init.name,set)]\n\n ggplot()+\n geom_line(aes(\n step.number, aum, color=algo),\n data=join.dt)+\n geom_point(aes(\n step.number, aum, color=algo),\n shape=1,\n data=join.dt[search==\"exact\"])+\n geom_point(aes(\n step.number, aum, color=algo),\n data=min.dt)+\n facet_wrap(~seed+ init.name + set,scales=\"free\")\n \n}\n\n#analyze 2\ntype.csv.vec <- Sys.glob(file.path(testFold.vec, \"line_search\",\"complexity\", \"*.csv\"))\ntotal.dt.list <- list()\nfor(type.csv.i in seq_along(type.csv.vec)){\n type.csv <- type.csv.vec[[type.csv.i]]\n aum.type <- sub(\".csv\",\"\",basename(type.csv))\n type.dt <- fread(type.csv)\n type.total.dt <- type.dt[, .(\n aum.type, steps=.N, sum.iterations=sum(q.size), mean.iterations=mean(q.size)\n ), by=.(\n data.name, cv.type, test.fold, seed, init.name, maxIterations, n.subtrain.diffs\n )]\n total.dt.list[[type.csv]] <- type.total.dt\n}\ntotal.dt <- rbindlist(total.dt.list)\nfwrite(total.dt, \"figure-line-search-complexity.csv\")\nrfac <- 10\ntotal.dt[, N:= 10^(round(log10(n.subtrain.diffs)*rfac)/rfac)]\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n iterations=mean(sum.iterations),\n min=min(sum.iterations),\n max=max(sum.iterations)\n), by=.(expr.name=paste(aum.type, init.name), N)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"iterations\", fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=meas,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n iterations=mean(sum.iterations),\n min=min(sum.iterations),\n max=max(sum.iterations)\n), by=.(expr.name=paste(init.name), N)])\nmy_funs <- list(\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=\"iterations\", fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, 
ymin=min, ymax=max),\n data=meas,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\nL <- list(measurements=total.dt[maxIterations==\"min.aum\", data.table(\n sum.iterations=mean(sum.iterations),\n mean.iterations=mean(mean.iterations)\n), by=.(expr.name=paste(aum.type, init.name), N)]\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(\n L, unit.col.vec=c(\"sum.iterations\", \"mean.iterations\"), fun.list=my_funs)\nmeas <- best[[\"measurements\"]]\nref.dt <- best[[\"references\"]]\nref.color <- \"violet\"\nemp.color <- \"black\"\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\ntotal.wide <- dcast(\n total.dt,\n N ~ .,\n value.var=c(\"sum.iterations\", \"steps\"),\n fun.aggregate = list(median, min, max)\n)[, expr.name := \"line.search\"]\nL <- list(measurements=total.wide)\nmy_funs <- list(\n N=function(N)log10(N),\n \"N \\\\log N\"=function(N)log10(N) + log10(log(N)),\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=c(\"sum.iterations_median\", \"steps_median\"), fun.list=my_funs)\naddUnit <- function(DT)DT[, Unit := sub(\"_median\", \"\", unit)]\nmeas <- addUnit(best[[\"measurements\"]])\nref.dt <- addUnit(best[[\"references\"]])\nref.color <- \"violet\"\nemp.color <- \"black\"\nribbon.dt <- nc::capture_melt_multiple(total.wide, Unit=\".*\", \"_\", column=\"min|max\")\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(Unit ~ ., scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=ribbon.dt,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\nif(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\n\n\nvalue.var <- c(\"sum.iterations\")\nunit.col.vec <- paste0(value.var,\"_median\")\ntotal.wide <- dcast(\n total.dt[maxIterations==\"min.aum\"],\n N + init.name ~ .,\n value.var=value.var,\n fun.aggregate = list(median, min, max, length)\n)[, expr.name := init.name]\nL <- list(measurements=total.wide)\nmy_funs <- list(\n \"N^2\"=function(N)2*log10(N))\nbest <- atime::references_best(L, unit.col.vec=unit.col.vec, fun.list=my_funs)\naddUnit <- function(DT)DT[, 
Unit := sub(\"_median\", \"\", unit)]\nmeas <- addUnit(best[[\"measurements\"]])\nref.dt <- addUnit(best[[\"references\"]])\nref.color <- \"violet\"\nemp.color <- \"black\"\nribbon.dt <- nc::capture_melt_multiple(total.wide, Unit=\".*\", \"_\", column=\"min|max\")\ngg <- ggplot2::ggplot()+\n ggplot2::facet_grid(Unit ~ expr.name, scales=\"free\")+\n ggplot2::theme_bw()+\n ggplot2::geom_ribbon(ggplot2::aes(\n N, ymin=min, ymax=max),\n data=ribbon.dt,\n fill=emp.color,\n alpha=0.5)+\n ggplot2::geom_line(ggplot2::aes(\n N, empirical),\n size=2,\n color=emp.color,\n data=meas)+\n ggplot2::geom_line(ggplot2::aes(\n N, reference, group=fun.name),\n color=ref.color,\n size=1,\n data=ref.dt)+\n ggplot2::scale_y_log10(\"\")+\n ggplot2::scale_x_log10()\ndl <- if(requireNamespace(\"directlabels\")){\n gg+\n directlabels::geom_dl(ggplot2::aes(\n N, reference, label=fun.name),\n data=ref.dt,\n color=ref.color,\n method=\"bottom.polygons\")\n}else{\n gg\n}\npng('figure-line-search-complexity.png', width=8, height=4, units=\"in\", res=200)\nprint(dl)\ndev.off()\n"
},
{
"alpha_fraction": 0.5438708662986755,
"alphanum_fraction": 0.5525012016296387,
"avg_line_length": 34.54545593261719,
"blob_id": "09b5bf7a2eebec888698eb116b11d5d97ea6ae59",
"content_id": "92f28bde40985fb276357c15ca1b4f041c3696be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6257,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 176,
"path": "/figure-sonar-comparisons-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "OneSeed <- function(seed){\n library(data.table)\n seed.csv <- sprintf(\"figure-sonar-comparisons-data-seed%d.csv\", seed)\n append <- FALSE\n data(package=\"mlbench\")\n data(Sonar, package=\"mlbench\")\n data(DNA, package=\"mlbench\")\n data.list <- list(\n Sonar=list(\n input.mat=as.matrix(Sonar[,1:60]),\n output.vec=ifelse(Sonar$Class==\"R\", 1, -1)),\n DNA=list(\n input.mat=ifelse(as.matrix(DNA[,1:180])==0, 0, 1),\n output.vec=ifelse(DNA$Class==\"n\", -1, 1)))\n data.name <- \"Sonar\"\n input.output.list <- data.list[[data.name]]\n input.mat <- input.output.list[[\"input.mat\"]]\n full.input.mat <- scale(input.mat)\n full.output.vec <- input.output.list[[\"output.vec\"]]\n stopifnot(full.output.vec %in% c(-1, 1))\n unique.sets <- c(\"subtrain\", \"validation\", \"test\")\n PairsDT <- function(output.vec){\n is.positive <- output.vec == 1\n data.table(expand.grid(\n positive=which(is.positive),\n negative=which(!is.positive)))\n }\n equal.class.weights <- function(output.vec){\n otab <- table(output.vec)\n as.numeric(1/otab[paste(output.vec)])\n }\n Logistic <- function(pred.vec, output.vec, obs.weights){\n list(\n gradient=-obs.weights*output.vec/(1+exp(output.vec*pred.vec)),\n loss=sum(obs.weights*log(1+exp(-output.vec*pred.vec))))\n }\n AUM <- function(pred.vec, diff.dt){\n L <- aum::aum(diff.dt, pred.vec)\n d <- L$derivative_mat\n non.diff <- abs(d[,1] - d[,2]) > 1e-6\n if(any(non.diff)){\n cat(sprintf(\"%d non-diff points\\n\", sum(non.diff)))\n print(d[non.diff, ])\n }\n with(L, list(gradient=rowMeans(derivative_mat), loss=aum))\n }\n loss.list <- list(\n logistic=function(pred.vec, output.vec, ...){\n Logistic(pred.vec, output.vec, 1/length(pred.vec))\n },\n logistic.weighted=\n function(pred.vec, output.vec,\n obs.weights=subtrain.obs.weights, ...){\n Logistic(pred.vec, output.vec, obs.weights)\n },\n aum.count=function(pred.vec, diff.count.dt, ...){\n AUM(pred.vec, diff.count.dt)\n },\n aum.rate=function(pred.vec, diff.rate.dt, ...){\n AUM(pred.vec, diff.rate.dt)\n },\n squared.hinge.all.pairs=function(pred.vec, pairs.dt, margin=1, ...){\n pairs.dt[, diff := pred.vec[positive]-pred.vec[negative]-margin]\n pairs.dt[, diff.clipped := ifelse(diff<0, diff, 0)]\n pairs.tall <- data.table::melt(\n pairs.dt,\n measure.vars=c(\"positive\", \"negative\"),\n value.name=\"pred.i\",\n variable.name=\"label\")\n ## d/dx (x - y - m)^2 = x - y - m\n ## d/dy (x - y - m)^2 = -(x - y - m)\n pairs.tall[, grad.sign := ifelse(label==\"positive\", 1, -1)]\n N.pairs <- nrow(pairs.dt)\n grad.dt <- pairs.tall[, .(\n gradient=sum(grad.sign*diff.clipped)\n ), keyby=pred.i]\n list(\n gradient=grad.dt$gradient/N.pairs,\n loss=sum(pairs.dt$diff.clipped^2)/N.pairs)\n }\n )\n seed.dt.list <- list()\n set.seed(seed)\n set.vec <- sample(rep(unique.sets, l=length(full.output.vec)))\n set.data.list <- list()\n for(set.name in unique.sets){\n is.set <- set.vec == set.name\n output.vec <- full.output.vec[is.set]\n set.data.list[[set.name]] <- list(\n output.vec=output.vec,\n obs.weights=equal.class.weights(output.vec),\n input.mat=full.input.mat[is.set,],\n diff.rate.dt=aum::aum_diffs_binary(output.vec, denominator=\"rate\"),\n diff.count.dt=aum::aum_diffs_binary(output.vec, denominator=\"count\"),\n pairs.dt=PairsDT(output.vec))\n }\n X.mat <- set.data.list$subtrain$input.mat\n for(loss.name in names(loss.list)){\n loss.grad.fun <- loss.list[[loss.name]]\n for(step.size in 10^seq(-2,4,by=0.5)){\n cat(sprintf(\"seed=%d loss=%s step.size=%f\\n\", seed, loss.name, step.size))\n set.seed(1)\n weight.vec <- 
last.w <- rnorm(ncol(X.mat))\n done <- FALSE\n iteration <- 0\n while(!done){\n iteration <- iteration+1\n loss.for.weight <- function(w, set.data=set.data.list$subtrain){\n pred <- set.data$input.mat %*% w\n set.data$pred.vec <- pred\n out <- do.call(loss.grad.fun, set.data)\n out$pred <- pred\n out\n }\n loss.before.step <- loss.for.weight(weight.vec)\n direction <- -t(X.mat) %*% loss.before.step[[\"gradient\"]]\n loss.for.step <- function(step.size){\n new.weight <- weight.vec + step.size * direction\n out <- loss.for.weight(new.weight)\n out$new.weight <- new.weight\n out$step.size <- step.size\n out\n }\n loss.after.step <- loss.for.step(step.size)\n weight.vec <- loss.after.step[[\"new.weight\"]]\n diff.w <- sum(abs(weight.vec-last.w))\n last.w <- weight.vec\n diverged <- (!is.finite(diff.w)) ||\n any(!is.finite(loss.after.step[[\"pred\"]]))\n if(!diverged){\n for(set.name in names(set.data.list)){\n set.data <- set.data.list[[set.name]]\n set.loss <- loss.for.weight(weight.vec, set.data)\n roc.df <- WeightedROC::WeightedROC(\n set.loss[[\"pred\"]],\n set.data[[\"output.vec\"]])\n auc <- WeightedROC::WeightedAUC(roc.df)\n out.dt <- data.table(\n seed,\n loss.name,\n step.size,\n iteration,\n set.name,\n auc,\n loss.value=set.loss$loss)\n for(aum.type in c(\"count\", \"rate\")){\n diff.name <- paste0(\"diff.\", aum.type, \".dt\")\n out.dt[[paste0(\"aum.\", aum.type)]] <- if(\n all(is.finite(set.loss[[\"pred\"]]))\n ){\n aum.list <- aum::aum(set.data[[diff.name]], set.loss[[\"pred\"]])\n aum.list[[\"aum\"]]\n }else{\n NA\n }\n }\n data.table::fwrite(\n out.dt,\n seed.csv,\n append=append)\n append <- TRUE\n }#for(set.name\n }#if(!diverged\n if(10000 < iteration || diverged || diff.w < 1e-6){\n done <- TRUE\n }\n }#while(!done\n }#for(step.size\n }#for(loss.name\n do.call(rbind, seed.dt.list)\n}\n\nLAPPLY <- future.apply::future_lapply\n##LAPPLY <- lapply\nfuture::plan(\"multisession\")\nout.loss.list <- LAPPLY(1:10, OneSeed)\n\n"
},
{
"alpha_fraction": 0.49080830812454224,
"alphanum_fraction": 0.4937552511692047,
"avg_line_length": 44.97419357299805,
"blob_id": "7546ed322fc2ac26e4820e733c2121440129ab41",
"content_id": "135ca3666648bba53876268446f23b9fcf0ed0ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 28504,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 620,
"path": "/figure-test-comparison-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "#Set the neuroblastoma dataset here\nnb.data.dir <- file.path(\"neuroblastoma-data/data\")\n\ndata.dir.vec <- c(file.path(\"ATAC_JV_adipose/cv/equal_labels/testFolds/4\"),\n file.path(\"H3K27ac-H3K4me3_TDHAM_BP/cv/equal_labels/testFolds/2\"),\n file.path(\"systematic/cv/R-3.6.0-profileSize/testFolds/1\"),\n file.path(\"H3K4me3_XJ_immune/cv/equal_labels/testFolds/2\"),\n file.path(\"H3K4me3_XJ_immune/cv/equal_labels/testFolds/4\"))\n\ntestFold.vec <- sapply(data.dir.vec, function(x){file.path(nb.data.dir, x)})\n\n#This computation can take a long time. It is recommended to run the OneFold function\n#on each element of testFold.vec in par\n\nOneFold <- function(testFold.path)\n{\n #############################################################################\n ############################# Initializations ###############################\n #############################################################################\n \n #needed libraries\n library(data.table)\n library(ggplot2)\n library(dplyr)\n library(future.apply)\n library(directlabels)\n \n #Filename and directory initializations\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n \n #Initialize a generic plot title for later use\n plot.title <- paste0(\"Data Name = \", data.name, \", cv.type = \", cv.type, \",\\n test.fold = \", test.fold)\n \n #Read the \"inputs.csv\", \"outputs.csv\", and \"evaluations.csv\", for each data \n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"xz -dk\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n \n \n ## replace positive fp/fn at end with 0 to avoid AUM=Inf.\n data.list[[\"evaluation\"]][min.log.lambda==-Inf & 0<fn, fn := 0]\n data.list[[\"evaluation\"]][max.log.lambda==Inf & 0<fp, fp := 0]\n \n #random cross validation algorithm will randomly assign\n #subtrain/validation folds, where preset will use the test\n #folds already in place.\n cv.algos <- c(\"random\")\n cv.dt.list <- list()\n \n #############################################################################\n ############################# Model Fitting #################################\n #############################################################################\n \n for( curr.cv.algo in cv.algos )\n {\n ## read folds.csv for the specific data type\n folds.dt <- data.table::fread(folds.csv)\n \n #set test folds \n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := \"subtrain\"]\n \n #If the current cross validation algorithm is \"preset\",\n #use the test fold assignments already in place to\n #create subtrain/validation folds\n if( curr.cv.algo == \"preset\")\n {\n cv.folds <- unique(folds.dt[fold != test.fold]$fold)\n }\n \n #If the current cross validation algorithm is \"random\",\n #create my own randomized subtrain/validation folds.\n if( curr.cv.algo == \"random\")\n {\n #initialize my own validation folds\n set.seed(1)\n n.val.folds <- 4\n \n cv.folds <- 1:n.val.folds\n val.fold.assignments <- sample(rep(cv.folds, l = nrow(folds.dt[set == \"subtrain\"])))\n }\n \n #Loop through every validation folds in the total folds in\n #the cross validation algorithm\n for(val.fold in cv.folds)\n {\n \n 
#Adjust the folds with respect to the current validation fold\n #and the current cross validation algorithm\n if( curr.cv.algo == \"preset\")\n {\n folds.dt[set != \"test\",] <- folds.dt[set != \"test\",] %>%\n mutate(set = ifelse(fold == val.fold, \"validation\", \"subtrain\"))\n }\n if( curr.cv.algo == \"random\")\n {\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := \"subtrain\"]\n folds.dt[set == \"subtrain\"]$set <- val.fold.assignments\n folds.dt[set != \"test\",] <- folds.dt[set != \"test\",] %>% \n mutate(set = ifelse(set == val.fold, \"validation\", \"subtrain\"))\n }\n \n seqs.train <- folds.dt[[\"sequenceID\"]]\n X.all <- scale(data.list$inputs[, -1])\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.list <- list()\n for(s in unique(folds.dt$set)){\n set.list[[s]] <- rownames(X.finite) %in% folds.dt[s==set, sequenceID]\n }\n X.list <- lapply(set.list, function(i)X.finite[i, ])\n neg.t.X.subtrain <- -t(X.list[[\"subtrain\"]])\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ]) \n fit[[\"param.mat\"]]\n }\n )\n \n #Set hyperparameters for the training algorithm\n n.seeds <- 4\n num.iterations <- 2\n \n #Fit model. Output is a data.table that includes information\n #relating to the AUC, AUM, and more with respect to every seed,\n #iteration, cross validation algorithm, current validation fold, and\n #fold type (subtrain, validation, test) and more.\n fit.model <- function(X.train, y.train, set.list, num.iterations, seed)\n {\n \n iteration.dt.list <- list()\n for (init.name in names(init.fun.list))\n {\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n \n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n computeAUM <- function(w, i, is.set) {\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID = rownames(pred.pen.vec),\n pred.log.lambda = as.numeric(pred.pen.vec)\n )\n set.dt <- pred.dt[is.set]\n penaltyLearning::ROChange(data.list$evaluation, set.dt, \"sequenceID\")\n }\n for (iteration in 1:num.iterations) {\n summary.dt.list <- list()\n set.roc.list <- list()\n for (set in names(set.list)) {\n set.roc.list[[set]] <-\n computeAUM(weight.vec, intercept, set.list[[set]])\n summary.dt.list[[set]] <-\n with(set.roc.list[[set]],\n data.table(set,\n thresholds[threshold == \"predicted\"],\n auc,\n aum))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n iteration.dt.list[[paste(seed, init.name, iteration)]] <-\n data.table(seed = paste(seed), init.name, iteration, summary.dt)\n cat(\n sprintf(\n \"it=%d seed=%d init=%s cv.algo=%s\\n\",\n iteration,\n seed,\n init.name,\n curr.cv.algo\n )\n )\n g.dt <- set.roc.list[[\"subtrain\"]][[\"aum.grad\"]]\n ## If aum.grad has some problems with no changes in error then\n ## they may be missing.\n g.vec <- rep(0, ncol(neg.t.X.subtrain))\n names(g.vec) <- colnames(neg.t.X.subtrain)\n g.vec[g.dt[[\"sequenceID\"]]] <- g.dt[[\"lo\"]]\n is.differentiable <- all(g.dt[[\"lo\"]] == g.dt[[\"hi\"]])\n direction.vec <- neg.t.X.subtrain %*% g.vec\n take.step <- function(s) {\n weight.vec + s * direction.vec\n }\n \n #line search\n set.aum.list <- list()\n for (step.size in 10 ^ seq(-10, 0, by = 0.5)) {\n 
new.weight.vec <- take.step(step.size)\n for (set in \"subtrain\") {\n set.roc <- computeAUM(new.weight.vec, 0, set.list[[set]])\n set.aum.list[[paste(step.size, set)]] <-\n data.table(step.size,\n set,\n aum = set.roc$aum,\n intercept = set.roc$thresholds[threshold == \"min.error\", (max.thresh +\n min.thresh) / 2])\n }#line search\n }\n set.aum <- do.call(rbind, set.aum.list)\n best.dt <- set.aum[, .SD[min(aum) == aum], by = set]\n weight.vec <- take.step(best.dt[[\"step.size\"]])\n intercept <- best.dt[[\"intercept\"]]\n }#iteration\n }\n output.dt <- data.table(do.call(rbind, iteration.dt.list),\n data.name,\n cv.type,\n test.fold,\n is.differentiable)\n \n output.dt\n \n }\n \n for(curr.seed in 1:n.seeds)\n {\n #Create the data.tables used during the validation phase\n cv.dt.list[[paste(curr.seed, val.fold, curr.cv.algo)]] <- data.table( fit.model(X.train, \n y.train, \n set.list,\n num.iterations = num.iterations, \n seed = curr.seed),\n val.fold = paste(val.fold),\n cv.algo = curr.cv.algo)\n \n }#seed\n cat(sprintf(\"Validation fold %d complete\\n\", val.fold))\n }#validation fold\n \n }\n \n #Create the cross validation data.table and save it to a file\n cv.dt <- do.call(rbind, cv.dt.list)\n cv.csv <- file.path(testFold.path, \"linear-model-cv-aum.csv\")\n data.table::fwrite(cv.dt, cv.csv)\n \n #############################################################################\n ################################## Plotting #################################\n #############################################################################\n \n #Make the data.table of the mean aum/auc with respect to each seed, iteration,\n #data set, and cross validation algorithm\n mean.cv.dt <- cv.dt[, .(mean.aum=mean(aum), mean.auc=mean(auc)), by=.(seed, iteration, set, cv.algo)]\n \n #Make the data.table of the min/max aum/auc with respect to each seed, iteration,\n #data set, and cross validation algorithm, and include their corresponding iterations\n min.cv.dt <- cv.dt[, .(min.aum.iteration = which.min(aum), min.aum = min(aum),\n max.auc.iteration = which.max(auc), max.auc = max(auc)), \n by=.(seed, val.fold, set, cv.algo)]\n \n min.mean.cv.dt <- mean.cv.dt[, .(min.aum.iteration = which.min(mean.aum), min.aum = min(mean.aum),\n max.auc.iteration = which.max(mean.auc), max.auc = max(mean.auc)), \n by=.(seed, set, cv.algo) ]\n \n \n for(curr.cv.algo in cv.algos)\n {\n for(curr.set in unique(cv.dt$set))\n {\n #Create an auc line graph for every dataset, including a mean line graph\n out.png.path <- file.path(testFold.path, paste0(\"linear-model-precv-\", curr.set, \n \"-\", curr.cv.algo, \"-aum-line-graph.png\"))\n png(out.png.path)\n print(ggplot(data = cv.dt[set == curr.set & cv.algo == curr.cv.algo]) +\n geom_line(aes(x = iteration, y = aum, color = val.fold)) +\n geom_point(data = min.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = min.aum.iteration, y = min.aum, color = val.fold)) +\n geom_line(data = mean.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = iteration, y = mean.aum, color = \"mean\"), size = 1) +\n geom_point(data = min.mean.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = min.aum.iteration, y = min.aum), size = 3) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(curr.set, \" AUM with for each validation fold\\n\", plot.title)))\n dev.off()\n \n \n #Create an auc line graph for every dataset, including a mean line graph\n out.png.path <- file.path(testFold.path, paste0(\"linear-model-precv-\", curr.set,\n \"-\", 
curr.cv.algo, \"-auc-line-graph.png\"))\n png(out.png.path)\n print(ggplot(data = cv.dt[set == curr.set & cv.algo == curr.cv.algo]) +\n geom_line(aes(x = iteration, y = auc, color = val.fold)) +\n geom_point(data = min.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = max.auc.iteration, y = max.auc, color = val.fold)) +\n geom_line(data = mean.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = iteration, y = mean.auc, color = \"mean\"), size = 1) +\n geom_point(data = min.mean.cv.dt[set == curr.set & cv.algo == curr.cv.algo], mapping = aes(x = max.auc.iteration, y = max.auc), size = 3) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(curr.set, \" AUC with for each validation fold\\n\", plot.title)))\n dev.off()\n }\n }\n \n \n #############################################################################\n ############################# Post CV Initialization ########################\n #############################################################################\n \n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := \"subtrain\"]\n iteration.dt.list <- list()\n \n seqs.train <- folds.dt[[\"sequenceID\"]]\n X.all <- scale(data.list$inputs[, -1])\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.list <- list()\n for(s in unique(folds.dt$set)){\n set.list[[s]] <- rownames(X.finite) %in% folds.dt[s==set, sequenceID]\n }\n X.list <- lapply(set.list, function(i)X.finite[i, ])\n neg.t.X.subtrain <- -t(X.list[[\"subtrain\"]])\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ]) \n fit[[\"param.mat\"]]\n }\n )\n \n #############################################################################\n ############################## Post CV Fitting ##############################\n #############################################################################\n \n post.cv.dt.list <- list()\n for(curr.seed in 1:n.seeds)\n {\n post.cv.dt.list[[paste(curr.seed)]] <- fit.model(X.train, \n y.train,\n set.list,\n num.iterations = num.iterations,\n seed = curr.seed)\n }#seed\n \n #Create post cross validation data table\n post.cv.dt <- do.call(rbind, post.cv.dt.list)\n post.cv.csv <- file.path(testFold.path, \"linear-model-post-cv-aum.csv\")\n data.table::fwrite(post.cv.dt, post.cv.csv)\n \n #Create data.table with best linear model results with respect to\n #the test set\n best.linear.dt <- post.cv.dt[set == \"test\", .(aum = min(aum),\n auc = max(auc),\n type = \"best.linear\",\n aum.iteration = which.min(aum),\n auc.iteration = which.max(auc)),\n by = .(seed)]\n \n #Create the selected iteration data.table for later use in \n #training on the whole training dataset\n selected.iter.dt <- min.mean.cv.dt[set == \"validation\"]\n \n #Create selected iteration data table lists with respect to picking\n #the minimum aum or the minimum auc.\n selected.aum.dt.list <- list()\n selected.auc.dt.list <- list()\n for(curr.cv.algo in cv.algos)\n {\n for(curr.seed in 1:n.seeds)\n {\n curr.dt <- post.cv.dt[seed == curr.seed & set == \"test\"]\n min.aum.iteration <- selected.iter.dt[seed == curr.seed & cv.algo == curr.cv.algo]$min.aum.iteration\n max.auc.iteration <- selected.iter.dt[seed == curr.seed & 
cv.algo == curr.cv.algo]$max.auc.iteration\n \n selected.aum.dt.list[[paste0(curr.seed, curr.cv.algo)]] <- data.table(aum = curr.dt$aum[min.aum.iteration],\n auc = curr.dt$auc[min.aum.iteration],\n type = paste0(\"selected.\", curr.cv.algo,\".aum\"),\n seed = curr.seed,\n aum.iteration = min.aum.iteration,\n auc.iteration = min.aum.iteration)\n \n selected.auc.dt.list[[paste0(curr.seed, curr.cv.algo)]] <- data.table(aum = curr.dt$aum[max.auc.iteration],\n auc = curr.dt$auc[max.auc.iteration],\n type = paste0(\"selected.\", curr.cv.algo, \".auc\"),\n seed = curr.seed,\n aum.iteration = max.auc.iteration,\n auc.iteration = max.auc.iteration)\n }\n }\n \n selected.aum.dt <- do.call(rbind, selected.aum.dt.list)\n selected.auc.dt <- do.call(rbind, selected.auc.dt.list)\n \n #Create the data.table containing the initial aum/auc for\n #the dataset\n initial.dt <-\n post.cv.dt[set == \"test\", .(aum = first(aum),\n auc = first(auc),\n type = \"initial\",\n aum.iteration = 1,\n auc.iteration = 1), \n by = seed]\n \n #Combine intitial, selected, and best.linear results for plotting purposes\n test.aum.dt <- rbind(best.linear.dt, selected.aum.dt, selected.auc.dt, initial.dt)\n \n \n #Create a dotplot with every cv algorithm,\n out.png.path <- file.path(testFold.path, \"linear-model-test-aum-comparison.png\")\n png(out.png.path)\n print(ggplot(data = test.aum.dt) +\n geom_point(aes(x = aum, y = type, color = seed)) +\n ggtitle(plot.title))\n \n dev.off()\n \n out.png.path <- file.path(testFold.path, \"linear-model-test-auc-comparison.png\")\n png(out.png.path)\n \n print(ggplot(data = test.aum.dt) +\n geom_point(aes(x = auc, y = type, color = seed)) +\n ggtitle(plot.title))\n \n dev.off()\n \n \n out.png.path <- file.path(testFold.path, \"linear-model-postcv-subtrain-aum-line-graph.png\")\n png(out.png.path)\n \n print(ggplot(post.cv.dt[set == \"subtrain\",]) +\n geom_line(aes(x = iteration, y = aum)) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(\"subtrain AUM for every seed after CV\"), plot.title))\n \n dev.off()\n \n \n out.png.path <- file.path(testFold.path, \"linear-model-postcv-subtrain-auc-line-graph.png\")\n png(out.png.path)\n \n print(ggplot(post.cv.dt[set == \"subtrain\",]) +\n geom_line(aes(x = iteration, y = auc)) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(\"subtrain AUC for every seed after CV\"), plot.title))\n \n dev.off()\n \n \n out.png.path <- file.path(testFold.path, \"linear-model-postcv-test-aum-line-graph.png\")\n png(out.png.path)\n \n print(ggplot(post.cv.dt[set == \"test\",]) +\n geom_line(aes(x = iteration, y = aum)) +\n geom_point(data = best.linear.dt, mapping = aes(x = aum.iteration, y = aum, color = \"best.linear\")) +\n # geom_point(data = selected.aum.dt[type == \"selected.preset.aum\",], mapping = aes(x = aum.iteration, y = aum, color = \"selected.preset.aum\")) +\n # geom_point(data = selected.auc.dt[type == \"selected.preset.auc\",], mapping = aes(x = aum.iteration, y = aum, color = \"selected.preset.auc\")) +\n geom_point(data = selected.aum.dt[type == \"selected.random.aum\",], mapping = aes(x = aum.iteration, y = aum, color = \"selected.random.aum\")) +\n geom_point(data = selected.auc.dt[type == \"selected.random.auc\",], mapping = aes(x = aum.iteration, y = aum, color = \"selected.random.auc\")) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(\"test AUM for every seed after CV\"), plot.title))\n \n dev.off()\n \n out.png.path <- file.path(testFold.path, 
\"linear-model-postcv-test-auc-line-graph.png\")\n png(out.png.path)\n \n print(ggplot(post.cv.dt[set == \"test\",]) +\n geom_line(aes(x = iteration, y = auc)) +\n geom_point(data = best.linear.dt, mapping = aes(x = aum.iteration, y = auc, color = \"best.linear\")) +\n # geom_point(data = selected.aum.dt[type == \"selected.preset.aum\",], mapping = aes(x = aum.iteration, y = auc, color = \"selected.preset.aum\")) +\n # geom_point(data = selected.auc.dt[type == \"selected.preset.auc\",], mapping = aes(x = aum.iteration, y = auc, color = \"selected.preset.auc\")) +\n geom_point(data = selected.aum.dt[type == \"selected.random.aum\",], mapping = aes(x = aum.iteration, y = auc, color = \"selected.random.aum\")) +\n geom_point(data = selected.auc.dt[type == \"selected.random.auc\",], mapping = aes(x = aum.iteration, y = auc, color = \"selected.random.auc\")) +\n facet_wrap(.~seed, labeller = label_both) +\n ggtitle(paste0(\"test AUM for every seed after CV\"), plot.title))\n \n dev.off()\n \n}\n\ntest.aum.dt.list <- list()\n\nfor(testFold.path in testFold.vec){\n OneFold(testFold.path)\n \n #Filename and directory initializations\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n inputs.csv <- data.table::fread(file.path(data.dir, \"inputs.csv\"))\n data.name <- basename(data.dir)\n data.list <- list()\n \n n.obs <- nrow(inputs.csv)\n p <- ncol(inputs.csv)\n \n #Initialize a generic plot title for later use\n plot.title <- paste0(\"Data Name = \", data.name, \", cv.type = \", cv.type, \", test.fold = \", test.fold)\n cv.algos <- c(\"random\")\n n.seeds <- 4\n \n if(!file.exists(file.path(testFold.path, \"linear-model-cv-aum.csv\")) ||\n !file.exists(file.path(testFold.path, \"linear-model-post-cv-aum.csv\")))\n {\n OneFold(testFold.path)\n }\n \n cv.dt <- data.table::fread(file.path(testFold.path, \"linear-model-cv-aum.csv\")) %>% \n mutate(seed = as.factor(seed))\n post.cv.dt <- data.table::fread(file.path(testFold.path, \"linear-model-post-cv-aum.csv\")) %>%\n mutate(seed = as.factor(seed))\n \n #Make the data.table of the mean aum/auc with respect to each seed, iteration,\n #data set, and cross validation algorithm\n mean.cv.dt <- cv.dt[, .(mean.aum=mean(aum), mean.auc=mean(auc)), by=.(seed, iteration, set, cv.algo)]\n \n #Make the data.table of the min/max aum/auc with respect to each seed, iteration,\n #data set, and cross validation algorithm, and include their corresponding iterations\n min.cv.dt <- cv.dt[, .(min.aum.iteration = which.min(aum), min.aum = min(aum),\n max.auc.iteration = which.max(auc), max.auc = max(auc)), \n by=.(seed, val.fold, set, cv.algo)]\n \n min.mean.cv.dt <- mean.cv.dt[, .(min.aum.iteration = which.min(mean.aum), min.aum = min(mean.aum),\n max.auc.iteration = which.max(mean.auc), max.auc = max(mean.auc)), \n by=.(seed, set, cv.algo) ]\n \n \n \n best.linear.dt <- post.cv.dt[set == \"test\", .(`Test AUM` = min(aum),\n `Test AUC` = max(auc),\n algorithm = \"best.linear\",\n aum.iteration = which.min(aum),\n auc.iteration = which.max(auc)),\n by = .(seed)]\n \n #Create the selected iteration data.table for later use in \n #training on the whole training dataset\n selected.iter.dt <- min.mean.cv.dt[set == \"validation\"]\n \n #Create selected iteration data table lists with respect to picking\n #the minimum aum or the minimum auc.\n selected.aum.dt.list <- list()\n for(curr.cv.algo in cv.algos)\n {\n for(curr.seed 
in 1:n.seeds)\n {\n curr.dt <- post.cv.dt[seed == curr.seed & set == \"test\"]\n min.aum.iteration <- selected.iter.dt[seed == curr.seed & cv.algo == curr.cv.algo]$min.aum.iteration\n max.auc.iteration <- selected.iter.dt[seed == curr.seed & cv.algo == curr.cv.algo]$max.auc.iteration\n \n for(aum.auc in c(\"aum\", \"auc\"))\n {\n if(aum.auc == \"aum\")\n {\n selected.aum.dt.list[[paste0(curr.seed, curr.cv.algo, aum.auc)]] <- data.table(`Test AUM` = curr.dt$aum[min.aum.iteration],\n `Test AUC` = curr.dt$auc[min.aum.iteration],\n algorithm = paste0(\"Min.Valid.AUM\"),\n seed = curr.seed,\n aum.iteration = min.aum.iteration,\n auc.iteration = min.aum.iteration)\n } else {\n selected.aum.dt.list[[paste0(curr.seed, curr.cv.algo, aum.auc)]] <- data.table(`Test AUM` = curr.dt$aum[max.auc.iteration],\n `Test AUC` = curr.dt$auc[max.auc.iteration],\n algorithm = paste0(\"Max.Valid.AUC\"),\n seed = curr.seed,\n aum.iteration = max.auc.iteration,\n auc.iteration = max.auc.iteration)\n }\n \n \n \n }\n }\n }\n \n selected.aum.dt <- do.call(rbind, selected.aum.dt.list)\n \n #Create the data.table containing the initial aum/auc for\n #the dataset\n initial.dt <-\n post.cv.dt[set == \"test\", .(`Test AUM` = first(aum),\n `Test AUC` = first(auc),\n algorithm = \"Initial\",\n aum.iteration = 1,\n auc.iteration = 1), \n by = seed]\n \n #Combine intitial, selected, and best.linear results for plotting purposes\n test.aum.dt <- rbind(selected.aum.dt, initial.dt)\n \n test.aum.dt.list[[testFold.path]] <- test.aum.dt %>% mutate(data.name = data.name, \n cv.type = cv.type,\n test.fold = test.fold,\n n = n.obs,\n p = p)\n}\n\n \n \n\ntest.aum.dt.combined <- do.call(rbind, test.aum.dt.list) %>%\n mutate(new.test.fold = paste0(\"Test Fold \", test.fold, \", n=\", n, \", p=\", p))\n\ndata.table::fwrite(test.aum.dt.combined, \"figure-test-comparison.csv\")\n"
},
{
"alpha_fraction": 0.6331300735473633,
"alphanum_fraction": 0.6544715166091919,
"avg_line_length": 27.91176414489746,
"blob_id": "6952f6d9b2f36b26ae69935181c79b16103c0da7",
"content_id": "3306ca0614b8998b835e8374ebabbb95b9b6e9d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 984,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 34,
"path": "/figure-test-comparison.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(dplyr)\nlibrary(ggplot2)\nlibrary(data.table)\n\n\nif(!file.exists(\"figure-test-comparison.csv\"))\n{\n source(\"figure-test-comparison-data.R\")\n}\n\ntest.aum.dt.combined <- data.table::fread(\"figure-test-comparison.csv\")\n\n\npng(\"figure-test-auc-comparison.png\", width = 26, height = 3, res = 200, units = \"in\")\n\n\nggplot(data = test.aum.dt.combined) +\n geom_point(aes(x = `Test AUC`, y = algorithm), size = 5) +\n #ggtitle(c(data.name, cv.type, test.fold)) +\n facet_grid(.~data.name + new.test.fold, scales = \"free\") +\n theme(panel.spacing=grid::unit(1, \"cm\"), text = element_text(size=25))\n\ndev.off()\n\npng(\"figure-test-aum-comparison.png\", width = 26, height = 3, res = 200, units = \"in\")\n\n\nggplot(data = test.aum.dt.combined) +\n geom_point(aes(x = `Test AUM`, y = algorithm), size = 5) +\n #ggtitle(c(data.name, cv.type, test.fold)) +\n facet_grid(.~data.name + new.test.fold, scales = \"free\") +\n theme(panel.spacing=grid::unit(1, \"cm\"), text = element_text(size=25))\n\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6175316572189331,
"alphanum_fraction": 0.6310759782791138,
"avg_line_length": 30.345237731933594,
"blob_id": "6bb38cece053bdeebb8e450bacee768d2ce8313d",
"content_id": "7c447683517ae18fe4497ae4bf67c41dffa1fd51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 15800,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 504,
"path": "/figure-line-search-example.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\ndata(neuroblastoma, package=\"neuroblastoma\")\nex <- function(label, profile.id, chromosome){\n data.table(\n label, \n profile.id=factor(profile.id), \n chromosome=factor(chromosome))\n}\nselect.dt <- rbind(\n ex(\"pos\", 4, 2),\n ex(\"neg\", 513, 3))\nnb.list <- lapply(neuroblastoma, data.table)\nnb.some <- lapply(nb.list, \"[\", select.dt, on=.NATURAL)\nmax.segments <- max(neuroblastomaProcessed$errors$n.segments)\nnb.segs <- nb.some$profiles[, {\n cum.vec <- cumsum(c(0, logratio))\n d <- diff(position)/2\n between <- position[-1]-d\n data.start.pos <- c(position[1]-d[1], between)\n data.end.pos <- c(between, position[.N]+d[.N-1])\n fit <- jointseg::Fpsn(logratio, max.segments)\n end.t <- t(fit$t.est)\n end.dt <- data.table(\n end=as.integer(end.t),\n segments=as.integer(col(end.t))\n )[!is.na(end)]\n end.dt[, start := c(0, end[-.N])+1, by=segments]\n end.dt[, mean := (cum.vec[end+1]-cum.vec[start])/(end-start+1)]\n end.dt[, `:=`(\n start.pos=data.start.pos[start],\n end.pos=data.end.pos[end]\n )]\n}, by=label]\nsome.err <- neuroblastomaProcessed$errors[select.dt, .(\n profile.id, chromosome,\n segments=n.segments,\n fp, fn, possible.fp, possible.fn,\n min.log.lambda=-max.log.lambda,\n max.log.lambda=-min.log.lambda,\n min.lambda,\n errors, labels,\n label\n), on=list(profile.id, chromosome)]\nerr.sizes <- c(\n \"min(FP,FN)\"=2,\n FP=6,\n FN=4)\nerr.colors <- c(\n correct=\"transparent\",\n \"min(FP,FN)\"=\"black\",\n FP=\"red\",\n FN=\"deepskyblue\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=c(\"fp\",\"fn\"),\n variable.name=\"var.lower\")\nsome.err.tall[, error.type := toupper(var.lower)]\nleg <- \"Error type\"\ndmin <- 2.3\ndmax <- 3\nsome.err[, fp.diff := c(NA, diff(fp)), by=label]\nsome.err[, fn.diff := c(NA, diff(fn)), by=label]\nsome.diff <- some.err[fp.diff != 0 | fn.diff != 0, .(\n id=1, label, fp.diff, fn.diff, pred.log.lambda=min.log.lambda)]\nsome.diff[, fp.cum := cumsum(fp.diff), by=label]\nsome.diff[, fn.cum := rev(cumsum(rev(-fn.diff))), by=label]\ndlist <- split(some.diff, some.diff[[\"label\"]])\nborder.pred <- with(dlist, pos[ #orange dots\n neg,\n data.table(\n pos=pred.log.lambda,\n neg=i.pred.log.lambda),\n on=\"id\",\n allow.cartesian=TRUE]\n)[, diff := neg-pos]\nneg.seq <- seq(dmin, dmax, by=0.025)\ngrid.pred <- CJ(\n pos=-neg.seq, \n neg=neg.seq\n)[, diff := neg-pos]\nrange(grid.pred[, neg-pos])\ngrid.uniq.diff <- grid.pred[, .(\n pos=0,\n neg=unique(diff)\n)][, diff := neg-pos]\nboth.pred <- rbind(\n data.table(differentiable=FALSE, border.pred), \n data.table(differentiable=TRUE, grid.uniq.diff)\n)\npred.tall <- melt(\n both.pred,\n id.vars=c(\"diff\",\"differentiable\"),\n measure.vars=c(\"neg\",\"pos\"),\n variable.name=\"label\",\n value.name=\"pred.log.lambda\"\n)[select.dt, nomatch=0L, on=\"label\"]\nmetrics.wide <- pred.tall[, {\n L <- penaltyLearning::ROChange(some.err, .SD, \"label\")\n pos <- pred.log.lambda[label==\"pos\"]\n with(L, data.table(\n aum, auc,\n SM=roc[min.thresh < max.thresh, sum(min.fp.fn)],\n roc=list(roc[, `:=`(\n min.thresh=min.thresh+pos,\n max.thresh=max.thresh+pos\n )])\n ))\n}, keyby=list(diff, differentiable)]\nmyjoin <- function(d.val, dt.pred){\n metrics.wide[differentiable==d.val][dt.pred, on=\"diff\"]\n}\nboth.roc <- rbind(\n myjoin(TRUE, grid.pred),\n myjoin(FALSE, border.pred))\npred.list <- list(\n after.two=c(neg=2.8, pos=-2.75))\ndiff.grid.list <- list()\nls.points.list <- list()\nls.segs.list <- 
list()\nabline.dt.list <- list()\nvline.dt.list <- list()\npred.points.list <- list()\nheat.step.list <- list()\nfor(pred.name in names(pred.list)){\n one.pred <- pred.list[[pred.name]]\n one.pred.diff <- one.pred[[\"neg\"]]-one.pred[[\"pos\"]]\n some.err[, example := label]\n diff.dt <- aum::aum_diffs_penalty(some.err, names(one.pred))\n ls.list <- aum::aum_line_search(\n diff.dt, pred.vec=one.pred, maxIterations = 10)\n ##compute slope and intercept of each of the 6 T_b(s) functions, plot\n ##them using geom_abline, and geom_point to represent the 9\n ##intersection points.\n some.diff[, `:=`(\n slope=ifelse(label==\"pos\", 0, -1),\n intercept=pred.log.lambda-ifelse(label==\"pos\", 0, 6.5))]\n denom <- sum(ls.list$gradient_pred*c(1,-1))\n ToStep <- function(d){\n ifelse(d==0, 0, if(denom==0)NA else d/denom)\n }\n metrics.tall <- melt(\n both.roc,\n measure.vars=c(\"aum\", \"auc\"),\n variable.name=\"var.lower\"\n )[order(-differentiable)][, pred.diff := neg-pos][\n , step.size := ToStep(one.pred.diff-pred.diff)\n ][!is.na(step.size)]\n metrics.tall[, variable := toupper(var.lower)]\n metrics.tall[\n , norm := (value-min(value))/(max(value)-min(value)), by=variable]\n max.step <- ls.list$line_search_result[, (3*step.size[.N]-step.size[.N-1])/2]\n heat.step.list[[pred.name]] <- \n data.table(t(sapply(c(\n ls.list$line_search_result$step.size, max.step\n ), function(s){\n one.pred-ls.list$gradient*s\n })), pred.name)\n if(length(max.step)==0)max.step <- Inf\n ls.segs.list[[pred.name]] <- data.table(\n pred.name, rbind(\n ls.list$line_search_result[, .(\n variable=\"AUC\", \n step.min=step.size, step.max=c(step.size[-1], max.step), \n value.min=auc.after, value.max=auc.after)],\n ls.list$line_search_result[, .(\n variable=\"AUM\", \n step.min=step.size, step.max=c(step.size[-1], max.step), \n value.min=aum, value.max=c(\n aum[-1], \n if(max.step==Inf)aum else (max.step-step.size[.N])*aum.slope.after[.N]+aum[.N]))]))\n ls.points.list[[pred.name]] <- melt(\n data.table(pred.name, ls.list$line_search_result),\n measure=c(\"aum\",\"auc\"),\n variable.name=\"var.lower\"\n )[, variable := toupper(var.lower)]\n diff.grid.list[[pred.name]] <- unique(metrics.tall[, .(\n pred.name, pred.diff, step.size, differentiable, variable, value\n )])\n abline.dt.list[[pred.name]] <- data.table(\n pred.name, variable=\"threshold\", search=\"exact\", ls.list$line_search_input)\n vline.dt.list[[pred.name]] <- unique(metrics.tall[\n differentiable==FALSE & variable==\"AUC\", \n .(pred.name, step.size)])\n pred.points.list[[pred.name]] <- data.table(\n pred.name, t(one.pred))\n}\nheat.step <- rbindlist(heat.step.list)\npred.points <- rbindlist(pred.points.list)\ndiff.grid <- rbindlist(diff.grid.list)\nls.points <- rbindlist(ls.points.list)\nls.segs <- rbindlist(ls.segs.list)\nabline.dt <- rbindlist(abline.dt.list)\nvline.dt <- rbindlist(vline.dt.list)\n\nggplot()+\n geom_vline(aes(\n xintercept=step.size),\n data=vline.dt,\n color=\"grey\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(1, \"lines\"))+\n geom_abline(aes(\n slope=slope, intercept=intercept, color=search),\n data=abline.dt)+\n geom_point(aes(\n 0, intercept, color=search),\n data=abline.dt)+\n facet_grid(variable ~ ., scales=\"free_y\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_point(aes(\n step.size, value, fill=differentiable, color=search),\n size=3,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n geom_point(aes(\n 
step.size, value, color=search),\n data=data.table(search=\"exact\", ls.points))+\n geom_segment(aes(\n step.min, value.min,\n color=search,\n xend=step.max, yend=value.max),\n data=data.table(search=\"exact\", ls.segs))+\n xlab(\"Step size\")+\n ##scale_y_continuous(\"\", breaks=seq(0, 3, by=1))\n scale_y_continuous(\"\")\n\npoint.size <- 1\ngg <- ggplot()+\n ggtitle(\"Many step sizes considered in grid search\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(1, \"lines\"))+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_point(aes(\n step.size, value, fill=differentiable),\n size=point.size,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n scale_y_continuous(\"\")+\n scale_x_continuous(\"Step size\", breaks=seq(-1, 1, by=0.1))\npng(\n \"figure-line-search-example-grid.png\",\n width=4.9, height=3, units=\"in\", res=300)\nprint(gg)\ndev.off()\ndiff.grid.some <- diff.grid[\n differentiable==TRUE\n][, .SD[seq(1,.N,by=3)], by=variable]\nZERO <- 1e-3\ndiff.grid.some <- diff.grid[\n abs(round(step.size,digits=1)-step.size)<ZERO\n & step.size>ZERO]\ngg <- ggplot()+\n ggtitle(\"Four steps\")+\n theme_bw()+\n theme(\n legend.position=\"none\",\n panel.margin=grid::unit(1, \"lines\"))+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_blank(aes(\n step.size, value),\n data=diff.grid[, .(step.size=0.1, value=range(value)), by=variable])+\n geom_point(aes(\n step.size, value, fill=differentiable),\n size=point.size,\n shape=21,\n data=data.table(search=\"grid\", diff.grid.some))+\n scale_y_continuous(\"\")+\n scale_x_continuous(\"Step size\", breaks=seq(-1, 1, by=0.1))\npng(\n \"figure-line-search-example-some.png\",\n width=1.5, height=3, units=\"in\", res=300)\nprint(gg)\ndev.off()\n\nit.name.vec <- c(\n \"5\"=\"first min\",\n \"6\"=\"linear\",\n \"8\"=\"quadratic\")\nit.name.dt <- data.table(\n iteration.i=as.integer(names(it.name.vec)),\n maxIterations.name=it.name.vec)\nframe.list <- list()\nprev.intersection.list <- list(data.table(\n iteration.i=1, this.next.step=0, this.next.thresh=1.1))\nfor(iteration.i in 1:nrow(ls.list$line_search_result)){\n offset <- if(iteration.i==8)1000 else 0.015\n current.vline <- ls.list$line_search_result[iteration.i][, `:=`(\n step.after=step.size+offset,\n aum.after=aum+offset*aum.slope.after\n )]\n current.intersections <- data.table(abline.dt[, `:=`(\n this.intercept=intercept+slope*current.vline$step.size\n )], key=c(\"this.intercept\",\"slope\"))[, `:=`(\n next.slope=c(slope[-1],NA),\n next.intercept=c(intercept[-1],NA)\n )][, `:=`(\n this.next.step=(intercept-next.intercept)/(next.slope-slope)\n )][, `:=`(\n this.next.thresh=this.next.step*slope+intercept\n )][is.finite(this.next.step) & current.vline$step.size < this.next.step][]\n current.segs <- ls.segs[\n , search := \"exact\"][step.max <= current.vline$step.size]\n current.points <- ls.points[step.size <= current.vline$step.size]\n seg.size <- 1\n after.linetype <- \"solid\"\n diff.colors <- c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\")\n search.colors <- c(\n exact=\"red\",\n grid=\"black\")\n gg <- ggplot()+\n geom_vline(aes(\n xintercept=step.size),\n data=vline.dt,\n color=\"grey\")+\n geom_rect(aes(\n ymin=-Inf, ymax=Inf,\n xmin=0, \n xmax=step.size,\n color=search),\n alpha=0.3,\n 
data=data.table(search=\"exact\",current.vline))+\n theme_bw()+\n theme(panel.margin=grid::unit(0.5, \"lines\"))+\n geom_abline(aes(\n slope=slope, intercept=intercept, color=search),\n data=abline.dt)+\n geom_blank(aes(\n 0, intercept, color=search),\n data=abline.dt)+\n geom_point(aes(\n this.next.step, this.next.thresh, color=search),\n data=current.intersections)+\n facet_grid(variable ~ ., scales=\"free_y\")+\n scale_fill_manual(values=diff.colors)+\n scale_color_manual(values=search.colors)+\n geom_point(aes(\n step.size, value, fill=differentiable, color=search),\n size=3,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n geom_point(aes(\n step.size, value, color=search),\n data=data.table(search=\"exact\", current.points))+\n geom_segment(aes(\n step.min, value.min,\n color=search,\n xend=step.max, yend=value.max),\n linewidth=seg.size,\n data=current.segs)+\n geom_segment(aes(\n step.size, aum,\n color=search,\n xend=step.after, yend=aum.after),\n linetype=after.linetype,\n linewidth=seg.size,\n data=data.table(search=\"exact\",variable=\"AUM\",current.vline))+\n geom_segment(aes(\n step.size, auc.after,\n color=search,\n xend=step.after, yend=auc.after),\n linetype=after.linetype,\n linewidth=seg.size,\n data=data.table(search=\"exact\",variable=\"AUC\",current.vline))+\n xlab(\"Step size\")+\n scale_y_continuous(\"\")\n png(\n sprintf(\"figure-line-search-example-%d.png\", iteration.i),\n width=6, height=4.7, units=\"in\", res=300)\n lwd <- 2\n layout(rbind(1, 2, 3, 3, 3, 3))\n left.lines <- 4.5\n other.lines <- 1\n ax.label.offset <- 1.5\n par(\n mar=c(0,left.lines,other.lines,other.lines),\n cex=1.2)\n is.before <- it.name.dt$iteration.i <= iteration.i\n it.name.some <- it.name.dt[is.before]\n it.name.vlines <- ls.list$line_search_result[it.name.some$iteration.i]\n draw.rect <- function(){\n abline(\n v=it.name.vlines$step.size,\n lwd=5,\n col=\"#999999\")\n current.vline[, rect( \n 0, -1000, if(iteration.i==8)1000 else step.size, 1000, \n col=\"#00000033\",\n border=search.colors[[\"exact\"]])]\n }\n diff.grid[variable==\"AUC\", plot(\n step.size, value, type=\"n\",\n ylab=\"AUC\",\n xaxt=\"n\",\n las=1)]\n draw.rect()\n grid.cex <- 0.5\n diff.grid[variable==\"AUC\", points(\n step.size, value, pch=21, cex=grid.cex,\n bg=diff.colors[paste(differentiable)])]\n current.segs[variable==\"AUC\", segments(\n step.min, value.min,\n step.max, value.max,\n lwd=lwd,\n col=search.colors[[\"exact\"]])]\n current.pch <- 21\n current.points[variable==\"AUC\", points(\n step.size, value,\n pch=current.pch,\n col=search.colors[[\"exact\"]])]\n current.vline[, segments(\n step.size, auc.after,\n step.after, auc.after,\n lwd=lwd,\n col=search.colors[[\"exact\"]])]\n par(mar=c(0,left.lines,other.lines,other.lines))\n diff.grid[variable==\"AUM\", plot(\n step.size, value, type=\"n\",\n ylab=\"AUM\",\n xaxt=\"n\",\n las=1)]\n draw.rect()\n diff.grid[variable==\"AUM\", points(\n step.size, value, pch=21, cex=grid.cex,\n bg=diff.colors[paste(differentiable)])]\n current.segs[variable==\"AUM\", segments(\n step.min, value.min,\n step.max, value.max,\n lwd=lwd,\n col=search.colors[[\"exact\"]])]\n current.points[variable==\"AUM\", points(\n step.size, value,\n pch=current.pch,\n col=search.colors[[\"exact\"]])]\n current.vline[, segments(\n step.size, aum,\n step.after, aum.after,\n lwd=lwd,\n col=search.colors[[\"exact\"]])]\n bottom.lines <- 4.5\n par(mar=c(bottom.lines,left.lines,other.lines,other.lines))\n plot(\n range(diff.grid$step.size),\n range(abline.dt$intercept),\n type=\"n\", 
las=1,\n xlab=\"\",\n ylab=\"Threshold\")\n mtext(\"Step size\", side=1, line=2, cex=par(\"cex\"))\n draw.rect()\n ##abline.dt[, points(rep(0, .N), intercept)]\n abline.dt[, abline(\n intercept, slope, col=search.colors[[\"exact\"]],\n lwd=lwd\n ), by=intercept]\n current.intersections[, points(\n this.next.step, this.next.thresh,\n col=search.colors[[\"exact\"]])]\n rbindlist(prev.intersection.list)[, text(\n this.next.step, this.next.thresh, iteration.i, adj=c(1,0.5))]\n if(nrow(it.name.vlines))text(\n it.name.vlines$step.size,\n 0.8,\n it.name.some$maxIterations.name,\n srt=90,\n adj=c(0,1))\n legend(\n 'topleft', c(\"grid\", \"proposed\"),\n col=c(\"black\",\"red\"), pch=1, lty=c(0,1), bg=\"white\",\n cex=0.75)\n ##print(gg)\n dev.off()\n frame.list[[iteration.i]] <- sprintf(\"\n\\\\begin{frame}\n \\\\frametitle{Proposed complete AUC/AUM line search, iteration %d}\n%s\\n\\n\n \\\\includegraphics[width=\\\\textwidth]{figure-line-search-example-%d}\\n\\n\n\\\\end{frame}\n\",iteration.i,if(iteration.i>1)\"AUC/AUM values completely known within shaded grey region.\" else \"AUC/AUM values known only at red vertical line.\",iteration.i)\n prev.intersection.list[[iteration.i+1]] <- current.intersections[\n which.min(this.next.step),\n .(iteration.i=iteration.i+1, this.next.step, this.next.thresh)]\n}\ncat(paste(frame.list, collapse=\"\\n\"), file=\"figure-line-search-example.tex\")\nsystem(\"pdflatex HOCKING-slides-toronto\")\n\n\n"
},
{
"alpha_fraction": 0.6152147650718689,
"alphanum_fraction": 0.6265292763710022,
"avg_line_length": 29.70903968811035,
"blob_id": "a08d57e5996ed77dda404864fb423c0c3019c44e",
"content_id": "e99437532d567e2b498236e9f2220566e4da21ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 10871,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 354,
"path": "/figure-line-search-interactive.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(animint2)\nlibrary(data.table)\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\ndata(neuroblastoma, package=\"neuroblastoma\")\nex <- function(label, profile.id, chromosome){\n data.table(\n label, \n profile.id=factor(profile.id), \n chromosome=factor(chromosome))\n}\nselect.dt <- rbind(\n ex(\"pos\", 4, 2),\n ex(\"neg\", 513, 3))\nnb.list <- lapply(neuroblastoma, data.table)\nnb.some <- lapply(nb.list, \"[\", select.dt, on=.NATURAL)\nmax.segments <- max(neuroblastomaProcessed$errors$n.segments)\nnb.segs <- nb.some$profiles[, {\n cum.vec <- cumsum(c(0, logratio))\n d <- diff(position)/2\n between <- position[-1]-d\n data.start.pos <- c(position[1]-d[1], between)\n data.end.pos <- c(between, position[.N]+d[.N-1])\n fit <- jointseg::Fpsn(logratio, max.segments)\n end.t <- t(fit$t.est)\n end.dt <- data.table(\n end=as.integer(end.t),\n segments=as.integer(col(end.t))\n )[!is.na(end)]\n end.dt[, start := c(0, end[-.N])+1, by=segments]\n end.dt[, mean := (cum.vec[end+1]-cum.vec[start])/(end-start+1)]\n end.dt[, `:=`(\n start.pos=data.start.pos[start],\n end.pos=data.end.pos[end]\n )]\n}, by=label]\n\nsome.err <- neuroblastomaProcessed$errors[select.dt, .(\n profile.id, chromosome,\n segments=n.segments,\n fp, fn, possible.fp, possible.fn,\n min.log.lambda=-max.log.lambda,\n max.log.lambda=-min.log.lambda,\n min.lambda,\n errors, labels,\n label\n), on=list(profile.id, chromosome)]\nerr.sizes <- c(\n \"min(FP,FN)\"=2,\n FP=6,\n FN=4)\nerr.colors <- c(\n correct=\"transparent\",\n \"min(FP,FN)\"=\"black\",\n FP=\"red\",\n FN=\"deepskyblue\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=c(\"fp\",\"fn\"),\n variable.name=\"var.lower\")\nsome.err.tall[, error.type := toupper(var.lower)]\nleg <- \"Error type\"\n\ndmin <- 2\ndmax <- 3.05\nsome.err[, fp.diff := c(NA, diff(fp)), by=label]\nsome.err[, fn.diff := c(NA, diff(fn)), by=label]\nsome.diff <- some.err[fp.diff != 0 | fn.diff != 0, .(\n id=1, label, fp.diff, fn.diff, pred.log.lambda=min.log.lambda)]\nsome.diff[, fp.cum := cumsum(fp.diff), by=label]\nsome.diff[, fn.cum := rev(cumsum(rev(-fn.diff))), by=label]\ndlist <- split(some.diff, some.diff[[\"label\"]])\nborder.pred <- with(dlist, pos[ #orange dots\n neg,\n data.table(\n pos=pred.log.lambda,\n neg=i.pred.log.lambda),\n on=\"id\",\n allow.cartesian=TRUE]\n)[, diff := neg-pos]\nneg.seq <- seq(dmin, dmax, by=0.025)\ngrid.pred <- CJ(\n pos=-neg.seq, \n neg=neg.seq\n)[, diff := neg-pos]\nrange(grid.pred[, neg-pos])\n\ngrid.uniq.diff <- grid.pred[, .(\n pos=0,\n neg=unique(diff)\n)][, diff := neg-pos]\nboth.pred <- rbind(\n data.table(differentiable=FALSE, border.pred), \n data.table(differentiable=TRUE, grid.uniq.diff)\n)\npred.tall <- melt(\n both.pred,\n id.vars=c(\"diff\",\"differentiable\"),\n measure.vars=c(\"neg\",\"pos\"),\n variable.name=\"label\",\n value.name=\"pred.log.lambda\"\n)[select.dt, nomatch=0L, on=\"label\"]\nmetrics.wide <- pred.tall[, {\n L <- penaltyLearning::ROChange(some.err, .SD, \"label\")\n pos <- pred.log.lambda[label==\"pos\"]\n with(L, data.table(\n aum, auc,\n SM=roc[min.thresh < max.thresh, sum(min.fp.fn)],\n roc=list(roc[, `:=`(\n min.thresh=min.thresh+pos,\n max.thresh=max.thresh+pos\n )])\n ))\n}, keyby=list(diff, differentiable)]\nmyjoin <- function(d.val, dt.pred){\n metrics.wide[differentiable==d.val][dt.pred, on=\"diff\"]\n}\nboth.roc <- rbind(\n myjoin(TRUE, grid.pred),\n myjoin(FALSE, border.pred))\n\npred.list <- list(\n nine.cross=c(neg=3.05, pos=-2.95),\n flat=c(neg=2.75, pos=-3),\n after.two=c(neg=2.8, pos=-2.75),\n 
increasing=c(neg=2.5, pos=-2.5))\ndiff.grid.list <- list()\nls.points.list <- list()\nls.segs.list <- list()\nabline.dt.list <- list()\nvline.dt.list <- list()\npred.points.list <- list()\nheat.step.list <- list()\nfor(pred.name in names(pred.list)){\n one.pred <- pred.list[[pred.name]]\n one.pred.diff <- one.pred[[\"neg\"]]-one.pred[[\"pos\"]]\n some.err[, example := label]\n diff.dt <- aum::aum_diffs_penalty(some.err, names(one.pred))\n ls.list <- aum::aum_line_search(diff.dt, pred.vec=one.pred, maxIterations = 10)\n ##compute slope and intercept of each of the 6 T_b(s) functions, plot\n ##them using geom_abline, and geom_point to represent the 9\n ##intersection points.\n some.diff[, `:=`(\n slope=ifelse(label==\"pos\", 0, -1),\n intercept=pred.log.lambda-ifelse(label==\"pos\", 0, 6.5))]\n denom <- sum(ls.list$gradient_pred*c(1,-1))\n ToStep <- function(d){\n ifelse(d==0, 0, if(denom==0)NA else d/denom)\n }\n metrics.tall <- melt(\n both.roc,\n measure.vars=c(\"aum\", \"auc\"),\n variable.name=\"var.lower\"\n )[order(-differentiable)][, pred.diff := neg-pos][\n , step.size := ToStep(one.pred.diff-pred.diff)\n ][!is.na(step.size)]\n metrics.tall[, variable := toupper(var.lower)]\n metrics.tall[\n , norm := (value-min(value))/(max(value)-min(value)), by=variable]\n max.step <- ls.list$line_search_result[, (3*step.size[.N]-step.size[.N-1])/2]\n heat.step.list[[pred.name]] <- \n data.table(t(sapply(c(\n ls.list$line_search_result$step.size, max.step\n ), function(s){\n one.pred-ls.list$gradient*s\n })), pred.name)\n if(length(max.step)==0)max.step <- Inf\n ls.segs.list[[pred.name]] <- data.table(\n pred.name, rbind(\n ls.list$line_search_result[, .(\n variable=\"AUC\", \n step.min=step.size, step.max=c(step.size[-1], max.step), \n value.min=auc.after, value.max=auc.after)],\n ls.list$line_search_result[, .(\n variable=\"AUM\", \n step.min=step.size, step.max=c(step.size[-1], max.step), \n value.min=aum, value.max=c(\n aum[-1], \n if(max.step==Inf)aum else (max.step-step.size[.N])*aum.slope.after[.N]+aum[.N]))]))\n ls.points.list[[pred.name]] <- melt(\n data.table(pred.name, ls.list$line_search_result),\n measure=c(\"aum\",\"auc\"),\n variable.name=\"var.lower\"\n )[, variable := toupper(var.lower)]\n diff.grid.list[[pred.name]] <- unique(metrics.tall[, .(\n pred.name, pred.diff, step.size, differentiable, variable, value\n )])\n abline.dt.list[[pred.name]] <- data.table(\n pred.name, variable=\"threshold\", search=\"exact\", ls.list$line_search_input)\n vline.dt.list[[pred.name]] <- unique(metrics.tall[\n differentiable==FALSE & variable==\"AUC\", \n .(pred.name, step.size)])\n pred.points.list[[pred.name]] <- data.table(\n pred.name, t(one.pred))\n}\nheat.step <- rbindlist(heat.step.list)\npred.points <- rbindlist(pred.points.list)\ndiff.grid <- rbindlist(diff.grid.list)\nls.points <- rbindlist(ls.points.list)\nls.segs <- rbindlist(ls.segs.list)\nabline.dt <- rbindlist(abline.dt.list)\nvline.dt <- rbindlist(vline.dt.list)\nggplot()+\n geom_tile(aes(\n pos, neg, fill=norm),\n data=metrics.tall[differentiable==TRUE])+\n geom_point(aes(\n pos, neg),\n color=\"red\",\n data=heat.step[, .SD[-.N], by=pred.name])+\n geom_line(aes(\n pos, neg, group=pred.name),\n color=\"red\",\n data=heat.step)+\n geom_point(aes(\n pos, neg),\n shape=21,\n fill=\"white\",\n data=pred.points)+\n scale_fill_gradient(low=\"white\", high=\"blue\")+\n facet_grid(. 
~ variable)+\n coord_equal()\nggplot()+\n ggtitle(\"Overview, select step size\")+\n geom_vline(aes(\n xintercept=step.size),\n data=vline.dt,\n color=\"grey\")+\n theme_bw()+\n theme(panel.margin=grid::unit(1, \"lines\"))+\n theme_animint(width=300, height=300)+\n geom_abline(aes(\n slope=slope, intercept=intercept, color=search),\n data=abline.dt)+\n geom_point(aes(\n 0, intercept, color=search),\n data=abline.dt)+\n facet_grid(variable ~ pred.name, scales=\"free_y\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_point(aes(\n step.size, value, fill=differentiable, color=search),\n size=3,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n geom_point(aes(\n step.size, value, color=search),\n data=data.table(search=\"exact\", ls.points))+\n geom_segment(aes(\n step.min, value.min,\n color=search,\n xend=step.max, yend=value.max),\n data=data.table(search=\"exact\", ls.segs))+\n xlab(\"Step size\")+\n ##scale_y_continuous(\"\", breaks=seq(0, 3, by=1))\n scale_y_continuous(\"\")\n\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(1, \"lines\"))+\n theme_animint(width=300, height=300)+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_point(aes(\n pred.diff, value, fill=differentiable, color=search),\n size=3,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n scale_y_continuous(\"\", breaks=seq(0, 3, by=1))\n\nmin.max.step <- diff.grid[, seq(min(step.size), max(step.size), l=51)]\nslope.int.lines <- abline.dt[, data.table(\n step.size=min.max.step,\n threshold=intercept+slope*min.max.step\n), \nby=.(pred.name, variable, search, intercept, slope)\n][min(abline.dt$intercept) < threshold & threshold < max(abline.dt$intercept)]\nanimint(\n out.dir=\"figure-line-search-interactive\",\n heat=ggplot()+\n ggtitle(\"Loss function, select predictions\")+\n theme_bw()+\n theme_animint(width=600, height=400)+\n geom_tile(aes(\n pos, neg, fill=norm),\n data=metrics.tall[differentiable==TRUE])+\n geom_point(aes(\n pos, neg),\n showSelected=\"pred.name\",\n color=\"red\",\n data=heat.step[, .SD[-.N], by=pred.name])+\n geom_line(aes(\n pos, neg, group=pred.name),\n color=\"red\",\n showSelected=\"pred.name\",\n data=heat.step)+\n geom_point(aes(\n pos, neg),\n shape=21,\n fill=\"white\",\n size=4,\n clickSelects=\"pred.name\",\n data=pred.points)+\n scale_fill_gradient(low=\"white\", high=\"blue\")+\n facet_grid(. 
~ variable)+\n coord_equal(),\n step=ggplot()+\n ggtitle(\"Line search for selected predictions\")+\n geom_vline(aes(\n xintercept=step.size),\n showSelected=\"pred.name\",\n data=vline.dt,\n color=\"grey\")+\n theme_bw()+\n theme(panel.margin=grid::unit(1, \"lines\"))+\n theme_animint(width=400, height=400)+\n geom_line(aes(\n step.size, threshold, color=search, group=intercept),\n showSelected=\"pred.name\",\n data=slope.int.lines)+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n scale_color_manual(values=c(\n exact=\"red\",\n grid=\"black\"))+\n geom_point(aes(\n step.size, value, fill=differentiable, color=search),\n showSelected=\"pred.name\",\n size=3,\n shape=21,\n data=data.table(search=\"grid\", diff.grid))+\n geom_point(aes(\n step.size, value, color=search),\n showSelected=\"pred.name\",\n data=data.table(search=\"exact\", ls.points))+\n geom_segment(aes(\n step.min, value.min,\n color=search,\n xend=step.max, yend=value.max),\n showSelected=\"pred.name\",\n data=data.table(search=\"exact\", ls.segs))+\n xlab(\"Step size\")+\n scale_y_continuous(\"\")\n)\n"
},
{
"alpha_fraction": 0.6654438972473145,
"alphanum_fraction": 0.6710197925567627,
"avg_line_length": 31.764423370361328,
"blob_id": "8f146957304c16c9063a180abca29f3e171b7dc0",
"content_id": "48e36ea5ff12b3a408262a4a9986d2a16a1193ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6815,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 208,
"path": "/figure-aum-optimized-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\n## need to clone https://github.com/tdhock/feature-learning-benchmark\nfolds.dt <- fread(\"../feature-learning-benchmark/labeled_problems_folds.csv\")\naddMeta <- function(dt){\n dt[, set.name := sub(\"/.*\", \"\", prob.dir)]\n dt[, problem := sub(\".*/\", \"\", prob.dir)]\n dt[folds.dt, on=list(set.name, problem)]\n}\nerrors.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_errors.csv\"))\npossible.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_possible_errors.csv\"))\n\ndiff.info <- errors.dt[, aum::aum_diffs_penalty(data.table(example=prob.dir, min.lambda=exp(min.log.penalty), fp, fn), unique(prob.dir)), by=.(set.name, fold)]\ndiff.counts <- diff.info[, .(\n breakpoints=.N,\n examples=length(unique(example))\n), by=.(set.name, fold)]\ndiff.tall <- melt(diff.counts, measure.vars=c(\"breakpoints\", \"examples\"))\ndiff.tall[, .(\n max=max(value),\n mean=mean(value),\n min=min(value),\n folds=.N\n), by=variable]\n\nfold.counts <- possible.dt[, list(\n examples=.N,\n labels=sum(labels)\n), by=.(set.name, fold)]\nfold.counts[, list(\n folds=.N,\n min.examples=min(examples),\n max.examples=max(examples)\n)]\n\ntest.fold.info <- folds.dt[set.name==\"H3K4me3_XJ_immune\" & fold==4]\ntest.fold.errors <- errors.dt[test.fold.info, on=.(set.name, fold, problem)]\ntest.fold.errors[, min.log.lambda := min.log.penalty]\ntest.fold.errors[, max.log.lambda := max.log.penalty]\ntest.fold.errors[, seg.i := cumsum(\n c(1, diff(fp)!=0 | diff(fn) != 0)), by=.(prob.dir)]\npossible.errors <- possible.dt[test.fold.errors, on=list(\n set.name, fold, prob.dir)]\npossible.errors[, possible.fn := possible.tp]\ntest.fold.segs <- test.fold.errors[, .(\n min.log.lambda=min(min.log.lambda),\n max.log.lambda=max(max.log.lambda),\n fp=fp[1],\n fn=fn[1]\n), by=.(prob.dir, seg.i)]\ntest.fold.segs[, mid.log.lambda := (max.log.lambda+min.log.lambda)/2]\ntest.fold.targets <- penaltyLearning::targetIntervals(\n test.fold.errors, \"prob.dir\")\ntest.fold.targets[, width := max.log.lambda-min.log.lambda]\ninitial.pred <- test.fold.targets[order(width==Inf, -width), data.table(\n prob.dir,\n pred.log.lambda=ifelse(\n max.log.lambda==Inf, min.log.lambda+1, ifelse(\n min.log.lambda==-Inf, max.log.lambda-1,\n (min.log.lambda+max.log.lambda)/2)\n )\n)]\ninitial.pred[!is.finite(pred.log.lambda), pred.log.lambda := 0]\nprob.dir.ord <- unique(test.fold.segs$prob.dir)\ndiff.fp.fn <- aum::aum_diffs_penalty(\n test.fold.segs[, `:=`(example=prob.dir, min.lambda = exp(min.log.lambda))],\n prob.dir.ord)\ndiff.fp.fn[example==0]\ntest.fold.segs[prob.dir==prob.dir.ord[1], .(min.log.lambda, max.log.lambda, fp, fn)]\npred.vec <- initial.pred[prob.dir.ord, -pred.log.lambda, on=\"prob.dir\"]\n\npossible.segs <- possible.dt[test.fold.segs, on=\"prob.dir\"][, `:=`(\n errors = fp+fn,\n possible.fn = possible.tp\n )]\nroc.initial <- penaltyLearning::ROChange(\n possible.segs, initial.pred, problem.vars=\"prob.dir\")\nroc.initial$aum\nN.breaks <- nrow(diff.fp.fn)\nmax.intersections <- N.breaks*(N.breaks-1)/2\nls.out <- aum::aum_line_search_grid(\n diff.fp.fn, pred.vec, maxIterations=max.intersections, n.grid=100)\nls.out$aum\nplot(ls.out)\n\npred.mat <- matrix(pred.vec, length(pred.vec), n.steps)\ngrad.mat <- matrix(rowMeans(ls.out$derivative_mat), length(pred.vec), n.steps)\nn.steps <- length(ls.out$line_search_result$step.size)\nstep.mat <- matrix(\n ls.out$line_search_result$step.size, length(pred.vec), n.steps, byrow=TRUE)\nafter.mat <- pred.mat-step.mat*grad.mat\nexpected <- 
apply(after.mat, 2, function(pred)aum::aum(diff.fp.fn,pred)$aum)\nrbind(ls.out$line_search_result$aum, expected)\n\naum.list <- aum::aum(diff.fp.fn, pred.vec)\ndescent.direction.vec <- -rowMeans(aum.list$derivative_mat)\ndirection.dt <- data.table(\n example=seq(0, length(pred.vec)-1),\n weight=pred.vec,\n direction=descent.direction.vec)\n(diffs.with.direction <- direction.dt[diff.fp.fn, on=\"example\"][, `:=`(\n slope = -direction,\n intercept=pred-weight\n)][])\n##TODO Jadon plug in your code.\nggplot()+\n geom_point(aes(\n 0, intercept),\n data=diffs.with.direction)+\n geom_abline(aes(\n slope=slope, intercept=intercept),\n data=diffs.with.direction)+\n coord_cartesian(xlim=c(-5,5))\n\n\ntest.fold.breaks <- test.fold.errors[, .(breaks=.N-1), by=prob.dir]\ntest.fold.breaks[, .(\n total.breaks=sum(breaks),\n max.breaks=max(breaks),\n mean.breaks=mean(breaks),\n min.breaks=min(breaks),\n examples=.N\n)]\ndiff.counts[test.fold.info[1], on=c(\"set.name\", \"fold\")]\n\n## initialization:\npred.dt <- data.table(initial.pred)\ngetROC <- function(p){\n L <- penaltyLearning::ROChange(possible.errors, p, \"prob.dir\")\n non.smooth <- L$aum.grad[lo != hi]\n if(nrow(non.smooth))print(non.smooth)\n L\n}\nstep.number <- 1\nstep.size <- 1\nroc.list <- getROC(pred.dt)\niterations.dt.list <- list()\nimprovement <- Inf\nwhile(1e-6 < improvement){\n ## these depend on predictions:\n while({\n step.dt <- pred.dt[roc.list$aum.grad, .(\n prob.dir,\n pred.log.lambda = pred.log.lambda-step.size*lo\n ), on=.(prob.dir)]\n step.list <- getROC(step.dt)\n roc.list$aum < step.list$aum\n }){\n ## TODO Jadon replace step size halving with your algorithm,\n ## either go all the way to the end of the path, quadratic\n ## max_iterations=N*(N-1)/2 or just stop with log linear\n ## algorithm, max_iterations=N.\n step.size <- step.size/2\n }\n cat(sprintf(\n \"step=%d size=%e aum=%f->%f auc=%f->%f\\n\",\n step.number,\n step.size,\n roc.list$aum,\n step.list$aum,\n roc.list$auc,\n step.list$auc))\n iterations.dt.list[[paste(step.number)]] <- data.table(\n step.number,\n aum=roc.list$aum,\n auc=roc.list$auc,\n min.errors=roc.list$thresholds[threshold==\"min.error\", errors])\n improvement <- roc.list$aum-step.list$aum\n pred.dt <- step.dt\n roc.list <- step.list\n step.number <- step.number + 1\n step.size <- step.size*2\n}\nmid.pred <- test.fold.segs[pred.dt, .(\n prob.dir,\n improved.pred=pred.log.lambda,\n mid.log.lambda), on=.(\n prob.dir,\n min.log.lambda < pred.log.lambda,\n max.log.lambda > pred.log.lambda)]\nmid.pred[, pred.log.lambda := ifelse(\n is.finite(mid.log.lambda), improved.pred, mid.log.lambda)]\n\niterations.dt <- do.call(rbind, iterations.dt.list)\n\npred.list <- list(\n initial=initial.pred,\n ##mid=mid.pred,\n improved=pred.dt)\nout.auc.list <- list()\nout.roc.list <- list()\nfor(pred.name in names(pred.list)){\n pred <- pred.list[[pred.name]]\n L <- penaltyLearning::ROChange(possible.errors, pred, \"prob.dir\")\n print(L$auc)\n out.auc.list[[paste(pred.name)]] <- with(L, data.table(\n pred.name,\n thresholds[threshold==\"min.error\"],\n auc, aum))\n out.roc.list[[paste(pred.name)]] <- data.table(pred.name, L$roc)\n}\n\nout.list <- list(\n iterations=iterations.dt,\n roc=do.call(rbind, out.roc.list),\n auc=do.call(rbind, out.auc.list))\n\nsaveRDS(out.list, \"figure-aum-optimized-data.rds\")\n"
},
{
"alpha_fraction": 0.661102294921875,
"alphanum_fraction": 0.6693163514137268,
"avg_line_length": 30.983051300048828,
"blob_id": "d672555ddedabcede090ef38f8ffb94c40d0d756",
"content_id": "af234ff92b103d2817b0e4de1508db0208a9b275",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3774,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 118,
"path": "/figure-aum-neural-networks.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\nsteps.dt <- data.table::fread(\n \"figure-aum-neural-networks-data.csv\"\n)[epoch<10]\nsteps.dt[,which(is.na(out_value))[1]]\nsteps.dt[, out_num := as.numeric(out_value)]\nsteps.dt[, iteration := epoch*(max(step)+1)+step]\n## analysis of missing values, seems to happen when loss\n## underflows. Suggests to use sum rather than mean.\ncount.values <- function(DT){\n dcast(\n DT,\n loss + lr+set_name ~ data_set,\n fun.aggregate=length)\n}\ncount.values(steps.dt[is.na(out_num)])\ncount.values(steps.dt[out_value==0])\nsteps.dt[is.na(out_num)]\nsteps.dt[is.na(out_num) & !is.na(out_value)]\nstr(steps.dt)\n\none <- steps.dt[\n data_set==\"MNIST\" & out_name==\"AUC\" & set_name==\"validation\"]\nggplot()+\n facet_grid(lr ~ seed, labeller=label_both)+\n geom_line(aes(\n iteration, out_num, color=loss),\n data=one)\n\n## check if all initializations were the same.\ninit.dt <- dcast(\n steps.dt[iteration==0&out_name==\"AUC\"],\n seed+data_set+set_name~.,\n fun.aggregate=list(min,max),\n value.var=\"out_num\")\ninit.dt[1e-5 < out_num_max-out_num_min]#should be empty.\n\nvalid.auc <- steps.dt[set_name==\"validation\" & out_name==\"AUC\"]\nby.vars <- c(\"loss\",\"seed\",\"data_set\")\nvalid.vars <- c(by.vars,\"lr\")\ntest.vars <- c(valid.vars, \"iteration\")\nselected.dt <- valid.auc[, .SD[which.max(out_num)], by=by.vars]\nselect.test <- selected.dt[,test.vars,with=FALSE]\nall.test.auc <- steps.dt[set_name==\"test\" & out_name==\"AUC\"]\nselect.test.auc <- all.test.auc[select.test, on=names(select.test)]\nggplot()+\n facet_grid(. ~ data_set, labeller=label_both)+\n geom_point(aes(\n out_num, loss),\n data=select.test.auc)\nwide.test.auc <- dcast(\n select.test.auc,\n loss + data_set ~ .,\n fun.aggregate=list(mean,sd),\n value.var=\"out_num\")\nshow.names <- c(\n balanced=\"logistic.weighted\",\n logistic=\"logistic.unweighted\",\n AUM=\"AUM.count\",\n AUM_rate=\"AUM.rate\")\nwide.test.auc[, loss.name := ifelse(\n loss %in% names(show.names), show.names[loss], loss)]\nlevs <- wide.test.auc[data_set==\"MNIST\"][order(out_num_mean), loss.name]\nwide.test.auc[, loss.fac := factor(loss.name, levs)]\ngg <- ggplot()+\n facet_grid(. 
~ data_set, labeller=label_both)+\n geom_point(aes(\n out_num_mean, loss.fac),\n shape=1,\n data=wide.test.auc)+\n geom_segment(aes(\n out_num_mean+out_num_sd, loss.fac,\n xend=out_num_mean-out_num_sd, yend=loss.fac),\n data=wide.test.auc)+\n scale_y_discrete(\n \"Loss function\")+\n scale_x_continuous(paste(\n \"Test AUC\",\n \"(Mean +/- SD over 4 random initializations of neural network weights)\"))\npng(\n \"figure-aum-neural-networks-test-auc.png\",\n width=7, height=1.3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\np.wide <- dcast(select.test.auc, data_set+seed ~ loss,value.var=\"out_num\")\np.tall <- melt(p.wide, measure=c(\"AUM\",\"balanced\",\"logistic\"))\np.tall[, {\n t.test(\n AUM_rate, value, alternative=\"greater\", paired=TRUE\n )[c(\"estimate\",\"p.value\")]\n}, keyby=.(data_set,variable)]\n \nselect.valid <- selected.dt[,valid.vars,with=FALSE]\nselect.valid.auc <- valid.auc[select.valid,on=names(select.valid)]\ngg <- ggplot()+\n coord_cartesian(ylim=c(0.5,1))+\n geom_line(aes(\n iteration,out_num,color=loss),\n data=select.valid.auc)+\n facet_grid(data_set ~ seed, labeller=label_both)\nprint(gg)\npng(\n \"figure-aum-neural-networks-best-valid-auc-curves.png\",\n width=10, height=4, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nsubtrain.loss <- steps.dt[set_name==\"subtrain\" & out_name==\"loss\"]\nselected.subtrain <- subtrain.loss[select.valid,on=names(select.valid)]\nselected.subtrain[iteration>100 & data_set==\"FashionMNIST\" & loss==\"AUM_rate\"]\nggplot()+\n geom_line(aes(\n iteration, out_num, color=factor(seed)),\n data=selected.subtrain)+\n facet_grid(loss ~ data_set, labeller=label_both, scales=\"free\")+\n scale_y_log10()\n"
},
{
"alpha_fraction": 0.648654580116272,
"alphanum_fraction": 0.6563115119934082,
"avg_line_length": 32.12318801879883,
"blob_id": "f441efb7c3e0db0bfe1fdc059d9f55a63676b68c",
"content_id": "92a3c524f872385f343a9e43a9127a3a08777e59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4571,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 138,
"path": "/figure-linear-model-test-analyze.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\naum.csv <- Sys.glob(\n \"../neuroblastoma-data/data/*/cv/*/testFolds/*/linear-model-aum.csv\")\nall.it <- data.table(out.csv=aum.csv)[, {\n data.table::fread(out.csv)\n}, by=out.csv]\n\ncount.dt <- all.it[, .(\n count=.N\n), by=.(data.name, cv.type, test.fold)]\nstopifnot(nrow(count.dt)==length(aum.csv))\n\nsubtrain.it <- all.it[set==\"subtrain\"]\nsubtrain.it[, diff := c(NA, diff(aum)), by=.(init.name, data.name, test.fold, seed)]\nsubtrain.it[, .(init.name, data.name, test.fold, iteration, aum, diff)]\nsubtrain.it[diff>1e-6]\ngg <- ggplot()+\n ggtitle(\"check if train AUM decreases\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_line(aes(\n iteration, aum,\n group=paste(seed, init.name)),\n data=subtrain.it)+\n facet_grid(init.name + data.name + test.fold ~ ., scales=\"free\", labeller=label_both)\nprint(gg)\n\nvalidation.it <- all.it[set==\"validation\"]\nggplot()+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n scale_y_log10()+\n geom_line(aes(\n iteration, aum, color=init.name,\n group=paste(seed, init.name)),\n data=validation.it)+\n geom_point(aes(\n iteration, aum, color=init.name,\n group=paste(seed, init.name)),\n data=validation.it[\n ,\n .SD[which.min(aum)],\n by=.(data.name, test.fold, init.name, seed)])+\n facet_grid(data.name + test.fold ~ ., scales=\"free\", labeller=label_both)\n\nvalid.best.ids <- all.it[\n set==\"validation\",\n .SD[which.min(aum), .(iteration)],\n by=.(data.name, cv.type, test.fold, init.name, seed)]\ntest.best.ids <- all.it[\n set==\"test\",\n .SD[which.min(aum), .(iteration)],\n by=.(data.name, cv.type, test.fold, init.name, seed)]\n\n## model selection.\ntest.it1 <- all.it[set==\"test\" & iteration==1]\ntest.selected <- all.it[set==\"test\"][valid.best.ids, on=names(valid.best.ids)]\ntest.best <- all.it[set==\"test\"][test.best.ids, on=names(test.best.ids)]\n\n## compare with best predictions (no linear model).\ntest.show <- rbind(\n data.table(iterations=\"initial\", test.it1),\n data.table(iterations=\"best.linear\", test.best),\n data.table(iterations=\"selected\", test.selected))\nifac <- function(x)factor(\n x, c(\"initial\", \"selected\", \"best.linear\"))\ntest.show[, Iterations := ifac(iterations)]\ngg <- ggplot()+\n ggtitle(\"Test AUM, selected=min valid aum, best=min test aum, max it=50\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_point(aes(\n aum, Iterations, color=factor(test.fold)),\n shape=1,\n data=test.show)+\n scale_y_discrete(drop=FALSE)+\n facet_grid(\n init.name ~ data.name + cv.type,\n scales=\"free\", labeller=label_both)\nprint(gg)\n\ngg <- ggplot()+\n ggtitle(\"Test AUM, selected=min valid aum, best=min test aum, max it=50\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_point(aes(\n aum, Iterations, color=factor(test.fold)),\n shape=1,\n data=test.show[init.name==\"IntervalRegressionCV\"])+\n scale_y_discrete(drop=FALSE)+\n facet_grid(\n init.name ~ data.name + cv.type,\n scales=\"free\", labeller=label_both)\nprint(gg)\n\ntest.show[, neg.auc := -auc]\ntest.show.tall <- melt(\n test.show[init.name==\"IntervalRegressionCV\"],\n measure.vars=c(\"neg.auc\", \"error.percent\", \"aum\"),\n variable.name=\"metric\")\ntest.iCV <- dcast(\n test.show.tall,\n data.name + cv.type + test.fold + metric + seed ~ iterations)\ntest.iCV.tall <- melt(\n test.iCV,\n measure.vars=c(\"best.linear\", \"selected\"),\n variable.name=\"iteration\")\n\ntest.iCV.tall[, improvement := value - initial]\nimp.stats <- 
test.iCV.tall[, .(\n median=median(improvement),\n p.value=tryCatch({\n t.test(initial, value)[[\"p.value\"]]\n }, error=function(e){\n NA_real_\n })\n), by=.(data.name, cv.type, test.fold, iteration, metric)][!is.na(p.value)][median < 0][order(p.value)]\ntop10 <- imp.stats[metric==\"aum\" & iteration==\"best.linear\"][1:min(.N, 10)]\nsome.types <- unique(top10[, .(data.name, cv.type, test.fold)])\n\ntest.show[, Data.name := paste0(\"\\n\", data.name)]\ngg <- ggplot()+\n ggtitle(\"Optimizing train AUM can reduce test AUM if number of iterations is chosen correctly\")+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n geom_point(aes(\n aum, Iterations),\n shape=1,\n data=test.show[init.name==\"IntervalRegressionCV\"][some.types, on=names(some.types)])+\n scale_y_discrete(drop=FALSE)+\n facet_grid(\n . ~ Data.name + cv.type + test.fold,\n scales=\"free\", labeller=label_both)+\n xlab(\"Test AUM, each dot is a different random seed/initialization for IntervalRegressionCV\")\npng(\"figure-linear-model-test-analyze.png\", width=20, height=2.5, units=\"in\", res=100)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.6222968101501465,
"alphanum_fraction": 0.6372110247612,
"avg_line_length": 27.838708877563477,
"blob_id": "fa8f8565bd469d88e521e5d846af02b4ae67852b",
"content_id": "a96d3164c136941314225b972a064b6a3c639fbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2682,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 93,
"path": "/figure-aum-optimized.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\nresult.list <- readRDS(\"figure-aum-optimized-data.rds\")\niterations.tall <- melt(result.list$iterations, id=\"step.number\")\niterations.tall[, Variable := ifelse(\n variable==\"min.errors\", \"label errors\", toupper(variable))]\ngg <- ggplot()+\n geom_line(aes(\n step.number, value),\n data=iterations.tall)+\n facet_grid(Variable ~ ., scales=\"free\")+\n ylab(\"\")+\n xlab(\"Iteration of gradient descent algorithm\")\npng(\"figure-aum-optimized-iterations.png\", width=3, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\ntwo.it <- iterations.tall[step.number %in% range(step.number)]\ntwo.it[, emph := ifelse(step.number==1, \"initial\", \"optimized\")]\nemph <- gg+\n geom_point(aes(\n step.number, value, color=emph),\n data=two.it)+\n theme(legend.position = \"none\")\npng(\"figure-aum-optimized-iterations-emph.png\", width=3, height=3, units=\"in\", res=200)\nprint(emph)\ndev.off()\n\nresult.list$auc[, `:=`(x=c(0.25), y=c(0.75, 0.5))]\nresult.list$roc[, is.monotonic := c(NA, (diff(fp)<=0) & (diff(fn)>=0)), by=pred.name]\nresult.list$roc[, line.i := 1:.N, by=pred.name]\nresult.list$roc[, range(line.i), by=pred.name]\n## same number of monotonic moves, makes sense because all the same\n## error diffs but happening at different thresholds.\nresult.list$roc[, .(n.monotonic=sum(is.monotonic[-1])), by=pred.name]\nggplot()+\n geom_path(aes(\n FPR, TPR, color=pred.name),\n data=result.list$roc)+\n geom_point(aes(\n FPR, TPR, color=pred.name),\n data=result.list$roc[is.monotonic==FALSE])+\n geom_text(aes(\n FPR, TPR, color=pred.name, label=line.i),\n hjust=1,vjust=1,\n data=result.list$roc[is.monotonic==FALSE])+\n geom_point(aes(\n FPR, TPR, color=pred.name),\n fill=\"white\",\n shape=21,\n data=result.list$auc)+\n geom_segment(aes(\n x, y,\n xend=FPR, yend=TPR,\n color=pred.name),\n data=result.list$auc)+\n geom_label(aes(\n x, y, color=pred.name,\n label=sprintf(\n \"%s errors=%d auc=%.2f\",\n pred.name, errors, auc)),\n size=3,\n hjust=0,\n data=result.list$auc)+\n coord_equal()+\n guides(color=\"none\")\n\ngg <- ggplot()+\n geom_path(aes(\n FPR, TPR, color=pred.name),\n data=result.list$roc)+\n geom_point(aes(\n FPR, TPR, color=pred.name),\n fill=\"white\",\n shape=21,\n data=result.list$auc)+\n geom_segment(aes(\n x, y,\n xend=FPR, yend=TPR,\n color=pred.name),\n data=result.list$auc)+\n geom_label(aes(\n x, y, color=pred.name,\n label=sprintf(\n \"%s errors=%d auc=%.2f\",\n pred.name, errors, auc)),\n size=3,\n hjust=0,\n data=result.list$auc)+\n coord_equal()+\n guides(color=\"none\")\npng(\"figure-aum-optimized.png\", width=3, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.5980057716369629,
"alphanum_fraction": 0.6090264916419983,
"avg_line_length": 29.00787353515625,
"blob_id": "3e8cf7a519bfc4c376f517c528937d145b79c3e2",
"content_id": "d1e82ae2667b093d705c76e82dda7de7465397d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3811,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 127,
"path": "/figure-auc-improved.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nauc.improved <- readRDS(\"auc.improved.rds\")\n\nnorm.list <- list(\n l1=function(x)sum(abs(x)),\n l0=function(x)sum(x != 0))\nnorm.dt <- auc.improved[, {\n diff.wide <- roc[[1]][, lapply(.SD, diff), .SDcols=c(\"FPR\",\"TPR\")]\n diff.tall <- melt(diff.wide, measure=c(\"FPR\", \"TPR\"))\n data.table(norm.name=names(norm.list))[, {\n norm.fun <- norm.list[[norm.name]]\n diff.tall[value!=0, .(\n norm.value=as.numeric(norm.fun(value))\n ), by=.(variable, sign=sign(value))]\n }, by=norm.name]\n}, by=.(fold, set.name, initialization, pred.name)]\nnorm.wide <- dcast(\n norm.dt, \n fold + set.name + initialization + variable + \n sign + norm.name ~ pred.name , value.var=\"norm.value\")\nnorm.wide[abs(improved - initial)>1e-5] ##??\n\nmoves.dt <- auc.improved[order(-min.thresh), {\n diff.wide <- roc[[1]][, lapply(.SD, diff), .SDcols=c(\"FPR\",\"TPR\")]\n diff.wide[, `:=`(\n fp.move = fcase(\n FPR > 0, \"right\",\n FPR < 0, \"left\"),\n tp.move = fcase(\n TPR > 0, \"up\",\n TPR < 0, \"down\"))\n ][, move := fcase(\n !is.na(fp.move) & !is.na(tp.move), paste0(tp.move, \"+\", fp.move),\n is.na(fp.move), tp.move,\n is.na(tp.move), fp.move\n )]\n diff.wide[, .(\n moves=as.numeric(.N),\n FPR=sum(FPR),\n TPR=sum(TPR)\n ), by=move]\n}, by=.(fold, set.name, initialization, pred.name)]\nmoves.dt[is.na(move)]\nmoves.tall <- melt(moves.dt, measure=c(\"moves\", \"FPR\", \"TPR\"))\nmoves.wide <- dcast(\n moves.tall, \n fold + set.name + initialization + variable + move ~ pred.name)\nmoves.wide[order(initial-improved)]\n\nauc.wide <- dcast(\n auc.improved,\n fold + set.name + initialization ~ pred.name , value.var=\"auc\")\nbest <- auc.wide[initialization==\"min.error\"][order(initial-improved)][1]\n\non.vec <- c(\"fold\", \"set.name\", \"initialization\")\nauc.improved[best, on=on.vec]\nmoves.wide[best, .(\n move, variable, initial, improved, diff=round(initial-improved, 6)\n), on=on.vec]\n\nroc.dt <- auc.improved[, {\n roc[[1]][, .(\n thresh=c(-Inf,max.thresh), FPR=c(1,FPR), TPR=c(1,TPR)\n )]\n}, by=.(fold, set.name, initialization, pred.name)]\nroc.best <- roc.dt[best, on=on.vec]\n\nregular.roc <- roc.dt[, {\n reg.dt <- data.table(\n FPR=cummin(FPR), TPR=cummin(TPR)\n )\n for(XPR in c(\"FPR\",\"TPR\")){\n reg.dt[, count := .N, by=XPR]\n reg.dt[, keep := TRUE]\n reg.dt[count>1, keep := c(TRUE, rep(FALSE,.N-2), TRUE), by=XPR]\n reg.dt <- reg.dt[keep==TRUE]\n }\n reg.dt\n}, by=.(fold, set.name, initialization, pred.name)]\nregular.auc <- regular.roc[, {\n AUC.WeightedROC <- WeightedROC::WeightedAUC(.SD)\n AUC.geometry <- geometry::polyarea(c(FPR,1), c(TPR,0))\n if(!isTRUE(all.equal(AUC.WeightedROC, AUC.geometry))){\n print(rbind(AUC.WeightedROC, AUC.geometry))\n print(.SD)\n browser()\n }\n data.table(auc.regular=AUC.WeightedROC)\n}, by=.(fold, set.name, initialization, pred.name)]\nauc.both <- auc.improved[regular.auc, on=.NATURAL]\nauc.best <- auc.both[best, .(pred.name, auc, auc.regular), on=on.vec]\nregular.best <- regular.roc[best, on=on.vec]\nauc.best[, `:=`(diff=auc.regular-auc, y=c(0.3, 0.6))]\n\ngg <- ggplot()+\n theme_bw()+\n theme(legend.position=\"none\")+\n coord_equal()+\n geom_label(aes(\n 1, y, \n fill=pred.name,\n label=sprintf(\n \"%s Full/color AUC=%.4f\\nMonotonic/grey AUC=%.4f\\n AUC Difference=%.4f\",\n pred.name, auc, auc.regular, diff)),\n hjust=1,\n data=auc.best)+\n geom_path(aes(\n FPR, TPR, group=pred.name),\n size=2,\n color=\"grey50\",\n data=regular.best)+\n geom_path(aes(\n FPR, TPR, color=pred.name),\n size=1,\n data=roc.best)+\n 
geom_point(aes(\n FPR, TPR, fill=pred.name),\n shape=21,\n data=regular.best)+\n scale_x_continuous(\n \"False Positive Rate\")+\n scale_y_continuous(\n \"True Positive Rate\")\npng(\"figure-auc-improved.png\", width=4, height=4, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.6310344934463501,
"alphanum_fraction": 0.6566243171691895,
"avg_line_length": 32.803680419921875,
"blob_id": "076db02c264c4f388e18bff52220522feb8cf99f",
"content_id": "94db03bb04893f6e1fbf86a730b56891780a0e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5510,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 163,
"path": "/figure-aum-grad-speed.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nreplacements <- c(\n \"squared.hinge.each.example\"=\"Squared\\nHinge\\nEach\\nExample\",\n \"Squared Hinge All Pairs\"=\"Squared Hinge\\nAll Pairs\",\n aum=\"AUM\")\nrow_fun <- function(...){\n form.list <- as.list(match.call()[-1])\n sym <- sapply(form.list, is.symbol)\n names(form.list)[sym] <- paste(form.list[sym])\n form.list[sym] <- NA\n make_row <- function(){}\n formals(make_row) <- form.list\n body(make_row) <- as.call(lapply(c(\"data.frame\", names(form.list)), as.symbol))\n make_row\n}\nf <- row_fun(Problem, file.csv, col.name, col.value)\ncsv.file.dt <- rbind(\n f(\"Changepoint detection\",\"figure-aum-grad-speed-data.csv\",\"pred.type\",\"pred.rnorm\"),\n f(\"Binary classification\",\"figure-aum-grad-speed-binary-cpp-data.csv\",\"prediction.order\",\"unsorted\"))\n\nproblem.dt.list <- list()\nfor(file.i in 1:nrow(csv.file.dt)){\n csv.file.row <- csv.file.dt[file.i,]\n timing.dt <- data.table::fread(csv.file.row[[\"file.csv\"]])\n problem.dt.list[[file.i]] <- timing.dt[\n algorithm != \"sort\" &\n get(csv.file.row[[\"col.name\"]]) == csv.file.row[[\"col.value\"]],\n data.table(\n csv.file.row, N, seconds,\n Algorithm=ifelse(\n algorithm%in%names(replacements),\n replacements[algorithm],\n algorithm))\n ]\n}\n(problem.dt <- do.call(rbind, problem.dt.list))\n\nalgo.colors <- c(\n \"Squared Hinge\\nAll Pairs\"=\"#A6CEE3\",\n \"Squared\\nHinge\\nEach\\nExample\"=\"#1F78B4\",\n \"Logistic\"=\"#B2DF8A\", #\"#33A02C\",\"#FB9A99\", \"#E31A1C\", \"#FDBF6F\", \"#FF7F00\", \"#CAB2D6\", \"#6A3D9A\", \"#FFFF99\", \"#B15928\"\n \"AUM\"=\"black\"\n)\nproblem.stats <- problem.dt[, .(\n max=max(seconds),\n median=median(seconds),\n min=min(seconds),\n times=.N\n), by=.(Problem, N, Algorithm)]\nmydl <- function(data){\n geom_dl(aes(N, median, label = Algorithm, color = Algorithm), \n method = list(cex=0.8, \"right.polygons\"),\n data = data)\n}\nbreaks <- 10^seq(0, 6)\ndl <- ggplot()+\n theme(legend.position=\"none\")+\n ##facet_wrap(. ~ Problem, labeller=label_both, scales=\"free\")+\n facet_grid(. 
~ Problem, labeller=label_both, scales=\"free\", space=\"free\")+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=Algorithm),\n alpha=0.5,\n data=problem.stats)+\n geom_line(aes(\n N, median, color=Algorithm),\n data=problem.stats)+\n geom_blank(aes(\n N*5, median, color=Algorithm),\n data=problem.stats)+\n geom_blank(aes(\n x,y),\n data=data.table(x=100, y=10^c(-5, -2)))+\n scale_color_manual(values=algo.colors)+\n scale_fill_manual(values=algo.colors)+\n mydl(problem.stats[Algorithm != \"Squared Hinge\\nAll Pairs\",])+\n mydl(problem.stats[Algorithm == \"Squared Hinge\\nAll Pairs\",])+\n scale_x_log10(\n \"n = number of predicted values = size of gradient vector\",\n breaks=breaks,\n labels=sprintf(\"%.e\", breaks))+\n scale_y_log10(paste0(\"Computation time in seconds,\nmedian line, min/max band over \",problem.stats[1, times], \" timings\"),\nbreaks=10^seq(-6, 0))\npng(\"figure-aum-grad-speed-both.png\", width=7, height=3.2, res=200, units=\"in\")\nprint(dl)\ndev.off()\n\nbinary.stats <- problem.stats[Problem==\"Binary classification\"]\ndl <- ggplot()+\n theme(legend.position=\"none\")+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=Algorithm),\n alpha=0.5,\n data=binary.stats)+\n geom_line(aes(\n N, median, color=Algorithm),\n data=binary.stats)+\n scale_color_manual(values=algo.colors)+\n scale_fill_manual(values=algo.colors)+\n mydl(binary.stats[Algorithm != \"Squared Hinge\\nAll Pairs\",])+\n mydl(binary.stats[Algorithm == \"Squared Hinge\\nAll Pairs\",])+\n scale_x_log10(\n \"n = number of predicted values = size of gradient vector\",\n breaks=breaks,\n limits=c(1e1, 2e6),\n labels=sprintf(\"%.e\", breaks))+\n scale_y_log10(paste0(\"Computation time in seconds,\nmedian line, min/max band over \",problem.stats[1, times], \" timings\"),\nbreaks=10^seq(-6, 0))\ndl\npng(\"figure-aum-grad-speed-binary.png\", width=7, height=3.4, res=200, units=\"in\")\nprint(dl)\ndev.off()\n\ntiming.dt <- data.table::fread(\"figure-aum-grad-speed-data.csv\")\ntiming.stats <- timing.dt[, .(\n max=max(seconds),\n median=median(seconds),\n min=min(seconds),\n times=.N\n), by=.(N, pred.type, algorithm)]\nsome.stats <- timing.stats[pred.type==\"pred.rnorm\" & algorithm != \"sort\"]\nsome.stats[, Algorithm := replacements[algorithm] ]\ngg <- ggplot()+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=Algorithm),\n alpha=0.5,\n data=some.stats)+\n geom_line(aes(\n N, median, color=Algorithm),\n data=some.stats)+\n scale_color_manual(values=algo.colors)+\n scale_fill_manual(values=algo.colors)+\n scale_x_log10(\n \"Number of predicted values in gradient computation\",\n limits=c(10, 12000),\n breaks=c(10, 100, 1000, timing.stats[, max(N)]))+\n scale_y_log10(paste0(\"Computation time in seconds,\nmedian line, min/max band over \",timing.stats[1, times], \" timings\"))+\n ggtitle(\"Changepoint detection\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-aum-grad-speed-random.png\", width=5, height=4, res=200, units=\"in\")\nprint(dl)\ndev.off()\ngg <- ggplot()+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=algorithm),\n alpha=0.5,\n data=timing.stats)+\n geom_line(aes(\n N, median, color=algorithm),\n data=timing.stats)+\n facet_grid(. ~ pred.type)+\n scale_x_log10(\n \"Number of predicted values\",\n breaks=c(10, 100, 1000, timing.stats[, max(N)]))+\n scale_y_log10(paste0(\"Computation time in seconds,\nmedian line, min/max band\nover \",timing.stats[1, times], \" timings\"))\npng(\"figure-aum-grad-speed.png\", width=7, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.5926993489265442,
"alphanum_fraction": 0.6147934794425964,
"avg_line_length": 23.785715103149414,
"blob_id": "c24517844e2810d815879d4f45d70e01bece6075",
"content_id": "8362a4878d1ad6bad370556b0fcd29dde27a37a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 42,
"path": "/figure-aum-train.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\nresult.list <- readRDS(\"figure-aum-train-data.rds\")\n\niterations.tall <- melt(result.list$iterations, id=\"step.number\")\ngg <- ggplot()+\n geom_line(aes(\n step.number, value),\n data=iterations.tall)+\n facet_grid(variable ~ ., scales=\"free\")+\n ylab(\"\")\npng(\"figure-aum-train-iterations.png\", width=3, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nresult.list$auc[, `:=`(x=c(0.25), y=c(0.75, 0.5))]\ngg <- ggplot()+\n geom_path(aes(\n FPR, TPR, color=pred.name),\n data=result.list$roc)+\n geom_point(aes(\n FPR, TPR, color=pred.name),\n fill=\"white\",\n shape=21,\n data=result.list$auc)+\n geom_segment(aes(\n x, y,\n xend=FPR, yend=TPR,\n color=pred.name),\n data=result.list$auc)+\n geom_label(aes(\n x, y, color=pred.name,\n label=sprintf(\n \"%s errors=%d auc=%.2f\",\n pred.name, errors, auc)),\n size=3,\n hjust=0,\n data=result.list$auc)+\n coord_equal()+\n guides(color=\"none\")\npng(\"figure-aum-train.png\", width=3, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.5882075428962708,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 31.106060028076172,
"blob_id": "e12c318d87db270639f746fcfc3ba6447f9adffb",
"content_id": "598b2cf5629603489960771af21c99e80946c22f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2120,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 66,
"path": "/figure-aum-grad-speed-binary-cpp-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nN.vec <- as.integer(10^seq(1, 6, by=0.5))\nmax.N <- max(N.vec)\nall.labels.vec <- rep(c(-1,1), l=max.N)\nall.diffs.dt <- aum::aum_diffs_binary(all.labels.vec)\nset.seed(1)\nall.pred.vec <- rnorm(max.N)\ntiming.dt.list <- list()\n\ndo.sub <- function(...){\n mcall <- match.call()\n L <- as.list(mcall[-1])\n for(arg.name in names(L)){\n maybe.lang <- L[[arg.name]]\n if(is.language(maybe.lang)){\n L[[arg.name]] <- substitute(\n result.list[[NAME]] <- EXPR,\n list(NAME=arg.name, EXPR=maybe.lang))\n }\n }\n L\n}\n\nfor(N in N.vec){\n print(N)\n N.pred.vec <- all.pred.vec[1:N]\n N.diffs.dt <- all.diffs.dt[1:N]\n N.labels.vec <- sort(all.labels.vec[1:N])\n order.list <- list(sorted=sort(N.pred.vec), unsorted=N.pred.vec)\n for(prediction.order in names(order.list)){\n order.pred.vec <- order.list[[prediction.order]]\n result.list <- list()\n m.args <- c(do.sub(`Logistic`={\n aum:::logistic_grad(order.pred.vec, N.labels.vec)\n }, AUM={\n aum::aum(N.diffs.dt, order.pred.vec)\n }),\n if(N < 1e4)do.sub(`Squared Hinge All Pairs`={\n is.positive <- N.labels.vec == 1\n pairs.dt <- data.table(expand.grid(\n positive=which(is.positive),\n negative=which(!is.positive)))\n margin <- 1\n pairs.dt[, diff := order.pred.vec[positive]-order.pred.vec[negative]-margin]\n pairs.dt[, diff.clipped := ifelse(diff<0, diff, 0)]\n pairs.tall <- data.table::melt(\n pairs.dt,\n measure.vars=c(\"positive\", \"negative\"),\n value.name=\"pred.i\",\n variable.name=\"label\")\n pairs.tall[, grad.sign := ifelse(label==\"positive\", 1, -1)]\n grad.dt <- pairs.tall[, .(\n gradient=sum(grad.sign*diff.clipped)\n ), keyby=pred.i]\n grad.dt[[\"gradient\"]]\n }),\n times=10)\n timing.df <- do.call(microbenchmark::microbenchmark, m.args)\n timing.dt.list[[paste(N, prediction.order)]] <- with(timing.df, data.table(\n N, prediction.order, seconds=time/1e9, algorithm=expr))\n }\n}\n(timing.dt <- do.call(rbind, timing.dt.list))\n\ndata.table::fwrite(timing.dt, \"figure-aum-grad-speed-binary-cpp-data.csv\")\n\n"
},
{
"alpha_fraction": 0.594363808631897,
"alphanum_fraction": 0.619982898235321,
"avg_line_length": 21.960784912109375,
"blob_id": "b680f72104fea764ecd87026d5cb843a209d208d",
"content_id": "2766f2db2909a4652a79c239e0d8bb60d2e35ca1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1171,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 51,
"path": "/figure-over-one.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nbefore.dt <- data.table(\n tp=0,\n fp=0,\n possible.tp=1,\n possible.fp=1)\nrep.dt <- data.table(\n tp=c(1, 1, 0, 0),\n fp=c(0, 1, 1, 0),\n possible.tp=1,\n possible.fp=1)\nafter.dt <- data.table(\n tp=c(1, 1),\n fp=c(0, 1),\n possible.tp=1,\n possible.fp=1)\n\nrep.list <- replicate(1, rep.dt, simplify=FALSE)\nseveral.dt <- do.call(rbind, rep.list)\nsegs.dt <- rbind(before.dt, several.dt, after.dt)[.N:1]\nn.breaks <- nrow(segs.dt)-1L\nbreak.vec <- 1:n.breaks\nsegs.dt[, min.log.lambda := c(-Inf, break.vec)]\nsegs.dt[, max.log.lambda := c(break.vec, Inf)]\nprint(segs.dt)\nsegs.dt[, problem := 1]\nsegs.dt[, fn := possible.tp-tp]\nsegs.dt[, possible.fn := possible.tp]\nsegs.dt[, errors := fp+fn]\nsegs.dt[, labels := 2]\npred.dt <- data.table(pred.log.lambda=1.5, problem=1)\n(L <- penaltyLearning::ROChange(segs.dt, pred.dt, \"problem\"))\n\nggplot()+\n geom_path(aes(\n FPR, TPR),\n data=L$roc)\n\nsegs.tall <- melt(\n segs.dt,\n measure.vars=c(\"fp\", \"tp\"))\n\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(variable ~ .)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value),\n data=segs.tall)\n"
},
{
"alpha_fraction": 0.5654131770133972,
"alphanum_fraction": 0.5754334330558777,
"avg_line_length": 37.956138610839844,
"blob_id": "3439ba9670e3f2cd14a0f1d190ee7c9d65a3fe00",
"content_id": "bde25c756561a9c49bc41ad0a10a311aaf8652ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8882,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 228,
"path": "/figure-aum-neural-networks-data.py",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "import pdb\nimport os\nfrom pytorch_lightning.metrics.classification import AUROC\nimport torch\nimport torchvision.datasets\ncompute_auc = AUROC()\ntorch.set_num_interop_threads(1)#to avoid monsoon admin complaints.\n\nclass MySubset(torch.utils.data.Dataset):\n def __init__(self, dataset, indices):\n self.dataset = dataset\n self.indices = indices\n def __getitem__(self, index):\n i = self.indices[index].item()\n return self.dataset[i]\n def __len__(self):\n return len(self.indices)\n\ndata_dict = {}\ndata_name_tup = (\"MNIST\", \"FashionMNIST\")\nset_tf_dict = {\"train\":True, \"test\":False}\nfor data_name in data_name_tup:\n data_class = getattr(torchvision.datasets, data_name)\n data_dict[data_name] = {}\n for in_name, train in set_tf_dict.items():\n data_set = data_class(\n \".\", train=train, download=True,\n transform=torchvision.transforms.ToTensor(),\n target_transform=lambda label: 0 if label < 5 else 1)\n if in_name is \"train\":\n torch.manual_seed(1)\n label_list = [y for x,y in data_set]\n train_labels = torch.tensor(label_list)\n skip_dict = {0:1, 1:100}\n index_dict = {\n lab:(train_labels==lab).nonzero() for lab in skip_dict}\n index_list = [\n index_dict[lab][::skip] for lab,skip in skip_dict.items()]\n print([len(i) for i in index_list])\n indices = torch.cat(index_list)\n imbalanced = MySubset(data_set, indices)\n length_dict={\"subtrain\":int(0.8*len(imbalanced))}\n length_dict[\"validation\"]=len(imbalanced)-length_dict[\"subtrain\"]\n sub_list = torch.utils.data.random_split(\n imbalanced, length_dict.values())\n for s,sub_set in zip(length_dict.keys(),sub_list):\n data_dict[data_name][s] = sub_set\n else:\n data_dict[data_name][\"test\"]=data_set\ndl = torch.utils.data.DataLoader(\n data_dict[\"MNIST\"][\"subtrain\"], batch_size=2000)\nfor x,y in dl:\n pass\nprint(x.shape)\nfor data_name,set_dict in data_dict.items():\n for set_name,data_set in set_dict.items():\n print((data_name,set_name,len(data_set)))\n\nclass LeNet5(torch.nn.Module):\n def __init__(self):\n super(LeNet5, self).__init__()\n self.seq = torch.nn.Sequential( \n torch.nn.Conv2d(\n in_channels=1, out_channels=6,\n kernel_size=5, stride=1, padding=2),\n torch.nn.ReLU(),\n torch.nn.AvgPool2d(kernel_size=2, stride=2),\n torch.nn.Conv2d(\n in_channels=6, out_channels=16,\n kernel_size=5, stride=1, padding=0),\n torch.nn.ReLU(),\n torch.nn.AvgPool2d(kernel_size=2, stride=2),\n torch.nn.Flatten(),\n torch.nn.Linear(in_features=400, out_features=120),\n torch.nn.ReLU(),\n torch.nn.Linear(in_features=120, out_features=84),\n torch.nn.ReLU(),\n torch.nn.Linear(in_features=84, out_features=1),\n )\n def forward(self, feature_mat):\n return self.seq(feature_mat)\n\ndef AUM(pred_tensor, label_tensor, rate=False):\n \"\"\"Area Under Min(FP,FN)\n\n Loss function for imbalanced binary classification\n problems. Minimizing AUM empirically results in maximizing Area\n Under the ROC Curve (AUC). 
Arguments: pred_tensor and label_tensor\n should both be 1d tensors (vectors of real-valued predictions and\n labels for each observation in the set/batch).\n\n \"\"\"\n is_positive = label_tensor == 1\n is_negative = label_tensor != 1\n fn_diff = torch.where(is_positive, -1, 0)\n fp_diff = torch.where(is_positive, 0, 1)\n thresh_tensor = -pred_tensor.flatten()\n sorted_indices = torch.argsort(thresh_tensor)\n fp_denom = torch.sum(is_negative) if rate else 1\n fn_denom = torch.sum(is_positive) if rate else 1\n sorted_fp_cum = fp_diff[\n sorted_indices].cumsum(axis=0)/fp_denom\n sorted_fn_cum = -fn_diff[\n sorted_indices].flip(0).cumsum(axis=0).flip(0)/fn_denom\n sorted_thresh = thresh_tensor[sorted_indices]\n sorted_is_diff = sorted_thresh.diff() != 0\n sorted_fp_end = torch.cat([sorted_is_diff, torch.tensor([True])])\n sorted_fn_end = torch.cat([torch.tensor([True]), sorted_is_diff])\n uniq_thresh = sorted_thresh[sorted_fp_end]\n uniq_fp_after = sorted_fp_cum[sorted_fp_end]\n uniq_fn_before = sorted_fn_cum[sorted_fn_end]\n uniq_min = torch.minimum(uniq_fn_before[1:], uniq_fp_after[:-1])\n return torch.sum(uniq_min * uniq_thresh.diff())\n\ndef AUM_rate(pred_tensor, label_tensor):\n \"\"\"Area Under Min(FPR,FNR)\"\"\"\n return AUM(pred_tensor, label_tensor, rate=True)\n\nout_cols = [\n \"epoch\",\n \"step\",\n \"set_name\",\n \"out_name\",\n \"out_value\"\n ]\nloss_name = \"logistic\"\nlr = 1e-3\nseed=1\nbatch_size=50\ndef one_trial(loss_name, seed_str, lr_str, data_name, batch_size_str):\n out_csv = \"/\".join([\n \"figure-aum-neural-networks-data\",\n loss_name, seed_str, lr_str, data_name, batch_size_str,\n \"steps.csv\"\n ])\n out_dir = os.path.dirname(out_csv)\n os.makedirs(out_dir, exist_ok=True)\n def write_row(items,w_or_a):\n f=open(out_csv,w_or_a)\n f.write(\",\".join(items)+\"\\n\")\n write_row(out_cols,\"w\")\n seed = int(seed_str)\n lr = float(lr_str)\n batch_size = int(batch_size_str)\n set_dict = data_dict[data_name]\n subtrain_label_list = [y for x,y in set_dict[\"subtrain\"]]\n N_subtrain = len(subtrain_label_list)\n subtrain_label_tensor = torch.tensor(subtrain_label_list)\n label_count_dict = {\n lab:torch.sum(subtrain_label_tensor==lab) for lab in (0,1)}\n print(label_count_dict)\n label_weight_dict = {\n lab:N_subtrain/count for lab,count in label_count_dict.items()}\n def get_weight_tensor(lab_tensor):\n return torch.where(\n lab_tensor==0,\n label_weight_dict[0],\n label_weight_dict[1])\n torch.sum(get_weight_tensor(subtrain_label_tensor)) #should be 2.\n def log_loss(pred_tensor, label_tensor, *args):\n bce_inst = torch.nn.BCEWithLogitsLoss(*args)\n return bce_inst(pred_tensor, label_tensor.float())\n def weights_balanced(pred_tensor, label_tensor):\n return log_loss(\n pred_tensor, label_tensor,\n get_weight_tensor(label_tensor))\n loss_dict = {\n \"logistic\":log_loss,\n \"balanced\":weights_balanced,\n \"AUM\":AUM,\n \"AUM_rate\":AUM_rate,\n }\n loss_fun = loss_dict[loss_name]\n def compute_loss_pred(features, labels):\n pred_mat = model(features)\n pred_vec = pred_mat.reshape(len(pred_mat))\n return loss_fun(pred_vec, labels), pred_vec\n out_metrics = {\n \"AUC\":compute_auc,\n \"loss\":loss_fun\n }\n torch.manual_seed(seed) \n model = LeNet5()\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n for epoch in range(200):\n step = 0\n print(epoch)\n # first update weights.\n subtrain_loader = torch.utils.data.DataLoader(\n set_dict[\"subtrain\"], shuffle=True, batch_size=batch_size)\n for subtrain_features, subtrain_labels in subtrain_loader:\n # then 
compute subtrain/validation loss.\n with torch.no_grad():\n for set_name,set_obj in set_dict.items():\n set_loader = torch.utils.data.DataLoader(\n set_obj, batch_size=100)\n pred_list = []\n label_list = []\n for batch_features, batch_labels in set_loader:\n batch_loss, batch_pred = compute_loss_pred(\n batch_features, batch_labels)\n pred_list.append(batch_pred)\n label_list.append(batch_labels)\n set_pred_tensor = torch.cat(pred_list)\n set_label_tensor = torch.cat(label_list)\n for out_name,out_fun in out_metrics.items():\n out_tensor = out_fun(set_pred_tensor, set_label_tensor)\n out_dict = {\n \"epoch\":epoch,\n \"step\":step,\n \"set_name\":set_name,\n \"out_name\":out_name,\n \"out_value\":out_tensor.item()\n }\n item_list = [str(out_dict[N]) for N in out_cols]\n write_row(item_list,\"a\")\n step += 1\n optimizer.zero_grad()\n subtrain_loss, pred_vec = compute_loss_pred(\n subtrain_features, subtrain_labels)\n subtrain_loss.backward()\n optimizer.step()\n\nif __name__ == '__main__':\n import sys\n args = sys.argv[1:]\n print(args)\n one_trial(*args)\n"
},
{
"alpha_fraction": 0.6078660488128662,
"alphanum_fraction": 0.6256710290908813,
"avg_line_length": 35.390716552734375,
"blob_id": "e46b2b000257144fd84a3804f7e241c8da401e4e",
"content_id": "0e198fd024ec2b90501266a4ca7a75580541e4f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 18815,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 517,
"path": "/figure-line-grid-search-interactive.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\n obj.sign.list <- list(auc=-1, aum=1)\ncache.name <- \"figure-line-grid-search-interactive-cache.rds\"\nif(FALSE){\n unlink(file.path(testFold.vec, cache.name), recursive=TRUE)\n}\n\n##TODO add for loop over line search set, subtrain or validation?\n\n## > mb[per.set, on=list(set)][order(labels)]\n## megabytes set labels\n## 1: 554 H3K36me3_TDH_other 200\n## 2: 377 H3K36me3_TDH_ENCODE 338\n## 3: 375 H3K4me3_TDH_ENCODE 525\n## 4: 592 H3K27me3_RL_cancer 570\n## 5: 798 H3K27ac_TDH_some 627\n## 6: 906 H3K36me3_TDH_immune 630\n## 7: 296 H3K27me3_TDH_some 696\n## 8: 2407 CTCF_TDH_ENCODE 1378\n## 9: 3223 H3K4me1_TDH_BP 1584\n## 10: 5871 H3K36me3_AM_immune 1743\n## 11: 6407 ATAC_JV_adipose 3241\n## 12: 3017 H3K4me3_PGP_immune 3780\n## 13: 2902 H3K4me3_TDH_immune 3807\n## 14: 5421 H3K27ac-H3K4me3_TDHAM_BP 15961\n(testFold.vec <- Sys.glob(\"../neuroblastoma-data/data/*/cv/*/testFolds/*\"))\ntestFold.path <- \"../neuroblastoma-data/data/detailed/cv/R-3.6.0-chrom/testFolds/1\" \nseed <- 1\ninit.name=\"zero\"\naum.type=\"count\"\nOneBatch <- function(testFold.path, aum.type){\n library(data.table)\n cv.path <- dirname(dirname(testFold.path))\n folds.csv <- file.path(cv.path, \"folds.csv\")\n cv.type <- basename(cv.path)\n test.fold <- basename(testFold.path)\n data.dir <- dirname(dirname(cv.path))\n data.name <- basename(data.dir)\n data.list <- list()\n for(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n }\n ## replace positive fn at end with 0 to avoid AUM=Inf.\n data.list$evaluation[, `:=`(\n min.fn=min(fn),\n max.fp=max(fp),\n min.lambda = exp(min.log.lambda),\n example=sequenceID\n ), by=sequenceID]\n bad <- data.list$evaluation[min.log.lambda == -Inf & min.fn < fn]\n if(nrow(bad)){\n print(bad)\n }\n data.list$evaluation[min.log.lambda == -Inf & 0 < fn]\n ## code below not necessary since this does not happen in our real\n ## data sets, but it could theoretically in some data.\n data.list$aum.input <- data.table(data.list$evaluation)[, `:=`(\n possible.fn=possible.fn-min.fn,\n fn=fn-min.fn,\n possible.fp=max.fp\n ), by=sequenceID]\n ## read folds. 
\n folds.dt <- data.table::fread(folds.csv)\n folds.dt[fold == test.fold, set := \"test\"]\n folds.dt[fold != test.fold, set := rep(\n c(\"subtrain\", \"validation\"), l=.N)]\n folds.dt[, table(fold, set)]\n X.all <- scale(data.list$inputs[, -1])#rm seqID.\n rownames(X.all) <- data.list$inputs$sequenceID\n X.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n set.vec <- folds.dt[rownames(X.finite), set, on=\"sequenceID\"]\n seqs.list <- list()\n diffs.list <- list()\n aum.vec.list <- list()\n for(s in unique(folds.dt$set)){\n seqs.set <- folds.dt[s==set, sequenceID]\n seqs.list[[s]] <- seqs.set\n seqs.diff <- aum::aum_diffs_penalty(\n data.list$evaluation,\n seqs.set,\n denominator=aum.type)\n diffs.list[[s]] <- seqs.diff\n }\n totals <- colSums(diffs.list$subtrain[, .(fp_diff, fn_diff)])\n X.subtrain <- X.finite[set.vec==\"subtrain\",]\n neg.t.X.subtrain <- -t(X.subtrain)\n seqs.train <- with(seqs.list, c(subtrain, validation))\n y.train <- data.list[[\"outputs\"]][\n seqs.train,\n cbind(min.log.lambda, max.log.lambda),\n on=\"sequenceID\"]\n keep <- apply(is.finite(y.train), 1, any)\n X.train <- X.finite[seqs.train, ]\n N.param <- ncol(X.finite)+1\n init.param <- structure(\n rep(0, N.param),\n names=c(\"(Intercept)\",colnames(X.finite)))\n init.fun.list <- list(\n IntervalRegressionCV=function(){\n fit <- penaltyLearning::IntervalRegressionCV(\n X.train[keep, ],\n y.train[keep, ])\n some.param <- fit[[\"param.mat\"]]\n init.param[names(some.param)] <- some.param\n init.param\n },\n zero=function(){\n init.param+rnorm(N.param)\n }\n )\n iteration.dt.list <- list()\n considered.dt.list <- list()\n for(seed in 1:4)for(init.name in names(init.fun.list)){\n init.fun <- init.fun.list[[init.name]]\n set.seed(seed)\n int.weights <- init.fun()\n ##for(algo in c(\"grid\",\"exact\",\"hybrid\"))\n for(algo in c(\"grid\",\"hybrid\"))\n ##for(objective in names(obj.sign.list)){\n for(objective in \"aum\"){\n computeROC <- function(w, i, set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=-as.numeric(pred.pen.vec))\n is.set <- set.vec==set\n set.dt <- pred.dt[is.set]\n L <- penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n alist <- aum_auc(diffs.list[[set]], pred.pen.vec[ seqs.list[[set]], ])\n L$aum.diffs <- alist$aum\n L$auc.diffs <- alist$auc\n L\n }\n aum_auc <- function(diffs.dt, pred.vec){\n aum.list <- aum::aum(diffs.dt, pred.vec)\n before.dt <- data.table(aum.list$total_error, key=\"thresh\")[, `:=`(\n TPR_before=1-fn_before/-totals[[\"fn_diff\"]],\n FPR_before=fp_before/totals[[\"fp_diff\"]])]\n aum.list$auc <- before.dt[, .(\n FPR=c(FPR_before, 1),\n TPR=c(TPR_before, 1)\n )][, sum((FPR[-1]-FPR[-.N])*(TPR[-1]+TPR[-.N])/2)]\n aum.list\n }\n obj.sign <- obj.sign.list[[objective]]\n weight.vec <- int.weights[-1]\n intercept <- int.weights[1]\n prev.obj <- Inf*obj.sign\n step.number <- 0\n while({\n summary.dt.list <- list()\n for(set in names(seqs.list)){\n set.PL <- computeROC(weight.vec, intercept, set)\n summary.dt.list[[set]] <- with(set.PL, data.table(\n set,\n thresholds[threshold==\"predicted\"],\n auc, aum, auc.diffs, aum.diffs))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n iteration.dt.list[[paste(\n seed, init.name, algo, step.number, objective\n )]] <- data.table(\n seed, init.name, algo, step.number, objective, summary.dt)\n new.obj <- summary.dt.list$subtrain[[paste0(objective,\".diffs\")]]\n improvement <- obj.sign*(prev.obj-new.obj)\n cat(sprintf(\n \"seed=%d init=%s 
algo=%s step=%d %s %f->%f\\n\",\n seed, init.name, algo, step.number, objective, prev.obj, new.obj))\n 1e-5 < improvement\n }){\n ##while(step.number<2){\n LS=aum::aum_line_search(diffs.list$subtrain, X.subtrain, weight.vec)\n pred.vec <- X.subtrain %*% weight.vec\n aum.list <- aum::aum(diffs.list$subtrain, pred.vec)\n pred.grad.vec <- rowMeans(aum.list$derivative_mat)\n direction.vec <- neg.t.X.subtrain %*% pred.grad.vec\n step.grid <- 10^seq(-9, 0)\n take.step <- function(s){\n weight.vec+s*direction.vec\n }\n grid.dt <- data.table(step.size=step.grid)[, {\n step.weight <- take.step(step.size)\n grid.aum <- aum_auc(diffs.list$subtrain, X.subtrain %*% step.weight)\n with(grid.aum, data.table(auc, aum))\n }, by=step.size]\n steps.considered <- rbind(\n if(algo!=\"grid\")LS$line_search_result[, .(search=\"exact\", step.size, auc, aum)],\n if(algo!=\"exact\")grid.dt[, .(search=\"grid\", step.size, auc, aum)]\n )[, step.prop := seq(1, .N)/.N, by=search][]\n considered.dt.list[[paste(\n seed, init.name, algo, objective, step.number\n )]] <- data.table(\n seed, init.name, algo, objective, step.number, steps.considered)\n best.step <- steps.considered[which.min(obj.sign*get(objective))]\n weight.vec <- take.step(best.step$step.size)\n new.aum <- aum::aum(diffs.list$subtrain, X.subtrain %*% weight.vec)\n err.thresh <- data.table(\n new.aum$total_error,key=\"thresh\"\n )[, err_before := fp_before+fn_before][, .(\n thresh=c(thresh[1]-1,thresh[-1]-diff(thresh)/2,thresh[.N]+1),\n err=c(err_before,sum(diffs.list$subtrain$fp_diff))\n )]\n intercept <- err.thresh[which.min(err), thresh]\n step.number <- step.number+1\n prev.obj <- new.obj\n }#step.number\n }#algo/objective\n }#seed/init.name\n list(\n sets=data.table(\n do.call(rbind, iteration.dt.list),\n data.name, cv.type, test.fold),\n steps=data.table(\n rbindlist(considered.dt.list),\n data.name, cv.type, test.fold))\n}\n\nargs.dt <- data.table::CJ(\n testFold.path=testFold.vec,\n aum.type=c(\"rate\",\"count\")\n)\n\n## Run on SLURM.\nregistry.dir <- \"figure-line-grid-search-interactive-registry\"\nregistry.dir <- \"figure-line-grid-search-interactive-registry-fixed\"\nreg=batchtools::loadRegistry(registry.dir)\nbatchtools::getStatus(reg=reg)\nbatchtools::findExpired(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\n#analyze.\njob.id <- 9#join.dt[init.name==\"zero\" & seed==1 & objective==\"aum\" & search != \"exact\"]\ndone.ids <- status.dt[is.na(error), job.id]\nfor(done.i in seq_along(done.ids)){\n job.id <- done.ids[[done.i]]\n args.row <- args.dt[job.id]\n ls.dir <- file.path(args.row$testFold.path, \"line_search\", \"sets\")\n dir.create(ls.dir, showWarnings = FALSE, recursive = TRUE)\n ls.csv <- file.path(ls.dir, paste0(args.row$aum.type, \".csv\"))\n if(!file.exists(ls.csv)){\n cat(sprintf(\"%4d / %4d %s\\n\", done.i, length(done.ids), ls.csv))\n res <- batchtools::loadResult(job.id)\n best.steps <- res$steps[\n , .SD[which.min(obj.sign.list[[objective]]*get(objective))], by=.(\n seed,init.name,algo,objective,step.number\n )][,.(seed,init.name,algo,objective,step.number=step.number+1,search)]\n join.dt <- best.steps[res$sets, on=.(\n seed,init.name,algo,objective,step.number\n )]\n join.dt[is.na(search), table(step.number)]\n fwrite(join.dt, ls.csv)\n } \n}\n\nif(FALSE){\n unlink(registry.dir, recursive=TRUE)\n}\nreg <- batchtools::makeRegistry(registry.dir)\nbatchtools::batchMap(OneBatch, args=args.dt, reg=reg)\njob.table <- batchtools::getJobTable(reg=reg)\nchunks <- 
data.frame(job.table, chunk=1)\nbatchtools::submitJobs(chunks, resources=list(\n walltime = 24*60*60,#seconds\n memory = 32000,#megabytes per cpu\n ncpus=1, #>1 for multicore/parallel jobs.\n ntasks=1, #>1 for MPI jobs.\n chunks.as.arrayjobs=TRUE), reg=reg)\n\nbatchtools::getStatus(reg=reg)\nstatus.dt <- batchtools::getJobStatus(reg=reg)\nstatus.dt[!is.na(error)]\nstatus.dt[!is.na(done)]\n\nbatchtools::testJob(4, reg=reg)\nargs.dt[21]\n\n## seed=1 init.name=IntervalRegressionCV algo=exact step=33146 auc 0.955147->0.955147\n## *** caught bus error ***\n## address 0x153dc1917f40, cause 'non-existent physical address'\n## Traceback:\n## 1: aum_sort_interface(error.diff.df, pred.vec)\n\n\n##job.id=376 Error in penaltyLearning::ROChange(data.list$evaluation, data.table(sequenceID = seqs.set, : \\n no positive labels => fix by excluding data?\n\n##job.id=354 Error in X.finite %*% w : non-conformable arguments => fixed by processing output of IRCV, which returns w that may be smaller than number of features.\n\n##job.id=4,12 Error in aum_sort_interface(error.diff.df, pred.vec) : \\n fp should be non-negative => fixed by updating check in aum C++ code.\n\n##job.id=21 Error in while (obj.sign * (new.obj - prev.obj) < 1e-06) { : \\n missing value where TRUE/FALSE needed => fixed by using aum package which always gives finite aum.\n\n## Run locally.\nfor(args.i in 1:nrow(args.dt)){\n args.row <- args.dt[args.i]\n cache.rds <- args.row[, file.path(testFold.path, paste0(aum.type, \".rds\"))]\n all.it.list[[args.i]] <- if(file.exists(cache.rds)){\n readRDS(cache.rds)\n }else{\n cat(sprintf(\"%4d / %4d\\n\", args.i, length(args.dt)))\n print(args.row)\n iteration.list <- do.call(OneBatch, args.row)\n saveRDS(iteration.list, cache.rds)\n }\n}\n\n## analyze.\ncache.vec <- Sys.glob(file.path(\n \"../neuroblastoma-data/data/*/cv/*/testFolds/*\",\n cache.name))\nfor(cache.i in seq_along(cache.vec)){\n cache.rds <- cache.vec[[cache.i]]\n L <- readRDS(cache.rds)\n algo.cols <- c(\"seed\",\"init.name\",\"algo\")\n step.cols <- c(algo.cols,\"step.number\")\n best.steps <- L$steps[, .SD[which.min(aum)], by=step.cols][,c(step.cols,\"search\"),with=FALSE]\n join.dt <- L$sets[set != \"test\"][best.steps, on=step.cols]\n min.dt <- join.dt[set==\"validation\", .SD[which.min(aum)], by=.(seed,init.name,set)]\n\n ggplot()+\n geom_line(aes(\n step.number, aum, color=algo),\n data=join.dt)+\n geom_point(aes(\n step.number, aum, color=algo),\n shape=1,\n data=join.dt[search==\"exact\"])+\n geom_point(aes(\n step.number, aum, color=algo),\n data=min.dt)+\n facet_wrap(~seed+ init.name + set,scales=\"free\")\n \n}\n\n#analyze 2\ntype.csv.vec <- Sys.glob(file.path(testFold.vec, \"line_search\",\"sets\", \"*.csv\"))\nselected.dt.list <- list()\nfor(type.csv.i in seq_along(type.csv.vec)){\n type.csv <- type.csv.vec[[type.csv.i]]\n type.dt <- fread(type.csv)\n meta.dt <- type.dt[1, .(\n data.name, cv.type, test.fold,\n gradient=sub(\".csv\",\"\",basename(type.csv)))]\n ## does max auc get better auc than min aum?\n valid.dt <- type.dt[\n set==\"validation\"\n ][, step.prop := step.number/max(step.number), by=.(seed,init.name,algo,objective)]\n compare.obj.dt <- valid.dt[\n , .SD[which.max(auc), .(step.number,step.prop,valid.auc=auc)], by=.(seed,init.name,algo,objective)]\n not.zero <- valid.dt[0 < step.number]\n search.counts <- dcast(\n compare.obj.dt[not.zero, on=.(seed,init.name,algo,objective,step.number>=step.number),nomatch=0L],\n seed+init.name+algo+objective~search,\n length)\n selected.dt.list[[type.csv]] <- data.table(\n 
meta.dt, search.counts[compare.obj.dt, on=.(seed,init.name,algo,objective)])\n}\nselected.dt <- rbindlist(selected.dt.list)\nfwrite(selected.dt, \"figure-line-grid-search-interactive-selected.csv\")\n\nggplot()+\n facet_grid(init.name + objective ~ ., labeller=label_both, scales=\"free\")+\n geom_point(aes(\n auc, algo),\n data=compare.obj.dt)+\n scale_x_continuous(\n \"Best validation AUC\")\n\n\nselected.dt <- data.table::fread(\"figure-line-grid-search-interactive-selected.csv\")\nexact.used <- selected.dt[algo==\"hybrid\" & 0<exact]\nexact.used[, table(objective, gradient)]\ngrid.only <- selected.dt[algo==\"grid\"]\nexact.used[grid.only, grid.auc = i.valid.auc, on=.(\n data.name, cv.type, test.fold, gradient, seed, init.name, objective\n)]\nexact.used[, hist(valid.auc-grid.auc)]\n\nggplot()+\n geom_point(aes(\n valid.auc, grid.auc),\n shape=1,\n data=exact.used)+\n coord_equal()\n\nselected.wide <- dcast(\n selected.dt[step.number>0],\n data.name + cv.type + test.fold + gradient + seed + init.name + objective ~ algo,\n value.var=c('step.number','valid.auc','exact')\n)[!is.na(exact_hybrid)][, hybrid_used_exact := ifelse(exact_hybrid==0, \"no\", \"yes\")]\nggplot()+\n facet_grid(. ~ hybrid_used_exact)+\n geom_point(aes(\n valid.auc_grid, valid.auc_hybrid),\n data=selected.wide)\n##Shouldn't grid=hybrid when exact=0?\none <- selected.wide[exact_hybrid==0 & valid.auc_hybrid != valid.auc_grid & objective==\"aum\"][1]\nselected.dt[one, on=.(\n data.name, cv.type, test.fold, gradient, seed, init.name, objective\n)]\n##type.csv.i=1 [seed==1 & init.name==\"zero\" & objective==\"auc\"]\n##type.csv=\"../neuroblastoma-data/data/ATAC_JV_adipose/cv/equal_labels/testFolds/1/line_search/sets/count.csv\"\nbest.algos <- selected.dt[\n, .SD[which.max(valid.auc)],\n by=.(data.name, cv.type, test.fold, gradient, seed, init.name, objective)]\nbest.algos[, .(count=.N), by=algo]\nbest.algos[, .(count=.N), keyby=.(algo, objective)]\nbest.algos[objective==\"auc\", .(count=.N), keyby=.(algo, gradient)]\n\n\n#analyze 3: how many steps?\ntype.csv.vec <- Sys.glob(file.path(testFold.vec, \"line_search\",\"sets\", \"*.csv\"))\nsteps.dt.list <- list()\nfor(type.csv.i in seq_along(type.csv.vec)){\n type.csv <- type.csv.vec[[type.csv.i]]\n type.dt <- fread(type.csv)\n meta.dt <- type.dt[1, .(\n data.name, cv.type, test.fold,\n gradient=sub(\".csv\",\"\",basename(type.csv)))]\n ## does max auc get better auc than min aum?\n valid.dt <- type.dt[\n set==\"validation\",\n .SD[, data.table(\n max.auc=max(auc),\n steps=.N)],\n by=.(seed, init.name, algo, objective)]\n steps.dt.list[[type.csv]] <- data.table(\n meta.dt, valid.dt)\n}\nsteps.dt <- rbindlist(steps.dt.list)\nfwrite(steps.dt, \"figure-line-grid-search-interactive-steps.csv\")\n\nsteps.dt <- data.table::fread(\"figure-line-grid-search-interactive-steps.csv\")\ncomp.dt <- data.table::fread(\"figure-line-search-complexity.csv\")\n\nsubtrain.sizes <- unique(comp.dt[, .(data.name, cv.type, test.fold, n.subtrain.diffs)])\nAlgo.map <- c(\n grid=\"grid\",\n exact=\"exactL\",\n hybrid=\"both\")\nrfac <- 5\nsteps.ircv.aum <- steps.dt[\n init.name==\"IntervalRegressionCV\" & objective==\"aum\"\n][,\n Algo := Algo.map[algo]\n][\n subtrain.sizes, on=.(data.name, cv.type, test.fold), nomatch=0L\n][, B:= as.integer(10^(round(log10(n.subtrain.diffs)*rfac)/rfac))]\nsteps.ircv.aum[, hist(n.subtrain.diffs)]\nsteps.wide <- dcast(\n steps.ircv.aum,\n B + Algo ~ .,\n list(median, min, max),\n value.var=\"steps\")\nL <- list(measurements=steps.wide[, data.table(\n steps=steps_median,\n 
N=B,\n expr.name=Algo)])\nmy_funs <- list(\n N=function(N)log10(N),\n \"\\\\log N\"=function(N)log10(log(N)))\nbest <- atime::references_best(L, unit.col.vec=\"steps\", fun.list=my_funs)\nbest$ref[, Algo := expr.name]\nlibrary(ggplot2)\nref.color <- \"red\"\ngg <- ggplot()+\n facet_grid(. ~ Algo, labeller=label_both)+\n geom_ribbon(aes(\n B, ymin=steps_min, ymax=steps_max),\n alpha=0.5,\n data=steps.wide)+\n geom_line(aes(\n B, steps_median),\n data=steps.wide)+\n geom_line(aes(\n N, reference, group=fun.name),\n color=ref.color,\n data=best$ref)+\n directlabels::geom_dl(aes(\n N, reference, group=fun.name, label=fun.name),\n color=ref.color,\n method=\"bottom.polygons\",\n data=best$ref)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\")+\n scale_y_log10(\n \"Steps of gradient descent\\nuntil loss stops decreasing\")\npng(\"figure-line-grid-search-interactive-steps-refs.png\", width=9, height=3, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n geom_line(aes(\n B, steps_median, color=Algo),\n data=steps.wide)+\n geom_ribbon(aes(\n B, ymin=steps_min, ymax=steps_max, fill=Algo),\n alpha=0.5,\n data=steps.wide)+\n scale_x_log10(\n \"B = breakpoints in subtrain set error functions\",\n limits=c(NA,8000))+\n scale_y_log10(\n \"Steps of gradient descent\\nuntil loss stops decreasing\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-line-grid-search-interactive-steps.png\", width=4.5, height=3, units=\"in\", res=200)\nprint(dl)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6382067799568176,
"alphanum_fraction": 0.659608006477356,
"avg_line_length": 29.81944465637207,
"blob_id": "a37c8dd9c72ce2bd1e25f65193da70f23f3de962",
"content_id": "7fec3ed10b173b0269efc0374b0d24075500e4c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4439,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 144,
"path": "/curveAlignment.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nfuture::plan(\"multiprocess\")\n\nsome.probs <- data.table(prob.dir=paste0(\n \"ATAC_JV_adipose/samples/AC1/MSC\",\n c(83, 91),\n \"/problems/chrX-37148256-49242997\"))\nsome.labels <- some.probs[, {\n labels.bed <- paste0(\n \"../feature-learning-benchmark/data/\",\n prob.dir, \"/labels.bed\")\n fread(\n labels.bed,\n col.names=c(\n \"chrom\",\n \"labelStart\",\n \"labelEnd\",\n \"annotation\"))\n}, by=list(prob.dir)]\nmin.labelStart <- min(some.labels$labelStart)\nmax.labelEnd <- max(some.labels$labelEnd)\nlabel.range <- max.labelEnd-min.labelStart\nexpand <- label.range/20\n\nprofiles.dt <- some.probs[, {\n from <- min.labelStart-expand\n to <- max.labelEnd+expand\n s <- sub(\":\", \"-\", prob.dir)\n bg <- paste0(\n \"../feature-learning-benchmark/data/\",\n s, \"/coverage.bedGraph\")\n gz <- paste0(bg, \".gz\")\n dt <- fread(\n gz,\n col.names=c(\"chrom\", \"chromStart\", \"chromEnd\", \"coverage\"))\n fwrite(dt, bg, col.names=FALSE, sep=\"\\t\")\n dt[from < chromEnd & chromStart < to]\n}, by=list(prob.dir)]\nann.colors <- c(\n noPeaks=\"#f6f4bf\",\n peakStart=\"#ffafaf\",\n peakEnd=\"#ff4c4c\",\n peaks=\"#a445ee\")\nggplot()+\n ggtitle(\n \"Noisy coverage data and labels\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(prob.dir ~ ., scales=\"free\")+\n geom_tallrect(aes(\n xmin=labelStart/1e3, xmax=labelEnd/1e3, fill=annotation),\n data=some.labels,\n color=\"grey\")+\n scale_fill_manual(values=ann.colors)+\n geom_step(aes(\n chromStart/1e3, coverage),\n data=profiles.dt,\n color=\"grey50\")+\n scale_x_continuous(breaks=seq(4e4, 5e4, by=5))\n\nwin <- function(windowStart, windowEnd){\n data.table(windowStart, windowEnd)\n}\nwin.dt <- rbind(\n win(43447, 43457),\n win(43502, 43512))*1000\nwin.dt[, window := 1:.N]\nsetkey(profiles.dt, chromStart, chromEnd)\nsetkey(some.labels, labelStart, labelEnd)\nsetkey(win.dt, windowStart, windowEnd)\nwin.profiles <- foverlaps(profiles.dt, win.dt, nomatch=0L)\nwin.labels <- foverlaps(some.labels, win.dt, nomatch=0L)\ngg <- ggplot()+\n ggtitle(\n \"Noisy coverage data and labels\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(prob.dir ~ window, scales=\"free\", labeller=label_both)+\n geom_tallrect(aes(\n xmin=labelStart/1e3, xmax=labelEnd/1e3, fill=annotation),\n data=win.labels,\n color=\"grey\")+\n scale_fill_manual(values=ann.colors)+\n geom_step(aes(\n chromStart/1e3, coverage),\n data=win.profiles,\n color=\"grey50\")+\n scale_x_continuous(breaks=seq(4e4, 5e4, by=5))\nprint(gg)\n\nwin.err.list <- list()\nwin.segs.list <- list()\nwin.selection.list <- list()\nfor(seq.i in 1:nrow(some.probs)){\n s <- some.probs[seq.i]\n pdir <- paste0(\n \"../feature-learning-benchmark/data/\",\n sub(\":\", \"-\", s$prob.dir))\n L <- PeakSegPipeline::problem.target(pdir, 1)\n plabels <- win.labels[prob.dir==s$prob.dir]\n plabels[, chromStart := labelStart]\n plabels[, chromEnd := labelEnd]\n selection.dt <- data.table(penaltyLearning::modelSelection(\n L$models, \"total.loss\", \"peaks\"))\n win.selection.list[[paste(seq.i)]] <- data.table(\n prob.dir=s$prob.dir,\n selection.dt)\n for(model.i in 1:nrow(selection.dt)){\n model <- selection.dt[model.i]\n pen.str <- paste(model$penalty)\n pen.info <- PeakSegDisk::problem.PeakSegFPOP(pdir, pen.str)\n seg.dt <- data.table(prob.dir=s$prob.dir, model, pen.str, pen.info$segments)\n setkey(seg.dt, chromStart, chromEnd)\n over.dt <- foverlaps(seg.dt, win.dt, nomatch=0L)\n peak.dt <- over.dt[status==\"peak\"]\n e <- 
PeakError::PeakErrorChrom(peak.dt, plabels)\n win.err.list[[paste(seq.i, model.i)]] <-\n data.table(\n prob.dir=s$prob.dir,\n window=plabels$window,\n model, pen.str, e)\n win.segs.list[[paste(seq.i, model.i)]] <- over.dt\n }\n}\nwin.selection <- do.call(rbind, win.selection.list)\nwin.segs <- do.call(rbind, win.segs.list)\nwin.err <- do.call(rbind, win.err.list)\nwin.segs[, segStart := ifelse(chromStart<windowStart, windowStart, chromStart)]\nwin.segs[, segEnd := ifelse(windowEnd<chromEnd, windowEnd, chromEnd)]\n\npossible <- fread(\n \"../feature-learning-benchmark/labeled_problems_possible_errors.csv\")\npossible[, prob.dir := sub(\":\", \"-\", prob.dir)]\neval.dt <- possible[win.selection, nomatch=0L, on=list(prob.dir)]\n\nout.list <- list(\n evaluation=eval.dt,\n segments=win.segs,\n errors=win.err,\n problems=some.probs,\n profiles=win.profiles,\n labels=win.labels)\nsaveRDS(out.list, \"curveAlignment.rds\")\n\n"
},
{
"alpha_fraction": 0.6102250218391418,
"alphanum_fraction": 0.6366615891456604,
"avg_line_length": 27.600000381469727,
"blob_id": "8becb4490eff19a538e4da27ee1e934f0fa76ceb",
"content_id": "83c212e564cc240b08eb44dae99a98ed4aaaa334",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4577,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 160,
"path": "/figure-neuroblastomaProcessed-combinations.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nnb.comb <- readRDS(\"neuroblastomaProcessed.combinations.rds\")\n\nworst <- nb.comb$auc[which.max(auc)]\nworst.combo <- nb.comb$combos[worst, .(panel, interval), on=list(combo.i)]\nnb.comb$segs.min.err[, pred.log.lambda := ifelse(\n min.log.lambda == -Inf, max.log.lambda-worst$size, ifelse(\n max.log.lambda == Inf, min.log.lambda+worst$size, mid.log.lambda))]\nnb.comb$segs.min.err[, interval := ifelse(\n is.finite(mid.log.lambda), \"finite\", \"infinite\")]\npred.dt <- nb.comb$segs.min.err[worst.combo, on=list(panel, interval)]\nL <- penaltyLearning::ROChange(\n nb.comb$some.err, pred.dt, c(\"panel\"))\nL$auc\n\nL$auc.polygon[, row := 1:.N]\nggplot()+\n geom_polygon(aes(\n FPR, TPR),\n fill=\"red\",\n color=\"black\",\n alpha=0.5,\n data=L$auc.polygon)+\n geom_text(aes(\n FPR, TPR, label=row),\n data=L$auc.polygon)\n\nsel.dt <- L$auc.polygon[row>1, .(row, first=1)]\nsetkey(sel.dt, first, row)\nL$auc.polygon[, row0 := row]\nsetkey(L$auc.polygon, row, row0)\ncum.poly <- foverlaps(sel.dt, L$auc.polygon, nomatch=0L)\ncum.poly[, added := ifelse(i.row==row, \"new\", \"old\")]\nlim <- c(-0.2, 1.2)\ngg <- ggplot()+\n geom_path(aes(\n FPR, TPR),\n data=cum.poly)+\n geom_text(aes(\n FPR, TPR, label=row, color=added),\n data=cum.poly)+\n scale_color_manual(values=c(\"new\"=\"red\", old=\"black\"))+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_wrap(\"i.row\", nrow=2)+\n ##facet_grid(. ~ i.row)+\n coord_equal(xlim=lim, ylim=lim)+\n scale_x_continuous(breaks=seq(0, 1, by=0.5), labels=c(\"0\", \"0.5\", \"1\"))+\n scale_y_continuous(breaks=seq(0, 1, by=0.5))\npng(\"figure-neuroblastomaProcessed-combinations-worst.png\", 12, 3, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n geom_point(aes(\n aub, auc),\n color=\"black\",\n shape=21,\n size=5,\n fill=NA,\n data=nb.comb$auc)+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(. 
~ size)\nprint(gg)\nnb.comb$auc[order(aub), .(auc, aub, size, combo.i)]\n\nnb.comb$auc[, status := ifelse(\n auc==1 & aub != 0, \"counter-example\", \"other\")]\ngg <- ggplot()+\n ggtitle(\"AUC=1 does not imply AUM=0,\neach point represents a different vector of 8 predicted values,\nfor data sequences n409.4 n485.2 n490.2 n513.3 n7.4 n76.2 p4.2 p496.11\")+\n geom_hline(aes(\n yintercept=yint),\n data=data.table(yint=1),\n color=\"grey50\")+\n geom_vline(aes(\n xintercept=xint),\n data=data.table(xint=0),\n color=\"grey50\")+\n geom_point(aes(\n aub, auc, color=status),\n data=nb.comb$auc)+\n theme_bw()+\n theme(panel.spacing=grid::unit(0, \"lines\"))+\n scale_x_continuous(\n \"AUM = Area Under Min(FP, FN)\",\n breaks=c(0:2, max(nb.comb$auc$aub)))+\n scale_y_continuous(\n \"AUC = Area Under the ROC Curve\",\n limits=c(0, NA),\n breaks=c(0, 0.5, 1, max(nb.comb$auc$auc)))\npng(\"figure-neuroblastomaProcessed-combinations-points.png\", width=10, height=6, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\nrfac <- 5\nnb.comb$auc[, round.aub := round(aub*rfac)/rfac]\nnb.comb$auc[, round.auc := round(auc, 4)]\naub.count <- nb.comb$auc[, list(\n combos=.N\n), by=list(aub=round.aub, size, round.auc)]\ngg <- ggplot()+\n geom_hline(aes(\n yintercept=yint),\n data=data.table(yint=1),\n color=\"grey50\")+\n geom_point(aes(\n aub, round.auc, fill=combos),\n shape=21,\n size=5,\n data=aub.count)+\n scale_fill_gradient(low=\"white\", high=\"red\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(size ~ .)+\n geom_text(aes(\n aub, round.auc, label=combos),\n size=3,\n data=aub.count)+\n scale_y_continuous(\n \"Area under ROC curve\",\n breaks=seq(0, 1.2, by=0.2))+\n scale_x_continuous(\n \"Area under both TP and FP curves\")\nprint(gg)\npng(\"figure-neuroblastomaProcessed-combinations-scatter.png\", 12, 9, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\nauc.count <- nb.comb$auc[, list(\n combos=.N\n), by=list(n.finite, size, round.auc)]\ngg <- ggplot()+\n geom_tile(aes(\n n.finite, round.auc, fill=combos),\n data=auc.count)+\n geom_point(aes(\n n.finite, auc),\n color=\"black\",\n shape=21,\n size=5,\n fill=NA,\n data=worst)+\n scale_fill_gradient(low=\"white\", high=\"red\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(. ~ size)+\n geom_text(aes(\n n.finite, round.auc, label=combos),\n size=3,\n data=auc.count)+\n scale_x_continuous(\n \"Number of predictions in finite min error interval (other predictions in the infinite min error interval)\",\n breaks=unique(auc.count$n.finite))\npng(\"figure-neuroblastomaProcessed-combinations.png\", 12, 3, units=\"in\", res=100)\nprint(gg)\ndev.off()\n\n"
},
{
"alpha_fraction": 0.6552346348762512,
"alphanum_fraction": 0.7148014307022095,
"avg_line_length": 33.625,
"blob_id": "130fa884b0143b2ecd06444dd628ecfda18f52f1",
"content_id": "8d6f2e8a3fdcddae326a634cff5478d740a5911d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 16,
"path": "/figure-aum-convexity-interactive-screenshots.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "PNG.vec <- Sys.glob(\"figure-aum-convexity-interactive-screenshots/*.PNG\")\nfor(PNG in PNG.vec){\n img <- magick::image_read(PNG)\n crop <- magick::image_crop(img, \"1455x940+17+93\")\n out <- sub(\"screenshots\", \"cropped\", PNG)\n dir.create(dirname(out), showWarnings=FALSE, recursive=TRUE)\n magick::image_write(crop, out)\ncat(sprintf(r\"{\n\\begin{frame}\n \\frametitle{Demonstration of AUC/AUM computation}\n {\\scriptsize\\url{https://bl.ocks.org/tdhock/raw/545d76ea8c0678785896e7dbe5ff5510/}}\n\n \\includegraphics[width=\\textwidth]{%s}\n\\end{frame}\n}\", out))\n}\n"
},
{
"alpha_fraction": 0.6148853302001953,
"alphanum_fraction": 0.6270012855529785,
"avg_line_length": 23.19895362854004,
"blob_id": "ddbbcf129c78f1801f3a6a6b54461cab8d75cde5",
"content_id": "a5d93858bd29baa81469fa7edd94b688ebed4605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4622,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 191,
"path": "/figure-sonar-comparisons.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(data.table)\nout.loss.list <- list()\nfor(seed.csv in Sys.glob(\"figure-sonar-comparisons-data-seed*.csv\")){\n out.loss.list[[seed.csv]] <- data.table::fread(seed.csv)\n}\nout.loss <- do.call(rbind, out.loss.list)\n\n(max.valid.auc <- out.loss[\n set.name==\"validation\",\n .SD[which.max(auc), .(iteration, auc, set.name, step.size)],\n by=.(seed, loss.name)])\nextremes.selected <- sum(max.valid.auc$step.size %in% range(out.loss$step.size))\nif(0 < extremes.selected){\n stop(\"some extreme step sizes selected, should increase grid\")\n}\nshow.loss <- out.loss[set.name != \"test\"][\n max.valid.auc[,.(seed,loss.name,step.size)],\n on=.(seed, loss.name, step.size)\n]\n\n## lots of data.\ngg <- ggplot(,aes(\n iteration, auc, color=set.name))+\n geom_line(aes(\n group=paste(step.size, set.name)),\n data=out.loss)+\n facet_grid(\n seed ~ loss.name,\n labeller=\"label_both\",\n scales='free',\n space='fixed')\n\nggplot(,aes(\n iteration, auc, color=set.name))+\n geom_line(\n data=show.loss)+\n geom_point(\n shape=1,\n data=max.valid.auc)+\n facet_grid(\n seed ~ loss.name,\n labeller=\"label_both\",\n scales='free',\n space='fixed')\n\none.seed <- 6\npoint.dt <- max.valid.auc[seed==one.seed]\nline.dt <- show.loss[seed==one.seed & iteration<200]\ngg <- ggplot(,aes(\n iteration, auc, color=loss.name))+\n facet_grid(\n set.name ~ .,\n labeller=\"label_both\",\n scales='free',\n space='fixed')+\n geom_point(\n shape=1,\n data=point.dt)+\n geom_line(\n size=1,\n data=line.dt)\ndirectlabels::direct.label(gg, \"top.polygons\")\n\ndl.dt <- rbind(\n point.dt,\n line.dt[, .SD[\n which.max(iteration)\n ], by=loss.name][, names(point.dt),with=FALSE])\nggplot(,aes(\n iteration, auc, color=loss.name))+\n ## directlabels::geom_dl(aes(\n ## label=loss.name),\n ## method=list(\"top.polygons\",directlabels::dl.trans(y=y+0.1)),\n ## data=dl.dt)+\n facet_grid(\n . 
~ set.name,\n labeller=\"label_both\",\n scales='free',\n space='fixed')+\n geom_point(\n shape=1,\n data=point.dt)+\n geom_line(\n size=1,\n data=line.dt)\n\ngg <- ggplot(,aes(\n iteration, auc, color=loss.name))+\n geom_point(\n shape=21,\n size=2,\n fill=\"black\",\n data=point.dt)+\n geom_line(aes(\n linetype=set.name),\n size=0.5,\n data=line.dt)+\n ## directlabels::geom_dl(aes(\n ## y=auc+0.01,\n ## label=loss.name),\n ## method=list(cex=0.5,\"top.polygons\"),\n ## data=point.dt)+\n scale_linetype_manual(values=c(\n subtrain=\"dashed\",\n validation=\"solid\"))\npng(\n \"figure-sonar-comparisons-iterations.png\",\n width=6, height=4, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\ntest.loss <- out.loss[set.name==\"test\"]\ntest.selected <- test.loss[\n max.valid.auc[, .(seed, loss.name, step.size, iteration)],\n on=.(seed, loss.name, step.size, iteration)]\nggplot()+\n geom_point(aes(\n auc, loss.name),\n data=test.selected)\n\ntest.selected.stats <- test.selected[, .(\n median=median(auc),\n mean=mean(auc),\n sd=sd(auc),\n q25=quantile(auc, 0.25),\n q75=quantile(auc, 0.75)\n), by=.(loss.name)][order(mean)]\nlevs <- test.selected.stats$loss.name\ntest.selected.stats[, Loss := factor(loss.name, levs)]\ntest.selected[, Loss := factor(loss.name, levs)]\nggplot()+\n geom_segment(aes(\n q25, Loss,\n xend=q75, yend=loss.name),\n data=test.selected.stats)+\n geom_label(aes(\n median, Loss, label=\"median\"),\n data=test.selected.stats)+\n geom_point(aes(\n auc, Loss),\n data=test.selected)\n\ntest.wide <- dcast(\n test.selected,\n seed ~ loss.name,\n value.var=\"auc\")\ntest.compare <- melt(\n test.wide,\n id.vars=c(\"seed\", \"aum.count\"),\n variable.name=\"other.loss.name\",\n value.name=\"other.loss.auc\")\ntest.dt <- test.compare[, {\n L <- t.test(\n aum.count, other.loss.auc,\n alternative=\"greater\",\n paired=TRUE)\n L[c(\"statistic\",\"p.value\")]\n}, by=other.loss.name][order(p.value)]\nggplot()+\n geom_abline(slope=1, intercept=0, color=\"grey\")+\n theme_bw()+\n geom_point(aes(\n other.loss.auc, aum.count),\n data=test.compare)+\n coord_equal()+\n facet_grid(. ~ other.loss.name)\n\ntest.dt[, Loss := factor(other.loss.name, levs)]\ntext.x <- Inf\ntext.size <- 3\ntext.hjust <- 1\ngg <- ggplot()+\n geom_segment(aes(\n mean-sd, Loss,\n xend=mean+sd, yend=Loss),\n data=test.selected.stats)+\n geom_point(aes(\n mean, Loss),\n data=test.selected.stats)+\n geom_text(aes(\n text.x, Loss, label=sprintf(\"Diff=%.1f p=%.3f\", statistic, p.value)),\n vjust=-0.2,\n size=text.size,\n hjust=text.hjust,\n data=test.dt)+\n scale_x_continuous(\n \"Test AUC\")\npng(\"figure-sonar-comparisons.png\", width=4, height=1.5, res=200, units=\"in\")\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.8088701367378235,
"alphanum_fraction": 0.8088701367378235,
"avg_line_length": 51.55555725097656,
"blob_id": "f75e3f44e19129f7133a8296f83ebbc72147d334",
"content_id": "1361cf69af9ab25700d3be658a55ad9f9fba559c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 18,
"path": "/Makefile",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "slides.pdf: figure-compare-hinge-loss.png\n\tpdflatex slides\nfigure-compare-hinge-loss.png: figure-compare-hinge-loss.R figure-compare-hinge-loss-data.csv\n\tR --vanilla < $<\nfigure-compare-hinge-loss-data.csv: figure-compare-hinge-loss-data.R\n\tR --vanilla < $<\nfigure-neuroblastomaProcessed-combinations-interactive/index.html: figure-neuroblastomaProcessed-combinations-interactive.R neuroblastomaProcessed.combinations.rds\n\tR --vanilla < $<\nfigure-neuroblastomaProcessed-combinations.png: figure-neuroblastomaProcessed-combinations.R neuroblastomaProcessed.combinations.rds\n\tR --vanilla < $<\nfigure-curveAlignment/index.html: figure-curveAlignment.R curveAlignment.rds\n\tR --vanilla < $<\ncurveAlignment.rds: curveAlignment.R\n\tR --vanilla < $<\nneuroblastomaProcessed.combinations.rds: neuroblastomaProcessed.combinations.R\n\tR --vanilla < $<\nfigure-neuroblastomaProcessed-complex/index.html: figure-neuroblastomaProcessed-complex.R\n\tR --vanilla < $<\n\n"
},
{
"alpha_fraction": 0.6723846197128296,
"alphanum_fraction": 0.6791495680809021,
"avg_line_length": 39.57843017578125,
"blob_id": "2ad2ec88fa8ca03446b965cb16ef71d859d793c1",
"content_id": "cf3320a237914b15a63f459907141a8e60ff38c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4139,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 102,
"path": "/figure-aum-train-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nfolds.dt <- fread(\"../feature-learning-benchmark/labeled_problems_folds.csv\")\naddMeta <- function(dt){\n dt[, set.name := sub(\"/.*\", \"\", prob.dir)]\n dt[, problem := sub(\".*/\", \"\", prob.dir)]\n dt[folds.dt, on=list(set.name, problem)]\n}\nerrors.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_errors.csv\"))\npossible.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_possible_errors.csv\"))\nfeatures.dt <- fread(\"../feature-learning-benchmark/labeled_problems_features.csv\")\n\ntest.fold.info <- folds.dt[set.name==\"H3K4me3_XJ_immune\" & fold==4]\ntest.fold.errors <- errors.dt[test.fold.info, on=.(set.name, fold, problem)]\ntest.fold.errors[, min.log.lambda := min.log.penalty]\ntest.fold.errors[, max.log.lambda := max.log.penalty]\ntest.fold.errors[, seg.i := cumsum(\n c(1, diff(fp)!=0 | diff(fn) != 0)), by=.(prob.dir)]\npossible.errors <- possible.dt[test.fold.errors, on=list(\n set.name, fold, prob.dir)]\npossible.errors[, possible.fn := possible.tp]\ntest.fold.segs <- test.fold.errors[, .(\n min.log.lambda=min(min.log.lambda),\n max.log.lambda=max(max.log.lambda)\n), by=.(prob.dir, seg.i)]\ntest.fold.segs[, mid.log.lambda := (max.log.lambda+min.log.lambda)/2]\ntest.fold.targets <- penaltyLearning::targetIntervals(\n test.fold.errors, \"prob.dir\")\nall.features.mat <- as.matrix(features.dt[, -1, with=FALSE])\ntest.fold.features.dt <- features.dt[test.fold.targets$prob.dir, on=\"prob.dir\"]\nall.features.mat <- as.matrix(test.fold.features.dt[, -1, with=FALSE])\nsd.features <- apply(all.features.mat, 2, sd)\nkeep.feature <- is.finite(sd.features) & 0 < sd.features\nfinite.features.mat <- all.features.mat[, keep.feature]\ntest.fold.targets.mat <- test.fold.targets[, cbind(min.log.lambda, max.log.lambda)]\nscaled.features.mat <- scale(finite.features.mat)\nrownames(scaled.features.mat) <- test.fold.targets$prob.dir\n\n## No need to set seed, unregularized learning algorithm is\n## deterministic (resulting fit does not depend on random seed).\nkeep.obs <- apply(is.finite(test.fold.targets.mat), 1, any)\nfit <- penaltyLearning::IntervalRegressionUnregularized(\n scaled.features.mat[keep.obs,], test.fold.targets.mat[keep.obs,])\ncomputeAUM <- function(w, i){\n pred.pen.vec <- (scaled.features.mat %*% w) + i\n pred.dt <- data.table(\n prob.dir=rownames(pred.pen.vec),\n pred.log.lambda=as.numeric(pred.pen.vec))\n out.list <- penaltyLearning::ROChange(\n possible.errors, pred.dt, \"prob.dir\")\n out.list$intercept <- out.list$thresholds[\n threshold==\"min.error\", (min.thresh+max.thresh)/2]\n out.list$weight.vec <- w\n out.list\n}\ninitial.roc <- computeAUM(coef(fit)[-1], 0)\n\nthis.roc <- initial.roc\nneg.t.X.subtrain <- -t(scaled.features.mat)\niterations.dt.list <- list()\nfor(step.number in 1:20){\n print(iterations.dt.list[[paste(step.number)]] <- with(this.roc, data.table(\n step.number, aum, auc,\n min.errors=thresholds[threshold==\"min.error\", errors])))\n g.dt <- this.roc[[\"aum.grad\"]]\n ## If aum.grad has some problems with no changes in error then\n ## they may be missing.\n g.vec <- rep(0, ncol(neg.t.X.subtrain))\n names(g.vec) <- colnames(neg.t.X.subtrain)\n g.vec[\n g.dt[[\"prob.dir\"]]\n ] <- g.dt[[\"lo\"]]\n direction.vec <- neg.t.X.subtrain %*% g.vec\n take.step <- function(s){\n this.roc$weight.vec + s*direction.vec\n }\n step.roc.list <- list()\n for(step.size in 10^seq(-5, 0, by=0.25)){\n step.roc.list[[paste(step.size)]] <- computeAUM(take.step(step.size), 0)\n }\n aum.vec <- 
sapply(step.roc.list, \"[[\", \"aum\")\n this.roc <- step.roc.list[[which.min(aum.vec)]]\n}#iteration\niterations.dt <- do.call(rbind, iterations.dt.list)\n\nroc.list <- list(initial=initial.roc, improved=this.roc)\nroc.dt.list <- list()\naum.dt.list <- list()\nfor(pred.name in names(roc.list)){\n L <- roc.list[[pred.name]]\n roc.dt.list[[pred.name]] <- data.table(\n pred.name, L[[\"roc\"]])\n aum.dt.list[[pred.name]] <- with(L, data.table(\n pred.name, aum, auc, thresholds[threshold==\"min.error\"]))\n}\n\nout.list <- list(\n iterations=iterations.dt,\n roc=do.call(rbind, roc.dt.list),\n auc=do.call(rbind, aum.dt.list))\n\nsaveRDS(out.list, \"figure-aum-train-data.rds\")\n"
},
{
"alpha_fraction": 0.6070431470870972,
"alphanum_fraction": 0.6237953901290894,
"avg_line_length": 28.44827651977539,
"blob_id": "bf34de1cc9c2ed8f68fd35200b2813a7655761c5",
"content_id": "8a244b568e2ea71d173a05bbe62b0153693af2ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 11103,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 377,
"path": "/figure-aum-convexity-interactive.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(animint2)\nlibrary(data.table)\n\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\ndata(neuroblastoma, package=\"neuroblastoma\")\ne <- function(label, profile.id, chromosome){\n data.table(label, profile.id=factor(profile.id), chromosome=factor(chromosome))\n}\nselect.dt <- rbind(\n e(\"pos\", 4, 2),\n e(\"neg\", 513, 3))\nnb.list <- lapply(neuroblastoma, data.table)\nnb.some <- lapply(nb.list, \"[\", select.dt, on=.NATURAL)\nmax.segments <- max(neuroblastomaProcessed$errors$n.segments)\nnb.segs <- nb.some$profiles[, {\n cum.vec <- cumsum(c(0, logratio))\n d <- diff(position)/2\n between <- position[-1]-d\n data.start.pos <- c(position[1]-d[1], between)\n data.end.pos <- c(between, position[.N]+d[.N-1])\n fit <- jointseg::Fpsn(logratio, max.segments)\n end.t <- t(fit$t.est)\n end.dt <- data.table(\n end=as.integer(end.t),\n segments=as.integer(col(end.t))\n )[!is.na(end)]\n end.dt[, start := c(0, end[-.N])+1, by=segments]\n end.dt[, mean := (cum.vec[end+1]-cum.vec[start])/(end-start+1)]\n end.dt[, `:=`(\n start.pos=data.start.pos[start],\n end.pos=data.end.pos[end]\n )]\n}, by=label]\n\nsome.err <- neuroblastomaProcessed$errors[select.dt, .(\n segments=n.segments,\n fp, fn, possible.fp, possible.fn,\n min.log.lambda=-max.log.lambda,\n max.log.lambda=-min.log.lambda,\n errors, labels,\n label\n), on=list(profile.id, chromosome)]\nerr.sizes <- c(\n \"min(FP,FN)\"=2,\n FP=6,\n FN=4)\nerr.colors <- c(\n correct=\"transparent\",\n \"min(FP,FN)\"=\"black\",\n FP=\"red\",\n FN=\"deepskyblue\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=c(\"fp\",\"fn\"),\n variable.name=\"var.lower\")\nsome.err.tall[, error.type := toupper(var.lower)]\nleg <- \"Error type\"\n\ndmin <- 4\ndmax <- 6.5\nsome.err[, fp.diff := c(NA, diff(fp)), by=label]\nsome.err[, fn.diff := c(NA, diff(fn)), by=label]\nsome.diff <- some.err[fp.diff != 0 | fn.diff != 0, .(\n id=1, label, fp.diff, fn.diff, pred.log.lambda=min.log.lambda)]\nsome.diff[, fp.cum := cumsum(fp.diff), by=label]\nsome.diff[, fn.cum := rev(cumsum(rev(-fn.diff))), by=label]\ndlist <- split(some.diff, some.diff[[\"label\"]])\nborder.pred <- with(dlist, pos[ #orange dots\n neg,\n data.table(\n differentiable=FALSE,\n pos=pred.log.lambda,\n neg=i.pred.log.lambda),\n on=\"id\",\n allow.cartesian=TRUE])\ngrid.pred <- data.table( #black dots\n differentiable=TRUE,\n pos=0,\n neg=seq(dmin, dmax, by=0.05))\nboth.pred <- rbind(border.pred, grid.pred)\nboth.pred[, pred.diff := neg-pos]\npred.tall <- melt(\n both.pred,\n measure.vars=select.dt$label,\n variable.name=\"label\",\n value.name=\"pred.log.lambda\")[select.dt, nomatch=0L, on=\"label\"]\nmetrics.wide <- pred.tall[order(pred.diff)][, {\n L <- penaltyLearning::ROChange(some.err, .SD, \"label\")\n pos <- pred.log.lambda[label==\"pos\"]\n with(L, data.table(\n aum, auc,\n SM=roc[min.thresh < max.thresh, sum(min.fp.fn)],\n roc=list(roc[, `:=`(\n min.thresh=min.thresh+pos,\n max.thresh=max.thresh+pos\n )])\n ))\n}, by=list(pred.diff, differentiable)]\nmetrics.wide[auc==max(auc)] #max auc => aum>0.\nmetrics.wide[14:15, roc ]\n\n##compute slope and intercept of each of the 6 T_b(s) functions, plot\n##them using geom_abline, and geom_point to represent the 9\n##intersection points.\nsome.diff[, `:=`(\n slope=ifelse(label==\"pos\", 0, -1),\n intercept=pred.log.lambda-ifelse(label==\"pos\", 0, 6.5))]\n\n##ignore rest.\n\nshow.roc.dt <- metrics.wide[, data.table(\n roc[[1]],\n AUC=auc, AUM=round(aum,3)\n), by=pred.diff]\nshow.roc.tall <- melt(\n show.roc.dt,\n 
measure=c(\"fp\",\"fn\",\"min.fp.fn\"),\n variable.name=\"lower.var\")\nshow.roc.tall[, error.type := ifelse(\n lower.var==\"min.fp.fn\", \"min(FP,FN)\", toupper(lower.var))]\n\nmetrics.tall <- melt(\n metrics.wide,\n measure.vars=c(\"aum\", \"auc\"),\n variable.name=\"var.lower\"\n)[order(-differentiable)]\nmetrics.tall[, variable := toupper(var.lower)]\n\nshow.roc.dt[, roc.point := rank(min.thresh), by=pred.diff]\nthresh.offset <- 0.1\nshow.roc.dt[, text.constant := ifelse(\n min.thresh==-Inf, max.thresh-thresh.offset,\n ifelse(\n max.thresh==Inf,\n min.thresh+thresh.offset,\n (min.thresh+max.thresh)/2\n ))]\nshow.roc.dt[, text.roc.i := rank(roc.point), by=.(pred.diff, FPR, TPR)]\nshow.roc.dt[, text.FPR := text.roc.i*0.04+FPR]\ntext.size <- 15\ntext.color <- \"blue\"\nboth.pred.adj <- melt(both.pred[, .(\n differentiable,\n pos=0,\n neg=pred.diff,\n pred.diff\n)],\nmeasure.vars=select.dt$label,\nvariable.name = \"label\",\nvalue.name=\"pred.log.lambda\")\npred.tall.thresh <- both.pred.adj[\n show.roc.dt, on=\"pred.diff\", allow.cartesian=TRUE]\npred.tall.thresh[, pred.plus.constant := pred.log.lambda+text.constant]\npred.tall.thresh.wide <- dcast(\n pred.tall.thresh,\n pred.diff + roc.point ~ label,\n value.var=\"pred.plus.constant\"\n)[, label := \"neg\"]\nnb.models <- nb.segs[start==1, .(label, segments)]\nnb.changes <- nb.segs[start>1]\nerr.list <- penaltyLearning::labelError(\n nb.models,\n nb.some$annotations,\n nb.changes,\n model.vars=\"segments\",\n change.var=\"start.pos\",\n problem.vars=\"label\")\nselected.dt <- pred.tall.thresh[\n some.err,\n data.table(label, pred.diff, roc.point, segments),\n nomatch=NULL,\n on=.(\n label,\n pred.plus.constant < max.log.lambda,\n pred.plus.constant > min.log.lambda\n )]\nselected.segs <- nb.segs[selected.dt, on=.(\n label, segments), allow.cartesian=TRUE]\ntype.abbrev <- c(\n \"false negative\"=\"FN\",\n \"false positive\"=\"FP\",\n correct=\"correct\")\nselected.err <- err.list$label.errors[selected.dt, on=.(\n label, segments)][, error.type := type.abbrev[status] ]\nviz <- animint(\n title=\"Simple non-monotonic ROC curve\",\n out.dir=\"2021-11-12-aum-convexity\",\n overview=ggplot()+\n ggtitle(\"Overview, select difference\")+\n theme_bw()+\n theme(panel.margin=grid::unit(1, \"lines\"))+\n theme_animint(width=300, height=300)+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_fill_manual(values=c(\n \"TRUE\"=\"black\",\n \"FALSE\"=\"orange\"))+\n geom_point(aes(\n pred.diff, value, fill=differentiable),\n size=4,\n shape=21,\n data=metrics.tall)+\n make_tallrect(metrics.tall, \"pred.diff\")+ \n xlab(\"Prediction difference, f(neg) - f(pos)\")+\n coord_cartesian(xlim=c(dmin,dmax))+\n scale_y_continuous(\"\", breaks=seq(0, 3, by=1)),\n data=ggplot()+\n ggtitle(\"Data, labels, predicted changepoint models\")+\n theme_bw()+\n theme(legend.position=\"none\")+\n theme_animint(width=600, height=300)+\n geom_tallrect(aes(\n xmin=min/1e6, xmax=max/1e6, fill=annotation),\n alpha=0.5,\n data=nb.some$annotations)+\n scale_fill_manual(\n \"label\",\n values=c(\n breakpoint=\"violet\",\n normal=\"orange\"))+\n geom_point(aes(\n position/1e6, logratio),\n color=\"grey50\",\n data=nb.some$profiles)+\n geom_tallrect(aes(\n xmin=min/1e6, xmax=max/1e6,\n color=error.type),\n data=selected.err,\n showSelected=c(\"pred.diff\", \"roc.point\"),\n size=5,\n fill=\"transparent\")+\n geom_segment(aes(\n start.pos/1e6, mean,\n xend=end.pos/1e6, yend=mean),\n data=selected.segs,\n size=3,\n color=text.color,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n 
geom_vline(aes(\n xintercept=start.pos/1e6),\n data=selected.segs[start>1],\n size=2,\n color=text.color,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n scale_color_manual(leg,values=err.colors)+\n facet_grid(label ~ ., labeller=label_both)+\n scale_y_continuous(\n \"DNA copy number (logratio)\")+\n scale_x_continuous(\n \"Position on chromosome\"),\n obsErr=ggplot()+\n ggtitle(\"Example error functions\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n theme(legend.position=\"none\")+\n theme_animint(width=300, height=300)+\n facet_grid(label ~ ., labeller=label_both)+\n geom_vline(aes(\n xintercept=pred.plus.constant),\n data=pred.tall.thresh,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=error.type, size=error.type),\n showSelected=\"error.type\",\n data=some.err.tall)+\n geom_segment(aes(\n pos, -Inf,\n xend=neg, yend=-Inf),\n data=pred.tall.thresh.wide,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n geom_text(aes(\n neg-0.1, -0.3,\n label=sprintf(\"pred.diff=%.2f\", pred.diff)),\n hjust=1,\n data=pred.tall.thresh.wide,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n scale_y_continuous(\n \"Label errors\",\n breaks=c(0,1),\n limits=c(-0.4, 1.4))+\n scale_color_manual(leg,values=err.colors)+\n scale_size_manual(leg,values=err.sizes)+\n scale_x_continuous(\n \"Predicted value f(x)\"),\n totals=ggplot()+\n ggtitle(\"Total error, select interval\")+\n theme_bw()+\n theme(panel.grid.minor=element_blank())+\n theme_animint(width=300, height=300)+\n geom_rect(aes(\n xmin=min.thresh, xmax=max.thresh,\n ymin=0, ymax=min.fp.fn),\n fill=\"grey50\",\n color=NA,\n alpha=0.5,\n showSelected=\"pred.diff\",\n show.roc.dt)+\n geom_segment(aes(\n min.thresh, value,\n xend=max.thresh, yend=value,\n color=error.type, size=error.type),\n showSelected=\"pred.diff\",\n data=show.roc.tall)+\n geom_vline(aes(\n xintercept=text.constant),\n showSelected=c(\"pred.diff\", \"roc.point\"),\n color=text.color,\n alpha=0.5,\n data=show.roc.dt)+\n geom_text(aes(\n text.constant, -0.25, label=roc.point),\n showSelected=\"pred.diff\",\n size=text.size,\n color=text.color,\n data=show.roc.dt)+\n geom_text(aes(\n -1.5, 0.25, label=sprintf(\"AUM=%.2f\", aum)),\n data=metrics.wide,\n showSelected=\"pred.diff\")+\n geom_tallrect(aes(\n xmin=min.thresh, xmax=max.thresh),\n data=show.roc.dt,\n fill=text.color,\n clickSelects=\"roc.point\",\n showSelected=\"pred.diff\",\n color=\"transparent\",\n alpha=0.1)+\n scale_y_continuous(\n \"Label errors\",\n breaks=c(0,1))+\n scale_color_manual(leg,values=err.colors)+\n scale_size_manual(leg,values=err.sizes)+\n geom_blank(aes(\n x, y),\n data=data.table(x=0, y=c(-0.4,1.4)))+\n scale_x_continuous(\n \"Constant added to pred. 
values\"),\n roc=ggplot()+\n ggtitle(\"ROC curve, select point\")+\n theme_bw()+\n theme(panel.grid.minor=element_blank())+\n theme_animint(width=300, height=300)+\n geom_path(aes(\n FPR, TPR),\n showSelected=\"pred.diff\",\n data=show.roc.dt)+\n geom_text(aes(\n 0.5, 0.5, label=paste0(\"AUC=\", auc)),\n data=metrics.wide,\n showSelected=\"pred.diff\")+\n scale_x_continuous(\n \"False Positive Rate\",\n breaks=seq(0,1,by=0.5))+\n scale_y_continuous(\n \"True Positive Rate\",\n breaks=seq(0,1,by=0.5))+\n geom_point(aes(\n FPR, TPR),\n data=show.roc.dt,\n size=4,\n alpha=0.5,\n color=text.color,\n showSelected=c(\"pred.diff\", \"roc.point\"))+\n geom_text(aes(\n text.FPR, TPR+0.01, label=roc.point),\n size=text.size,\n color=text.color,\n showSelected=\"pred.diff\",\n clickSelects=\"roc.point\",\n data=show.roc.dt),\n time=list(\n variable=\"pred.diff\",\n ms=500)\n)\n##viz\n##animint2gist(viz)\n\n"
},
{
"alpha_fraction": 0.6246753334999084,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 34.80232620239258,
"blob_id": "01f5f05f84fad5ea9069cab45e1b8d59a2c1fab1",
"content_id": "303222d7d86b9d944be11ad0a764c9946e7aaf3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3080,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 86,
"path": "/figure-test-fold-monotonic.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nset.name <- \"systematic\"\nset.path <- file.path(\"../neuroblastoma-data/data\", set.name)\ncv.type <- \"R-3.6.0-profileSize\"\nfolds.csv <- file.path(set.path, \"cv\", cv.type, \"folds.csv\")\nfold.dt <- data.table::fread(folds.csv)\ntest.seqs <- fold.dt[fold==1]\nerrors.csv <- file.path(set.path, \"errors.csv\")\nif(!file.exists(errors.csv)){\n errors.csv.xz <- paste0(errors.csv, \".xz\")\n system(paste(\"unxz\", errors.csv.xz))\n}\nerr.dt <- data.table::fread(errors.csv)\nerr.test <- err.dt[test.seqs, on=\"sequenceID\"]\nerr.tall <- data.table::melt(\n err.test,\n measure=c(\"fp\", \"fn\"),\n id=c(\"sequenceID\", \"n.segments\"))\nerr.tall[, diff := c(NA, diff(value)), by=.(sequenceID, variable)]\nerr.tall[!is.na(diff) & diff != 0, .(\n count=.N\n), by=sequenceID][order(count)]\nerr.tall[sequenceID == \"508_chr2\"]\n\nd <- function(data.name, cv.type, test.fold){\n data.table(data.name, cv.type, test.fold)\n}\ndata.dt <- rbind(\n d(\"ATAC_JV_adipose\", \"equal_labels\", 4),\n d(\"H3K27ac-H3K4me3_TDHAM_BP\", \"equal_labels\", 2),\n d(\"H3K4me3_XJ_immune\", \"equal_labels\", 2),\n d(\"H3K4me3_XJ_immune\", \"equal_labels\", 4),\n d(\"systematic\", \"R-3.6.0-profileSize\", 1))\nmeta.dt.list <- list()\nfor(data.i in 1:nrow(data.dt)){\n data.row <- data.dt[data.i]\n set.path <- file.path(\"../neuroblastoma-data/data\", data.row$data.name)\n folds.csv <- file.path(set.path, \"cv\", data.row$cv.type, \"folds.csv\")\n fold.dt <- data.table::fread(folds.csv)\n inputs.csv <- file.path(set.path, \"inputs.csv\")\n if(!file.exists(inputs.csv)){\n inputs.csv.xz <- paste0(inputs.csv, \".xz\")\n system(paste(\"unxz\", inputs.csv.xz))\n }\n inputs.dt <- data.table::fread(inputs.csv)\n inputs.mat <- as.matrix(inputs.dt[, -1, with=FALSE])\n keep <- apply(is.finite(inputs.mat), 2, all)\n errors.csv <- file.path(set.path, \"evaluation.csv\")\n if(!file.exists(errors.csv)){\n errors.csv.xz <- paste0(errors.csv, \".xz\")\n system(paste(\"unxz\", errors.csv.xz))\n }\n err.dt <- data.table::fread(errors.csv)\n train.seqs <- fold.dt[fold != data.row$test.fold]\n err.train <- err.dt[train.seqs, on=\"sequenceID\"]\n err.tall <- data.table::melt(\n err.train,\n measure=c(\"fp\", \"fn\"),\n id=c(\"sequenceID\", \"min.log.lambda\"))\n err.tall[, diff := c(NA, diff(value)), by=.(sequenceID, variable)]\n some.diff <- err.tall[!is.na(diff) & diff != 0]\n wide.diff <- data.table::dcast(some.diff, sequenceID + min.log.lambda ~ variable)\n (wide.count <- wide.diff[, .(\n count=.N\n ), by=sequenceID][order(count)])\n (break.dt <- some.diff[, .(\n count=.N\n ), by=sequenceID][order(count)])\n meta.dt.list[[data.i]] <- data.table(\n data.row,\n features=sum(keep),\n n.train=nrow(train.seqs),\n mean.wide=mean(wide.count$count),\n mean.breaks=mean(break.dt$count),\n total.breaks=sum(break.dt$count))\n}\n(meta.dt <- do.call(rbind, meta.dt.list))\n\nmeta.dt[, .(data.name, test.fold, features, n.train, mean.breaks, mean.wide)]\nmeta.tall <- data.table::melt(\n meta.dt,\n measure=c(\"features\", \"n.train\", \"mean.breaks\", \"total.breaks\"))\n(meta.stats <- meta.tall[, .(\n min=min(value),\n max=max(value)\n), by=variable])\n\n"
},
{
"alpha_fraction": 0.6429791450500488,
"alphanum_fraction": 0.6508370637893677,
"avg_line_length": 39.63888931274414,
"blob_id": "903e6dba9a9c52b6f209f2a21633f00a7042ab8a",
"content_id": "a816920d31e57a1517f17dc01948c172fe1d7c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2927,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 72,
"path": "/figure-aum-grad-speed-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nfolds.dt <- fread(\"../feature-learning-benchmark/labeled_problems_folds.csv\")\naddMeta <- function(dt){\n dt[, set.name := sub(\"/.*\", \"\", prob.dir)]\n dt[, problem := sub(\".*/\", \"\", prob.dir)]\n dt[folds.dt, on=list(set.name, problem)]\n}\nerrors.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_errors.csv\"))\npossible.dt <- addMeta(fread(\"../feature-learning-benchmark/labeled_problems_possible_errors.csv\"))\n\nerrors.dt[, min.log.lambda := min.log.penalty]\nerrors.dt[, max.log.lambda := max.log.penalty]\ntest.fold.targets <- penaltyLearning::targetIntervals(\n errors.dt, \"prob.dir\")\nerrors.dt[, min.lambda := exp(min.log.lambda)]\nerrors.dt[, example := prob.dir]\nfinite.targets <- test.fold.targets[\n is.finite(min.log.lambda) | is.finite(max.log.lambda)\n][order(prob.dir)]\nfinite.targets[, pred.in.interval := data.table::fcase(\n min.log.lambda == -Inf, max.log.lambda-1,\n max.log.lambda == Inf, min.log.lambda+1,\n -Inf < min.log.lambda & max.log.lambda < Inf, (min.log.lambda+max.log.lambda)/2)]\nfinite.targets[is.na(pred.in.interval)]\nset.seed(1)\nfinite.targets[, pred.rnorm := rnorm(.N)]\npred.names <- finite.targets[, prob.dir]\ndiff.dt <- aum::aum_diffs_penalty(errors.dt[order(prob.dir)], pred.names)\n\n## if(!file.exists(\"signal.list.annotation.sets.RData\")){\n## download.file(\"https://rcdata.nau.edu/genomic-ml/cbio/neuroblastoma/signal.list.annotation.sets.RData\", \"signal.list.annotation.sets.RData\")\n## }\n## (objs <- load(\"signal.list.annotation.sets.RData\"))\n\nsquared.hinge.fun.list <- list(\n loss=function(x, e=1)ifelse(x<e,(x-e)^2,0),\n grad=function(x,e=1)ifelse(x<e,2*(x-e),0))\nmax.N <- length(pred.names)\nN.vec <- as.integer(10^seq(1, log10(max.N), l=10))\nex.counts <- table(diff.dt[[\"example\"]])\npred.type.vec <- grep(\"pred\", names(finite.targets), value=TRUE)\ntiming.dt.list <- list()\nfor(N in N.vec){\n diff.N <- diff.dt[example < N]\n ex.N <- ex.counts[1:N]\n targets.N <- finite.targets[1:N]\n for(pred.type in pred.type.vec){\n pred.vec <- targets.N[[pred.type]]\n timing.df <- microbenchmark::microbenchmark(sort={\n sort(rep(pred.vec, ex.N)-diff.N[[\"pred\"]])\n }, aum={\n aum::aum(diff.N, pred.vec)\n }, squared.hinge.each.example={\n arg.mat <- cbind(\n min=pred.vec-targets.N[[\"min.log.lambda\"]],\n max=targets.N[[\"max.log.lambda\"]]-pred.vec)\n result.mat.list <- list()\n for(fun.name in names(squared.hinge.fun.list)){\n fun <- squared.hinge.fun.list[[fun.name]]\n result.mat.list[[fun.name]] <- fun(arg.mat)\n }\n with(result.mat.list, list(\n loss=mean(loss[,\"min\"]+loss[,\"max\"]),\n grad=grad[,\"min\"]-grad[,\"max\"]))\n }, times=10)\n timing.dt.list[[paste(N, pred.type)]] <- with(timing.df, data.table(\n N, pred.type, seconds=time/1e9, algorithm=expr))\n }\n}\n(timing.dt <- do.call(rbind, timing.dt.list))\ndata.table::fwrite(timing.dt, \"figure-aum-grad-speed-data.csv\")\n\n"
},
{
"alpha_fraction": 0.5571331977844238,
"alphanum_fraction": 0.5805273652076721,
"avg_line_length": 38.1216926574707,
"blob_id": "2c98a12944f818ac5704610aefd035d5ab2f8006",
"content_id": "f9553ea772099184c4e284aaa0980366a78ab994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7395,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 189,
"path": "/figure-unbalanced-grad-desc-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nPairsDT <- function(output.vec){\n is.positive <- output.vec == 1\n data.table::data.table(expand.grid(\n positive=which(is.positive),\n negative=which(!is.positive)))\n}\nequal.class.weights <- function(output.vec){\n otab <- table(output.vec)\n as.numeric(1/otab[paste(output.vec)])\n}\nLogistic <- function(pred.vec, output.vec, obs.weights){\n list(\n gradient=-obs.weights*output.vec/(1+exp(output.vec*pred.vec)),\n loss=sum(obs.weights*log(1+exp(-output.vec*pred.vec))))\n}\nAUM <- function(pred.vec, diff.dt){\n L <- aum::aum(diff.dt, pred.vec)\n d <- L$derivative_mat\n non.diff <- abs(d[,1] - d[,2]) > 1e-6\n if(any(non.diff)){\n ## Some non-differentiable points that were actually observed!\n ## data=DNA fold=1 loss=aum.rate step=0.001000\n ## [,1] [,2]\n ## [1,] -0.001956947 -0.001175589\n ## data=DNA fold=1 loss=aum.rate step=1000.000000\n ## [,1] [,2]\n ## [1,] -0.0006463963 0\n cat(sprintf(\"%d non-diff points\\n\", sum(non.diff)))\n print(d[non.diff, ])\n }\n ## ifelse( derivative_mat[,1] == 0 | derivative_mat[,2] == 0, 0, ??\n with(L, list(\n gradient=(derivative_mat[,1]+derivative_mat[,2])/2,\n loss=aum))\n}\nzip.X.list <- list()\nzip.y.list <- list()\nfor(set in c(\"train\", \"test\")){\n f <- sprintf(\"zip.%s.gz\", set)\n if(!file.exists(f)){\n u <- paste0(\"https://web.stanford.edu/~hastie/ElemStatLearn/datasets/\", f)\n download.file(u, f)\n }\n zip.dt <- data.table::fread(f)\n y.vec <- zip.dt[[1]]\n is.01 <- y.vec %in% 0:1\n y01.dt <- data.table(label=y.vec[is.01])\n y01.dt[, cum := 1:.N, by=label]\n max.dt <- y01.dt[, .(max=max(cum)), by=label]\n keep <- y01.dt$cum <= min(max.dt[[\"max\"]])\n zip.y.list[[set]] <- y01.dt[keep, label]\n zip.X.list[[set]] <- as.matrix(zip.dt[is.01, -1, with=FALSE][keep,])\n}\n(y.tab <- sapply(zip.y.list, table))\n\ntrain.set.list <- list(\n full=list(X=zip.X.list[[\"train\"]], y=zip.y.list[[\"train\"]]))\nprop.pos.vec <- some.props <- c(0.01, 0.05, 0.5)\n##want p/(p + n) = 0.05 => 0.05*(p+n) = p => 0.05p + 0.05n = p => 0.05n = 0.95p => p = 0.05 / 0.95n\nmin.prop.pos <- min(prop.pos.vec)\nmin.n.pos <- as.integer(min.prop.pos/(1-min.prop.pos) * y.tab[\"0\", \"train\"])\nmin.total <- min.n.pos + y.tab[\"0\", \"train\"]\nc(min.n.pos, y.tab[\"0\", \"train\"])/min.total\nN.obs <- 1000\ntrain.y.dt <- data.table(label=zip.y.list[[\"train\"]])\ntrain.y.dt[, i := 1:.N]\ntest.y <- zip.y.list[[\"test\"]]\ntest.X <- zip.X.list[[\"test\"]]\nresult.dt.list <- list()\nselected.dt.list <- list()\nfor(prop.pos in prop.pos.vec){\n prop.dt <- rbind(\n data.table(prop=prop.pos, label=1),\n data.table(prop=1-prop.pos, label=0))\n prop.dt[, class.N := as.integer(N.obs*prop) ]\n prop.dt[, weight := 1/class.N]\n for(seed in 1:10){\n cat(sprintf(\"prop=%f seed=%d\\n\", prop.pos, seed))\n set.seed(seed)\n index.dt <- prop.dt[train.y.dt, on=\"label\"][, .(\n i=.SD[sample(1:.N), i[1:class.N] ]\n ), by=.(label, weight, class.N)]\n seed.i <- index.dt[[\"i\"]]\n seed.y <- zip.y.list[[\"train\"]][seed.i]\n seed.X <- zip.X.list[[\"train\"]][seed.i,]\n weight.list <- list(\n identity=rep(1, length(seed.y)),\n balanced=index.dt[[\"weight\"]])\n pred.list <- list()\n for(weight.name in names(weight.list)){\n weight.vec <- weight.list[[weight.name]]\n fit <- glmnet::cv.glmnet(seed.X, seed.y, weight.vec, family=\"binomial\")\n seed.pred <- predict(fit, test.X)\n pred.list[[paste0(\"cv.glmnet.\", weight.name)]] <- seed.pred\n }\n y.tilde <- ifelse(seed.y==0, -1, 1)\n is.validation <- rep(c(TRUE, FALSE), l=length(y.tilde))\n set.list <- 
list(subtrain=!is.validation, validation=is.validation)\n data.by.set <- list()\n for(set.name in names(set.list)){\n is.set <- set.list[[set.name]]\n data.by.set[[set.name]] <- list(X=seed.X[is.set,], y=y.tilde[is.set])\n }\n is.subtrain <- !is.validation\n y.subtrain <- y.tilde[is.subtrain]\n X.subtrain <- seed.X[is.subtrain,]\n diff.rate.dt <- aum::aum_diffs_binary(y.subtrain, denominator=\"rate\")\n diff.count.dt <- aum::aum_diffs_binary(y.subtrain, denominator=\"count\")\n pairs.dt <- PairsDT(y.subtrain)\n loss.list <- list(\n logistic=function(pred.vec){\n Logistic(pred.vec, y.subtrain, 1/length(pred.vec))\n },\n logistic.weighted=function(pred.vec){\n Logistic(pred.vec, y.subtrain, index.dt[is.subtrain, weight])\n },\n aum.count=function(pred.vec){\n AUM(pred.vec, diff.count.dt)\n },\n aum.rate=function(pred.vec){\n AUM(pred.vec, diff.rate.dt)\n },\n squared.hinge.all.pairs=function(pred.vec, margin=1){\n pairs.dt[, diff := pred.vec[positive]-pred.vec[negative]-margin]\n pairs.dt[, diff.clipped := ifelse(diff<0, diff, 0)]\n pairs.tall <- data.table::melt(\n pairs.dt,\n measure.vars=c(\"positive\", \"negative\"),\n value.name=\"pred.i\",\n variable.name=\"label\")\n ## d/dx (x - y - m)^2 = x - y - m\n ## d/dy (x - y - m)^2 = -(x - y - m)\n pairs.tall[, grad.sign := ifelse(label==\"positive\", 1, -1)]\n N.pairs <- nrow(pairs.dt)\n grad.dt <- pairs.tall[, .(\n gradient=sum(grad.sign*diff.clipped)\n ), keyby=pred.i]\n list(\n gradient=grad.dt$gradient/N.pairs,\n loss=sum(pairs.dt$diff.clipped^2)/N.pairs)\n }\n )\n for(loss.name in names(loss.list)){\n loss.fun <- loss.list[[loss.name]]\n step.candidates <- 10^seq(-2, 2, by=0.5)\n selection.dt.list <- list()\n test.pred.list <- list()\n for(step.size in step.candidates){\n weight.vec <- rnorm(ncol(seed.X))\n for(iteration in 1:1000){\n pred.vec <- X.subtrain %*% weight.vec\n loss.info <- loss.fun(pred.vec)\n direction <- -t(X.subtrain) %*% loss.info[[\"gradient\"]]\n weight.vec <- weight.vec + step.size * direction\n test.pred.list[[paste(step.size, iteration)]] <- test.X %*% weight.vec\n for(set.name in \"validation\"){\n Xy.list <- data.by.set[[set.name]]\n set.pred.vec <- Xy.list[[\"X\"]] %*% weight.vec\n roc.df <- WeightedROC::WeightedROC(set.pred.vec, Xy.list[[\"y\"]])\n auc <- WeightedROC::WeightedAUC(roc.df)\n selection.dt.list[[paste(step.size, iteration, set.name)]] <-\n data.table(step.size, iteration, set.name, auc)\n }#set.name\n }#iteration\n }#step.size\n selection.dt <- do.call(rbind, selection.dt.list)\n selected <- selection.dt[set.name==\"validation\"][which.max(auc)]\n selected.dt.list[[paste(prop.pos, seed, loss.name)]] <- data.table(\n prop.pos, seed, loss.name, selected)\n pred.list[[loss.name]] <- selected[\n , test.pred.list[[paste(step.size, iteration)]] ]\n }#loss.name\n for(model in names(pred.list)){\n seed.pred <- pred.list[[model]]\n roc.df <- WeightedROC::WeightedROC(seed.pred, test.y)\n seed.pred.class <- ifelse(0<seed.pred, 1, 0)\n accuracy <- mean(seed.pred.class == test.y)\n auc <- WeightedROC::WeightedAUC(roc.df)\n result.dt.list[[paste(prop.pos, seed, model)]] <- data.table(\n prop.pos, seed, model, accuracy, auc)\n }\n }#seed\n}#prop.pos\n(result.dt <- do.call(rbind, result.dt.list))\n(selected.dt <- do.call(rbind, selected.dt.list))\n\nsaveRDS(list(result=result.dt, selected=selected.dt, N.obs=nrow(seed.X)), file=\"figure-unbalanced-grad-desc-data.rds\")\n\n"
},
{
"alpha_fraction": 0.6173588633537292,
"alphanum_fraction": 0.6495566964149475,
"avg_line_length": 31.96923065185547,
"blob_id": "3a5909999b85c13b7a46ef9bffa8ee6764b83e7a",
"content_id": "41d5603869ba17f100684970b5882de5663a25b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2143,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 65,
"path": "/figure-aum-grad-speed-binary-cpp.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ntiming.dt <- data.table::fread(\"figure-aum-grad-speed-binary-cpp-data.csv\")\ntiming.stats <- timing.dt[, .(\n max=max(seconds),\n median=median(seconds),\n min=min(seconds),\n times=.N\n), by=.(N, prediction.order, Algorithm=sub(\" All\", \"\\nAll\", algorithm))]\n\nalgo.colors <- c(\n \"Squared Hinge\\nAll Pairs\"=\"#A6CEE3\",\n \"squared hinge each example\"=\"#1F78B4\",\n \"Logistic\"=\"#B2DF8A\", #\"#33A02C\",\"#FB9A99\", \"#E31A1C\", \"#FDBF6F\", \"#FF7F00\", \"#CAB2D6\", \"#6A3D9A\", \"#FFFF99\", \"#B15928\"\n \"AUM\"=\"black\"\n)\nunsorted <- timing.stats[prediction.order == \"unsorted\"]\nmax.N <- unsorted[, max(N)]\ngg <- ggplot()+\n ggtitle(\"Binary classification\")+\n theme(legend.position='none')+\n scale_color_manual(values=algo.colors)+\n scale_fill_manual(values=algo.colors)+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=Algorithm),\n alpha=0.5,\n data=unsorted)+\n geom_line(aes(\n N, median, color=Algorithm),\n data=unsorted)+\n scale_x_log10(\n \"Number of predictions\",\n limits=c(10, max.N*10))+\n scale_y_log10(\n \"Computation time in seconds,\nmedian line, min/max band, 10 timings\") +\n geom_dl(aes(N, median, label = Algorithm, color = Algorithm), \n method = \"right.polygons\",\n data = unsorted[Algorithm == \"Squared Hinge\\nAll Pairs\",]) +\n geom_dl(aes(N, median, label = Algorithm, color = Algorithm), \n method = \"right.polygons\",\n data = unsorted[Algorithm != \"Squared Hinge\\nAll Pairs\",])\npng(\"figure-aum-grad-speed-binary-cpp-algos.png\", width=5, height=4, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\ngg <- ggplot()+\n facet_grid(. ~ algorithm, labeller=label_both)+\n geom_ribbon(aes(\n N, ymin=min, ymax=max, fill=prediction.order),\n alpha=0.5,\n data=timing.stats)+\n geom_line(aes(\n N, median, color=prediction.order),\n data=timing.stats)+\n scale_x_log10(\n \"Number of predictions\",\n limits=c(10, max.N*10))+\n scale_y_log10(\n \"Computation time in seconds,\nmedian line, min/max band, 10 timings\")\ndl <- directlabels::direct.label(gg, \"right.polygons\")\npng(\"figure-aum-grad-speed-binary-cpp.png\", width=10, height=3, res=200, units=\"in\")\nprint(dl)\ndev.off()\n"
},
{
"alpha_fraction": 0.5731229186058044,
"alphanum_fraction": 0.5896245837211609,
"avg_line_length": 28.29305076599121,
"blob_id": "b0198469d710cc7c89e5ea73781efe915d3a985b",
"content_id": "812bba7148695e3e12561660f9cc8f14b6f78f26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 9696,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 331,
"path": "/figure-neuroblastomaProcessed-combinations-interactive.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\nnb.comb <- readRDS(\"neuroblastomaProcessed.combinations.rds\")\n\nroc.dt <- nb.comb$auc[, data.table(\n roc[[1]][[1]]\n), by=.(size, combo.i)]\nperfect <- roc.dt[FPR==0 & TPR==1]\nnb.comb$auc[!perfect, on=.(size, combo.i)]\n\none.combo <- roc.dt[combo.i==256]\nggplot()+\n geom_path(aes(\n FPR, TPR),\n data=one.combo)+\n facet_wrap(\"size\")\nwrong.way <- roc.dt[, {\n diff.dt <- data.table(\n dtpr=diff(TPR),\n dfpr=diff(FPR))\n diff.dt[, .(\n TPR=sum(dtpr[dtpr<0]),\n FPR=sum(dfpr[dfpr<0])\n )]\n}, by=.(size, combo.i)]\nu.roc <- roc.dt[, {\n list(n.uniq=nrow(unique(data.table(FPR, TPR))))\n}, by=.(size, combo.i)]\nauc.stats <- nb.comb$auc[u.roc, .(\n size, combo.i, auc, aub, n.finite, n.uniq,\n panel.key=paste(\"uniq\", n.uniq, auc)\n), on=.(size, combo.i)]\n\nauc.stat.counts <- auc.stats[, .(\n count=.N\n), by=.(size, n.uniq, panel.key, auc)]\nggplot()+\n facet_wrap(\"size\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n geom_hline(yintercept=1, col=\"grey\")+\n scale_fill_gradient(low=\"white\", high=\"red\")+\n scale_y_continuous(breaks=seq(0, 1.2, by=0.2))+\n scale_x_continuous(breaks=seq(0, 20, by=2))+\n geom_point(aes(\n n.uniq, auc, fill=count),\n shape=21,\n size=4,\n data=auc.stat.counts)\n\n## ideas for manual histogram.\nmax.finite <- max(auc.stats$n.finite)\nbreak.vec <- seq(0, 10, by=0.5)\nedge.vec <- seq(\n 0, max.finite, l=11)\nrect.dt <- data.table(\n xmin=edge.vec[-length(edge.vec)],\n xmax=edge.vec[-1])\n##link two plots, show details.\nauc.stats[, round.auc := round(auc, 4)]\nrfac <- 5\nauc.stats[, round.aub := round(aub*rfac)/rfac]\nauc.stats[, prop.finite := n.finite/max.finite]\npanel.titles <- c(\n round.aub=\"Area under both\",\n prop.finite=\"Proportion predictions in finite interval\",\n round.auc=\"Area under ROC curve\")\nxlevs <- c(\"prop.finite\", \"round.aub\")\nylevs <- c(\"prop.finite\", \"round.auc\")\npanel.dt <- data.table(\n expand.grid(\n xvar=xlevs,\n yvar=ylevs,\n stringsAsFactors=FALSE)\n)[xvar!=yvar]\nauc.panels <- panel.dt[, {\n auc.stats[, {\n getvar <- function(v)as.numeric(.SD[[v]])\n dt <- data.table(size, combo.i)\n for(xy in c(\"x\", \"y\")){\n v <- get(paste0(xy, \"var\"))\n L <- list(\n val=v,\n orig=sub(\"round.\", \"\", v))\n for(suffix in names(L)){\n to.col <- paste0(xy, suffix)\n from.col <- L[[suffix]]\n dt[[to.col]] <- as.numeric(.SD[[from.col]])\n }\n }\n dt\n }]\n}, by=.(xvar, yvar)]\nxfac <- function(val)factor(val, xlevs, panel.titles[xlevs])\nyfac <- function(val)factor(val, ylevs, panel.titles[ylevs])\nauc.panels[, xfac := xfac(xvar)]\nauc.panels[, yfac := yfac(yvar)]\nauc.panels[, key := paste(xval, yval)]\nauc.panels[, panel.key := paste(xvar, yvar, key)]\ncount.dt <- auc.panels[, .(\n combos=.N\n), by=.(size, xfac, yfac, xval, yval, key, panel.key)]\nggplot()+facet_wrap(\"size\")+geom_bar(aes(combos), data=count.dt)\nggplot()+facet_wrap(\"size\")+geom_bar(aes(log10(combos)), data=count.dt)\ncombos.for.panel.key <- count.dt[auc.panels, .(\n size, panel.key, combo.i\n), on=.(\n size, xfac, yfac, xval, yval, key, panel.key)]\n## use first.orig to avoid overplotting clickable points.\nfirst.orig <- auc.panels[, .SD[1], by=.(size, xfac, yfac, xorig, yorig)]\nauc.orig <- combos.for.panel.key[auc.panels, .(\n panel.key, xfac=i.xfac, yfac=i.yfac, size,\n combo.i,\n xorig,\n yorig\n), allow.cartesian=TRUE, on=.(size, combo.i)]\n## for each panel.key, find all original data points with combo.i values.\nXPANEL <- function(val, ...){\n data.table(..., xfac=xfac(val))\n}\nYPANEL <- 
function(val, ...){\n data.table(..., yfac=yfac(val))\n}\ncount.wide <- dcast(\n count.dt,\n xfac + yfac + xval + yval + key + panel.key ~ size,\n value.var=\"combos\")\ncount.tall <- melt(\n count.wide,\n id.vars=c(\"xfac\", \"yfac\", \"xval\", \"yval\", \"key\", \"panel.key\"),\n variable.name=\"size\",\n value.name=\"combos\")\ncount.tall[is.na(combos), combos := 0]\ncount.tall[, combos.chr := ifelse(combos==0, \"none\", \"some\")]\nroc.color <- \"deepskyblue\"\nsmall.size <- 4\nsmall.alpha <- 0.7\nmed.size <- 6\nbig.size <- 7\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nroc.dt[, thresh.i := 1:.N, by=.(size, combo.i)]\nroc.dt[, min.fp.fn := ifelse(fp<fn, fp, fn)]\nroc.dt[, width.new := ifelse(\n min.fp.fn>0, width.thresh, ifelse(\n width.thresh != 0.1, 0.1))]\nroc.dt[, max.new := cumsum(width.new), by=.(size, combo.i)]\nroc.dt[, min.new := c(0, max.new[-.N]), by=.(size, combo.i)]\nroc.dt.tall <- melt(\n roc.dt,\n measure.vars=names(err.colors))\naub0 <- auc.stats[, .(\n aub0=sum(aub==0),\n auc1=sum(auc==1),\n auc.over1=sum(auc>1)\n), by=.(size)]\naub0.tall <- melt(aub0, id.vars=\"size\")\nanimint(\n title=\"Generalized ROC curve metrics\",\n out.dir=\"figure-neuroblastomaProcessed-combinations-interactive\",\n sizes=ggplot()+\n ggtitle(\"Select margin size\")+\n theme_bw()+\n theme_animint(width=250, height=250)+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n facet_grid(variable ~ ., scales=\"free\")+\n scale_x_continuous(\n \"Margin size of prediction wrt infinite interval\")+\n scale_y_continuous(\"Number of prediction combinations\")+\n geom_point(aes(\n log10(size), value),\n data=aub0.tall)+\n geom_tallrect(aes(\n xmin=log10(size)-0.5,\n xmax=log10(size)+0.5),\n data=aub0,\n alpha=0.5,\n clickSelects=\"size\"),\n scatter=ggplot()+\n ggtitle(\"ROC curve, AUC/AUB distribution, select prediction\")+\n theme_bw()+\n theme_animint(width=500, height=500)+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n guides(color=\"none\")+\n facet_grid(yfac ~ xfac, scales=\"free\")+\n scale_x_continuous(\"\", breaks=break.vec)+\n scale_y_continuous(\"\", breaks=break.vec)+\n scale_color_manual(values=c(none=\"white\", some=\"black\"))+\n scale_fill_gradient(low=\"white\", high=\"red\")+\n geom_path(aes(\n FPR, TPR, key=1),\n data=XPANEL(\"prop.finite\", YPANEL(\"prop.finite\", roc.dt)),\n color=roc.color,\n showSelected=c(\"size\", \"combo.i\"))+\n geom_point(aes(\n FPR, TPR, key=1),\n data=XPANEL(\"prop.finite\", YPANEL(\"prop.finite\", roc.dt)),\n showSelected=c(\"size\", \"combo.i\", \"thresh.i\"))+\n geom_hline(aes(\n yintercept=auc),\n data=YPANEL(\"round.auc\", auc=1),\n color=\"grey50\")+\n geom_vline(aes(\n xintercept=aub),\n data=XPANEL(\"round.aub\", aub=0),\n color=\"grey50\")+\n geom_point(aes(\n xval, yval,\n key=1),\n size=big.size,\n showSelected=c(\"size\", \"panel.key\"),\n data=count.tall)+\n geom_point(aes(\n xval, yval,\n key=key,\n tooltip=sprintf(\n \"%d combos with %s=%s, %s=%s for size %s\",\n combos,\n xfac, paste(xval),\n yfac, paste(yval),\n paste(size)),\n color=combos.chr,\n fill=combos),\n size=med.size,\n clickSelects=\"panel.key\",\n showSelected=c(\"size\"),\n data=count.tall)+\n geom_point(aes(\n xorig, yorig, key=combo.i),\n showSelected=c(\"size\", \"panel.key\"),\n clickSelects=\"combo.i\",\n alpha=small.alpha,\n size=small.size,\n data=auc.orig)+\n geom_point(aes(\n xorig, yorig, key=1),\n color=roc.color,\n showSelected=c(\"size\", \"combo.i\"),\n size=small.size,\n data=auc.orig)+\n 
geom_blank(aes(\n x, y),\n data=data.table(x=0, y=0)),\n err=ggplot()+\n ggtitle(\"Error curves, select threshold\")+\n theme_bw()+\n theme_animint(\n width=300, height=300\n )+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n scale_x_continuous(\"Distorted prediction threshold\")+\n scale_y_continuous(\"Incorrectly predicted labels\")+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)+\n geom_rect(aes(\n xmin=min.new, ymin=0,\n key=thresh.i,\n xmax=max.new, ymax=min.fp.fn),\n alpha=0.1,\n size=0,\n showSelected=c(\"size\", \"combo.i\"),\n data=roc.dt)+\n geom_segment(aes(\n min.new, value,\n key=paste(variable, thresh.i),\n color=variable, size=variable,\n xend=max.new, yend=value),\n data=roc.dt.tall,\n showSelected=c(\"size\", \"combo.i\"))+\n geom_rect(aes(\n xmin=min.new, ymin=-Inf,\n xmax=max.new, ymax=Inf,\n key=thresh.i),\n clickSelects=\"thresh.i\",\n showSelected=c(\"size\", \"combo.i\"),\n alpha=0.5,\n data=roc.dt),\n ## geom_point(aes(\n ## thresh.i, value),\n ## data=roc.dt.tall,\n ## showSelected=c(\"size\", \"combo.i\")),\n ## uniq=ggplot()+\n ## theme_bw()+\n ## theme(panel.margin=grid::unit(0, \"lines\"))+\n ## geom_hline(yintercept=1, col=\"grey\")+\n ## scale_fill_gradient(low=\"white\", high=\"red\")+\n ## scale_y_continuous(breaks=seq(0, 1.2, by=0.2))+\n ## scale_x_continuous(breaks=seq(0, 20, by=2))+\n ## geom_point(aes(\n ## n.uniq, auc,\n ## key=1),\n ## showSelected=c(\"size\", \"panel.key\"),\n ## size=big.size,\n ## data=auc.stat.counts)+\n ## geom_point(aes(\n ## n.uniq, auc,\n ## key=panel.key,\n ## fill=count),\n ## showSelected=\"size\",\n ## clickSelects=\"panel.key\",\n ## size=med.size,\n ## data=auc.stat.counts)+\n ## geom_point(aes(\n ## n.uniq, auc,\n ## key=combo.i),\n ## showSelected=c(\"size\", \"panel.key\"),\n ## clickSelects=\"combo.i\",\n ## size=small.size,\n ## alpha=small.alpha,\n ## data=auc.stats)+\n ## geom_point(aes(\n ## n.uniq, auc,\n ## key=1),\n ## showSelected=c(\"size\", \"combo.i\"),\n ## color=roc.color,\n ## size=small.size,\n ## data=auc.stats),\n duration=list(size=1000, combo.i=1000, thresh.i=500),\n first=list(\n thresh.i=1,\n size=1e-4,\n combo.i=132,\n panel.key=\"prop.finite round.auc 0.625 1.1667\"),\n time=list(variable=\"thresh.i\", ms=500)\n)\n"
},
{
"alpha_fraction": 0.5890299081802368,
"alphanum_fraction": 0.6090871691703796,
"avg_line_length": 29.537500381469727,
"blob_id": "20d1da49427861d7f939831a8bdb29e446221f52",
"content_id": "be243d9fbfc39d3b97c9e91aeab170a99db99963",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2443,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 80,
"path": "/figure-compare-hinge-loss-data.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\n\npred.vec <- seq(-3, 3, by=0.25)\ngrid.dt <- data.table(expand.grid(\n pos=pred.vec,\n neg=pred.vec))\npositive.part <- function(x)ifelse(0<x, x, 0)\nsigmoid <- function(x)1/(1+exp(-x))\nhinge <- function(pred, label)positive.part(1-pred*label)\nhingeSet <- function(DT){\n DT[, hinge.loss := hinge(pred.real, label)]\n DT[, squared.hinge := hinge.loss^2 ]\n}\nerr.dt <- rbind(\n data.table(\n min.log.lambda=c(-Inf, 0),\n max.log.lambda=c(0, Inf),\n fn=c(1, 0), possible.fn=1,\n fp=c(0, 0), possible.fp=0,\n label=1, obs=\"pos\"),\n data.table(\n min.log.lambda=c(-Inf, 0),\n max.log.lambda=c(0, Inf),\n fn=c(0, 0), possible.fn=0,\n fp=c(0, 1), possible.fp=1,\n label=-1, obs=\"neg\"))\nerr.dt[, labels := 1]\nerr.dt[, errors := fp+fn]\nlab.dt <- unique(err.dt[, .(label, obs)])\nloss.wide.list <- list()\nfor(pred.i in 1:nrow(grid.dt)){\n pred.wide <- grid.dt[pred.i]\n pred.wide[, cat(sprintf(\n \"%4d / %4d %f %f\\n\",\n pred.i, nrow(grid.dt),\n pos, neg))]\n pred.tall <- melt(\n pred.wide,\n measure.vars=lab.dt$obs,\n value.name=\"pred.real\",\n variable.name=\"obs\"\n )[lab.dt, on=\"obs\"]\n hingeSet(pred.tall)\n pred.tall[, logistic.loss := log(1+exp(-label*pred.real))]\n pred.tall[, pred.label := ifelse(0<pred.real, 1, -1)]\n pred.tall[, `01.loss` := ifelse(pred.label == label, 0, 1)]\n pred.tall[, pred.prob := sigmoid(pred.real)]\n pairwise <- dcast(\n melt(pred.tall, measure.vars=c(\"pred.prob\",\"pred.real\")),\n variable ~ obs)\n pairwise[, pred.real := pos-neg]\n pairwise[, label := 1]\n hingeSet(pairwise)\n pred.tall[, pred.log.lambda := pred.real]\n roc.list <- penaltyLearning::ROChange(\n err.dt, pred.tall,\n problem.vars=\"obs\")\n err.marg <- data.table(err.dt)\n err.marg[, `:=`(\n min.log.lambda = c(-Inf, 1, -Inf, -1),\n max.log.lambda = c(1, Inf, -1, Inf)\n )]\n roc.marg <- penaltyLearning::ROChange(\n err.marg, pred.tall,\n problem.vars=\"obs\")\n out.row <- data.table(\n pred.i, pred.wide,\n AUM=roc.list$aum,\n AUC=roc.list$auc,\n AUM.margin=roc.marg$aum,\n pairwise=dcast(\n pairwise, NULL ~ variable, value.var=c(\"hinge.loss\", \"squared.hinge\")))\n for(out.col in c(\"01.loss\",\"logistic.loss\",\"hinge.loss\",\"squared.hinge\")){\n out.row[[out.col]] <- sum(pred.tall[[out.col]])\n }\n loss.wide.list[[pred.i]] <- out.row\n}\nloss.wide <- do.call(rbind, loss.wide.list)\ndata.table::fwrite(loss.wide, \"figure-compare-hinge-loss-data.csv\")\n"
},
{
"alpha_fraction": 0.657873809337616,
"alphanum_fraction": 0.6723343133926392,
"avg_line_length": 31.606218338012695,
"blob_id": "5925e55adba59557df57dca7f9df09aeb13997b0",
"content_id": "6fa91ea9e4bc0bd383f544c451e4b3a4828f4fe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6293,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 193,
"path": "/figure-unbalanced-grad-desc.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "source(\"packages.R\")\n\ndata.list <- readRDS(\"figure-unbalanced-grad-desc-data.rds\")\nx.lab <- \"Test AUC, median and quartiles over 10 random train sets\"\nseeds.wide <- dcast(\n data.list[[\"result\"]],\n prop.pos + seed ~ model,\n value.var=\"auc\")\nseeds.tall <- melt(\n seeds.wide,\n id.vars=c(\"prop.pos\",\"seed\",\"aum.count\"),\n variable.name=\"baseline.name\",\n value.name=\"baseline.auc\")\np.tall <- seeds.tall[, {\n result <- t.test(aum.count, baseline.auc, alternative=\"greater\", paired=TRUE)\n with(result, data.table(p.value))\n}, by=.(prop.pos, baseline.name)]\ndcast(p.tall, baseline.name ~ prop.pos, value.var=\"p.value\")\n\nresult.tall <- melt(data.list[[\"result\"]], measure.vars=c(\"accuracy\", \"auc\"))\nresult.tall[, percent.positive.labels := factor(prop.pos*100)]\nggplot()+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_point(aes(\n percent.positive.labels, value, color=model),\n data=result.tall)\n\nresult.tall[variable==\"auc\"]\n\nresult.stats <- result.tall[, .(\n max=max(value),\n q75=quantile(value, 0.75),\n median=median(value),\n q25=quantile(value, 0.25),\n min=min(value),\n seeds=.N\n), by=.(variable, prop.pos, `percent\\npositive\\nlabels`=percent.positive.labels, model=sub(\"aum\", \"AUM\", model))]\ndcast(result.stats[variable==\"auc\"], model ~ prop.pos, value.var=\"median\")\n\nglmnet.stats <- result.stats[grepl(\"glmnet\", model)]\ngg <- ggplot()+\n ggtitle(paste0(\n \"cv.glmnet run on data sets with same number of observations, N=\",\n data.list[[\"N.obs\"]],\n \"\\nand with different proportions of positive labels\"))+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_ribbon(aes(\n prop.pos, ymin=min, ymax=max, fill=model),\n alpha=0.5,\n data=glmnet.stats)+\n geom_line(aes(\n prop.pos, median, color=model),\n data=glmnet.stats)+\n scale_x_continuous(\n \"Proportion positive labels in train set\",\n breaks=unique(result.stats[[\"prop.pos\"]]))+\n ylab(\"Accuracy or AUC of predictions\non a test set of 50% positive\nand 50% negative labels\")\ngg\n\nlogistic.stats <- result.stats[grepl(\"logistic\", model)]\ngg <- ggplot()+\n ggtitle(paste0(\n \"Logistic regression grad descent run on data sets with same number of observations, N=\",\n data.list[[\"N.obs\"]],\n \"\\nand with different proportions of positive labels\"))+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_ribbon(aes(\n prop.pos, ymin=min, ymax=max, fill=model),\n alpha=0.5,\n data=logistic.stats)+\n geom_line(aes(\n prop.pos, median, color=model),\n data=logistic.stats)+\n scale_x_continuous(\n \"Proportion positive labels in train set\",\n breaks=unique(result.stats[[\"prop.pos\"]]))+\n ylab(\"Accuracy or AUC of predictions\non a test set of 50% positive\nand 50% negative labels\")\ngg\n\nlog.glm.stats <- result.stats[variable==\"auc\" & grepl(\"logistic|glmnet\", model)]\nlog.glm.stats[, regularization := ifelse(\n grepl(\"logistic\", model), \"early stopping\", \"L2 norm\")]\nlog.glm.stats[, weights := ifelse(\n grepl(\"weighted|balanced\", model), \"balanced\", \"identity\")]\nx.lo <- 0.984\ngg <- ggplot()+\n ggtitle(\"Comparing logistic regression models (control experiment)\")+\n scale_x_continuous(\n x.lab)+\n coord_cartesian(xlim=c(x.lo, 1))+\n geom_point(aes(\n ifelse(median<x.lo, -Inf, median), weights),\n shape=1,\n data=log.glm.stats)+\n geom_segment(aes(\n q25, weights,\n xend=q75, yend=weights),\n data=log.glm.stats)+\n facet_grid(`percent\\npositive\\nlabels` ~ regularization, 
labeller=label_both, scales=\"free\")\npng(\"figure-unbalanced-grad-desc-logistic.png\", width=6, height=3, res=200, units=\"in\")\nprint(gg)\ndev.off()\n\naum.stats <- result.stats[grepl(\"AUM\", model)]\ngg <- ggplot()+\n ggtitle(paste0(\n \"AUM gradient descent with early stopping run on data sets\nwith same number of observations, N=\",\n data.list[[\"N.obs\"]],\n \"\\nand with different proportions of positive labels\"))+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_ribbon(aes(\n prop.pos, ymin=min, ymax=max, fill=model),\n alpha=0.5,\n data=aum.stats)+\n geom_line(aes(\n prop.pos, median, color=model),\n data=aum.stats)+\n scale_x_continuous(\n \"Proportion positive labels in train set\",\n breaks=unique(result.stats[[\"prop.pos\"]]))+\n ylab(\"Accuracy or AUC of predictions\non a test set of 50% positive\nand 50% negative labels\")\ngg\n\naum.stats.auc <- aum.stats[variable==\"auc\"]\ngg <- ggplot()+\n ggtitle(\"(a) Comparing AUM variants\")+\n scale_x_continuous(\n x.lab,\n limits=c(0.985, 1))+\n ylab(\"Loss function\")+\n geom_point(aes(\n median, model),\n shape=1,\n data=aum.stats.auc)+\n geom_segment(aes(\n q25, model,\n xend=q75, yend=model),\n data=aum.stats.auc)+\n facet_grid(`percent\\npositive\\nlabels` ~ ., labeller=label_both, scales=\"free\")\npng(\"figure-unbalanced-grad-desc-aum.png\", width=5, height=2.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n\nlevs <- c(\"AUM.count\", \"squared.hinge.all.pairs\", \"logistic.weighted\")\ncompare.stats <- result.stats[model %in% levs]\ncompare.stats[, model.fac := factor(model, levs)]\ngg <- ggplot()+\n ggtitle(paste0(\n \"AUM gradient descent with early stopping run on data sets with same number of observations, N=\",\n data.list[[\"N.obs\"]],\n \"\\nand with different proportions of positive labels\"))+\n facet_grid(variable ~ ., labeller = label_both, scales=\"free\")+\n geom_ribbon(aes(\n prop.pos, ymin=min, ymax=max, fill=model),\n alpha=0.5,\n data=compare.stats)+\n geom_line(aes(\n prop.pos, median, color=model),\n data=compare.stats)+\n scale_x_continuous(\n \"Proportion positive labels in train set\",\n breaks=unique(result.stats[[\"prop.pos\"]]))+\n ylab(\"Accuracy or AUC of predictions\non a test set of 50% positive\nand 50% negative labels\")\ngg\n\ncompare.stats.auc <- compare.stats[variable==\"auc\"]\ngg <- ggplot()+\n ggtitle(\"(b) AUM compared to baselines\")+\n scale_x_continuous(\n x.lab,\n limits=c(0.990, 1))+\n ylab(\"Loss function\")+\n geom_point(aes(\n median, model.fac),\n shape=1,\n data=compare.stats.auc)+\n geom_segment(aes(\n q25, model.fac,\n xend=q75, yend=model.fac),\n data=compare.stats.auc)+\n facet_grid(`percent\\npositive\\nlabels` ~ ., labeller=label_both, scales=\"free\")\npng(\"figure-unbalanced-grad-desc.png\", width=5, height=2.5, units=\"in\", res=200)\nprint(gg)\ndev.off()\n"
},
{
"alpha_fraction": 0.6161657571792603,
"alphanum_fraction": 0.6326334476470947,
"avg_line_length": 30.00851058959961,
"blob_id": "c6e4d02c975309e8512cd73469c4aac0f6d3662b",
"content_id": "093ac3455678d3696f89acc3d2a5114a6c0060aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 14574,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 470,
"path": "/figure-neuroblastomaProcessed-complex.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "### Write down what package versions work with your R code, and\n### attempt to download and load those packages. The first argument is\n### the version of R that you used, e.g. \"3.0.2\" and then the rest of\n### the arguments are package versions. For\n### CRAN/Bioconductor/R-Forge/etc packages, write\n### e.g. RColorBrewer=\"1.0.5\" and if RColorBrewer is not installed\n### then we use install.packages to get the most recent version, and\n### warn if the installed version is not the indicated version. For\n### GitHub packages, write \"user/repo@commit\"\n### e.g. \"tdhock/animint@f877163cd181f390de3ef9a38bb8bdd0396d08a4\" and\n### we use install_github to get it, if necessary.\nworks_with_R <- function(Rvers,...){\n local.lib <- file.path(getwd(), \"library\")\n dir.create(local.lib, showWarnings=FALSE, recursive=TRUE)\n .libPaths(c(local.lib, .libPaths()))\n pkg_ok_have <- function(pkg,ok,have){\n stopifnot(is.character(ok))\n if(!as.character(have) %in% ok){\n warning(\"works with \",pkg,\" version \",\n paste(ok,collapse=\" or \"),\n \", have \",have)\n }\n }\n pkg_ok_have(\"R\",Rvers,getRversion())\n pkg.vers <- list(...)\n for(pkg.i in seq_along(pkg.vers)){\n vers <- pkg.vers[[pkg.i]]\n pkg <- if(is.null(names(pkg.vers))){\n \"\"\n }else{\n names(pkg.vers)[[pkg.i]]\n }\n if(pkg == \"\"){# Then it is from GitHub.\n ## suppressWarnings is quieter than quiet.\n if(!suppressWarnings(require(requireGitHub))){\n ## If requireGitHub is not available, then install it using\n ## devtools.\n if(!suppressWarnings(require(devtools))){\n install.packages(\"devtools\")\n require(devtools)\n }\n install_github(\"tdhock/requireGitHub\")\n require(requireGitHub)\n }\n print(search())\n requireGitHub(vers)\n }else{# it is from a CRAN-like repos.\n if(!suppressWarnings(require(pkg, character.only=TRUE))){\n install.packages(pkg)\n }\n pkg_ok_have(pkg, vers, packageVersion(pkg))\n library(pkg, character.only=TRUE)\n }\n }\n}\noptions(repos=c(\n \"http://www.bioconductor.org/packages/release/bioc\",\n ##\"http://r-forge.r-project.org\",\n \"http://cloud.r-project.org\",\n \"http://cran.r-project.org\"))\nworks_with_R(\n \"4.1.0\",\n data.table=\"1.14.0\",\n future=\"1.21.0\",\n future.apply=\"1.7.0\",\n RJSONIO=\"1.3.1.4\",\n R.utils=\"2.10.1\",\n \"tdhock/penaltyLearning@4e14a0b0e022d919884277d68b8e47bd158459f3\",\n jointseg=\"1.0.2\",\n gridExtra=\"2.3\",\n neuroblastoma=\"1.0\",\n tikzDevice=\"0.12.3.1\",\n microbenchmark=\"1.4.7\",\n animint2=\"1.0\")\n\ndata(neuroblastomaProcessed, package=\"penaltyLearning\")\ncounts <- neuroblastomaProcessed$errors[, {\n diff.tab <- table(factor(diff(errors), c(\"-1\", \"0\", \"1\")))\n L <- as.list(diff.tab)\n size <- max.log.lambda-min.log.lambda\n for(fun.name in c(\"min\", \"max\")){\n fun <- get(fun.name)\n L[[paste0(fun.name, \".size\")]] <- min(size[errors==fun(errors)])\n }\n L$mean.size <- with(L, (min.size+max.size)/2)\n L\n}, by=list(profile.id, chromosome)]\ntwo.changes <- counts[1 < `-1` | 1 < `1`]\ntwo.changes <- counts[order(-`-1`, -`1`, -mean.size)][profile.id != 481][1:8]\ntwo.changes[, panel := paste0(\n ifelse(`-1`==2, \"p\", \"n\"), #positive or negative label\n profile.id, \".\", chromosome)]\nsome.err <- neuroblastomaProcessed$errors[two.changes, on=list(\n profile.id, chromosome)]\nerr.sizes <- c(\n fp=3,\n fn=2,\n errors=1)\nerr.colors <- c(\n fp=\"red\",\n fn=\"deepskyblue\",\n errors=\"black\")\nsome.err.tall <- melt(\n some.err,\n measure.vars=names(err.colors))\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, 
\"lines\"))+\n facet_grid(profile.id + chromosome ~ .)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=variable, size=variable),\n data=some.err.tall)+\n scale_y_continuous(\n \"errors\",\n breaks=c(0,1),\n limits=c(-0.2, 1.2))+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)\n\nsome.err.tall[, value.i := cumsum(\n c(FALSE, diff(value) != 0)\n), by=list(panel, profile.id, chromosome, variable)]\nsegs.err.tall <- some.err.tall[, list(\n min.log.lambda=min(min.log.lambda),\n max.log.lambda=max(max.log.lambda),\n value=value[1]\n), by=list(panel, profile.id, chromosome, variable, value.i)]\nsegs.min.tall <- segs.err.tall[, {\n .SD[value==min(value)]\n}, by=list(panel, profile.id, chromosome, variable)]\nsegs.min.err <- segs.min.tall[variable==\"errors\"]\nsegs.min.err[, mid.log.lambda := (min.log.lambda+max.log.lambda)/2]\nset.seed(1)\nsize <- segs.min.err[is.finite(mid.log.lambda), log(1+rexp(.N, mean(\n max.log.lambda-min.log.lambda)))]\nsize <- 0.1\nsegs.min.err[, pred.log.lambda := ifelse(\n min.log.lambda == -Inf, max.log.lambda-size, ifelse(\n max.log.lambda == Inf, min.log.lambda+size, mid.log.lambda))]\nsegs.min.err[, interval := ifelse(\n is.finite(mid.log.lambda), \"finite\", \"infinite\")]\nauc.dt <- segs.min.err[, {\n L <- penaltyLearning::ROChange(\n some.err, .SD, c(\"panel\"))\n with(L, data.table(auc, thresholds[threshold==\"predicted\"]))\n}, by=list(interval)]\nroc.dt <- segs.min.err[, {\n L <- penaltyLearning::ROChange(\n some.err, .SD, c(\"panel\"))\n L$roc\n}, by=list(interval)]\nroc.dt[, row.i := 1:.N, by=interval]\nggplot()+\n theme_bw()+\n geom_path(aes(\n FPR, TPR, color=interval, size=interval),\n data=roc.dt)+\n geom_point(aes(\n FPR, TPR, color=interval, size=interval),\n fill=NA,\n shape=21,\n data=auc.dt)+\n scale_size_manual(values=c(\n finite=2,\n infinite=1))\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, 'lines'))+\n facet_grid(. ~ interval)+\n geom_path(aes(\n FPR, TPR),\n data=roc.dt)+\n geom_point(aes(\n FPR, TPR),\n fill=NA,\n shape=21,\n data=auc.dt)+\n geom_text(aes(\n FPR, TPR, label=row.i),\n data=roc.dt)\n\nroc.tall <- melt(\n roc.dt,\n measure.vars=names(err.colors)\n)[order(interval, variable, -min.thresh)]\nvert.dt <- roc.tall[, {\n data.table(\n thresh=min.thresh[-.N],\n value=value[-.N],\n next.value=value[-1])\n}, by=list(interval, variable)]\nggplot()+\n theme_bw()+\n theme(panel.margin=grid::unit(0, 'lines'))+\n facet_grid(. 
~ interval)+\n geom_segment(aes(\n min.thresh, value,\n xend=max.thresh, yend=value,\n size=variable, color=variable),\n data=roc.tall)+\n geom_segment(aes(\n thresh, value,\n xend=thresh, yend=next.value,\n color=variable),\n data=vert.dt)+\n scale_color_manual(values=err.colors)+\n scale_size_manual(values=err.sizes)\n\ndata(neuroblastoma, package=\"neuroblastoma\")\nsome.profiles <- data.table(neuroblastoma$profiles)[two.changes, on=list(\n profile.id, chromosome)]\nsome.labels <- data.table(neuroblastoma$annotations)[two.changes, on=list(\n profile.id, chromosome)]\nsome.changes.list <- list()\nsome.models.list <- list()\nfor(profile.i in 1:nrow(two.changes)){\n profile.info <- two.changes[profile.i]\n one.profile <- some.profiles[profile.info, on=list(\n profile.id, chromosome, panel)]\n max.segments <- 20L\n fit <- jointseg::Fpsn(one.profile$logratio, max.segments)\n some.models.list[[profile.i]] <- data.table(\n profile.info, n.segments=1:max.segments, loss=fit$J.est)\n for(n.segments in 2:max.segments){\n end.i <- fit$t.est[n.segments, 1:n.segments]\n before <- end.i[-length(end.i)]\n after <- before+1\n change.pos <- one.profile[, (position[before]+position[after])/2]\n some.changes.list[[paste(profile.i, n.segments)]] <- data.table(\n profile.info, n.segments, change.pos)\n }\n}\nsome.models <- do.call(rbind, some.models.list)\nsome.changes <- do.call(rbind, some.changes.list)\nerr.list <- penaltyLearning::labelError(\n some.models, some.labels, some.changes,\n change.var=\"change.pos\",\n label.vars=c(\"min\", \"max\"),\n model.vars=\"n.segments\",\n problem.vars=\"panel\")\n\nthresh.dt <- roc.dt[, {\n data.table(thresh=seq(\n floor(min(max.thresh)), ceiling(max(min.thresh)), by=0.04))\n}]\nmin.thresh <- -2.1\nmax.thresh <- 2\nthresh.dt <- data.table(thresh=seq(min.thresh, max.thresh, by=0.04))\nthresh.pred <- data.table(id=1, thresh.dt)[data.table(\n id=1, segs.min.err), on=list(id), allow.cartesian=TRUE]\nthresh.pred[, pred.plus.thresh := pred.log.lambda+thresh]\nthresh.pred[, pred0 := pred.plus.thresh]\nsetkey(thresh.pred, panel, pred0, pred.plus.thresh)\nsetkey(some.err, panel, min.log.lambda, max.log.lambda)\nshow.pred <- foverlaps(\n some.err,\n thresh.pred[, .(thresh, interval, panel, pred0, pred.plus.thresh)],\n nomatch=0L)\nshow.pred[, value.fac := ifelse(errors==0, \"correct\", \"error\")]\nshow.label.errors <- show.pred[err.list$label.errors, nomatch=0L, on=list(\n panel, n.segments)]\nshow.changes <- show.pred[some.changes, nomatch=0L, on=list(\n panel, n.segments), allow.cartesian=TRUE]\nthresh.dt[, thresh0 := thresh]\nsetkey(thresh.dt, thresh, thresh0)\nsetkey(roc.dt, min.thresh, max.thresh)\nroc.points <- foverlaps(roc.dt, thresh.dt, nomatch=0L)\ntail.size <- 0.5\nroc.points[, tail.thresh := thresh-tail.size]\nsetkey(roc.points, interval, tail.thresh, thresh)\nsetkey(roc.dt, interval, min.thresh, max.thresh)\nroc.tails <- foverlaps(\n roc.points[, .(thresh, interval, tail.thresh)],\n roc.dt[, .(interval, min.thresh, max.thresh, FPR, TPR)],\n nomatch=0L)\nerr.sizes.animint <- c(\n fp=5,\n fn=2.5,\n errors=1.5)\nroc.segs <- roc.dt[order(interval, min.thresh), {\n dt <- data.table(\n from.FPR=FPR[-.N],\n to.FPR=FPR[-1],\n from.TPR=TPR[-.N],\n to.TPR=TPR[-1])\n print(dt)\n ##browser()\n dt\n}, by=list(interval)]\nyexp <- 1\nroc.u <- roc.points[, .SD[1], by=list(FPR, TPR, interval)]\nroc.color <- \"violet\"\nvstat <- c(\n fp=\"false positive\",\n fn=\"false negative\",\n errors=\"errors\")\nvert.dt[, status := vstat[variable] ]\nroc.tall[, status := vstat[variable] 
]\nsegs.err.tall[, status := vstat[variable] ]\nerr.leg <- \"Error type\"\nanimint(\n title=\"ROC curves for neuroblastoma data with several minima\",\n duration=list(thresh=250),\n time=list(variable=\"thresh\", ms=300),\n samples=ggplot()+\n ggtitle(\"Sample label error curves\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n theme_animint(width=400)+\n facet_grid(panel ~ .)+\n geom_point(aes(\n pred.plus.thresh, errors,\n key=interval,\n fill=value.fac),\n color=roc.color,\n size=4,\n showSelected=c(\"interval\", \"thresh\"),\n data=show.pred)+\n scale_fill_manual(\n \"Prediction\",\n values=c(correct=\"white\", error=\"black\"))+\n geom_tallrect(aes(\n xmin=pred.plus.thresh-tail.size, xmax=pred.plus.thresh,\n key=interval),\n fill=roc.color,\n color=NA,\n alpha=0.2,\n showSelected=c(\"interval\", \"thresh\"),\n data=show.pred)+\n geom_vline(aes(\n xintercept=pred.plus.thresh,\n key=interval,\n linetype=interval),\n color=roc.color,\n showSelected=\"thresh\",\n data=show.pred)+\n geom_segment(aes(\n min.log.lambda, value,\n xend=max.log.lambda, yend=value,\n color=status, size=status),\n data=segs.err.tall)+\n scale_y_continuous(\n \"errors\",\n breaks=c(0,1),\n limits=c(0-yexp, 1+yexp))+\n scale_x_continuous(\"log(penalty)\")+\n scale_color_manual(\n err.leg,\n values=structure(err.colors[names(vstat)], names=vstat))+\n scale_size_manual(\n err.leg,\n values=structure(err.sizes[names(vstat)], names=vstat)),\n thresholds=ggplot()+\n coord_cartesian(xlim=c(min.thresh, max.thresh))+\n ggtitle(\"Total label error curves, select threshold\")+\n scale_color_manual(\n err.leg,\n values=structure(err.colors[names(vstat)], names=vstat))+\n scale_size_manual(\n err.leg,\n values=structure(err.sizes[names(vstat)], names=vstat))+\n theme_bw()+\n theme_animint(width=600)+\n facet_grid(. ~ interval)+\n geom_segment(aes(\n min.thresh, value,\n xend=max.thresh, yend=value,\n size=status, color=status),\n showSelected=\"interval\",\n data=roc.tall)+\n geom_segment(aes(\n thresh, value,\n xend=thresh, yend=next.value,\n color=status),\n showSelected=\"interval\",\n data=vert.dt)+\n geom_tallrect(aes(\n xmin=thresh-tail.size, xmax=thresh, key=1),\n data=thresh.dt,\n fill=roc.color,\n color=NA,\n showSelected=\"thresh\",\n alpha=0.2)+\n make_tallrect(thresh.dt, \"thresh\")+\n scale_y_continuous(\n \"errors\",\n breaks=seq(0, 20, by=2))+\n guides(color=\"none\", size=\"none\")+\n scale_x_continuous(\"Threshold = constant added to predicted values\"),\n roc=ggplot()+\n ggtitle(\"ROC curves, select threshold\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, 'lines'))+\n facet_grid(. 
~ interval)+\n geom_text(aes(\n 0.2, 0.7, label=sprintf(\"AUC=%.2f\", auc)),\n showSelected=\"interval\",\n data=auc.dt)+\n geom_segment(aes(\n from.FPR, from.TPR,\n xend=to.FPR, yend=to.TPR),\n data=roc.segs,\n showSelected=\"interval\",\n alpha=0.2)+\n geom_path(aes(\n FPR, TPR, key=1),\n color=roc.color,\n showSelected=c(\"thresh\", \"interval\"),\n size=3,\n data=roc.tails)+\n coord_equal()+\n scale_x_continuous(\n \"False Positive Rate\",\n breaks=seq(0, 1, by=0.2))+\n scale_y_continuous(\n \"True Positive Rate\",\n breaks=seq(0, 1, by=0.2))+\n geom_point(aes(\n FPR, TPR, label=row.i),\n clickSelects=\"thresh\",\n size=4,\n alpha=0.7,\n showSelected=\"interval\",\n data=roc.u)+\n geom_point(aes(\n FPR, TPR, key=1, label=row.i),\n showSelected=c(\"interval\", \"thresh\"),\n color=roc.color,\n size=4,\n data=roc.points),\n profiles=ggplot()+\n ggtitle(\"Noisy data with predicted changes and label errors\")+\n theme_bw()+\n theme(panel.margin=grid::unit(0, \"lines\"))+\n theme_animint(height=400, width=1400)+\n facet_grid(panel ~ interval, scales=\"free\")+\n geom_tallrect(aes(\n xmin=min/1e6, xmax=max/1e6, fill=annotation),\n data=some.labels,\n alpha=0.5,\n color=\"grey\")+\n scale_linetype_manual(\n err.leg,\n values=c(\n correct=0,\n \"false negative\"=3,\n \"false positive\"=1))+\n geom_tallrect(aes(\n xmin=min/1e6, xmax=max/1e6,\n key=paste(min, max),\n linetype=status),\n data=show.label.errors,\n showSelected=c(\"thresh\", \"interval\"),\n fill=NA,\n size=2,\n color=\"black\")+\n scale_fill_manual(values=penaltyLearning::change.colors)+\n geom_point(aes(\n position/1e6, logratio),\n color=\"grey50\",\n data=some.profiles)+\n xlab(\"Position on chromosome (Mb = mega bases)\")+\n geom_vline(aes(\n xintercept=change.pos/1e6, key=change.pos),\n data=show.changes,\n showSelected=c(\"thresh\", \"interval\"),\n color=\"green\"),\n out.dir=\"figure-neuroblastomaProcessed-complex\")\n"
},
{
"alpha_fraction": 0.5902140736579895,
"alphanum_fraction": 0.598832368850708,
"avg_line_length": 28.975000381469727,
"blob_id": "895a940b9f21ba6c6540f82947c2db90ded815a3",
"content_id": "edc9791ea93153b4624b0653c905009b6f040f6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3597,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 120,
"path": "/figure-linear-model-zero-init.R",
"repo_name": "tdhock/max-generalized-auc",
"src_encoding": "UTF-8",
"text": "library(data.table)\nlibrary(ggplot2)\n\ndata.dir <- \"../neuroblastoma-data/data/ATAC_JV_adipose\"\n\ndata.list <- list()\nfor(f in c(\"inputs\", \"outputs\", \"evaluation\")){\n f.csv.xz <- file.path(data.dir, paste0(f, \".csv.xz\"))\n if(file.exists(f.csv.xz)){\n system(paste(\"unxz\", f.csv.xz))\n }\n f.csv <- file.path(data.dir, paste0(f, \".csv\"))\n f.dt <- data.table::fread(f.csv)\n data.list[[f]] <- f.dt\n}\n\nfolds.csv <- Sys.glob(file.path(data.dir, \"cv\", \"*\", \"folds.csv\"))[1]\nfolds.dt <- data.table::fread(folds.csv)\nvalidation.fold <- 1\nvalidation.ids <- folds.dt[fold==validation.fold, sequenceID]\n\nX.all <- scale(data.list$inputs[, -1])\nrownames(X.all) <- data.list$inputs$sequenceID\nX.finite <- X.all[, apply(is.finite(X.all), 2, all)]\n\nset.list <- list(\n validation=rownames(X.finite) %in% validation.ids)\nset.list$train <- !set.list$validation\nX.list <- lapply(set.list, function(i)X.finite[i, ])\n\ny.train <- data.list[[\"outputs\"]][\n !sequenceID %in% validation.ids,\n cbind(min.log.lambda, max.log.lambda)]\nset.seed(1)\nweight.vec <- rep(0, ncol(X.finite))\nintercept <- 0\n\ncomputeAUM <- function(w, i, is.set){\n pred.pen.vec <- (X.finite %*% w) + i\n pred.dt <- data.table(\n sequenceID=rownames(pred.pen.vec),\n pred.log.lambda=as.numeric(pred.pen.vec))\n set.dt <- pred.dt[is.set]\n penaltyLearning::ROChange(\n data.list$evaluation, set.dt, \"sequenceID\")\n}\n\niteration.dt.list <- list()\n\nfor(iteration in 1:1000){\n if(! iteration %in% names(iteration.dt.list)){\n summary.dt.list <- list()\n set.roc.list <- list()\n for(set in names(set.list)){\n set.roc.list[[set]] <- computeAUM(weight.vec, intercept, set.list[[set]])\n summary.dt.list[[set]] <- with(set.roc.list[[set]], data.table(\n set,\n thresholds,\n aum))\n }\n summary.dt <- do.call(rbind, summary.dt.list)\n iteration.dt.list[[paste(iteration)]] <- data.table(\n iteration, summary.dt)\n print(iteration)\n g.dt <- set.roc.list[[\"train\"]][[\"aum.grad\"]]\n not.same <- g.dt[lo != hi]\n if(0 < nrow(not.same)){\n print(not.same)\n stop(\"not equal\")\n }\n g.vec <- g.dt$lo\n direction.vec <- -t(X.list[[\"train\"]]) %*% g.vec\n take.step <- function(s){\n weight.vec + s*direction.vec\n }\n set.aum.list <- list()\n for(step.size in 10^seq(-10, 0, by=0.5)){\n new.weight.vec <- take.step(step.size)\n for(set in \"train\"){\n set.roc <- computeAUM(new.weight.vec, 0, set.list[[set]])\n set.aum.list[[paste(step.size, set)]] <- data.table(\n step.size, set, aum=set.roc$aum,\n intercept=set.roc$thresholds[\n threshold==\"min.error\", (max.thresh+min.thresh)/2])\n }\n }\n set.aum <- do.call(rbind, set.aum.list)\n best.dt <- set.aum[, .SD[min(aum)==aum], by=set]\n ggplot()+\n geom_line(aes(\n step.size, aum),\n data=set.aum)+\n geom_point(aes(\n step.size, aum),\n data=best.dt)+\n geom_text(aes(\n step.size, aum, label=aum),\n vjust=0,\n data=best.dt)+\n scale_x_log10()+\n scale_y_log10()+\n facet_grid(set ~ ., scales=\"free\")\n weight.vec <- take.step(best.dt[[\"step.size\"]])\n intercept <- best.dt[[\"intercept\"]]\n }\n}\n\niteration.dt <- do.call(rbind, iteration.dt.list)\niteration.dt[set==\"train\", .(iteration, threshold, aum, errors)]\nggplot()+\n geom_line(aes(\n iteration, aum),\n data=iteration.dt[threshold==\"predicted\"])+\n facet_grid(set ~ ., scales=\"free\")\n\nggplot()+\n geom_line(aes(\n iteration, errors),\n data=iteration.dt[threshold==\"predicted\" & iteration>1])+\n facet_grid(set ~ ., scales=\"free\")\n"
}
] | 55 |
kg-nlp/PLOME | https://github.com/kg-nlp/PLOME | 71360ca22b47a9679bfa3717609ed35942132399 | d4fb0aeaf3524c442b0f3d155b20d2977955144b | acb8bb7d990c5818a0f4d7b00952c5fa97425b41 | refs/heads/main | 2023-07-31T12:05:18.150907 | 2021-09-17T04:31:44 | 2021-09-17T04:31:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.57276850938797,
"alphanum_fraction": 0.5841063857078552,
"avg_line_length": 38.279354095458984,
"blob_id": "d230ec2dab62e86a2d73838445f02b154dc8ed4b",
"content_id": "c72b8f4c641ac052856618d845c3b7f5b2a41193",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9702,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 247,
"path": "/pre_train_src/data_processor_mask.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport random\nfrom collections import namedtuple\nimport re\nimport numpy as np\nimport tensorflow as tf\nimport csv\nimport tokenization\nfrom mask import Mask, PinyinConfusionSet, StrokeConfusionSet\nfrom pinyin_tool import PinyinTool\n \nDEBUG = False\n\nInputExample = namedtuple('InputExample', ['tokens'])\nInputFeatures = namedtuple('InputFeature', ['input_ids', 'input_mask', 'segment_ids', 'lmask']) #segment_ids is for pinyin_ids\n\ndef get_tfrecord_num(tf_file):\n num = 0\n for record in tf.python_io.tf_record_iterator(tf_file):\n num += 1\n if num > 300000:\n num = 50000000\n break\n return num\n\nclass DataProcessor:\n def __init__(self, input_path, max_sen_len, vocab_file, out_dir, label_list=None, is_training=True):\n self.input_path = input_path\n self.max_sen_len = max_sen_len\n self.is_training = is_training\n self.out_dir = out_dir\n self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=False)\n self.label_list = label_list\n if label_list is not None:\n self.label_map = {}\n for (i, label) in enumerate(self.label_list):\n self.label_map[label] = i\n else:\n self.label_map = self.tokenizer.vocab\n self.label_list = {}\n for key in self.tokenizer.vocab:\n self.label_list[self.tokenizer.vocab[key]] = key\n \n py_dict_path = './pinyin_data/zi_py.txt' \n py_vocab_path = './pinyin_data/py_vocab.txt'\n sk_dict_path = './stroke_data/zi_sk.txt' \n sk_vocab_path = './stroke_data/sk_vocab.txt'\n self.pytool = PinyinTool(py_dict_path=py_dict_path, py_vocab_path=py_vocab_path, py_or_sk='py')\n self.sktool = PinyinTool(py_dict_path=sk_dict_path, py_vocab_path=sk_vocab_path, py_or_sk='sk')\n self.pplen = len(self.sktool.ZM2ID)\n self.sklen = self.sktool.PYLEN\n\n self.PYID2SEQ = self.pytool.get_pyid2seq_matrix()\n self.SKID2SEQ = self.sktool.get_pyid2seq_matrix()\n\n tokenid_pyid = {}\n tokenid_skid = {}\n for key in self.tokenizer.vocab:\n tokenid_pyid[self.tokenizer.vocab[key]] = self.pytool.get_pinyin_id(key)\n tokenid_skid[self.tokenizer.vocab[key]] = self.sktool.get_pinyin_id(key)\n \n\n same_py_file = './confusions/same_pinyin.txt'\n simi_py_file = './confusions/simi_pinyin.txt'\n stroke_file = './confusions/same_stroke.txt'\n tokenizer = self.tokenizer\n pinyin = PinyinConfusionSet(tokenizer, same_py_file)\n jinyin = PinyinConfusionSet(tokenizer, simi_py_file)\n print('pinyin conf size:', len(pinyin.confusion))\n print('jinyin conf size:', len(jinyin.confusion))\n stroke = StrokeConfusionSet(tokenizer, stroke_file)\n self.masker = Mask(same_py_confusion=pinyin, simi_py_confusion=jinyin, sk_confusion=stroke, tokenid2pyid=tokenid_pyid, tokenid2skid=tokenid_skid)\n\n\n file_pattern = out_dir + '/*.tfrecord' \n if input_path is not None: \n if is_training is True:\n pass\n else:\n self.tfrecord_path = out_dir\n if is_training is False:\n self.file2features()\n else:\n self.TfrecordFile = tf.gfile.Glob(file_pattern)\n self.TfrecordFile = sorted(self.TfrecordFile)\n random.shuffle(self.TfrecordFile)\n print ('--'.join(self.TfrecordFile))\n self.num_examples = 50000000\n \n def sample(self, text_unicode):\n tokens = text_unicode.strip().split(' ')\n if len(tokens) < 2: return None\n return InputExample(tokens=tokens)\n\n def load_examples(self):\n '''sent'''\n train_data = open(self.input_path, encoding=\"utf-8\")\n #train_data = open(self.input_path)\n instances = []\n n_line = 0\n for ins 
in train_data:\n #ins = ins.decode('utf8')\n n_line += 1\n if (DEBUG is True) and (n_line > 10000):\n break\n ins = self.sample(ins)\n if ins is not None:\n yield ins\n\n def convert_single_example(self, ex_index, example):\n label_map = self.label_map\n tokens = example.tokens\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens) > self.max_sen_len - 2:\n tokens = tokens[0:(self.max_sen_len - 2)]\n\n _tokens = []\n _lmask = []\n segment_ids = []\n _tokens.append(\"[CLS]\")\n _lmask.append(0)\n segment_ids.append(0)\n for token in tokens:\n _tokens.append(token)\n _lmask.append(1)\n segment_ids.append(self.pytool.get_pinyin_id(token))\n _tokens.append(\"[SEP]\")\n segment_ids.append(0)\n _lmask.append(0)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < self.max_sen_len:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n _lmask.append(0)\n\n assert len(input_ids) == self.max_sen_len\n assert len(input_mask) == self.max_sen_len\n assert len(segment_ids) == self.max_sen_len\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in _tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"lmask: %s\" % \" \".join(map(str, _lmask)))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n lmask=_lmask\n )\n return feature\n \n def get_label_list(self):\n return self.label_list\n \n def file2features(self):\n output_file = self.tfrecord_path\n if os.path.exists(output_file):\n os.remove(output_file)\n examples = self.load_examples()\n n_examples = 0\n writer = tf.python_io.TFRecordWriter(output_file)\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d\" % ex_index)\n\n feature = self.convert_single_example(ex_index, example)\n create_int_feature = lambda values: tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"lmask\"] = create_int_feature(feature.lmask)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n n_examples += 1\n \n self.num_examples = n_examples\n\n def build_data_generator(self, batch_size):\n def _decode_record(record):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = 
example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n lmask = example['lmask']\n \n masked_ids, masked_flgs, masked_py_ids, masked_sk_ids = tf.py_func(self.masker.mask_process, [input_ids], [tf.int32, tf.int32, tf.int32, tf.int32])\n lmask = tf.multiply(masked_flgs, lmask)\n label_ids = input_ids\n input_ids = masked_ids\n pinyin_ids = segment_ids\n masked_pinyin_ids = masked_py_ids\n\n return input_ids, input_mask, pinyin_ids, masked_pinyin_ids, masked_sk_ids, lmask, label_ids\n if self.is_training:\n dataset = tf.data.TFRecordDataset(filenames=self.TfrecordFile)\n else:\n pass\n #dataset = tf.data.TFRecordDataset(self.tfrecord_path)\n dataset = dataset.map(_decode_record, num_parallel_calls=10)\n if self.is_training:\n dataset = dataset.repeat().shuffle(buffer_size=500)\n dataset = dataset.batch(batch_size).prefetch(50)\n return dataset\n\nif __name__ == '__main__':\n import sys\n vocab_file = sys.argv[1]\n text_file = sys.argv[2]\n output_file = sys.argv[3]\n dp = DataProcessor(input_path=text_file, max_sen_len=512, vocab_file=vocab_file, out_dir=output_file, is_training=False)\n"
},
{
"alpha_fraction": 0.6950904130935669,
"alphanum_fraction": 0.7209302186965942,
"avg_line_length": 32.565216064453125,
"blob_id": "7f379dd1ab3253127dd21223012eac8ab775b06b",
"content_id": "527545b309b18dd1d2d239ac6b8afb3a1765311a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 774,
"license_type": "permissive",
"max_line_length": 356,
"num_lines": 23,
"path": "/finetune_src/start.sh",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nappname=plome_finetune\ntrain_path=./datas/train.txt\ntest_path=./datas/test.txt \ninit_bert=./datas/pretrained_plome\n\npy_dim=32\nmulti_task=1\nsk_or_py=\"all\"\ngpuid=0\nkeep_prob=0.9\noutput_dir=./${appname}_output\ninit_bert_path=$init_bert\nmax_sen_len=180\nbatch_size=32\nepoch=10\nlearning_rate=5e-5 #5e-6 #3e-5\n\necho \"multi_task=$multi_task\"\necho \"appname=$appname\"\necho \"init_bert=$init_bert\"\nmkdir $output_dir\npython3 train_eval_tagging.py --py_dim $py_dim --gpuid $gpuid --train_path $train_path --test_path $test_path --output_dir $output_dir --max_sen_len $max_sen_len --batch_size $batch_size --learning_rate $learning_rate --epoch $epoch --keep_prob $keep_prob --init_bert_path $init_bert_path --multi_task $multi_task --sk_or_py $sk_or_py>$output_dir/train.log\n\n\n"
},
{
"alpha_fraction": 0.607422947883606,
"alphanum_fraction": 0.6161064505577087,
"avg_line_length": 40.994117736816406,
"blob_id": "aecea5af177823331b7b5892b6779f850689f411",
"content_id": "38589b0f30aa9b4cc9ab0f0f14401bf6ed2363f4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7140,
"license_type": "permissive",
"max_line_length": 240,
"num_lines": 170,
"path": "/pre_train_src/train_masklm.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\n\nimport sys, os\nimport numpy as np\nimport tensorflow as tf\nfrom mask_lm import DataProcessor, MaskLM\nimport modeling\nimport optimization\nimport time\nimport random\n#tf.logging.set_verbosity(tf.logging.INFO)\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nclass MConfig:\n pass\n\ndef train(FLAGS):\n PY_OR_SK = 'all'\n\n rand_type_emb = True\n args = MConfig()\n args.use_mgpu = False\n args.seed = 1\n args.py_dim = int(FLAGS.py_dim)\n args.multi_task = True if int(FLAGS.multi_task) > 0 else False\n if int(FLAGS.use_mgpu) > 0:\n args.use_mgpu = True\n import horovod.tensorflow as hvd\n hvd.init()\n args.seed = hvd.rank()\n args.hvd = hvd\n print(\"=========== rank: \", hvd.rank(), \", local rank: \", hvd.local_rank(), \", size: \", hvd.size(), \", seed: \", args.seed)\n init_checkpoint = None if len(FLAGS.init_checkpoint.strip()) < 3 else FLAGS.init_checkpoint.strip()\n tf.random.set_random_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n \n gpuid_list = FLAGS.gpuid_list.strip().split(',')\n\n max_sen_len = FLAGS.max_sen_len\n train_path = FLAGS.train_path\n test_file = FLAGS.test_path\n out_dir = FLAGS.output_dir\n train_tfrecord_dir = FLAGS.train_tfrecord_path\n batch_size = FLAGS.batch_size\n bert_config_path = FLAGS.bert_config_path\n EPOCH = FLAGS.epoch\n learning_rate = FLAGS.learning_rate\n vocab_file = FLAGS.vocab_file\n\n keep_prob = FLAGS.keep_prob\n data_processor = DataProcessor(train_path, max_sen_len, vocab_file, train_tfrecord_dir, label_list=None, is_training=True)\n train_num = data_processor.num_examples\n train_data = data_processor.build_data_generator(batch_size)\n iterator = train_data.make_one_shot_iterator()\n input_ids, input_mask, pinyin_ids, masked_pinyin_ids, masked_stroke_ids, lmask, label_ids = iterator.get_next()\n\n #print ('input-ids:', id(input_ids), input_ids)\n\n input_ids.set_shape([None, max_sen_len])\n input_mask.set_shape([None, max_sen_len])\n pinyin_ids.set_shape([None, max_sen_len])\n lmask.set_shape([None, max_sen_len])\n label_ids.set_shape([None, max_sen_len])\n masked_pinyin_ids.set_shape([None, max_sen_len])\n\n\n \n tf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n tf_config.gpu_options.allow_growth = True\n if args.use_mgpu is True:\n tf_config.gpu_options.visible_device_list = str(hvd.local_rank())\n\n model = MaskLM(bert_config_path, num_class=len(data_processor.get_label_list()), pyid2seq=data_processor.PYID2SEQ, skid2seq=data_processor.SKID2SEQ, py_dim=args.py_dim, py_or_sk=PY_OR_SK, keep_prob=keep_prob, multi_task=args.multi_task)\n (loss, probs, golds, _, py_loss) = model.create_model(input_ids, input_mask, \n masked_pinyin_ids, masked_stroke_ids, lmask, label_ids, pinyin_ids, is_training=True)\n \n\n num_steps = train_num // batch_size * EPOCH\n num_warmup_steps = 100000\n #if args.use_mgpu is True:\n # learning_rate = learning_rate * hvd.size()\n train_op = optimization.create_optimizer(loss, learning_rate, num_steps, num_warmup_steps, args)\n\n\n with tf.Session(config=tf_config) as sess:\n if init_checkpoint is not None:\n print ('google_bert_init')\n tvars = tf.trainable_variables()\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint + '/bert_model.ckpt')\n keys = [x for x in assignment_map]\n for k in keys:\n if (rand_type_emb is True) and ('token_type_embeddings' in k):\n del assignment_map[k]\n continue\n print(k, '\\t', assignment_map[k])\n\n 
tf.train.init_from_checkpoint(init_checkpoint + '/bert_model.ckpt', assignment_map)\n init = tf.global_variables_initializer()\n sess.run(init)\n \n if args.use_mgpu is True:\n sess.run(hvd.broadcast_global_variables(0))\n \n loss_values = []\n saver = tf.train.Saver()\n best_score = 0.0\n best_model_path = os.path.join(out_dir, 'bert_model.ckpt')\n total_step = 0\n for epoch in range(EPOCH):\n for step in range(int(train_num / batch_size)):\n total_step += 1\n start_time = time.time()\n train_loss, _ = sess.run([loss, train_op]) \n loss_values.append(train_loss)\n if step % 50 == 0:\n duration = time.time() - start_time\n examples_per_sec = float(duration) / batch_size\n format_str = ('Epoch {} step {}, train loss = {:.4f},{:.4f},{:.4f} ( {:.4f} examples/sec; {:.4f} ''sec/batch)')\n \n if hvd.rank() == 0:\n print (format_str.format(epoch, step, np.mean(loss_values),np.mean(loss_values[-1000:]),np.mean(loss_values[-100:]), examples_per_sec, duration))\n loss_values = loss_values[-1000:]\n if step % 1000 == 0 and hvd.rank() == 0:\n saver.save(sess, best_model_path)\n \n \nif __name__ == '__main__':\n\n flags = tf.flags\n ## Required parameters\n flags.DEFINE_string(\"gpuid_list\", '0', \"i.e:0,1,2\")\n PREFIX = './pretrain_data'\n ## Optional\n flags.DEFINE_string(\"train_path\", '', \"train path \")\n flags.DEFINE_string(\"test_path\", '', \"test path \")\n flags.DEFINE_string(\"train_tfrecord_path\", '', \"train path \")\n flags.DEFINE_string(\"output_dir\", '', \"out dir \")\n flags.DEFINE_string(\"vocab_file\", '%s/datas/bert_datas/vocab.txt' % PREFIX, 'vocab')\n flags.DEFINE_string(\"init_checkpoint\", '', '')\n flags.DEFINE_string(\"bert_config_path\", '%s/datas/bert_datas/bert_config.json' % PREFIX, '')\n flags.DEFINE_string(\"label_list\", '', 'max_sen_len')\n flags.DEFINE_integer(\"max_sen_len\", 64, 'max_sen_len')\n flags.DEFINE_integer(\"batch_size\", 32, 'batch_size')\n flags.DEFINE_integer(\"py_dim\", 1, 'use_pinyin')\n flags.DEFINE_integer(\"multi_task\", 1, 'multi_task')\n flags.DEFINE_integer(\"epoch\", 2, 'batch_size')\n flags.DEFINE_float(\"learning_rate\", 5e-5, 'filter_punc')\n flags.DEFINE_float(\"keep_prob\", 0.9, 'keep prob in dropout')\n flags.DEFINE_string(\"use_mgpu\", '1', 'keep prob in dropout')\n\n\n flags.mark_flag_as_required('gpuid_list')\n flags.mark_flag_as_required('train_path')\n flags.mark_flag_as_required('output_dir')\n flags.mark_flag_as_required('train_tfrecord_path')\n flags.mark_flag_as_required('max_sen_len')\n\n FLAGS = flags.FLAGS\n #FLAGS.bert_config_path = '%s/bert_config.json' % FLAGS.output_dir\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpuid_list\n print ('Confings:')\n print ('\\tgpuid_list=', FLAGS.gpuid_list)\n print ('\\ttrain_path=', FLAGS.train_path)\n print ('\\ttest_path=', FLAGS.test_path)\n print ('\\toutput_dir=', FLAGS.output_dir)\n print ('\\tmax_sen_len=', FLAGS.max_sen_len)\n print ('\\tbert_config_path=', FLAGS.bert_config_path)\n print ('\\tmulti_task=', FLAGS.multi_task)\n train(FLAGS)\n\n"
},
{
"alpha_fraction": 0.5620834231376648,
"alphanum_fraction": 0.5726528167724609,
"avg_line_length": 36.841156005859375,
"blob_id": "1adde3d2005cd37b292241cf08b56ffc24ce437f",
"content_id": "326b73e4d5be0238b437aceb8af00b77507c4c17",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10502,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 277,
"path": "/finetune_src/data_processor_tagging.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport random\nfrom collections import namedtuple\nimport re\nimport numpy as np\nimport tensorflow as tf\nimport csv\nimport tokenization\nfrom pinyin_tool import PinyinTool\n\nDEBUG = False\n\nInputExample = namedtuple('InputExample', ['tokens', 'labels'])\nInputFeatures = namedtuple('InputFeature', ['input_ids', 'input_mask', 'segment_ids', 'stroke_ids', 'lmask', 'label_ids'])\n\ndef get_tfrecord_num(tf_file):\n num = 0\n for record in tf.python_io.tf_record_iterator(tf_file):\n num += 1\n return num\n\nclass DataProcessor:\n '''\n data format:\n sent1\\tsent2\n '''\n def __init__(self, input_path, max_sen_len, vocab_file, out_dir, label_list=None, is_training=True):\n self.input_path = input_path\n self.max_sen_len = max_sen_len\n self.is_training = is_training\n self.dataset = None\n self.out_dir = out_dir\n self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=False)\n self.label_list = label_list\n if label_list is not None:\n self.label_map = {}\n for (i, label) in enumerate(self.label_list):\n self.label_map[label] = i\n else:\n self.label_map = self.tokenizer.vocab\n self.label_list = {}\n for key in self.tokenizer.vocab:\n self.label_list[self.tokenizer.vocab[key]] = key\n\n py_dict_path = './pinyin_data/zi_py.txt'\n py_vocab_path = './pinyin_data/py_vocab.txt'\n sk_dict_path = './stroke_data/zi_sk.txt'\n sk_vocab_path = './stroke_data/sk_vocab.txt'\n \n self.pytool = PinyinTool(py_dict_path=py_dict_path, py_vocab_path=py_vocab_path, py_or_sk='py')\n self.sktool = PinyinTool(py_dict_path=sk_dict_path, py_vocab_path=sk_vocab_path, py_or_sk='sk')\n\n self.PYID2SEQ = self.pytool.get_pyid2seq_matrix() \n self.SKID2SEQ = self.sktool.get_pyid2seq_matrix()\n\n self.py_label_list = {v: k for k, v in self.pytool.vocab.items()}\n \n self.tokenid_pyid = {}\n self.tokenid_skid = {}\n for key in self.tokenizer.vocab:\n self.tokenid_pyid[self.tokenizer.vocab[key]] = self.pytool.get_pinyin_id(key) \n self.tokenid_skid[self.tokenizer.vocab[key]] = self.sktool.get_pinyin_id(key) \n if input_path is not None: \n if is_training is True:\n self.tfrecord_path = os.path.join(out_dir, \"train.tf_record\")\n else:\n self.tfrecord_path = os.path.join(out_dir, \"eval.tf_record\")\n #os.remove(self.tfrecord_path)\n if os.path.exists(self.tfrecord_path) is False:\n self.file2features()\n else:\n self.num_examples = get_tfrecord_num(self.tfrecord_path)\n def get_zi_py_matrix(self):\n pysize = 430\n matrix = []\n for k in range(len(self.tokenizer.vocab)):\n matrix.append([0] * pysize)\n\n for key in self.tokenizer.vocab:\n tokenid = self.tokenizer.vocab[key]\n pyid = self.pytool.get_pinyin_id(key)\n matrix[tokenid][pyid] = 1.\n return np.asarray(matrix, dtype=np.float32) \n \n \n def sample(self, text_unicode1, text_unicode2):\n segs1 = text_unicode1.strip().split(' ')\n segs2 = text_unicode2.strip().split(' ')\n tokens, labels = [], []\n if len(segs1) != len(segs2):\n return None\n for x, y in zip(segs1, segs2):\n tokens.append(x)\n labels.append(y)\n if len(tokens) < 2: return None\n return InputExample(tokens=tokens, labels=labels)\n\n def load_examples(self):\n '''sent1 \\t sent2'''\n train_data = open(self.input_path, encoding=\"utf-8\")\n instances = []\n n_line = 0\n for ins in train_data:\n n_line += 1\n if (DEBUG is True) and (n_line > 1000):\n break\n #ins = ins.decode('utf8')\n tmps = 
ins.strip().split('\\t')\n if len(tmps) < 2: \n continue\n ins = self.sample(tmps[0], tmps[1])\n if ins is not None:\n instances.append(ins)\n\n if self.is_training:\n random.seed = 666\n random.shuffle(instances)\n return instances\n\n def convert_single_example(self, ex_index, example):\n label_map = self.label_map\n tokens = example.tokens\n labels = example.labels\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens) > self.max_sen_len - 2:\n tokens = tokens[0:(self.max_sen_len - 2)]\n labels = labels[0:(self.max_sen_len - 2)]\n\n _tokens = []\n _labels = []\n _lmask = []\n segment_ids = []\n stroke_ids = []\n _tokens.append(\"[CLS]\")\n _lmask.append(0)\n _labels.append(labels[0])\n segment_ids.append(0)\n stroke_ids.append(0)\n for token, label in zip(tokens, labels):\n _tokens.append(token)\n _labels.append(label)\n _lmask.append(1)\n segment_ids.append(self.pytool.get_pinyin_id(token))\n stroke_ids.append(self.sktool.get_pinyin_id(token))\n _tokens.append(\"[SEP]\")\n segment_ids.append(0)\n stroke_ids.append(0)\n _labels.append(labels[0])\n _lmask.append(0)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < self.max_sen_len:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n stroke_ids.append(0)\n _labels.append(labels[0])\n _lmask.append(0)\n\n assert len(input_ids) == self.max_sen_len\n assert len(input_mask) == self.max_sen_len\n assert len(segment_ids) == self.max_sen_len\n assert len(stroke_ids) == self.max_sen_len\n\n label_ids = [label_map.get(l, label_map['UNK']) for l in _labels]\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n stroke_ids=stroke_ids,\n lmask=_lmask,\n label_ids=label_ids\n )\n return feature\n \n def get_label_list(self):\n return self.label_list\n \n def file2features(self):\n output_file = self.tfrecord_path\n if os.path.exists(output_file):\n os.remove(output_file)\n examples = self.load_examples()\n self.num_examples = len(examples)\n writer = tf.python_io.TFRecordWriter(output_file)\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = self.convert_single_example(ex_index, example)\n create_int_feature = lambda values: tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"stroke_ids\"] = create_int_feature(feature.stroke_ids)\n features[\"lmask\"] = create_int_feature(feature.lmask)\n features[\"label_ids\"] = create_int_feature(feature.label_ids)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n \n def build_data_generator(self, batch_size):\n def _get_py_seq(token_seq):\n ans = []\n for t in list(token_seq):\n pyid = self.tokenid_pyid.get(t, 1)\n ans.append(pyid)\n ans = np.asarray(ans, dtype=np.int32)\n return ans\n\n def _decode_record(record):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n 
\"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels\n if self.dataset is not None:\n return self.dataset\n\n dataset = tf.data.TFRecordDataset(self.tfrecord_path)\n dataset = dataset.map(_decode_record, num_parallel_calls=10)\n if self.is_training:\n dataset = dataset.repeat().shuffle(buffer_size=100)\n dataset = dataset.batch(batch_size).prefetch(50)\n self.dataset = dataset\n return dataset\n\n def get_feature(self, u_input, u_output=None):\n if u_output is None:\n u_output = u_input\n instance = self.sample(u_input, u_output)\n feature = self.convert_single_example(0, instance)\n input_ids = feature.input_ids\n input_mask = feature.input_mask\n input_py_ids = feature.segment_ids\n input_sk_ids = feature.stroke_ids\n label_ids = feature.label_ids\n label_mask = feature.lmask\n return input_ids, input_mask, input_py_ids, input_sk_ids, label_ids, label_mask\n\n \n \n\n"
},
{
"alpha_fraction": 0.5030567646026611,
"alphanum_fraction": 0.5222707390785217,
"avg_line_length": 34.78125,
"blob_id": "8a2889b001cd605b7c08202da3db7bde3da20fa1",
"content_id": "e4f875a0e2dabdd22acee20d775149a936f45a5e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1145,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 32,
"path": "/pre_train_src/split_records.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\ndef split_tfrecord(tfrecord_path, out_dir, n_each_file_samples):\n with tf.Graph().as_default(), tf.Session() as sess:\n ds = tf.data.TFRecordDataset(tfrecord_path).batch(1000)\n batch = ds.make_one_shot_iterator().get_next()\n part_num = 0\n n_writed = 0\n part_path = out_dir + '/{:02d}.tfrecord'.format(part_num)\n writer = tf.python_io.TFRecordWriter(part_path)\n while True:\n try:\n records = sess.run(batch)\n for record in records:\n writer.write(record)\n n_writed += 1\n if n_writed > n_each_file_samples: \n part_num += 1\n n_writed = 0\n writer.close()\n part_path = out_dir + '/{:02d}.tfrecord'.format(part_num)\n writer = tf.python_io.TFRecordWriter(part_path)\n except tf.errors.OutOfRangeError: break\n writer.close()\n\n\n\nif __name__ == '__main__':\n import sys\n in_rec = sys.argv[1]\n out_dir = sys.argv[2]\n split_tfrecord(in_rec, out_dir, 5000000)\n"
},
{
"alpha_fraction": 0.5793324112892151,
"alphanum_fraction": 0.5956408977508545,
"avg_line_length": 48.323307037353516,
"blob_id": "9613b3dd88453745f732eeafacbc99ab3b5f39e3",
"content_id": "caaaffdb5c8d1ed30e04bd05c29f763a44cab862",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6561,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 133,
"path": "/pre_train_src/mask_lm.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport random\nfrom collections import namedtuple\nimport re\nimport numpy as np\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\nfrom data_processor_mask import DataProcessor\n\nclass MaskLM:\n def __init__(self, bert_config_path, num_class, pyid2seq, skid2seq, py_dim, py_or_sk, multi_task=True, keep_prob=0.9):\n self.num_class = num_class\n self.keep_prob = keep_prob\n self.use_pinyin = True\n self.multi_task = multi_task\n self.pyid2seq = pyid2seq\n self.skid2seq = skid2seq\n self.py_or_sk = py_or_sk\n self.PYLEN = 4 #pinyin seq len\n self.SKLEN = 10 #stroke seq len\n self.PYDIM = py_dim\n self.MAX_SEN_LEN = 512\n self.bert_config = modeling.BertConfig.from_json_file(bert_config_path)\n \n def create_model(self, input_ids, input_mask, pinyin_ids, stroke_ids, lmask, labels, py_labels, is_training):\n\n def lstm_op(sen_pyids, ZM_EMBS, ID2SEQ, flg):\n hidden_size = 768\n seq_len = self.PYLEN if 'py' in flg else self.SKLEN\n sen_pyids = tf.reshape(sen_pyids, shape=[-1])\n sen_seq = tf.nn.embedding_lookup(ID2SEQ, sen_pyids, name=\"lookup_pyid2seq\")\n sen_seq_emb = tf.nn.embedding_lookup(ZM_EMBS, sen_seq, name=\"lookup_pyemb\")\n sen_seq_emb = tf.reshape(sen_seq_emb, shape=[-1, seq_len, self.PYDIM]) \n\n with tf.variable_scope('GRU', reuse=tf.AUTO_REUSE):\n cell = tf.contrib.rnn.GRUCell(num_units=hidden_size)\n all_out, final_out = tf.nn.dynamic_rnn(cell, sen_seq_emb, dtype=tf.float32)\n lstm_output = tf.reshape(final_out, shape=[-1, self.MAX_SEN_LEN, hidden_size])\n return lstm_output\n\n if 'py' in self.py_or_sk: \n with tf.variable_scope('py_emb', reuse=tf.AUTO_REUSE):\n zimu_emb = tf.get_variable('zimu_emb', [30, self.PYDIM], initializer=tf.truncated_normal_initializer(stddev=0.02))\n id2seq = tf.get_variable(\"pyid2seq_matrix\", initializer=self.pyid2seq, trainable=False)\n py_embs = lstm_op(pinyin_ids, zimu_emb, id2seq, 'py')\n elif 'sk' in self.py_or_sk:\n with tf.variable_scope('py_emb', reuse=tf.AUTO_REUSE):\n zimu_emb = tf.get_variable('zimu_emb', [1600, self.PYDIM], initializer=tf.truncated_normal_initializer(stddev=0.02))\n id2seq = tf.get_variable(\"pyid2seq_matrix\", initializer=self.skid2seq, trainable=False)\n py_embs = lstm_op(stroke_ids, zimu_emb, id2seq, 'sk')\n elif 'all' in self.py_or_sk:\n with tf.variable_scope('py_emb', reuse=tf.AUTO_REUSE):\n zimu_emb = tf.get_variable('zimu_emb', [30, self.PYDIM], initializer=tf.truncated_normal_initializer(stddev=0.02))\n pyid2seq = tf.get_variable(\"pyid2seq_matrix\", initializer=self.pyid2seq, trainable=False)\n py_embs = lstm_op(pinyin_ids, zimu_emb, pyid2seq, 'py')\n with tf.variable_scope('sk_emb', reuse=tf.AUTO_REUSE):\n zisk_emb = tf.get_variable('zisk_emb', [7, self.PYDIM], initializer=tf.truncated_normal_initializer(stddev=0.02))\n skid2seq = tf.get_variable(\"pyid2seq_matrix\", initializer=self.skid2seq, trainable=False)\n sk_embs = lstm_op(stroke_ids, zisk_emb, skid2seq, 'sk')\n py_embs = py_embs + sk_embs\n else:\n raise Exception('not supported py_or_sk:%s' % self.py_or_sk)\n \n\n with tf.variable_scope('bert', reuse=tf.AUTO_REUSE):\n model = modeling.BertModel(\n config=self.bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n pinyin_embs=py_embs, #phonic and shape embeddings\n use_one_hot_embeddings=False)\n\n output_seq = model.get_all_encoder_layers()[-1]\n 
hidden_size = output_seq[-1].shape[-1].value\n\n \n with tf.variable_scope(\"loss\", reuse=tf.AUTO_REUSE):\n output_weights = tf.get_variable(\n \"output_weights\", [self.num_class, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [self.num_class], initializer=tf.zeros_initializer())\n\n if self.multi_task is True:\n output_py_weights = tf.get_variable(\n \"output_py_weights\", [430, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n \n output_py_bias = tf.get_variable(\n \"output_py_bias\", [430], initializer=tf.zeros_initializer())\n\n\n output = tf.reshape(output_seq, [-1, hidden_size])\n labels = tf.squeeze(tf.reshape(labels, [-1, 1]))\n mask = tf.squeeze(tf.reshape(lmask, [-1, 1]))\n if is_training:\n output = tf.nn.dropout(output, keep_prob=self.keep_prob)\n\n # loss\n logits = tf.matmul(output, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=self.num_class, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) * tf.cast(mask, tf.float32)\n loss = tf.reduce_sum(per_example_loss) / tf.reduce_sum(tf.cast(mask, tf.float32))\n\n \n # py_loss\n py_loss = tf.zeros(shape=[1,1])\n if self.multi_task is True:\n py_labels = tf.squeeze(tf.reshape(py_labels, [-1, 1]))\n py_logits = tf.matmul(output, output_py_weights, transpose_b=True)\n py_logits = tf.nn.bias_add(py_logits, output_py_bias)\n py_log_probs = tf.nn.log_softmax(py_logits, axis=-1)\n py_one_hot_labels = tf.one_hot(py_labels, depth=430, dtype=tf.float32)\n py_per_example_loss = -tf.reduce_sum(py_one_hot_labels * py_log_probs, axis=-1) * tf.cast(mask, tf.float32)\n py_loss = tf.reduce_sum(py_per_example_loss) / tf.reduce_sum(tf.cast(mask, tf.float32))\n loss = loss + py_loss\n \n return (loss, probabilities, one_hot_labels, mask, py_loss)\n\n"
},
{
"alpha_fraction": 0.7673377990722656,
"alphanum_fraction": 0.7718120813369751,
"avg_line_length": 36.25,
"blob_id": "7c92329f6297cd37e16a00fab7f456fd3eadc102",
"content_id": "cc7924a68e069a4d3f5ca0fedd06e94effac360c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 447,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 12,
"path": "/pre_train_src/gen_train_tfrecords.sh",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n\n#convert text to tfrecord\nvocab_file=\"./datas/vocab.txt\"\ntrain_corpus_file=\"./datas/pretrain_corpus_examples.txt\"\noutput_tf_record_file=\"./train.tf_record\"\npython3 data_processor_mask.py $vocab_file $train_corpus_file $output_tf_record_file\n\n#split tf_record_file to multiply files (for training on multiply gpus)\ntf_records_dir=./train_tfrecords\nmkdir $tf_records_dir\npython3 split_records.py $output_tf_record_file $tf_records_dir\n"
},
{
"alpha_fraction": 0.5932159423828125,
"alphanum_fraction": 0.600724995136261,
"avg_line_length": 45.15538024902344,
"blob_id": "69d7a8730576c8bb9872a585f7f0468eac9fa674",
"content_id": "e698cf5f9dcc00a13c907b183584d155c4102aa5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11586,
"license_type": "permissive",
"max_line_length": 288,
"num_lines": 251,
"path": "/finetune_src/train_eval_tagging.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\n\nimport sys, os\nimport numpy as np\nimport tensorflow as tf\nfrom bert_tagging import DataProcessor, BertTagging\nimport modeling\nimport optimization\nimport time\nfrom tagging_eval import score_f, score_f_sent, score_f_py\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndef evaluate(FLAGS, sess, model, data_processor, label_list=None):\n gpuid = FLAGS.gpuid\n max_sen_len = FLAGS.max_sen_len\n train_path = FLAGS.train_path\n test_file = FLAGS.test_path\n out_dir = FLAGS.output_dir\n batch_size = 50\n EPOCH = FLAGS.epoch\n learning_rate = FLAGS.learning_rate\n init_bert_dir = FLAGS.init_bert_path\n learning_rate = FLAGS.learning_rate\n vocab_file = '%s/vocab.txt' % init_bert_dir\n init_checkpoint = '%s/bert_model.ckpt' % init_bert_dir\n bert_config_path = '%s/bert_config.json'% init_bert_dir\n \n\n test_num = data_processor.num_examples\n test_data = data_processor.build_data_generator(batch_size)\n iterator = test_data.make_one_shot_iterator()\n input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels = iterator.get_next()\n\n (pred_loss, pred_probs, gold_probs, gold_mask, py_probs, py_one_hot_labels, fusion_prob) = \\\n model.create_model(input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels, is_training=False)\n label_list = data_processor.label_list\n py_label_list = data_processor.py_label_list\n ans_c, ans_py, ans = [], [], []\n all_inputs, all_golds, all_preds = [], [], []\n all_py_golds, all_py_preds = [], []\n all_fusino_preds = []\n all_inputs_sent, all_golds_sent, all_preds_sent = [], [], []\n all_py_pred_sent, all_py_gold_sent, all_fusion_sent = [], [], []\n all_py_inputs, all_py_inputs_sent = [], []\n for step in range(test_num // batch_size):\n if model.multi_task is True: \n inputs, py_inputs, loss_value, preds, golds, gmask, py_pred, py_golds, fusion_pred = sess.run([input_ids, segment_ids, pred_loss, pred_probs, gold_probs, gold_mask, py_probs, py_one_hot_labels, fusion_prob])\n else:\n inputs, loss_value, preds, golds, gmask = sess.run([input_ids, pred_loss, pred_probs, gold_probs, gold_mask])\n\n preds = np.reshape(preds, (batch_size, max_sen_len, len(label_list))) \n preds = np.argmax(preds, axis=2)\n golds = np.reshape(golds, (batch_size, max_sen_len, len(label_list))) \n golds = np.argmax(golds, axis=2)\n gmask = np.reshape(gmask, (batch_size, max_sen_len))\n if model.multi_task is True:\n py_pred = np.reshape(py_pred, (batch_size, max_sen_len, 430))\n py_pred = np.argmax(py_pred, axis=2)\n py_golds = np.reshape(py_golds, (batch_size, max_sen_len, 430))\n py_golds = np.argmax(py_golds, axis=2)\n fusion_pred = np.reshape(fusion_pred, (batch_size, max_sen_len, len(label_list)))\n fusion_pred = np.argmax(fusion_pred, axis=2)\n for k in range(batch_size):\n tmp1, tmp2, tmp3, tmps4, tmps5, tmps6, tmps7 = [], [], [], [], [], [], []\n for j in range(max_sen_len):\n if gmask[k][j] == 0: continue\n all_golds.append(golds[k][j])\n all_preds.append(preds[k][j])\n all_inputs.append(inputs[k][j])\n tmp1.append(label_list[golds[k][j]])\n tmp2.append(label_list[preds[k][j]])\n tmp3.append(label_list[inputs[k][j]])\n if model.multi_task is True:\n all_py_inputs.append(py_inputs[k][j])\n all_py_golds.append(py_golds[k][j])\n all_py_preds.append(py_pred[k][j])\n all_fusino_preds.append(fusion_pred[k][j])\n tmps4.append(str(py_golds[k][j]))\n tmps5.append(str(py_pred[k][j]))\n tmps6.append(label_list[fusion_pred[k][j]])\n tmps7.append(str(py_inputs[k][j]))\n \n \n all_golds_sent.append(tmp1)\n 
all_preds_sent.append(tmp2)\n all_inputs_sent.append(tmp3)\n if model.multi_task is True:\n all_py_pred_sent.append(tmps4)\n all_py_gold_sent.append(tmps5)\n all_fusion_sent.append(tmps6)\n all_py_inputs_sent.append(tmps7)\n \n\n all_golds = [label_list[k] for k in all_golds]\n all_preds = [label_list[k] for k in all_preds]\n all_inputs = [label_list[k] for k in all_inputs]\n if model.multi_task is True:\n all_fusino_preds = [label_list[k] for k in all_fusino_preds]\n all_py_inputs = [py_label_list.get(int(k), k) for k in all_py_inputs]\n all_py_golds = [py_label_list.get(int(k), k) for k in all_py_golds]\n all_py_preds = [py_label_list.get(int(k), k) for k in all_py_preds]\n \n if model.multi_task is True:\n print('pinyin result:')\n score_f_py((all_py_inputs, all_py_golds, all_py_preds), (all_inputs, all_golds, all_preds), out_dir, False)\n print('fusion result:')\n p, r, f = score_f((all_inputs, all_golds, all_fusino_preds))\n score_f_sent(all_inputs_sent, all_golds_sent, all_fusion_sent)\n else: \n print('zi result:') \n p, r, f = score_f((all_inputs, all_golds, all_preds), only_check=False)\n p_sent, r_sent, f_sent = score_f_sent(all_inputs_sent, all_golds_sent, all_preds_sent)\n \n del data_processor\n return f\n\n\ndef train(FLAGS):\n rand_type_emb = False\n\n gpuid = FLAGS.gpuid\n max_sen_len = FLAGS.max_sen_len\n train_path = FLAGS.train_path\n test_file = FLAGS.test_path\n out_dir = FLAGS.output_dir\n batch_size = FLAGS.batch_size\n EPOCH = FLAGS.epoch\n init_bert_dir = FLAGS.init_bert_path\n learning_rate = FLAGS.learning_rate\n sk_or_py = FLAGS.sk_or_py\n multi_task = True if FLAGS.multi_task > 0 else False\n py_dim = int(FLAGS.py_dim)\n vocab_file = '%s/vocab.txt' % init_bert_dir\n init_checkpoint = '%s/bert_model.ckpt' % init_bert_dir\n bert_config_path = '%s/bert_config.json'% init_bert_dir\n \n if os.path.exists(out_dir) is False:\n os.mkdir(out_dir)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpuid\n keep_prob = FLAGS.keep_prob\n print('test_file=', test_file)\n test_data_processor = DataProcessor(test_file, max_sen_len, vocab_file, out_dir, label_list=None, is_training=False)\n print('train_file=', train_path)\n data_processor = DataProcessor(train_path, max_sen_len, vocab_file, out_dir, label_list=None, is_training=True)\n\n zi_py_matrix = data_processor.get_zi_py_matrix()\n train_num = data_processor.num_examples\n train_data = data_processor.build_data_generator(batch_size)\n iterator = train_data.make_one_shot_iterator()\n input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels = iterator.get_next()\n\n model = BertTagging(bert_config_path, num_class=len(data_processor.get_label_list()), pyid2seq=data_processor.PYID2SEQ, skid2seq=data_processor.SKID2SEQ, py_dim=py_dim, max_sen_len=max_sen_len, py_or_sk=sk_or_py, keep_prob=keep_prob, zi_py_matrix=zi_py_matrix, multi_task=multi_task)\n (loss, probs, golds, _, py_probs, py_one_hot_labels, fusion_prob) = model.create_model(input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels, is_training=True)\n\n tf_config = tf.ConfigProto(log_device_placement=False)\n tf_config.gpu_options.allow_growth = True\n with tf.Session(config=tf_config) as sess:\n if init_checkpoint is not None:\n tvars = tf.trainable_variables()\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n keys = [x for x in assignment_map.keys()]\n for key in keys:\n if (rand_type_emb is True) and ('token_type_embeddings' in key):\n del assignment_map[key]\n 
continue\n print(key, '\\t', assignment_map[key])\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n\n num_steps = train_num // batch_size * EPOCH\n num_warmup_steps = num_steps // 10\n train_op = optimization.create_optimizer(loss, learning_rate, num_steps, num_warmup_steps, use_tpu=False)\n\n init = tf.global_variables_initializer()\n sess.run(init)\n \n loss_values = []\n saver = tf.train.Saver()\n best_score = 0.0\n best_model_path = os.path.join(out_dir, 'best.ckpt')\n total_step = 0\n for epoch in range(EPOCH):\n for step in range(int(train_num / batch_size)):\n total_step += 1\n start_time = time.time()\n train_loss, _ = sess.run([loss, train_op]) \n loss_values.append(train_loss)\n if step % 500 == 0:\n duration = time.time() - start_time\n examples_per_sec = float(duration) / batch_size\n format_str = ('Epoch {} step {}, train loss = {:.4f},{:.4f},{:.4f} ( {:.4f} examples/sec; {:.4f} ''sec/batch)')\n print (format_str.format(epoch, step, np.mean(loss_values),np.mean(loss_values[-1000:]),np.mean(loss_values[-100:]), examples_per_sec, duration))\n loss_values = loss_values[-1000:]\n\n f1 = evaluate(FLAGS, sess, model, test_data_processor)\n if f1 > best_score:\n saver.save(sess, best_model_path)\n best_score = f1\n sys.stdout.flush()\n f1 = evaluate(FLAGS, sess, model, test_data_processor)\n if f1 > best_score:\n saver.save(sess, best_model_path)\n best_score = f1\n sys.stdout.flush()\n print ('best f value:', best_score)\n \n \nif __name__ == '__main__':\n\n flags = tf.flags\n ## Required parameters\n flags.DEFINE_string(\"gpuid\", '0', \"The gpu NO. \")\n\n ## Optional\n flags.DEFINE_string(\"train_path\", '', \"train path \")\n flags.DEFINE_string(\"test_path\", '', \"test path \")\n flags.DEFINE_string(\"output_dir\", '', \"out dir \")\n flags.DEFINE_string(\"init_bert_path\", '', \"out dir \")\n flags.DEFINE_string(\"sk_or_py\", 'py', \"sk_or_py\")\n flags.DEFINE_string(\"label_list\", '', 'max_sen_len')\n flags.DEFINE_integer(\"max_sen_len\", 64, 'max_sen_len')\n flags.DEFINE_integer(\"batch_size\", 32, 'batch_size')\n flags.DEFINE_integer(\"single_text\", '0', 'single_text')\n flags.DEFINE_integer(\"epoch\", 2, 'batch_size')\n flags.DEFINE_float(\"learning_rate\", 5e-5, 'filter_punc')\n flags.DEFINE_float(\"keep_prob\", 0.9, 'keep prob in dropout')\n flags.DEFINE_integer(\"py_dim\", 32, 'keep prob in dropout')\n flags.DEFINE_integer(\"multi_task\", 0, 'keep prob in dropout')\n\n\n flags.mark_flag_as_required('gpuid')\n flags.mark_flag_as_required('train_path')\n flags.mark_flag_as_required('test_path')\n flags.mark_flag_as_required('init_bert_path')\n flags.mark_flag_as_required('output_dir')\n flags.mark_flag_as_required('label_list')\n flags.mark_flag_as_required('max_sen_len')\n\n FLAGS = flags.FLAGS\n print ('Confings:')\n print ('\\tlearning_rate=', FLAGS.learning_rate)\n print ('\\ttrain_path=', FLAGS.train_path)\n print ('\\ttest_path=', FLAGS.test_path)\n print ('\\tinit_bert_path=', FLAGS.init_bert_path)\n print ('\\toutput_dir=', FLAGS.output_dir)\n print ('\\tmax_sen_len=', FLAGS.max_sen_len)\n print ('\\tpy_dim=', FLAGS.py_dim)\n print ('\\tmulti_task=', FLAGS.multi_task)\n print ('\\tsk_or_py=', FLAGS.sk_or_py)\n train(FLAGS)\n\n"
},
{
"alpha_fraction": 0.5034482479095459,
"alphanum_fraction": 0.5257799625396729,
"avg_line_length": 38.290321350097656,
"blob_id": "29659f757e45eb3df0a4b04d4df00244c4991124",
"content_id": "45241f1f9ee8b5a74dc3a37479f7af26e5aa3a88",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6090,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 155,
"path": "/finetune_src/tagging_eval.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#-*-coding:utf8-*-\nimport sys, os\nimport numpy as np\nimport tensorflow as tf\nimport modeling\nimport optimization\nimport time\n\nos.environ[\"PYTHONIOENCODING\"] = \"utf-8\"\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndef score_f(ans, print_flg=False, only_check=False, out_dir=''):\n fout = open('%s/pred.txt' % out_dir, 'w', encoding=\"utf-8\")\n total_gold_err, total_pred_err, right_pred_err = 0, 0, 0\n check_right_pred_err = 0\n inputs, golds, preds = ans\n assert len(inputs) == len(golds)\n assert len(golds) == len(preds)\n for ori, god, prd in zip(inputs, golds, preds):\n ori_txt = str(ori)\n god_txt = str(god) #''.join(list(map(str, god)))\n prd_txt = str(prd) #''.join(list(map(str, prd)))\n if print_flg is True:\n print(ori_txt, '\\t', god_txt, '\\t', prd_txt)\n if 'UNK' in ori_txt:\n continue\n if ori_txt == god_txt and ori_txt == prd_txt:\n continue\n if prd_txt != god_txt:\n fout.writelines('%s\\t%s\\t%s\\n' % (ori_txt, god_txt, prd_txt)) \n if ori != god:\n total_gold_err += 1\n if prd != ori:\n total_pred_err += 1\n if (ori != god) and (prd != ori):\n check_right_pred_err += 1\n if god == prd:\n right_pred_err += 1\n fout.close()\n\n #check p, r, f\n p = 1. * check_right_pred_err / (total_pred_err + 0.001)\n r = 1. * check_right_pred_err / (total_gold_err + 0.001)\n f = 2 * p * r / (p + r + 1e-13)\n print('token check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))\n if only_check is True:\n return p, r, f\n\n #correction p, r, f\n #p = 1. * right_pred_err / (total_pred_err + 0.001)\n pc = 1. * right_pred_err / (check_right_pred_err + 0.001)\n rc = 1. * right_pred_err / (total_gold_err + 0.001)\n fc = 2 * pc * rc / (pc + rc + 1e-13) \n print('token correction: p=%.3f, r=%.3f, f=%.3f' % (pc, rc, fc))\n return p, r, f\n\n\ndef score_f_py(ans_py, ans_zi, out_dir, print_flg=False, only_check=False):\n fout = open('%s/pred_py.txt' % out_dir, 'w', encoding=\"utf-8\")\n total_gold_err, total_pred_err, right_pred_err = 0, 0, 0\n check_right_pred_err = 0\n inputs, golds, preds = ans_py\n inputs_z, golds_z, preds_z = ans_zi\n assert len(inputs) == len(golds)\n assert len(golds) == len(preds)\n assert len(inputs_z) == len(golds_z)\n\n index = -1\n total_len = len(inputs_z)\n for ori, god, prd in zip(inputs_z, golds_z, preds_z):\n index += 1\n ori_txt = str(ori)\n god_txt = str(god) #''.join(list(map(str, god)))\n prd_txt = str(prd) #''.join(list(map(str, prd)))\n if print_flg is True:\n print(ori_txt, '\\t', god_txt, '\\t', prd_txt)\n if 'UNK' in ori_txt:\n continue\n ori_py, god_py, prd_py = str(inputs[index]), str(golds[index]), str(preds[index])\n if (ori_txt == god_txt and ori_txt == prd_txt and prd_py == ori_py):\n continue\n if (god_txt != prd_txt) or (prd_py != ori_py):\n start_idx = index - 5\n if start_idx < 0: start_idx = 0\n end_idx = index + 5\n if end_idx > total_len: end_idx = total_len\n for _idx in range(start_idx, end_idx, 1):\n fout.writelines('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (inputs_z[_idx], golds_z[_idx], preds_z[_idx], inputs[_idx], golds[_idx], preds[_idx])) \n fout.writelines('\\n')\n if ori != god:\n total_gold_err += 1\n if (prd != ori) or (prd_py != ori_py):\n total_pred_err += 1\n \n if (ori != god) and ((prd != ori) or (prd_py != ori_py)):\n check_right_pred_err += 1\n if god_py == prd_py:\n right_pred_err += 1\n fout.close()\n\n #check p, r, f\n p = 1. * check_right_pred_err / (total_pred_err + 0.001)\n r = 1. 
* check_right_pred_err / (total_gold_err + 0.001)\n f = 2 * p * r / (p + r + 1e-13)\n print('token check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))\n if only_check is True:\n return p, r, f\n\n #correction p, r, f\n #p = 1. * right_pred_err / (total_pred_err + 0.001)\n pc = 1. * right_pred_err / (check_right_pred_err + 0.001)\n rc = 1. * right_pred_err / (total_gold_err + 0.001)\n fc = 2 * pc * rc / (pc + rc + 1e-13) \n print('token correction: p=%.3f, r=%.3f, f=%.3f' % (pc, rc, fc))\n return p, r, f\n\n\n\n\ndef score_f_sent(inputs, golds, preds):\n assert len(inputs) == len(golds)\n assert len(golds) == len(preds)\n total_gold_err, total_pred_err, right_pred_err = 0, 0, 0\n check_right_pred_err = 0\n fout = open('sent_pred_result.txt', 'w', encoding='utf-8')\n for ori_tags, god_tags, prd_tags in zip(inputs, golds, preds):\n assert len(ori_tags) == len(god_tags)\n assert len(god_tags) == len(prd_tags)\n gold_errs = [idx for (idx, tk) in enumerate(god_tags) if tk != ori_tags[idx]]\n pred_errs = [idx for (idx, tk) in enumerate(prd_tags) if tk != ori_tags[idx]]\n if len(gold_errs) > 0 or len(pred_errs) > 0:\n fout.writelines('\\n%s\\n%s\\n%s\\n' % ('|'.join(ori_tags), '|'.join(god_tags),'|'.join(prd_tags)))\n if len(gold_errs) > 0:\n total_gold_err += 1\n fout.writelines('gold_err\\n')\n if len(pred_errs) > 0:\n fout.writelines('check_err\\n')\n total_pred_err += 1\n if gold_errs == pred_errs:\n check_right_pred_err += 1\n fout.writelines('check_right\\n')\n if god_tags == prd_tags:\n right_pred_err += 1\n fout.writelines('correct_right\\n')\n fout.close()\n p = 1. * check_right_pred_err / total_pred_err\n r = 1. * check_right_pred_err / total_gold_err\n f = 2 * p * r / (p + r + 1e-13)\n #print(total_gold_err, total_pred_err, right_pred_err, check_right_pred_err)\n print('sent check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))\n p = 1. * right_pred_err / total_pred_err\n r = 1. * right_pred_err / total_gold_err\n f = 2 * p * r / (p + r + 1e-13)\n print('sent correction: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))\n return p, r, f\n"
},
{
"alpha_fraction": 0.8014705777168274,
"alphanum_fraction": 0.8161764740943909,
"avg_line_length": 135,
"blob_id": "815161c2872e0542865698f5b20cbbfe501d571b",
"content_id": "90ad251c3275aa725c5c9135909aa7ed737eb7a6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 136,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 1,
"path": "/pre_train_src/readme.txt",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "Here we provide 10 pretraining samples to illustrate the data format, the whole corups: https://github.com/suzhoushr/nlp_chinese_corpus\n"
},
{
"alpha_fraction": 0.4314960539340973,
"alphanum_fraction": 0.47454068064689636,
"avg_line_length": 34.90565872192383,
"blob_id": "3100b99f0019bb92ca830d173339e03e75f8e6a9",
"content_id": "5631ae7733538814e40637219d84beff73f5cee3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1905,
"license_type": "permissive",
"max_line_length": 232,
"num_lines": 53,
"path": "/finetune_src/pinyin_tool.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#encoding:utf8\nimport sys\nimport numpy as np\n\nclass PinyinTool:\n def __init__(self, py_dict_path, py_vocab_path, py_or_sk='py'):\n self.zi_pinyin = self._load_pydict(py_dict_path)\n self.vocab = self._load_pyvocab(py_vocab_path)\n if 'py' in py_or_sk:\n self.ZM2ID = {':':1, 'a':2, 'c':3, 'b':4, 'e':5, 'd':6, 'g':7, 'f':8, 'i':9, 'h':10, 'k':11, 'j':12, 'm':13, 'l':14, 'o':15, 'n':16, 'q':17, 'p':18, 's':19, 'r':20, 'u':21, 't':22, 'w':23, 'v':24, 'y':25, 'x':26, 'z':27}\n self.PYLEN = 4\n else:\n self.ZM2ID = {'1': 1, '2':2, '3':3, '4':4, '5':5}\n self.PYLEN = 10\n\n def _load_pydict(self, fpath):\n ans = {}\n for line in open(fpath, encoding='utf-8'):\n line = line.strip()#.decode('utf8')\n tmps = line.split('\\t')\n if len(tmps) != 2: continue\n ans[tmps[0]] = tmps[1]\n return ans\n\n\n def _load_pyvocab(self, fpath):\n ans = {'PAD': 0, 'UNK': 1}\n idx = 2\n for line in open(fpath, encoding='utf-8'):\n line = line.strip()#.decode('utf8')\n if len(line) < 1: continue\n ans[line] = idx\n idx += 1\n return ans\n\n def get_pinyin_id(self, zi_unicode):\n py = self.zi_pinyin.get(zi_unicode, None)\n if py is None:\n return self.vocab['UNK']\n return self.vocab.get(py, self.vocab['UNK'])\n \n def get_pyid2seq_matrix(self):\n ans = [[0] * self.PYLEN, [0] * self.PYLEN] #PAD, UNK\n rpyvcab = {v: k for k, v in self.vocab.items()}\n for k in range(2, len(rpyvcab), 1):\n pystr = rpyvcab[k]\n seq = []\n for c in pystr:\n seq.append(self.ZM2ID[c])\n seq = [0] * self.PYLEN + seq\n seq = seq[-self.PYLEN:]\n ans.append(seq)\n return np.asarray(ans, dtype=np.int32) \n\n"
},
{
"alpha_fraction": 0.5367749929428101,
"alphanum_fraction": 0.5459492802619934,
"avg_line_length": 40.21794891357422,
"blob_id": "bf59f9e9efb8771cc6e48169221a8f21ac986086",
"content_id": "2ac5709411a496dc7eb68e573f35d250ad5a9d0d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6431,
"license_type": "permissive",
"max_line_length": 183,
"num_lines": 156,
"path": "/pre_train_src/mask.py",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#*-*encoding:utf8*-*\nimport random\nimport copy\nimport time\nimport numpy as np\nimport tokenization\n\nclass ConfusionSet:\n def __init__(self, tokenizer, in_file):\n self.tokenizer = tokenizer\n self.confusion = self._load_confusion(in_file)\n\n def _str2idstr(self, string):\n ids = [self.tokenizer.vocab.get(x, -1) for x in string]\n if min(ids) < 0:\n return None\n ids = ' '.join(map(str, ids))\n return ids\n \n def _load_confusion(self, in_file):\n pass\n\n def get_confusion_item_by_ids(self, token_id):\n confu = self.confusion.get(token_id, None)\n if confu is None:\n return None\n return confu[random.randint(0,len(confu) - 1)]\n\n def get_confusion_item_by_unicode(self, key_unicode):\n if len(key_unicode) == 1:\n keyid = self.tokenizer.vocab.get(key_unicode, None)\n else:\n keyid = self._str2idstr(key_unicode)\n if keyid is None:\n return None\n confu = self.confusion.get(keyid, None)\n if confu is None:\n return None\n return confu[random.randint(0, len(confu) - 1)]\n\n \nclass PinyinConfusionSet(ConfusionSet):\n def _load_confusion(self, in_file):\n confusion_datas = {}\n for line in open(in_file, encoding='utf-8'):\n line = line.strip()#.decode('utf-8')\n tmps = line.split('\\t')\n if len(tmps) != 2:\n continue\n key = tmps[0]\n values = tmps[1].split()\n if len(key) != 1:\n continue\n all_ids = set()\n keyid = self.tokenizer.vocab.get(key, None)\n if keyid is None:\n continue\n for k in values:\n if self.tokenizer.vocab.get(k, None) is not None:\n all_ids.add(self.tokenizer.vocab[k])\n all_ids = list(all_ids)\n if len(all_ids) > 0:\n confusion_datas[keyid] = all_ids\n return confusion_datas\n\nclass StrokeConfusionSet(ConfusionSet):\n def _load_confusion(self, in_file):\n confusion_datas = {}\n for line in open(in_file, encoding='utf-8'):\n line = line.strip()#.decode('utf-8')\n tmps = line.split(',')\n if len(tmps) < 2:\n continue\n values = tmps\n all_ids = set()\n for k in values:\n if k in self.tokenizer.vocab:\n all_ids.add(self.tokenizer.vocab[k])\n all_ids = list(all_ids)\n for k in all_ids:\n confusion_datas[k] = all_ids\n return confusion_datas\n\n\n\nclass Mask(object):\n def __init__(self, same_py_confusion, simi_py_confusion, sk_confusion, tokenid2pyid, tokenid2skid):\n self.same_py_confusion = same_py_confusion\n self.simi_py_confusion = simi_py_confusion\n self.sk_confusion = sk_confusion\n self.tokenid_pyid = tokenid2pyid\n self.tokenid_skid = tokenid2skid\n self.config = {'same_py': 0.3, 'simi_py': 0.3, 'stroke': 0.15, 'random': 0.1, 'keep': 0.15, 'global_rate': 0.15}\n self.same_py_thr = self.config['same_py'] \n self.simi_py_thr = self.config['same_py'] + self.config['simi_py']\n self.stroke_thr = self.config['same_py'] + self.config['simi_py'] + self.config['stroke']\n self.random_thr = self.config['same_py'] + self.config['simi_py'] + self.config['stroke'] + self.config['random']\n self.keep_thr = self.config['same_py'] + self.config['simi_py'] + self.config['stroke'] + self.config['random'] + self.config['keep']\n self.invalid_ids = set([self.same_py_confusion.tokenizer.vocab.get('UNK'),\n self.same_py_confusion.tokenizer.vocab.get('[CLS]'),\n self.same_py_confusion.tokenizer.vocab.get('[SEP]'),\n self.same_py_confusion.tokenizer.vocab.get('[UNK]')])\n\n self.all_token_ids = [int(x) for x in self.same_py_confusion.tokenizer.vocab.values()]\n self.n_all_token_ids = len(self.all_token_ids) - 1\n\n def get_mask_method(self):\n prob = random.random()\n if prob <= self.same_py_thr:\n return 'pinyin'\n elif prob <= self.simi_py_thr:\n return 'jinyin'\n 
elif prob <= self.stroke_thr:\n return 'stroke'\n elif prob <= self.random_thr:\n return 'random'\n elif prob <= self.keep_thr:\n return 'keep'\n return 'pinyin'\n\n def mask_process(self, input_sample):\n valid_ids = [idx for (idx, v) in enumerate(input_sample) if v not in self.invalid_ids]\n masked_sample = copy.deepcopy(list(input_sample))\n seq_len = len(masked_sample)\n masked_flgs = [0] * seq_len\n n_masked = int(len(valid_ids) * self.config['global_rate'])\n if n_masked < 1:\n n_masked = 1\n random.shuffle(valid_ids)\n for pos in valid_ids[:n_masked]:\n method = self.get_mask_method()\n if method == 'pinyin':\n new_c = self.same_py_confusion.get_confusion_item_by_ids(input_sample[pos])\n if new_c is not None:\n masked_sample[pos] = new_c\n masked_flgs[pos] = 1\n elif method == 'jinyin':\n new_c = self.simi_py_confusion.get_confusion_item_by_ids(input_sample[pos])\n if new_c is not None:\n masked_sample[pos] = new_c\n masked_flgs[pos] = 1\n elif method == 'stroke':\n new_c = self.sk_confusion.get_confusion_item_by_ids(input_sample[pos]) \n if new_c is not None:\n masked_sample[pos] = new_c\n masked_flgs[pos] = 1\n elif method == 'random':\n new_c = self.all_token_ids[random.randint(0, self.n_all_token_ids)]\n if new_c is not None:\n masked_sample[pos] = new_c\n masked_flgs[pos] = 1\n elif method == 'keep': \n masked_flgs[pos] = 1\n masked_py_ids = [self.tokenid_pyid.get(x, 1) for x in masked_sample] \n masked_sk_ids = [self.tokenid_skid.get(x, 1) for x in masked_sample] \n return np.asarray(masked_sample, dtype=np.int32), np.asarray(masked_flgs, dtype=np.int32), np.asarray(masked_py_ids, dtype=np.int32), np.asarray(masked_sk_ids, dtype=np.int32)\n\n"
},
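The masking record above turns the per-method probabilities in its config dict (same_py 0.3, simi_py 0.3, stroke 0.15, random 0.1, keep 0.15) into cumulative thresholds and compares a single uniform draw against them. Below is a minimal, self-contained sketch of that cumulative-threshold sampling; the `sample_method` helper and the `__main__` check are illustrative additions, not code from the repository:

```python
import random

# Probabilities copied from the config dict in Mask.__init__ above.
CONFIG = {'same_py': 0.3, 'simi_py': 0.3, 'stroke': 0.15, 'random': 0.1, 'keep': 0.15}

def sample_method(config=CONFIG):
    """Pick a corruption method the way Mask.get_mask_method does:
    one uniform draw compared against running cumulative thresholds."""
    prob = random.random()
    cumulative = 0.0
    for method in ('same_py', 'simi_py', 'stroke', 'random', 'keep'):
        cumulative += config[method]
        if prob <= cumulative:
            return method
    return 'same_py'  # fallback, mirroring the original's final return

if __name__ == '__main__':
    draws = [sample_method() for _ in range(100000)]
    print({m: round(draws.count(m) / len(draws), 3) for m in CONFIG})
    # frequencies come out close to the configured 0.3/0.3/0.15/0.1/0.15 split
```

Over a long pre-training run this is what makes roughly 60% of masked positions phonetic confusions, 15% stroke confusions, 10% random tokens and 15% kept tokens, while `global_rate` (0.15) controls how many positions per sentence are touched at all.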
{
"alpha_fraction": 0.6952022314071655,
"alphanum_fraction": 0.737535297870636,
"avg_line_length": 31.212121963500977,
"blob_id": "1b0f13b291c0b380195473d1a16746ffddc11536",
"content_id": "bff7aad7e3b1712a20de51064990a20da1278eb6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1077,
"license_type": "permissive",
"max_line_length": 193,
"num_lines": 33,
"path": "/README.md",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "# PLOME:Pre-training with Misspelled Knowledge for Chinese Spelling Correction (ACL2021)\nThis repository provides the code and data of the work in ACL2021: *PLOME: Pre-training with Misspelled Knowledge for Chinese Spelling Correction* https://aclanthology.org/2021.acl-long.233.pdf\n\n**Requirements:**\n\n- python3\n\n- tensorflow1.14\n\n- horovod\n\n**Instructions:**\n\n- Finetune: \n\n train and evaluation file format: original sentence \\t golden sentence \n ```bash\n step1: cd finetune_src ; \n step2: download the pretrained PLOME model and corpus from https://drive.google.com/file/d/1aip_siFdXynxMz6-2iopWvJqr5jtUu3F/view?usp=sharing ;\n step3: sh start.sh\n ```\n \n - Pre-train\n ```bash\n step1: cd pre_train_src ;\n step2: sh gen_train_tfrecords.sh ;\n step3: sh start.sh\n ```\n Our pre-trained model: https://drive.google.com/file/d/1aip_siFdXynxMz6-2iopWvJqr5jtUu3F/view?usp=sharing\n \n pre-trained cBERT model: https://drive.google.com/file/d/1cqSTpn7r9pnDcvMoM3BbX1X67JsPdZ8_/view?usp=sharing\n \n 国内下载地址:https://share.weiyun.com/OREEY0H3\n"
},
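The README above states that finetuning expects plain text files with one `original sentence \t golden sentence` pair per line. A tiny sketch that writes such a file follows; the file name and the two toy sentence pairs are placeholders, not data shipped with the repository:

```python
# Writes PLOME-style finetuning pairs, one "<noisy>\t<gold>" pair per line (toy data).
pairs = [
    ("我今天很高心", "我今天很高兴"),  # pinyin-confusable misspelling -> corrected sentence
    ("天气真好", "天气真好"),          # a pair where nothing needs correcting uses the same format
]

with open("train.txt", "w", encoding="utf-8") as f:
    for noisy, gold in pairs:
        f.write(f"{noisy}\t{gold}\n")
```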
{
"alpha_fraction": 0.7050691246986389,
"alphanum_fraction": 0.731566846370697,
"avg_line_length": 40.238094329833984,
"blob_id": "f1f38b44615b1f1c1d2c65ff35a367f6c86c0f11",
"content_id": "7ec7eadfb0f015b62d35984eeed701ea4a231834",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 868,
"license_type": "permissive",
"max_line_length": 435,
"num_lines": 21,
"path": "/pre_train_src/start.sh",
"repo_name": "kg-nlp/PLOME",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nappname=pretrain_plome\npy_dim=32\nmulti_task=0\ngpuid_list=\"0,1,2,3,4,5\"\nexport logdir='./'\ntrain_tfrecord_path=\"./train_tfrecords\"\noutput_dir=\"./${appname}_output\"\nkeep_prob=0.9\nmax_sen_len=512\ninit_checkpoint=\"./google_bert\" #\"0 for random initialization\"\nvocab_file=\"./datas/vocab.txt\"\nbert_config_file=\"./datas/bert_config.json\"\nbatch_size=8\nepoch=10\nlearning_rate=5e-5 \n\nmkdir $output_dir\nmkdir $train_tfrecord_path\n\nhorovodrun -np 6 -H localhost:6 python3 ./train_masklm.py --vocab_file $vocab_file --bert_config_path $bert_config_file --gpuid_list $gpuid_list --train_path $train_path --output_dir $output_dir --max_sen_len $max_sen_len --batch_size $batch_size --learning_rate $learning_rate --epoch $epoch --keep_prob $keep_prob --py_dim $py_dim --train_tfrecord_path $train_tfrecord_path --init_checkpoint $init_checkpoint --multi_task $multi_task\n\n\n"
}
] | 14 |
japroc/investmint_ticker_parser | https://github.com/japroc/investmint_ticker_parser | 02d2596d8ffafe3f34828cc80e71372b80f17131 | aaeafcb1132bd01a875a8a83f6b0f4db907e8c3e | 956316a9cc9800e07b61b5e8363b11774b80e346 | refs/heads/master | 2021-05-18T21:43:38.609547 | 2020-10-04T20:43:41 | 2020-10-04T20:43:41 | 251,436,593 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5860306620597839,
"alphanum_fraction": 0.5917092561721802,
"avg_line_length": 25.28358268737793,
"blob_id": "c4ac6a41df653fb4f05f550b29fc17a59d4a91bc",
"content_id": "e72b6b53a4ecfed32cf2efe43086a4df5c584085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1761,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 67,
"path": "/src/main.py",
"repo_name": "japroc/investmint_ticker_parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport traceback\n\nfrom flask import Flask\nfrom flask import jsonify\n\nfrom modules.investing_stock import get_ticker_info\nfrom modules.investmint import parse_ticker\nfrom modules.smartlab_bonds import parse_coupon_by_isin\n\n\napp = Flask(__name__)\n\n\[email protected]('/investing/<ticker>')\ndef get_investing_ticker(ticker):\n try:\n ticker_info = get_ticker_info(ticker)\n if ticker_info:\n resp = {\"success\": True, \"result\": ticker_info}\n else:\n resp = {\"success\": False, \"error\": \"Ticker Not Found\"}\n except Exception as e:\n traceback.print_exc()\n resp = {\"success\": False, \"error\": \"{}\".format(e)}\n finally:\n return jsonify(resp)\n\n\[email protected]('/investmint/<ticker>')\ndef parse_investmint_ticker(ticker):\n try:\n ticker_info = parse_ticker(ticker)\n if ticker_info:\n resp = {\"success\": True, \"result\": ticker_info}\n else:\n resp = {\"success\": False, \"error\": \"Ticker Not Found\"}\n except Exception as e:\n traceback.print_exc()\n resp = {\"success\": False, \"error\": \"{}\".format(e)}\n finally:\n return jsonify(resp)\n\n\[email protected]('/smartlab/coupon/<isin>')\ndef parse_smartlab_coupon(isin):\n try:\n coupon_info = parse_coupon_by_isin(isin)\n if coupon_info:\n resp = {\"success\": True, \"result\": coupon_info}\n else:\n resp = {\"success\": False, \"error\": \"ISIN Not Found\"}\n except Exception as e:\n traceback.print_exc()\n resp = {\"success\": False, \"error\": \"{}\".format(e)}\n finally:\n return jsonify(resp)\n\n\[email protected]('/ping')\ndef ping():\n return \"pong\"\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8000)\n"
},
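The Flask service in main.py exposes `/investing/<ticker>`, `/investmint/<ticker>`, `/smartlab/coupon/<isin>` and `/ping` on port 8000. A small client sketch against a locally running instance is shown below; the base URL follows the `app.run` call in the record, while the ticker and ISIN values are placeholders:

```python
import requests

BASE = "http://127.0.0.1:8000"  # matches app.run(host="0.0.0.0", port=8000) in main.py

def get_json(path):
    """Call one endpoint and unwrap the {"success": ..., "result"/"error": ...} envelope."""
    resp = requests.get(f"{BASE}{path}", timeout=10).json()
    if not resp.get("success"):
        raise RuntimeError(resp.get("error", "unknown error"))
    return resp["result"]

if __name__ == "__main__":
    print(requests.get(f"{BASE}/ping", timeout=10).text)   # expected: "pong"
    print(get_json("/investmint/sber"))                    # placeholder ticker
    print(get_json("/smartlab/coupon/RU000A0JX0J2"))       # placeholder ISIN
```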
{
"alpha_fraction": 0.5499813556671143,
"alphanum_fraction": 0.5601938366889954,
"avg_line_length": 35.85439682006836,
"blob_id": "6dfb78030cdc8ee394c530c4679e1c876ec9e2d2",
"content_id": "8e352e7d8f0556046f21b0fef0814fd20b79db0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13622,
"license_type": "no_license",
"max_line_length": 429,
"num_lines": 364,
"path": "/src/modules/investmint.py",
"repo_name": "japroc/investmint_ticker_parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport calendar\nimport datetime\nimport json\nimport re\nimport requests\nimport html\n\n\nclass Currency:\n RUB = \"RUB\"\n USD = \"USD\"\n\n\ndef parse_float(value):\n val = value.strip().replace(\",\", \".\").replace(\" \", \"\").replace(\"\\\\xa0\", \"\").replace(\"\\xa0\", \"\")\n return float(val) if val else None\n\ndef parse_currency(currency_):\n if not currency_:\n return None\n currency = currency_.strip()\n currencies = {\n \"₽\": Currency.RUB,\n \"$\": Currency.USD,\n }\n currency_ords = {\n 8381: Currency.RUB,\n 36: Currency.USD,\n }\n res = currencies.get(currency)\n if not res:\n res = currency_ords.get(ord(currency), currency)\n return res\n\ndef parse_date(day_and_month_, year_):\n day_and_month = day_and_month_.strip().split(\" \")\n day = int(day_and_month[0])\n month = parse_month(day_and_month[1])\n year = int(year_)\n date = Date(day, month, year)\n return date\n\ndef parse_month(month):\n monthes = {\n \"янв\": 1,\n \"января\": 1,\n \"фев\": 2,\n \"февраля\": 2,\n \"мар\": 3,\n \"марта\": 3,\n \"апр\": 4,\n \"апреля\": 4,\n \"мая\": 5,\n \"июн\": 6,\n \"июня\": 6,\n \"июл\": 7,\n \"июля\": 7,\n \"авг\": 8,\n \"августа\": 8,\n \"сен\": 9,\n \"сентября\": 9,\n \"окт\": 10,\n \"октября\": 10,\n \"ноя\": 11,\n \"ноября\": 11,\n \"дек\": 12,\n \"декабря\": 12,\n }\n return monthes.get(month)\n\n\n\ndef parse_divs_table(divs_table):\n future_divs = list()\n previous_divs = list()\n\n future_divs_regex = re.compile(r\"\"\"<tr class=\"(.*?)\">\\s?<td class=\"text-nowrap text-center\">.+?</td>\\s?<td class=\"text-nowrap\">(.*?) (\\d+)</td>\\s?<td class=\"text-nowrap\">(.*?) (\\d+)</td>\\s?<td class=\"text-nowrap text-right\">([\\d,]+) <small class=\"text-muted\">(.*?)</small></td>\\s?<td class=\"text-right\">([\\d,]+)<small class=\"text-muted\">%</small></td>\\s?<td>\\s?</td>\\s?</tr>\"\"\")\n previous_divs_regex = re.compile(r\"\"\"<tr class=\"(.*?)\">\\s?<td class=\"text-nowrap text-center\">\\s?</td>\\s?<td class=\"text-nowrap\">(.*?) (\\d+)</td>\\s?<td class=\"text-nowrap\">(.*?) 
(\\d+)</td>\\s?<td class=\"text-nowrap text-right\">([\\d,]+).*?<small class=\"text-muted\">(.*?)</small></td>\\s?<td class=\"text-right\">([\\d,]+).*?<small class=\"text-muted\">%</small></td>\\s?<td class=\"text-right\">([\\d,]+).*?<small class=\"text-muted\">\"\"\")\n\n lookup_for_future_divs = True\n\n prev_line_start_idx = divs_table.find(\"<tr\")\n while True:\n line_start_idx = divs_table.find(\"<tr\", prev_line_start_idx+1)\n if line_start_idx == -1:\n break\n\n line_end_idx = divs_table.find(\"</tr>\", line_start_idx)\n line = divs_table[line_start_idx:line_end_idx+5]\n\n if lookup_for_future_divs:\n m = re.search(future_divs_regex, line)\n if not m:\n lookup_for_future_divs = False\n\n if not lookup_for_future_divs:\n m = re.search(previous_divs_regex, line)\n if not m:\n break\n\n div_info = DivInfo()\n div_info.verified = \"green-bg\" in m.group(1) or \"gray-bg\" not in m.group(1)\n div_info.buy_till_date = parse_date(m.group(2), m.group(3))\n div_info.registry_close_date = parse_date(m.group(4), m.group(5))\n div_info.dividend = parse_float(m.group(6))\n div_info.currency = parse_currency(m.group(7))\n div_info.div_yield = parse_float(m.group(8))\n\n if lookup_for_future_divs:\n future_divs.append(div_info)\n else:\n div_info.close_price = parse_float(m.group(9))\n previous_divs.append(div_info)\n\n prev_line_start_idx = line_start_idx\n\n return future_divs, previous_divs\n\n\n# def parse_divs_table_v1(divs_table):\n# future_divs = list()\n# previous_divs = list()\n\n# future_divs_regex = re.compile(r\"\"\"<tr class=\"(.*?)\">.*?<td class=\"text-nowrap\">(.*?)<.*?>(.*?)</span></td><td class=\"text-nowrap\">(.*?)<span.*?>(.*?)</span></td><td class=\"text-nowrap text-right\">(.*?)(?: )?<small class=\"text-muted\">(.*?)</small></td><td class=\"text-right\">(.*?)<small class=\"text-muted\">%</small></td><td></td></tr>\"\"\")\n# previous_divs_regex = re.compile(r\"\"\"<tr class=\"(.*?)\">.*?<td class=\"text-nowrap\">(.*?)<.*?>(.*?)</span></td><td class=\"text-nowrap\">(.*?)<span.*?>(.*?)</span></td><td class=\"text-nowrap text-right\">(.*?)(?: )?<small class=\"text-muted\">(.*?)</small></td><td class=\"text-right\">(.*?)<small class=\"text-muted\">%</small></td><td class=\"text-right\">(.*?)(?: )?<\"\"\")\n\n# lookup_for_future_divs = True\n\n# prev_line_start_idx = divs_table.find(\"<tr\")\n# while True:\n# line_start_idx = divs_table.find(\"<tr\", prev_line_start_idx+1)\n# if line_start_idx == -1:\n# break\n\n# line_end_idx = divs_table.find(\"</tr>\", line_start_idx)\n# line = divs_table[line_start_idx:line_end_idx+5]\n\n# if lookup_for_future_divs:\n# m = re.search(future_divs_regex, line)\n# if not m:\n# lookup_for_future_divs = False\n\n# if not lookup_for_future_divs:\n# m = re.search(previous_divs_regex, line)\n# if not m:\n# break\n\n# div_info = DivInfo()\n# div_info.verified = \"green-bg\" in m.group(1) or \"gray-bg\" not in m.group(1)\n# div_info.buy_till_date = parse_date(m.group(2), m.group(3))\n# div_info.registry_close_date = parse_date(m.group(4), m.group(5))\n# div_info.dividend = parse_float(m.group(6))\n# div_info.currency = parse_currency(m.group(7))\n# div_info.div_yield = parse_float(m.group(8))\n\n# if lookup_for_future_divs:\n# future_divs.append(div_info)\n# else:\n# div_info.close_price = parse_float(m.group(9))\n# previous_divs.append(div_info)\n\n# prev_line_start_idx = line_start_idx\n\n# return future_divs, previous_divs\n\nclass Date:\n def __init__(self, day=None, month=None, year=None):\n self.day = day\n self.month = month\n self.year = year\n\n 
@property\n def timestamp(self):\n return calendar.timegm(self.date.timetuple())\n\n @property\n def date(self):\n return datetime.date(self.year, self.month, self.day)\n\n def json(self):\n return {\n \"day\": self.day,\n \"month\": self.month,\n \"year\": self.year,\n \"timestamp\": self.timestamp,\n }\n\n # def __repr__(self):\n # return json.dumps(self.json(), indent=4)\n\n\nclass DivInfo:\n def __init__(self):\n self.verified = None\n self.buy_till_date = None\n self.registry_close_date = None\n self.dividend = None\n self.currency = None\n self.div_yield = None\n self.close_price = None\n\n def json(self):\n buy_till_date = self.buy_till_date.json() if isinstance(self.buy_till_date, Date) else self.buy_till_date\n registry_close_date = self.registry_close_date.json() if isinstance(self.registry_close_date, Date) else self.registry_close_date\n return {\n \"verified\": self.verified,\n \"buy_till_date\": buy_till_date,\n \"registry_close_date\": registry_close_date,\n \"dividend\": self.dividend,\n \"currency\": self.currency,\n \"div_yield\": self.div_yield,\n \"close_price\": self.close_price,\n }\n\n # def __repr__(self):\n # return json.dumps(self.json(), indent=4)\n\n\nclass TickerInfo:\n def __init__(self):\n self.name = None\n self.sector = None\n self.isin = None\n self.price = None\n self.currency = None\n self.dividend = None\n self.div_yield = None\n self.buy_till_date = None\n self.ex_div_date = None\n self.registry_close_date = None\n self.div_pay_date = None\n self.future_divs = None\n self.previous_divs = None\n\n def eval_div_period(self, d2, d1):\n if not d2 or not d1:\n return None\n\n td = d2 - d1\n td_days = td.days\n\n if td_days > 50 and td_days < 130:\n return 3\n elif td_days > 150 and td_days < 220:\n return 6\n elif td_days > 300 and td_days < 410:\n return 12\n else:\n return None\n\n def json(self):\n buy_till_date = self.buy_till_date.json() if isinstance(self.buy_till_date, Date) else self.buy_till_date\n ex_div_date = self.ex_div_date.json() if isinstance(self.ex_div_date, Date) else self.ex_div_date\n registry_close_date = self.registry_close_date.json() if isinstance(self.registry_close_date, Date) else self.registry_close_date\n div_pay_date = self.div_pay_date.json() if isinstance(self.div_pay_date, Date) else self.div_pay_date\n future_divs = list(map(lambda x: x.json(), self.future_divs))\n previous_divs = list(map(lambda x: x.json(), self.previous_divs))\n future_div = future_divs[-1] if future_divs else None\n previous_div = previous_divs[0] if previous_divs else None\n if self.future_divs and self.previous_divs:\n next_date = self.future_divs[-1].registry_close_date.date\n prev_date = self.previous_divs[0].registry_close_date.date\n div_period = self.eval_div_period(next_date, prev_date)\n elif len(self.previous_divs) >= 2:\n date1 = self.previous_divs[0].registry_close_date.date\n date2 = self.previous_divs[1].registry_close_date.date\n div_period = self.eval_div_period(date1, date2)\n else:\n div_period = None\n\n if self.currency:\n currency = self.currency\n elif self.future_divs:\n currency = self.future_divs[-1].currency\n elif self.previous_divs:\n currency = self.previous_divs[0].currency\n elif self.isin and self.isin.startswith(\"RU\"):\n currency = Currency.RUB\n else:\n currency = Currency.USD\n return {\n \"name\": self.name,\n \"sector\": self.sector,\n \"isin\": self.isin,\n \"price\": self.price,\n \"currency\": currency,\n \"dividend\": self.dividend,\n \"div_yield\": self.div_yield,\n \"buy_till_date\": buy_till_date,\n 
\"ex_div_date\": ex_div_date,\n \"registry_close_date\": registry_close_date,\n \"div_pay_date\": div_pay_date,\n \"future_divs\": future_divs,\n \"previous_divs\": previous_divs,\n \"future_div\": future_div,\n \"previous_div\": previous_div,\n \"div_period\": div_period,\n }\n\n # def __repr__(self):\n # return json.dumps(self.json(), indent=4)\n\n\ndef parse_ticker(ticker):\n r = requests.get(\"https://investmint.ru/{}/\".format(ticker.lower()))\n text = r.text\n\n if r.status_code != 200:\n return None\n\n ticket_info = TickerInfo()\n\n m = re.search(r\"\"\"<div class=\"ml-3\">\\s?<h1 class=\"mb-2\">Дивиденды (.*?) \\d{4}</h1>\"\"\", text)\n if m:\n ticket_info.name = html.unescape(m.group(1))\n\n m = re.search(r\"\"\"<div class=\"smallcaps\">Сектор</div>\\s?<p>(.*?)</p>\"\"\", text)\n if m:\n ticket_info.sector = m.group(1)\n\n m = re.search(r\"\"\"<div class=\"smallcaps\">ISIN</div>\\s?<p>(.*?)</p>\"\"\", text)\n if m:\n ticket_info.isin = m.group(1)\n\n m = re.search(r\"\"\"<div class=\"smallcaps\">Курс акций</div>\\s?<div class=\"d-flex align-items-center text-nowrap\">\\s?<div class=\"num\\d* mr-2\">(.*?)(?:</div>|<small class=\"text-muted\">(.*?)</small></div>)\"\"\", text)\n if m:\n ticket_info.price = parse_float(m.group(1))\n ticket_info.currency = parse_currency(m.group(2))\n\n m = re.search(r\"\"\">\\s?<div class=\"smallcaps mb-1\">Дивиденд</div>\\s?<div class=\"d-flex align-items-center\">\\s?<div class=\"num\\d*\">([\\d,]*)\"\"\", text)\n m = re.search(r\"\"\">\\s?<div class=\"smallcaps mb-1\">Дивиденд</div>\\s?\"\"\", text)\n if m:\n ticket_info.dividend = parse_float(m.group(1))\n\n m = re.search(r\"\"\"<div class=\"smallcaps\">Доходность</div>\\s?<div class=\"num\\d*\">(.*?)<small class=\"text-muted\">%</small\"\"\", text)\n if m:\n ticket_info.div_yield = parse_float(m.group(1))\n\n m = re.search(r\"\"\"<div class=\"eventname smallcaps\">Купить до.*?</small>(.*?)<small class=\"text-muted\">(.*?)</small>\"\"\", text)\n if m:\n ticket_info.buy_till_date = parse_date(m.group(1), m.group(2))\n\n m = re.search(r\"\"\"<div class=\"eventname smallcaps\">Экс-дивидендная дата.*?</small>(.*?)<small class=\"text-muted\">(.*?)</small>\"\"\", text)\n if m:\n ticket_info.ex_div_date = parse_date(m.group(1), m.group(2))\n\n m = re.search(r\"\"\"<div class=\"eventname smallcaps\">Закрытие реестра.*?</small>(.*?)<small class=\"text-muted\">(.*?)</small>\"\"\", text)\n if m:\n ticket_info.registry_close_date = parse_date(m.group(1), m.group(2))\n\n m = re.search(r\"\"\"<div class=\"eventname smallcaps\">Дата выплаты.*?</small>(.*?)<small class=\"text-muted\">(.*?)</small>\"\"\", text)\n if m:\n ticket_info.div_pay_date = parse_date(m.group(1), m.group(2))\n\n divs_table_start_idx = text.find(\"\"\"<table class=\"table table-hover\">\"\"\")\n divs_table_end_idx = text.find(\"\"\"</table>\"\"\", divs_table_start_idx)\n divs_table = text[divs_table_start_idx:divs_table_end_idx]\n\n ticket_info.future_divs, ticket_info.previous_divs = parse_divs_table(divs_table)\n\n return ticket_info.json()\n"
},
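Besides the scraping entry point parse_ticker, the record above defines two small pure helpers: a Date wrapper that exposes a UTC timestamp, and TickerInfo.eval_div_period, which buckets the gap between two registry-close dates into a 3, 6 or 12 month payout period. A short check of both, assuming it is run from the repo's `src` directory so that the `modules` package resolves:

```python
from datetime import date
from modules.investmint import Date, TickerInfo

d = Date(15, 5, 2020)
print(d.date, d.timestamp)   # 2020-05-15 and its UTC timestamp

t = TickerInfo()
# Gaps between consecutive registry-close dates are bucketed into 3/6/12-month payout periods.
print(t.eval_div_period(date(2020, 7, 10), date(2020, 4, 10)))   # 91 days  -> 3
print(t.eval_div_period(date(2020, 12, 1), date(2020, 6, 1)))    # 183 days -> 6
print(t.eval_div_period(date(2020, 12, 1), date(2019, 12, 1)))   # 366 days -> 12
print(t.eval_div_period(date(2020, 2, 1), date(2020, 1, 20)))    # 12 days  -> None
```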
{
"alpha_fraction": 0.5548478364944458,
"alphanum_fraction": 0.5601556897163391,
"avg_line_length": 34.32500076293945,
"blob_id": "098df57d32cbc50914af7c57ca392a0676371cf4",
"content_id": "64c73cb486810c33d1d2fa46c4f8fb12864e966d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5936,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 160,
"path": "/src/modules/smartlab_bonds.py",
"repo_name": "japroc/investmint_ticker_parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport calendar\nimport datetime\nimport re\nimport requests\n\n\nclass Date:\n def __init__(self, day=None, month=None, year=None):\n self.day = day\n self.month = month\n self.year = year\n\n @property\n def timestamp(self):\n return calendar.timegm(self.date.timetuple())\n\n @property\n def date(self):\n return datetime.date(self.year, self.month, self.day)\n\n def json(self):\n return {\n \"day\": self.day,\n \"month\": self.month,\n \"year\": self.year,\n \"timestamp\": self.timestamp,\n }\n\n\nclass Coupon:\n def __init__(self, date, coupon, coupon_yield):\n self.date = date\n self.coupon = coupon\n self.coupon_yield = coupon_yield\n\n def json(self):\n return {\n \"date\": self.date.json(),\n \"coupon\": self.coupon,\n \"coupon_yield\": self.coupon_yield,\n }\n\n\nclass BondInfo:\n def __init__(self):\n self.name = None\n self.isin = None\n self.publish_date = None\n self.close_date = None\n self.nominal = None\n self.currency = None\n self.coupon_yield = None\n self.next_coupon = None\n self.nkd = None\n self.coupon_period = None\n self.status = None\n self.all_coupons = None\n\n def eval_days_to_close(self):\n if not self.close_date:\n return None\n now = datetime.datetime.utcnow()\n now_date = datetime.date(now.year, now.month, now.day)\n close_date = datetime.date(self.close_date.year, self.close_date.month, self.close_date.day)\n td = close_date - now_date\n return td.days\n\n def json(self):\n publish_date = self.publish_date.json() if isinstance(self.publish_date, Date) else self.publish_date\n close_date = self.close_date.json() if isinstance(self.close_date, Date) else self.close_date\n all_coupons = list(map(lambda x: x.json(), self.all_coupons))\n return {\n \"name\": self.name,\n \"isin\": self.isin,\n \"nominal\": self.nominal,\n \"currency\": self.currency,\n \"coupon_yield\": self.coupon_yield,\n \"next_coupon\": self.next_coupon,\n \"nkd\": self.nkd,\n \"coupon_period\": self.coupon_period,\n \"status\": self.status,\n \"publish_date\": publish_date,\n \"close_date\": close_date,\n \"days_to_close\": self.eval_days_to_close(),\n \"all_coupons\": all_coupons,\n }\n\n\ndef parse_coupon_by_isin(isin):\n r = requests.get(\"https://smart-lab.ru/q/bonds/{}/\".format(isin))\n text = r.text\n\n if r.status_code != 200:\n return None\n\n bond_info = BondInfo()\n\n m = re.search(r\"\"\"<td><abbr title=\"Краткое наименование ценной бумаги\">Название</abbr></td>\\s*?<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.name = m.group(1)\n\n m = re.search(r\"\"\"<td><abbr title=\"ISIN\">ISIN</abbr></td>\\s*?<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.isin = m.group(1)\n\n m = re.search(r\"\"\"<td><abbr title=\"Дата размещения, дд.мм.гг\">Дата размещения</abbr></td>\\s*?<td>(\\d+)-(\\d+)-(\\d+)</td>\"\"\", text)\n if m:\n bond_info.publish_date = Date(int(m.group(1)), int(m.group(2)), int(m.group(3)))\n\n m = re.search(r\"\"\"<td><abbr title=\"Дата погашения, дд.мм.гг\">Дата погашения</abbr></td>\\s*?<td>(\\d+)-(\\d+)-(\\d+)</td>\"\"\", text)\n if m:\n bond_info.close_date = Date(int(m.group(1)), int(m.group(2)), int(m.group(3)))\n\n m = re.search(r\"\"\">Номинал</abbr></td>\\s*<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.nominal = int(m.group(1))\n\n m = re.search(r\"\"\"<td><abbr title=\"Валюта номинала\">Валюта</abbr></td>\\s*?<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.currency = \"RUB\" if m.group(1) == \"руб\" else None\n\n m = re.search(r\"\"\">Дох\\. 
купона, годовых от ном</abbr></td>\\s*<td(?:\\s+class=\"up\")?>(.*?)%</td>\"\"\", text)\n if m:\n bond_info.coupon_yield = float(m.group(1))\n\n m = re.search(r\"\"\"<abbr title=\"Величина купона\">Купон, руб \\(\\?\\)</abbr></a></td>\\s*?<td>(.*?)\\s\"\"\", text)\n if m:\n bond_info.next_coupon = float(m.group(1))\n\n m = re.search(r\"\"\"<abbr title=\"Накопленный купонный доход\">НКД \\(\\?\\)</abbr></a></td>\\s*?<td>(.*?)\\s\"\"\", text)\n if m:\n bond_info.nkd = float(m.group(1))\n\n m = re.search(r\"\"\"<td><abbr title=\"Длительность купона\">Выплата купона, дн</abbr></td>\\s*?<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.coupon_period = int(m.group(1))\n\n m = re.search(r\"\"\"<td><abbr title=\"Статус\">Статус</abbr></td>\\s*?<td>(.*?)</td>\"\"\", text)\n if m:\n bond_info.status = m.group(1)\n\n calendar_start_idx = text.find(\"\"\"<h2 style=\"margin-top: 2em\">Календарь выплаты купонов по облигации\"\"\")\n all_couponds_table_start_idx = text.find(\"\"\"<table class=\"simple-little-table bond\" cellspacing=\"0\">\"\"\", calendar_start_idx)\n all_couponds_table_stop_idx = text.find(\"\"\"</table>\"\"\", all_couponds_table_start_idx)\n all_couponds_table = text[all_couponds_table_start_idx:all_couponds_table_stop_idx]\n\n all_coupons = list()\n all_coupons_parts = re.findall(r\"\"\"<tr>\\s*<td>\\d+</td>\\s*<td>(\\d+)-(\\d+)-(\\d+)\\s*</td>\\s*<td>([0-9\\.]*)</td>\\s*<td>([\\d+\\.]*)%.*?</tr>\"\"\", all_couponds_table, re.S)\n for coupon_parts in all_coupons_parts:\n all_coupons.append(Coupon(\n date=Date(int(coupon_parts[0]), int(coupon_parts[1]), int(coupon_parts[2])),\n coupon=float(coupon_parts[3]),\n coupon_yield=float(coupon_parts[4])\n ))\n bond_info.all_coupons = all_coupons\n\n return bond_info.json()\n"
},
{
"alpha_fraction": 0.5198708176612854,
"alphanum_fraction": 0.537003219127655,
"avg_line_length": 30.932735443115234,
"blob_id": "05f80ff79c99df44194927b7d408c1edb7f347fb",
"content_id": "7ed38438505fbf710e2f578493b2b1ebb632d3d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7121,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 223,
"path": "/src/modules/investing_stock.py",
"repo_name": "japroc/investmint_ticker_parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport calendar\nimport datetime\nimport re\nimport requests\n\ndef parse_date(day, month, year):\n months = {\n \"Jan\": 1,\n \"Feb\": 2,\n \"Mar\": 3,\n \"Apr\": 4,\n \"May\": 5,\n \"Jun\": 6,\n \"Jul\": 7,\n \"Aug\": 8,\n \"Sep\": 9,\n \"Oct\": 10,\n \"Nov\": 11,\n \"Dec\": 12,\n }\n\n return Date(int(day), months.get(month), int(year))\n\n\nclass Date:\n def __init__(self, day=None, month=None, year=None):\n self.day = day\n self.month = month\n self.year = year\n\n @property\n def timestamp(self):\n return calendar.timegm(self.date.timetuple())\n\n @property\n def date(self):\n return datetime.date(self.year, self.month, self.day)\n\n def json(self):\n return {\n \"day\": self.day,\n \"month\": self.month,\n \"year\": self.year,\n \"timestamp\": self.timestamp,\n }\n\n\nclass DivInfo:\n def __init__(self):\n self.ex_div_date = None\n self.dividend = None\n self.pay_date = None\n self.div_yield = None\n\n def json(self):\n ex_div_date = self.ex_div_date.json() if isinstance(self.ex_div_date, Date) else self.ex_div_date\n pay_date = self.pay_date.json() if isinstance(self.pay_date, Date) else self.pay_date\n return {\n \"dividend\": self.dividend,\n \"div_yield\": self.div_yield,\n \"ex_div_date\": ex_div_date,\n \"pay_date\": pay_date,\n }\n\n\nclass TickerInfo:\n def __init__(self):\n self.price = None\n self.name = None\n self.industry = None\n self.sector = None\n self.currency = None\n self.pe = None\n self.next_earnings_date = None\n self.all_divs = list()\n\n def eval_div_period(self, d2, d1):\n if not d2 or not d1:\n return None\n\n td = d2 - d1\n td_days = td.days\n\n if td_days > 50 and td_days < 130:\n return 3\n elif td_days > 150 and td_days < 220:\n return 6\n elif td_days > 300 and td_days < 410:\n return 12\n else:\n return None\n\n def json(self):\n next_earnings_date = self.next_earnings_date.json() if isinstance(self.next_earnings_date, Date) else self.next_earnings_date\n all_divs = list(map(lambda x: x.json(), self.all_divs))\n if len(self.all_divs) >= 2:\n date1 = self.all_divs[0].ex_div_date.date\n date2 = self.all_divs[1].ex_div_date.date\n div_period = self.eval_div_period(date1, date2)\n else:\n div_period = None\n return {\n \"price\": self.price,\n \"name\": self.name,\n \"industry\": self.industry,\n \"sector\": self.sector,\n \"currency\": self.currency,\n \"pe\": self.pe,\n \"all_divs\": all_divs,\n \"div_period\": div_period,\n \"next_earnings_date\": next_earnings_date,\n }\n\n\ndef get_ticker_info(ticker_):\n ticker = ticker_.lower().replace(\".\", \"\")\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\"\n headers = {\n \"User-Agent\": user_agent, \n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"close\",\n }\n url = \"https://uk.investing.com/search/service/searchTopBar\"\n\n data = \"search_text={}\".format(ticker.lower())\n r = requests.post(url, data=data, headers=headers, timeout=3)\n json_data = r.json()\n quotes = json_data[\"quotes\"]\n quotes = list(filter(lambda x:x.get(\"symbol\").upper() == ticker.upper(), quotes))\n\n if not quotes and ticker_.endswith(\"p\"):\n ticker = ticker[:-1]\n data = \"search_text={}\".format(ticker)\n r = requests.post(url, data=data, headers=headers, timeout=3)\n json_data = r.json()\n quotes = json_data[\"quotes\"]\n quotes = list(filter(lambda 
x:x.get(\"symbol\").upper() == ticker.upper() + \"_p\", quotes))\n\n if not quotes:\n return None\n\n exchanges = {\n \"Moscow\": 4, \n \"NASDAQ\": 3, \n \"NYSE\": 2,\n \"London\": 1,\n }\n quotes = list(filter(lambda x: x.get(\"exchange\") in exchanges.keys(), quotes))\n quotes = sorted(quotes, key=lambda x: exchanges.get(x.get(\"exchange\"), 0), reverse=True)\n quote = quotes[0]\n link = \"https://uk.investing.com{}\".format(quote[\"link\"])\n\n ticker_info = TickerInfo()\n\n r2 = requests.get(link, headers=headers, timeout=3)\n text = r2.text\n m = re.search(\"\"\"<input type=\"text\" class=\"newInput inputTextBox alertValue\" placeholder=\"([^\"]*)\"\"\", text)\n if m:\n ticker_info.price = float(m.group(1).replace(\",\", \"\"))\n\n m = re.search(r\"\"\"<h1 class=\"float_lang_base_1 relativeAttr\"\\s*dir=\"ltr\" itemprop=\"name\">(.*?)</h1>\"\"\", text)\n if m:\n ticker_info.name = m.group(1).strip()\n\n m = re.search(r\"\"\"<div>Industry<a.*?>(.*?)</a></div>\"\"\", text)\n if m:\n ticker_info.industry = m.group(1).strip()\n\n m = re.search(r\"\"\"<div>Sector<a.*?>(.*?)</a></div>\"\"\", text)\n if m:\n ticker_info.sector = m.group(1).strip()\n\n m = re.search(r\"\"\"Currency in <span class='bold'>(.*?)</span>\"\"\", text)\n if m:\n ticker_info.currency = m.group(1).strip()\n\n m = re.search(r\"\"\"Next Earnings Date.*?>([^\\s]*) (\\d*), (\\d*)</a>\"\"\", text)\n if m:\n ticker_info.next_earnings_date = parse_date(m.group(2), m.group(1), m.group(3))\n\n m = re.search(r\"\"\"class=\"float_lang_base_1\">P/E Ratio</span><span class=\"float_lang_base_2 bold\">(.*?)</span\"\"\", text)\n if m:\n if m.group(1) == \"N/A\":\n ticker_info.pe = None\n else:\n ticker_info.pe = float(m.group(1))\n\n all_divs = list()\n\n m = re.search(r\"\"\"<li><a href=\"(.*?)\" class=\"arial_12 bold\">Dividends</a></li>\"\"\", text)\n if m:\n dividend_link = \"https://uk.investing.com{}\".format(m.group(1))\n\n r3 = requests.get(dividend_link, headers=headers, timeout=3)\n text3 = r3.text\n\n div_table_start_idx = text3.find(\"\"\"<th class=\"first left\">Ex-Dividend Date<span sort_default class=\"headerSortDefault\"></span></th>\"\"\")\n div_table_finish_idx = text3.find(\"\"\"</table>\"\"\", div_table_start_idx)\n div_table = text3[div_table_start_idx:div_table_finish_idx]\n\n regex = r\"\"\"<tr event_timestamp=\".*?\">.*?\">([^\\s]*) (\\d*), (\\d*)</td>\\s*\"\"\"\n regex += r\"\"\"<td>(.*?)</td>.*?\"\"\"\n regex += r\"\"\"<td data-value=\".*?\">([^\\s]*) (\\d*), (\\d*)</td>\\s*\"\"\"\n regex += r\"\"\"<td>(.*?)%</td>\"\"\"\n\n all_divs_info = re.findall(regex, div_table, re.S)\n\n for div_info in all_divs_info:\n di = DivInfo()\n di.ex_div_date = parse_date(div_info[1], div_info[0], div_info[2])\n di.dividend = float(div_info[3])\n di.pay_date = parse_date(div_info[5], div_info[4], div_info[6])\n di.div_yield = float(div_info[7])\n all_divs.append(di)\n\n ticker_info.all_divs = all_divs\n\n return ticker_info.json()\n"
}
] | 4 |
Phil610351/DRL_UAV_CellularNet | https://github.com/Phil610351/DRL_UAV_CellularNet | 9b3255275b7a26673f6c8191b762cfc260c99ab6 | b8e960c827ae296c5ee832fb2706a694d444d4c4 | 7dc6f66a66707287ce3a1bd5b251513824cfcf71 | refs/heads/master | 2023-03-16T09:51:11.270267 | 2020-03-03T10:47:57 | 2020-03-03T10:47:57 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5102474689483643,
"alphanum_fraction": 0.5221062302589417,
"avg_line_length": 29.011606216430664,
"blob_id": "a6000a785149ae772acd4697f3089fb2d2d2a2d0",
"content_id": "2c664ab667673e53cc02a357eda6720506996ac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15516,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 517,
"path": "/ue_mobility.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "# Python code for 2D random walk, fixed direction, and group reference point mobility model.\nimport numpy as np\n#import pylab\nimport random\nimport math\nfrom numpy.random import rand\n\ndef WalkNRandomSteps(initCoordinates, boundaries, stepLen, nSteps):\n #creating two array for containing x and y coordinate\n #of size equals to the number of size and filled up with 0's\n [initX, initY] = initCoordinates\n x = np.ones(nSteps) * initX\n y = np.ones(nSteps) * initY\n [xMin, xMax, yMin, yMax] = boundaries\n \n for i in range(1, nSteps):\n x[i] = x[i - 1]\n y[i] = y[i - 1]\n val = random.randint(1, 4)\n \n if val == 1:\n if x[i] + stepLen >= xMax:\n x[i] = xMin\n else:\n x[i] = x[i] + stepLen\n \n elif val == 2:\n if x[i] - stepLen <= xMin:\n x[i] = xMax\n else:\n x[i] = x[i] - stepLen\n\n elif val == 3:\n if y[i] + stepLen >= yMax:\n y[i] = yMin\n else:\n y[i] = y[i] + stepLen\n else:\n if y[i] - stepLen <= yMin:\n y[i] = yMax\n else:\n y[i] = y[i] - stepLen\n\n return (x, y)\n\ndef WalkToFixedDirection(initCoordinates, boundaries, stepLen, nSteps, direction):\n #creating two array for containing x and y coordinate\n #of size equals to the number of size and filled up with 0's\n [initX, initY] = initCoordinates\n x = np.ones(nSteps) * initX\n y = np.ones(nSteps) * initY\n [xMin, xMax, yMin, yMax] = boundaries\n \n for i in range(1, nSteps):\n x[i] = x[i - 1]\n y[i] = y[i - 1]\n val = direction%4 + 1\n \n if val == 1:\n if x[i] + stepLen >= xMax:\n x[i] = xMin\n else:\n x[i] = x[i] + stepLen\n \n elif val == 2:\n if x[i] - stepLen <= xMin:\n x[i] = xMax\n else:\n x[i] = x[i] - stepLen\n\n elif val == 3:\n if y[i] + stepLen >= yMax:\n y[i] = yMin\n else:\n y[i] = y[i] + stepLen\n else:\n if y[i] - stepLen <= yMin:\n y[i] = yMax\n else:\n y[i] = y[i] - stepLen\n\n return (x, y)\n\n\ndef GetRandomWalkTraceInbound(numStep, numUE, stepLen, boundaries):\n\n margin = 5\n\n [xMin, xMax, yMin, yMax] = boundaries\n xInit = np.random.randint(xMin + margin, xMax - margin, size=numUE)\n yInit = np.random.randint(yMin + margin, yMax - margin, size=numUE)\n\n\n nStepX = np.zeros((numUE,numStep))\n nStepY = np.zeros((numUE,numStep))\n\n\n for userId in range(numUE):\n # (nStepX[userId],nStepY[userId]) = WalkToFixedDirection([xInit[userId],yInit[userId]], boundaries, stepLen, nStep, userId)\n (nStepX[userId],nStepY[userId]) = WalkNRandomSteps([xInit[userId],yInit[userId]], boundaries, stepLen, numStep)\n\n trace = np.zeros((numStep,numUE,3)).astype(int)\n trace[:,:,:2] = np.array([nStepX,nStepY]).T\n\n return trace\n\n\n\ndef GetRandomLocationInCellCoverage(gridX_size, gridY_size, cell_size, bsloc, num_node):\n \"\"\"\n #Generate n random locations in coverage of bsLoc ((0, gridX_size),(0, gridY_size))\n input\n 1) cell size in x\n 2) BS locations (3D)\n 3) number of nodes (bs or ue)\n 4) height (z) value - default 0\n 5) minimum distance between every 2 nodes - default 0 #TODO\n \n return 1) location of nodes in format (x,y)\n 2) heatmap format locations\n \"\"\"\n \n nBS = np.shape(bsloc)[0]\n rand_users_loc = []\n for ue in range(num_node):\n in_bs = np.random.randint(0, nBS)\n theta = np.random.uniform(0, 2*math.pi)\n r = np.random.uniform(0, cell_size)\n \n rand_users_loc.append([bsloc[in_bs][0] + r* math.sin(theta), bsloc[in_bs][1] + r* math.cos(theta), 0])\n \n loc = np.asarray(rand_users_loc, dtype=int)\n\n# print loc\n\n grid = np.zeros((gridX_size, gridY_size))\n\n for n in range(num_node):\n# print loc[n][0], loc[n][1]\n grid[loc[n][0], loc[n][1]] += 1\n \n return loc, grid\n\n\ndef 
GetRandomLocationInGrid(gridX_size, gridY_size, num_node, h=0, min_dist=0):\n \"\"\"\n #Generate n random locations in range ((0, gridX_size),(0, gridY_size))\n input 1) grid size in x\n 2) grid size in y\n 3) number of nodes (bs or ue)\n 4) height (z) value - default 0\n 5) minimum distance between every 2 nodes - default 0 #TODO\n \n return 1) location of nodes in format (x,y)\n 2) heatmap format locations\n \"\"\"\n any_too_close = True\n while (any_too_close):\n x = np.random.randint(0, gridX_size, size=num_node)\n y = np.random.randint(0, gridY_size, size=num_node)\n\n loc = [x,y,np.ones((num_node)) * h] #3D loc\n any_too_close = Get_if_collide(loc, min_dist)\n\n grid = np.zeros((gridX_size, gridY_size))\n\n for n in range(num_node):\n grid[x[n], y[n]] += 1\n\n return np.array(loc, dtype=int).T, grid\n\ndef GetGridMap(gridX_size, gridY_size, nodeLoc):\n \"\"\"\n #Generate n random locations in range ((0, gridX_size),(0, gridY_size))\n input 1) grid size in x\n 2) grid size in y\n 3) node locations in (x,y) format\n \n return heatmap format locations\n \"\"\"\n grid = np.zeros((gridX_size, gridY_size))\n \n for n in range(np.shape(nodeLoc)[0]):\n \n grid[nodeLoc[n][0], nodeLoc[n][1]] += 1\n \n return grid\n\n\ndef BS_move(loc, bound, action, stepLen, min_dist, n_action):\n \"\"\"\n BS takes a single move based on \"action\" value\n loc: current BSs location\n action: action index (single number for all BS)\n stepLen: step length\n min_dist: minimum distant between BSs,\n (the BS will only move if its distance to all the\n other BSs is greater than this value.\n n_action: total number of actions for a single BS\n return: BSs locations after the step\n \"\"\"\n \n nBS = np.shape(loc)[0]\n# print \"location \\n\", loc\n act_all = Decimal_to_Base_N(action, n_action, nBS)\n# print \"action\", act_all\n [xMin, xMax, yMin, yMax] = bound\n \n #action 5-8 moves with longer stepLen\n stepLenLong = stepLen*2\n \n for i in range(nBS):\n \n val = act_all[i]\n [x, y, z] = loc[i]\n \n if val == 0:\n if x + stepLen < xMax:\n x = x + stepLen\n\n elif val == 1:\n if x - stepLen > xMin:\n x = x - stepLen\n\n elif val == 2:\n if y + stepLen < yMax:\n y = y + stepLen\n \n elif val == 3:\n if y - stepLen > yMin:\n y = y - stepLen\n \n # stay if val == 4\n\n elif val == 5:\n if x + stepLenLong < xMax:\n x = x + stepLenLong\n \n elif val == 6:\n if x - stepLenLong > xMin:\n x = x - stepLenLong\n\n elif val == 7:\n if y + stepLenLong < yMax:\n y = y + stepLenLong\n \n elif val == 8:\n if y - stepLenLong > yMin:\n y = y - stepLenLong\n\n\n if_collide = False\n\n for j in range(nBS):\n if i != j:\n dist = np.linalg.norm(loc[i]-loc[j]) # verify me\n\n if dist <= min_dist:\n if_collide = True\n\n if not if_collide:\n loc[i] = [x, y, z]\n\n# print \"new location \\n\", loc\n return loc\n\ndef UE_rand_move(loc, bound, stepLen):\n \n [xMin, xMax, yMin, yMax] = bound\n \n for i in range(np.shape(loc)[0]):\n \n [x, y, z] = loc[i]\n\n val = random.randint(0, 3)\n\n if val == 0:\n if x + stepLen >= xMax:\n x = xMin\n else:\n x = x + stepLen\n\n elif val == 1:\n if x - stepLen <= xMin:\n x = xMax\n else:\n x = x - stepLen\n\n elif val == 2:\n if y + stepLen >= yMax:\n y = yMin\n else:\n y = y + stepLen\n elif val == 3:\n if y - stepLen <= yMin:\n y = yMax\n else:\n y = y - stepLen\n\n loc[i] = [x, y, z]\n return loc\n\n\ndef Decimal_to_Base_N(num, base, digits):\n \"\"\"Change decimal number ``num'' to given base\n Upto base 36 is supported.\n num: the number to be converted\n base: the base\n digits: number of output 
digits\n return result_array\n \"\"\"\n \n result_array = np.zeros((digits))\n converted_string, modstring = \"\", \"\"\n# print num\n currentnum = num\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n if not num:\n return result_array\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n \n result = np.array([int(d) for d in str(converted_string)])\n\n result_array[digits - len(result):] = result\n\n return result_array\n\ndef Get_if_collide(locations, threshold):\n \"\"\"\n check if the distance between any 2 of the given locations are below the threshold\n \"\"\"\n any_collide = False\n for i in range(len(locations)):\n for j in range(len(locations)):\n if i == j:\n continue\n \n dist = np.linalg.norm(locations[i]-locations[j]) # verify me\n# in number of grids\n if dist <= threshold:\n any_collide = True\n\n return any_collide\n\ndef Get_loc_penalty(locations, threshold, nUE):\n \"\"\"\n check if the distance between any 2 of the given locations are below the threshold\n \"\"\"\n penalty = 0\n \n for i in range(len(locations)):\n for j in range(len(locations)):\n if i == j:\n continue\n \n dist = np.linalg.norm(locations[i]-locations[j])\n #\n if dist <= threshold:\n p = nUE - nUE * dist / threshold\n penalty += p\n\n penalty = math.floor(penalty/2)\n return penalty\n\n'''\n Reference Point Group Mobility model, discussed in the following paper:\n \n Xiaoyan Hong, Mario Gerla, Guangyu Pei, and Ching-Chuan Chiang. 1999.\n A group mobility model for ad hoc wireless networks. In Proceedings of the\n 2nd ACM international workshop on Modeling, analysis and simulation of\n wireless and mobile systems (MSWiM '99). ACM, New York, NY, USA, 53-60.\n \n In this implementation, group trajectories follow a random direction model,\n while nodes follow a random walk around the group center.\n The parameter 'aggregation' controls how close the nodes are to the group center.\n \n Required arguments:\n \n *nr_nodes*:\n list of integers, the number of nodes in each group.\n \n *dimensions*:\n Tuple of Integers, the x and y dimensions of the simulation area.\n \n keyword arguments:\n \n *velocity*:\n Tuple of Doubles, the minimum and maximum values for group velocity.\n \n *aggregation*:\n Double, parameter (between 0 and 1) used to aggregate the nodes in the group.\n Usually between 0 and 1, the more this value approximates to 1,\n the nodes will be more aggregated and closer to the group center.\n With a value of 0, the nodes are randomly distributed in the simulation area.\n With a value of 1, the nodes are close to the group center.\n '''\n\nU = lambda MIN, MAX, SAMPLES: rand(*SAMPLES.shape) * (MAX - MIN) + MIN\ndef reference_point_group(nr_nodes, dimensions, velocity=(0.1, 1.), aggregation=0.1):\n try:\n iter(nr_nodes)\n except TypeError:\n nr_nodes = [nr_nodes]\n \n NODES = np.arange(sum(nr_nodes))\n \n groups = []\n prev = 0\n for (i,n) in enumerate(nr_nodes):\n groups.append(np.arange(prev,n+prev))\n prev += n\n \n g_ref = np.empty(sum(nr_nodes), dtype=np.int)\n for (i,g) in enumerate(groups):\n for n in g:\n g_ref[n] = i\n \n FL_MAX = max(dimensions)\n MIN_V,MAX_V = velocity\n FL_DISTR = lambda SAMPLES: U(0, FL_MAX, SAMPLES)\n VELOCITY_DISTR = lambda FD: U(MIN_V, MAX_V, FD)\n \n MAX_X, MAX_Y = dimensions\n x = U(0, MAX_X, NODES)\n y = U(0, MAX_Y, NODES)\n velocity = 1.\n theta = U(0, 2*np.pi, NODES)\n costheta = np.cos(theta)\n sintheta = np.sin(theta)\n \n GROUPS = 
np.arange(len(groups))\n g_x = U(0, MAX_X, GROUPS)\n g_y = U(0, MAX_X, GROUPS)\n g_fl = FL_DISTR(GROUPS)\n g_velocity = VELOCITY_DISTR(g_fl)\n g_theta = U(0, 2*np.pi, GROUPS)\n g_costheta = np.cos(g_theta)\n g_sintheta = np.sin(g_theta)\n \n aggregating = 200\n deaggregating = 100\n \n while True:\n \n x = x + velocity * costheta\n y = y + velocity * sintheta\n \n g_x = g_x + g_velocity * g_costheta\n g_y = g_y + g_velocity * g_sintheta\n \n if aggregating:\n for (i,g) in enumerate(groups):\n \n # step to group direction + step to group center\n x_g = x[g]\n y_g = y[g]\n c_theta = np.arctan2(g_y[i] - y_g, g_x[i] - x_g)\n \n x[g] = x_g + g_velocity[i] * g_costheta[i] + aggregation*np.cos(c_theta)\n y[g] = y_g + g_velocity[i] * g_sintheta[i] + aggregation*np.sin(c_theta)\n \n aggregating -= 1\n if aggregating == 0: deaggregating = 100\n \n else:\n for (i,g) in enumerate(groups):\n \n # step to group direction + step to group center\n x_g = x[g]\n y_g = y[g]\n c_theta = np.arctan2(g_y[i] - y_g, g_x[i] - x_g)\n \n x[g] = x_g + g_velocity[i] * g_costheta[i]\n y[g] = y_g + g_velocity[i] * g_sintheta[i]\n \n deaggregating -= 1\n if deaggregating == 0: aggregating = 10\n \n # node and group bounces on the margins\n b = np.where(x<0)[0]\n if b.size > 0:\n x[b] = - x[b]; costheta[b] = -costheta[b]\n g_idx = np.unique(g_ref[b]); g_costheta[g_idx] = -g_costheta[g_idx]\n b = np.where(x>MAX_X)[0]\n if b.size > 0:\n x[b] = 2*MAX_X - x[b]; costheta[b] = -costheta[b]\n g_idx = np.unique(g_ref[b]); g_costheta[g_idx] = -g_costheta[g_idx]\n b = np.where(y<0)[0]\n if b.size > 0:\n y[b] = - y[b]; sintheta[b] = -sintheta[b]\n g_idx = np.unique(g_ref[b]); g_sintheta[g_idx] = -g_sintheta[g_idx]\n b = np.where(y>MAX_Y)[0]\n if b.size > 0:\n y[b] = 2*MAX_Y - y[b]; sintheta[b] = -sintheta[b]\n g_idx = np.unique(g_ref[b]); g_sintheta[g_idx] = -g_sintheta[g_idx]\n\n # update info for nodes\n theta = U(0, 2*np.pi, NODES)\n costheta = np.cos(theta)\n sintheta = np.sin(theta)\n\n # update info for arrived groups\n g_fl = g_fl - g_velocity\n g_arrived = np.where(np.logical_and(g_velocity>0., g_fl<=0.))[0]\n\n if g_arrived.size > 0:\n g_theta = U(0, 2*np.pi, g_arrived)\n g_costheta[g_arrived] = np.cos(g_theta)\n g_sintheta[g_arrived] = np.sin(g_theta)\n g_fl[g_arrived] = FL_DISTR(g_arrived)\n g_velocity[g_arrived] = VELOCITY_DISTR(g_fl[g_arrived])\n\n yield np.dstack((x,y))[0]\n"
},
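BS_move in ue_mobility.py decodes a single joint action index into one sub-action per base station through Decimal_to_Base_N (base = number of per-BS actions, digits = number of BSs), and gradient.py re-encodes per-BS choices the other way round. A quick arithmetic check of that encoding follows, assuming the 4-BS, 5-actions-per-BS setup used elsewhere in the repo, where index 624 means "stay" (digit 4) for every BS; the `decimal_to_base_n` helper below is a plain reimplementation for illustration, not the repository function:

```python
import numpy as np

def decimal_to_base_n(num, base, digits):
    # Same digit expansion as Decimal_to_Base_N, without the string round-trip used there.
    out = np.zeros(digits, dtype=int)
    for i in range(digits - 1, -1, -1):
        out[i] = num % base
        num //= base
    return out

if __name__ == "__main__":
    print(decimal_to_base_n(624, 5, 4))  # -> [4 4 4 4]: action 4 ("stay") for each of the 4 BSs
    print(decimal_to_base_n(0, 5, 4))    # -> [0 0 0 0]: every BS tries a +x step
    # Inverse mapping as built in gradient.py: the most significant digit belongs to BS 0.
    per_bs = [1, 2, 3, 4]
    print(sum(a * 5 ** (3 - i) for i, a in enumerate(per_bs)))  # -> 194
```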
{
"alpha_fraction": 0.558250904083252,
"alphanum_fraction": 0.5780402421951294,
"avg_line_length": 34.98850631713867,
"blob_id": "bc007df80cd2d1318e32f9d517423e743c7176d4",
"content_id": "40ccafbb6ead9ec0ce60da76ce0f3dbbfbbcbdae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3133,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 87,
"path": "/gradient.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "#from ue_mobility import *\nfrom mobile_env import *\nfrom copy import deepcopy\n# from random import randint\nimport time\nfrom itertools import product\n\nFILE_NAME_APPEND = \"\"\nOUTPUT_DIR = \"gradient/\"\nOUTPUT_FILE_NAME = OUTPUT_DIR + \"reward\" + FILE_NAME_APPEND\nN_BS = 4\n\n\ndef Choose_Act_Gradient(actual_env, s, n_step):\n virtual_env = deepcopy(actual_env)\n #BS remains but UE moves\n _, _, _, _ = virtual_env.step_test(624, False)\n# print np.equal(virtual_env.bsLoc, actual_env.bsLoc), virtual_env.bsLoc,\"\\n\", actual_env.bsLoc,\"\\n\"\n\n current_BS_sinr = virtual_env.channel.current_BS_sinr\n bs_loc = virtual_env.bsLoc\n ue_loc = virtual_env.ueLoc\n \n act_all_bs = np.zeros((len(bs_loc)))\n \n for i_bs in range(len(bs_loc)):\n dir_grad = np.zeros((4,1))\n dir_grad[0] = np.mean(current_BS_sinr[np.where(ue_loc[:,0] > bs_loc[i_bs][0])])\n dir_grad[1] = np.mean(current_BS_sinr[np.where(ue_loc[:,0] <= bs_loc[i_bs][0])])\n dir_grad[2] = np.mean(current_BS_sinr[np.where(ue_loc[:,1] > bs_loc[i_bs][1])])\n dir_grad[3] = np.mean(current_BS_sinr[np.where(ue_loc[:,1] <= bs_loc[i_bs][1])])\n act_all_bs[i_bs] = np.nanargmin(dir_grad)\n \n action = int(act_all_bs[3] + act_all_bs[2]*5 + act_all_bs[1]*(5**2) + act_all_bs[0]*(5**3))\n \n # print act_reward, \"best action:\", best_act\n return action\n\ndef Run_Test(reward_file_name):\n MAX_STEP = 10000\n #if reading mobility trace from file\n test_env = MobiEnvironment(N_BS, 40, 100, \"read_trace\", \"./ue_trace_10k.npy\")\n \n s = np.array([np.ravel(test_env.reset())])\n \n done = False\n step = 0\n \n outage_buf = []\n reward_buf = []\n sinr_all = []\n n_step_forward = 1\n reward_file_name = reward_file_name + str(n_step_forward)\n start_time = time.time()\n single_step_time = []\n while step <= MAX_STEP:\n before_step_time = time.time()\n action = Choose_Act_Gradient(test_env, s, n_step_forward)\n single_step_time.append(time.time() - before_step_time)\n \n s_, r, done, info = test_env.step_test(action, False)\n \n reward_buf.append(info[0])\n\tsinr_all.append(test_env.channel.current_BS_sinr) \n if step % 500 == 0 or step == MAX_STEP:\n print \"step \", step, \" time ellipsed \", time.time() - start_time\n start_time = time.time()\n np.save(reward_file_name, reward_buf)\n np.save(OUTPUT_DIR + \"time\",single_step_time)\n np.save(OUTPUT_DIR + \"sinr\",sinr_all)\n # reset the environment every 2000 steps\n if step % 2000 == 0:\n s = np.array([np.ravel(test_env.reset())])\n #warm up in 500 steps\n for _ in range(500):\n action = Choose_Act_Gradient(test_env, s, n_step_forward)\n _, _, _, _ = test_env.step_test(action, False)\n else:\n s = np.array([np.ravel(s_)])\n \n step+=1\n\n np.save(reward_file_name, reward_buf)\n np.save(OUTPUT_DIR + \"time\",single_step_time)\n np.save(OUTPUT_DIR + \"sinr\",sinr_all)\nif __name__ == \"__main__\":\n Run_Test(OUTPUT_FILE_NAME)\n\n\n"
},
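Choose_Act_Gradient above moves each BS toward the side of its current position where users report the worst mean SINR, using np.nanargmin over four directional averages. A toy, self-contained illustration of that per-BS decision with synthetic coordinates and SINR values (all numbers are made up; direction indices follow the same 0..3 ordering as in the function):

```python
import numpy as np

def greedy_direction(bs_xy, ue_xy, ue_sinr):
    # Mean SINR of the users on each side of the BS; step toward the weakest side.
    grads = np.array([
        np.mean(ue_sinr[ue_xy[:, 0] > bs_xy[0]]),   # users on the +x side
        np.mean(ue_sinr[ue_xy[:, 0] <= bs_xy[0]]),  # -x side
        np.mean(ue_sinr[ue_xy[:, 1] > bs_xy[1]]),   # +y side
        np.mean(ue_sinr[ue_xy[:, 1] <= bs_xy[1]]),  # -y side
    ])
    return int(np.nanargmin(grads))

if __name__ == "__main__":
    ue_xy = np.array([[10, 10], [30, 10], [10, 30], [30, 30]])
    ue_sinr = np.array([2.0, 15.0, 3.0, 14.0])  # synthetic: users left of the BS are weak
    print(greedy_direction(np.array([20, 20]), ue_xy, ue_sinr))  # -> 1, i.e. step in -x
```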
{
"alpha_fraction": 0.557941734790802,
"alphanum_fraction": 0.5765110850334167,
"avg_line_length": 41.905311584472656,
"blob_id": "d12a7acfbfb57ed30965367e4496ba8d1f75e398",
"content_id": "9cf4e5b7e0c130a6197fde5e9a381f0f771ae95e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18579,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 433,
"path": "/channel.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport math\nfrom IPython import display\n#from sinr_visualisation import *\n\nWATCH_WINDOW = 200 # used for visualisation only \nOUT_THRESH = 0 # DL SINR below this value is considered to be user outage\n\nclass LTEChannel:\n \"\"\"\n LTE channel class including simulation of LTE downlink and uplink channels \n \"\"\"\n def __init__(self, nUE, nBS, boundaries, init_ueLoc, init_bsLoc):\n \n [self.xMin, self.xMax, self.yMin, self.yMax] = boundaries\n self.gridX = self.xMax - self.xMin + 1\n self.gridY = self.yMax - self.yMin + 1\n\n self.nUE = nUE\n self.nBS = nBS\n self.gridWidth = 5\n\n # FDD ratio\n self.alpha = 0.5\n # total number of channels\n self.K = 120\n # freq reuse factor\n self.r_freq = 1\n # numebr of channels per BS in DL\n self.K_DL = self.alpha * self.K / self.r_freq\n # numebr of channels per BS in UL\n self.K_UL = (1 - self.alpha) * self.K / self.r_freq\n # UE Tx power in dBm\n self.P_ue_dbm = 23\n # BS Tx power in dBm\n self.P_bs_dbm = 20\n # per channel tx power in DL\n #P_b = P_bs / K_DL\n # per channel Gaussian noise power in dBm\n self.noise_dbm = -121\n \n #path_loss = a + b*log(d) if d>= path_loss_dis in dB\n# self.pathloss_a = 128.1\n# self.pathloss_b = 37.6\n# self.pathloss_dis = 0.035 #in Km d>= path_loss_dis to have path loss else path loss is 0\n self.pathloss_a = 38\n self.pathloss_b = 30\n self.pathloss_dis = 0\n # Antenna Gain in dB\n self.antenna_gain = 2\n # Equipment/penetrasion loss in dB\n self.eq_loss = 0\n # shadow fading\n self.shadowing_mean = 0 #log normal shadowing N(mean, sd)\n self.shadowing_sd = 2 #6log normal shadowing N(mean, sd)\n \n self.P_ue_watt = 10 **(self.P_ue_dbm / float(10)) * 1e-3 #UE Tx power in W\n self.P_bs_watt = 10 **(self.P_bs_dbm/ float(10)) * 1e-3 #BS Tx power in W\n self.noise_watt = 10 **(self.noise_dbm / float(10)) * 1e-3\n \n self.sc_ofdm = 12 #nbr of data subcarriers/subchannel bandwidth\n self.sy_ofdm = 14 #nbr of ofdm symbols/subframe\n self.t_subframe = 1e-3 #subframe durantion in s\n #mapping from sinr to MCS\n self.sinr_thresholds = [-float('inf'), -6.5, -4, -2.6, -1, 1, 3, 6.6, 10, 11.4, 11.8, 13, 13.8, 15.6, 16.8, 17.6, float('inf')]\n self.sinr_thresholds_watt = [10 ** (s / float(10)) for s in self.sinr_thresholds]\n self.efficiency = [1e-16, 0.15, 0.23, 0.38, 0.60, 0.88, 1.18, 1.48, 1.91, 2.41, 2.73, 3.32, 3.90, 4.52, 5.12, 5.55] #bits/symbol\n self.rate_thresholds = [(self.sc_ofdm * self.sy_ofdm / float(self.t_subframe)) * e * 1e-6 for e in self.efficiency] #Mb/s\n \n self.total_channels = 120\n # dl_channels_init = (alpha * total_channels)/reuse_factor\n self.ul_channels_init = (1-self.alpha) * self.total_channels\n \n # UL gain average over n number of (imaginary) users from the interfering BS\n self.n = 1000\n # radius of BS coverage used to generate (imaginary) users for the interfering BS\n self.dth = 100\n #number of already associated users on each BS\n self.ass_per_bs = np.ones((self.nBS,1))#[1, 1, 1, 1, 1, 1]\n \n self.hoBufDepth = 3 #time to trigger HO\n self.hoThresh_db = 1\n # self.init_BS = np.zeros((self.nUE)).astype('int32')\n\n self.interfDL = [range(self.nBS) for bs in range(self.nBS)]\n \n for bs in range(self.nBS):\n self.interfDL[bs].remove(bs)\n \n self.interfUL = self.interfDL\n \n self.current_BS, self.current_BS_sinr = self.GetBestDlBS(init_ueLoc, init_bsLoc) #self.init_BS\n self.bestBS_buf = [self.current_BS]\n\n \n # rgbColor code for each BS\n self.rgbColours = [[255,0,0],[0,255,0],[0,0,255],[0,255,255],[255,255,0]]\n # needed for animation/plot the rgb 
colour code for each UE regarding to which BS it connects to\n self.bsColour = np.zeros((nUE,3))\n \n # monitor UE 0\n self.ue2Watch = 0\n self.watch_ue_sinrbest = np.zeros((WATCH_WINDOW))\n self.watch_ue_sinrcurr = np.zeros((WATCH_WINDOW))\n \n #for visualising mean rate ul dl\n self.watch_ul_rate = np.zeros((WATCH_WINDOW))\n self.watch_dl_rate = np.zeros((WATCH_WINDOW))\n \n self.ue_out = np.where(self.current_BS_sinr<= OUT_THRESH)\n \n \n def reset(self, ueLoc, bsLoc):\n self.current_BS, self.current_BS_sinr = self.GetBestDlBS(ueLoc, bsLoc) #self.init_BS\n self.bestBS_buf = [self.current_BS]\n self.ue_out = np.where(self.current_BS_sinr<= OUT_THRESH)\n\n\n def GetBestDlBS(self, ueLoc, bsLoc):\n channelGainAll = self.GetChannelGainAll(ueLoc, bsLoc)\n dlSinr = self.GetDLSinrAllDb(channelGainAll)\n bestDlBS = np.argmax(dlSinr, axis=1)\n bestDlSINR = np.max(dlSinr, axis=1)\n return bestDlBS, bestDlSINR\n \n def SetDistanceMultiplier(self, gridWidth):\n self.gridWidth = gridWidth\n \n def SetTxPower (self, txpower):\n self.P_bs_dbm = txpower\n self.P_bs_watt = 10 **(self.P_bs_dbm/ float(10)) * 1e-3\n \n def SetHandoverThreshold (self, ho_thresh):\n self.ho_thresh = ho_thresh\n \n\n\n def UpdateDroneNet(self, ueLoc, bsLoc, ifdisplay=False, time_now=0, get_rate=False):\n channelGainAll = self.GetChannelGainAll(ueLoc, bsLoc)\n dlSinr = self.GetDLSinrAllDb(channelGainAll)\n bestDlBS = np.argmax(dlSinr, axis=1)\n bestDlSINR = np.max(dlSinr, axis=1)\n# print time_now, \"s ue \", ueLoc[10], \" dl sinr \", bestDlSINR[10], \" from BS \", bestDlBS[10]\n\n for ue in xrange(self.nUE):\n self.current_BS_sinr[ue] = dlSinr[ue, self.current_BS[ue]]\n\n if np.shape(self.bestBS_buf)[0] < self.hoBufDepth:\n self.bestBS_buf = np.append(self.bestBS_buf, [bestDlBS], axis=0)\n else:\n #FIFO buffer bottom-in\n self.bestBS_buf[:-1] = self.bestBS_buf[1:]\n self.bestBS_buf[-1] = bestDlBS\n # print \"bestBS_buf..\\n\", bestBS_buf\n bestRemain = np.all(self.bestBS_buf == self.bestBS_buf[0,:], axis = 0)\n bestChanged = self.current_BS != self.bestBS_buf[-1,:]\n\n ifNeedHO = np.logical_and(bestRemain, bestChanged)\n ifNeedHO = np.logical_and(ifNeedHO, bestDlSINR - self.current_BS_sinr > self.hoThresh_db)\n# print \"if needHO\", ifNeedHO\n\n if np.any(ifNeedHO):\n ueNeedHO = np.flatnonzero(ifNeedHO)\n for ue in ueNeedHO:\n fromBS = self.current_BS[ue]\n toBS = self.bestBS_buf[-1][ue]\n self.current_BS[ue] = self.bestBS_buf[-1][ue]\n \n #compute number of ue out of coverage\n ue_out = np.array(np.where(self.current_BS_sinr<= OUT_THRESH))\n new_out = ue_out[np.isin(ue_out, self.ue_out, invert=True)]\n\n self.ue_out = ue_out\n n_outage = np.size(new_out)\n# print \" \", new_out, \" \", self.ue_out, \" \", n_outage\n\n \n if get_rate or ifdisplay:\n #Get DL/Ul rate updates\n #DL\n dlRatePerChannel = self.GetDLRatePerChannel(dlSinr)\n dlRatePerChannel_from_currentBS = np.zeros((self.nUE))\n #UL\n ulInterfPower = self.GetULInterference(bsLoc)\n # print \"UL interference power \\n\", ulInterfPower\n ulRequiredRate = 1\n\n ulSinr = []\n ulNumChannelNeeded = []\n ulRateUEBS = []\n\n for u in range(self.nUE):\n tup = self.GetULRateChannels (u, ulRequiredRate, ulInterfPower, channelGainAll)\n ulSinr.append(tup[0])\n ulNumChannelNeeded.append(tup[1])\n ulRateUEBS.append(tup[2])\n\n ulSinr = np.array(ulSinr)\n ulNumChannelNeeded = np.array(ulNumChannelNeeded)\n ulRateUEBS = np.array(ulRateUEBS)\n\n dlRatePerChannel_from_currentBS = np.zeros((self.nUE))\n ulRatePerChannel_from_currentBS = np.zeros((self.nUE))\n for ue_id in 
range(self.nUE):\n dlRatePerChannel_from_currentBS[ue_id] = dlRatePerChannel[ue_id][self.current_BS[ue_id]] #can be accelarated\n ulRatePerChannel_from_currentBS[ue_id] = ulRateUEBS[ue_id][self.current_BS[ue_id]] #can be accelarated\n # mean rate of all UEs as received from their current BSs(maybe don't care)\n dl_rate_mean = np.mean(dlRatePerChannel_from_currentBS)\n ul_rate_mean = np.mean(ulRatePerChannel_from_currentBS)\n # print \"mean DL and UL Rate Per Channel \\n\", dl_rate_mean, ul_rate_mean\n\n\n association_map = self.GetCurrentAssociationMap(ueLoc)\n\n# print self.current_BS_sinr, \"\\n mean sinr\", np.mean(self.current_BS_sinr)\n return association_map, np.mean(self.current_BS_sinr), n_outage\n\n\n # compute the euclidean distance between 2 nodes\n def GetDistance(self, coord1, coord2):\n coord1 = coord1[:2]* self.gridWidth\n coord2 = coord2[:2]* self.gridWidth\n dist = np.linalg.norm(coord1-coord2)\n\n # dist = math.sqrt((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2 + (coord1[2] - coord2[2])**2)\n return dist\n \n # compute the pass loss value for the given euclidean distance between BS b and UE i\n # based on urban pass loss model as per 3GPP TR 36.814\n def GetPassLoss(self, d):\n# d = d/1000#work with km\n loss = 0\n if d > self.pathloss_dis:\n loss = self.pathloss_a + self.pathloss_b * math.log10(d)\n return loss\n \n def GetChannelGain(self, coord1, coord2):\n d = self.GetDistance(coord1, coord2)\n pathLoss = self.GetPassLoss(d)\n fading = np.random.normal(self.shadowing_mean, self.shadowing_sd)\n # fading = 10 #static fading >> for calibration only!!!\n # print fading\n # the channel gain between UE and BS accounts for\n #1) antenna gain 2) pathloss 3) equipment loss 4) shadow fading\n channel_gain_db = self.antenna_gain - pathLoss - fading - self.eq_loss\n channel_gain = 10 ** (channel_gain_db / float(10))\n return channel_gain\n \n def GetChannelGainAll(self, ueLocations, bsLocations):\n n_ue = np.shape(ueLocations)[0]\n n_bs = np.shape(bsLocations)[0]\n channel_gain_all = np.zeros((n_ue, n_bs))\n \n for ue_id in range(n_ue):\n for bs_id in range(n_bs):\n channel_gain_all[ue_id][bs_id] = self.GetChannelGain(ueLocations[ue_id], bsLocations[bs_id])\n return channel_gain_all\n \n def GetDLSinrAllDb(self, channel_gain_all):\n sinr_all = np.zeros((self.nUE,self.nBS))\n for ue_id in range(self.nUE):\n for bs_id in range(self.nBS):\n interf_bs = self.interfDL[bs_id]\n \n P_interf = np.sum(self.P_bs_watt * channel_gain_all[ue_id][interf_bs])\n sinr_dl = self.P_bs_watt * channel_gain_all[ue_id, bs_id] / float(self.noise_watt + P_interf)\n \n sinr_all[ue_id][bs_id] = 10 * math.log10(sinr_dl)\n return sinr_all\n \n #Mapping from SINR to MCS rates\n def GetDLRatePerChannel(self, dl_sinr_db):\n dl_rate_per_channel = np.zeros((self.nUE,self.nBS))\n for ue_id in range(self.nUE):\n for bs_id in range(self.nBS):\n for l in range(len(self.sinr_thresholds) - 1):\n if (dl_sinr_db[ue_id][bs_id] >= self.sinr_thresholds[l] and dl_sinr_db[ue_id][bs_id] < self.sinr_thresholds[l+1]):\n dl_rate_per_channel[ue_id][bs_id] = self.rate_thresholds[l]\n break\n return dl_rate_per_channel\n \n def GetNumberDLChannelNeeded(self, requiredRate, dl_rate_per_channel):\n dl_needed_channels = np.zeros((self.nUE,self.nBS))\n for ue_id in range(self.nUE):\n for bs_id in range(self.nBS):\n dl_needed_channels[ue_id][bs_id] = requiredRate/dl_rate_per_channel[ue_id][bs_id]\n return dl_needed_channels\n \n def GetAverageULChannelGainFromInterfBS(self, bs_id, intf_id, bs_loc, bs_intf_loc):\n channel_gain = 
np.zeros((self.n))\n \n theta = np.random.uniform(0, 2*math.pi, self.n)\n r = np.random.uniform(0, self.dth, self.n)\n \n #imagine n users attached to bs_intf\n vfunc_sin = np.vectorize(math.sin)\n vfunc_cos = np.vectorize(math.cos)\n \n rand_users_loc = np.array([bs_intf_loc[0] + r* vfunc_sin(theta), bs_intf_loc[1] + r* vfunc_cos(theta)])\n #if simulating 3D model (judging by the number of dimensions bs_loc has)\n if np.size(bs_loc) == 3:\n rand_users_loc = np.append(rand_users_loc, np.ones((1,self.n))*bs_loc[2], axis=0)\n \n rand_users_loc = np.transpose(rand_users_loc)\n \n # save the random user location for calibration\n # bs_id and intf_id can be removed from the function input if not printing this anymore\n # str_name = \"rand_users_bs_\" + str(bs_id) + \"_intf_\" + str(intf_id)\n # np.save(str_name, rand_users_loc)\n \n for intf_ue_id in range(self.n):\n channel_gain[intf_ue_id] = self.GetChannelGain(bs_loc, rand_users_loc[intf_ue_id])\n \n return np.mean(channel_gain)\n\n\n def GetAverageULChannelGain(self, bs_loc):\n avg_channel_gain = np.zeros((self.nBS, self.nBS))\n for bs_id in range(self.nBS):\n for intf_id in range(self.nBS):\n #make avg_channel_gain symmetric\n if (intf_id >= bs_id):\n if intf_id in self.interfUL[bs_id]:\n avg_channel_gain[bs_id][intf_id] = self.GetAverageULChannelGainFromInterfBS(bs_id, intf_id, bs_loc[bs_id], bs_loc[intf_id])\n else:\n avg_channel_gain[bs_id][intf_id] = avg_channel_gain[intf_id][bs_id]\n \n # print \"UL channel gain\", avg_channel_gain\n return avg_channel_gain\n\n def GetULInterference(self, bs_loc):\n ul_interference = np.zeros((self.nBS))\n ulAvgChannelGain= self.GetAverageULChannelGain(bs_loc)\n \n for bs_id in range(self.nBS):\n for intf_id in self.interfUL[bs_id]:\n ul_interference[bs_id] += self.P_ue_watt * ulAvgChannelGain[bs_id][intf_id] * self.ass_per_bs[intf_id] / self.ul_channels_init\n \n return ul_interference\n \n def GetULRateChannels (self, u, ul_datarate, ul_interference, channel_gain):\n \n \"\"\"\n #list of the number of needed channels on the UL from each base station to grant the user u\n #the data rate he asks for (when in guaranteed throughput)\n #returns both the number of channels needed by user u from each BS (to get the asked data rate),\n #and the uplink rate between the user u and each BS\n \n :param u: user index\n :param bs: {b0:[x_b0, y_b0], b1:[x_b1, y_b1], ...} #dictionary of BS coordinates\n :param ul_datarate: the value of the requested data rate on the UL\n :param ul_interference: uplink_interference(bs, interference_set, ass_per_bs)\n :param channel_gain: compute_channel_gain (users, bs)\n :return: ul_channels_needed, ul_rate\n \"\"\"\n \n ul_sinr_db = []\n ul_channels_needed = []\n ul_rate = []\n \n ul_channels_min = [ul_datarate / r for r in self.rate_thresholds]\n \n for b in range(self.nBS):\n ul_channels_threshold = []\n \n sinr_ratio = (self.P_ue_watt * channel_gain[u][b]) / (self.noise_watt + ul_interference[b]) #the SINR ratio without dividing by the number of channels needed, for a given user u\n \n ul_sinr_db.append(10 * math.log10(sinr_ratio))\n \n for l in range(1, len(self.sinr_thresholds_watt) - 1):\n ul_channels_threshold.append(sinr_ratio/self.sinr_thresholds_watt[l])\n \n ul_channels_threshold.insert(0, float('inf'))\n ul_channels_threshold.append(0)\n \n match = []\n for id, val in enumerate(ul_channels_min):\n if val <= ul_channels_threshold[id] and val > ul_channels_threshold[id +1]:\n match.append(val)\n\n ul_channels_needed.append(min(match))\n 
ul_rate.append(ul_datarate/min(match)) #assume it to be rate per channel?\n\n\n return ul_sinr_db, ul_channels_needed, ul_rate\n\n def GetCurrentAssociationMap(self, ueLoc):\n \"\"\"\n utility func mainly for the class mobile_env\n take ue locations as input,\n and convert current_BS into a (nBS x, GridX, GridY) heatmap for each BS\n NB: current_BS might not be best_bs\n Return the association heatmap\n TODO: check dim gridX_N and gridY_N\n \"\"\"\n# convert to 2D if given a 3D location\n if np.shape(ueLoc)[1] == 3:\n ueLoc = ueLoc[:,:-1]\n \n\n association_map = np.zeros((self.nBS, self.gridX, self.gridY))\n# association_sinr_map = np.zeros((self.nBS, gridX, gridY))\n\n for ue in range(self.nUE):\n bs = self.current_BS[ue]\n association_map[bs][ueLoc[ue][0]][ueLoc[ue][1]] += 1\n# association_sinr_map[bs][ueLoc[ue][0]][ueLoc[ue][1]] = self.current_BS_sinr[ue]\n\n return association_map#, association_sinr_map\n\n def GetSinrInArea (self, bsLoc):\n\n dl_sinr = np.zeros((self.gridX, self.gridY))\n\n loc_id = 0\n for x_ in range(self.xMin, self.xMax):\n for y_ in range(self.yMin, self.yMax):\n dist = []\n for bs in range(np.shape(bsLoc)[0]):\n dist.append(self.GetDistance(np.asarray([x_,y_,0]),bsLoc[bs]))\n \n bs_id = np.argmin(dist)\n P_interf = 0\n \n for i in self.interfDL[bs_id]:\n interf_gain = self.GetChannelGain(np.asarray([x_,y_,0]), bsLoc[i])\n P_interf += self.P_bs_watt * interf_gain\n \n sinr_dl = self.P_bs_watt * self.GetChannelGain(np.asarray([x_,y_,0]), bsLoc[bs_id]) / float(self.noise_watt + P_interf)\n\n dl_sinr[x_][y_] = 10 * math.log10(sinr_dl)\n\n return dl_sinr\n\n"
},
{
"alpha_fraction": 0.7373860478401184,
"alphanum_fraction": 0.7501519918441772,
"avg_line_length": 54.76271057128906,
"blob_id": "9f966699be031bad65318829973fd422d3289d8c",
"content_id": "2ab2f59eeab2218b027415403fdfbdd8dd92ee6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3294,
"license_type": "no_license",
"max_line_length": 466,
"num_lines": 59,
"path": "/README.md",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "# Overview \nSimulation scripts for the mobility management of UAV base stations project mainly built for paper https://dl.acm.org/citation.cfm?id=3308964. \n\n# Requirements \n* python2.7 \n* numpy==1.16.2\n* tensorflow \n* IPython \n* matplotlib \n\n# Files\n* main.py\n - main simulation script with A3C (V. Mnih et al. 2016. Asynchronous methods for deep reinforcement learning. In ICML. 1928–1937.) implementation \n - multi-threading to initialise parallel training of multiple workers in parallel spaces (MobiEnvironments) \n - each worker creates a MobiEnvironment instance and starts training in this environment \n - there is a pair of global AC nets and local AC nets for worker. Workers train their own nets individually while push the gradients to the global nets periodically, then the global nets optimise uploaded gradients from all workers and distribute the same optimal gradients to all workers. \n - choices of CNN and MLP are implimented. Default MLP nets perform as well as CNN in prior work with less training complexity \n \n \n* main_test.py \n - load trained model to test (taking input AC model from ./train/Global_A_PARA%.npy where % can be the training step, 2000 by default) \n - test is done on controlled UE mobility trace by loading a file ./ue_trace_10k.npy \n - at each test step, the output of nn is argmax-ed to make control decisions of UAV movements \n - per step reward, SINR, and computation time are recorded for performance evaluation (output to ./test) \n \n \n* mobile_env.py \n - followed openAI's gym implementation structure for a wireless mobile environment \n - creates a LTE wireless channel which provides computation of SINR values and handover functionality \n - step() and step_test() take action from the RL agent and returns updated state, reward, and customisable information to the RL agent. Please be careful here to make the two function consistant. It is not ideal to have two functions one for training and one for testing, but the reason to do this is to enable different user mobility models while keep both training and testing steps computationally cheap (rather than switching between if conditions per step) \n - during training the user moves following the group reference model \n - during testing the users move using preloaded trace (ue_trace_10k.npy), which is generated from the group reference model \n - reward function currently consists of a reward on mean sinr value and a penalty on number of outaged users. which is open for improvement\n \n* channel.py \n - downlink and uplink SINR \n - In the WAIN work we take only downlink sinr \n\n* ue_mobility.py \n - a couple of mobility models for UE's movement \n - group reference (X. Hong et al. 1999. A group mobility model for ad hoc wireless networks. In ACM MSWiM. 53–60.) model is used in the WAIN paper. please check the WAIN paper for more details \n\n* sinr_visualisation.py \n - utility functions for visualisation during the simulations \n\n# Build virtual environment \n` virtualenv env ` \n` source env/bin/activate ` \n\n# Run training \n` mkdir train ` \n` python main.py ` \n\n# Run testing\n` mkdir test ` \n` python main_test.py `\n\n### Email lirui628@gmail for any questions\n### Have fun :squirrel::octocat::robot::four_leaf_clover:\n"
},
{
"alpha_fraction": 0.6460176706314087,
"alphanum_fraction": 0.6725663542747498,
"avg_line_length": 15.142857551574707,
"blob_id": "34f813dfe253bdc80ea0a64e7193a8cbcf433b88",
"content_id": "4012820c822ed4bd6e67bb6e0c599d6cb9aa41a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 7,
"path": "/bash_run_seeds.sh",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "for i in {1..10}\ndo\n\techo bash starting seed$i \n\tmkdir seed$i\n\tpython main.py -seed=$i\n\tcp -r train seed$i/\ndone\n"
},
{
"alpha_fraction": 0.5569869875907898,
"alphanum_fraction": 0.5749610662460327,
"avg_line_length": 35.59695816040039,
"blob_id": "5472b7baff6964affebf3a921c57c57f1d122cf1",
"content_id": "485af7039cfb9d84583d1eea66d5feda6d18fa1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9625,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 263,
"path": "/mobile_env.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "\"\"\"\n # mobile environment that update channel and association\n \"\"\"\nimport numpy as np\nimport random\nimport itertools\nimport math\nfrom channel import *\nfrom ue_mobility import *\nimport copy\nimport numpy as np\nimport sys\n\n#matplotlib.rcParams.update({'font.size': 14})\n\n# defining the number of steps\nMAXSTEP = 2000\nUE_STEP = 1\n\nN_ACT = 5 #number of actions of a single agent\n\nMAX_UE_PER_GRID = 1 # Maximum number of UE per grid\n\n# relative hight of the BSs to UEs in m assuming plain terrain\nH_BS = 10\n# min distance between BSs in Grids\nMIN_BS_DIST = 2\n\nR_BS = 50\n#\nBS_STEP = 2\n\n\nclass MobiEnvironment:\n \n def __init__(self, nBS, nUE, grid_n=200, mobility_model = \"group\", test_mobi_file_name = \"\"):\n self.nBS = nBS\n self.nUE = nUE\n self.bs_h = H_BS\n \n # x,y boundaries\n self.grid_n = grid_n\n \n [xMin, xMax, yMin, yMax] = [1, self.grid_n, 1, self.grid_n]\n boundaries = [xMin, xMax, yMin, yMax]\n# xBS = np.array([int(xMin +1), int(xMax -1), int(xMin+1), int(xMax-1)])\n# yBS = np.array([int(yMin +1), int(yMax -1), int(yMax-1), int(yMin+1)])\n xBS = np.array([int(xMax/4), int(xMax/4), int(xMax*3/4), int(xMax*3/4)])\n yBS = np.array([int(yMax/4), int(yMax*3/4), int(yMax/4), int(yMax*3/4)])\n\n\n self.boundaries = boundaries\n self.mobility_model = mobility_model\n print \"mobility model: \", mobility_model, \" grid size \", grid_n\n # bsLoc is 3D, bsLocGrid is 2D heatmap\n #self.bsLoc, self.bsLocGrid = GetRandomLocationInGrid(self.grid_n, self.grid_n, self.nBS, H_BS, MIN_BS_DIST)\n self.initBsLoc = np.array([xBS, yBS, np.ones((np.size(xBS)))*self.bs_h], dtype=int).T\n self.initBsLocGrid = GetGridMap(self.grid_n, self.grid_n, self.initBsLoc[:,:2])\n self.bsLoc = copy.deepcopy(self.initBsLoc)\n self.bsLocGrid = copy.deepcopy(self.initBsLocGrid)\n\n# self.ueLoc, self.ueLocGrid = GetRandomLocationInGrid(self.grid_n, self.grid_n, self.nUE)\n self.ueLoc = []\n self.ueLocGrid = []\n \n self.mm = []\n \n #mobility trace used for testing\n self.test_mobi_trace = []\n\n if self.mobility_model == \"random_waypoint\":\n self.mm = random_waypoint(nUE, dimensions=(self.grid_n, self.grid_n), velocity=(1, 1), wt_max=1.0)\n elif self.mobility_model == \"group\":\n# self.mm = tvc([10,10,10,10], dimensions=(self.grid_n, self.grid_n), velocity=(1, 1.), aggregation=[0.5,0.2], epoch=[1000,1000])\n self.mm = reference_point_group([10,10,10,10], dimensions=(self.grid_n, self.grid_n), velocity=(0, 1), aggregation=0.8)\n for i in range(200):\n next(self.mm)\n i += 1 #repeat in reset\n elif self.mobility_model == \"in_coverage\":\n self.ueLoc, self.ueLocGrid = GetRandomLocationInCellCoverage(self.grid_n, self.grid_n, R_BS, self.bsLoc, self.nUE)\n elif self.mobility_model == \"read_trace\":\n print \"testing with mobility trace \", test_mobi_file_name\n assert test_mobi_file_name\n self.ueLoc_trace = np.load(test_mobi_file_name)\n \n self.ueLoc = self.ueLoc_trace[0]\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n \n else:\n sys.exit(\"mobility model not defined\")\n\n if (self.mobility_model == \"random_waypoint\") or (self.mobility_model == \"group\"):\n positions = next(self.mm)\n #2D to 3D\n z = np.zeros((np.shape(positions)[0],0))\n self.ueLoc = np.concatenate((positions, z), axis=1).astype(int)\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n \n self.channel = LTEChannel(self.nUE, self.nBS, self.boundaries, self.ueLoc, self.bsLoc)\n self.association = self.channel.GetCurrentAssociationMap(self.ueLoc)\n\n \n 
self.action_space_dim = N_ACT**self.nBS\n self.observation_space_dim = self.grid_n * self.grid_n * (nBS + 1) * MAX_UE_PER_GRID\n\n self.state = np.zeros((nBS + 1, self.grid_n, self.grid_n ))\n self.step_n = 0\n \n\n def SetBsH(self, h):\n self.bs_h = h\n \n \n def reset(self):\n # Get random locations for bs and ue\n #self.bsLoc, self.bsLocGrid = GetRandomLocationInGrid(self.grid_n, self.grid_n, self.nBS, H_BS, MIN_BS_DIST)\n\n self.bsLoc = copy.deepcopy(self.initBsLoc)\n self.bsLocGrid = copy.deepcopy(self.initBsLocGrid)\n \n if (self.mobility_model == \"random_waypoint\") or (self.mobility_model == \"group\"):\n positions = next(self.mm)\n #2D to 3D\n z = np.zeros((np.shape(positions)[0],0))\n self.ueLoc = np.concatenate((positions, z), axis=1).astype(int)\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n elif self.mobility_model == \"read_trace\":\n print \"reseting mobility trace \"\n self.ueLoc = self.ueLoc_trace[0]\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n else:\n self.ueLoc, self.ueLocGrid = GetRandomLocationInCellCoverage(self.grid_n, self.grid_n, R_BS, self.bsLoc, self.nUE)\n\n # reset channel\n self.channel.reset(self.ueLoc, self.bsLoc)\n # reset association\n self.association = self.channel.GetCurrentAssociationMap(self.ueLoc)\n \n self.state[0] = self.bsLocGrid\n self.state[1:] = self.association\n \n # self.ueLocGrid = np.sum(self.association, axis = 0)\n # print np.array_equal(np.sum(self.association, axis = 0), self.ueLocGrid )\n\n self.step_n = 0\n \n return np.array(self.state)\n \n def step(self, action , ifrender=False): #(step)\n \n positions = next(self.mm)\n #2D to 3D\n z = np.zeros((np.shape(positions)[0],0))\n self.ueLoc = np.concatenate((positions, z), axis=1).astype(int)\n\n self.bsLoc = BS_move(self.bsLoc, self.boundaries, action, BS_STEP, MIN_BS_DIST + BS_STEP, N_ACT)\n self.association_map, meanSINR, nOut = self.channel.UpdateDroneNet(self.ueLoc, self.bsLoc, ifrender, self.step_n)\n \n self.bsLocGrid = GetGridMap(self.grid_n, self.grid_n, self.bsLoc)\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n \n r_dissect = []\n \n r_dissect.append(meanSINR/20)\n\n r_dissect.append(-1.0 * nOut/self.nUE)\n \n self.state[0] = self.bsLocGrid\n self.state[1:] = self.association_map\n\n# dist_penalty = Get_loc_penalty(self.bsLoc, 25, self.nUE)\n# r_dissect.append(-dist_penalty/self.nUE *0.5)\n\n done = False\n# done = Get_if_collide(self.bsLoc, MIN_BS_DIST)\n# collision = done\n\n self.step_n += 1\n \n# if collision:\n# r_dissect.append(-1)\n# else:\n# r_dissect.append(0)\n\n if self.step_n >= MAXSTEP:\n done = True\n\n reward = max(sum(r_dissect), -1)\n# print meanSINR, \" \",nOut,\" \", r_dissect, \" \", reward\n\n# info = [r_dissect, self.step_n, self.ueLoc]\n info = [r_dissect, self.step_n]\n return np.array(self.state), reward, done, info\n \n def step_test(self, action , ifrender=False): #(step)\n \"\"\"\n similar to step(), but write here an individual function to\n avoid \"if--else\" in the original function to reduce training\n time cost\n \"\"\"\n \n self.ueLoc = self.ueLoc_trace[self.step_n]\n self.bsLoc = BS_move(self.bsLoc, self.boundaries, action, BS_STEP, MIN_BS_DIST + BS_STEP, N_ACT)\n self.association_map, meanSINR, nOut = self.channel.UpdateDroneNet(self.ueLoc, self.bsLoc, ifrender, self.step_n)\n \n self.bsLocGrid = GetGridMap(self.grid_n, self.grid_n, self.bsLoc)\n self.ueLocGrid = GetGridMap(self.grid_n, self.grid_n, self.ueLoc)\n \n r_dissect = []\n \n r_dissect.append(meanSINR/20)\n \n 
r_dissect.append(-1.0 * nOut/self.nUE)\n\n self.state[0] = self.bsLocGrid\n self.state[1:] = self.association_map\n \n done = False\n \n self.step_n += 1\n \n if self.step_n >= MAXSTEP:\n done = True\n \n reward = max(sum(r_dissect), -1)\n \n info = [r_dissect, self.step_n, self.ueLoc]\n return np.array(self.state), reward, done, info\n\n\n def render(self):\n fig = figure(1, figsize=(20,20))\n for bs in range(self.nBS):\n subplot(self.nBS +1, 2, bs+1)\n title(\"bs \" + str(bs) + \"ue distribution\")\n imshow(self.association[bs], interpolation='nearest', origin='lower')\n \n subplot(self.nBS +1, 2, self.nBS+ bs+1)\n title(\"bs \" + str(bs) + \"ue sinr distribution\")\n imshow(self.association_sinr[bs], interpolation='nearest', origin='lower')\n\n def plot_sinr_map(self):\n fig = figure(1, figsize=(100,100))\n subplot(1, 2, 1)\n \n sinr_all = self.channel.GetSinrInArea(self.bsLoc)\n imshow(sinr_all, interpolation='nearest', origin='lower', vmin= -50, vmax = 100)\n colorbar()\n xlabel('x[m]')\n ylabel('y[m]')\n title('DL SINR [dB]')\n \n subplot(1,2,2)\n hist(sinr_all, bins=100, fc='k', ec='k')\n ylim((0,20))\n xlabel(\"SINR\")\n ylabel(\"number of UEs\")\n xlim(-100, 100)\n \n show()\n fig.savefig(\"sinr_map.pdf\")\n \n np.save(\"sinr_map\",sinr_all)\n"
},
{
"alpha_fraction": 0.5435658693313599,
"alphanum_fraction": 0.5593798160552979,
"avg_line_length": 28.036035537719727,
"blob_id": "85fb0d88cf9446a6ff22686978286d339c1b2f0d",
"content_id": "8b40426c6054051e78724f371d6a7dd922cc3dff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3225,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 111,
"path": "/main_test.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "import time\nTEST_ALGO = \"A3C\"\n\nFILE_NAME_APPEND = \"2000\"\nOUTPUT_FILE_NAME = \"test/\" + FILE_NAME_APPEND + '_'\n\ndef Load_AC_Net():\n \"\"\"\n Load pre-trained A3C model for testing\n \"\"\"\n file_name = \"train/Global_A_PARA\" + FILE_NAME_APPEND +\".npz\"\n files = np.load(file_name)\n\n a_params = files['arr_0']\n\n G_AC_TEST = ACNet('Global_Net')\n\n ops = []\n for idx, param in enumerate(a_params): ops.append(G_AC_TEST.a_params[idx].assign(param))\n SESS.run(ops)\n return G_AC_TEST\n\ndef Load_DPPO_Net():\n \"\"\"\n Load pre-trained DDPO model for testing\n \"\"\"\n\n file_name = \"test/PI_PARA\" + FILE_NAME_APPEND +\".npz\"\n files = np.load(file_name)\n\n pi_params = files['arr_0']\n\n G_PPO_TEST = PPONet()\n\n ops = []\n for idx, param in enumerate(pi_params): ops.append(G_PPO_TEST.pi_params[idx].assign(param))\n SESS.run(ops)\n return G_PPO_TEST\n\ndef Run_Test(g_test_net, reward_file_name):\n #maximum training step\n MAX_STEP = 10000\n\n #Reading mobility trace from file\n test_env = MobiEnvironment(N_BS, 40, 100, \"read_trace\", \"./ue_trace_10k.npy\")\n\n #reset states\n s = np.array([np.ravel(test_env.reset())])\n\n done = False\n step = 0\n\n outage_buf = []\n reward_buf = []\n sinr_all = []\n time_all = []\n x = tf.argmax(g_test_net.a_prob, axis = 1)\n# ue_walk_trace = []\n while step <= MAX_STEP:\n \n feed_dict = {g_test_net.s:s}\n\tstart_time = time.time()\n action = SESS.run(x, feed_dict=feed_dict)\n\ttime_all.append(time.time()-start_time)\n\n s_, r, done, info = test_env.step_test(action, False)\n # s_, r, done, info = test_env.step(action, False)\n \tsinr_all.append(test_env.channel.current_BS_sinr) \n reward_buf.append(info[0])\n \n# ue_walk_trace.append(info[2])\n if step % 500 == 0 or step == MAX_STEP:\n print \"step \", step\n np.save(reward_file_name + \"reward\", reward_buf)\n\t np.save(reward_file_name +\"sinr\",sinr_all)\n\t np.save(reward_file_name + \"time\", time_all)\n# np.save(\"ue_trace_10k\", ue_walk_trace)\n\n #if step % 5 == 0:\n #np.save(reward_file_name +\"ue_loc\" + str(step), test_env.ueLoc)\n #np.save(reward_file_name +\"sinr_map\" + str(step), test_env.sinr_map)\n #np.save(reward_file_name +\"assoc_sinr\" + str(step), test_env.assoc_sinr)\n # reset the environment every 2000 steps\n if step % 2000 == 0:\n s = np.array([np.ravel(test_env.reset())])\n #warm up in 500 steps\n for _ in range(500):\n _, _, _, _ = test_env.step_test(action, False)\n else:\n s = np.array([np.ravel(s_)])\n \n step+=1\n\n np.save(reward_file_name + \"reward\", reward_buf)\n np.save(reward_file_name + \"sinr\",sinr_all)\n np.save(reward_file_name + \"time\", time_all)\n# np.save(\"ue_trace_10k\", ue_walk_trace)\n\nif __name__ == \"__main__\":\n if TEST_ALGO == \"A3C\":\n from main import *\n SESS = tf.Session()\n \n test_net = Load_AC_Net()\n elif TEST_ALGO == \"DPPO\":\n from dppo_main import *\n SESS = tf.Session()\n \n test_net = Load_DPPO_Net()\n\n Run_Test(test_net, OUTPUT_FILE_NAME)\n\n\n"
},
{
"alpha_fraction": 0.6032816767692566,
"alphanum_fraction": 0.6158564686775208,
"avg_line_length": 32.96354293823242,
"blob_id": "659c9af320852141780e21b0332c3b70ca51a807",
"content_id": "b95c3cc73e4a20a8985de16f8abef81c16f8eee1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6521,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 192,
"path": "/sinr_visualisation.py",
"repo_name": "Phil610351/DRL_UAV_CellularNet",
"src_encoding": "UTF-8",
"text": "from matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib.pyplot import *\nfrom IPython import display\n\nmatplotlib.rcParams.update({'font.size': 14})\n#\n## used to plot snapshoot of user distribution\n#usrDistributionNSteps = np.zeros((nStep, gridX, gridY))\n#for userId in range(nUE):\n# (nStepX[userId],nStepY[userId]) = WalkToFixedDirection([xInit[userId],yInit[userId]], boundaries, stepLen, nStep, userId)\n# for stepN in range(nStep):\n# x = int(nStepX[userId][stepN])\n# y = int(nStepY[userId][stepN])\n# usrDistributionNSteps[stepN][x][y] += 1\n#\n#\n##plotting user distribution:\n## for time in [1,2,3,nStep/2, nStep-1]:\n#for time in [0]:\n# title(\"User Distribution at time \"+ str(time) + \"s\")\n# # print usrDistributionNSteps[time][np.nonzero(usrDistributionNSteps[time])]\n# imshow(usrDistributionNSteps[time].T, cmap='hot', interpolation='nearest', origin='lower')\n# xlabel(\"x\")\n# ylabel(\"y\")\n# show()\n#\n#\n## plotting user trajectory (static):\n#for ue in range(nUE):\n# title(\"Random Walk of UE \" + str(ue) +\"($n = \" + str(nStep) + \"$ steps)\")\n# ylim(xMax)\n# xlim(yMax)\n# xlabel(\"x\")\n# ylabel(\"y\")\n# plot(nStepX[ue],nStepY[ue])\n# show()\n#\n#\n## visualise UL DL sinrs\n## For best UL SINR\n#fig = figure()\n#sinrDistrbution = np.zeros((xMax - xMin, yMax - yMin))\n#for userId in range(nUE):\n# x = int(ueLocationAll[userId][0])\n# y = int(ueLocationAll[userId][1])\n# sinrDistrbution[y][x] = bestUlSinr[userId]\n#title(\"Best UL SINR\")\n#pos = imshow(sinrDistrbution, cmap='hot', interpolation='nearest')\n#fig.colorbar(pos)\n#show\n#savefig(\"BestULSINR\", dpi=None, facecolor='w', edgecolor='w',\n# orientation='portrait', papertype=None, format=None,\n# transparent=False, bbox_inches=None, pad_inches=0.1,\n# frameon=None)\n#\n## For best DL SINR\n#fig = figure()\n#sinrDistrbution = np.zeros((xMax - xMin, yMax - yMin))\n#for userId in range(nUE):\n# x = int(ueLocationAll[userId][0])\n# y = int(ueLocationAll[userId][1])\n# sinrDistrbution[y][x] = bestDlSinr[userId]\n#title(\"Best DL SINR\")\n#pos = imshow(sinrDistrbution, cmap='hot', interpolation='nearest')\n#fig.colorbar(pos)\n#show\n#savefig(\"BestDLSINR\", dpi=None, facecolor='w', edgecolor='w',\n# orientation='portrait', papertype=None, format=None,\n# transparent=False, bbox_inches=None, pad_inches=0.1,\n# frameon=None)\n#\n#\n## For individual BSs\n#sinrDistrbution = np.zeros((nBS, xMax - xMin, yMax - yMin))\n#\n#for bsId in range(nBS):\n# for userId in range(nUE):\n# x = int(ueLocationAll[userId][0])\n# y = int(ueLocationAll[userId][1])\n# # Issue with the ueLocationAll indexing when plot the heat map.\n# # checked with distance and SINR values all okay but when plotting the heatmap, x, y are inverted\n# # plotting fixed values are also okay..\n# # using sinrDistrbution[bsId][y][x] instead of sinrDistrbution[bsId][x][y] resolves the issue\n# # sinrDistrbution[bsId][x][y] = GetDistance(ueLocationAll[userId], bsLocationAll[bsId])\n# sinrDistrbution[bsId][y][x] = ulSinr[userId][bsId]#GetDistance(ueLocationAll[userId], bsLocationAll[bsId])\n#\n##plotting user distribution:\n#for bsId in range(nBS):\n# x = bsLocationAll[bsId][0]\n# y = bsLocationAll[bsId][1]\n# fig = figure()\n# ax = fig.add_subplot(111)\n# ax.annotate('BS', xy=(x,y), xytext=(x, y),\n# arrowprops=dict(facecolor='black', shrink=0.05))\n# for ueId in range(2):\n# ax.annotate('UE', xy=(ueLocationAll[ueId][0],ueLocationAll[ueId][1]), xytext=(ueLocationAll[ueId][0],ueLocationAll[ueId][1]),\n# 
arrowprops=dict(facecolor='white', shrink=0.05))\n# print \"UE\",ueId,\" (\",ueLocationAll[ueId] ,\")\", ulSinr[ueId][bsId]\n#\n# title(\"DL SINR Distribution from BS\"+ str(bsId) + \" (\" + str(x) + \", \" + str(y) + \")\")\n# imshow(sinrDistrbution[bsId], cmap='hot', interpolation='nearest')\n# xlabel(\"x [m]\")\n# ylabel(\"y [m]\")\n# show()\n\n\n\ndef draw_UE_HO(ue_loc, numGridX, numGridY, bsLoc, ue2watch, xbestSinr, xcurrSinr, xbestBS, xcurrBS, currentTime, dlRate, ulRate, size=(5, 5), color = []):\n fig = figure(1, figsize=size)\n fig.subplots_adjust(hspace=.4)\n# subplot(4, 1, 1)\n ueValGrid = np.zeros((3, numGridX, numGridY))\n if color.any():\n for usr in xrange(len(ue_loc)):\n ueValGrid[:, ue_loc[usr][0], ue_loc[usr][1]] = color[usr]\n else:\n # problem\n ueValGrid[:, ue_loc[0], ue_loc[1]] = 1\n \n for bsId in range(len(bsLoc)):\n x = bsLoc[bsId][0]\n y = bsLoc[bsId][1]\n strBS = \"BS \" + str(bsId)\n text(x, y, strBS, color='white')\n\n xlabel(\"x [m]\")\n ylabel(\"y [m]\")\n title(\"User distribution at time \" + str(currentTime))\n imshow(ueValGrid.T, interpolation='nearest', origin='lower')\n#\n# subplot(4,1,2)\n# grid = np.zeros((numGridX, numGridY))\n# grid[ue_loc[ue2watch][0], ue_loc[ue2watch][1]] = 1\n#\n# for bsId in range(len(bsLoc)):\n# x = bsLoc[bsId][0]\n# y = bsLoc[bsId][1]\n# strBS = \"BS \" + str(bsId)\n# text(x, y, strBS)\n#\n# xlabel(\"x [m]\")\n# ylabel(\"y [m]\")\n# title(\"UE\"+ str(ue2watch) + \" current location \")\n# imshow(grid.T, interpolation='nearest', origin='lower')\n#\n# # UE 2 watch SINR from current BS and best BS\n# subplot(4,1,3)\n# xlabel(\"Time [Steps]\")\n# ylabel(\"SINR [dB]\")\n# strLegendcur = \"current SINR (BS\" + str(xcurrBS) + \")\"\n# strLegendbes = \"best SINR (BS\" + str(xbestBS) + \")\"\n#\n# timeAxis = currentTime - np.array(range(len(xcurrSinr)))\n## print len(xcurrSinr)\n# plot(timeAxis, xcurrSinr, label = strLegendcur)\n# hold('on')\n# plot(timeAxis, xbestSinr, label = strLegendbes)\n#\n# legend(loc=2)\n# ylim(-100, 100)\n# title(\"SINR from best BS and current ass BS. UE\"+ str(ue2watch))\n#\n#\n# subplot(4,1,4)\n# xlabel(\"Time [Steps]\")\n# ylabel(\"Mean Rate [Mbps]\")\n# strLegendcur = \"UL\"\n# strLegendbes = \"DL\"\n#\n# timeAxis = currentTime - np.array(range(len(ulRate))) #here\n## print len(ulRate)\n# plot(timeAxis, ulRate, label = \"UL\")\n# hold('on')\n# plot(timeAxis, dlRate, label = \"DL\")\n#\n# legend(loc=2)\n# ylim(0, 1)\n# title(\"Mean rate of all UEs from serving BSs\")\n#\n#\n show()\n\n\ndef plot_sinr_distribution(sinr, x_from, x_to):\n hist(sinr, bins=50, fc='k', ec='k')\n \n xlabel(\"SINR\")\n ylabel(\"number of UEs\")\n xlim(x_from, x_to)\n# savefig(\"SINR_dist.pdf\")\n show()\n"
}
] | 8 |
jmarq/knucklestuff | https://github.com/jmarq/knucklestuff | 0a2b33d4383f4dcee86434f96984f7d3bc682740 | 1853ce7af6b7f747502a9f51d8089bef8c6a2f3f | 5be65a03c727dbfe8af6cd4674a7f458163d9a06 | refs/heads/master | 2021-01-10T09:54:11.495282 | 2017-03-13T19:32:16 | 2017-03-13T19:32:16 | 8,589,621 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4396551847457886,
"alphanum_fraction": 0.5129310488700867,
"avg_line_length": 45.400001525878906,
"blob_id": "431a9e52d4dc733b946db4d765b89bd85c5a3d08",
"content_id": "e577e84fbc775217436f7d30fe36af710d9a81b6",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 5,
"path": "/scramble.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "def scramble(s):\n \"\"\" returns a 2-tuple of strings resulting from the crossed fingers of a knuckle tattoo \"\"\"\n return (s[0]+s[4]+s[1]+s[5]+s[2]+s[6]+s[3]+s[7],\n s[4]+s[0]+s[5]+s[1]+s[6]+s[2]+s[7]+s[3]\n )\n"
},
{
"alpha_fraction": 0.5922203063964844,
"alphanum_fraction": 0.6009615659713745,
"avg_line_length": 31.225351333618164,
"blob_id": "b0c241be4661a65cd58861bb5f4ee1ca97fef180",
"content_id": "baa114fdf2d6b1cc458d216dd95278b967670227",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2288,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 71,
"path": "/loadwords.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "import timeit\nimport funcy as f\n\n\ndef load_word_set_by_length(length, as_set=True):\n filename = \"wordlists/scrabble_words_%d.txt\" % length\n infile = open(filename, 'r')\n content = infile.read()\n infile.close()\n content = content.strip().lower()\n word_list = content.split(\"\\n\")\n if as_set:\n word_list = set(word_list)\n return word_list\n\n\ndef load_all_words():\n word_list = []\n for i in range(1, 9):\n filename = \"wordlists/scrabble_words_%d.txt\" % i\n infile = open(filename, 'r')\n content = infile.read()\n infile.close()\n content = content.strip().lower()\n word_list = f.merge(word_list, content.split(\"\\n\"))\n return word_list\n\n\ndef load_prefix_lists():\n all_words = load_all_words()\n prefix_lists = {}\n for i in range(1, 8): # only need prefixes up to 7, right?\n # ignore words that are the length of the prefix or less\n long_enough_words = f.select(lambda d: len(d) > i, all_words)\n # grab the first i letters\n prefix_lists[i] = set(f.walk(lambda d: d[0:i], long_enough_words))\n # this line is probably extraneous at this point, right? \n # prefix_lists[i]= f.select(lambda d: len(d)==i, prefix_lists[i])\n return prefix_lists\n # {'a': ['a','and','android'] ...}\n# trying something different\n# but what?\n# some sort of dictionary of word beginnings\n# ['apple','ant','cat','corn','car']\n# what are we trying to do? know if any words start with a string.\n# that way if we don't check for 3 letter words that start with xqu, then 4 letter words, etc.\n# { 'a': {'p':{'p':{'l':{'e':'$'}}} 'n':{'t':'$'}} ...} something like that?\n\n\ndef test_perf(num=100):\n n = num\n l7 = load_word_set_by_length(7, as_set=False)\n s7 = load_word_set_by_length(7)\n\n def check_list():\n return \"wishful\" in l7\n \n def check_set():\n return \"wishful\" in s7\n l_time = float(timeit.timeit(check_list, number=n))\n s_time = float(timeit.timeit(check_set, number=n))\n ratio = l_time/s_time\n\n print \"list takes %f times more than the set, using %d repetitions\" % (ratio, n)\n print \"list: %f \" % l_time\n print \"set: %f\" % s_time\n\n\nif __name__ == \"__main__\":\n # test_perf(num=100)\n print f.walk(len, load_prefix_lists().values())\n"
},
{
"alpha_fraction": 0.6274362802505493,
"alphanum_fraction": 0.6334332823753357,
"avg_line_length": 26.224489212036133,
"blob_id": "4e669a599d4e35e94c5f499124413fd4b68efed0",
"content_id": "734a8ecf6ec0357f6cb2c3f70dc0b0224dde693a",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1334,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 49,
"path": "/flask_app/app.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask import request, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\ndb = SQLAlchemy(app)\n\n# max number of results to return to the user (sending all ~2 million is too much)\nresult_limit = 1000\n\n\n# the ORM model used to store the knuckle tattoos\nclass Tattoo(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n base = db.Column(db.String(8))\n scrambled = db.Column(db.String(8))\n\n def __init__(self, base, scrambled):\n self.base = base\n self.scrambled = scrambled\n\n def __repr__(self):\n return \"%s -> %s\" % (self.base, self.scrambled)\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef homepage():\n # process the user's query if there is one\n q = request.args.get(\"q\", None)\n if q:\n like_arg = \"%%%s%%\" % q\n tats = Tattoo.query.filter((Tattoo.base.like(like_arg)) | (Tattoo.scrambled.like(like_arg))).limit(result_limit)\n else:\n tats = Tattoo.query.limit(result_limit).all()\n\n # the data passed to the jinja2 template\n template_dict = {\n \"query\": q,\n \"tats\": tats\n }\n\n return render_template('home.html', data=template_dict)\n\nif __name__ == '__main__':\n app.run()\n"
},
{
"alpha_fraction": 0.7693208456039429,
"alphanum_fraction": 0.7810304164886475,
"avg_line_length": 59.85714340209961,
"blob_id": "e561e266e890294b82518cdb60a7ca3278eaca2d",
"content_id": "0cbec236b0f082343628cc5da4a44c2c7bcad7b2",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 237,
"num_lines": 14,
"path": "/flask_app/README.md",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "# a flask web application for serving the knuckle tattoos\n*this application has additional dependencies, listed in flask_app_requirements.txt*\n\n## uses sqlalchemy and sqlite to store/query the tats\nusers can search for tattoos/scrambles that contain words that interest them\n\n## Usage:\n\n1. make sure you have the dependencies from flask_app_requirements.txt installed\n2. make sure you have a text file from which to read the tattoo results (the default is scrabble_results.txt in the parent directory of this flask app). If you don't, run the scramble finder code (see the readme in the parent directory)\n3. run create_db.py\n4. run populate_db.py (this takes a while and is resource intensive)\n5. run app.py, or deploy it with gunicorn or some other wsgi server of your choice. \n6. visit the app in your web browser (http://localhost:5000 by default)\n\n\n"
},
{
"alpha_fraction": 0.7628865838050842,
"alphanum_fraction": 0.7628865838050842,
"avg_line_length": 23.25,
"blob_id": "1771662ef1ac4c759cb20c4939e04a2493e13366",
"content_id": "6a9ce3f9ff05eee363861e1dbacfd7eca287b7c9",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 4,
"path": "/flask_app/create_db.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "from app import db\n\n# create the knuckle tattoo table (and any potential others)\ndb.create_all()\n"
},
{
"alpha_fraction": 0.5776892304420471,
"alphanum_fraction": 0.5879339575767517,
"avg_line_length": 35.60416793823242,
"blob_id": "6218ad3dd13d80b12c6da707f746834f4bb664e7",
"content_id": "2190a7eac673b030d920fac6a74678690bf9c570",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1757,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 48,
"path": "/scramble_finder.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "# this file is used to actually loop through starting tattoos, scramble them,\n# and then check to see if the scrambles are composed of real words\nfrom checkwords import WordsChecker\nfrom loadwords import load_word_set_by_length\nfrom collections import deque\nimport scramble\n\n# maybe make this a Class that can store the cache as self.cache\n\n\nclass ScrambleFinder(WordsChecker):\n def __init__(self, limit=False):\n super(ScrambleFinder, self).__init__()\n self.input_tats = []\n word_sets = []\n # load words grouped by length, to create pairs that add up to 8 for the tattoo\n for i in range(0, 8):\n word_sets.append(load_word_set_by_length(i+1))\n for i in range(0, 4):\n for word in word_sets[i]:\n for word2 in word_sets[6-i]:\n w1 = word+word2\n self.input_tats.append(w1)\n if i != 3:\n w2 = word2+word\n self.input_tats.append(w2)\n for word in word_sets[7]:\n # add the 8 letter words\n self.input_tats.append(word)\n # apply limit if exists\n if limit:\n self.input_tats = self.input_tats[0:limit]\n # get rid of duplicates\n self.input_tats = deque(set(self.input_tats))\n\n def check_scrambles(self):\n while self.input_tats:\n self.check_scramble(self.input_tats.pop())\n\n def check_scramble(self, tat):\n scrambles = scramble.scramble(tat)\n for scrambled_tat in scrambles:\n if self.is_words(scrambled_tat):\n print \"%s -> %s\" % (tat, scrambled_tat)\n\nif __name__ == \"__main__\":\n scramble_finder = ScrambleFinder()\n scramble_finder.check_scrambles()\n"
},
{
"alpha_fraction": 0.7654061913490295,
"alphanum_fraction": 0.767507016658783,
"avg_line_length": 70.30000305175781,
"blob_id": "bfc05cd44d8f38282e1fbf2013933796e2cccbd6",
"content_id": "ebf918f47fe42ffe901da8ea5ca76d79312cc3f9",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1428,
"license_type": "no_license",
"max_line_length": 275,
"num_lines": 20,
"path": "/README.md",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "# Knuckle Tattoos with Crossed Fingers\n*which word combinations scramble to make other valid word combinations?*\n\nscramble? like if you got a knuckle tattoo and then interweave your fingers\n\n*wouldn't it be cool if your knuckle tattoo said something else when you interweave your fingers?*\n\nright now it checks 8-letter 2-word combos. Can be used to check other types of 8-letter input strings\n\n## !!! WARNING !!!!\nthis program deals with a LOT of data (there are millions of combinations of words to try), and is probably not as optimized as it could/should be. a cache it creates can use up several GB of memory. running this program might overwhelm your machine. use at your own risk.\n\nthe amount of processing the program does can lead to running times of several minutes on a fairly powerful machine. \n\n## usage\n- see WARNING above\n- install pip requirements \"pip install -r requirements.txt\"\n- if desired, edit the word lists in wordlists/, adding words that are meaningful to you, deleting words that aren't. \n- run the scramble finder \"python scramble_finder.py\" *this just prints the result to the console. if you want to save to a file, you can redirect the output to a file: \"python scramble_finder.py >> results.txt\"*\n- if you want, once you have a txt file of results you can use the code in /flask_app to populate a database and serve/query the results via a simple web app. *see the readme in /flask_app*\n\n\n"
},
{
"alpha_fraction": 0.6564774513244629,
"alphanum_fraction": 0.6564774513244629,
"avg_line_length": 21.899999618530273,
"blob_id": "f08a360554faada8011031ce13a66a0dc1f09260",
"content_id": "010c9dbdf58078e8ad73ac9a6819a4fa75011a60",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 687,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 30,
"path": "/tests.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "from checkwords import WordsChecker \n# import timeit\nimport unittest\n\n\nclass TestWordCheck(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.checker = WordsChecker()\n\n def setUp(self):\n self.checker.cache = {}\n\n def testWoodshed(self):\n self.assertEquals(self.checker.is_word(\"woodshed\"), True)\n\n def testdog(self):\n self.assertEquals(self.checker.is_word(\"dog\"), True)\n\n def testxxxyz(self):\n self.assertEquals(self.checker.is_word(\"xxxyz\"), False)\n\n # test multiword match\n def testboomboom(self):\n self.assertEquals(self.checker.is_words(\"boomboom\"), True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.7173100709915161,
"alphanum_fraction": 0.7198007702827454,
"avg_line_length": 32.45833206176758,
"blob_id": "deac8ea037bad859ce942f3ee893a64324958bca",
"content_id": "0e90151add20e9240a3c1986a7e24be1d82f8bb8",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 24,
"path": "/flask_app/populate_db.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "# reads a text file, parses it for knuckle tattoos and inserts them into the database \n# this takes a while and is resource intensive. probably because it is done in one session. \n# perhaps it could be broken apart to use less memory\n\nfrom app import db, Tattoo\n\nsource_filename = \"../scrabble_results.txt\"\n\n# split the file into a list of individual lines\nsource_file = open(source_filename, 'r')\nsource_content = source_file.read().strip().split(\"\\n\")\n\n\n# create a tattoo model object for each line using this function\ndef process_line(line):\n split_line = line.split(\"->\")\n split_line = [half.strip() for half in split_line]\n new_tat = Tattoo(split_line[0], split_line[1])\n db.session.add(new_tat)\n\nfor source_line in source_content:\n process_line(source_line)\n\ndb.session.commit()\n"
},
{
"alpha_fraction": 0.6284916400909424,
"alphanum_fraction": 0.6340782046318054,
"avg_line_length": 24.571428298950195,
"blob_id": "ef35d23923f514a6343a9a37ad0c523a32720eeb",
"content_id": "438c24c84e9a74dd98498f1a8d3fc9cdd1d9e36c",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 14,
"path": "/wordlists/process_scrabble_txt.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "import funcy\n\nfi = open(\"scrabble_dict.txt\", 'r')\ncontents = fi.read().lower().strip().split(\"\\n\")\nfi.close()\n\ngrouped_by_length = funcy.group_by(len, contents)\n\nfor i in range(1, 9):\n outfilename = \"scrabble_words_%d.txt\" % i\n outfile = open(outfilename, 'w')\n for word in grouped_by_length[i]:\n outfile.write(word+\"\\n\")\n outfile.close()\n"
},
{
"alpha_fraction": 0.7268518805503845,
"alphanum_fraction": 0.75,
"avg_line_length": 20.600000381469727,
"blob_id": "89e4cb384979ca8d2caebc761e863585a829710e",
"content_id": "4d1f1b5364d30bf234daa43e3252dfd19a204efd",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 20,
"path": "/timed.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "from timeit import timeit\nfrom scramble_finder import ScrambleFinder\n\n\ndef init_scramblefinder():\n return ScrambleFinder(limit=1000)\n\n\ndef run_scramblefinder():\n scramble_finder = ScrambleFinder(limit=10000)\n scramble_finder.check_scrambles()\n\n\ndef ptime(func, number=1):\n print \"took %f seconds for %d iterations\" % (float(timeit(func, number=number)), number)\n\n\n# ptime(init_scramblefinder)\n\nptime(run_scramblefinder)\n"
},
{
"alpha_fraction": 0.5585106611251831,
"alphanum_fraction": 0.5628626942634583,
"avg_line_length": 35.92856979370117,
"blob_id": "143c98481adb86b0fcabb81c5935fd3a35d9ff52",
"content_id": "b64c7843d12cef232045bad1695249c0fb0bafa5",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2068,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 56,
"path": "/checkwords.py",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "# check words using prefix lists in algorithm somehow?\n\nfrom loadwords import load_word_set_by_length, load_prefix_lists, load_all_words\n\n\nclass WordsChecker(object):\n def __init__(self):\n super(WordsChecker, self).__init__()\n self.word_sets = []\n for i in range(0, 8):\n self.word_sets.append(load_word_set_by_length(i+1))\n self.prefix_lists = load_prefix_lists()\n self.all_words = set(load_all_words())\n self.cache = {}\n\n def is_word(self, s):\n # eventually you should more precisely time the difference between these two. it is close, but which is better?\n # use one big list rather than 8 lists\n # and get rid of length checks, word_sets lookup, subtraction, etc.\n # if s:\n # return s in self.all_words\n # else:\n # return False\n return s in self.all_words\n \n # need to make the use of a cache optional, in case machine doesn't have enough memory.\n # how best to do this, without adding much extra checking to the method itself,\n # which will be called millions of times?\n # maybe another method entirely, and the decision on which one to use could be made once upfront,\n # either in init method here or in the code that actually calls .is_words(s)\n def is_words(self, s):\n cached = self.cache.get(s, -1)\n if cached != -1:\n return cached\n i = 1\n len_s = len(s)\n while i <= len_s:\n currently_checking = s[0:i]\n if self.is_word(currently_checking):\n if len_s == i:\n self.cache[s] = True\n return True\n elif self.is_words(s[i:]):\n self.cache[s] = True\n return True\n if i in self.prefix_lists and currently_checking in self.prefix_lists[i]:\n i += 1\n else:\n self.cache[s] = False\n return False\n self.cache[s] = False\n return False\n\n\nif __name__ == \"__main__\":\n pass\n"
},
{
"alpha_fraction": 0.38461539149284363,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 12,
"blob_id": "0dc53624d747fb6169be22c6f97cca2184e7c090",
"content_id": "d519e7e328c6b4ce9fb9dc46d0ed323d88634385",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "jmarq/knucklestuff",
"src_encoding": "UTF-8",
"text": "funcy==1.7.3\n"
}
] | 13 |
Ananileaf/AGE | https://github.com/Ananileaf/AGE | f1463a5d13a32043cf5eb212749af948b433ed6c | 01ffec15f4e4e9c06284a30869b8f8cd582daa6b | 71282728fa61ea0ddf7ac12a7e30d59a1ffbff43 | refs/heads/master | 2020-09-02T00:08:26.435229 | 2019-11-02T10:01:13 | 2019-11-02T10:01:13 | 219,092,397 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46666666865348816,
"alphanum_fraction": 0.5388888716697693,
"avg_line_length": 35,
"blob_id": "552ac61d350839ab558ec2a4274db8b04dcddedd",
"content_id": "0b1f0092cee52ce1c0c0c9d1d312f45334563cf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 424,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 10,
"path": "/datasets_config.py",
"repo_name": "Ananileaf/AGE",
"src_encoding": "UTF-8",
"text": "datasets_config = {\n\t\t\t\t\t\t'dataset_path':'/home/zuowenhang/datasets/AGE',#相对路径可能报错\n\t\t\t\t\t\t'img_size':998,\n \t\t\t\t\t'rotate_angle':15, #[-rotate_angle,rotate_angle]\n \t\t\t\t\t'num_workers':2, #\n \t\t\t\t\t'batch_size':4, #\n \t\t\t\t\t'SNR':0.95, #椒盐噪声,[0.0,1.0] 1不触发\n \t\t\t\t\t'GaussianBlur_sigma':1.0, #高斯噪声,0不触发\n \t\t\t\t\t'Noisy_prob':0.5, #[0,0.5)触发椒盐,[0.5,1]触发高斯\n}\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 8.5,
"blob_id": "d639b29e13dd5cdc580019ed6d6ad31c8c22b29d",
"content_id": "ebcaed7a9be79b6a96fe738bfdb295406c63a880",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Ananileaf/AGE",
"src_encoding": "UTF-8",
"text": "# -AGE\n医疗影像课程AGE代码\n"
},
{
"alpha_fraction": 0.5550873875617981,
"alphanum_fraction": 0.5802122354507446,
"avg_line_length": 25.462810516357422,
"blob_id": "b311697246b23d66d3f5d2a71c6bc6e3a01c6164",
"content_id": "0a02f01b1c5241a05ec90584363244266bf989e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6430,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 242,
"path": "/dataloader.py",
"repo_name": "Ananileaf/AGE",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport torchvision\nimport pandas as pd\nimport os\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom datasets_config import datasets_config\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\nfrom imgaug import parameters as iap\nfrom torchvision import transforms\nfrom PIL import Image\nimport numpy as np\n#########loader = get_dataloader()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[2]:\n\n\ndef get_img(file_name,dataset_path):\n file_dir = os.path.join(dataset_path,\"Training100\",\"ASOCT_Image\",file_name)\n if os.path.exists(file_dir) == False:\n print(\"not exist :\",file_dir)\n img = cv2.imread(file_dir)\n \n return img\n\n\n# In[3]:\n\n\ndef show_label(img,point):\n point = (int(point[0]),int(point[1]))\n point_size = 15\n point_color = (255, 0, 0) # BGR\n thickness = 8 # 可以为 0 、4、8\n cv2.circle(img, point, point_size, point_color, thickness)\n \n plt.imshow(img)\n plt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[4]:\n\n\nclass Augmentation(object):\n def __init__(self,datasets_config):\n super(Augmentation,self).__init__()\n self.rotate_angle = datasets_config['rotate_angle']\n self.GaussianBlur_sigma = datasets_config['GaussianBlur_sigma']\n self.scale_size = (datasets_config['scale_size'],datasets_config['scale_size'])\n \n def __call__(self, sample):\n image, point= sample['image'], sample['point']\n keypoints=ia.KeypointsOnImage([\n ia.Keypoint(x=int(point[0]), y=int(point[1]))], \n shape=image.shape)\n \n seq=iaa.Sequential([\n iaa.Affine(\n rotate=(-self.rotate_angle,self.rotate_angle)),\n iaa.Resize(self.scale_size,\n interpolation='cubic'),\n #iaa.GaussianBlur(\n # sigma=iap.Uniform(0.0, self.GaussianBlur_sigma))\n \n ])\n # augmentation choices\n seq_det = seq.to_deterministic()\n\n image_aug = seq_det.augment_images([image])[0]\n keypoints = seq_det.augment_keypoints([keypoints])[0]\n return {'image': image_aug, 'point':(keypoints.keypoints[0].x,keypoints.keypoints[0].y)}\n \ndef get_transform():\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \n \n \n ])\n return transform\ndef addNoisy(img,Noisy_prob,SNR,sigma):\n prob = np.random.choice(range(0, 100))/100.0\n #print(\"prob:\",prob)\n if prob < Noisy_prob:\n return spNoisy(img, SNR)\n else:\n return GaussieNoisy(img,sigma)\n \ndef spNoisy(img, SNR):\n img_ = img.copy()\n c, h, w = img_.shape\n mask = np.random.choice((0, 1, 2), size=(c, h, w), p=[SNR, (1 - SNR) / 2., (1 - SNR) / 2.])\n img_[mask == 1] = 255 # 盐噪声\n img_[mask == 2] = 0 # 椒噪声\n return img_\ndef GaussieNoisy(image,sigma):\n row,col,ch= image.shape\n mean = 0\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n noisy = image + gauss\n return noisy.astype(np.uint8)\n\n\n# In[5]:\n\n\nclass AGE_DataSet(torch.utils.data.Dataset):\n def __init__(self,Training100_Location,datasets_config):\n super(AGE_DataSet,self).__init__()\n self.dataset_path = datasets_config['dataset_path']\n self.img_size = datasets_config['img_size']\n self.SNR = datasets_config['SNR']\n self.Noisy_prob = datasets_config['Noisy_prob']\n self.GaussianBlur_sigma = datasets_config['GaussianBlur_sigma']\n self.ASOCT_Name = Training100_Location['ASOCT_Name']\n self.Left_Label = Training100_Location['Left_Label']\n self.Right_Label = Training100_Location['Right_Label']\n self.X1 = Training100_Location['X1']\n self.Y1 = Training100_Location['Y1']\n self.X2 = 
Training100_Location['X2']\n self.Y2 = Training100_Location['Y2']\n self.len = len(Training100_Location)\n self.img_shape = get_img(Training100_Location['ASOCT_Name'][0],self.dataset_path).shape\n self.Augmentation = Augmentation(datasets_config)\n self.transform = get_transform()\n \n \n def resize_img(self,img,point,is_left):\n if is_left:\n img = img[0:self.img_size,0:self.img_size,:]\n else :\n shape = img.shape\n img = img[0:self.img_size,img.shape[1]-self.img_size-1:-1,:]\n point = (point[0] - (shape[1] - self.img_size),point[1])\n return img,point\n \n def __getitem__(self,index):\n idx = (index// 2)%self.len\n img = get_img(self.ASOCT_Name[idx],self.dataset_path)\n label = self.Left_Label[idx] if index % 2 == 0 else self.Left_Label[idx]\n point = (self.X1[idx],self.Y1[idx]) if index % 2 == 0 else (self.X2[idx],self.Y2[idx])\n img,point = self.resize_img(img,point,index%2 == 0)\n \n # use self.transform for input images\n sample = self.Augmentation({'image':img,'point':point})\n img,point = sample['image'],sample['point']\n img = addNoisy(img,self.Noisy_prob,self.SNR,self.GaussianBlur_sigma)\n img = self.transform(img)\n #return {'image':img,'point':point,'label':label}\n return (img,point,label)\n def __len__(self):\n return self.len*2\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[6]:\n\n\ndef get_dataloader():\n dataset_path = datasets_config['dataset_path']\n Training100_Location_dir = os.path.join(dataset_path,'Training100','Training100_Location.xlsx')\n Training100_Location = pd.read_excel(Training100_Location_dir)\n num_workers = datasets_config['num_workers']\n batch_size = datasets_config['batch_size']\n age_dataset = AGE_DataSet(Training100_Location,datasets_config)\n dataloader=torch.utils.data.DataLoader(age_dataset, \n batch_size=batch_size, \n shuffle=True,\n num_workers=num_workers)\n img,point,_ = age_dataset[0]\n print(img.shape)\n #show_label(img,point)\n return dataloader\n\n\n# In[7]:\n\n\n#dataloader = get_dataloader()\n#for i,(img,point,label) in enumerate(dataloader):\n# print(i)\n \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n"
}
] | 3 |
claudds/comp472-nlp | https://github.com/claudds/comp472-nlp | 875b46a93092953565431aad4fa48c079661fc00 | 0e1eb9a7798226ed2ddc0f3dd15dbb48fd0ec0eb | d387379e7736c6e85ba9c740c5306e8a4b953442 | refs/heads/master | 2020-04-08T05:26:14.175521 | 2018-12-01T18:09:16 | 2018-12-01T18:09:16 | 159,060,447 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.597111701965332,
"alphanum_fraction": 0.613910973072052,
"avg_line_length": 39.153846740722656,
"blob_id": "24053d2e0fe6efdbdd882d9b99355442905802fc",
"content_id": "1b865b8d6faf625ba34dcd7888340c7200b02fac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6786,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 169,
"path": "/SentenceClassifier.py",
"repo_name": "claudds/comp472-nlp",
"src_encoding": "UTF-8",
"text": "from collections import Counter\nfrom math import log10\nimport re\n\ndef readFileTrain(filename):\n with open(filename, 'r') as file:\n #read in the whole file\n data = file.read()\n data = re.sub(r'[^a-z ]','',data)\n return data.lower()\n\ndef readFileTest(filename):\n testSentences = []\n with open(filename, 'r') as file:\n #read in the whole file\n testSentences = [line for line in file.read().split('\\n')]\n return testSentences\n\n\ndef unigramTrain(text, characters, outputFile, smoothing):\n text = re.sub(' ','',text)\n probabilities = {x: '' for x in characters}\n letterCounts = Counter(text)\n numberOfChars = len(text)\n\n if smoothing != 0:\n numberOfChars += (smoothing*numberOfChars)\n\n for letter in probabilities:\n probabilities[letter] = (letterCounts[letter]+smoothing)/numberOfChars\n\n with open(outputFile, 'w') as file:\n for letter in probabilities:\n file.write(\"P(\" + letter + \") = \" + str(probabilities[letter]) + '\\n')\n \n return probabilities\n\ndef bigramTrain(text, characters, outputFile, smoothing):\n pairs = {}\n letters = {}\n for c1 in characters:\n letters[c1]=0\n pairs[c1]={}\n for c2 in characters:\n pairs[c1][c2]=0\n \n for i in range(0, len(text)-1):\n if(text[i]==' ' or text[i+1]==' '):\n continue\n pairs[text[i]][text[i+1]]+=1\n letters[text[i]]+=1\n probabilities = {}\n\n for p1 in pairs.keys():\n probabilities[p1]={}\n for p2 in pairs[p1].keys():\n probabilities[p1][p2] = (pairs[p1][p2]+smoothing)/(smoothing*len(letters)+letters[p1])\n \n with open(outputFile, 'w') as file:\n for p1 in probabilities.keys():\n for p2 in probabilities[p1].keys():\n file.write(\"P(\" + p2 + \"|\" + p1 + \") = \" + str(probabilities[p1][p2]) + '\\n')\n \n return probabilities\n\n## Probability of each language should be 2/6 since there's 2 texts for each\ndef unigramTest(frModel, enModel, itModel, testString, filename):\n probEn = (2/6)\n probFr = (2/6)\n probIt = (2/6)\n\n testDict = {}\n testDict[\"English\"] = log10(probEn)\n testDict[\"French\"] = log10(probFr)\n testDict[\"Italian\"] = log10(probIt)\n\n with open(filename, 'w') as file:\n file.write(testString + \"\\n\\nUNIGRAM MODEL:\\n\")\n testString = testString.lower()\n print(testString)\n\n testString = re.sub(r'[^a-z ]','',testString)\n\n with open(filename, 'a') as file:\n for char in testString:\n if char.isalpha():\n file.write(\"\\nUnigram: \" + char + \"\\n\")\n testDict[\"English\"] += log10(enModel[char])\n testDict[\"French\"] += log10(frModel[char])\n testDict[\"Italian\"] += log10(itModel[char])\n\n file.write(\"English: P(\" + char + \") =\" + str(log10(enModel[char])) + \" ==> log prob of sentence so far: \" + str(testDict[\"English\"]) + \"\\n\")\n file.write(\"French: P(\" + char + \") =\" + str(log10(frModel[char])) + \" ==> log prob of sentence so far: \" + str(testDict[\"French\"])+ \"\\n\")\n file.write(\"Italian: P(\" + char + \") =\" + str(log10(itModel[char])) + \" ==> log prob of sentence so far: \" + str(testDict[\"Italian\"])+ \"\\n\")\n\n maxProb = max(testDict, key=testDict.get)\n with open(filename, 'a') as file:\n file.write(\"\\nAccording to the unigram model, the sentence is in \" + maxProb)\n return maxProb\n\ndef bigramTest(frModel, enModel, itModel, testString, filename):\n probEn = (2/6)\n probFr = (2/6)\n probIt = (2/6)\n\n testDict = {}\n testDict[\"English\"] = log10(probEn)\n testDict[\"French\"] = log10(probFr)\n testDict[\"Italian\"] = log10(probIt)\n \n testString = testString.lower()\n testString = re.sub(r'[^a-z ]','',testString)\n\n with open(filename, 
'a') as file:\n file.write(\"\\n---------------- \\nBIGRAM MODEL:\\n\")\n for i in range(0, len(testString)-1):\n if(testString[i]==' ' or testString[i+1]==' '):\n continue\n enProb = log10(enModel[testString[i]][testString[i+1]])\n frProb = log10(frModel[testString[i]][testString[i+1]])\n itProb = log10(itModel[testString[i]][testString[i+1]])\n\n file.write(\"\\nBigram: \" + testString[i]+testString[i+1] + \"\\n\")\n testDict[\"English\"] += enProb\n testDict[\"French\"] += frProb\n testDict[\"Italian\"] += itProb\n\n file.write(\"English: P(\" + testString[i+1] + \"|\" + testString[i] + \") =\" + str(enProb) + \" ==> log prob of sentence so far: \" + str(testDict[\"English\"]) + \"\\n\")\n file.write(\"French: P(\" + testString[i+1] + \"|\" + testString[i] + \") =\" + str(frProb) + \" ==> log prob of sentence so far: \" + str(testDict[\"French\"])+ \"\\n\")\n file.write(\"Italian: P(\" + testString[i+1] + \"|\" + testString[i] + \") =\" + str(itProb) + \" ==> log prob of sentence so far: \" + str(testDict[\"Italian\"])+ \"\\n\")\n \n maxProb = max(testDict, key=testDict.get)\n with open(filename, 'a') as file:\n file.write(\"\\nAccording to the unigram model, the sentence is in \" + maxProb)\n return maxProb\n\ncharacters = list(re.sub(' ','',readFileTrain(\"train/character-set.txt\")))\ntestSentences = readFileTest(\"test/test-sentences.txt\")\n\n## English training\ntextE1 = readFileTrain(\"train/en-moby-dick.txt\")\ntextE2 = readFileTrain(\"train/en-the-little-prince.txt\")\ntrainingText = textE1 + \" \" + textE2\nenUnigramModel = unigramTrain(trainingText, characters, \"models/unigramEN.txt\", 0.5)\nenBigramModel = bigramTrain(trainingText, characters, \"models/bigramEN.txt\", 0.5)\n\n## French Training\ntextF1 = readFileTrain(\"train/fr-le-petit-prince.txt\")\ntextF2 = readFileTrain(\"train/fr-vingt-mille-lieues-sous-les-mers.txt\")\ntrainingText = textF1 + \" \" + textF2\nfrUnigramModel = unigramTrain(trainingText, characters, \"models/unigramFR.txt\", 0.5)\nfrBigramModel = bigramTrain(trainingText, characters, \"models/bigramFR.txt\", 0.5)\n\n\n## Italian Training\ntextI1 = readFileTrain(\"train/it-il-trono-di-spade.txt\")\ntextI2 = readFileTrain(\"train/it-la-divina-commedia.txt\")\ntrainingText = textI1 + \" \" + textI2\nitUnigramModel = unigramTrain(trainingText, characters, \"models/unigramOT.txt\", 0.5)\nitBigramModel = bigramTrain(trainingText, characters, \"models/bigramOT.txt\", 0.5)\n\ncounter = 1\nfor sentence in testSentences:\n filename = \"output/out\" + str(counter) + \".txt\" \n unigramResult = unigramTest(frUnigramModel, enUnigramModel, itUnigramModel, sentence, filename)\n bigramResult = bigramTest(frBigramModel, enBigramModel, itBigramModel, sentence, filename)\n print(\"According to the unigram model, the sentence is in \" + unigramResult)\n print(\"According to the bigram model, the sentence is in \" + bigramResult+'\\n')\n counter += 1\n"
}
] | 1 |
atsb/Py2do | https://github.com/atsb/Py2do | 051c8e97f18551064755aa4fbf91b056754a0187 | 12d9e1a85e969aa8bece7c34a5e1246649556be0 | c1407b78b52e3480c2936bec50855a72ab39eb57 | refs/heads/master | 2016-08-12T03:04:02.793097 | 2015-10-26T23:29:04 | 2015-10-26T23:29:04 | 45,004,109 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7757575511932373,
"alphanum_fraction": 0.7757575511932373,
"avg_line_length": 40.25,
"blob_id": "eec3f2faff60c21d16550bd17bd0915e2ef37753",
"content_id": "557571e602ede5de753f7a4790a9441aac8184e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 165,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 4,
"path": "/README.md",
"repo_name": "atsb/Py2do",
"src_encoding": "UTF-8",
"text": "# PyDo\nA small todo list using prettytable and sqlite.\n\nOn first use, create a folder called 'database' and run the program, follow the menu to create the database.\n"
},
{
"alpha_fraction": 0.5779637098312378,
"alphanum_fraction": 0.5821478366851807,
"avg_line_length": 33.14285659790039,
"blob_id": "1e771ae8a12ab297f8055b18df7f2ba9faefd75b",
"content_id": "5e837fa4c5176e1113c404e9313de70aef1d71d8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3585,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 105,
"path": "/dodo.py",
"repo_name": "atsb/Py2do",
"src_encoding": "UTF-8",
"text": "import sqlite3\nimport os\nimport sys\nfrom prettytable import PrettyTable\n\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef exit_program():\n sys.exit(0)\n\n\ndef list_tasks():\n clear()\n dodo_sql_connect = sqlite3.connect('./database/dodo.db')\n with dodo_sql_connect:\n dodo_sql_cursor = dodo_sql_connect.cursor()\n dodo_sql_cursor.execute('''SELECT * FROM dodo WHERE STATUS!='1';''')\n dodo_column_names = [cn[0] for cn in dodo_sql_cursor.description]\n dodo_sql_table_data = dodo_sql_cursor.fetchall()\n prettytable = PrettyTable(dodo_column_names)\n prettytable.align[dodo_column_names[1]] = \"l\"\n prettytable.align[dodo_column_names[2]] = \"r\"\n prettytable.padding_width = 1\n for dodo in dodo_sql_table_data:\n prettytable.add_row(dodo)\n print(prettytable)\n raw_input(\"Press any key to continue.. \")\n clear()\n\n\ndef add_task():\n clear()\n dodo_sql_connect = sqlite3.connect('./database/dodo.db')\n dodo_enter_id = raw_input(\"Please choose an ID: \")\n dodo_enter_task_id = raw_input(\"Please choose a Task Name: \")\n dodo_enter_project_id = raw_input(\"Which Project does this task belong to: \")\n dodo_enter_status_id = raw_input(\"Is this task ongoing or completed? (0 for open and 1 for completed): \")\n with dodo_sql_connect:\n dodo_sql_cursor = dodo_sql_connect.cursor()\n dodo_sql_cursor.execute('''INSERT INTO dodo (ID, TASK, PROJECT, STATUS) values (?,?,?,?);''',\n (dodo_enter_id, dodo_enter_task_id, dodo_enter_project_id, dodo_enter_status_id))\n dodo_sql_connect.commit()\n clear()\n\n\ndef complete_task():\n clear()\n dodo_enter_id = raw_input(\"Enter the Task ID which you want to complete: \")\n dodo_sql_connect = sqlite3.connect('./database/dodo.db')\n with dodo_sql_connect:\n dodo_sql_cursor = dodo_sql_connect.cursor()\n dodo_sql_cursor.execute('''UPDATE dodo SET STATUS='1' where ID=?;''', dodo_enter_id)\n dodo_sql_connect.commit()\n clear()\n\n\ndef main():\n clear()\n print(\"+----------------------------+\")\n print(\"| Daily Todo Manager (dodo) |\")\n print(\"+----------------------------+\")\n dodo_menu_pt = PrettyTable([\"Key\", \"Description\"])\n dodo_menu_pt.align[\"Key\"] = \"c\"\n dodo_menu_pt.align[\"Description\"] = \"l\"\n dodo_menu_pt.padding_width = 1\n dodo_menu_pt.add_row([\"a\", \"Add a New Task\"])\n dodo_menu_pt.add_row([\"c\", \"Complete a Task\"])\n dodo_menu_pt.add_row([\"l\", \"List all Open Tasks\"])\n dodo_menu_pt.add_row([\"init\", \"Initialise Database\"])\n dodo_menu_pt.add_row([\"q\", \"Quit the Program\"])\n print(dodo_menu_pt)\n dodo_main_menu = raw_input(\"Option: \")\n if dodo_main_menu == 'a':\n add_task()\n main()\n if dodo_main_menu == 'l':\n list_tasks()\n main()\n if dodo_main_menu == 'c':\n complete_task()\n # subprocess.call([\"./completetask.sh\"])\n main()\n if dodo_main_menu == 'q':\n return\n exit_program()\n if dodo_main_menu == 'init':\n dodo_sql_connect = sqlite3.connect('./database/dodo.db')\n with dodo_sql_connect:\n dodo_sql_cursor = dodo_sql_connect.cursor()\n dodo_sql_cursor.execute(\n '''CREATE TABLE dodo (ID integer primary key, TASK text, PROJECT text, STATUS text);'''\n )\n dodo_sql_connect.commit()\n print(\"Database File Created\")\n main()\n else:\n print(\"Invalid Selection\")\n main()\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
karry3775/Estimation | https://github.com/karry3775/Estimation | 9cf5450a1da24570a12052f41139b15f9000be0b | 05beb05e79ba1e4eb08e3d2959779fbbbefcb24b | b80f80dcfd30fd6f77c546bb5f537a367cf7f3ba | refs/heads/master | 2020-07-05T23:10:09.381542 | 2019-08-18T18:14:17 | 2019-08-18T18:14:17 | 202,812,153 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6544217467308044,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 28.399999618530273,
"blob_id": "e3b20c89250edd21a78993096267b9d1b4686b28",
"content_id": "e0eba92ea494fef9ec731608a7b40a24dd3c8e31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 735,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 25,
"path": "/least_squares_estimation.py",
"repo_name": "karry3775/Estimation",
"src_encoding": "UTF-8",
"text": "########\n# BASIC CODE FOR ESTIMATION OF MEASUREMENTS USING LEAST SQUARES\n########\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.array([[1000,1200,998,800,400]]).T\nfor i in range(len(data)):\n plt.plot(i,data[i],'rx',label=\"MEASURED VALUE\")\n\nH = np.array([[1,1,1,1,1]]).T\nk = np.matmul(H.T,H)\np = np.linalg.inv(k)\n# print(np.matmul(np.array(1/(int(np.matmul(H.T,H)))),H.T))\nestimate = np.matmul(np.matmul(p,H.T),data)\nplt.plot(i,estimate,'cx',label=\"ESTIMATED VALUE\")\n\"\"\"\nTO PREVENT REPEATED LABELS FOR SAME LABEL TAG\n\"\"\"\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = OrderedDict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys())\n\nplt.show()\n"
},
{
"alpha_fraction": 0.5316863656044006,
"alphanum_fraction": 0.5628356337547302,
"avg_line_length": 18.39583396911621,
"blob_id": "7f13bd6ce3206052abb2eba88747584fffc2b991",
"content_id": "e2217aa0b2e75a6dd2615f0b115e2ab46fa2a656",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 48,
"path": "/Kalman_Filter.py",
"repo_name": "karry3775/Estimation",
"src_encoding": "UTF-8",
"text": "#############\n# SIMPLE IMPLEMENTATION OF KALMAN FILTER\n#############\nfrom __future__ import division\nimport numpy as np\n\n# define the initial information\ndt = 0.5\n#motion model\nF = np.array([[1, dt],\n [0, 1]])\n#measurement matrix\nG = np.array([[0, dt]]).T\n#Initial Uncertainty in state\nP = np.array([[0.01, 0],\n [0, 1]])\n# Motion noise\nQ = np.array([[0.1, 0],\n [0, 0.1]])\n#Measurement noise\nR = np.array([[0.05]])\n#initial state\nx = np.array([[0, 5]]).T\n\n#measurement model H\nH = np.array([[1,0]])\n\n#control signal\nu = -2\n\n#measurement\ny = 2.2\n#PREDICTION\nx = F.dot(x) + G.dot(u)\nP = F.dot(P).dot(F.T) + Q\nprint(\"P: {}\".format(P))\n\n#CALCULATION OF OPTIMAL GAIN\nK = P.dot(H.T).dot(np.linalg.inv(H.dot(P).dot(H.T) + R))\nprint(\"K: {}\".format(K))\n\n#CORRECTION STEP\nx = x + K.dot(y - H.dot(x))\nP = (np.eye(2) - K.dot(H)).dot(P)\n\n#printing values\nprint(\"x: {}\".format(x))\nprint(\"P: {}\".format(P))\n"
}
] | 2 |
bamthebot/bang_central | https://github.com/bamthebot/bang_central | 0dccf11b7a280e7f0598e6586735cf12581e68b7 | 8ff57b8e524c14ec677dc4e2688f9589073ef5ff | 04b97c6a33f378de850adb038851f31f8e658965 | refs/heads/master | 2020-05-22T16:04:53.331708 | 2019-10-18T11:48:15 | 2019-10-18T11:48:15 | 186,421,511 | 0 | 0 | null | 2019-05-13T13:06:57 | 2019-10-03T12:14:34 | 2019-10-18T11:48:16 | Python | [
{
"alpha_fraction": 0.6897546648979187,
"alphanum_fraction": 0.6897546648979187,
"avg_line_length": 37.5,
"blob_id": "b649dffcee14eaf0bb67a8014743a602a8d98909",
"content_id": "cafab5877e6a15420903d54feb5f55226454e483",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 18,
"path": "/bangs/urls.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom . import views, api_views\n\n# Register api urls\nrouter = DefaultRouter()\nrouter.register(r'twitch_users', api_views.TwitchUserViewSet, basename=\"twitch_user\")\nrouter.register(r'bangs', api_views.BangViewSet, basename=\"bangs\")\n\nurlpatterns = [\n path('bot/login/', views.login_view, name='login'),\n path('bot/bangs/', views.bangs, name='bangs'),\n path('bot/blasts/', views.blasts, name='blasts'),\n path('bot/prefix/', views.prefix, name='prefix'),\n path('bot/api/', include((router.urls, 'bangs-api'))),\n path('home/', views.home, name='home'),\n path('', views.home, name='home'),\n]\n"
},
{
"alpha_fraction": 0.6920052170753479,
"alphanum_fraction": 0.693315863609314,
"avg_line_length": 29.520000457763672,
"blob_id": "b04852debc2a3f70e3895c47bf5b4413e9229ea4",
"content_id": "ff753406cadc36356c88dcc9c2ff4f8dd255ed52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1526,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 50,
"path": "/bangs/forms.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.forms import inlineformset_factory, ModelForm\nfrom .models import TwitchUser, Bang, Blast\n\n\nBangInlineFormset = inlineformset_factory(\n User, Bang, fields=(\"command\", \"response\"), extra=1\n)\n\n\nBlastInlineFormset = inlineformset_factory(\n TwitchUser, Blast, fields=(\"name\", \"value\"), extra=1\n)\n\n\nclass CommandPrefixForm(ModelForm):\n class Meta:\n model = TwitchUser\n fields = [\"command_character\"]\n\n\ndef set_form_styles(form):\n for field in form:\n if field.name == \"DELETE\":\n field.field.widget.attrs.update({\"class\": \"form-check-input\"})\n continue\n field.field.widget.attrs.update({'class': 'form-control'})\n\n\ndef set_formset_styles(formset):\n for form in formset:\n set_form_styles(form)\n\n\ndef get_user_formsets(request, user, twitch_user):\n bang_formset = BangInlineFormset(instance=user)\n blast_formset = BlastInlineFormset(instance=twitch_user)\n set_formset_styles(bang_formset)\n set_formset_styles(blast_formset)\n return {\"bang_formset\": bang_formset, \"blast_formset\": blast_formset}\n\n\ndef post_user_formsets(request, user, formset_type=\"bang\"):\n twitch_user = TwitchUser.objects.get(user=user)\n if formset_type == \"bang\":\n formset = BangInlineFormset(request.POST, request.FILES, instance=user)\n elif formset_type == \"blast\":\n formset = BlastInlineFormset(request.POST, request.FILES, instance=twitch_user)\n if formset.is_valid():\n formset.save()\n"
},
{
"alpha_fraction": 0.594472348690033,
"alphanum_fraction": 0.5982412099838257,
"avg_line_length": 38.790000915527344,
"blob_id": "038103d13d00073f0d23001c62a4bfa1ec8d6912",
"content_id": "3ba2dbec1eaad19a92f4770a3a31849af614264c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3980,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 100,
"path": "/bangs/utils/twitchbot/super_commands.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\nimport database_utils\n\n\ndef _get_ids(game, category, variable=False, value=False):\n game_url = 'https://www.speedrun.com/api/v1/games/{}'.format(game)\n game_req = requests.get(game_url)\n game_data = json.loads(game_req.text)['data']\n game_id = game_data['id']\n\n categories_uri, variables_uri =\\\n [x['uri']\n for x in list(filter(lambda d: d['rel'] == 'variables' or d['rel'] == 'categories', game_data['links']))]\n\n categories_req = requests.get(str(categories_uri))\n categories_data = json.loads(categories_req.text)['data']\n category_data = list(filter(lambda d: category == d['name'], categories_data))[0]\n category_id = category_data['id']\n\n if variable:\n variables_req = requests.get(str(variables_uri))\n variables_data = json.loads(variables_req.text)['data']\n variable_data = list(filter(lambda d: variable == d['name'], variables_data))[0]\n variable_id = variable_data['id']\n if value:\n values_data = variable_data['values']['values']\n value_id = [k for k,v in values_data.items() if v['label'] == value][0]\n return game_id,category_id, variable_id,value_id\n return game_id,category_id, variable_id\n return game_id,category_id\n\n\ndef get_top_str(game, category, variable=False, value=False):\n url = 'https://www.speedrun.com/api/v1/leaderboards'\n if value and variable:\n game_id, category_id, variable_id, value_id = _get_ids(game, category, variable, value)\n leaderboard_url = '{}/{}/category/{}?var-{}={}'.format(url, game_id, category_id, variable_id, value_id)\n else:\n print(game, category)\n game_id, category_id = _get_ids(game, category)\n print(game_id, category_id)\n # Fix for % in categories\n if '%' in category and category.strip('%').isdigit():\n category_id = category.strip('%')\n leaderboard_url = '{}/{}/category/{}'.format(url, game_id, category_id)\n\n req = requests.get(leaderboard_url)\n runs_data = json.loads(req.text)['data']['runs']\n runs = [run for run in runs_data if int(run['place'])<=5]\n\n ret = ''\n for run in runs:\n run_place = int(run['place'])\n run_time = run['run']['times']['primary'][2:].lower()\n\n run_user_uri = run['run']['players'][0]['uri']\n req = requests.get(run_user_uri)\n user_name = json.loads(req.text)['data']['names']['international']\n ret += '{}) {}: {} '.format(run_place, user_name, run_time)\n return ret\n\n\ndef existing_super_commands():\n return ['lb', 'commands', 'mute']\n\n\ndef existing_commands(id):\n db = '../../../db.sqlite3'\n user_commands_pairs = database_utils.get_commands(db, id)\n user_commands = [command for command,response in user_commands_pairs]\n return user_commands + existing_super_commands()\n\n\ndef super_command(command, id):\n command_root = command.split(\" \")[0]\n if command_root.strip() == 'lb':\n if len(command[4:].split('/')) < 2:\n return 'Please add enough parameters. You have to call lb this way: lb game/category(/variable-name/variable-value). (Examples: lb botw/Any% , lb botw/Any%/Amiibo/No Amiibo). Every game/category/variable name has to be the same as the one speedrun.com uses. Have fun!'\n print('lb called!')\n params = command[3:].split('/')\n print(params)\n try:\n if len(params) == 4:\n g, c, v, va = params\n return get_top_str(g,c,v,va)\n else:\n g, c = params[:2]\n return get_top_str(g,c)\n except KeyError:\n return \"Couldn't find any run like that. 
Try another formatting.\"\n elif command_root.strip() == 'commands':\n all_c = existing_commands(id)\n print(type(all_c))\n return all_c\n\n\nif __name__=='__main__':\n print(_get_ids('botw', 'Any%', 'Amiibo', 'No Amiibo'))\n print(get_top_str('botw', 'Any%', 'Amiibo'))\n\n"
},
{
"alpha_fraction": 0.7031700015068054,
"alphanum_fraction": 0.7031700015068054,
"avg_line_length": 27.91666603088379,
"blob_id": "bdc0b0f2910fd7edfcce62b6ff39e47938926571",
"content_id": "9873ed45a393c553c6fae29e6a281f4964b7fcca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 12,
"path": "/README.md",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "# 🔵 Bang Central\n[![Build Status](https://travis-ci.org/bamthebot/bang_central.svg?branch=master)](https://travis-ci.org/bamthebot/bang_central)\n\n> Where all your bangs will be stored.\n\nThis is the project that will hold all the information regarding to twitch and how the bots should behave.\n\n## TODO:\n- [ ] Tests\n- [ ] CSS\n- [ ] CI/CD\n- [ ] JWT\n"
},
{
"alpha_fraction": 0.5508196949958801,
"alphanum_fraction": 0.5792349576950073,
"avg_line_length": 31.678571701049805,
"blob_id": "edb120ba1d09bcdda3764ad04e80fbea033fafc4",
"content_id": "830e3d1096af8926e6173fdc9ebd6c175540d34c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 28,
"path": "/bangs/migrations/0002_auto_20191003_1150.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-10-03 11:50\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bangs', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='twitchuser',\n name='command_character',\n field=models.CharField(default='!', max_length=1),\n ),\n migrations.CreateModel(\n name='Blast',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=500)),\n ('value', models.CharField(max_length=500)),\n ('twitch_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blasts', to='bangs.TwitchUser')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.518610417842865,
"alphanum_fraction": 0.578163743019104,
"avg_line_length": 22.705883026123047,
"blob_id": "9d4e2795f6f4b0b216de536072d0c7db1507bf22",
"content_id": "f56d23c076cd2e3ba775449a9d6c39eb1b357b41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 17,
"path": "/docker-compose.yaml",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "version: '3.3'\nservices:\n web:\n restart: always\n build: .\n volumes:\n - .:/home/bam/bangcentral\n - /static:/home/bam/bangcentral/static/\n - ./config/nginx:/etc/nginx/conf.d\n environment:\n - CLIENT_ID\n - CLIENT_SECRET\n - ALLOWED_HOST='*'\n - REDIRECT_URI='http://127.0.0.1:8000/bot/login'\n ports:\n - \"8000\"\n command: './manage.py runserver 0.0.0.0:8000'\n"
},
{
"alpha_fraction": 0.7591397762298584,
"alphanum_fraction": 0.7612903118133545,
"avg_line_length": 45.5,
"blob_id": "9dde103ada08cd1b9deeeda1e12ab7a7f6496896",
"content_id": "2d9520a5153f2e186e7234493827e8f95be1b0b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 10,
"path": "/scripts/setup_production_host.sh",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nPRODUCTION_HOST=$1\nBANG_CENTRAL_PATH=\"/home/ubuntu/bang_central/\"\ncd ..\necho \"Setting up repo at $PRODUCTION_HOST\"\nssh ubuntu@$PRODUCTION_HOST \"sudo rm -rf $BANG_CENTRAL_PATH\"\nssh ubuntu@$PRODUCTION_HOST \"git clone [email protected]:bamthebot/bang_central.git $BANG_CENTRAL_PATH\"\necho \"Initializing server at $PRODUCTION_HOST\"\nscp .env ubuntu@$PRODUCTION_HOST:/home/ubuntu/bang_central/.env\nssh ubuntu@$PRODUCTION_HOST \"cd $BANG_CENTRAL_PATH && ./init.sh\"\n"
},
{
"alpha_fraction": 0.770588219165802,
"alphanum_fraction": 0.770588219165802,
"avg_line_length": 33,
"blob_id": "1fee8f07299e2f86d2f92229f3de8388453fc328",
"content_id": "00d1b24f395152c14ac11b9a1b2f6b0b00fa9c9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 5,
"path": "/init.sh",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nsource .env\ndocker-compose -f docker-compose.prod.yaml kill\ndocker-compose -f docker-compose.prod.yaml rm -f\ndocker-compose -f docker-compose.prod.yaml up -d\n"
},
{
"alpha_fraction": 0.8070175647735596,
"alphanum_fraction": 0.8070175647735596,
"avg_line_length": 27.5,
"blob_id": "12c20765cc425130d2ccc5c43a241c4128066ee3",
"content_id": "d5bb7c6ddf6411a59672aa568c01f748f979bfcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 2,
"path": "/bangs/README.md",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "# burritobot\nA twitch chatbot with speedrunners in mind.\n"
},
{
"alpha_fraction": 0.8571428656578064,
"alphanum_fraction": 0.8730158805847168,
"avg_line_length": 11.600000381469727,
"blob_id": "9579da1206c652608a01de1f0fdf89a6fb2e1ffc",
"content_id": "68e563f3ef9f75038c81421eb4c4fe4717564b6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "requests\ndjango\ndjango-rest-framework\ngunicorn\npsycopg2-binary\n"
},
{
"alpha_fraction": 0.7444933652877808,
"alphanum_fraction": 0.7488986849784851,
"avg_line_length": 19.636363983154297,
"blob_id": "2af544e568c86ffe551a531d1e8e8adebfd20fd5",
"content_id": "a30142bfc6399e5f6ca0bbb3bb3450b3791612bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 11,
"path": "/scripts/deploy.sh",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nTAG=$1\n\n# Build\ncd ..\ndocker build . -t bang_central\n\n# Push\ndocker login -u $DOCKERHUB_USER -p $DOCKERHUB_PASSWORD \ndocker tag bang_central twitchbambot/bang_central:$TAG\ndocker push twitchbambot/bang_central:$TAG\n"
},
{
"alpha_fraction": 0.7478532195091248,
"alphanum_fraction": 0.7478532195091248,
"avg_line_length": 31.024999618530273,
"blob_id": "7937d5fcc78700858a403bc4167e608198418c77",
"content_id": "0eb1051b703997f8f497037e5630806dda9890cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1281,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 40,
"path": "/bangs/api_views.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from rest_framework.authentication import SessionAuthentication, TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import viewsets\nfrom rest_framework import serializers\n\nfrom .models import TwitchUser, Bang, Blast\n\n\nclass BlastSerializer(serializers.ModelSerializer):\n class Meta:\n model = Blast\n fields = (\"name\", \"value\")\n\n\nclass TwitchUserSerializer(serializers.ModelSerializer):\n blasts = BlastSerializer(many=True)\n\n class Meta:\n model = TwitchUser\n fields = ('twitch_id', 'twitch_name', 'email', 'access_token', 'user', 'command_character', 'blasts')\n\n\nclass TwitchUserViewSet(viewsets.ReadOnlyModelViewSet):\n authentication_classes = (SessionAuthentication, TokenAuthentication)\n permission_classes = (IsAuthenticated,)\n serializer_class = TwitchUserSerializer\n queryset = TwitchUser.objects.all()\n\n\nclass BangSerializer(serializers.ModelSerializer):\n class Meta:\n model = Bang\n fields = ('command', 'response', 'user')\n\n\nclass BangViewSet(viewsets.ReadOnlyModelViewSet):\n authentication_classes = (SessionAuthentication, TokenAuthentication)\n permission_classes = (IsAuthenticated,)\n serializer_class = BangSerializer\n queryset = Bang.objects.all()\n"
},
{
"alpha_fraction": 0.559116005897522,
"alphanum_fraction": 0.5644198656082153,
"avg_line_length": 36.03278732299805,
"blob_id": "92e32a52c73d3fb10fd438ea9e9bc96776fe632d",
"content_id": "99ffcaeb3ec13adc1133ce04f47d4b1b38503b2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4525,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 122,
"path": "/bangs/utils/twitchbot/bot.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "import websockets\nimport asyncio\nimport database_utils\nimport super_commands\nimport traceback\n\nglobal NICK\nglobal COMMAND_PREFIX\nglobal db_location\nglobal RUNNING\n\nNICK = 'burritosr'\nCOMMAND_PREFIX = '!'\ndb_location = '../../../db.sqlite3'\nTOKEN = database_utils.get_access_token(db_location, database_utils.get_id_from_channel(db_location, NICK))\nRUNNING = []\n\n\nasync def twitch_bot(token, channel):\n try:\n async with websockets.connect('wss://irc-ws.chat.twitch.tv:443', ssl=True) as websocket:\n print('NEW SOCKET')\n print('PASS oauth:{}'.format(token.strip()))\n print('NICK {}'.format(NICK))\n await websocket.send('PASS oauth:{}'.format(token.strip()))\n await websocket.send('NICK {}'.format(NICK))\n await websocket.send('JOIN #{}'.format(channel.lower()))\n global connection\n connection = True\n mute = False\n asyncio.sleep(0)\n while connection:\n asyncio.sleep(0)\n try:\n buffer = await asyncio.wait_for(websocket.recv(), timeout=30)\n print(buffer)\n lines = buffer.split('\\n')\n except asyncio.TimeoutError:\n print('TIMEOUT')\n lines = []\n print(lines, channel)\n for line in lines:\n print('new line: {}\\n'.format(channel))\n line = line.strip()\n await check_ping(websocket, line, channel)\n msg = await get_chat_message(line)\n if msg:\n print(mute)\n author = msg[1]\n msg = msg[0]\n print('message: ' + msg, 'author: ' + author, sep='\\n')\n if msg[:5] == '{}mute'.format(COMMAND_PREFIX):\n print('MUTED?')\n mute = not mute\n elif not mute:\n await check_commands(websocket, msg, channel, author)\n except Exception as e:\n if str(e).strip() == 'Event loop is closed':\n await print('Bot Killed')\n else:\n traceback_str = ''.join(traceback.format_tb(e.__traceback__))\n print('SOMETHING OCCURRED, BOT RESTARTING\\nError: {}\\nTraceback:\\n{}'.format(e, traceback_str))\n await twitch_bot(token, channel)\n\n\nasync def check_ping(websocket, line, channel):\n if line == 'PING :tmi.twitch.tv':\n print('PONGED')\n await websocket.send('PONG :tmi.twitch.tv')\n\n\nasync def get_chat_message(line):\n msg = line.split(':')\n if len(msg) >= 2 and 'PRIVMSG' in msg[1]:\n return msg[2:][0] , msg[1].split('!')[0]\n else:\n return False\n\n\nasync def check_commands(websocket, msg, channel, author):\n id = database_utils.get_id_from_channel(db_location, channel)\n if msg and msg[0] == COMMAND_PREFIX:\n response = database_utils.get_response(db_location, msg[1:], id)\n print(response)\n if response:\n if response[1:].split(\" \")[0] in super_commands.existing_super_commands():\n print('CHAINED')\n return await check_commands(websocket, response, channel, author)\n else:\n return await send_chat_msg(websocket, channel, response)\n elif msg[1:].split(\" \")[0] in super_commands.existing_super_commands():\n response = super_commands.super_command(msg[1:], id)\n if response:\n print(response)\n return await send_chat_msg(websocket, channel, response)\n\n\nasync def send_chat_msg(websocket, channel, response):\n await websocket.send('PRIVMSG #{} :{}'.format(channel.lower(), response))\n\n\nasync def _token_channel_pairs():\n user_ids = database_utils.get_user_list(db_location)\n print(user_ids)\n token_channel_pairs = [(database_utils.get_access_token(db_location, i), database_utils.get_channel_from_id(db_location, i)) for i in user_ids]\n return token_channel_pairs\n\n\nasync def main():\n token_channel_pairs = await _token_channel_pairs()\n bots = []\n print(token_channel_pairs)\n for token,channel in token_channel_pairs:\n 
bots.append(asyncio.ensure_future(twitch_bot(TOKEN,channel)))\n RUNNING.append(channel)\n await asyncio.gather(*bots)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n loop.run_forever()\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6501524448394775,
"alphanum_fraction": 0.6730182766914368,
"avg_line_length": 29.511627197265625,
"blob_id": "f1de61cecd60d17f38d557f32196ce11b1601719",
"content_id": "1ca351a805b380d6cff3d2d65d3c1d992ec657e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 43,
"path": "/bangs/models.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass TwitchUser(models.Model):\n twitch_id = models.IntegerField()\n twitch_name = models.CharField(max_length=100)\n email = models.EmailField(max_length=500)\n\n access_token = models.CharField(max_length=200)\n refresh_token = models.CharField(max_length=200)\n expiration_date = models.DateField()\n scope = models.CharField(max_length=500)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n command_character = models.CharField(max_length=1, default=\"!\")\n\n def __str__(self):\n return self.twitch_name\n\n\nclass Bang(models.Model):\n command = models.CharField(max_length=500)\n response = models.CharField(max_length=500)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.command\n\n def clean(self):\n if \"!\" in self.command[0]:\n self.command = self.command[1:]\n\n\nclass Blast(models.Model):\n name = models.CharField(max_length=500)\n value = models.CharField(max_length=500)\n twitch_user = models.ForeignKey(TwitchUser, related_name=\"blasts\", on_delete=models.CASCADE)\n\n def __str__(self):\n return f\"{self.name} -> {self.value}\"\n\n def __repr__(self):\n return f\"{self.name} -> {self.value}\"\n"
},
{
"alpha_fraction": 0.6834361553192139,
"alphanum_fraction": 0.6910299062728882,
"avg_line_length": 35.32758712768555,
"blob_id": "7421fe14cbbce3284260512f5b60bcdad42ea740",
"content_id": "23f6afac35d0e44f7330a2cc520a8630872fc7cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2107,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 58,
"path": "/bangs/utils/twitchbot/database_utils.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\n\ndef get_user_list(db_location):\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n users_qs = cursor.execute(\"SELECT user_id FROM burritobot_twitchuser\").fetchall()\n users = [q[0] for q in users_qs]\n return users\n\n\ndef get_response(db_location, command, id):\n \"\"\"\n :param db_location: location of database.\n :param command: command for which we want a response\n :return: False if command doesn't exist, response if it does exists\n \"\"\"\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n exists = cursor.execute(\"SELECT EXISTS(SELECT 1 FROM burritobot_command WHERE command==? AND user_id=?)\", (command, id))\n exists = exists.fetchone()[0]\n if exists:\n response = cursor.execute(\"SELECT response FROM burritobot_command WHERE command==? AND user_id=?\", (command, id))\n return response.fetchall()[0][0]\n else:\n return False\n\n\ndef get_commands(db_location, id):\n \"\"\"\n :param db_location: location of database.\n :return: returns list containing commands.\n \"\"\"\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n commands = cursor.execute(\"SELECT command,response FROM burritobot_command WHERE user_id=?\", (id, ))\n return [pair for pair in commands.fetchall()]\n\n\ndef get_access_token(db_location, id):\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n token = cursor.execute(\"SELECT (access_token) FROM burritobot_twitchuser WHERE user_id=? LIMIT 1\", (id, )).fetchone()[0]\n return token\n\n\ndef get_id_from_channel(db_location, channel):\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n id = cursor.execute(\"SELECT user_id FROM burritobot_twitchuser WHERE twitch_name=?\",(channel,)).fetchone()[0]\n return id\n\n\ndef get_channel_from_id(db_location, id):\n connection = sqlite3.connect(db_location)\n cursor = connection.cursor()\n channel = cursor.execute(\"SELECT twitch_name FROM burritobot_twitchuser WHERE user_id=?\",(id,)).fetchone()[0]\n return channel\n"
},
{
"alpha_fraction": 0.7167832255363464,
"alphanum_fraction": 0.7552447319030762,
"avg_line_length": 20.923076629638672,
"blob_id": "0c7c9ebb745001148267f6b094a22033cfd43d0b",
"content_id": "f48a8b48d92d1c04ceffa7690a304c223517530a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 13,
"path": "/Dockerfile",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "FROM python:3.7-slim-buster\n\nENV PYTHONUNBUFFERED 1\n\nRUN mkdir -p /home/bam/bangcentral/\nWORKDIR /home/bam/bangcentral\n\nCOPY . /home/bam/bangcentral\n\nRUN python -m venv venv && . venv/bin/activate\nRUN pip install -r requirements.txt\n\nCMD gunicorn --bind 0.0.0.0:8000 bang_central.wsgi \n"
},
{
"alpha_fraction": 0.6675862073898315,
"alphanum_fraction": 0.6717241406440735,
"avg_line_length": 35.20000076293945,
"blob_id": "a33ad7a49a5a1a24b918784a0ac59e39525513d1",
"content_id": "13aad062804e1009ce006c60ef9c392477125636",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 20,
"path": "/bangs/management/commands/refresh_token.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand, CommandError\nfrom bangs.models import TwitchUser\nfrom bangs.utils.auth import refresh_token\n\n\ndef handle(self, *args, **kwargs):\n try:\n bot_user = TwitchUser.objects.get(email=\"[email protected]\")\n except TwitchUser.DoesNotExist:\n print(\"Bot user does not exist\")\n return\n refresh_data = refresh_token(bot_user.refresh_token)\n if 'status' in refresh_data.keys():\n if refresh_data['status'] == 400:\n print('Couldn\\'t refresh token.')\n return\n bot_user.access_token = refresh_data['access_token']\n bot_user.refresh_token = refresh_data['refresh_token']\n bot_user.save()\n print('Token refreshed succesfully!')\n\n"
},
{
"alpha_fraction": 0.6731737852096558,
"alphanum_fraction": 0.6756926774978638,
"avg_line_length": 32.787235260009766,
"blob_id": "b05951577e00d598bb97941edae2e36f7fd2eb58",
"content_id": "f0105bc03b9b36aa80abf10c6b205a19c66ac987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 47,
"path": "/bangs/utils/auth.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "import os\nimport requests\nimport json\n\nCLIENT_ID = os.getenv(\"CLIENT_ID\")\nCLIENT_SECRET_ID = os.getenv(\"CLIENT_SECRET\")\nREDIRECT_URI = os.getenv(\"REDIRECT_URI\")\n\n\ndef authorize_request(scope):\n print(CLIENT_ID, CLIENT_SECRET_ID)\n request = \"https://id.twitch.tv/oauth2/authorize?response_type=code&client_id={}&redirect_uri={}&scope={}\".format(\n CLIENT_ID, REDIRECT_URI, scope\n )\n print(\"Authorization request\", request)\n return request\n\n\ndef token_request(code):\n url = \"https://id.twitch.tv/oauth2/token?client_id={}&client_secret={}&code={}&grant_type=authorization_code&redirect_uri={}\".format(\n CLIENT_ID, CLIENT_SECRET_ID, code, REDIRECT_URI\n )\n request = requests.post(url)\n response_dict = json.loads(request.text)\n print(\"Token request:\", url)\n print(\"Response:\", response_dict)\n return response_dict\n\n\ndef get_user_dict(token):\n url = \"https://api.twitch.tv/helix/users?scope=user:read:email\"\n headers = {\"Client-ID\": CLIENT_ID, \"Authorization\": \"Bearer {}\".format(token)}\n response = requests.get(url, headers=headers).json()[\"data\"][0]\n print(\"User request:\", url)\n print(\"Response:\", response)\n return response\n\n\ndef refresh_token(token):\n url = \"https://id.twitch.tv/oauth2/token?client_id={}&client_secret={}&grant_type=refresh_token&refresh_token={}\".format(\n CLIENT_ID, CLIENT_SECRET_ID, token\n )\n response = requests.post(url)\n print(\"Refresh token request:\", url)\n print(\"Response\", response.text)\n if response.status_code == requests.codes.ok:\n return response.json()\n"
},
{
"alpha_fraction": 0.6050664186477661,
"alphanum_fraction": 0.6050664186477661,
"avg_line_length": 34.674072265625,
"blob_id": "d01a97b34839337617443641aedc1f5dd7e95e1f",
"content_id": "e126e0d6348c6c615f87f8bb894da44c6f6ffcad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4816,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 135,
"path": "/bangs/views.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate\nfrom django.forms import inlineformset_factory\nfrom .models import TwitchUser, Bang\nfrom .forms import (\n BangInlineFormset,\n post_user_formsets,\n get_user_formsets,\n CommandPrefixForm,\n set_form_styles,\n)\nfrom .utils import auth\nimport datetime\n\n\ndef login_view(request):\n if request.user.is_authenticated:\n print(\"AUTHENTICATED\")\n return bangs(request)\n\n if \"code\" in request.GET:\n # Get info from request necessary to make token request\n code = request.GET[\"code\"]\n scope = request.GET[\"scope\"]\n\n # Call auth function to get new auth info\n auth_dict = auth.token_request(code)\n expiration_date = datetime.datetime.now() + datetime.timedelta(\n seconds=int(auth_dict[\"expires_in\"])\n )\n\n # Call auth function to get user info with the token provided by twitch\n user_dict = auth.get_user_dict(auth_dict[\"access_token\"])\n\n try:\n # If the user exists, update it's auth info.\n twitch_user = TwitchUser.objects.get(twitch_id=int(user_dict[\"id\"]))\n user = User.objects.get(username=twitch_user.twitch_id)\n twitch_user.access_token, twitch_user.refresh_token, twitch_user.expiration_date, twitch_user.scope = (\n auth_dict[\"access_token\"],\n auth_dict[\"refresh_token\"],\n expiration_date,\n auth_dict[\"scope\"],\n )\n user.set_password(twitch_user.access_token)\n user.save()\n twitch_user.save()\n\n # Authenticate\n user = authenticate(\n username=twitch_user.twitch_id, password=twitch_user.access_token\n )\n print(\n \"User {} UPDATED! New token: {}\".format(\n twitch_user.twitch_name, twitch_user.access_token\n )\n )\n except TwitchUser.DoesNotExist:\n # If user doesn't exist, create new one with parameters given by auth and user info.\n user = User.objects.create_user(\n username=user_dict[\"id\"],\n email=user_dict[\"email\"],\n password=auth_dict[\"access_token\"],\n )\n twitch_user = TwitchUser(\n twitch_id=int(user_dict[\"id\"]),\n twitch_name=user_dict[\"display_name\"],\n email=user_dict[\"email\"],\n access_token=auth_dict[\"access_token\"],\n refresh_token=auth_dict[\"refresh_token\"],\n expiration_date=expiration_date,\n scope=scope,\n user=user,\n )\n twitch_user.save()\n user = authenticate(\n username=user_dict[\"id\"], password=auth_dict[\"access_token\"]\n )\n print(\"User {} CREATED\".format(twitch_user.twitch_name))\n login(request, user)\n return bangs(request)\n\n # Log in User and render bangs\n if user is not None:\n login(request, user)\n print(\"User LOGED IN\")\n return bangs(request)\n\n else:\n # If this request doesn't have 'code', we request it and render the view again to further process 'code'\n get_request = auth.authorize_request(\n \"chat:read+chat:edit+openid+user:read:email\"\n )\n context = {\"get_request\": get_request}\n\n return render(request, \"bangs/login.html\", context)\n\n\n@login_required(login_url=\"/bot/login/\")\ndef bangs(request):\n user = request.user\n twitch_user = TwitchUser.objects.get(user=user)\n if request.method == \"POST\":\n post_user_formsets(request, user, formset_type=\"bang\")\n prefix_form = CommandPrefixForm(instance=twitch_user)\n set_form_styles(prefix_form)\n formsets = get_user_formsets(request, user, twitch_user)\n return render(\n request, \"bangs/bangs.html\", {\"prefix_form\": prefix_form, \"formsets\": formsets}\n )\n\n\n@login_required(login_url=\"/bot/login/\")\ndef 
blasts(request):\n user = request.user\n if request.method == \"POST\":\n post_user_formsets(request, user, formset_type=\"blast\")\n return redirect(\"bangs\")\n\n\n@login_required(login_url=\"/bot/login/\")\ndef prefix(request):\n user = request.user\n twitch_user = TwitchUser.objects.get(user=user)\n if request.method == \"POST\":\n CommandPrefixForm(request.POST, instance=twitch_user).save()\n return redirect(\"bangs\")\n\n\ndef home(request):\n get_request = auth.authorize_request(\"chat:read+chat:edit+openid+user:read:email\")\n context = {\"get_request\": get_request}\n return render(request, \"bangs/index.html\", context)\n"
},
{
"alpha_fraction": 0.5118833184242249,
"alphanum_fraction": 0.5154843330383301,
"avg_line_length": 47.15044403076172,
"blob_id": "cc0542d7d9d18e3cc1a5cac2898d615126ac48b2",
"content_id": "5a7689da471c622947998e33058e5c8d3b111295",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5554,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 113,
"path": "/bangs/templates/bangs/bangs.html",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "{% extends 'bangs/base.html' %}\r\n\r\n{% block content %}\r\n<form action=\"{% url 'prefix' %}\" method=\"post\" class=\"form-inline\">\r\n <div class=\"row form-group graphics-container\">\r\n {% csrf_token %}\r\n {% for field in prefix_form %}\r\n <div class=\"col-auto\">\r\n <label for=\"{{ field.name }}\">Command Prefix </label>\r\n </div>\r\n <div class=\"col-auto\">\r\n {{ field }}\r\n </div>\r\n {% endfor %}\r\n <input type=\"submit\" value=\"Submit\" class=\"btn btn-info\">\r\n </div>\r\n</form>\r\n<form method=\"post\" id=\"bang_formset\" action=\"{% url 'bangs' %}\">\r\n <div class=\"row\">\r\n <div id=\"existing-bangs\" class=\"col-md-3 form-group-sm graphics-container\">\r\n <h1 id=\"saved-bangs\">Your Bangs</h1>\r\n {% if formsets.bang_formset.forms %}\r\n {% csrf_token %}\r\n {{ formsets.bang_formset.management_form }}\r\n {% for form in formsets.bang_formset %}\r\n {% for hidden_field in form.hidden_fields %}\r\n {{ hidden_field }}\r\n {% endfor %}\r\n\r\n {% for field in form.visible_fields %}\r\n <div class=\"form-group\">\r\n {% if field.name == 'DELETE' %}\r\n <div class=\"form-check\">\r\n {{ field }}\r\n <label class=\"form-check-label\" for=\"{{ field.id_for_label}}\">Delete</label>\r\n </div>\r\n {% else %}\r\n {{ field }}\r\n {% endif %}\r\n {% if field.help_text %}\r\n <small class=\"form-text text-muted\">{{ field.help_text }}</small>\r\n {% endif %}\r\n </div>\r\n {% endfor %}\r\n {% empty %}\r\n <p class=\"no_bangs\">There are no bangs saved... yet.</p>\r\n {% endfor %}\r\n {% endif %}\r\n <input type=\"submit\" value=\"Submit\" class=\"btn btn-info\"/>\r\n </div>\r\n <div class=\"col-md-7 filled-container\">\r\n <h3>What is a Bang?</h3> \r\n <p>A bang is a command/response pair that can be either defined by the user, or be a special-bang.</p>\r\n <p>You can define, edit and delete your bangs here on the left. </p>\r\n <h3>What are Special Bangs?</h3> \r\n <p>Those are bangs that do very special things! Some control the bot, others can get you useful information. There's even a bang to help you with bangs (\"!help\").</p>\r\n <p>Some special bangs belong to tiers. Tier 1 is the free tier and all bangs will belong to this tier for a while. You can check what bangs belong to tier 1 by using \"!tierone\"</p>\r\n <p>Other special bangs belong to no tier, and that's because their only purpose is to control the bot. Currently, these are \"!help\" and \"!mute\" and they will be free forever!</p>\r\n </div>\r\n </div>\r\n</form>\r\n</div>\r\n<div class=\"row\">\r\n <div id=\"existing-blasts\" class=\"col-md-3 form-group-sm graphics-container\">\r\n <form method=\"post\" id=\"blast_formset\" action=\"{% url 'blasts' %}\">\r\n <h1 id=\"saved-bangs\">Your Blasts</h1>\r\n {% if formsets.blast_formset.forms %}\r\n {% csrf_token %}\r\n {{ formsets.blast_formset.management_form }}\r\n {% for form in formsets.blast_formset %}\r\n {% for hidden_field in form.hidden_fields %}\r\n {{ hidden_field }}\r\n {% endfor %}\r\n\r\n {% for field in form.visible_fields %}\r\n <div class=\"form-group\">\r\n {% if field.name == 'DELETE' %}\r\n <div class=\"form-check\">\r\n {{ field }}\r\n <label class=\"form-check-label\" for=\"{{ field.id_for_label}}\">Delete</label>\r\n </div>\r\n {% else %}\r\n {{ field }}\r\n {% endif %}\r\n {% if field.help_text %}\r\n <small class=\"form-text text-muted\">{{ field.help_text }}</small>\r\n {% endif %}\r\n </div>\r\n {% endfor %}\r\n {% empty %}\r\n <p class=\"no_bangs\">There are no blasts saved... 
yet.</p>\r\n {% endfor %}\r\n {% endif %}\r\n <input type=\"submit\" value=\"Submit\" class=\"btn btn-info\"/>\r\n </form>\r\n </div>\r\n<div class=\"col-md-7 filled-container\">\r\n <h3>What is a Blast?</h3> \r\n <p>A blast is a variable name/value defined by the user.</p>\r\n <p>You can define, edit and delete your blasts here on the left. </p>\r\n <h3>Whats their purpose?</h3> \r\n <p>Blasts are here to make your bangs simpler. They let you define a variable you'll be able to use in other bangs AND blasts.</p>\r\n <p>\r\n Imagine I want to make my own \"!worldrecord\", \"!leaderboard\" and \"!personalbest\" chains for my main game/category: LEGO Harry Potter/Replay Story.\r\n I'd have to write \"lhp1-4/Replay Story\" on my version of all those bangs! And if I change my main game I'd have to update them all, but with blasts\r\n I just have to define my \"maingamecategory\" in one blast and then just use that blast in the other bangs. My bangs will end up looking like this:\r\n </p>\r\n <pre><kbd>\"wr\": \"!worldrecord $(maingamecategory)\"</kbd></pre>\r\n <pre><kbd>\"lb\": \"!leaderboard $(maingamecategory)\"</kbd></pre>\r\n <pre><kbd>\"pb\": \"!personalbest $(myname)/$(maingamecategory)\"</kbd></pre> (here \"myname\" is another blast I defined)\r\n </div>\r\n</div>\r\n{% endblock %}\r\n"
},
{
"alpha_fraction": 0.7974026203155518,
"alphanum_fraction": 0.7974026203155518,
"avg_line_length": 24.66666603088379,
"blob_id": "94dda18761a4cba680be3c5970115a889257c949",
"content_id": "e0a7fb2ef8293014430d3f25bb6c42f58596ec69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 15,
"path": "/bangs/admin.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom .models import Bang, TwitchUser, Blast\n\n\nclass MyUserAdmin(UserAdmin):\n list_display = ('email',)\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, MyUserAdmin)\nadmin.site.register(Bang)\nadmin.site.register(TwitchUser)\nadmin.site.register(Blast)\n"
},
{
"alpha_fraction": 0.7411764860153198,
"alphanum_fraction": 0.7411764860153198,
"avg_line_length": 16,
"blob_id": "e2d79c29dac167546d97c58796fe1f4c6428916d",
"content_id": "14b3705d62bf8f61d0f34fe694cc3c5afeead593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/bangs/apps.py",
"repo_name": "bamthebot/bang_central",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass BangsConfig(AppConfig):\n name = 'bangs'\n"
}
] | 22 |
nSakk/ISN | https://github.com/nSakk/ISN | b2e7a4940ff87d018eb5c51f889d87845b360fbb | ac18f0c7fe8d8ca0ec5e263b6b5afd31988db0a7 | 0f0bd53a734ecbc49832f1af7e4355c1de37dd7b | refs/heads/master | 2020-08-05T05:02:28.655752 | 2020-02-14T21:08:36 | 2020-02-14T21:08:36 | 212,405,518 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.550973653793335,
"alphanum_fraction": 0.617411196231842,
"avg_line_length": 17.5744686126709,
"blob_id": "1afcfc45c2b2b918355e50e308f36e3bc43e38b7",
"content_id": "241a1e487a691f42e94afc958fc3f4a5802cc824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 873,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 47,
"path": "/TP/Giaufer_Ex8_tkinter.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from tkinter import *\n\nroot = Tk()\nroot.geometry(\"300x300\")\n\n\n# Fonctions\ndef quitter():\n root.quit()\n root.destroy()\n\n\ndef left(event):\n x1, y1, x2, y2 = draw.coords(ball)\n draw.coords(ball, x1 - 5, y1, x2 - 5, y2)\n\n\ndef right(event):\n x1, y1, x2, y2 = draw.coords(ball)\n draw.coords(ball, x1 + 5, y1, x2 + 5, y2)\n\n\ndef up(event):\n x1, y1, x2, y2 = draw.coords(ball)\n draw.coords(ball, x1, y1 - 5, x2, y2 - 5)\n\n\ndef down(event):\n x1, y1, x2, y2 = draw.coords(ball)\n draw.coords(ball, x1, y1 + 5, x2, y2 + 5)\n\n\n# Widgets\ndraw = Canvas(root)\nboutonQuitter = Button(root, text='Quitter', command=quitter)\n\n# Affichage des Widgets\ndraw.pack()\nboutonQuitter.pack()\n\n# Main\nball = draw.create_oval(100, 100, 150, 150, fill='red')\nroot.bind(\"<Left>\", left)\nroot.bind(\"<Right>\", right)\nroot.bind(\"<Up>\", up)\nroot.bind(\"<Down>\", down)\nroot.mainloop()\n"
},
{
"alpha_fraction": 0.5851128697395325,
"alphanum_fraction": 0.6375839114189148,
"avg_line_length": 25.88524627685547,
"blob_id": "3c8c6d6fe385a102e235fa3d68b047b6480128a4",
"content_id": "9d759c06e53eae9c7724d2a82f22b168a331d0e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 61,
"path": "/TP/Sakkriou_Ex7_tkinter.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from tkinter import *\n\nmaFenetre = Tk()\n\nx = 0\ny = 0\nl = 0\nc = 0\n\n\n# Fonctions\ndef grilleCreation():\n grille.delete(\"all\")\n for loop in range(5, 305, 20):\n grille.create_line([(loop, 5), (loop, 305)])\n\n for loop in range(5, 305, 20):\n grille.create_line([(5, loop), (305, loop)])\n\n grille.create_line([(5, 5), (5, 305)])\n grille.create_line([(305, 5), (305, 305)])\n grille.create_line([(5, 5), (305, 5)])\n grille.create_line([(5, 305), (305, 305)])\n\n\ndef coordonnes(event):\n global x, y\n x = event.x\n y = event.y\n result_labelx.config(text=f\"abscisse : {x}\")\n result_labely.config(text=f\"ordonnée : {y}\")\n ligne = int(y / 20) + 1\n column = int(x / 20) + 1\n result_colonne.config(text=f\"colonne : {column}\")\n result_ligne.config(text=f\"ligne : {ligne}\")\n\n grilleCreation()\n grille.create_line([(x - 5, y), (x + 5, y)])\n grille.create_line([(x, y - 5), (x, y + 5)])\n\n\n# Création des widgets\nresult_labelx = Label(maFenetre, text=f\"abscisse : {x}\")\nresult_labely = Label(maFenetre, text=f\"ordonnée : {x}\")\nresult_ligne = Label(maFenetre, text=f\"ligne : {l}\")\nresult_colonne = Label(maFenetre, text=f\"colonne : {c}\")\nboutonGrid = Button(maFenetre, text=\"Tracer la grille\", command=grilleCreation)\n\ngrille = Canvas(maFenetre, width=310, height=310)\n\n# Affichage des widgets\nboutonGrid.grid(row=0, column=1)\nresult_labelx.grid(row=1, column=1)\nresult_labely.grid(row=2, column=1)\nresult_ligne.grid(row=3, column=1)\nresult_colonne.grid(row=4, column=1)\ngrille.grid(row=0, column=0, rowspan=5)\n\n# Boucle principale\nmaFenetre.bind(\"<Button 1>\", coordonnes)\nmaFenetre.mainloop()"
},
{
"alpha_fraction": 0.5339806079864502,
"alphanum_fraction": 0.6582524180412292,
"avg_line_length": 32.46666717529297,
"blob_id": "19e2068038550195795ca7a8ed6371fd28b84aba",
"content_id": "a85bddc025b9f7596a778e376ae6e1087b48c505",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 15,
"path": "/projet_fin_annee/resultat_foot.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "import requests\r\n\r\nbase = requests.get('https://apiv2.apifootball.com/?action=get_events&from=2020-02-7&to=2020-02-14&league_id=148&APIkey=0628296b0a3128f59906a23df3e1894bec4e9ebb46dc82a5261dc3625682961e')\r\ndata = base.json()\r\n\r\nfor i in range(3):\r\n\tprint(data[i][\"match_hometeam_name\"], \": \", data[i][\"match_hometeam_score\"], \" | VS | \", data[i][\"match_awayteam_name\"], \": \", data[i][\"match_awayteam_score\"])\r\n\tprint(\"---------------------------\")\r\n\r\n\r\n\r\n\r\ninput()\r\n\r\n#https://apifootball.com/documentation/#Events"
},
{
"alpha_fraction": 0.615217387676239,
"alphanum_fraction": 0.634782612323761,
"avg_line_length": 25.176469802856445,
"blob_id": "2eef226893c1ae6e649b4c7edc7f4b6673ff7f96",
"content_id": "47a75a8e7ed759e86c1152453c3f6922b9787e0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 17,
"path": "/projet_fin_annee/actu.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from newsapi import *\r\nnewsapi = NewsApiClient(api_key='3dc1dd43f9914d8891b0c45875f34882')\r\n\r\n# /v2/everything\r\ndata = newsapi.get_everything(q='software', language='en', page_size=20)\r\nprint(data.keys())\r\nprint(data['articles'][0])\r\n\r\narticle = data['articles']\r\nfor x, y in enumerate(article):\r\n print(f'{x} {y[\"title\"]}')\r\n\r\nfor key, value in article[0].items():\r\n print(f\"\\n{key.ljust(15)} {value}\")\r\n\r\nprint(data[article][0]['url'])\r\nprint(data[article][0]['urlToImage'])"
},
{
"alpha_fraction": 0.6383495330810547,
"alphanum_fraction": 0.6990291476249695,
"avg_line_length": 25.46666717529297,
"blob_id": "38fe927960a9c15a945d3ebb7a5506a270713a79",
"content_id": "412965ea2fb0ecadb3cb04a048ea63617fc04f39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 15,
"path": "/projet_fin_annee/interface.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\n\r\n\r\nmain = Tk()\r\nmain.configure(width=1200, height=800)\r\nmain.resizable(width=False, height=False)\r\n\r\nheure = Canvas(main, width=700, height=400, background='white')\r\nactu = Canvas(main, width=250, height=800, background='white')\r\nmeteo = Canvas(main, width=150, height=150, background='white')\r\n#------\r\nheure.pack()#CENTRE\r\nactu.pack()#DROITE\r\nmeteo.pack()#HAUT GAUCHE\r\nmain.mainloop()\r\n"
},
{
"alpha_fraction": 0.4319213330745697,
"alphanum_fraction": 0.45915278792381287,
"avg_line_length": 35.71428680419922,
"blob_id": "70e2a9c059962ccd762a1167cb26de064605513d",
"content_id": "6484b3d7adc6942bdb436d96e7923c1f58580cf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 35,
"path": "/projet_fin_annee/complet.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from datetime import *\r\nimport requests\r\nfrom newsapi import *\r\n\r\nr = requests.get(\"http://api.openweathermap.org/data/2.5/weather?appid=ab0b9935472a05bf72192f5d3f36cb11&q=Saint-Vaast-la-Hougue&lang=fr\")\r\ndata=r.json()\r\nt=data['main']['temp']\r\ng = round(t , 2)\r\nw=data['name']\r\nweather=data['weather'][0]['description']\r\nprint(\"La ville est {}\".format(w))\r\nprint(\"La témpérature est de {} degrés C°\".format(round(g-273.15)))\r\nprint(weather)\r\nprint(\"#-------------------------------------------\")\r\nheure = datetime.now().time()\r\nprint(heure.hour, ':', heure.minute)\r\nprint(\"#-------------------------------------------\")\r\nprint(\"Actu du jour\")\r\nnewsapi = NewsApiClient(api_key='3dc1dd43f9914d8891b0c45875f34882')\r\ndata = all_articles = newsapi.get_everything(q='a',\r\n language='fr',\r\n domains='lemonde.fr,',\r\n sort_by='publishedAt',\r\n page=2)\r\na = 0\r\nfor loop in range(20):\r\n print(a+1, \" \", data['articles'][a]['title'])\r\n print(data['articles'][a]['url'])\r\n print(\"#-------------------------------------------\")\r\n a += 1\r\n\r\n\r\nprint(\"#-------------------------------------------\")\r\np = input(\"Pour continuer, pressez Entrée ... \")\r\nprint(\"#-------------------------------------------\")\r\n\r\n"
},
{
"alpha_fraction": 0.4821576774120331,
"alphanum_fraction": 0.5112033486366272,
"avg_line_length": 34.51515197753906,
"blob_id": "f0e5cfd205b1c7625dbdcf4faf102cc49015fe41",
"content_id": "3852116bb1656af08df38571daca119e4849cc17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1209,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 33,
"path": "/projet_fin_annee/thomas.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "from datetime import *\r\nimport requests\r\nfrom newsapi import *\r\n\r\nville = input(\"Entre la ville que tu veux : \")\r\n\r\nr = requests.get(\"http://api.openweathermap.org/data/2.5/weather?appid=ab0b9935472a05bf72192f5d3f36cb11&q={}&lang=fr\".format(ville))\r\ndata=r.json()\r\nt=data['main']['temp']\r\nw=data['name']\r\nweather=data['weather'][0]['description']\r\nprint(\"La ville est {}\".format(w))\r\nprint(\"La témpérature est de {} degrés C\".format(round(t-273.15)))\r\nprint(weather)\r\nprint(\"#-------------------------------------------\")\r\nheure = datetime.now().time()\r\nprint(heure.hour, ':', heure.minute)\r\nprint(\"#-------------------------------------------\")\r\nprint(\"Actu du jour\")\r\nnewsapi = NewsApiClient(api_key='3dc1dd43f9914d8891b0c45875f34882')\r\ndata = all_articles = newsapi.get_everything(q='a',\r\n language='fr',\r\n domains='lemonde.fr,',\r\n sort_by='publishedAt',\r\n page=2)\r\na = 0\r\nfor loop in range(20):\r\n print(a+1, \" \", data['articles'][a]['title'])\r\n print(data['articles'][a]['url'])\r\n print(\"#----------------\")\r\n a += 1\r\n\r\np = input(\"Appuiez sur entrée pour continuer\")\r\n"
},
{
"alpha_fraction": 0.6243094205856323,
"alphanum_fraction": 0.7044199109077454,
"avg_line_length": 34.400001525878906,
"blob_id": "9e97d2fbe94e342892921f7293dcf28368d1f402",
"content_id": "06a36268db9ba53a0928fff2dd3eb110bee502a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 10,
"path": "/projet_fin_annee/meteo.py",
"repo_name": "nSakk/ISN",
"src_encoding": "UTF-8",
"text": "import requests\r\n\r\nr = requests.get(\"http://api.openweathermap.org/data/2.5/weather?appid=ab0b9935472a05bf72192f5d3f36cb11&q={}&lang=fr\".format(ville))\r\ndata=r.json()\r\nt=data['main']['temp']\r\nw=data['name']\r\nweather=data['weather'][0]['description']\r\nprint(\"La ville est {}\".format(w))\r\nprint(\"La témpérature est de {} degrés C\".format(t-273.15))\r\nprint(weather)"
}
] | 8 |
shakaka/MDSplus_usage | https://github.com/shakaka/MDSplus_usage | f62ae605deb47374c99110f0171a210b2da5f5cb | 30400a67c6531943085ddfe990cdde0ab3b97b4b | 1961e65fafc3a1cef1074fd689b5fe357c14caa3 | refs/heads/master | 2020-04-07T11:04:10.599277 | 2019-02-14T07:39:22 | 2019-02-14T07:39:22 | 158,311,365 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5712435245513916,
"alphanum_fraction": 0.6075129508972168,
"avg_line_length": 18.299999237060547,
"blob_id": "35b4434659f4a1c078dc67ff40d190c009c99ef4",
"content_id": "485c65481c97567a84734b4e936cf5c4b11a921e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 40,
"path": "/resample.py",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport MDSplus\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\n\ndef resample_show(args):\n resp_rate = args.resp[0]\n\n t = Tree(\"acq2106_test\", 1176)\n mp1Node = t.getNode(\"\")\n mp1Data = mp1Node.record.data()\n\n mp1Data_re = []\n\n if resp_rate < len(mp1Data):\n i = 0\n while i < len(mp1Data):\n mp1Data_re.append(mp1Data[i])\n i += resp_rate\n\n\n\n plt.figure(1)\n plt.subplot(211)\n plt.plot(mp1Data)\n\n plt.subplot(212)\n plt.plot(mp1Data_re)\n plt.show()\n\ndef run_main():\n parser = argparse.ArgumentParser(description=\"resampling\")\n parser.add_argument('resp', nargs=1, help=\"resample rate\")\n make_acqtree(parser.parse_args())\n\n\nif __name__ == '__main__':\n run_main()\n"
},
{
"alpha_fraction": 0.6033112406730652,
"alphanum_fraction": 0.6092715263366699,
"avg_line_length": 23.754098892211914,
"blob_id": "06ec3ce6bec2da83948e91256efb110bd226b757",
"content_id": "640bfb0e5ce2568d41d87634d98f2bf026788ebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 61,
"path": "/make_maintree.py",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nimport MDSplus\nimport os\n\ndef new_shot(tree):\n MDSplus.Tree.setCurrent(tree, 1)\n MDSplus.Tree(tree, -1).createPulse(1)\n\ndef make_tree(tree, nchan, id):\n for subtrees in args.subtrees:\n tree.addNode(subtrees, \"SUBTREE\")\n\ndef path_check(tname):\n root = os.getenv(\"MDS_TREE_ROOT\", \"{}/trees\".format(os.environ['HOME']))\n key = \"{}_path\".format(tname)\n tpath = \"{}/{}\".format(root, tname)\n mpath = os.getenv(key, \"notfound\")\n if mpath == \"notfound\":\n print(\"run as root:\")\n print('echo \"{} {}\" >> {}'.\n\t\tformat(key, tpath, \"/usr/local/mdsplus/local/envsyms\"))\n print(\"# for immediate use:\")\n print(\"export {}={}\".format(key, tpath))\n\tprint(\"then run the command again please\")\n\texit(1)\n\n if not os.path.exists(root):\n print('mkdir {}'.format(root))\n\texit(1)\n\n\ndef make_acqtree(args):\n tname = args.tree[0]\n path_check(tname)\n tree = MDSplus.Tree(tname, -1, \"NEW\")\n\n if not args.subtrees is None:\n make_tree(tree, args.subtrees)\n else:\n new_shot(tname)\n\n\n\n\ndef int_or_raw(value):\n if value == 'RAW' or value == 'raw':\n\treturn 0\n else:\n return int(value)\n\ndef run_main():\n parser = argparse.ArgumentParser(description=\"make_acqtree\")\n parser.add_argument('tree', nargs=1, help=\"tree name, ideally UUT name\")\n parser.add_argument('--subtrees', nargs='+', help=\"subtree list\")\n make_acqtree(parser.parse_args())\n\n\nif __name__ == '__main__':\n run_main()\n"
},
{
"alpha_fraction": 0.6352128982543945,
"alphanum_fraction": 0.6386651396751404,
"avg_line_length": 26.125,
"blob_id": "c9a0f7cc9707d11a39e2133dbb738ca667dff350",
"content_id": "1c8bd5779dfc8041a9db1833c534d3c1399902ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 869,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 32,
"path": "/get_shot",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n\"\"\"\nnew_shot uut [uut..]\ncreate new MDSplus shot\nif multiple trees, find highest shot number, and set all trees to next\n\"\"\"\n\nimport argparse\nimport MDSplus\n\ndef get_next_shot(args):\n old_shots = [MDSplus.Tree.getCurrent(u) for u in args.uuts]\n \n for tree in args.uuts:\n sn = MDSplus.Tree.getCurrent(tree)\n if args.verbose:\n print(\"Tree %s shot shot %d\" % (tree, sn))\n print(sn)\n\ndef run_main():\n parser = argparse.ArgumentParser(description='new_shot uut [uut..]') \n parser.add_argument('--verbose', default=1, type=int, help='show shot number')\n parser.add_argument('--shot', default=-99, type=int, help='specify shot number')\n parser.add_argument('uuts', nargs='+', help=\"uut list\")\n get_next_shot(parser.parse_args())\n\n\n# execution starts here\n\nif __name__ == '__main__':\n run_main()\n\n"
},
{
"alpha_fraction": 0.5751469135284424,
"alphanum_fraction": 0.5860621333122253,
"avg_line_length": 28.395061492919922,
"blob_id": "dcf3b912e1c90b1a54eaa2ca0df7573a96c15cde",
"content_id": "f82b2b9fc6956a6ead9e96989d8eed8195b60168",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2382,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 81,
"path": "/make_acqtree.py",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nimport MDSplus\nimport os\n\ndef new_shot(tree):\n MDSplus.Tree.setCurrent(tree, -1)\n# MDSplus.Tree(tree, -1).createPulse(1)\n\ndef make_chan(tree, nchan, id):\n if nchan == 0:\n subdir = tree.addNode(\".{}\".format(id))\n subdir.addNode(\":RAW\", \"SIGNAL\")\n else:\n subdir = tree.addNode(\".{}\".format(id))\n chfmt = \"CH{:0\" + \"{}\".format('3' if nchan > 99 else '2') + \"}\"\n\n for ch in range(1, nchan+1):\n subdir.addNode(chfmt.format(ch), \"SIGNAL\")\n\n\ndef path_check(tname):\n root = os.getenv(\"MDS_TREE_ROOT\", \"{}/TREES\".format(os.environ['HOME']))\n key = \"{}_path\".format(tname)\n tpath = \"{}/{}\".format(root, tname)\n mpath = os.getenv(key, \"notfound\")\n if mpath == \"notfound\":\n print(\"run as root:\")\n print('echo \"{} {}\" >> {}'.\n\t\tformat(key, tpath, \"/usr/local/mdsplus/local/envsyms\"))\n print(\"# for immediate use:\")\n print(\"export {}={}\".format(key, tpath))\n\tprint(\"then run the command again please\")\n\texit(1)\n \n if not os.path.exists(root):\n print('mkdir {}'.format(root))\n\texit(1)\n\n if os.path.exists(tpath):\n print('existing tree {} may already exist. Delete it'.format(tpath))\n\texit(1)\n else:\n os.mkdir(tpath)\n \n\ndef make_acqtree(args):\n tname = args.tree[0]\n path_check(tname)\n tree = MDSplus.Tree(tname, -1, \"NEW\")\n \n if args.aichan >= 0:\n\tmake_chan(tree, args.aichan, \"AI\")\n if args.aochan >= 0:\n make_chan(tree, args.aochan, \"AO\")\n if args.dio >= 0:\n make_chan(tree, args.dio, \"DIO\")\n if args.stat >= 0:\n make_chan(tree, args.stat, \"ST\")\n tree.write()\n new_shot(tname)\n\ndef int_or_raw(value):\n if value == 'RAW' or value == 'raw':\n\treturn 0\n else:\n return int(value)\n \ndef run_main():\n parser = argparse.ArgumentParser(description=\"make_acqtree\")\n parser.add_argument('--aichan', default=-1, type=int_or_raw, help='ai channel count')\n parser.add_argument('--aochan', default=-1, type=int_or_raw, help='ao channel count')\n parser.add_argument('--dio', default=-1, type=int, help='dio, words')\n parser.add_argument('--stat', default=-1, type=int, help='status, words')\n parser.add_argument('tree', nargs=1, help=\"tree name, ideally UUT name\")\n make_acqtree(parser.parse_args())\n\n\nif __name__ == '__main__':\n run_main()\n\n"
},
{
"alpha_fraction": 0.5897196531295776,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 25.75,
"blob_id": "efd65f8af06030ff7d4708bed25929c7bedcf806",
"content_id": "416986e1d2512873a5248ab22730f13760c748ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1070,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 40,
"path": "/new_shot",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n\"\"\"\nnew_shot uut [uut..]\ncreate new MDSplus shot\nif multiple trees, find highest shot number, and set all trees to next\n\"\"\"\n\nimport argparse\nimport MDSplus\n\ndef set_next_shot(args):\n old_shots = [MDSplus.Tree.getCurrent(u) for u in args.uuts]\n if args.shot != -99:\n sn = args.shot\n if sn <= 0:\n sn = 1\n else:\n sn = max(old_shots) + 1\n if sn <= 0:\n sn = 1\n for tree in args.uuts:\n if args.verbose:\n print(\"Setting %s to shot %d\" % (tree, sn))\n MDSplus.Tree.setCurrent(tree, sn)\n MDSplus.Tree(tree, -1).createPulse(sn)\n return sn\n\ndef run_main():\n parser = argparse.ArgumentParser(description='new_shot uut [uut..]')\n parser.add_argument('--verbose', default=1, type=int, help='show shot number')\n parser.add_argument('--shot', default=-99, type=int, help='specify shot number')\n parser.add_argument('uuts', nargs='+', help=\"uut list\")\n set_next_shot(parser.parse_args())\n\n\n# execution starts here\n\nif __name__ == '__main__':\n run_main()\n"
},
{
"alpha_fraction": 0.5727272629737854,
"alphanum_fraction": 0.6063636541366577,
"avg_line_length": 27.179487228393555,
"blob_id": "f177de4a16fb060e3ad2bad758ceb56f8efea10b",
"content_id": "24a1002966afb64f561582426701868e380f08ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1100,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 39,
"path": "/make_bolo_tree.py",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nfrom MDSplus import *\n\nidnames = (\"MAG_%d\", \"phi_%d\", \"PWR_%d\" )\nidunits = (\"V\", \"rad\", \"W\")\nidcal = (\"7.109e-8\", \"1.8626e-9\", \"4.550e-6\" )\n\n\ndef make_bolo_tree(args):\n\ttree = Tree(args.tree[0], -1, \"NEW\")\n\t\n\tfor site in range(1, args.bolo8_count+1):\n\t\tbname = \"BOLO%d\" % (site)\n\t\tmodule = tree.addNode(\".%s\" % (bname))\n\t\tmodpath = \"\\\\%s::TOP.%s\" % (args.tree[0], bname)\n\n\t\tfor ch in range(1, 24+1):\n\t\t\trawname = \"CH%02d\" % (ch)\n\t\t\traw = module.addNode(rawname, \"SIGNAL\")\n\t\t\tbchan = 1 + (ch - 1)/3\n\t\t\tid = (ch - 1)%3\n\t\t\tcooked = module.addNode(idnames[id] % (bchan), \"SIGNAL\")\n\t\t\texpr = \"%s.%s * %s\" % (modpath, rawname, idcal[id])\n\t\t\tprint(expr)\n\t\t\tcooked.putData(Data.compile(expr))\n\t\t\tcooked.setUnits(idunits[id])\n\ttree.write()\n\ndef run_main():\n\tparser = argparse.ArgumentParser(description=\"make_bolo_tree\")\n\tparser.add_argument('--bolo8_count', default=1, help = \"number of bolo8 modules\" )\n\tparser.add_argument('tree', nargs=1, help = \"tree name\")\n\tmake_bolo_tree(parser.parse_args())\n# execution starts here\n\nif __name__ == '__main__':\n run_main()\n\n"
},
{
"alpha_fraction": 0.5591953992843628,
"alphanum_fraction": 0.5867816209793091,
"avg_line_length": 31.50467300415039,
"blob_id": "3d7242b9dc89293174eaf9cae25cf2908710f7e0",
"content_id": "015328b5e8980e7ab0ccb1f08c0dff76af8aaaf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3480,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 107,
"path": "/mds_put_slice.py",
"repo_name": "shakaka/MDSplus_usage",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n\"\"\"\nmds_put_slice store slice[s] of a 2D array to mdsplu\nexample\n./mds_put_slice.py --ncols 112 --dtype np.uint32 \\ \n --store_cols 97,98,99 --node_name \"CH%02d\" \\\n --default_node ST \\\n acq2106_076 \\\n PROJECTS/AFHBA404/LLCONTROL/afhba.0.log\n\neg:\n raw file is 112 cols uint32 wide \n store cols 97.98.99 to node CH01 .. CH03\n under default node ST\n in tree acq2106_076\n raw data afhba.0.log\n\"\"\"\n\n#import acq400_hapi\nimport numpy as np\nimport argparse\nimport MDSplus\n\ndef do_tlatch_report(tla, verbose):\n t0 = 0;\n errors = 0\n errmax = 0\n \n if verbose:\n print(\"do_tlatch_report {}\".format(tla)) \n for tt in tla:\n if tt != t0+1:\n if verbose > 1:\n print(\"ERROR %d %d\" % (t0, tt))\n errors += 1\n if tt - t0 > errmax:\n errmax = tt - t0\n t0 = tt\n \n if errors:\n print(\"SUMMARY: errors %d maxerr %d\" % (errors, errmax))\n ll = len(tla)\n return np.concatenate((np.subtract(tla[1:ll], tla[0:ll-1]), [1]))\n \n \ndef mds_put_slice(args):\n if args.store_cols == ':':\n\tstore_cols = range(0, args.ncols)\n else:\n store_cols = eval('('+args.store_cols+', )')\n try:\n n_store = len(store_cols)\n except TypeError:\n print \"TypeError add brackets\"\n store_cols = ( store_cols, )\n \n with open(args.file[0], 'r') as fp:\n raw = np.fromfile(fp, dtype=eval(args.dtype))\n nsam = len(raw)/args.ncols\n print(\"mds_put_slice len {} ncols {} nsam {} nsam*ncols {}\".\n\t\tformat(len(raw), args.ncols, nsam, nsam*args.ncols))\n\n cols = np.reshape(raw[0:nsam*args.ncols], (nsam, args.ncols))\n\n if args.shr != 0:\n cols = np.right_shift(cols, args.shr)\n\n\n \n tree = MDSplus.Tree(args.tree[0], 0) \n iout = 1\n if args.tlatch_report:\n cols[:,1] = do_tlatch_report(cols[:,store_cols[0]], args.tlatch_report)\n\n for sc in store_cols:\n node_name = args.node_name % (iout)\n if args.default_node:\n node = tree.getNode(\"%s.%s\" % (args.default_node, node_name))\n else:\n node = tree.getNode(node_name)\n node.putData(cols[:,sc])\n\tif args.tlatch_report and sc==1:\n print(\"Node {} is delta tlatch\".format(node_name))\n iout += 1\n print(\"MDSplus.Event.setevent({}, 42)\".format(args.tree[0]))\n MDSplus.Event.setevent(args.tree[0])\n\n\n\ndef run_main():\n parser = argparse.ArgumentParser(description='mds_put_slice slice a data file and submit to MDSplus')\n parser.add_argument('--ncols', type=int, default=64, help=\"number of columns in data\")\n parser.add_argument('--dtype', type=str, default='np.uint32', help=\"data type np.uint32, np.int16 etc\")\n parser.add_argument('--shr', type=int, default=0, help='right shift')\n parser.add_argument('--store_cols', default = ':', type=str, help=\"list of column indices to store\")\n parser.add_argument('--node_name', type=str, help=\"node name %d format accepted\")\n parser.add_argument('--default_node', type=str, help=\"default node\")\n parser.add_argument('--tlatch_report', type=int, default=0, help=\"1: brief tlatch check, 2: detail tlatch check\")\n parser.add_argument('tree', nargs=1, help=\"tree name\")\n parser.add_argument('file', nargs=1, help=\"file \")\n mds_put_slice(parser.parse_args())\n\n# execution starts here\n\nif __name__ == '__main__':\n run_main()\n\n\n"
}
] | 7 |
pierrotmus/MLPythonStuffs | https://github.com/pierrotmus/MLPythonStuffs | 5d689cbf1e3d438c78258c87480c6314da227ae2 | d39456f059a3cf41b0b34b933d36550a78afb2a7 | e513e6ef504ec08fa408adcd96396cfec81e8b0b | refs/heads/master | 2020-03-13T09:17:10.936875 | 2019-09-01T12:44:04 | 2019-09-01T12:44:04 | 131,061,235 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6050523519515991,
"alphanum_fraction": 0.60813307762146,
"avg_line_length": 26.066667556762695,
"blob_id": "7f07b7551bf0f4469a90bbfe8dfad6c1f1f24db4",
"content_id": "1eff43e468d673fe299d378a392629c5cf9fd5b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1623,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 60,
"path": "/Twitter_Sentiment/frequency.py",
"repo_name": "pierrotmus/MLPythonStuffs",
"src_encoding": "UTF-8",
"text": "# Pierrot Musumbu Dibwe\n# Tweet word frequency counter collecting words from tweets obtained from the API\n# and returning the word and the frequency\n\nimport sys\nimport json\n\ntwitterData = sys.argv[1] #output.txt\n\ndef main():\n \n ''' Create a disctionary of words compute theirs frequencies '''\n collected_tweets = tweet_dict(twitterData)\n list_of_words = []\n words_frequency = {}\n # Identify punctuactions and make a list of punctuations.\n punctuactions = '?:!.,;\"!@\\''\n punctuations_list = []\n for char in punctuactions:\n \n punctuations_list.append(char)\n \n for index in range(len(collected_tweets)):\n \n if collected_tweets[index].has_key(\"text\"):\n \n words_in_tweet = collected_tweets[index][\"text\"].split()\n for word in words_in_tweet:\n \n word = word.rstrip(punctuactions)\n list_of_words.append(word)# create list of total terms\n \n\n for word in list_of_words:\n \n\tif word in words_frequency:\n\t\twords_frequency[word] = words_frequency[word]+1\n\t\t\n\telse:\n\t\twords_frequency[word] = 1\n\t\t\n total_number = len(words_frequency)\n\n for word in words_frequency:\n \n words_frequency[word] = \"%.4f\" %(float(words_frequency[word])/total_number)\n print(word.encode(\"utf-8\") + \" \" + words_frequency[word])\n\ndef tweet_dict(twitterData): \n \n twitter_list_dict = []\n twitterfile = open(twitterData)\n \n for line in twitterfile:\n twitter_list_dict.append(json.loads(line))\n return twitter_list_dict\n\n \nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.6158038377761841,
"alphanum_fraction": 0.6230699419975281,
"avg_line_length": 31.41176414489746,
"blob_id": "47e548fda89d8be1991b976bee08502a8d92993f",
"content_id": "ed5cf6c84b699ad2627b9246a01b8694e24ae4da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1101,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 34,
"path": "/Twitter_Sentiment/term_sentiment.py",
"repo_name": "pierrotmus/MLPythonStuffs",
"src_encoding": "UTF-8",
"text": "# Pierrot Musumbu Dibwe\n# Tweet terms analyzer collecting words from tweets obtained from the API\n# and returning the word and the score\n\nfrom __future__ import division\nimport sys\nimport json\n\n\ndef scores_extraction(sent_file):\n with open(sent_file) as file_content:\n return {line.split('\\t')[0]: int(line.split('\\t')[1]) for line in file_content}\n\n\ndef tweet_afinn_score(tweet, scores):\n return sum(scores.get(word, 0) for word in tweet)\n\n\ndef new_word_score(tweet_file, scores):\n with open(tweet_file) as file_content:\n tweets = (json.loads(line).get('text', '').split() for line in file_content)\n return {word: tweet_afinn_score(tweet, scores) / len(tweet)\n for tweet in tweets if tweet\n for word in tweet if word not in scores}\n\ndef main():\n \n scores = scores_extraction(sent_file=sys.argv[1])\n sys.stdout.writelines('{0} {1}\\n'.format(word.encode('utf-8'), score)\n for word, score in new_word_score(\n tweet_file=sys.argv[2],scores=scores).items())\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.6511817574501038,
"alphanum_fraction": 0.6609616875648499,
"avg_line_length": 32.16216278076172,
"blob_id": "553bb729379e1d6d2b88f3b9d9f232d930320c9b",
"content_id": "f8a9bfbcfddf32a44299c565accdbe1587786c6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 37,
"path": "/Twitter_Sentiment/tweet_sentiment.py",
"repo_name": "pierrotmus/MLPythonStuffs",
"src_encoding": "UTF-8",
"text": "# Pierrot Musumbu Dibwe\n# Twitter sentiment analyzer collecting tweets from twenty lines obtained from the API\n# and returning the score\n\nimport sys\nimport json\n\n# Make a diction by parsing file and returnin a {word: sentiment}\ndef sentiment_score(sent_file):\n \n with open(sent_file) as file_content:\n return {line.split('\\t')[0]: int(line.split('\\t')[1]) for line in file_content}\n\n# Check the score of the tweet in AFINN-11.txt and return it or return 0\ndef tweet_afinn_score(tweet, scores):\n \n return sum(scores.get(word, 0) for word in tweet.split())\n\n# Evaluate the scores of all the tweets in the file\ndef all_tweets_score(tweet_file, scores):\n \n with open(tweet_file) as file_content:\n tweets = (json.loads(line).get('text', '') for line in file_content)\n return [tweet_afinn_score(tweet, scores) for tweet in tweets]\n\ndef main():\n sent_file = open(sys.argv[1])\n tweet_file = open(sys.argv[2])\n \n sent_file = sys.argv[1]\n tweet_file = sys.argv[2]\n scores = sentiment_score(sent_file=sent_file)\n sys.stdout.writelines('{0}.0\\n'.format(score)\n for score in all_tweets_score(tweet_file=tweet_file, scores=scores))\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5593578219413757,
"alphanum_fraction": 0.5669624209403992,
"avg_line_length": 39.13559341430664,
"blob_id": "1e606e80ae922b553dcf2aae5205e81ec64721a2",
"content_id": "c97c546c0ea6d73cd293e70cfa13409d42f35f4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2367,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 59,
"path": "/Twitter_Sentiment/happiest_state.py",
"repo_name": "pierrotmus/MLPythonStuffs",
"src_encoding": "UTF-8",
"text": "# Pierrot Musumbu Dibwe\n# Happinesst analyzer collecting tweets from twenty lines obtained from the API\n# and returning the name and the score of the state with the highest score\n\nfrom __future__ import division\nimport sys\nimport json\n\nlist_of_states = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\", \n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\", \n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\", \n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\ndef main():\n sent_file = open(sys.argv[1])\n tweet_file = open(sys.argv[2])\n \n # Construct a dict of words sentiments\n scores = {}\n for input_line in sent_file:\n term, score = input_line.split(\"\\t\") \n scores[term] = int(score)\n\n # Collect words from tweets\n collected_tweets = []\n for input_line in tweet_file:\n tweets = json.loads(input_line)\n\n # Select valid words\n if 'word' in tweets.keys() and 'place' in tweets.keys() and tweets['place'] != None:\n term = tweets['word'].replace(\"\\n\", \"\").encode('utf-8').strip()\n country = tweets['place']['country_code']\n state = tweets['place']['full_name'].encode('utf-8').strip()[-2::1]\n\n # Select only tweets from USA\n if country == 'US' and state in list_of_states:\n collected_tweets.append((term, state))\n \n ''' Compute the sentiment of each tweet according to the AFINN dictionary \n localize the place where the tweet originated classify the tweets by \n state of origine and compute the total sentiment by state and select \n the sate with the higest positive tweet score as the happiest state'''\n \n state_sentiment = {code:[0, 1] for code in list_of_states} # 1st is sum, 2nd is count\n for (tweet, state) in collected_tweets:\n sentiment = 0.0\n for word in tweet.split():\n if word in scores:\n sentiment += scores[word]\n state_sentiment[state][0] += sentiment\n state_sentiment[state][1] += 1\n \n state_score = {code:state_sentiment[code][0] / state_sentiment[code][1] for code in list_of_states}\n print(sorted(state_score, key = state_score.get)[-1])\n \n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.8409090638160706,
"alphanum_fraction": 0.8409090638160706,
"avg_line_length": 21,
"blob_id": "de9f96f5fbdce4e8a08292fbb867cb575f60f3b5",
"content_id": "718b2186a16409d346918427c186e63c38f22992",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/README.md",
"repo_name": "pierrotmus/MLPythonStuffs",
"src_encoding": "UTF-8",
"text": "# MLPythonStuffs\nMy Machine Learning stuffs\n"
}
] | 5 |
cog-isa/HRL-grid | https://github.com/cog-isa/HRL-grid | aa3a8fce467c0451aa4e214d5f2c574003340b3d | b0690e1f6445c4bb30fd7cbe3b1639128c3de55a | 33929ce60529b5afc5c3b055132c4c1d1d5659cc | refs/heads/master | 2021-03-19T17:54:43.833876 | 2019-04-05T15:18:22 | 2019-04-05T15:18:22 | 111,657,371 | 4 | 3 | null | null | null | null | null | [
{
"alpha_fraction": 0.7192546725273132,
"alphanum_fraction": 0.7875776290893555,
"avg_line_length": 38.26829147338867,
"blob_id": "278093bbddaec4e4264760c9ed829b5ff5ca677b",
"content_id": "8ba4da619cafecfb1f2737bbe1663878f5ef7445",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2508,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 41,
"path": "/McGovern subgoal/README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "WINDOWS-1251",
"text": "# В данном коде реализован алгоритм из статьи Automatic Discovery of Subgoals inReinforcement Learning using Diverse Density:\n\nhttps://scholarworks.umass.edu/cgi/viewcontent.cgi?article=1017&context=cs_faculty_pubs\n\nВ mcgovern.py описаны два класса, класс Game с описанием среды, и класс agent с описанием параметров и свойств агента\n\n## Запускается скрипт запуском файла train.py\n\nПри его открытии сразу начинается обучение с параметрами, описанными в train.py, при желании их можно изменить.\nПри обучении всплывает окно \"Grid World\", где показывается текущии стадии обучения.\n\nЖелтым обозначается цель, куда должен прийти агент, красным подцель, которая находится через Diverse Density.\n\nСиним обозначается сам агент.\n\nВ консоле пишутся сообщения типа 4 3 position [0, 1, 2, 3] [-0.18281843807147813, -0.18395445100610527, -0.17562076331583998, -0.18189855164936664] \nГде 4 3 position это текущая позиция агента, [0, 1, 2, 3] это все доступные действия из этой позиции и последний массив содержит Q-values, исходя из которых и выбирается следующее действие\n\nДействие 0 - вверх\n\t 1 - вниз\n\t 2 - влево\n\t 3 - вправо\n\n\nВ train.py для создания агента с сеткой 10 на 10 (можно поставить любую) создается \n\n\tag = agent(10,10)\n\nЗатем надо указать стенку, она описывается 3 параметрами, 1 - на каком стобце эта стенка 2 - с какой строчки начинается просвет, 3 - на какой строчке заканчивается просвет\n\n\tag.stenka(5,4,6)\n\nПосле задаются координаты цели \n\t\n\tag.tcel(8,8)\n\nДалее нужно инициализировать начальные значения Q-table для выше выбранной среды\n\n\tag.init2()\n\nПосле этого агент ag готов для дальнейшего обучения\n"
},
{
"alpha_fraction": 0.5575733184814453,
"alphanum_fraction": 0.5616088509559631,
"avg_line_length": 40.76404571533203,
"blob_id": "4a40c21da43de8e6b63437e585a613a039503dd3",
"content_id": "525cbf795582653e6bb5003101b09d47c4e6c097",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7487,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 178,
"path": "/workshop/generate_graph.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import re\nimport json\n\nfrom HAM.HAM_core import Action, MachineRelation, Stop, Start, AbstractMachine, MachineGraph, Choice\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\nfrom utils.graph_drawer import draw_graph\n\n\nclass MachineStored:\n @staticmethod\n def ms_from_machine(machine: AbstractMachine, env):\n # TODO fix bug with double edges instead of on model\n vertex_types = sorted(machine.graph.vertices)\n graph_id = 0\n for left_ind in range(len(vertex_types)):\n for right_ind in range(len(vertex_types)):\n for relation in machine.graph.vertex_mapping[vertex_types[left_ind]]:\n if relation.right is vertex_types[right_ind]:\n if relation.label is None:\n graph_id |= (2 ** (left_ind * len(vertex_types) + right_ind))\n\n return MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_id, env=env)\n\n def __init__(self, vertex_types, binary_matrix_representation, env):\n # self.vertex_types = sorted(vertex_types)\n self.vertex_types = vertex_types\n self.binary_matrix_representation = binary_matrix_representation\n self.env = env\n\n for i in range(len(self.vertex_types) - 1):\n assert not self.vertex_types[i + 1] < self.vertex_types[i], \"should be sorted\"\n\n def to_dict(self):\n return {\"vertices\": [_.get_name() for _ in self.vertex_types],\n \"binary_matrix_representation\": self.binary_matrix_representation\n }\n\n @staticmethod\n def from_dict(graph_dict, env):\n vertex_types = []\n for v in graph_dict[\"vertices\"]:\n if isinstance(v, (list, tuple)):\n _, action_id = v\n vertex_types.append(Action(action=action_id))\n elif isinstance(v, str):\n if v == \"Choice\":\n vertex_types.append(Choice())\n elif v == \"Start\":\n vertex_types.append(Start())\n elif v == \"Stop\":\n vertex_types.append(Stop())\n else:\n raise TypeError\n else:\n raise TypeError\n return MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_dict[\"binary_matrix_representation\"], env=env)\n\n def get_machine_without_on_model(self):\n transitions = []\n for left_ind in range(len(self.vertex_types)):\n for right_ind in range(len(self.vertex_types)):\n left = self.vertex_types[left_ind]\n right = self.vertex_types[right_ind]\n if (2 ** (left_ind * len(self.vertex_types) + right_ind)) & self.binary_matrix_representation:\n if isinstance(left, Action):\n transitions.append(MachineRelation(left=left, right=right, label=0))\n else:\n transitions.append(MachineRelation(left=left, right=right))\n\n start, stop = None, None\n for vertex in self.vertex_types:\n if isinstance(vertex, Start):\n start = vertex\n elif isinstance(vertex, Stop):\n stop = vertex\n\n assert start is not None\n assert stop is not None\n\n return AbstractMachine(MachineGraph(transitions=transitions, vertices=self.vertex_types))\n\n def get_machine(self):\n transitions = []\n for left_ind in range(len(self.vertex_types)):\n for right_ind in range(len(self.vertex_types)):\n left = self.vertex_types[left_ind]\n right = self.vertex_types[right_ind]\n if (2 ** (left_ind * len(self.vertex_types) + right_ind)) & self.binary_matrix_representation:\n if isinstance(left, Action):\n transitions.append(MachineRelation(left=left, right=right, label=0))\n else:\n transitions.append(MachineRelation(left=left, right=right))\n\n start, stop = None, None\n for vertex in self.vertex_types:\n if isinstance(vertex, Start):\n start = vertex\n elif isinstance(vertex, Stop):\n stop = vertex\n\n assert start is not None\n assert stop is not None\n\n for vertex in [_ for _ in self.vertex_types if 
isinstance(_, Action)]:\n transitions.append(MachineRelation(left=vertex, right=stop, label=1))\n\n return AbstractMachine(MachineGraph(transitions=transitions, vertices=self.vertex_types))\n\n def get_max_index(self):\n return 2 ** (len(self.vertex_types) ** 2)\n\n def draw(self, filename):\n draw_graph(filename, self.get_machine().get_graph_to_draw(action_to_name_mapping=self.env.get_actions_as_dict(), no_edges_with_exit_f=True))\n s = None\n with open(\"{filename}.svg\".format(**locals()), \"r\") as f:\n s = f.readlines()\n s = [re.sub(r\"Action\\d+\", r\"Action\", _) for _ in s]\n s = [re.sub(r\"Choice\\d+\", r\"Choice\", _) for _ in s]\n s = [re.sub(r\"Call\\d+\", r\"Call\", _) for _ in s]\n s = [re.sub(r\"Stop\\d+\", r\"Stop\", _) for _ in s]\n s = [re.sub(r\"Start\\d+\", r\"Start\", _) for _ in s]\n with open(\"{filename}.svg\".format(**locals()), \"w\") as f:\n f.writelines(s)\n\n def draw_ru(self, filename):\n action_to_name_mapping = self.env.get_actions_as_dict()\n ru_mapping = {\"LEFT\": \"ВЛЕВО\",\n \"RIGHT\": \"ВПРАВО\",\n \"DOWN\": \"ВНИЗ\",\n \"TOGGLE\": \"ПЕРЕКЛ.\",\n \"UP\": \"ВВЕРХ\"\n }\n\n action_to_name_mapping_ru = {\n\n }\n for key in action_to_name_mapping.keys():\n assert key in ru_mapping, \"don't worry you just should add translation of key <<{key}>> to ru_mapping dict placed above\".format(**locals())\n action_to_name_mapping_ru[ru_mapping[key]] = action_to_name_mapping[key]\n\n draw_graph(filename, self.get_machine().get_graph_to_draw(action_to_name_mapping=action_to_name_mapping_ru, no_edges_with_exit_f=True))\n s = None\n\n with open(\"{filename}.svg\".format(**locals()), \"r\") as f:\n s = f.readlines()\n s = [re.sub(r\"Action\\d+\", r\"Действие\", _) for _ in s]\n s = [re.sub(r\"Choice\\d+\", r\"Выбор\", _) for _ in s]\n s = [re.sub(r\"Call\\d+\", r\"Вызов\", _) for _ in s]\n s = [re.sub(r\"Stop\\d+\", r\"Стоп\", _) for _ in s]\n s = [re.sub(r\"Start\\d+\", r\"Старт\", _) for _ in s]\n with open(\"{filename}.svg\".format(**locals()), \"w\") as f:\n f.writelines(s)\n\n\ndef main():\n env = ArmEnvToggleTopOnly(size_x=5, size_y=5, cubes_cnt=4, episode_max_length=600, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n\n ms = MachineStored(vertex_types=sorted([\n Start(),\n Stop(),\n Action(env.ACTIONS.LEFT),\n Action(env.ACTIONS.LEFT),\n Choice(),\n Action(env.ACTIONS.RIGHT),\n Action(env.ACTIONS.TOGGLE),\n Action(env.ACTIONS.TOGGLE),\n ]), binary_matrix_representation=42, env=env)\n ms.draw(\"a\")\n d = ms.to_dict()\n ms = MachineStored.from_dict(d, env=env)\n ms.draw(\"b\")\n # for i in range(100):\n # ms.binary_matrix_representation = i\n # ms.draw_ru(\"ololo{i}\".format(**locals()))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5117296576499939,
"alphanum_fraction": 0.5244925022125244,
"avg_line_length": 40.756343841552734,
"blob_id": "21993a18c3baaa3f4be9b147645adf8a0e931cd1",
"content_id": "617742d184bd36801259c2843d5705ceec759654",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8269,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 197,
"path": "/environments_dqn/arm_env_dqn_lift_cube.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\nimport numpy as np\nimport sys\nfrom gym import spaces\nimport gym\nfrom environments_dqn.arm_env_dqn import up_scaler, ArmEnvDQN\n\n\nclass ArmEnvDQN_2(ArmEnvDQN):\n def place_cubes(self, seed=None):\n if seed:\n np.random.seed(seed)\n self._grid = np.zeros(shape=(self._size_x, self._size_y), dtype=np.int32)\n\n cubes_left = self._cubes_cnt\n while cubes_left != 0:\n column = np.random.randint(self._size_y)\n for i in np.arange(self._size_x - 1, 0, -1):\n if self._grid[i, column] == 0 and (self._size_x - i) < self._tower_target_size:\n self._grid[i, column] = 1\n cubes_left -= 1\n break\n\n def __init__(self, size_x, size_y, cubes_cnt, scaling_coeff, episode_max_length, finish_reward, action_minus_reward,\n tower_target_size, seed=None):\n\n # checking for grid overflow\n assert cubes_cnt < size_x * size_y, \"Cubes overflow the grid\"\n\n self._size_x = size_x\n self._size_y = size_y\n self._cubes_cnt = cubes_cnt\n self._episode_max_length = episode_max_length\n self._finish_reward = finish_reward\n self._action_minus_reward = action_minus_reward\n self._tower_target_size = tower_target_size\n self._scaling_coeff = scaling_coeff\n self.seed = seed\n\n self.action_space = spaces.Discrete(6)\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(size_x * self._scaling_coeff, size_y * self._scaling_coeff, 3))\n\n self._episode_rewards = []\n self._episode_lengths = []\n\n _ = self.reset()\n\n def reset(self):\n self._episode_length = 0\n self._episode_reward = 0\n self._grid = np.zeros(shape=(self._size_x, self._size_y), dtype=np.int32)\n self._arm_x = 0\n self._arm_y = np.random.randint(self._size_y)\n self._done = False\n self._magnet_toggle = False\n\n # cubes_left = self._cubes_cnt\n # for (x, y), value in reversed(list(np.ndenumerate(self._grid))):\n # if cubes_left == 0:\n # break\n # cubes_left -= 1\n # self._grid[x, y] = 1\n self.place_cubes(self.seed)\n\n self._tower_height = self.get_tower_height() # инициализируем высоту башни\n self._current_state = self._grid\n\n return self.get_evidence_for_image_render()\n\n def get_evidence_for_image_render(self):\n res = np.array(self._grid, copy=True)\n arm_scale = self._scaling_coeff\n res[self._arm_x][self._arm_y] = 2\n res = up_scaler(res, arm_scale)\n for (x, y), value in np.ndenumerate(res):\n if value == 2:\n res[x:x + arm_scale, y:y + arm_scale] = 0\n res[x:x + arm_scale, y + arm_scale // 2] = 2\n res[x + arm_scale - 1, y:y + arm_scale] = 2\n break\n if self._magnet_toggle:\n res[res == 2] = 3\n\n size_i, size_j = res.shape\n channels = 3\n\n # Create an empty image\n img = np.zeros((size_i, size_j, channels), dtype=np.uint8)\n\n # Set the RGB values\n for x in range(img.shape[0]):\n for y in range(img.shape[1]):\n if res[x][y] == 1:\n img[x][y] = (230, 200, 150)\n\n if res[x][y] == 2:\n img[x][y] = (204, 0, 0)\n\n if res[x][y] == 3:\n img[x][y] = (51, 153, 255)\n return img\n\n def ok(self, x, y):\n return 0 <= x < self._grid.shape[0] and 0 <= y < self._grid.shape[1]\n\n def ok_and_empty(self, x, y):\n return self.ok(x, y) and self._grid[x][y] == 0\n\n # def grid_to_img(self):\n # \"\"\" Возвращает np.array размера [size_x, size_y] \"\"\"\n # grid = np.array(self._grid, copy=True)\n # grid[self._arm_x, self._arm_y] = 3 - self._magnet_toggle * 1\n # return grid\n\n def get_tower_height(self):\n h = 0\n for j in range(self._grid.shape[1]):\n t = 0\n for i in np.arange(self._grid.shape[0] - 1, 0, -1):\n if self._grid[i, j] == 1 and self._grid[i - 1, j] == 0 and (\n i + 1 == 
self._grid.shape[0] or self._grid[i + 1, j] == 1):\n t = self._grid.shape[0] - i\n break\n if t > h:\n h = t\n return h\n\n def step(self, a):\n\n self._episode_length += 1\n\n if a in self.MOVE_ACTIONS:\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n if self._magnet_toggle and self.ok(cube_x, cube_y) and self._grid[cube_x][cube_y] == 1:\n new_arm_x, new_arm_y = self._arm_x + self.MOVE_ACTIONS[a][0], self._arm_y + self.MOVE_ACTIONS[a][1]\n new_cube_x, new_cube_y = new_arm_x + cube_dx, new_arm_y + cube_dy\n self._grid[cube_x][cube_y] = 0\n if self.ok_and_empty(new_arm_x, new_arm_y) and self.ok_and_empty(new_cube_x, new_cube_y):\n self._arm_x, self._arm_y = new_arm_x, new_arm_y\n self._grid[new_cube_x][new_cube_y] = 1\n else:\n self._grid[cube_x][cube_y] = 1\n else:\n new_arm_x, new_arm_y = self._arm_x + self.MOVE_ACTIONS[a][0], self._arm_y + self.MOVE_ACTIONS[a][1]\n if self.ok_and_empty(new_arm_x, new_arm_y):\n self._arm_x, self._arm_y = new_arm_x, new_arm_y\n else:\n # cant move, mb -reward\n pass\n elif a == self.ACTIONS.ON:\n self._magnet_toggle = True\n elif a == self.ACTIONS.OFF:\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n if self.ok(cube_x, cube_y) and self._grid[cube_x, cube_y] == 1 and self._magnet_toggle:\n new_cube_x, new_cube_y = cube_x + cube_dx, cube_y + cube_dy\n while self.ok_and_empty(new_cube_x, new_cube_y):\n new_cube_x, new_cube_y = new_cube_x + cube_dx, new_cube_y + cube_dy\n new_cube_x, new_cube_y = new_cube_x - cube_dx, new_cube_y - cube_dy\n self._grid[new_cube_x, new_cube_y], self._grid[cube_x, cube_y] = self._grid[cube_x, cube_y], self._grid[\n new_cube_x, new_cube_y]\n self._magnet_toggle = False\n\n observation = self._grid\n self._current_state = observation\n reward = self._action_minus_reward\n if a == 0 or a == 2:\n reward += 10 * self._action_minus_reward\n\n # if self._tower_height < self.get_tower_height():\n # self._tower_height = self.get_tower_height()\n # reward += 10\n self._episode_reward += reward\n\n info = None\n # self.render_to_image()\n # observation (object): agent's observation of the current environment\n # reward (float) : amount of reward returned after previous action\n # done (boolean): whether the episode has ended, in which case further step() calls will return undefined results\n # info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n\n if self._arm_x == 0 and self._grid[1, self._arm_y] == 1 and self._grid[2, self._arm_y] == 0:\n self._done = True\n reward += self._finish_reward\n self._episode_reward += self._finish_reward\n info = True\n self._episode_rewards.append(self._episode_reward)\n self._episode_lengths.append(self._episode_length)\n return self.get_evidence_for_image_render(), reward, self._done, info\n\n if self._episode_max_length <= self._episode_length:\n self._done = True\n self._episode_rewards.append(self._episode_reward)\n self._episode_lengths.append(self._episode_length)\n return self.get_evidence_for_image_render(), reward, self._done, info\n\n"
},
{
"alpha_fraction": 0.6088157892227173,
"alphanum_fraction": 0.6289086937904358,
"avg_line_length": 48.45962905883789,
"blob_id": "a3d6a2022f5b565e0bbae6f0779831f01f010854",
"content_id": "9b7b36070df734a1ac38bfb51c98aed1cc17f84c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7963,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 161,
"path": "/DQN&Options end-to-end/graph2.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\nimport argparse\nimport gym\nfrom gym import wrappers\nimport os.path as osp\nimport random\nimport numpy as np\nimport itertools\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n\nfrom environments_dqn.arm_env_dqn import ArmEnvDQN\n\n\ndef get_session():\n tf.reset_default_graph()\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n session = tf.Session()\n return session\n\n\nenv = ArmEnvDQN(episode_max_length=200,\n size_x=4,\n size_y=3,\n cubes_cnt=3,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=3)\n\n\ndef conv_model(input_data, scope, flatten=True, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n if flatten:\n out = layers.flatten(out)\n return out\n\n\ndef mlp_model(input_data, output_len, scope, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=output_len, activation_fn=None)\n return out\n\n\nwith tf.Session() as session:\n frame_history_len = 1\n num_options = 2\n\n if len(env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. RAM)\n input_shape = env.observation_space.shape\n else:\n img_h, img_w, img_c = env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c) # size_x, size_y,\n\n num_actions = env.action_space.n\n\n # INPUT DATA: previous action and image\n prev_action = tf.placeholder(tf.float32, [None, num_options + 1], name=\"prev_action\")\n\n with tf.variable_scope('input_image'):\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n # CONVOLUTION\n convolution = conv_model(obs_t_float, scope=\"convolution\", reuse=False)\n\n # MANAGER\n with tf.variable_scope(\"manager\"):\n manager = mlp_model(convolution, num_options + 1, scope=\"manager\", reuse=False)\n manager_pred_ac = tf.argmax(manager, axis=1, name=\"manager_pred_ac\")\n manager_one_hot = tf.one_hot(manager_pred_ac, depth=num_options + 1, name=\"manager_one_hot\")\n\n # NETs to check if the option is terminated\n options_checkers = [tf.argmax(mlp_model(convolution, 2, scope='opt{0}_checker'.format(i + 1), reuse=False), axis=1)\n for i in range(num_options)]\n\n for i in range(len(options_checkers)):\n options_checkers[i] = tf.reshape(options_checkers[i], (tf.shape(options_checkers[i])[0], 1))\n\n with tf.variable_scope(\"check_option\"):\n options_check = tf.cast(tf.concat(options_checkers, 1, name=\"options_check\"), tf.float32)\n cond = tf.cast(tf.reduce_sum(tf.multiply(options_check, prev_action[:, 1:]), axis=1), tf.bool, name='cond')\n # cond = tf.cast(opt_check2, tf.bool, name = 'cond')\n\n # SELECT on whether the option terminated\n with tf.variable_scope(\"subselect\"):\n one_hot0 = tf.where(cond, 
manager_one_hot, prev_action, name=\"select1\")\n\n # SELECT on if it was option or not\n with tf.variable_scope(\"select_task\"):\n one_hot = tf.where(tf.cast(prev_action[:, 0], tf.bool), manager_one_hot, one_hot0, name=\"select2\")\n\n tasks = [mlp_model(convolution, num_actions, scope='task{0}'.format(i), reuse=False)\n for i in range(num_options + 1)]\n\n with tf.variable_scope(\"action\"):\n pred_q = tf.boolean_mask(tf.transpose(tasks, perm=[1, 0, 2]), tf.cast(one_hot, tf.bool), name=\"get_task\")\n pred_ac = tf.argmax(pred_q, axis=1, name=\"pred_ac\")\n\n session.run(tf.global_variables_initializer())\n\n saver1 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"convolution\"))\n saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task0\"))\n saver3 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task1\"))\n saver4 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task2\"))\n saver5 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt1_checker\"))\n saver6 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt2_checker\"))\n\n saver1.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/conv_graph.ckpt')\n saver2.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/flat_graph.ckpt')\n saver3.restore(session, '../experiments/DQN&Options end-to-end/experiment task1/saved_model/graph.ckpt')\n saver4.restore(session, '../experiments/DQN&Options end-to-end/experiment task2/saved_model/graph.ckpt')\n saver5.restore(session, '../experiments/DQN&Options end-to-end/experiment checker1/saved_model/graph.ckpt')\n saver6.restore(session, '../experiments/DQN&Options end-to-end/experiment checker2/saved_model/graph.ckpt')\n\n env.step(3)\n env.step(3)\n env.render()\n\n # print(session.run(manager_one_hot,\n # {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n print(session.run(options_checkers,\n {obs_t_ph: [env.get_evidence_for_image_render()],\n prev_action: [[1, 0, 0]]}))\n print(session.run(options_check,\n {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # # print(session.run(opt_check2, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()], prev_action: [[1,0,0], [1,0,0]]}))\n print(session.run(cond, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # print(session.run(one_hot0, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # print(session.run(one_hot, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # print(session.run(tasks, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # print(len(session.run(tasks, {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]})))\n # print(session.run(tf.cast(one_hot, tf.bool),\n # {obs_t_ph: [env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n # print(session.run(pred_ac, {obs_t_ph: 
[env.get_evidence_for_image_render(), env.get_evidence_for_image_render()],\n # prev_action: [[1, 0, 0], [1, 0, 0]]}))\n\n # summary_writer = tf.summary.FileWriter(\"graph_logs\", graph=tf.get_default_graph())\n"
},
{
"alpha_fraction": 0.5971648693084717,
"alphanum_fraction": 0.6169832348823547,
"avg_line_length": 37.44444274902344,
"blob_id": "edbcd7ce3192ff202d99704d9c0e10c5c7870e69",
"content_id": "f3acc323c7d12b9946745409d16fb3dd4b6ec7e5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7266,
"license_type": "permissive",
"max_line_length": 162,
"num_lines": 189,
"path": "/HAM/HAM_experiments/experiment_06_HAM_limited_graph/experiment_06_limited_graph.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import random\nfrom collections import namedtuple, defaultdict\n\nfrom gym import spaces\n\nfrom HAM.HAM_core import RandomMachine, MachineGraph, Start, Stop, Action, AutoBasicMachine, MachineRelation, Choice, Call, AbstractMachine, LoopInvokerMachine, \\\n RootMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, ham_runner, plot_multi, PlotParams\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import is_it_machine_runnable\nfrom HAM.HAM_experiments.experiment_05_HAM_NET.experiment_05_HAM_NET import super_runner\nfrom environments.arm_env.arm_env import ArmEnv\nfrom environments.env_core import CoreEnv\nfrom environments.env_utils import EnvForTesting, EnvForTesting2\nfrom environments.weak_methods import q_learning\nfrom utils.graph_drawer import draw_graph\n\n# maze = generate_maze_please(size_x=2, size_y=2)\n# env = MazeWorldEpisodeLength(maze=maze,finish_reward=1000)\nfrom utils.plotting import plot_multi_test\n\n\n# def super_runner(call_me_maybe, env):\n# start = Start()\n# choice_one = Choice()\n# actions = [Action(action=_) for _ in env.get_actions_as_dict().values()]\n# stop = Stop()\n#\n# call = Call(call_me_maybe)\n# transitions = [MachineRelation(left=start, right=choice_one), ]\n# for action in actions:\n# transitions.append(MachineRelation(left=choice_one, right=action))\n# transitions.append(MachineRelation(left=action, right=stop, label=0))\n# transitions.append(MachineRelation(left=action, right=stop, label=1))\n# transitions.append(MachineRelation(left=choice_one, right=call))\n# transitions.append(MachineRelation(left=call, right=stop))\n#\n# return AbstractMachine(graph=MachineGraph(transitions=transitions))\n\n\nclass StupidMachine(AbstractMachine):\n def __init__(self):\n action = Action(action=0)\n transition = (\n MachineRelation(left=Start(), right=action),\n MachineRelation(left=action, right=action, label=0),\n MachineRelation(left=action, right=Stop(), label=1),\n\n )\n super().__init__(graph=MachineGraph(transitions=transition))\n\n\nclass HAMsNet2(CoreEnv):\n ACTIONS = namedtuple(\"ACTIONS\",\n [\"ACTION_01\",\n \"ACTION_02\",\n \"ACTION_03\",\n \"ACTION_04\",\n \"ACTION_05\",\n \"ACTION_06\"])(\n ACTION_01=0,\n ACTION_02=1,\n ACTION_03=2,\n ACTION_04=3,\n ACTION_05=4,\n ACTION_06=5,\n )\n\n def __init__(self, env, num_of_episodes, max_size):\n self.machine = None\n self._reset()\n self.env = env\n self.num_of_episodes = num_of_episodes\n self.max_size = max_size\n self.dp = {}\n\n def _reset(self):\n self.machine = RandomMachine()\n self.state = tuple()\n self.last_reward = 0\n self.action_space = spaces.Discrete(len(self.ACTIONS))\n # TODO implement done\n self._done = False\n\n self.vertex_added = 0\n self.edges_added = 0\n\n self.machine = RandomMachine(graph=MachineGraph(transitions=[MachineRelation(left=Start(), right=Stop())]))\n\n def add(self, action):\n transitions = []\n\n for relation in self.machine.graph.transitions:\n if isinstance(relation.right, Stop) and (relation.label == 0 or isinstance(relation.left, Start)):\n a = Action(action=action)\n if relation.label == 0:\n transitions.append(MachineRelation(left=relation.left, right=a, label=0))\n else:\n transitions.append(MachineRelation(left=relation.left, right=a))\n transitions.append(MachineRelation(left=a, right=self.machine.graph.get_stop(), label=0))\n else:\n transitions.append(relation)\n res = MachineGraph(transitions=transitions)\n\n for vertex in res.get_special_vertices(Action):\n # print(\"::\", 
res.graph.action_vertex_label_mapping[vertex])\n if not res.vertex_mapping[vertex] and not res.vertex_reverse_mapping[vertex]:\n continue\n if 1 not in res.action_vertex_label_mapping[vertex]:\n res.transitions.append(MachineRelation(left=vertex, right=res.get_stop(), label=1))\n\n self.machine = RandomMachine(graph=MachineGraph(transitions=transitions))\n\n def _step(self, action):\n self.state = self.state + tuple([action])\n\n self.ham = RootMachine(LoopInvokerMachine(machine_to_invoke=super_runner(self.machine, self.env)))\n reward = None\n\n if action is None:\n raise KeyError\n elif action == self.ACTIONS.ACTION_01:\n self.add(Action(action=action))\n elif action == self.ACTIONS.ACTION_02:\n self.add(Action(action=action))\n elif action == self.ACTIONS.ACTION_03:\n self.add(Action(action=action))\n elif action == self.ACTIONS.ACTION_04:\n self.add(Action(action=action))\n elif action == self.ACTIONS.ACTION_05:\n self.add(Action(action=action))\n elif action == self.ACTIONS.ACTION_06:\n self.add(Action(action=action))\n\n if is_it_machine_runnable(self.machine):\n if self.state in self.dp:\n reward = self.dp[self.state]\n else:\n params = HAMParamsCommon(self.env)\n ham_runner(ham=self.ham,\n num_episodes=self.num_of_episodes,\n env=self.env, params=params,\n no_output=True\n )\n reward = sum(params.logs[\"ep_rewards\"])\n self.dp[self.state] = reward\n draw_graph(\"pics/\" + str(reward).rjust(10, \"0\") + str(self.state) + \" \",\n self.machine.get_graph_to_draw(action_to_name_mapping=self.env.get_actions_as_dict()))\n\n observation = self.state\n if len(self.state) >= self.max_size:\n self._done = True\n\n return observation, reward, self._done, None\n\n def _render(self, mode='human', close=False):\n pass\n\n\ndef get_current_state(self):\n return self.state\n\n\ndef is_done(self):\n return self._done\n\n\ndef get_actions_as_dict(self):\n return {_: getattr(self.ACTIONS, _) for _ in self.ACTIONS._fields}\n\n\ndef main():\n global_env = EnvForTesting2()\n env_obj = global_env.env\n net = HAMsNet2(env=env_obj, num_of_episodes=global_env.episodes, max_size=2)\n q_table = None\n q_stats, q_table = q_learning(env=net, num_episodes=500, gamma=1, eps=0.9, alpha=0.5, q_table=q_table)\n net.max_size = 3\n q_stats, q_table = q_learning(env=net, num_episodes=500, gamma=1, eps=0.5, alpha=0.5, q_table=q_table)\n net.max_size = 4\n q_stats, q_table = q_learning(env=net, num_episodes=500, gamma=1, eps=0.3, alpha=0.5, q_table=q_table)\n net.max_size = 5\n q_stats, q_table = q_learning(env=net, num_episodes=500, gamma=1, eps=0.2, alpha=0.5, q_table=q_table)\n net.max_size = 6\n q_stats, q_table = q_learning(env=net, num_episodes=500, gamma=1, eps=0.1, alpha=0.4, q_table=q_table)\n# 0035768353 (q-learning)\n# 0035786236(3, 1)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6790004968643188,
"alphanum_fraction": 0.7039884924888611,
"avg_line_length": 43.752689361572266,
"blob_id": "f508e2d20a1d698fd6dbdc74606ed0e7462512e7",
"content_id": "ca998f68e615ffdef31d3869404c64c2c6dd1ed0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4162,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 93,
"path": "/HAM/HAM_experiments/experiment_03_arm_env/experiment_03.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_core import AutoBasicMachine, RootMachine, LoopInvokerMachine, AbstractMachine, Start, Choice, Action, Stop, MachineRelation, Call, \\\n MachineGraph\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, maze_world_input_01, plot_multi, ham_runner, PlotParams\nfrom environments.arm_env.arm_env import ArmEnv\n\nto_plot = []\nenv = ArmEnv(episode_max_length=300,\n size_x=5,\n size_y=3,\n cubes_cnt=4,\n action_minus_reward=-1,\n finish_reward=100,\n tower_target_size=4)\nnum_episodes = 300\n\nparams = HAMParamsCommon(env)\nham_runner(ham=AutoBasicMachine(env), num_episodes=num_episodes, env=env, params=params)\nto_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_basic\"))\n\n# --------------------------------------------------------\n\npull_up_start = Start()\npull_up_on = Action(action=env.get_actions_as_dict()[\"ON\"])\npull_up_down_01 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\npull_up_down_02 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\npull_up_down_03 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\npull_up_down_04 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\npull_up_up_01 = Action(action=env.get_actions_as_dict()[\"UP\"])\npull_up_up_02 = Action(action=env.get_actions_as_dict()[\"UP\"])\npull_up_up_03 = Action(action=env.get_actions_as_dict()[\"UP\"])\npull_up_up_04 = Action(action=env.get_actions_as_dict()[\"UP\"])\npull_up_stop = Stop()\n\npull_up_transitions = (\n MachineRelation(left=pull_up_start, right=pull_up_on),\n\n MachineRelation(left=pull_up_on, right=pull_up_down_01, label=0),\n MachineRelation(left=pull_up_down_01, right=pull_up_down_02, label=0),\n MachineRelation(left=pull_up_down_02, right=pull_up_down_03, label=0),\n MachineRelation(left=pull_up_down_03, right=pull_up_down_04, label=0),\n MachineRelation(left=pull_up_down_04, right=pull_up_up_01, label=0),\n MachineRelation(left=pull_up_up_01, right=pull_up_up_02, label=0),\n MachineRelation(left=pull_up_up_02, right=pull_up_up_03, label=0),\n MachineRelation(left=pull_up_up_03, right=pull_up_up_04, label=0),\n MachineRelation(left=pull_up_up_04, right=pull_up_stop, label=0),\n\n MachineRelation(left=pull_up_on, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_01, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_02, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_03, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_04, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_01, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_02, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_03, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_04, right=pull_up_stop, label=1),\n)\npull_up = AbstractMachine(MachineGraph(transitions=pull_up_transitions))\n\nstart = Start()\nchoice_one = Choice()\nleft = Action(action=env.get_actions_as_dict()[\"LEFT\"])\nright = Action(action=env.get_actions_as_dict()[\"RIGHT\"])\noff = Action(action=env.get_actions_as_dict()[\"OFF\"])\n\ncall = Call(machine_to_call=pull_up)\n\nstop = Stop()\n\ntransitions = (\n MachineRelation(left=start, right=choice_one),\n MachineRelation(left=choice_one, right=left),\n MachineRelation(left=choice_one, right=right),\n MachineRelation(left=choice_one, right=off),\n MachineRelation(left=choice_one, right=call),\n\n MachineRelation(left=call, right=stop),\n\n MachineRelation(left=left, right=stop, label=0),\n MachineRelation(left=right, right=stop, 
label=0),\n MachineRelation(left=off, right=stop, label=0),\n\n MachineRelation(left=left, right=stop, label=1),\n MachineRelation(left=right, right=stop, label=1),\n MachineRelation(left=off, right=stop, label=1),\n)\n\npull_up_machine = RootMachine(machine_to_invoke=LoopInvokerMachine(AbstractMachine(MachineGraph(transitions=transitions))))\n\nparams = HAMParamsCommon(env)\nham_runner(ham=pull_up_machine, num_episodes=num_episodes, env=env, params=params)\nto_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_with_pull_up\"))\n\nplot_multi(to_plot)\n"
},
{
"alpha_fraction": 0.6247454285621643,
"alphanum_fraction": 0.6334012150764465,
"avg_line_length": 39.081634521484375,
"blob_id": "60d4530ca2310ffdc19c756d2c9ff4e976825698",
"content_id": "80d0b8b022b038a38e111a48b2148ab5a2fae76e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1964,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 49,
"path": "/article_experiments/05_net_nohand/net_nothand.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_core import RootMachine, LoopInvokerMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, ham_runner\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import create_random_machine\nfrom article_experiments.global_envs import MazeEnvArticleSpecial, ArmEnvArticle, MazeEnvArticle, \\\n get_cumulative_rewards, EnvironmentsArticle\nfrom utils.graph_drawer import draw_graph\n\nname = \"05_random\"\n\n\ndef run(global_env):\n rewards = None\n if isinstance(global_env, ArmEnvArticle):\n pass\n elif isinstance(global_env, MazeEnvArticle):\n pass\n elif isinstance(global_env, MazeEnvArticleSpecial):\n env = global_env.env\n seed = 573846788\n internal_machine = create_random_machine(maximal_number_of_vertex=6, maximal_number_of_edges=6,\n random_seed=seed,\n env=env)\n machine = RootMachine(machine_to_invoke=LoopInvokerMachine(machine_to_invoke=internal_machine))\n draw_graph(file_name=\"maze_env_special\",\n graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n else:\n raise KeyError\n\n if rewards is not None:\n full_name = name + \"_\" + global_env.__class__.__name__\n # with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n # for out in get_cumulative_rewards(rewards=rewards):\n # w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n\ndef main():\n for global_env in EnvironmentsArticle().environments:\n run(global_env)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.659375011920929,
"alphanum_fraction": 0.7093750238418579,
"avg_line_length": 37.400001525878906,
"blob_id": "c33bc6d5c47a9259bcab8285e44f327c2102a64c",
"content_id": "71aa25eafe59f8438b28982156ddfaec2bbe1fc6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 960,
"license_type": "permissive",
"max_line_length": 150,
"num_lines": 25,
"path": "/environments/env_utils.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_experiments.HAM_utils import maze_world_input_01\nfrom environments.arm_env.arm_env import ArmEnv\nfrom environments.grid_maze_env.grid_maze_generator import draw_maze, prepare_maze, generate_pattern\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\n\n\nclass EnvForTesting:\n def __init__(self):\n self.env = MazeWorldEpisodeLength(maze=maze_world_input_01(), finish_reward=2000, episode_max_length=400)\n # self.env = ArmEnv(size_x=5, size_y=4, cubes_cnt=4, episode_max_length=300, finish_reward=20000, action_minus_reward=-1, tower_target_size=4)\n self.episodes = 1600\n\n\nclass EnvForTesting2:\n def __init__(self):\n self.env = ArmEnv(size_x=4, size_y=3, cubes_cnt=3, episode_max_length=300, finish_reward=20000, action_minus_reward=-1, tower_target_size=3)\n self.episodes = 1800\n\n\ndef main():\n draw_maze(prepare_maze(maze_world_input_01()))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.557983934879303,
"alphanum_fraction": 0.576122522354126,
"avg_line_length": 34.587303161621094,
"blob_id": "7737507941a4ff32021a77f420d151e1b72beaeb",
"content_id": "1604e08b4ff60d7c3f6a39705ba623d689dc4875",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6726,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 189,
"path": "/SearchHie/test.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from random import randrange\n\nfrom HAM.HAM_experiments.HAM_utils import ham_runner, HAMParamsCommon\nfrom article_experiments.global_envs import MazeEnvArticle, MazeEnvArticleSpecial, ArmEnvArticle, EnvironmentsArticle, get_cumulative_rewards\nfrom environments.weak_methods import q_learning\n\nfrom HAM.HAM_core import AbstractMachine, Action, Stop, MachineRelation, Start, MachineGraph, Choice, Call, RootMachine, LoopInvokerMachine\nfrom SearchHie import goodhams\nfrom SearchHie.main import environments\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\nfrom utils.graph_drawer import draw_graph\n\n\ndef main():\n class UpMachine4(AbstractMachine):\n def __init__(self, env: ArmEnvToggleTopOnly):\n d1 = Action(action=env.ACTIONS.UP)\n d2 = Action(action=env.ACTIONS.UP)\n d3 = Action(action=env.ACTIONS.UP)\n d4 = Action(action=env.ACTIONS.UP)\n stop = Stop()\n transitions = (\n MachineRelation(left=Start(), right=d1),\n MachineRelation(left=d1, right=d2, label=0),\n MachineRelation(left=d2, right=d3, label=0),\n MachineRelation(left=d3, right=d4, label=0),\n MachineRelation(left=d4, right=stop, label=0),\n\n MachineRelation(left=d1, right=stop, label=1),\n MachineRelation(left=d2, right=stop, label=1),\n MachineRelation(left=d3, right=stop, label=1),\n MachineRelation(left=d4, right=stop, label=1),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n class UpMachine3(AbstractMachine):\n def __init__(self, env: ArmEnvToggleTopOnly):\n d1 = Action(action=env.ACTIONS.UP)\n d2 = Action(action=env.ACTIONS.UP)\n d3 = Action(action=env.ACTIONS.UP)\n # d4 = Action(action=env.ACTIONS.UP)\n stop = Stop()\n transitions = (\n MachineRelation(left=Start(), right=d1),\n MachineRelation(left=d1, right=d2, label=0),\n MachineRelation(left=d2, right=d3, label=0),\n MachineRelation(left=d3, right=stop, label=0),\n # MachineRelation(left=d4, right=stop, label=0),\n\n MachineRelation(left=d1, right=stop, label=1),\n MachineRelation(left=d2, right=stop, label=1),\n MachineRelation(left=d3, right=stop, label=1),\n # MachineRelation(left=d4, right=stop, label=1),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n a = [\n Choice(),\n Action(ArmEnvToggleTopOnly.ACTIONS.RIGHT),\n Action(ArmEnvToggleTopOnly.ACTIONS.LEFT),\n Action(ArmEnvToggleTopOnly.ACTIONS.DOWN),\n # Action(ArmEnvToggleTopOnly.ACTIONS.UP),\n\n Call(machine_to_call=UpMachine4(environments[1])),\n\n ]\n\n transitions = []\n for i in a:\n for j in a:\n if randrange(2):\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=j, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=j))\n # len_ = len(goodhams)\n # print(len_)\n # len_4 = len_ // 4 + 1\n # l1, r1 = 0, len_4\n # l2, r2 = len_4, 2 * len_4\n # l3, r3 = 2 * len_4, 3 * len_4\n # l4, r4 = 3 * len_4, 4 * len_4\n\n # print(l1, r1 )\n # print(l2, r2 )\n # print(l3, r3 )\n # print(l4, r4 )\n # exit(0)\n # for brute_force in goodhams:\n # for index, brute_force in enumerate(goodhams[l1: r1]):\n # for index, brute_force in enumerate(goodhams[l2: r2]):\n # for index, brute_force in enumerate(goodhams[l3: r3]):\n brute_force = 1180698\n\n # if bin(brute_force).count(\"1\") > 12 or bin(brute_force).count(\"1\") < 4:\n # continue\n\n # continue\n go_continue = False\n transitions = []\n ss = set()\n for ii in range(len(a)):\n for jj in range(len(a)):\n i = a[ii]\n j = a[jj]\n if (2 ** (ii * len(a) + jj)) & brute_force:\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, 
right=j, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=j))\n if ii in ss and isinstance(a[ii], (Action, Call)):\n go_continue = True\n break\n ss.add(ii)\n stop = Stop()\n for ii in range(len(a)):\n if ii not in ss:\n i = a[ii]\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=stop, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=stop))\n for i in a:\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=stop, label=1))\n transitions.append(MachineRelation(left=Start(), right=a[0]))\n machine = AbstractMachine(MachineGraph(transitions=transitions))\n am = RootMachine(LoopInvokerMachine(machine))\n env = environments[0]\n draw_graph(\"{brute_force}\".format(**locals()), am.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n\n name = \"02_auto\"\n\n def run(global_env):\n full_name = name\n params = HAMParamsCommon(environments[0])\n ham_runner(ham=am, num_episodes=global_episodes, env=env,params=params)\n rewards = params.logs[\"ep_rewards\"]\n # with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n # for out in get_cumulative_rewards(rewards=rewards):\n # w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n def main():\n # for global_env in EnvironmentsArticle().environments:\n run(EnvironmentsArticle().environments[0])\n\n if __name__ == '__main__':\n main()\n\n\nenv = environments[3]\n\nglobal_episodes = 6000\n\n\ndef go_q_learn():\n name = \"01_table_q-learning\"\n\n def run(global_env):\n full_name = name\n rewards, _ = q_learning(env=env, num_episodes=global_episodes)\n\n # with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n # for out in get_cumulative_rewards(rewards=rewards):\n # w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n def main():\n # for global_env in EnvironmentsArticle().environments:\n run(EnvironmentsArticle().environments[0])\n\n if __name__ == '__main__':\n main()\n\n\nif __name__ == '__main__':\n go_q_learn()\n main()\n"
},
{
"alpha_fraction": 0.5898564457893372,
"alphanum_fraction": 0.6107177138328552,
"avg_line_length": 30.47590446472168,
"blob_id": "4ca9333638c880d2bfaf61c28f3a8dab6ba8d5a2",
"content_id": "22c614ce19397a87267464da67a50c9fb4e18afe",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5225,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 166,
"path": "/DQN with Options/train_option2 (lift cube).py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import argparse\nimport gym\nfrom gym import wrappers\nimport os.path as osp\nimport random\nimport numpy as np\nimport itertools\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport time\n\nimport os\nimport sys\n\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\nimport utils.plotting as plotting\n\nfrom DQN import dqn\nfrom utils_dqn.dqn_utils import *\n# from atari_wrappers import *\n# from environments.arm_env.arm_env import ArmEnv\nfrom environments_dqn.arm_env_dqn_lift_cube import ArmEnvDQN_2\n\n\ndef arm_model(img_in, num_actions, scope, reuse=False):\n # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n with tf.variable_scope(scope, reuse=reuse):\n out = img_in\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n out = layers.flatten(out)\n with tf.variable_scope(\"action_value\"):\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)\n\n return out\n\n\ndef arm_learn(env, session, scope_name, num_timesteps, spec_file=None, exp_dir=None):\n lr_multiplier = 1.0\n lr_schedule = PiecewiseSchedule([\n (0, 1e-4 * lr_multiplier),\n (num_timesteps / 40, 1e-4 * lr_multiplier),\n (num_timesteps / 8, 5e-5 * lr_multiplier),\n ],\n outside_value=5e-5 * lr_multiplier)\n optimizer = dqn.OptimizerSpec(\n constructor=tf.train.AdamOptimizer,\n kwargs=dict(epsilon=1e-4),\n lr_schedule=lr_schedule\n )\n\n def stopping_criterion(t):\n return t >= num_timesteps\n\n exploration_schedule = PiecewiseSchedule(\n [\n (0, 1.0),\n (num_timesteps / 20, 0.3),\n (num_timesteps / 10, 0.1),\n (num_timesteps / 4, 0.01),\n ], outside_value=0.01\n )\n\n dqn.learn(\n env,\n q_func=arm_model,\n optimizer_spec=optimizer,\n session=session,\n scope_name=scope_name,\n exploration=exploration_schedule,\n stopping_criterion=stopping_criterion,\n replay_buffer_size=2000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=5000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=500,\n grad_norm_clipping=10,\n spec_file=spec_file,\n exp_dir=exp_dir\n )\n\n ep_rew = env.get_episode_rewards()\n ep_len = env.get_episode_lengths()\n\n return ep_rew, ep_len\n\n\ndef get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef set_global_seeds(i):\n try:\n import tensorflow as tf\n except ImportError:\n pass\n else:\n tf.set_random_seed(i)\n np.random.seed(i)\n random.seed(i)\n\n\ndef get_session():\n tf.reset_default_graph()\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n session = tf.Session()\n return session\n\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory. 
' + directory)\n\n\ndef main():\n env = ArmEnvDQN_2(episode_max_length=200,\n size_x=8,\n size_y=6,\n cubes_cnt=6,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=6)\n\n # create a new folder for this experiment\n os.chdir('../experiments/DQN with options/')\n dir_name = \"experiment1/option2\" # + str(datetime.datetime.now())[:-10]\n createFolder(dir_name)\n os.chdir('../../DQN with Options/')\n\n f = open('../experiments/DQN with options/' + dir_name + '/specifications.txt', 'a').close()\n env.write_env_spec('../experiments/DQN with options/' + dir_name + '/specifications.txt')\n\n session = get_session()\n\n start = time.time()\n ep_rew, ep_len = arm_learn(env, session, scope_name=\"option1\", num_timesteps=1500000,\n spec_file='../experiments/DQN with options/' + dir_name + '/specifications.txt',\n exp_dir='../experiments/DQN with options/' + dir_name)\n end = time.time()\n print((end - start) / 60)\n\n stats = plotting.EpisodeStats(\n episode_lengths=ep_len,\n episode_rewards=ep_rew)\n plotting.plot_episode_stats(stats, save_fig=True,\n fig_dir='../experiments/DQN with options/' + dir_name + '/',\n fig_name='smoothed_')\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5214332938194275,
"alphanum_fraction": 0.5323147773742676,
"avg_line_length": 37.06694412231445,
"blob_id": "9ffcfbf1ec4c1bb2e21e6db95db756a05c382408",
"content_id": "9fc83fcb85883901cf4277395be2b0f505533cb2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9123,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 239,
"path": "/environments_dqn/arm_env_dqn.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\nimport numpy as np\nimport sys\nfrom gym import spaces\nimport gym\nfrom environments.env_core import CoreEnv\n\n\ndef up_scaler(grid, up_size):\n res = np.zeros(shape=np.asarray(np.shape(grid)) * up_size)\n for (x, y), value in np.ndenumerate(grid):\n res[x * up_size:x * up_size + up_size, y * up_size:y * up_size + up_size] = grid[x][y]\n return res\n\n\nclass ArmEnvDQN(CoreEnv):\n metadata = {'render.modes': ['human', 'ansi']}\n\n ACTIONS = namedtuple(\"ACTIONS\", [\"LEFT\", \"UP\", \"RIGHT\", \"DOWN\", \"ON\", \"OFF\", ])(\n LEFT=0,\n UP=1,\n RIGHT=2,\n DOWN=3,\n ON=4,\n OFF=5,\n )\n\n MOVE_ACTIONS = {\n ACTIONS.UP: [-1, 0],\n ACTIONS.LEFT: [0, -1],\n ACTIONS.DOWN: [1, 0],\n ACTIONS.RIGHT: [0, 1],\n }\n\n def __init__(self, size_x, size_y, cubes_cnt, scaling_coeff, episode_max_length, finish_reward, action_minus_reward,\n tower_target_size):\n\n # checking for grid overflow\n assert cubes_cnt < size_x * size_y, \"Cubes overflow the grid\"\n\n self._size_x = size_x\n self._size_y = size_y\n self._cubes_cnt = cubes_cnt\n self._episode_max_length = episode_max_length\n self._finish_reward = finish_reward\n self._action_minus_reward = action_minus_reward\n self._tower_target_size = tower_target_size\n self._scaling_coeff = scaling_coeff\n\n self.action_space = spaces.Discrete(6)\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(size_x * self._scaling_coeff, size_y * self._scaling_coeff, 3))\n\n self._episode_rewards = []\n self._episode_lengths = []\n\n _ = self.reset()\n\n def reset(self):\n self._episode_length = 0\n self._episode_reward = 0\n self._grid = np.zeros(shape=(self._size_x, self._size_y), dtype=np.int32)\n self._arm_x = 0\n self._arm_y = 0\n self._done = False\n self._magnet_toggle = False\n\n cubes_left = self._cubes_cnt\n for (x, y), value in reversed(list(np.ndenumerate(self._grid))):\n if cubes_left == 0:\n break\n cubes_left -= 1\n self._grid[x, y] = 1\n\n self._tower_height = self.get_tower_height() # инициализируем высоту башни\n self._current_state = self._grid\n\n return self.get_evidence_for_image_render()\n\n def get_evidence_for_image_render(self):\n res = np.array(self._grid, copy=True)\n arm_scale = self._scaling_coeff\n res[self._arm_x][self._arm_y] = 2\n res = up_scaler(res, arm_scale)\n for (x, y), value in np.ndenumerate(res):\n if value == 2:\n res[x:x + arm_scale, y:y + arm_scale] = 0\n res[x:x + arm_scale, y + arm_scale // 2] = 2\n res[x + arm_scale - 1, y:y + arm_scale] = 2\n break\n if self._magnet_toggle:\n res[res == 2] = 3\n\n size_i, size_j = res.shape\n channels = 3\n\n # Create an empty image\n img = np.zeros((size_i, size_j, channels), dtype=np.uint8)\n\n # Set the RGB values\n for x in range(img.shape[0]):\n for y in range(img.shape[1]):\n if res[x][y] == 1:\n img[x][y] = (230, 200, 150)\n\n if res[x][y] == 2:\n img[x][y] = (204, 0, 0)\n\n if res[x][y] == 3:\n img[x][y] = (51, 153, 255)\n return img\n\n def ok(self, x, y):\n return 0 <= x < self._grid.shape[0] and 0 <= y < self._grid.shape[1]\n\n def ok_and_empty(self, x, y):\n return self.ok(x, y) and self._grid[x][y] == 0\n\n def get_tower_height(self):\n h = 0\n for j in range(self._grid.shape[1]):\n t = 0\n for i in np.arange(self._grid.shape[0] - 1, 0, -1):\n if self._grid[i, j] == 1 and self._grid[i - 1, j] == 0 and (\n i + 1 == self._grid.shape[0] or self._grid[i + 1, j] == 1):\n t = self._grid.shape[0] - i\n break\n if t > h:\n h = t\n return h\n\n def step(self, a, isoption=False):\n\n if not isoption:\n self._episode_length += 
1\n\n if a in self.MOVE_ACTIONS:\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n if self._magnet_toggle and self.ok(cube_x, cube_y) and self._grid[cube_x][cube_y] == 1:\n new_arm_x, new_arm_y = self._arm_x + self.MOVE_ACTIONS[a][0], self._arm_y + self.MOVE_ACTIONS[a][1]\n new_cube_x, new_cube_y = new_arm_x + cube_dx, new_arm_y + cube_dy\n self._grid[cube_x][cube_y] = 0\n if self.ok_and_empty(new_arm_x, new_arm_y) and self.ok_and_empty(new_cube_x, new_cube_y):\n self._arm_x, self._arm_y = new_arm_x, new_arm_y\n self._grid[new_cube_x][new_cube_y] = 1\n else:\n self._grid[cube_x][cube_y] = 1\n else:\n new_arm_x, new_arm_y = self._arm_x + self.MOVE_ACTIONS[a][0], self._arm_y + self.MOVE_ACTIONS[a][1]\n if self.ok_and_empty(new_arm_x, new_arm_y):\n self._arm_x, self._arm_y = new_arm_x, new_arm_y\n else:\n # cant move, mb -reward\n pass\n elif a == self.ACTIONS.ON:\n self._magnet_toggle = True\n elif a == self.ACTIONS.OFF:\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n if self.ok(cube_x, cube_y) and self._grid[cube_x, cube_y] == 1 and self._magnet_toggle:\n new_cube_x, new_cube_y = cube_x + cube_dx, cube_y + cube_dy\n while self.ok_and_empty(new_cube_x, new_cube_y):\n new_cube_x, new_cube_y = new_cube_x + cube_dx, new_cube_y + cube_dy\n new_cube_x, new_cube_y = new_cube_x - cube_dx, new_cube_y - cube_dy\n self._grid[new_cube_x, new_cube_y], self._grid[cube_x, cube_y] = self._grid[cube_x, cube_y], self._grid[\n new_cube_x, new_cube_y]\n self._magnet_toggle = False\n\n observation = self._grid\n self._current_state = observation\n reward = self._action_minus_reward\n\n self._episode_reward += reward\n\n info = None\n # observation (object): agent's observation of the current environment\n # reward (float) : amount of reward returned after previous action\n # done (boolean): whether the episode has ended, in which case further step() calls will return undefined results\n # info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n\n if self.get_tower_height() == self._tower_target_size and self._magnet_toggle == False:\n self._done = True\n reward += self._finish_reward\n self._episode_reward += self._finish_reward\n info = True\n self._episode_rewards.append(self._episode_reward)\n self._episode_lengths.append(self._episode_length)\n return self.get_evidence_for_image_render(), reward, self._done, info\n\n if self._episode_max_length <= self._episode_length:\n self._done = True\n self._episode_rewards.append(self._episode_reward)\n self._episode_lengths.append(self._episode_length)\n return self.get_evidence_for_image_render(), reward, self._done, info\n\n def is_done(self):\n return self._done\n\n # return observation\n def _get_obs(self):\n pass\n\n def get_episode_rewards(self):\n return self._episode_rewards\n\n def get_episode_lengths(self):\n return self._episode_lengths\n\n def render(self, mode='human', close=False):\n if close:\n return\n outfile = sys.stdout\n\n out = np.array(self._grid, copy=True)\n out[self._arm_x, self._arm_y] = 3 - self._magnet_toggle * 1\n\n outfile.write('\\n')\n outfile.write(str(out))\n outfile.write('\\n')\n\n def get_current_state(self):\n return self._current_state\n\n def get_actions_as_dict(self):\n return {_: getattr(self.ACTIONS, _) for _ in self.ACTIONS._fields}\n\n def write_env_spec(self, file):\n f = open(file, 'a')\n f.write(\"Environment 
specifications:\" + '\\n')\n f.write(\" size_x : {}\".format(self._size_x) + '\\n')\n f.write(\" size_y : {}\".format(self._size_y) + '\\n')\n f.write(\" cubes_cnt : {}\".format(self._cubes_cnt) + '\\n')\n f.write(\" episode_max_length : {}\".format(self._episode_max_length) + '\\n')\n f.write(\" finish_reward : {}\".format(self._finish_reward) + '\\n')\n f.write(\" action_minus_reward : {}\".format(self._action_minus_reward) + '\\n')\n f.write(\" tower_target_size : {}\".format(self._tower_target_size) + '\\n')\n f.write('\\n')\n f.close()\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5948553085327148,
"avg_line_length": 34.1129035949707,
"blob_id": "4cb3863f7f3f42f0221b4da1b6b6c98a3a0bd83c",
"content_id": "c63a3d30051801612799bb4243b3367a8ca7a79c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2177,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 62,
"path": "/article_experiments/global_envs.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_experiments.HAM_utils import maze_world_input_01\nfrom environments.arm_env.arm_env import ArmEnv\nfrom environments.grid_maze_env.grid_maze_generator import generate_maze_please, draw_maze, generate_pattern\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\n\naction_minus_reward = -0.0001\nfinish_reward = 0.5\n\n\ndef get_cumulative_rewards(rewards):\n res = []\n current_cumulative_reward = 0\n for reward in rewards:\n current_cumulative_reward += reward\n res.append(current_cumulative_reward)\n return res\n\n\nclass EnvironmentsArticle:\n def __init__(self):\n self.environments = [\n MazeEnvArticle(),\n ArmEnvArticle(),\n MazeEnvArticleSpecial(),\n ]\n\n\nclass ArmEnvArticle:\n def __init__(self):\n self.env = ArmEnv(size_x=5, size_y=4, cubes_cnt=4, episode_max_length=500, finish_reward=finish_reward,\n action_minus_reward=action_minus_reward,\n tower_target_size=4)\n self.episodes_count = 500\n\n\nclass MazeEnvArticle:\n def __init__(self):\n # draw_maze(maze_world_input_01())\n self.env = MazeWorldEpisodeLength(maze=maze_world_input_01(), finish_reward=finish_reward,\n episode_max_length=500,\n wall_minus_reward=action_minus_reward * 5,\n action_minus_reward=action_minus_reward)\n self.episodes_count = 500\n\n\nclass MazeEnvArticleSpecial:\n def __init__(self):\n # draw_maze(generate_maze_please(size_x=8, size_y=7))\n self.env = MazeWorldEpisodeLength(maze=generate_maze_please(size_x=8, size_y=8), finish_reward=finish_reward,\n episode_max_length=500,\n action_minus_reward=action_minus_reward,\n wall_minus_reward=action_minus_reward * 5)\n self.episodes_count = 500\n\n\ndef main():\n draw_maze(generate_pattern(64+16+256))\n # draw_maze(generate_maze_please(size_x=8, size_y=7))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7729257345199585,
"alphanum_fraction": 0.7882096171379089,
"avg_line_length": 49.88888931274414,
"blob_id": "333ce878bd4ebf2807752fbb6812a7fba608c160",
"content_id": "08dbc66722b5e78417b1ce028c3270a2d5fdd509",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 458,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 9,
"path": "/HAM/HAM_experiments/experiment_01_maze_env/experiment_01.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_core import AutoBasicMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, maze_world_input_01, plot_multi, ham_runner, PlotParams\nfrom environments.grid_maze_env.maze_world_env import MazeWorld\n\nenv = MazeWorld(maze_world_input_01())\nparams = HAMParamsCommon(env)\nham_runner(ham=AutoBasicMachine(env), num_episodes=300, env=env, params=params)\n\nplot_multi((PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_basic\"),))\n"
},
{
"alpha_fraction": 0.6163948774337769,
"alphanum_fraction": 0.6290858387947083,
"avg_line_length": 45.73448181152344,
"blob_id": "67ea21259530e6d77ad1075f44bd96d152af8a3b",
"content_id": "dd0b2ac9650e1fd718490af1885a26ebef3c3fdc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13553,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 290,
"path": "/DQN&Options end-to-end/Graph_train_manager.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import sys\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom utils_dqn.dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\n\ndef learn(env,\n n_options,\n conv_net,\n mlp,\n optimizer_spec,\n session,\n scope_name,\n exploration=LinearSchedule(300000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=10000,\n batch_size=32,\n gamma=0.99,\n learning_starts=5000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=1000,\n grad_norm_clipping=10):\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n if len(env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. RAM)\n input_shape = env.observation_space.shape\n else:\n img_h, img_w, img_c = env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c) # size_x, size_y,\n\n num_actions = env.action_space.n\n\n # INPUT DATA: previous action and image\n prev_action = tf.placeholder(tf.float32, [None, n_options + 1], name=\"prev_action\")\n\n with tf.variable_scope('input_image'):\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n # CONVOLUTION\n convolution = conv_net(obs_t_float, scope=\"convolution\", reuse=False)\n\n # MANAGER\n with tf.variable_scope(\"manager\"):\n manager = mlp(convolution, n_options + 1, scope=\"manager\", reuse=False)\n manager_pred_ac = tf.argmax(manager, axis=1, name=\"manager_pred_ac\")\n manager_one_hot = tf.one_hot(manager_pred_ac, depth=n_options + 1, name=\"manager_one_hot\")\n\n # NETs to check if the option is terminated\n options_checkers = [tf.argmax(mlp(convolution, 2, scope='opt{0}_checker'.format(i + 1), reuse=False), axis=1)\n for i in range(n_options)]\n\n for i in range(len(options_checkers)):\n options_checkers[i] = tf.reshape(options_checkers[i], (tf.shape(options_checkers[i])[0], 1))\n\n with tf.variable_scope(\"check_option\"):\n options_check = tf.cast(tf.concat(options_checkers, 1, name=\"options_check\"), tf.float32)\n cond = tf.cast(tf.reduce_sum(tf.multiply(options_check, prev_action[:, 1:]), axis=1), tf.bool, name='cond')\n # cond = tf.cast(opt_check2, tf.bool, name = 'cond')\n\n # SELECT on whether the option terminated\n with tf.variable_scope(\"subselect\"):\n one_hot0 = tf.where(cond, manager_one_hot, prev_action, name=\"select1\")\n\n # SELECT on if it was option or not\n with tf.variable_scope(\"select_task\"):\n one_hot = tf.where(tf.cast(prev_action[:, 0], tf.bool), manager_one_hot, one_hot0, name=\"select2\")\n\n # MLP to perform tasks\n tasks = [mlp(convolution, num_actions, scope='task{0}'.format(i), reuse=False)\n for i in range(n_options + 1)]\n\n # OUTPUT: action that agent need to perform\n with tf.variable_scope(\"action\"):\n pred_q = tf.boolean_mask(tf.transpose(tasks, perm=[1, 0, 2]), tf.cast(one_hot, tf.bool), name=\"get_task\")\n pred_ac = tf.argmax(pred_q, axis=1, name=\"pred_ac\")\n\n # placeholder for current action\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"act_t_ph\")\n\n # placeholder for current reward\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"rew_t_ph\")\n\n with 
tf.variable_scope(\"obs_tp1_ph\"):\n # placeholder for next observation (or state)\n obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_tp1_ph\")\n obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0\n\n # placeholder for end of episode mask\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done_mask_ph\")\n\n # placeholder for the time the option took\n opt_steps = tf.placeholder(tf.float32, [None], name=\"opt_steps\")\n\n with tf.variable_scope(\"pred_q_a\"):\n manager_pred_q_a = tf.reduce_sum(manager * tf.one_hot(act_t_ph, depth=n_options + 1), axis=1, name='pred_q_a')\n\n with tf.variable_scope(\"manager_target_net\"):\n target_conv = conv_net(obs_tp1_float, scope=\"target_convolution\", reuse=False)\n target_q = mlp(target_conv, n_options + 1, scope=\"manager_target_q_func\", reuse=False)\n\n with tf.variable_scope(\"target_q_a\"):\n target_q_a = rew_t_ph + (1 - done_mask_ph) * tf.pow(gamma, opt_steps) * tf.reduce_max(target_q, axis=1)\n\n with tf.variable_scope(\"Compute_bellman_error\"):\n total_error = tf.reduce_sum(huber_loss(manager_pred_q_a - tf.stop_gradient(target_q_a)), name='total_error')\n\n with tf.variable_scope(\"Hold_the_var\"):\n # Hold all of the variables of the Q-function network and target network, respectively.\n manager_conv_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='convolution')\n manager_target_conv_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope='manager_target_net/target_convolution')\n manager_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='manager/manager')\n manager_target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope='manager_target_net/manager_target_q_func')\n\n # construct optimization op (with gradient clipping)\n learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n with tf.variable_scope(\"Optimizer\"):\n optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)\n train_fn = minimize_and_clip(optimizer, total_error,\n var_list=manager_q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(manager_q_func_vars, key=lambda v: v.name),\n sorted(manager_target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n\n with tf.variable_scope(\"Update_target_fn\"):\n update_target_fn = tf.group(*update_target_fn, name='update_target_fn')\n\n # update_target_fn_conv will copy weights of convolution\n update_target_fn_conv = []\n for var, var_target in zip(sorted(manager_conv_vars, key=lambda v: v.name),\n sorted(manager_target_conv_vars, key=lambda v: v.name)):\n update_target_fn_conv.append(var_target.assign(var))\n\n with tf.variable_scope(\"Update_target_fn_conv\"):\n update_target_fn_conv = tf.group(*update_target_fn_conv, name='update_target_fn_conv')\n\n # construct the replay buffer with options\n replay_buffer = ReplayBufferOptions(replay_buffer_size, frame_history_len)\n\n ###############\n # RUN ENV #\n ###############\n model_initialized = False\n num_param_updates = 0\n mean_episode_reward = -float('nan')\n best_mean_episode_reward = -float('inf')\n last_obs = env.reset()\n previous_action = [[1, 0, 0]]\n LOG_EVERY_N_STEPS = 500\n\n saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='manager/manager'))\n\n saver1 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 
scope=\"convolution\"))\n saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task0\"))\n saver3 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task1\"))\n saver4 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task2\"))\n saver5 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt1_checker\"))\n saver6 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt2_checker\"))\n\n saver1.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/conv_graph.ckpt')\n saver2.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/flat_graph.ckpt')\n saver3.restore(session, '../experiments/DQN&Options end-to-end/experiment task1/saved_model/graph.ckpt')\n saver4.restore(session, '../experiments/DQN&Options end-to-end/experiment task2/saved_model/graph.ckpt')\n saver5.restore(session, '../experiments/DQN&Options end-to-end/experiment checker1/saved_model/graph.ckpt')\n saver6.restore(session, '../experiments/DQN&Options end-to-end/experiment checker2/saved_model/graph.ckpt')\n\n for t in itertools.count():\n ### 1. Check stopping criterion\n if stopping_criterion is not None and stopping_criterion(env, t):\n break\n\n ### 2. Step the env and store the transition\n\n # Store the latest observation that was recorded from the simulator.\n idx = replay_buffer.store_frame(last_obs)\n\n # Epsilon greedy exploration\n if not model_initialized or random.random() < exploration.value(t):\n action = random.randint(0, n_options)\n else:\n obs = replay_buffer.encode_recent_observation()\n action = session.run(pred_ac, {obs_t_ph: [obs]})[0]\n\n if action < env.action_space.n:\n next_obs, reward, done, info = env.step(action)\n opt_steps_n = 1\n else:\n # here the execution of the option\n next_obs, reward, done, opt_steps_n, info = options[action - env.action_space.n].step(env, isoption=True)\n env._episode_length += 1\n\n # Store the outcome\n replay_buffer.store_effect(idx, action, reward, done, opt_steps_n)\n last_obs = env.reset() if done else next_obs\n\n ### 3. Perform experience replay and train the network.\n\n if (t > learning_starts and t % learning_freq == 0 and\n replay_buffer.can_sample(batch_size)):\n\n # 3.a sample a batch of transitions\n obs_batch, act_batch, rew_batch, next_obs_batch, done_batch, opt_steps_batch = replay_buffer.sample(\n batch_size)\n\n # 3.b initialize the model if haven't\n if not model_initialized:\n initialize_interdependent_variables(session, tf.global_variables(), {\n obs_t_ph: obs_batch,\n obs_tp1_ph: next_obs_batch,\n })\n session.run(update_target_fn)\n model_initialized = True\n\n # 3.c train the model\n _, error = session.run([train_fn, total_error], {\n obs_t_ph: obs_batch,\n act_t_ph: act_batch,\n rew_t_ph: rew_batch,\n obs_tp1_ph: next_obs_batch,\n opt_steps: opt_steps_batch,\n done_mask_ph: done_batch,\n learning_rate: optimizer_spec.lr_schedule.value(t)\n })\n\n # 3.d periodically update the target network\n if t % target_update_freq == 0:\n session.run(update_target_fn)\n num_param_updates += 1\n\n ### 4. 
Log progress\n episode_rewards = env.get_episode_rewards()\n episode_lengths = env.get_episode_lengths()\n\n if len(episode_rewards) > 0 and len(episode_rewards) <= 50:\n mean_episode_reward = np.mean(episode_rewards)\n mean_episode_length = np.mean(episode_lengths)\n\n max_episode_reward = np.max(episode_rewards)\n min_episode_length = np.min(episode_lengths)\n\n min_episode_reward = np.min(episode_rewards)\n max_episode_length = np.max(episode_lengths)\n\n elif len(episode_rewards) > 50:\n mean_episode_reward = np.mean(episode_rewards[-50:])\n mean_episode_length = np.mean(episode_lengths[-50:])\n\n max_episode_reward = np.max(episode_rewards[-50:])\n min_episode_length = np.min(episode_lengths[-50:])\n\n min_episode_reward = np.min(episode_rewards[-50:])\n max_episode_length = np.max(episode_lengths[-50:])\n\n best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)\n\n if t % LOG_EVERY_N_STEPS == 0 and model_initialized:\n print(\"Timestep %d\" % (t,))\n print(\"mean reward (50 episodes) %f\" % mean_episode_reward)\n print(\"mean length (50 episodes) %f\" % mean_episode_length)\n print(\"max_episode_reward (50 episodes) %f\" % max_episode_reward)\n print(\"min_episode_length (50 episodes) %f\" % min_episode_length)\n print(\"min_episode_reward (50 episodes) %f\" % min_episode_reward)\n print(\"max_episode_length (50 episodes) %f\" % max_episode_length)\n print(\"best mean reward %f\" % best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % exploration.value(t))\n print(\"learning_rate %f\" % optimizer_spec.lr_schedule.value(t))\n print(\"\\n\")\n sys.stdout.flush()\n\n meta_graph_def = tf.train.export_meta_graph(filename=scope_name + '/graph.ckpt.meta', export_scope=scope_name)\n save_path = saver.save(session, scope_name + '/graph.ckpt', write_meta_graph=False)\n print(\"Model saved in path: %s\" % save_path)\n"
},
{
"alpha_fraction": 0.6295189261436462,
"alphanum_fraction": 0.6367776989936829,
"avg_line_length": 42.912498474121094,
"blob_id": "f370a1071432dabc391853635be1b18b066a5cad",
"content_id": "9e3e2ad0b1599fd11074d134b4a879239131d130",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7026,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 160,
"path": "/workshop/check_graphs.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import pathlib\n\nfrom HAM.HAM_core import Stop, Start, Action, Call, Choice, AbstractMachine, MachineRelation\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import is_it_machine_runnable, dfs_distinct_from_start\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\nfrom workshop.generate_combination import vertex_combination, vertex_list_to_str\nfrom workshop.generate_graph import MachineStored\n\nimport shutil\n\n\ndef is_ham_ok(machine: AbstractMachine):\n start = machine.graph.get_start()\n # check for exact 1 outgoing edges from Start\n if len(machine.graph.vertex_mapping[start]) != 1:\n return False\n # check for no incoming edges to Start\n if len(machine.graph.vertex_reverse_mapping[start]) != 0:\n return False\n\n stop = machine.graph.get_stop()\n # check for no outgoing edges from Stop\n if len(machine.graph.vertex_mapping[stop]) != 0:\n return False\n\n # check for exact 2 outgoing edges from actions (1 vertex is on_model end to stop)\n for action in machine.graph.get_special_vertices(Action):\n if len(machine.graph.vertex_mapping[action]) != 1:\n return False\n\n # check for self loops\n for edge in machine.graph.transitions:\n # MachineRelation.left\n if edge.left is edge.right:\n return False\n\n # no edges from Choice to Stop\n for choice in machine.graph.get_special_vertices(Choice):\n for relation in machine.graph.vertex_mapping[choice]:\n if isinstance(relation.right, Stop):\n return False\n\n # check for more than 1 outgoing edges from Choice\n for choice in machine.graph.get_special_vertices(Choice):\n if len(machine.graph.vertex_mapping[choice]) <= 1:\n return False\n\n return True\n\n\ndef check_for_one_component_graph(machine: AbstractMachine):\n visited = dfs_distinct_from_start(graph=machine.graph, vertex=machine.graph.get_start(), visited=[])\n return len(visited) == len(machine.graph.vertices)\n\n\ndef get_graph_id_fast(m: MachineStored, current_index=0, cur_id=0, ans=None):\n if ans is None:\n ans = []\n if current_index == len(m.vertex_types):\n ans.append(cur_id)\n return ans\n\n if isinstance(m.vertex_types[current_index], (Action, Start)):\n for i in range(1, len(m.vertex_types)):\n if i == current_index:\n continue\n get_graph_id_fast(m, current_index + 1, cur_id + (2 ** i) * (2 ** (len(m.vertex_types * current_index))), ans=ans)\n elif isinstance(m.vertex_types[current_index], Stop):\n get_graph_id_fast(m, current_index + 1, cur_id, ans=ans)\n elif isinstance(m.vertex_types[current_index], Choice):\n for i in range(1, 2 ** len(m.vertex_types)):\n get_graph_id_fast(m, current_index + 1, cur_id + i * (2 ** (len(m.vertex_types * current_index))), ans=ans)\n else:\n raise TypeError\n return ans\n\n\ndef generate_good_graphs(env, vertexes, vertex_count):\n good_graphs = []\n vertex_count += 1\n for max_vertex_count in range(vertex_count):\n vc = vertex_combination(vertex_types=vertexes, max_vertex_count=max_vertex_count)\n for index, vertex_types in enumerate(vc):\n for graph_id in sorted(get_graph_id_fast(MachineStored(vertex_types=vertex_types, binary_matrix_representation=412, env=env))):\n ms = MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_id, env=env)\n if is_ham_ok(ms.get_machine_without_on_model()):\n if check_for_one_component_graph(ms.get_machine_without_on_model()):\n if is_it_machine_runnable(ms.get_machine_without_on_model()):\n good_graphs.append(ms)\n return good_graphs\n\n\ndef generate_good_graph_ids(env, vertexes, vertex_count):\n good_graphs = []\n vertex_count 
+= 1\n for max_vertex_count in range(vertex_count):\n vc = vertex_combination(vertex_types=vertexes, max_vertex_count=max_vertex_count)\n for index, vertex_types in enumerate(vc):\n for graph_id in sorted(get_graph_id_fast(MachineStored(vertex_types=vertex_types, binary_matrix_representation=412, env=env))):\n ms = MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_id, env=env)\n if is_ham_ok(ms.get_machine_without_on_model()):\n if check_for_one_component_graph(ms.get_machine_without_on_model()):\n if is_it_machine_runnable(ms.get_machine_without_on_model()):\n good_graphs.append(graph_id)\n return good_graphs\n\n\ndef generate_machines_by_ids(env, vertexes, ids):\n machines = []\n for max_vertex_count in range(7):\n vc = vertex_combination(vertex_types=vertexes, max_vertex_count=max_vertex_count)\n for index, vertex_types in enumerate(vc):\n for graph_id in ids:\n ms = MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_id, env=env)\n if is_ham_ok(ms.get_machine_without_on_model()):\n if check_for_one_component_graph(ms.get_machine_without_on_model()):\n if is_it_machine_runnable(ms.get_machine_without_on_model()):\n machines.append(ms)\n return machines\n\n\ndef main():\n env = ArmEnvToggleTopOnly(size_x=5, size_y=5, cubes_cnt=4, episode_max_length=600, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n vertexes = sorted([\n Stop(),\n Start(),\n\n Action(env.ACTIONS.LEFT),\n Action(env.ACTIONS.RIGHT),\n Action(env.ACTIONS.UP),\n Action(env.ACTIONS.DOWN),\n # Action(env.ACTIONS.TOGGLE),\n Choice(),\n # Action(env.ACTIONS.LEFT),\n # Action(env.ACTIONS.RIGHT),\n # Action(env.ACTIONS.UP),\n # Action(env.ACTIONS.DOWN),\n # Action(env.ACTIONS.TOGGLE),\n # Choice(),\n ])\n\n # clearing directory\n pathlib.Path('pics/').mkdir(parents=True, exist_ok=True)\n shutil.rmtree('pics/')\n pathlib.Path('pics/').mkdir(parents=True, exist_ok=True)\n # brute force\n for max_vertex_count in range(7):\n vc = vertex_combination(vertex_types=vertexes, max_vertex_count=max_vertex_count)\n for index, vertex_types in enumerate(vc):\n for graph_id in sorted(get_graph_id_fast(MachineStored(vertex_types=vertex_types, binary_matrix_representation=412, env=env))):\n ms = MachineStored(vertex_types=vertex_types, binary_matrix_representation=graph_id, env=env)\n if is_ham_ok(ms.get_machine_without_on_model()):\n if check_for_one_component_graph(ms.get_machine_without_on_model()):\n if is_it_machine_runnable(ms.get_machine_without_on_model()):\n ms.draw(\"pics/\" + str(max_vertex_count) + \":\" + str(index) + \":\" + str(graph_id))\n print(\"added\")\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.42540621757507324,
"alphanum_fraction": 0.4483581483364105,
"avg_line_length": 33.11627960205078,
"blob_id": "37f78b2c42ea37ac46498c4f32e79b3274db3da4",
"content_id": "e1f86aa47e61f9451e732fede63488a7218f1a18",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8801,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 258,
"path": "/McGovern subgoal/train.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport math\nfrom mcgovern import Game\nfrom mcgovern import agent\n#from setup_flags import set_up\n\n#FLAGS = set_up()\n\nfrom tkinter import *\nfrom tkinter import ttk\nimport time\n\ndef draw():\n global canvas\n for i in range(ag.a.shape[0]):\n for j in range(ag.a.shape[1]):\n\n if ag.a[i][j] != 1:\n color = \"white\"\n else:\n color = \"black\"\n id_num = i * len(ag.a[0]) + j + 1\n # print(id_num)\n canvas.itemconfig(id_num,fill=color)\n state = ag.posx*ag.w+ag.posy \n canvas.itemconfig(state+1,fill=\"blue\")\n goal = ag.th*ag.w+ag.tw \n canvas.itemconfig(goal+1,fill=\"yellow\") \n subgoal = ag.xx*ag.w+ag.yy \n canvas.itemconfig(subgoal+1,fill=\"red\")\n root.update()\n # time.sleep(0.1)\n\n\n\nag = agent(10,10)\nag.stenka(5,4,6)\nag.tcel(8,8)\nag.init2()\nag.reset()\n\n\npixel_width = 480\nblock_length = pixel_width / ag.w\nroot = Tk()\nroot.title(\"Grid World\")\ncanvas = Canvas(root,width = \"500\",height = \"500\")\ncanvas.grid(column=0, row=0, sticky=(N, W, E, S))\nfor i in range(ag.h):\n for j in range(ag.w):\n x_1 = 10 + block_length * j\n y_1 = 10 + block_length * i\n x_2 = x_1 + block_length\n y_2 = y_1 + block_length\n\n if ag.a[i][j] != 1:\n color = \"white\"\n else:\n color = \"black\"\n\n canvas.create_rectangle(x_1,y_1,x_2,y_2,fill=color)\n\nag = agent(10,10)\nag.stenka(5,4,6)\nag.tcel(8,8)\nag.init2()\nfor epochs in range(3000):\n ag.reset()\n bag = []\n trajs = []\n ag.transition1 = np.zeros((ag.h,ag.w,4))\n ag.transition[:,:,4] = 0\n \n for i in range(ag.h):\n for j in range(ag.w):\n if ag.B[i,j]==0:\n ag.transition[i,j,4]=1\n if ag.transition[i,j,0]==1 and [i-1,j] in ag.setI:\n ag.transition1[i,j,0]=1\n\n if ag.transition[i,j,1]==1 and [i+1,j] in ag.setI:\n ag.transition1[i,j,1]=1\n\n if ag.transition[i,j,2]==1 and [i,j-1] in ag.setI:\n ag.transition1[i,j,2]=1\n\n if ag.transition[i,j,3]==1 and [i,j+1] in ag.setI:\n ag.transition1[i,j,3]=1\n \n ag.transition[ag.xx,ag.yy,4]=0\n ag.transition1[ag.xx,ag.yy] = ag.transition[ag.xx,ag.yy,:4] \n num_opt = 0 \n \n for i in range(50):\n actions = ag.get_poss_next_states(ag.transition)\n startOption = False\n vall = [value for index,value in enumerate(ag.Qt[ag.posx,ag.posy]) if index in actions]\n print(ag.posx,ag.posy, 'position',actions,vall)\n maxval = max(vall)\n maxindval = [index for index,value in enumerate(ag.Qt[ag.posx,ag.posy]) if (index in actions and value==maxval)]\n next_s = random.choice(maxindval)\n \n if next_s==0:\n next_x,next_y = ag.posx-1,ag.posy\n if next_s==1:\n next_x,next_y = ag.posx+1,ag.posy\n if next_s==2:\n next_x,next_y = ag.posx,ag.posy-1\n if next_s==3:\n next_x,next_y = ag.posx,ag.posy+1\n if next_s==4:\n Rr = 0\n iii = 0\n mmax_Q = -9999\n oldX = ag.posx\n oldY = ag.posy\n while [ag.posx,ag.posy] in ag.setI and [ag.posx,ag.posy]!=[ag.xx,ag.yy] and iii<10:\n num_opt+=1\n\n startOption = True\n actions = ag.get_poss_next_states(ag.transition1)\n\n vall = [value for index,value in enumerate(ag.Qt1[ag.posx,ag.posy]) if index in actions]\n maxval = max(vall)\n maxindval = [index for index,value in enumerate(ag.Qt1[ag.posx,ag.posy]) if (index in actions and value==maxval)]\n next_s = random.choice(maxindval)\n print(ag.posx,ag.posy, 'POSITION',next_s,actions,vall)\n if next_s==0:\n next_x,next_y = ag.posx-1,ag.posy\n if next_s==1:\n next_x,next_y = ag.posx+1,ag.posy\n if next_s==2:\n next_x,next_y = ag.posx,ag.posy-1\n if next_s==3:\n next_x,next_y = ag.posx,ag.posy+1\n poss_next_next_states = ag.get_poss_next_states(ag.transition1,nnext=next_s)\n max_Q = 
-9999\n \n for j in range(len(poss_next_next_states)):\n nn_s = poss_next_next_states[j]\n q = ag.Qt1[next_x,next_y,nn_s]\n if q > max_Q:\n max_Q = q \n \n R = ag.reward[ag.posx,ag.posy,next_s]\n if next_s==0 and iii>0:\n if [(ag.posx-1),(ag.posy)]==[ag.xx,ag.yy]:\n R+=1\n if next_s==1 and iii>0:\n if [(ag.posx+1),ag.posy]==[ag.xx,ag.yy]:\n R+=1\n if next_s==2 and iii>0:\n if [ag.posx,(ag.posy-1)]==[ag.xx,ag.yy]:\n R+=1\n if next_s==3 and iii>0:\n if [ag.posx,(ag.posy+1)]==[ag.xx,ag.yy]:\n R+=1\n if R>5:\n print('ADD REWARD ',ag.posx,ag.posy,next_s)\n Rr+=R*pow(0.9,iii)\n if max_Q>mmax_Q:\n mmax_Q = max_Q\n ag.Qt1[ag.posx,ag.posy][next_s] = ((1 - ag.lr) * ag.Qt1[ag.posx,ag.posy][next_s]) + (ag.lr * (R +(ag.gamma * max_Q)))\n randm = random.random()\n if randm>0.9:\n next_s = ag.get_rnd_next_state(ag.transition1)\n r,tr = ag.act(next_s,ag.transition1)\n draw()\n trajs.append(next_s)\n bag.append(tr)\n iii+=1\n R = Rr\n max_Q = mmax_Q\n ag.Qt[oldX,oldY][4] = ((1 - ag.lr) * ag.Qt[oldX,oldY][4]) + (ag.lr * (R +(ag.gamma * max_Q)))\n ag.transition[:,:,4] = 0 \n\n \n if startOption==False: \n poss_next_next_states = ag.get_poss_next_states(ag.transition,nnext=next_s) \n\n max_Q = -9999\n for j in range(len(poss_next_next_states)):\n nn_s = poss_next_next_states[j]\n q = ag.Qt[next_x,next_y,nn_s]\n if q > max_Q:\n max_Q = q\n R = ag.reward[ag.posx,ag.posy,next_s]\n ag.Qt[ag.posx,ag.posy][next_s] = ((1 - ag.lr) * ag.Qt[ag.posx,ag.posy][next_s]) + (ag.lr * (R +(ag.gamma * max_Q)))\n\n\n randm = random.random()\n if randm>0.9:\n next_s = random.randint(0,3)\n r,tr = ag.act(next_s,ag.transition)\n draw()\n trajs.append(next_s)\n bag.append(tr)\n if r=='win':\n ag.positivebag.append(bag)\n break \n if r!='win':\n ag.negativebag.append(bag)\n ag.bags.append(bag)\n ag.trajss.append(trajs)\n ag.DD = ag.DDf()\n ag.DD = np.log(ag.DD)*(-1) \n ind = np.where(ag.DD==ag.DD.max())\n x_y_coords = zip(ind[0], ind[1])\n # print('!!!!!!!!!!!!!!',ind,ag.DD.max())\n ag.setI = []\n ag.NeSetI = []\n ag.B = np.ones((ag.h,ag.w))*7\n options= 0\n maxro = 0\n dooption = False\n for x,y in x_y_coords:\n if ([x,y] not in ag.staticfilter) and [x,y] in [item for sublist in ag.bags for item in sublist]:\n ag.ro[x,y]+=1\n ag.B = np.ones((ag.h,ag.w))*7\n \n if (ag.ro[x,y]>=4 and ag.ro[x,y]>=maxro):\n maxro = ag.ro[x,y]\n ag.xx = x\n ag.yy = y\n dooption = True\n options +=1\n ag.setI = []\n\n for bbag,ttraj in zip(ag.bags[-20:],ag.trajss[-20:]):\n\n if [ag.xx,ag.yy] in bbag[2:]:\n NeSetIfu = []\n for i2,i3 in enumerate(zip(bbag,ttraj)):\n inst = i3[0]\n insttr = i3[1]\n NeSetIfu.append(inst)\n if inst not in ag.setI:\n ag.setI.append(inst)\n if [ag.xx,ag.yy] == inst:\n break \n ag.NeSetI.append(NeSetIfu)\n\n if dooption: \n for ib in range(ag.h):\n for jb in range(ag.w):\n if [ib,jb] not in ag.setI:\n ag.B[ib,jb] = 1\n else:\n ag.B[ib,jb] = 0\n ag.B[ag.xx,ag.yy] = 0 \n print(ag.B)\n print(ag.xx,ag.yy,' XX YY')\n ag.Qt[:,:,4]*=(1-ag.B)\n ag.ro =ag.ro*0.8\n \n # break \nroot.destroy()"
},
{
"alpha_fraction": 0.6517192721366882,
"alphanum_fraction": 0.6587572693824768,
"avg_line_length": 40.7899169921875,
"blob_id": "48be1f86c2eb4c53dca7de452bf64e2f1e737061",
"content_id": "ee91a3f54380085df378bb8caeb4a99b4471def0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4973,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 119,
"path": "/article_experiments/04_handcrafted_hie/handcrafted_hie.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_core import RootMachine, LoopInvokerMachine, Action, Choice, MachineRelation, Stop, Start, MachineGraph, \\\n AbstractMachine, RandomMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, ham_runner, super_runner\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import create_random_machine\nfrom article_experiments.global_envs import MazeEnvArticleSpecial, ArmEnvArticle, MazeEnvArticle, \\\n get_cumulative_rewards, EnvironmentsArticle\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\nfrom utils.graph_drawer import draw_graph\n\nname = \"04_random_handcrafted\"\n\n\ndef run(global_env):\n rewards = None\n if isinstance(global_env, ArmEnvArticle):\n env = global_env.env\n internal_machine = M1(env=env)\n machine = RootMachine(LoopInvokerMachine(super_runner(call_me_maybe=internal_machine, env=env)))\n draw_graph(file_name=\"maze_env_special\",\n graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n elif isinstance(global_env, MazeEnvArticle):\n env = global_env.env\n internal_machine = M2(env=env)\n machine = RootMachine(LoopInvokerMachine(super_runner(call_me_maybe=internal_machine, env=env)))\n draw_graph(file_name=\"maze_env_special\",\n graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n elif isinstance(global_env, MazeEnvArticleSpecial):\n env = global_env.env\n internal_machine = M3(env=env)\n machine = RootMachine(LoopInvokerMachine(super_runner(call_me_maybe=internal_machine, env=env)))\n draw_graph(file_name=\"maze_env_special\",\n graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n else:\n raise KeyError\n\n if rewards is not None:\n full_name = \"_\" + global_env.__class__.__name__\n with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n for out in get_cumulative_rewards(rewards=rewards):\n w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n\ndef main():\n for global_env in EnvironmentsArticle().environments:\n run(global_env)\n\n\ndef add_action_transitions(transitions):\n res = MachineGraph(transitions=transitions)\n stop = res.get_stop()\n for vertex in res.get_special_vertices(Action):\n if not res.vertex_mapping[vertex] and not res.vertex_reverse_mapping[vertex]:\n continue\n if 1 not in res.action_vertex_label_mapping[vertex]:\n res.transitions.append(MachineRelation(left=vertex, right=stop, label=1))\n return res.transitions\n\n\nclass M1(AbstractMachine):\n def __init__(self, env: MazeWorldEpisodeLength):\n stop = Stop()\n up1 = Action(env.ACTIONS.UP)\n up2 = Action(env.ACTIONS.UP)\n transitions = [\n MachineRelation(left=Start(), right=up1),\n MachineRelation(left=up1, right=up2, label=0),\n MachineRelation(left=up2, right=stop, label=0),\n ]\n transitions = add_action_transitions(transitions)\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass 
M2(AbstractMachine):\n def __init__(self, env: MazeWorldEpisodeLength):\n stop = Stop()\n up1 = Action(env.ACTIONS.UP)\n choice = Choice()\n transitions = [\n MachineRelation(left=Start(), right=up1),\n MachineRelation(left=up1, right=choice, label=0),\n MachineRelation(left=choice, right=stop),\n MachineRelation(left=choice, right=up1),\n ]\n transitions = add_action_transitions(transitions)\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass M3(AbstractMachine):\n def __init__(self, env: MazeWorldEpisodeLength):\n stop = Stop()\n left1 = Action(env.ACTIONS.LEFT)\n up2 = Action(env.ACTIONS.UP)\n transitions = [\n MachineRelation(left=Start(), right=left1),\n MachineRelation(left=left1, right=up2, label=0),\n MachineRelation(left=up2, right=stop, label=0),\n ]\n transitions = add_action_transitions(transitions)\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7502497434616089,
"alphanum_fraction": 0.7542457580566406,
"avg_line_length": 35.38181686401367,
"blob_id": "005e49fe76feff980ba2254f83c1180c5b7df322",
"content_id": "2d9f4e1f9cfb6815dadade624086d57d37fb0c8b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2002,
"license_type": "permissive",
"max_line_length": 325,
"num_lines": 55,
"path": "/README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "# HRL-grid\n\n\nThe project is designed to simulate hierarchical reinforcement learning algorithms. \nThere is two various environments: [grid_maze_env README](https://github.com/cog-isa/HRL-grid/blob/master/environments/grid_maze_env/GRID_MAZE_README.md) and [arm_env README](https://github.com/cog-isa/HRL-grid/blob/master/environments/arm_env/ARM_README.md). One should check the environments' READMEs for more information. \n\n\n### Hierarchies of Abstract Machines \nYou can run experiments with handcrafted machines hierarchies in module [ham_experiments](https://github.com/cog-isa/HRL-grid/tree/master/HAM_new/HAM_experiments). \nAnd also examine [HAM's readme file](https://github.com/cog-isa/HRL-grid/blob/master/HAM_new/HAM_README.md).\n\n### Prerequisites\n\nThings you need to install the software:\n\n```\nsudo apt-get install python3-tk\nsudo apt-get install python3-dev\n```\n\nFor drawing graphs with pygraphviz one should install:\n\n```\nsudo apt-get install graphviz\nsudo apt-get install graphviz-dev\nsudo apt-get install python3.5-dev\nsudo pip3.5 install pygraphviz --install-option=\"--include-path=/usr/include/graphviz\" --install-option=\"--library-path=/usr/lib/graphviz/\"\n```\n## Installing\n\nTo run `random_policy.py` and test the environment, you must install the following libraries:\n```\ngym\nscipy\npandas\nmatplotlib\nnumpy\npygraphviz\n```\n\n## Getting Started\n\n\nRun the file [`q-policy.py`](https://github.com/cog-isa/HRL-grid/blob/master/environments/q-policy.py), which will show an example of interaction on both environments with q-learning and random policy. \n\n## Authors\n\n* **Alexander Panov** - *Project management* - [grafft](https://github.com/grafft)\n* **Alexey Skrynnik** - *Environments.* *Hierarchical RL on HAMs* - [Tviskaron](https://github.com/tviskaron)\n* **Vadim Kuzmin** - *Hierarchical RL on Options* - [vtkuzmin](https://github.com/vtkuzmin)\n\n\n## License\n\nThis project is licensed under the Apache License 2.0 - see the [LICENSE](https://github.com/cog-isa/HRL-grid/blob/master/LICENSE) file for details\n\n"
},
{
"alpha_fraction": 0.6944444179534912,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 8,
"blob_id": "d26db739ea586ac3158e839a2a2a9eb5813e1cc2",
"content_id": "519fb7396c4a776430c64a7fc2649eb109e2ab55",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 72,
"license_type": "permissive",
"max_line_length": 14,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "seaborn==0.9.0\ntqdm\ngym==0.9.4\nscipy\npandas\nmatplotlib\nnumpy\npygraphviz\n"
},
{
"alpha_fraction": 0.5655698776245117,
"alphanum_fraction": 0.5722460746765137,
"avg_line_length": 32.83871078491211,
"blob_id": "177693bab8cd3f496932d16dc4b8d02cc6dac866",
"content_id": "6a937006ac34880a9f6391fbfec9ef68bce827a2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2097,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 62,
"path": "/environments/arm_env/arm_env_rand_init.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from itertools import product\nimport random\nimport numpy as np\nfrom gym import spaces\nfrom environments.arm_env.arm_env import ArmEnv\n\nclass ArmEnvRand(ArmEnv):\n\n def place_cubes(self, seed=None):\n if seed:\n np.random.seed(seed)\n self._grid = np.zeros(shape=(self._size_x, self._size_y), dtype=np.int32)\n\n cubes_left = self._cubes_cnt\n while cubes_left != 0:\n column = np.random.randint(self._size_y)\n for i in np.arange(self._size_x-1, 0, -1):\n if self._grid[i, column] == 0 and (self._size_x -i) < self._tower_target_size:\n self._grid[i, column] = 1\n cubes_left -= 1\n break\n\n def __init__(self, size_x, size_y, cubes_cnt, episode_max_length, finish_reward, action_minus_reward, tower_target_size, seed = None):\n self._size_x = size_x\n self._size_y = size_y\n self._cubes_cnt = cubes_cnt\n self._episode_max_length = episode_max_length\n self._finish_reward = finish_reward\n self._action_minus_reward = action_minus_reward\n self._tower_target_size = tower_target_size\n # checking for grid overflow\n assert cubes_cnt < size_x * size_y, \"Cubes overflow the grid\"\n\n self.place_cubes(seed)\n self.reset_grid = np.copy(self._grid)\n\n self.reset()\n\n self.action_space = spaces.Discrete(6)\n self.grid_to_id = {}\n\n def _reset(self):\n self._episode_length = 0\n self._done = False\n self._magnet_toggle = False\n self._grid = np.copy(self.reset_grid)\n\n # cartesian product\n arm_pos = list(product(*[np.arange(self._size_x), np.arange(self._size_y)]))\n\n for (x, y), value in reversed(list(np.ndenumerate(self._grid))):\n if self._grid[x, y] == 1:\n arm_pos.remove((x, y))\n\n arm = random.choice(arm_pos)\n self._arm_x = arm[0]\n self._arm_y = arm[1]\n\n self._current_state = self.grid_to_bin()\n self.tower_height = self.get_tower_height()\n\n return self._get_obs()"
},
{
"alpha_fraction": 0.4508419334888458,
"alphanum_fraction": 0.4752851724624634,
"avg_line_length": 33.19330978393555,
"blob_id": "ca2851898f37f84b1a47e8f40a0ee77b27541c21",
"content_id": "3d633b1581e5d0016c27073233a5f5529e9f523b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9205,
"license_type": "permissive",
"max_line_length": 215,
"num_lines": 269,
"path": "/McGovern subgoal/mcgovern.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport math\n\nclass Game:\n old_state = None\n old_action = None\n \n \n def __init__(self,h,w,par1=0,par2=0,par3=0,th=0,tw=0):\n self.e = 0.01\n self.alpha = 0.1\n self.gamma = 0.9\n self.rate = 0.99\n self.rewmove = -0.1\n self.rwd = 10\n self.lr = 0.05\n self.thres = 0.5\n self.h = h\n self.w = w\n self.a = np.zeros((h,w))\n self.a[0]=1\n self.a[-1]=1\n self.a[:,0]=1\n self.a[:,-1]=1\n self.posx=0\n self.posy=0\n self.th = th\n self.tw = tw\n self.par1 = par1\n self.par2 = par2\n self.par3 = par3\n self.reward = np.ones((self.h,self.w,4))*self.rewmove\n self.transition = np.zeros((self.h,self.w,5))\n \n \n def reset(self):\n self.a = np.zeros((self.h,self.w))\n self.a[0]=1\n self.a[-1]=1\n self.a[:,0]=1\n self.a[:,-1]=1\n self.stenka(self.par1,self.par2,self.par3)\n self.posy = random.randint(1, self.par1-1)\n self.posx = random.randint(1, self.h-2)\n self.a[self.posx,self.posy]=5\n self.a[self.th,self.tw]=8\n \n def draw(self):\n print(self.a)\n \n \n def stenka(self,par1,par2,par3):\n self.par1=par1\n self.par2=par2\n self.par3=par3\n if par1<2 or par1>self.w+2:\n print('wrong parametr par1')\n return\n if par2>par3:\n print('wrong parametr par2')\n return\n if par3>self.h:\n print('wrong parametr par3')\n return\n self.a[:,par1]=1\n for i in range(par3-par2):\n self.a[par2+i,par1]=0\n \n for i in range(self.h):\n for j in range(self.w):\n if self.a[i,j]!=1:\n if self.a[i-1,j]!=1:\n self.transition[i,j,0]=1\n if self.a[i+1,j]!=1:\n self.transition[i,j,1]=1\n if self.a[i,j-1]!=1:\n self.transition[i,j,2]=1\n if self.a[i,j+1]!=1:\n self.transition[i,j,3]=1 \n \n \n def update(self,xold,yold,xnew,ynew):\n if xnew == self.th and ynew == self.tw:\n print('GAME WIN')\n # self.__init__(self.h,self.w,par1=self.par1,par2=self.par2,par3=self.par3,th=self.th,tw=self.tw)\n # self.stenka(self.par1,self.par2,self.par3)\n # self.tcel(self.th,self.tw)\n # self.draw() \n return 'win'\n k = self.a[xold,yold]\n self.a[xold,yold] = self.a[xnew,ynew]\n self.a[xnew,ynew] = k\n return 'move'\n \n def play(self):\n k=True\n while k:\n key = ord(getch())\n print(key)\n if key==27:\n k=False\n if key == 80: #Down arrow\n self.down()\n elif key == 72: #Up arrow\n self.up()\n elif key == 75: #Left arrow\n self.left()\n elif key == 77: #Right arrow\n self.right()\n print(self.a) \n \n def tcel(self,h,w):\n self.th=h\n self.tw=w\n self.a[h,w]=8\n \n trans = self.transition[h,w]\n if trans[0]==1:\n self.reward[self.th-1,self.tw,1]=self.rwd\n if trans[1]==1:\n self.reward[self.th+1,self.tw,0]=self.rwd\n if trans[2]==1:\n self.reward[self.th,self.tw-1,3]=self.rwd\n if trans[3]==1:\n self.reward[self.th,self.tw+1,2]=self.rwd\n \n def up(self):\n xold,yold = self.posx,self.posy\n if self.a[self.posx-1,self.posy] ==1:\n print('udar v stenku!')\n a,tr = 'udar',[self.posx,self.posy]\n else:\n self.posx = self.posx-1\n a,tr = self.update(xold,yold,self.posx,self.posy),[self.posx,self.posy]\n return a,tr\n \n def down(self):\n xold,yold = self.posx,self.posy\n if self.a[self.posx+1,self.posy] ==1:\n print('udar v stenku!')\n a,tr = 'udar',[self.posx,self.posy]\n else:\n self.posx = self.posx+1\n a,tr = self.update(xold,yold,self.posx,self.posy),[self.posx,self.posy]\n return a,tr \n \n def left(self):\n xold,yold = self.posx,self.posy\n if self.a[self.posx,self.posy-1] ==1:\n print('udar v stenku!')\n a,tr = 'udar',[self.posx,self.posy]\n else:\n self.posy = self.posy-1\n a,tr = self.update(xold,yold,self.posx,self.posy),[self.posx,self.posy]\n return a,tr \n \n def 
right(self):\n xold,yold = self.posx,self.posy\n if self.a[self.posx,self.posy+1] ==1:\n print('udar v stenku!')\n a,tr = 'udar',[self.posx,self.posy]\n else:\n self.posy = self.posy+1\n a,tr = self.update(xold,yold,self.posx,self.posy),[self.posx,self.posy]\n return a,tr\n \n def act(self,key,transition):\n randm = random.random()\n # if randm>0.9:\n # key = self.get_rnd_next_state(transition)\n # print(key,' key') \n if key ==0:\n r,tr = self.up()\n if key ==1:\n r,tr = self.down()\n if key ==2:\n r,tr = self.left()\n if key ==3:\n r,tr = self.right()\n return r,tr\n \n def get_poss_next_states(self,transition,nnext=8):\n # given a state s and a feasibility matrix F\n # get list of possible next states\n if nnext==8:\n tr = transition[self.posx,self.posy]\n actions = [index for index,value in enumerate(tr) if value==1]\n if nnext!=8:\n if nnext==0:\n tr = transition[self.posx-1,self.posy]\n actions = [index for index,value in enumerate(tr) if value==1]\n if nnext==1:\n tr = transition[self.posx+1,self.posy]\n actions = [index for index,value in enumerate(tr) if value==1]\n if nnext==2:\n tr = transition[self.posx,self.posy-1]\n actions = [index for index,value in enumerate(tr) if value==1]\n if nnext==3:\n tr = transition[self.posx,self.posy+1]\n actions = [index for index,value in enumerate(tr) if value==1]\n \n return actions \n def get_rnd_next_state(self,transition):\n # given a state s, pick a feasible next state\n poss_next_states = self.get_poss_next_states(transition)\n next_state = poss_next_states[np.random.randint(0,len(poss_next_states))]\n return next_state\n\n\nclass agent(Game):\n def __init__(self,h,w):\n Game.__init__(self,h,w,par1=0,par2=0,par3=0,th=0,tw=0)\n self.positivebag = []\n self.negativebag = []\n self.bags = []\n self.trajss = []\n \n \n \n def init2(self):\n self.DD = np.ones((self.h,self.w))\n self.ro = np.zeros((self.h,self.w))\n self.Qt = np.zeros((self.h,self.w,5))\n self.Qt1 = np.zeros((self.h,self.w,4))\n self.setI = []\n self.NeSetIfu = []\n self.B = np.ones((self.h,self.w))*7\n self.transition1 = np.zeros((self.h,self.w,4))\n self.xx,self.yy =0,0\n \n self.staticfilter = [[self.th,self.tw],[self.th-1,self.tw],[self.th+1,self.tw],[self.th,self.tw-1],[self.th,self.tw+1],[self.th+1,self.tw+1],[self.th-1,self.tw-1],[self.th+1,self.tw-1],[self.th-1,self.tw+1]]\n for i in range(self.h):\n self.staticfilter.append([i,0])\n self.staticfilter.append([i,1])\n self.staticfilter.append([i,self.w-1])\n # self.staticfilter.append([i,self.w-2])\n for i in range(self.w):\n self.staticfilter.append([0,i])\n self.staticfilter.append([self.h-1,i])\n self.staticfilter.append([1,i])\n # self.staticfilter.append([self.h-2,i])\n \n def DDf(self):\n self.DD = np.ones((self.h,self.w))\n for i in range(self.h):\n for j in range(self.w):\n for n in range(len(self.positivebag)):\n if [i,j] in self.positivebag[n]:\n sump = 1\n for p in range(len(self.positivebag[n])):\n sumbc=0\n for k in range(2):\n sumbc += pow((self.positivebag[n][p][k]-self.th),2) \n sump *= 1-math.exp(-sumbc) \n self.DD[i,j] *= (1- sump)\n\n for i in range(self.h):\n for j in range(self.w):\n for n in range(len(self.negativebag)):\n if [i,j] in self.negativebag[n]:\n sump = 1\n for p in range(len(self.negativebag[n])):\n sumbc=0\n for k in range(2):\n sumbc += pow((self.negativebag[n][p][k]-self.tw),2) \n sump *= 1-math.exp(-sumbc) \n self.DD[i,j] *= (sump)\n return self.DD "
},
{
"alpha_fraction": 0.562174916267395,
"alphanum_fraction": 0.575413703918457,
"avg_line_length": 27.01324462890625,
"blob_id": "b4a1cde388453b27c601091bcc3dea765df0707a",
"content_id": "749982bd5fd84adb39deb6ff0bf8134a18d07b82",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4230,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 151,
"path": "/environments/q-policy.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import itertools\nimport sys\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom environments.arm_env.arm_env import ArmEnv\nfrom environments.grid_maze_env.grid_maze_generator import generate_maze_please\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\nfrom utils.plotting import plot_multi_test\n\n\ndef arg_max_action(q_dict, state, action_space):\n result_action = 0\n for action_to in range(action_space):\n if q_dict[state, action_to] > q_dict[state, result_action]:\n result_action = action_to\n return result_action\n\n\ndef q_learning(env, num_episodes, eps=0.1, alpha=0.1, gamma=0.9):\n to_plot = []\n\n # initialize q-function\n q_table = defaultdict(lambda: 0)\n\n for i_episode in range(num_episodes):\n # Print out which episode we're on, useful for debugging.\n ep_reward = 0\n if (i_episode + 1) % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode + 1, num_episodes), end=\"\")\n sys.stdout.flush()\n eps = eps - 0.01 * eps\n\n # Reset the environment and pick the first state\n state = env.reset()\n\n for t in itertools.count():\n # E-greedy\n if np.random.rand(1) < eps:\n # choosing a random action\n action = np.random.choice(env.action_space.n, size=1)[0]\n else:\n # choosing arg_max action\n action = arg_max_action(q_dict=q_table, state=state, action_space=env.action_space.n)\n\n next_state, reward, done, _ = env.step(action)\n # print(q_table[state, action])\n q_table[state, action] = (1 - alpha) * q_table[state, action] + alpha * (\n reward + gamma * q_table[next_state, arg_max_action(q_dict=q_table, state=next_state, action_space=env.action_space.n)])\n\n # Update statistics\n ep_reward += reward\n\n if done:\n break\n\n state = next_state\n to_plot.append(ep_reward)\n return to_plot, q_table\n\n\ndef check_policy(env, q_table):\n state = env.reset()\n s_r = 0\n s_t = 0\n\n for t in itertools.count():\n # WE CAN PRINT ENVIRONMENT STATE\n env.render()\n\n # Take a step\n action = arg_max_action(q_dict=q_table, state=state, action_space=env.action_space.n)\n # action = np.argmax(q_table[state, :])\n next_state, reward, done, _ = env.step(action)\n\n # Update statistics\n s_r += reward\n s_t = t\n\n if done:\n break\n\n state = next_state\n return s_r, s_t\n\n\ndef random_policy(env, num_episodes, ):\n to_plot = []\n\n for i_episode in range(num_episodes):\n ep_reward = 0\n if (i_episode + 1) % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode + 1, num_episodes), end=\"\")\n sys.stdout.flush()\n env.reset()\n for t in itertools.count():\n action = np.random.choice(env.action_space.n, size=1)[0]\n next_state, reward, done, _ = env.step(action)\n\n ep_reward += reward\n\n if done:\n break\n to_plot.append(ep_reward)\n return to_plot\n\n\ndef grid_maze_env():\n env = MazeWorldEpisodeLength(maze=generate_maze_please())\n ep_length = 2000\n\n # q-learning policy on MazeWorldEpisodeLength\n q_stats, q_table = q_learning(env, ep_length)\n s, t = check_policy(env, q_table)\n print(s, t)\n\n # random policy on MazeWorldEpisodeLength\n r_stats = random_policy(env, ep_length)\n\n plot_multi_test([q_stats, r_stats])\n\n\ndef arm_env():\n env = ArmEnv(episode_max_length=500,\n size_x=10,\n size_y=10,\n cubes_cnt=5,\n action_minus_reward=-1,\n finish_reward=1000,\n tower_target_size=5)\n\n ep_length = 10000\n # q-learning policy on MazeWorldEpisodeLength\n q_stats, q_table = q_learning(env, ep_length)\n s, t = check_policy(env, q_table)\n print(s, t)\n\n # random policy on MazeWorldEpisodeLength\n r_stats = random_policy(env, 
ep_length)\n\n plot_multi_test([q_stats, r_stats])\n\n\ndef main():\n grid_maze_env()\n arm_env()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7383592128753662,
"alphanum_fraction": 0.7383592128753662,
"avg_line_length": 25.52941131591797,
"blob_id": "91a0ced1e1655948c11ab04524ba71b51a194862",
"content_id": "c978391aac16ebcd402c0ede5b28a8cfdff2738b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 17,
"path": "/environments/env_core.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import gym\n\n\nclass CoreEnv(gym.Env):\n def is_done(self): raise NotImplementedError\n\n def get_current_state(self): raise NotImplementedError\n\n def get_actions_as_dict(self): raise NotImplementedError\n\n def _step(self, action): raise NotImplementedError\n\n def _reset(self): raise NotImplementedError\n\n def _render(self, mode='human', close=False): raise NotImplementedError\n\n # def _seed(self, seed=None): raise NotImplementedError\n"
},
{
"alpha_fraction": 0.6185809969902039,
"alphanum_fraction": 0.6320087313652039,
"avg_line_length": 32.809814453125,
"blob_id": "20b260c71f64ef05647f0b8ae3e1df16e9a81718",
"content_id": "d5b540715b32184bb118af649a4110669caf1ee3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5511,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 163,
"path": "/DQN&Options end-to-end/checker1_train.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import sys\nimport gym.spaces\nimport itertools\nimport os\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom utils_dqn.dqn_utils import *\n\nimport datetime\n\n\ndef conv_model(input_data, scope, flatten=True, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n if flatten:\n out = layers.flatten(out)\n return out\n\n\ndef mlp_model(input_data, output_len, scope, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=output_len, activation_fn=None)\n return out\n\n\ndef train(conv_net,\n checker_mlp,\n session,\n epochs,\n X_train,\n y_train,\n batch_size=16,\n exp_dir=None):\n input_shape = X_train[0].shape\n\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n target = tf.placeholder(tf.float32, [None, 2], name=\"target\")\n\n conv = conv_net(obs_t_float, scope=\"convolution\", reuse=False)\n pred_y = checker_mlp(conv, 2, scope=\"opt1_checker\", reuse=False)\n\n with tf.variable_scope(\"Compute_loss\"):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_y, labels=target)\n cost = tf.reduce_mean(cross_entropy)\n\n with tf.variable_scope(\"Compute_loss\"):\n y_target = tf.argmax(target, axis=1, name=\"y_target\")\n y_pred = tf.argmax(pred_y, axis=1, name=\"y_pred\")\n correct_prediction = tf.equal(y_pred, y_target)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name=\"accuracy\")\n\n with tf.variable_scope(\"Hold_the_var\"):\n # Hold all of the variables of the Q-function network and target network, respectively.\n vars1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt1_checker\")\n\n optimizer = tf.train.AdamOptimizer().minimize(cost, var_list=vars1)\n\n saver1 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"convolution\"))\n saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"opt1_checker\"))\n\n session.run(tf.global_variables_initializer())\n saver1.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/conv_graph.ckpt')\n\n iterations = int(len(X_train) / batch_size)\n\n for epoch in range(epochs):\n\n for batch in range(iterations):\n # 3.a sample a batch of transitions\n idx0 = 0 + batch * batch_size\n idx1 = max(batch_size + batch * batch_size, len(X_train))\n\n obs_batch, target_batch = X_train[idx0:idx1], y_train[idx0:idx1]\n\n # 3.c train the model\n _, loss, train_accuracy = session.run([optimizer, cost, accuracy], {\n obs_t_ph: obs_batch,\n target: target_batch\n })\n\n print(\"epoch {0}: , loss: {1} , accuracy: {2}\\n\".format(epoch, loss, train_accuracy))\n sys.stdout.flush()\n\n # meta_graph_def = tf.train.export_meta_graph(filename=exp_dir + '/saved_model/graph.ckpt.meta')\n save_path2 = saver2.save(session, exp_dir + '/saved_model/graph.ckpt', write_meta_graph=False)\n\n\ndef get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = 
device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef set_global_seeds(i):\n try:\n import tensorflow as tf\n except ImportError:\n pass\n else:\n tf.set_random_seed(i)\n np.random.seed(i)\n random.seed(i)\n\n\ndef get_session():\n tf.reset_default_graph()\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n session = tf.Session()\n return session\n\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory. ' + directory)\n\n\ndef main():\n # create a new folder for this\n os.chdir('../experiments/DQN&Options end-to-end/')\n dir_name = \"experiment \" + str(datetime.datetime.now())[:-10]\n createFolder(dir_name)\n os.chdir('../../DQN&Options end-to-end/')\n\n session = get_session()\n\n X_dataset = np.load('../experiments/DQN&Options end-to-end/experiment task1/obs_dataset.npy')\n y_datatset = np.load('../experiments/DQN&Options end-to-end/experiment task1/done_dataset.npy')\n\n y_train = []\n for i in y_datatset:\n if i == 1:\n y_train.append([0, 1])\n else:\n y_train.append([1, 0])\n\n train(conv_model,\n mlp_model,\n session=session,\n epochs=600,\n X_train=X_dataset,\n y_train=y_train,\n # X_test=X_test,\n # y_test=y_test,\n batch_size=16,\n exp_dir='../experiments/DQN&Options end-to-end/' + dir_name\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5749027132987976,
"alphanum_fraction": 0.6053826212882996,
"avg_line_length": 31.13020896911621,
"blob_id": "ef914f6f599a7161565b4ba2f7a10575bfd313a7",
"content_id": "d3947a721f7d17e3bf5facbd0c964ad55b81fe71",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6168,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 192,
"path": "/DQN with Options/train_over_options.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import argparse\nimport gym\nfrom gym import wrappers\nimport os.path as osp\nimport random\nimport numpy as np\nimport itertools\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nimport os\nimport sys\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\nimport utils_dqn.plotting as plotting\n\n# import dqn_with_options_v2 as dqn\nimport dqn_with_options as dqn\nfrom utils_dqn.dqn_utils import *\n#from atari_wrappers import *\n#from environments.arm_env.arm_env import ArmEnv\nfrom environments_dqn.arm_env_dqn import ArmEnvDQN\nfrom option_class import option\n\n\ndef arm_model(img_in, num_actions, scope, reuse=False):\n # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n with tf.variable_scope(scope, reuse=reuse):\n out = img_in\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n out = layers.flatten(out)\n with tf.variable_scope(\"action_value\"):\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)\n\n return out\n\n\ndef arm_learn(env, options, session, num_timesteps):\n # This is just a rough estimate\n num_iterations = float(num_timesteps) / 4.0\n\n lr_multiplier = 1.0\n lr_schedule = PiecewiseSchedule([\n (0, 1e-4 * lr_multiplier),\n (num_iterations / 10, 1e-4 * lr_multiplier),\n (num_iterations / 2, 5e-5 * lr_multiplier),\n ],\n outside_value=5e-5 * lr_multiplier)\n optimizer = dqn.OptimizerSpec(\n constructor=tf.train.AdamOptimizer,\n kwargs=dict(epsilon=1e-4),\n lr_schedule=lr_schedule\n )\n\n def stopping_criterion(env, t):\n # notice that here t is the number of steps of the wrapped env,\n # which is different from the number of steps in the underlying env\n return t >= num_timesteps\n\n exploration_schedule = PiecewiseSchedule(\n [\n (0, 1.0),\n (100000, 0.3),\n (200000, 0.1),\n (500000, 0.01),\n ], outside_value=0.01\n )\n\n dqn.learn(\n env,\n options=options,\n q_func=arm_model,\n optimizer_spec=optimizer,\n session=session,\n scope_name='over_options_8_6_6',\n exploration=exploration_schedule,\n stopping_criterion=stopping_criterion,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=5000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=500,\n grad_norm_clipping=10\n )\n\n ep_rew = env.get_episode_rewards()\n ep_len = env.get_episode_lengths()\n env.close()\n return ep_rew, ep_len\n\n\ndef get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef set_global_seeds(i):\n try:\n import tensorflow as tf\n except ImportError:\n pass\n else:\n tf.set_random_seed(i)\n np.random.seed(i)\n random.seed(i)\n\n\ndef get_session():\n tf.reset_default_graph()\n # tf_config = tf.ConfigProto(\n # inter_op_parallelism_threads=1,\n # intra_op_parallelism_threads=1)\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n session = tf.Session()\n return session\n\n\ndef 
main():\n # Get Atari games.\n # benchmark = gym.benchmark_spec('Atari40M')\n #\n # # Change the index to select a different game.\n # task = benchmark.tasks[3]\n #\n # # Run training\n # seed = 0 # Use a seed of zero (you may want to randomize the seed!)\n # set_global_seeds(seed)\n # env = get_env(task, seed)\n env = ArmEnvDQN(episode_max_length=300,\n size_x=8,\n size_y=6,\n cubes_cnt=6,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=1000,\n tower_target_size=5)\n session = get_session()\n\n def stop_cond1(env):\n if env._arm_x + 1 < env._size_x:\n if env._grid[env._arm_x + 1, env._arm_y] == 1 and env._arm_x + 2 >= env._size_x:\n return True\n if env._grid[env._arm_x + 1, env._arm_y] == 1 and env._arm_x + 2 < env._size_x:\n if env._grid[env._arm_x + 2, env._arm_y] == 1:\n return True\n else:\n return True\n return False\n\n def stop_cond2(env):\n if env._arm_x == 0 and env._grid[1, env._arm_y] == 1 and env._grid[2, env._arm_y] == 0:\n return True\n return False\n\n\n # initialize options\n\n # option(env, stop_cond2, path = \"option2_v2_8_6_6/dqn_graph.ckpt\", import_scope = \"option2_v2_8_6_6\")\n # option(env, stop_cond1, path = \"option1_8_6_6/dqn_graph.ckpt\", import_scope = \"option1_8_6_6\"),\n options = [option(env, stop_cond1, path=\"option1_8_6_6/dqn_graph.ckpt\", import_scope=\"option1_8_6_6\"),\n option(env, stop_cond2, path=\"option2_8_6_6/dqn_graph.ckpt\", import_scope=\"option2_8_6_6\")]\n\n ep_rew, ep_len = arm_learn(env, options, session, num_timesteps=1500000)\n\n thefile = open('ep_rew_8_6_6.txt', 'w')\n for item in ep_rew:\n thefile.write(\"%s\\n\" % item)\n\n thefile2 = open('ep_len_8_6_6.txt', 'w')\n for item in ep_len:\n thefile2.write(\"%s\\n\" % item)\n\n stats = plotting.EpisodeStats(\n episode_lengths=ep_len,\n episode_rewards=ep_rew)\n plotting.plot_episode_stats(stats)\n\n\n# tf.summary.FileWriter(\"logs\", tf.get_default_graph()).close()\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5884543657302856,
"alphanum_fraction": 0.6091247797012329,
"avg_line_length": 30.220930099487305,
"blob_id": "1bbd5948423582ab1af49d3f0eadbbde3f589073",
"content_id": "f25fc560fc883c2e062bf471fc7418e90f00b772",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5370,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 172,
"path": "/DQN/run_dqn.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import argparse\nimport gym\nfrom gym import wrappers\nimport os.path as osp\nimport random\nimport numpy as np\nimport itertools\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport datetime\n\nimport os\nimport sys\n\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n\nimport dqn\nfrom utils_dqn.dqn_utils import *\nfrom environments_dqn.arm_env_dqn import ArmEnvDQN\nimport utils_dqn.plotting as plotting\n\n\ndef arm_model(img_in, num_actions, scope, reuse=False):\n # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n with tf.variable_scope(scope, reuse=reuse):\n out = img_in\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n out = layers.flatten(out)\n with tf.variable_scope(\"action_value\"):\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)\n\n return out\n\n\ndef arm_learn(env, session, scope_name, num_timesteps, spec_file=None, exp_dir=None):\n # # This is just a rough estimate\n # num_iterations = float(num_timesteps) / 4.0\n\n lr_multiplier = 1.0\n lr_schedule = PiecewiseSchedule([\n (0, 1e-4 * lr_multiplier),\n (num_timesteps / 40, 1e-4 * lr_multiplier),\n (num_timesteps / 8, 5e-5 * lr_multiplier),\n ],\n outside_value=5e-5 * lr_multiplier)\n optimizer = dqn.OptimizerSpec(\n constructor=tf.train.AdamOptimizer,\n kwargs=dict(epsilon=1e-4),\n lr_schedule=lr_schedule\n )\n\n def stopping_criterion(t):\n return t >= num_timesteps\n\n exploration_schedule = PiecewiseSchedule(\n [\n (0, 1.0),\n (num_timesteps / 20, 0.3),\n (num_timesteps / 10, 0.1),\n (num_timesteps / 2, 0.01),\n ], outside_value=0.01\n )\n\n dqn.learn(\n env,\n q_func=arm_model,\n optimizer_spec=optimizer,\n session=session,\n scope_name=scope_name,\n exploration=exploration_schedule,\n stopping_criterion=stopping_criterion,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=2000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=500,\n grad_norm_clipping=10,\n log_every_n_steps=500,\n spec_file=spec_file,\n exp_dir=exp_dir\n )\n\n ep_rew = env.get_episode_rewards()\n ep_len = env.get_episode_lengths()\n\n return ep_rew, ep_len\n\n\ndef get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef set_global_seeds(i):\n try:\n import tensorflow as tf\n except ImportError:\n pass\n else:\n tf.set_random_seed(i)\n np.random.seed(i)\n random.seed(i)\n\n\ndef get_session():\n tf.reset_default_graph()\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n session = tf.Session()\n return session\n\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory. 
' + directory)\n\n\ndef main():\n env = ArmEnvDQN(episode_max_length=800,\n size_x=7,\n size_y=5,\n cubes_cnt=4,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=800,\n tower_target_size=4)\n\n # create a new folder for this experiment\n os.chdir('../experiments/plain DQN/')\n dir_name = \"experiment \" + str(datetime.datetime.now())[:-10]\n createFolder(dir_name)\n os.chdir('../../DQN/')\n\n f = open('../experiments/plain DQN/' + dir_name + '/specifications.txt', 'a').close()\n env.write_env_spec('../experiments/plain DQN/' + dir_name + '/specifications.txt')\n\n session = get_session()\n ep_rew, ep_len = arm_learn(env, session, scope_name=\"dqn\", num_timesteps=1000000,\n spec_file='../experiments/plain DQN/' + dir_name + '/specifications.txt',\n exp_dir='../experiments/plain DQN/' + dir_name)\n\n # add results\n thefile1 = open('../experiments/plain DQN/' + dir_name + '/ep_rewards.txt', 'w')\n for item in ep_rew:\n thefile1.write(\"%s\\n\" % item)\n\n thefile2 = open('../experiments/plain DQN/' + dir_name + '/ep_lengths.txt', 'w')\n for item in ep_len:\n thefile2.write(\"%s\\n\" % item)\n\n stats = plotting.EpisodeStats(\n episode_lengths=ep_len,\n episode_rewards=ep_rew)\n plotting.plot_episode_stats(stats, save_fig=True, fig_dir='../experiments/plain DQN/' + dir_name + '/',\n fig_name='smoothed_')\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5451635122299194,
"alphanum_fraction": 0.5635719299316406,
"avg_line_length": 36.69827651977539,
"blob_id": "7aeea8dd3b448a6f59a63d39950b21fc03ad04f2",
"content_id": "d128e7ec7e3e483e56cb2487d1c0dac1e2033a59",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8746,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 232,
"path": "/SearchHie/main.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from random import randrange\n\nfrom HAM.HAM_core import Action, Choice, Call, AbstractMachine, MachineGraph, MachineRelation, Start, Stop, RandomMachine, RootMachine, LoopInvokerMachine\nfrom HAM.HAM_experiments.HAM_utils import ham_runner, HAMParamsCommon\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import is_it_machine_runnable\nfrom SearchHie.goodhams import goodhams\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\nfrom utils.graph_drawer import draw_graph\n\ndefault_size_x = 7\ndefault_episode_max_length = 400\ndefault_finish_reward = 1\ndefault_action_minus_reward = -0.02\n\nenvironments = [\n ArmEnvToggleTopOnly(\n size_y=2,\n cubes_cnt=2,\n tower_target_size=2,\n size_x=default_size_x, episode_max_length=default_episode_max_length, finish_reward=default_finish_reward,\n action_minus_reward=default_action_minus_reward),\n\n ArmEnvToggleTopOnly(\n size_y=3,\n cubes_cnt=3,\n tower_target_size=3,\n size_x=default_size_x, episode_max_length=default_episode_max_length, finish_reward=default_finish_reward,\n action_minus_reward=default_action_minus_reward),\n\n ArmEnvToggleTopOnly(\n size_y=4,\n cubes_cnt=4,\n tower_target_size=3,\n size_x=default_size_x, episode_max_length=default_episode_max_length, finish_reward=default_finish_reward,\n action_minus_reward=default_action_minus_reward),\n\n ArmEnvToggleTopOnly(\n size_y=5,\n cubes_cnt=5,\n tower_target_size=4,\n size_x=default_size_x, episode_max_length=default_episode_max_length, finish_reward=default_finish_reward,\n action_minus_reward=default_action_minus_reward)\n]\n\nenv = environments[0]\n\n\ndef go(transitions, brute_force, index_):\n machine = AbstractMachine(MachineGraph(transitions=transitions))\n am = RootMachine(LoopInvokerMachine(machine))\n\n # if randrange(1000) == 0:\n # draw_graph(\"{brute_force}\".format(**locals()), am.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n # exit(0)\n\n if is_it_machine_runnable(machine):\n sum_rew = 0\n try:\n params = HAMParamsCommon(environments[0])\n ham_runner(ham=am, num_episodes=2, env=environments[0], params=params)\n sum_rew = sum(params.logs[\"ep_rewards\"])\n\n\n except ChildProcessError:\n # print(brute_force)\n pass\n # if randrange(1500) == 0:\n # draw_graph(\"bf{brute_force}\".format(**locals()), am.get_graph_to_draw())\n\n if sum_rew > 0:\n # TODO\n # with open(\"out.txt\", \"a\") as f:\n # f.write(str(brute_force) + \"\\n\")\n # return\n\n # print(\"\\n\\n EPISODE REWARD: \", sum_rew)\n # draw_graph(\"{sum_rew}__{brute_force}\".format(**locals()), am.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n rew = None\n print(\"\\n\\n\\n\")\n for e in environments:\n params = HAMParamsCommon(e)\n ham_runner(ham=am, num_episodes=600, env=e, params=params)\n if rew is None:\n rew = 0\n rew += sum(params.logs[\"ep_rewards\"])\n print(\"to_add:\", sum(params.logs[\"ep_rewards\"]))\n # except ChildProcessError:\n # draw_graph(\"{rew}__{brute_force}\".format(**locals()), am.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n # exit(0)\n # pass\n if rew is not None:\n draw_graph(\"{rew}__{brute_force}_{index_}\".format(**locals()), am.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n\n\ndef main():\n class UpMachine4(AbstractMachine):\n def __init__(self, env: ArmEnvToggleTopOnly):\n d1 = Action(action=env.ACTIONS.UP)\n d2 = Action(action=env.ACTIONS.UP)\n d3 = Action(action=env.ACTIONS.UP)\n d4 = Action(action=env.ACTIONS.UP)\n stop = Stop()\n 
transitions = (\n MachineRelation(left=Start(), right=d1),\n MachineRelation(left=d1, right=d2, label=0),\n MachineRelation(left=d2, right=d3, label=0),\n MachineRelation(left=d3, right=d4, label=0),\n MachineRelation(left=d4, right=stop, label=0),\n\n MachineRelation(left=d1, right=stop, label=1),\n MachineRelation(left=d2, right=stop, label=1),\n MachineRelation(left=d3, right=stop, label=1),\n MachineRelation(left=d4, right=stop, label=1),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n class UpMachine3(AbstractMachine):\n def __init__(self, env: ArmEnvToggleTopOnly):\n d1 = Action(action=env.ACTIONS.UP)\n d2 = Action(action=env.ACTIONS.UP)\n d3 = Action(action=env.ACTIONS.UP)\n # d4 = Action(action=env.ACTIONS.UP)\n stop = Stop()\n transitions = (\n MachineRelation(left=Start(), right=d1),\n MachineRelation(left=d1, right=d2, label=0),\n MachineRelation(left=d2, right=d3, label=0),\n MachineRelation(left=d3, right=stop, label=0),\n # MachineRelation(left=d4, right=stop, label=0),\n\n MachineRelation(left=d1, right=stop, label=1),\n MachineRelation(left=d2, right=stop, label=1),\n MachineRelation(left=d3, right=stop, label=1),\n # MachineRelation(left=d4, right=stop, label=1),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n a = [\n Choice(),\n Action(ArmEnvToggleTopOnly.ACTIONS.RIGHT),\n Action(ArmEnvToggleTopOnly.ACTIONS.LEFT),\n Action(ArmEnvToggleTopOnly.ACTIONS.DOWN),\n # Action(ArmEnvToggleTopOnly.ACTIONS.UP),\n\n Call(machine_to_call=UpMachine4(environments[1])),\n\n ]\n\n transitions = []\n for i in a:\n for j in a:\n if randrange(2):\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=j, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=j))\n len_ = len(goodhams)\n print(len_)\n len_4 = len_ // 4 + 1\n l1, r1 = 0, len_4\n l2, r2 = len_4, 2 * len_4\n l3, r3 = 2 * len_4, 3 * len_4\n l4, r4 = 3 * len_4, 4 * len_4\n\n # print(l1, r1 )\n # print(l2, r2 )\n # print(l3, r3 )\n # print(l4, r4 )\n # exit(0)\n # for brute_force in goodhams:\n # for index, brute_force in enumerate(goodhams[l1: r1]):\n # for index, brute_force in enumerate(goodhams[l2: r2]):\n # for index, brute_force in enumerate(goodhams[l3: r3]):\n for index, brute_force in enumerate(goodhams[l4: r4]):\n if index >= len_:\n break\n if index % (len_ // 100) == 0:\n print(index // (len_ // 100), \"%\")\n\n if bin(brute_force).count(\"1\") > 10:\n continue\n\n if bin(brute_force).count(\"1\") < 4:\n continue\n\n # if bin(brute_force).count(\"1\") > 12 or bin(brute_force).count(\"1\") < 4:\n # continue\n\n # continue\n go_continue = False\n transitions = []\n ss = set()\n for ii in range(len(a)):\n for jj in range(len(a)):\n i = a[ii]\n j = a[jj]\n if (2 ** (ii * len(a) + jj)) & brute_force:\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=j, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=j))\n if ii in ss and isinstance(a[ii], (Action, Call)):\n go_continue = True\n break\n ss.add(ii)\n if go_continue:\n # print('continue')\n continue\n stop = Stop()\n for ii in range(len(a)):\n if ii not in ss:\n i = a[ii]\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=stop, label=0))\n else:\n transitions.append(MachineRelation(left=i, right=stop))\n for i in a:\n if isinstance(i, Action):\n transitions.append(MachineRelation(left=i, right=stop, label=1))\n\n for index_, II in enumerate(a):\n transitions.append(MachineRelation(left=Start(), right=II))\n 
go(transitions=transitions, brute_force=brute_force, index_=index_)\n transitions.pop()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6196927428245544,
"alphanum_fraction": 0.6405812501907349,
"avg_line_length": 44.719722747802734,
"blob_id": "c8254d3f07e1fab878a0cf05acb458cdf700d737",
"content_id": "c55f7229d86b737e23171208995738051793a12a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13213,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 289,
"path": "/article_experiments/02_hand_crafted_HAM/ham_hand_crafted.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM.HAM_core import AbstractMachine, Start, Action, Stop, MachineRelation, MachineGraph, Choice, Call, RootMachine, LoopInvokerMachine\nfrom HAM.HAM_experiments.HAM_utils import ham_runner, HAMParamsCommon\nfrom article_experiments.global_envs import MazeEnvArticle, MazeEnvArticleSpecial, ArmEnvArticle, EnvironmentsArticle, get_cumulative_rewards\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\nfrom utils.graph_drawer import draw_graph\n\nname = \"02_ham_hand_crafted\"\n\n\ndef run(global_env):\n if isinstance(global_env, ArmEnvArticle):\n env = global_env.env\n internal_machine = PullUpMachine(env=env)\n machine = RootMachine(machine_to_invoke=LoopInvokerMachine(machine_to_invoke=internal_machine))\n params = HAMParamsCommon(env)\n draw_graph(file_name=\"arm_env\", graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n\n elif isinstance(global_env, MazeEnvArticle):\n env = global_env.env\n internal_machine = InterestingMachine(env=env)\n machine = RootMachine(machine_to_invoke=LoopInvokerMachine(machine_to_invoke=internal_machine))\n draw_graph(file_name=\"maze_env\",graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n elif isinstance(global_env, MazeEnvArticleSpecial):\n env = global_env.env\n internal_machine = InterestingMachineLeftUpInteresting(env=env)\n machine = RootMachine(machine_to_invoke=LoopInvokerMachine(machine_to_invoke=internal_machine))\n draw_graph(file_name=\"maze_env_special\",graph=internal_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n params = HAMParamsCommon(env)\n ham_runner(ham=machine, num_episodes=global_env.episodes_count, env=env, params=params)\n rewards = params.logs[\"ep_rewards\"]\n else:\n raise KeyError\n full_name = name + \"_\" + global_env.__class__.__name__\n # with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n # for out in get_cumulative_rewards(rewards=rewards):\n # w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n\nclass PullUpMachine(AbstractMachine):\n def __init__(self, env):\n pull_up_start = Start()\n pull_up_on = Action(action=env.get_actions_as_dict()[\"ON\"])\n pull_up_down_01 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\n pull_up_down_02 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\n pull_up_down_03 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\n pull_up_down_04 = Action(action=env.get_actions_as_dict()[\"DOWN\"])\n pull_up_up_01 = Action(action=env.get_actions_as_dict()[\"UP\"])\n pull_up_up_02 = Action(action=env.get_actions_as_dict()[\"UP\"])\n pull_up_up_03 = Action(action=env.get_actions_as_dict()[\"UP\"])\n pull_up_up_04 = Action(action=env.get_actions_as_dict()[\"UP\"])\n pull_up_stop = Stop()\n\n pull_up_transitions = (\n MachineRelation(left=pull_up_start, right=pull_up_on),\n\n MachineRelation(left=pull_up_on, right=pull_up_down_01, label=0),\n MachineRelation(left=pull_up_down_01, right=pull_up_down_02, label=0),\n MachineRelation(left=pull_up_down_02, right=pull_up_down_03, label=0),\n MachineRelation(left=pull_up_down_03, right=pull_up_down_04, label=0),\n 
MachineRelation(left=pull_up_down_04, right=pull_up_up_01, label=0),\n MachineRelation(left=pull_up_up_01, right=pull_up_up_02, label=0),\n MachineRelation(left=pull_up_up_02, right=pull_up_up_03, label=0),\n MachineRelation(left=pull_up_up_03, right=pull_up_up_04, label=0),\n MachineRelation(left=pull_up_up_04, right=pull_up_stop, label=0),\n\n MachineRelation(left=pull_up_on, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_01, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_02, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_03, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_down_04, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_01, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_02, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_03, right=pull_up_stop, label=1),\n MachineRelation(left=pull_up_up_04, right=pull_up_stop, label=1),\n )\n pull_up = AbstractMachine(MachineGraph(transitions=pull_up_transitions))\n\n start = Start()\n choice_one = Choice()\n left = Action(action=env.get_actions_as_dict()[\"LEFT\"])\n right = Action(action=env.get_actions_as_dict()[\"RIGHT\"])\n off = Action(action=env.get_actions_as_dict()[\"OFF\"])\n\n call = Call(machine_to_call=pull_up)\n\n stop = Stop()\n\n transitions = (\n MachineRelation(left=start, right=choice_one),\n MachineRelation(left=choice_one, right=left),\n MachineRelation(left=choice_one, right=right),\n MachineRelation(left=choice_one, right=off),\n MachineRelation(left=choice_one, right=call),\n\n MachineRelation(left=call, right=stop),\n\n MachineRelation(left=left, right=stop, label=0),\n MachineRelation(left=right, right=stop, label=0),\n MachineRelation(left=off, right=stop, label=0),\n\n MachineRelation(left=left, right=stop, label=1),\n MachineRelation(left=right, right=stop, label=1),\n MachineRelation(left=off, right=stop, label=1),\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass InterestingMachine(AbstractMachine):\n def __init__(self, env: MazeWorldEpisodeLength):\n left1 = Action(action=env.ACTIONS.LEFT)\n left2 = Action(action=env.ACTIONS.LEFT)\n left3 = Action(action=env.ACTIONS.LEFT)\n left4 = Action(action=env.ACTIONS.LEFT)\n left5 = Action(action=env.ACTIONS.LEFT)\n\n right1 = Action(action=env.ACTIONS.RIGHT)\n right2 = Action(action=env.ACTIONS.RIGHT)\n right3 = Action(action=env.ACTIONS.RIGHT)\n right4 = Action(action=env.ACTIONS.RIGHT)\n right5 = Action(action=env.ACTIONS.RIGHT)\n\n up1 = Action(action=env.ACTIONS.UP)\n up2 = Action(action=env.ACTIONS.UP)\n up3 = Action(action=env.ACTIONS.UP)\n up4 = Action(action=env.ACTIONS.UP)\n up5 = Action(action=env.ACTIONS.UP)\n\n down1 = Action(action=env.ACTIONS.DOWN)\n down2 = Action(action=env.ACTIONS.DOWN)\n down3 = Action(action=env.ACTIONS.DOWN)\n down4 = Action(action=env.ACTIONS.DOWN)\n down5 = Action(action=env.ACTIONS.DOWN)\n\n choice1 = Choice()\n choice2 = Choice()\n\n left = Action(action=env.ACTIONS.LEFT)\n right = Action(action=env.ACTIONS.RIGHT)\n up = Action(action=env.ACTIONS.UP)\n down = Action(action=env.ACTIONS.DOWN)\n\n stop = Stop()\n\n transitions = (\n MachineRelation(left=Start(), right=choice1),\n\n MachineRelation(left=choice1, right=left1),\n MachineRelation(left=left1, right=left2, label=0),\n MachineRelation(left=left2, right=left3, label=0),\n MachineRelation(left=left3, right=left4, label=0),\n MachineRelation(left=left4, right=left5, label=0),\n MachineRelation(left=left5, right=choice2, 
label=0),\n\n MachineRelation(left=left1, right=stop, label=1),\n MachineRelation(left=left2, right=stop, label=1),\n MachineRelation(left=left3, right=stop, label=1),\n MachineRelation(left=left4, right=stop, label=1),\n MachineRelation(left=left5, right=stop, label=1),\n\n MachineRelation(left=choice1, right=right1),\n MachineRelation(left=right1, right=right2, label=0),\n MachineRelation(left=right2, right=right3, label=0),\n MachineRelation(left=right3, right=right4, label=0),\n MachineRelation(left=right4, right=right5, label=0),\n MachineRelation(left=right5, right=choice2, label=0),\n\n MachineRelation(left=right1, right=stop, label=1),\n MachineRelation(left=right2, right=stop, label=1),\n MachineRelation(left=right3, right=stop, label=1),\n MachineRelation(left=right4, right=stop, label=1),\n MachineRelation(left=right5, right=stop, label=1),\n\n MachineRelation(left=choice1, right=up1),\n\n MachineRelation(left=up1, right=up2, label=0),\n MachineRelation(left=up2, right=up3, label=0),\n MachineRelation(left=up3, right=up4, label=0),\n MachineRelation(left=up4, right=up5, label=0),\n MachineRelation(left=up5, right=choice2, label=0),\n MachineRelation(left=up1, right=stop, label=1),\n MachineRelation(left=up2, right=stop, label=1),\n MachineRelation(left=up3, right=stop, label=1),\n MachineRelation(left=up4, right=stop, label=1),\n MachineRelation(left=up5, right=stop, label=1),\n\n MachineRelation(left=choice1, right=down1),\n MachineRelation(left=down1, right=down2, label=0),\n MachineRelation(left=down2, right=down3, label=0),\n MachineRelation(left=down3, right=down4, label=0),\n MachineRelation(left=down4, right=down5, label=0),\n MachineRelation(left=down5, right=choice2, label=0),\n MachineRelation(left=down1, right=stop, label=1),\n MachineRelation(left=down2, right=stop, label=1),\n MachineRelation(left=down3, right=stop, label=1),\n MachineRelation(left=down4, right=stop, label=1),\n MachineRelation(left=down5, right=stop, label=1),\n\n MachineRelation(left=choice2, right=left),\n MachineRelation(left=choice2, right=right),\n MachineRelation(left=choice2, right=up),\n MachineRelation(left=choice2, right=down),\n\n MachineRelation(left=left, right=stop, label=1, ),\n MachineRelation(left=right, right=stop, label=1),\n MachineRelation(left=up, right=stop, label=1),\n MachineRelation(left=down, right=stop, label=1),\n\n MachineRelation(left=left, right=stop, label=0, ),\n MachineRelation(left=right, right=stop, label=0),\n MachineRelation(left=up, right=stop, label=0),\n MachineRelation(left=down, right=stop, label=0),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass InterestingMachineLeftUpInteresting(AbstractMachine):\n def __init__(self, env: MazeWorldEpisodeLength):\n left4 = Action(action=env.ACTIONS.LEFT)\n left5 = Action(action=env.ACTIONS.LEFT)\n\n up4 = Action(action=env.ACTIONS.UP)\n up5 = Action(action=env.ACTIONS.UP)\n\n choice1 = Choice()\n choice2 = Choice()\n\n left = Action(action=env.ACTIONS.LEFT)\n right = Action(action=env.ACTIONS.RIGHT)\n up = Action(action=env.ACTIONS.UP)\n down = Action(action=env.ACTIONS.DOWN)\n\n stop = Stop()\n\n transitions = (\n MachineRelation(left=Start(), right=choice1),\n\n MachineRelation(left=choice1, right=left4),\n MachineRelation(left=left4, right=left5, label=0),\n MachineRelation(left=left5, right=choice2, label=0),\n\n MachineRelation(left=left4, right=stop, label=1),\n MachineRelation(left=left5, right=stop, label=1),\n\n MachineRelation(left=choice1, right=up4),\n\n 
MachineRelation(left=up4, right=up5, label=0),\n MachineRelation(left=up5, right=choice2, label=0),\n MachineRelation(left=up4, right=stop, label=1),\n MachineRelation(left=up5, right=stop, label=1),\n\n MachineRelation(left=choice2, right=left),\n MachineRelation(left=choice2, right=right),\n MachineRelation(left=choice2, right=up),\n MachineRelation(left=choice2, right=down),\n\n MachineRelation(left=left, right=stop, label=1, ),\n MachineRelation(left=right, right=stop, label=1),\n MachineRelation(left=up, right=stop, label=1),\n MachineRelation(left=down, right=stop, label=1),\n\n MachineRelation(left=left, right=stop, label=0, ),\n MachineRelation(left=right, right=stop, label=0),\n MachineRelation(left=up, right=stop, label=0),\n MachineRelation(left=down, right=stop, label=0),\n\n )\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\ndef main():\n for global_env in EnvironmentsArticle().environments:\n run(global_env)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4351184368133545,
"alphanum_fraction": 0.4376930892467499,
"avg_line_length": 35.660377502441406,
"blob_id": "95827e4650b84d74336bfbdd71ff687c503be9ae",
"content_id": "3ff5cea1f941000aa4421ba298c200062e166f98",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1942,
"license_type": "permissive",
"max_line_length": 214,
"num_lines": 53,
"path": "/environments/grid_maze_env/GRID_MAZE_README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "The environment `maze_world_env.py` is a labyrinth of repeating patterns.\n\n```\n O O O O O O O O O O O O O O O \n O . . . O O . . . O O . S . O \n O . O . . . . O . . . . O . O \n O . . . O O . . . O O . . . O \n O O . O O O O . O O O O . O O \n O O . O O O O . O O O O . O O \n O . . . O O . . . O O . . . O \n O . O . . . . O . x . . O . O \n O . . . O O . . . O O . . . O \n O O . O O O O . O O O O . O O \n O O . O O O O . O O O O . O O \n O . . . O O . . . O O . . . O \n O . O . . . . O . . . . O . O \n O . . . O O . . . O O . . . O \n O O . O O O O . O O O O . O O \n O O . O O O O . O O O O . O O \n O . . . O O . . . O O . . . O \n O . O . . . . O . . . . O . O \n O . F O O O . . . O O . . . O \n O O O O O O O O O O O O O O O \n```\n\n\n\n\n\n> `O` -- is an obstacle \n `S` -- start \n `F` -- finish \n `-` -- free cell \n `x` -- agent position\n\n\n\nThe agent can perform four actions to move: up, down, left and right.\nIf the agent encounters a wall as a result of the action, the reward is `-5`.\nIf, after performing an action, the agent has moved to an empty cell - the reward is `-1`.\nIf the agent is in the terminal state `F` - the reward is `+100`.\n\nIn the file `grid_maze_generator.py` there is methods for automatic generation of labyrinths. Each labyrinth consists of patterns:\n```\n O O . O O \n O . . . O \n . . O . .\n O . . . O \n O O . O O \n```\t \n\nIn which instead of empty points ```.``` additional walls ```O``` can be specified.. \nIn the file `random_policy.py` there is an example of interaction with the environment. And after the end of the work schedules are drawn. It is possible to display the state of the environment at the current time."
},
{
"alpha_fraction": 0.5731527805328369,
"alphanum_fraction": 0.5869885087013245,
"avg_line_length": 35.923912048339844,
"blob_id": "f1e2aac08a7cd54f3fabbc2ca5328e9bf85631a3",
"content_id": "8082d204bde73b43eca1e0d511ba9fa4074cd128",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3397,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 92,
"path": "/HAM/HAM_experiments/HAM_utils.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\nimport sys\n\nfrom HAM.HAM_core import HAMParams, RandomMachine, LoopInvokerMachine, RootMachine, Start, Choice, Action, Stop, Call, \\\n MachineRelation, MachineGraph, AbstractMachine\nfrom environments.grid_maze_env.grid_maze_generator import generate_pattern, generate_maze, place_start_finish, prepare_maze\nfrom utils import plotting\nfrom utils.graph_drawer import draw_graph\n\n\nclass HAMParamsCommon(HAMParams):\n def __init__(self, env):\n super().__init__(q_value={},\n env=env,\n current_state=None,\n eps=0.1,\n gamma=0.9,\n alpha=0.1,\n string_prefix_of_machine=None,\n accumulated_discount=1,\n accumulated_rewards=0,\n previous_machine_choice_state=None,\n env_is_done=None,\n logs={\"reward\": 0, \"ep_rewards\": []},\n on_model_transition_id_function=lambda env_: 1 if env_.is_done() else 0, )\n\n\ndef maze_world_input_01():\n base_patterns = [2 ** 4 + 2 ** 8, 1 + 2 ** 12, 0]\n x = list(map(generate_pattern, base_patterns))\n\n mz_level1 = generate_maze(x, size_x=3, size_y=3, seed=15)\n mz_level2 = generate_maze([mz_level1], size_x=3, size_y=2)\n return place_start_finish(prepare_maze(mz_level2))\n\n\ndef plot_multi(p_params, filename=None):\n plotting.plot_multi_test(smoothing_window=30,\n x_label=\"episode\",\n y_label=\"smoothed rewards\",\n curve_to_draw=[_.curve_to_draw for _ in p_params],\n labels=[_.label for _ in p_params],\n filename=filename\n )\n\n\ndef ham_runner(ham, num_episodes, env, params, no_output=None):\n for i_episode in range(1, num_episodes + 1):\n env.reset()\n ham.run(params)\n assert env.is_done(), \"The machine is STOPPED before STOP(done) of the environment\"\n if i_episode % 10 == 0:\n if no_output is None:\n print(\"\\r{ham} episode {i_episode}/{num_episodes}.\".format(**locals()), end=\"\")\n sys.stdout.flush()\n\n\ndef draw_system_machines():\n s1 = RandomMachine()\n s2 = LoopInvokerMachine(machine_to_invoke=s1)\n s3 = RootMachine(machine_to_invoke=s2)\n\n draw_graph(\"full_hie\", s3.get_graph_to_draw())\n\n\nPlotParams = namedtuple(\"PlotParams\", [\"curve_to_draw\", \"label\"])\n\n\ndef super_runner(call_me_maybe, env):\n start = Start()\n choice_one = Choice()\n actions = [Action(action=_) for _ in env.get_actions_as_dict().values()]\n stop = Stop()\n\n call = Call(call_me_maybe)\n transitions = [MachineRelation(left=start, right=choice_one), ]\n for action in actions:\n transitions.append(MachineRelation(left=choice_one, right=action))\n transitions.append(MachineRelation(left=action, right=stop, label=0))\n transitions.append(MachineRelation(left=action, right=stop, label=1))\n transitions.append(MachineRelation(left=choice_one, right=call))\n transitions.append(MachineRelation(left=call, right=stop))\n\n return AbstractMachine(graph=MachineGraph(transitions=transitions))\n\ndef main():\n draw_system_machines()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5748281478881836,
"alphanum_fraction": 0.5832892656326294,
"avg_line_length": 29.483871459960938,
"blob_id": "9529f1dc7094d7fa4a7af70ffa00f13ef021c28b",
"content_id": "3623f4a4213acb86962d60083604e01beed40c03",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1891,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 62,
"path": "/DQN with Options/option_class.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport itertools\nfrom utils_dqn.dqn_utils import *\n\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n\n\n# import gym.spaces\n# import numpy as np\n# import random\n# import tensorflow as tf\n# import tensorflow.contrib.layers as layers\n# from collections import namedtuple\n\n\nclass option(object):\n def __init__(self, env, stop_condition, path, import_scope, name=None, max_t=25):\n\n self.max_t = max_t\n self.g = tf.Graph()\n self.sess = tf.Session(graph=self.g)\n self.stop_condition = stop_condition\n\n with self.g.as_default():\n self.saver = tf.train.import_meta_graph(path + \".meta\", import_scope=import_scope)\n self.saver.restore(self.sess, path)\n\n self.obs_t_float = self.g.get_tensor_by_name(import_scope + \"/obs_t_float:0\")\n self.pred_ac = self.g.get_tensor_by_name(import_scope + \"/pred_ac:0\")\n\n def encode_observation(self, frame):\n img_h, img_w = frame.shape[1], frame.shape[2]\n return frame.transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n\n def step(self, env):\n \"\"\"Executes the option\"\"\"\n opt_reward = env._action_minus_reward\n opt_length = 0.0\n last_obs = env.get_evidence_for_image_render()\n # env.render()\n\n for t in itertools.count():\n obs = self.encode_observation(np.array([last_obs]))\n action = self.sess.run(self.pred_ac, {self.obs_t_float: [obs]})[0]\n\n next_obs, reward, done, info = env.step(action)\n\n opt_reward += reward\n opt_length += 1\n\n # env.render()\n\n if done or self.stop_condition(env) or t >= self.max_t:\n # env.render()\n break\n\n last_obs = next_obs\n\n return next_obs, opt_reward, done, opt_length, info\n\n"
},
{
"alpha_fraction": 0.5471100807189941,
"alphanum_fraction": 0.555536687374115,
"avg_line_length": 37.77631759643555,
"blob_id": "21bb0b72e12b8a285a5f3e8bb29b7a4c340f21e4",
"content_id": "3d9033e431d7ef4a5b51fd880a3be461b0a24ca8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17682,
"license_type": "permissive",
"max_line_length": 189,
"num_lines": 456,
"path": "/workshop/main.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import collections\nimport json\nimport operator\n\nfrom HAM.HAM_core import Stop, Start, Action, Choice, AbstractMachine, MachineRelation, MachineGraph\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, plot_multi, PlotParams\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly, ArmEnv\nfrom workshop.check_graphs import generate_good_graph_ids, generate_machines_by_ids, generate_good_graphs\nfrom workshop.generate_graph import MachineStored\nimport sys\nimport imageio\n\n\ndef compress_graphs_dicts(g_list):\n ss = set()\n res = []\n for item in g_list:\n item_json = json.dumps(obj=item)\n if item_json in ss:\n continue\n else:\n res.append(item)\n ss.add(item_json)\n\n return res\n\n\ndef part_one(env, vertexes):\n t = compress_graphs_dicts([_.to_dict() for _ in generate_good_graphs(env=env, vertexes=vertexes, vertex_count=6)])\n with open(\"machines_part_one.json\", \"w\") as out_f:\n json.dump(t, fp=out_f, sort_keys=True, indent=4)\n\n\ndef part_two(env):\n with open(\"machines_part_one.json\") as json_file:\n machines = [MachineStored.ms_from_machine(AutoMachineSimple(env), env)]\n machines_to_save = []\n for ms_dict in json.load(json_file):\n machines.append(MachineStored.from_dict(graph_dict=ms_dict, env=env))\n\n m_id = 0\n\n params = HAMParamsCommon(env)\n am = AutoMachineSimple(env)\n\n runner(ham=am,\n num_episodes=2000,\n env=env,\n params=params,\n on_model_mapping={},\n )\n qv = params.q_value\n\n for on_model_part in list(reversed(env.get_all_on_model())):\n for ms in machines:\n machine = ms.get_machine()\n\n params = HAMParamsCommon(env)\n params.q_value = qv\n\n runner(ham=am,\n num_episodes=1,\n env=env,\n params=params,\n on_model_mapping={on_model_part: machine},\n )\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_with_pull_up\"))\n total_reward = sum(params.logs[\"ep_rewards\"])\n print(\"rewards sum:\", total_reward)\n # plot_multi(to_plot, filename=\"pics/\" + str(m_id) + \":::\" + str(on_model_part) + \":::\" + str(ms.binary_matrix_representation) + \":::\" + str(sum(params.logs[\"ep_rewards\"])))\n # ms.draw(\"pics/\" + str(m_id) + \":\" + str(ms.binary_matrix_representation) + \":\" + str(total_reward))\n m_id += 1\n\n if total_reward > 10:\n machines_to_save.append(ms)\n with open(\"machines_part_two.json\", \"w\") as out_f:\n t = compress_graphs_dicts([_.to_dict() for _ in machines_to_save])\n json.dump(obj=t, fp=out_f, sort_keys=True, indent=4)\n\n\ndef part_three(env):\n with open(\"machines_part_two.json\") as json_file:\n machines = []\n for ms_dict in json.load(json_file):\n machines.append(MachineStored.from_dict(graph_dict=ms_dict, env=env))\n\n cluster_best_result_mapper = {}\n cluster_best_machine_mapper = {}\n clusters_to_save = {}\n for on_model_part in list(reversed(env.get_all_on_model())):\n for index, ms in enumerate(machines):\n machine = ms.get_machine()\n total_reward = 0\n for tests in range(5):\n params = HAMParamsCommon(env)\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=30,\n env=env,\n params=params,\n on_model_mapping={on_model_part: machine},\n no_output=True,\n )\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_with_pull_up\"))\n total_reward += sum(params.logs[\"ep_rewards\"])\n # print(total_reward)\n on_model_part_str = str(on_model_part)\n if on_model_part_str in cluster_best_result_mapper:\n if cluster_best_result_mapper[on_model_part_str] < total_reward:\n 
cluster_best_result_mapper[on_model_part_str], cluster_best_machine_mapper[on_model_part_str] = total_reward, ms.to_dict()\n clusters_to_save[on_model_part_str] = {\"total_reward\": total_reward, \"graph_dict\": ms.to_dict()}\n else:\n cluster_best_result_mapper[on_model_part_str], cluster_best_machine_mapper[on_model_part_str] = total_reward, ms.to_dict()\n clusters_to_save[on_model_part_str] = {\"total_reward\": total_reward, \"graph_dict\": ms.to_dict()}\n # print('\\n')\n print(\"****\")\n ms_len = len(machines)\n print(\"machine {index} of {ms_len}\".format(**locals()))\n print()\n for i in ms.vertex_types:\n print(i)\n print(on_model_part_str, total_reward)\n # print(clusters_to_save)\n # exit(0)\n with open(\"machines_part_three.json\", \"w\") as out_f:\n json.dump(obj=clusters_to_save, fp=out_f, sort_keys=True, indent=4)\n\n\ndef part_four(env):\n with open(\"machines_part_three.json\") as json_file:\n cluster_best_machine_mapper_str_key = json.load(json_file)\n cluster_best_machine_mapper = {}\n\n for key in cluster_best_machine_mapper_str_key:\n tuple_key = key\n # tuple_key = key\n tuple_key = tuple_key.replace(\"(\", \"\")\n tuple_key = tuple_key.replace(\")\", \"\")\n tuple_key = tuple(map(eval, tuple_key.split(\",\")))\n cluster_best_machine_mapper[tuple_key] = MachineStored.from_dict(cluster_best_machine_mapper_str_key[key][\"graph_dict\"], env=env)\n cluster_best_machine_mapper_machine = {}\n for i in cluster_best_machine_mapper:\n cluster_best_machine_mapper_machine[i] = cluster_best_machine_mapper[i].get_machine()\n params = HAMParamsCommon(env)\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=300,\n env=env,\n params=params,\n on_model_mapping=cluster_best_machine_mapper_machine,\n no_output=True,\n )\n for cluster in cluster_best_machine_mapper:\n ms = cluster_best_machine_mapper[cluster]\n ms.draw(filename=str(cluster))\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"clustering, same env\"))\n plot_multi(to_plot, filename=\"a\")\n\n\ndef part_five(env):\n with open(\"machines_part_three.json\") as json_file:\n cluster_best_machine_mapper_str_key = json.load(json_file)\n cluster_best_machine_mapper = {}\n for key in cluster_best_machine_mapper_str_key:\n tuple_key = key\n tuple_key = tuple_key.replace(\"(\", \"\")\n tuple_key = tuple_key.replace(\")\", \"\")\n tuple_key = tuple(map(eval, tuple_key.split(\",\")))\n cluster_best_machine_mapper[tuple_key] = MachineStored.from_dict(cluster_best_machine_mapper_str_key[key][\"graph_dict\"], env=env).get_machine()\n\n params = HAMParamsCommon(env)\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=700,\n env=env,\n params=params,\n on_model_mapping=cluster_best_machine_mapper,\n # no_output=True,\n )\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"clustering\"))\n\n save_to_gif(\"olololo\", params.logs[\"gif\"][-1])\n\n params = HAMParamsCommon(env)\n runner(ham=AutoMachineSimple(env),\n num_episodes=700,\n env=env,\n params=params,\n on_model_mapping={},\n # no_output=True,\n )\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"q-learning\"))\n\n plot_multi(to_plot, filename=\"b\")\n\n\ndef part_six(env):\n #\n\n with open(\"machines_part_three.json\") as json_file:\n cluster_best_machine_mapper_str_key = json.load(json_file)\n ololo_mapping = {}\n ololo_to_sort = []\n for key in cluster_best_machine_mapper_str_key:\n tuple_key = key\n tuple_key = tuple_key.replace(\"(\", \"\")\n tuple_key = 
tuple_key.replace(\")\", \"\")\n tuple_key = tuple(map(eval, tuple_key.split(\",\")))\n ololo_mapping[tuple_key] = MachineStored.from_dict(cluster_best_machine_mapper_str_key[key][\"graph_dict\"], env=env)\n ololo_to_sort.append([cluster_best_machine_mapper_str_key[key][\"total_reward\"], tuple_key])\n\n best_clusters = {}\n\n for i in sorted(ololo_to_sort, reverse=True):\n key = i[1]\n print(key, type(key), key[0])\n\n # print(ololo_mapping[key])\n total_reward_a = 0\n for i in range(10):\n params = HAMParamsCommon(env)\n to_run = {}\n ss = {**best_clusters, key: ololo_mapping[key]}\n for i in ss:\n to_run[i] = ss[i].get_machine()\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=800,\n env=env,\n params=params,\n on_model_mapping=to_run,\n )\n total_reward_a += sum(params.logs[\"ep_rewards\"])\n\n total_reward_b = 0\n for i in range(10):\n to_run = {}\n ss = {**best_clusters}\n for i in ss:\n to_run[i] = ss[i].get_machine()\n to_run = {}\n params = HAMParamsCommon(env)\n runner(ham=AutoMachineSimple(env),\n num_episodes=800,\n env=env,\n params=params,\n on_model_mapping=to_run,\n )\n total_reward_b += sum(params.logs[\"ep_rewards\"])\n\n if total_reward_a > total_reward_b:\n best_clusters[key] = ololo_mapping[key]\n print()\n print(total_reward_a, \" ::: \", total_reward_b)\n clusters_to_save = {}\n for i in best_clusters:\n on_model_part_str = str(i)\n clusters_to_save[on_model_part_str] = best_clusters[i].to_dict()\n with open(\"machines_part_six.json\", \"w\") as out_f:\n json.dump(obj=clusters_to_save, fp=out_f, sort_keys=True, indent=4)\n\n\ndef part_seven(env):\n with open(\"machines_part_six.json\") as json_file:\n cluster_best_machine_mapper_str_key = json.load(json_file)\n cluster_best_machine_mapper = {}\n for key in cluster_best_machine_mapper_str_key:\n tuple_key = key\n tuple_key = tuple_key.replace(\"(\", \"\")\n tuple_key = tuple_key.replace(\")\", \"\")\n tuple_key = tuple(map(eval, tuple_key.split(\",\")))\n cluster_best_machine_mapper[tuple_key] = MachineStored.from_dict(cluster_best_machine_mapper_str_key[key], env=env).get_machine()\n MachineStored.from_dict(cluster_best_machine_mapper_str_key[key], env=env).draw(\"ololo\"+str(key))\n params = HAMParamsCommon(env)\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=2000,\n env=env,\n params=params,\n on_model_mapping=cluster_best_machine_mapper,\n # no_output=True,\n )\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"clustering\"))\n\n save_to_gif(\"olololo\", params.logs[\"gif\"][-1])\n\n params = HAMParamsCommon(env)\n runner(ham=AutoMachineSimple(env),\n num_episodes=2000,\n env=env,\n params=params,\n on_model_mapping={},\n # no_output=True,\n )\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"q-learning\"))\n\n plot_multi(to_plot, filename=\"ololo_result\")\n\n\ndef runner(ham, num_episodes, env, params, on_model_mapping, no_output=None, ):\n for i_episode in range(1, num_episodes + 1):\n env.reset()\n\n while not env.is_done():\n # print(env.get_on_model())\n if env.get_on_model() in on_model_mapping:\n on_model_mapping[env.get_on_model()].run(params)\n else:\n ham.run(params)\n # print(\"****\" * 10)\n # print(env.get_on_model())\n # env.render()\n # print(\"\\n\")\n\n if i_episode % 10 == 0:\n if no_output is None:\n print(\"\\r{ham} episode {i_episode}/{num_episodes}.\".format(**locals()), end=\"\")\n sys.stdout.flush()\n\n\nclass AutoMachineSimple(AbstractMachine):\n def __init__(self, env):\n start = Start()\n choice_one = 
Choice()\n actions = [Action(action=_) for _ in env.get_actions_as_dict().values()]\n stop = Stop()\n\n transitions = [MachineRelation(left=start, right=choice_one), ]\n for action in actions:\n transitions.append(MachineRelation(left=choice_one, right=action))\n transitions.append(MachineRelation(left=action, right=stop, label=0))\n transitions.append(MachineRelation(left=action, right=stop, label=1))\n\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\ndef test_draw_gid():\n env = ArmEnvToggleTopOnly(size_x=5, size_y=4, cubes_cnt=4, episode_max_length=50, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n\n def get_on_model(self):\n return self.get_arm_x(), self.is_cube_graped()\n\n def get_all_on_model(self):\n res = []\n for height in range(0, self._size_x):\n for graped in [True, False]:\n if height == self._size_x - 1 and graped is True:\n continue\n res.append((height, graped))\n return res\n\n def get_arm_x(self):\n return self._arm_x\n return self._size_x - self._arm_x\n\n def is_cube_graped(self):\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n return self._magnet_toggle and self.ok(cube_x, cube_y) and self._grid[cube_x][cube_y] == 1\n\n ArmEnvToggleTopOnly.get_arm_x = get_arm_x\n ArmEnvToggleTopOnly.get_all_on_model = get_all_on_model\n ArmEnvToggleTopOnly.is_cube_graped = is_cube_graped\n ArmEnvToggleTopOnly.get_on_model = get_on_model\n\n params = HAMParamsCommon(env)\n\n runner(ham=AutoMachineSimple(env),\n num_episodes=1,\n env=env,\n params=params,\n on_model_mapping={},\n no_output=True,\n )\n\n save_to_gif(\"olololo\", params.logs[\"gif\"][0])\n # imageio.mimsave('movie.gif', images)\n # numpngw.write_apng('foo.png', images, delay=250, use_palette=True)\n\n exit(0)\n\n\ndef save_to_gif(filename, grids):\n images = []\n for i in grids:\n images.append(ArmEnv.render_to_image(i))\n\n imageio.mimsave(filename + \".gif\", ims=images, duration=0.2)\n\n\ndef main():\n def get_on_model(self):\n return self.get_arm_x(), self.is_cube_graped()\n\n def get_all_on_model(self):\n res = []\n for height in range(0, self._size_x - 1):\n for graped in [True, False]:\n if height == self._size_x - 1 and graped:\n continue\n res.append((height, graped))\n return res\n\n def get_arm_x(self):\n return self._arm_x\n return self._size_x - self._arm_x\n\n def is_cube_graped(self):\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n return self._magnet_toggle and self.ok(cube_x, cube_y) and self._grid[cube_x][cube_y] == 1\n\n ArmEnvToggleTopOnly.get_arm_x = get_arm_x\n ArmEnvToggleTopOnly.get_all_on_model = get_all_on_model\n ArmEnvToggleTopOnly.is_cube_graped = is_cube_graped\n ArmEnvToggleTopOnly.get_on_model = get_on_model\n\n # env = ArmEnvToggleTopOnly(size_x=5, size_y=4, cubes_cnt=4, episode_max_length=500, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n env = ArmEnvToggleTopOnly(size_x=4, size_y=3, cubes_cnt=3, episode_max_length=200, finish_reward=100, action_minus_reward=-0.00001, tower_target_size=3)\n\n vertexes = sorted([\n Stop(),\n Start(),\n\n Action(env.ACTIONS.LEFT),\n Action(env.ACTIONS.RIGHT),\n Action(env.ACTIONS.UP),\n Action(env.ACTIONS.DOWN),\n Action(env.ACTIONS.TOGGLE),\n Choice(),\n Action(env.ACTIONS.LEFT),\n Action(env.ACTIONS.RIGHT),\n Action(env.ACTIONS.UP),\n Action(env.ACTIONS.DOWN),\n Action(env.ACTIONS.TOGGLE),\n # Choice(),\n ])\n\n # part_one(env, vertexes)\n 
# part_two(env)\n # part_three(env)\n # part_four(env)\n env = ArmEnvToggleTopOnly(size_x=6, size_y=4, cubes_cnt=5, episode_max_length=800, finish_reward=100, action_minus_reward=-0.00001, tower_target_size=5)\n # part_six(env)\n\n # env = ArmEnvToggleTopOnly(size_x=5, size_y=4, cubes_cnt=4, episode_max_length=500, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n part_seven(env)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6056562662124634,
"alphanum_fraction": 0.615223228931427,
"avg_line_length": 41.10600662231445,
"blob_id": "7db29d1ddda6406730e02fd09f5199bca83dd949",
"content_id": "50851b5947cb3a5d3fb27c8090f8d0a30424b238",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11916,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 283,
"path": "/DQN/dqn.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import sys\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom utils_dqn.dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\n\ndef learn(env,\n q_func,\n optimizer_spec,\n session,\n scope_name,\n exploration=LinearSchedule(300000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=10000,\n batch_size=32,\n gamma=0.99,\n learning_starts=5000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=1000,\n grad_norm_clipping=10,\n log_every_n_steps=500,\n spec_file=None,\n exp_dir=None):\n \"\"\"Run Deep Q-learning algorithm.\n\n All schedules are w.r.t. total number of steps taken in the environment.\n\n Parameters\n ----------\n env: gym.Env\n gym environment to train on.\n q_func: function\n Model to use for computing the q function. It should accept the\n following named arguments:\n img_in: tf.Tensor\n tensorflow tensor representing the input image\n num_actions: int\n number of actions\n scope: str\n scope in which all the model related variables\n should be created\n reuse: bool\n whether previously created variables should be reused.\n optimizer_spec: OptimizerSpec\n Specifying the constructor and kwargs, as well as learning rate schedule\n for the optimizer\n session: tf.Session\n tensorflow session to use.\n exploration: rl_algs.deepq.utils.schedules.Schedule\n schedule for probability of chosing random action.\n stopping_criterion: (env, t) -> bool\n should return true when it's ok for the RL algorithm to stop.\n takes in env and the number of steps executed so far.\n replay_buffer_size: int\n How many memories to store in the replay buffer.\n batch_size: int\n How many transitions to sample each time experience is replayed.\n gamma: float\n Discount Factor\n learning_starts: int\n After how many environment steps to start replaying experiences\n learning_freq: int\n How many steps of environment to take between every experience replay\n frame_history_len: int\n How many past frames to include as input to the model.\n target_update_freq: int\n How many experience replay rounds (not steps!) to perform between\n each update to the target Q network\n grad_norm_clipping: float or None\n If not None gradients' norms are clipped to this value.\n \"\"\"\n\n f = open(spec_file, 'a')\n f.write(\"Learning parameters:\" + '\\n')\n f.write(\" replay_buffer_size : {}\".format(replay_buffer_size) + '\\n')\n f.write(\" batch_size : {}\".format(batch_size) + '\\n')\n f.write(\" gamma : {}\".format(gamma) + '\\n')\n f.write(\" learning_starts : {}\".format(learning_starts) + '\\n')\n f.write(\" learning_freq : {}\".format(learning_freq) + '\\n')\n f.write(\" frame_history_len : {}\".format(frame_history_len) + '\\n')\n f.write(\" target_update_freq : {}\".format(target_update_freq) + '\\n')\n f.write(\" grad_norm_clipping : {}\".format(grad_norm_clipping) + '\\n')\n f.close()\n\n writer = tf.summary.FileWriter(exp_dir + '/' + \"summary_writer\")\n\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n if len(env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. 
RAM)\n input_shape = env.observation_space.shape\n else:\n img_h, img_w, img_c = env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c) # size_x, size_y,\n\n num_actions = env.action_space.n\n\n ### 1. Set up placeholders\n with tf.variable_scope(scope_name):\n # with tf.variable_scope(\"obs_t_ph\"):\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n pred_q = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n pred_ac = tf.argmax(pred_q, axis=1, name=\"pred_ac\")\n\n # placeholder for current action\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"act_t_ph\")\n\n # placeholder for current reward\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"rew_t_ph\")\n\n with tf.variable_scope(\"obs_tp1_ph\"):\n # placeholder for next observation (or state)\n obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_tp1_ph\")\n obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0\n\n # placeholder for end of episode mask\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done_mask_ph\")\n\n with tf.variable_scope(\"pred_q_a\"):\n pred_q_a = tf.reduce_sum(pred_q * tf.one_hot(act_t_ph, depth=num_actions), axis=1, name='pred_q_a')\n\n target_q = q_func(obs_tp1_float, num_actions, scope=\"target_q_func\", reuse=False)\n\n with tf.variable_scope(\"target_q_a\"):\n target_q_a = rew_t_ph + (1 - done_mask_ph) * gamma * tf.reduce_max(target_q, axis=1)\n\n with tf.variable_scope(\"Compute_bellman_error\"):\n total_error = tf.reduce_sum(huber_loss(pred_q_a - tf.stop_gradient(target_q_a)), name='total_error')\n total_error_scalar = tf.summary.scalar('bellman_error', total_error)\n\n with tf.variable_scope(\"Hold_the_var\"):\n # Hold all of the variables of the Q-function network and target network, respectively.\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name + '/q_func')\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')\n\n # construct optimization op (with gradient clipping)\n learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n with tf.variable_scope(\"Optimizer\"):\n optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)\n train_fn = minimize_and_clip(optimizer, total_error,\n var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n\n with tf.variable_scope(\"Update_target_fn\"):\n update_target_fn = tf.group(*update_target_fn, name='update_target_fn')\n\n # construct the replay buffer\n replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)\n\n ###############\n # RUN ENV #\n ###############\n model_initialized = False\n num_param_updates = 0\n mean_episode_reward = -float('nan')\n best_mean_episode_reward = -float('inf')\n last_obs = env.reset()\n LOG_EVERY_N_STEPS = log_every_n_steps\n\n # as we need to save only Q-network weights\n saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name))\n\n for t in itertools.count():\n ### 1. 
Check stopping criterion\n if stopping_criterion is not None and stopping_criterion(t):\n break\n\n ### 2. Step the env and store the transition\n\n # Store the latest observation that was recorded from the simulator.\n idx = replay_buffer.store_frame(last_obs)\n\n # Epsilon greedy exploration\n if not model_initialized or random.random() < exploration.value(t):\n action = random.randint(0, num_actions - 1)\n else:\n obs = replay_buffer.encode_recent_observation()\n action = session.run(pred_ac, {obs_t_ph: [obs]})[0]\n\n next_obs, reward, done, info = env.step(action)\n\n # Store the outcome\n replay_buffer.store_effect(idx, action, reward, done)\n last_obs = env.reset() if done else next_obs\n\n ### 3. Perform experience replay and train the network.\n\n if (t > learning_starts and t % learning_freq == 0 and\n replay_buffer.can_sample(batch_size)):\n\n # 3.a sample a batch of transitions\n obs_batch, act_batch, rew_batch, next_obs_batch, done_batch = replay_buffer.sample(batch_size)\n\n # 3.b initialize the model if haven't\n if not model_initialized:\n initialize_interdependent_variables(session, tf.global_variables(), {\n obs_t_ph: obs_batch,\n obs_tp1_ph: next_obs_batch,\n })\n session.run(update_target_fn)\n model_initialized = True\n\n # 3.c train the model\n _, error = session.run([train_fn, total_error_scalar], {\n obs_t_ph: obs_batch,\n act_t_ph: act_batch,\n rew_t_ph: rew_batch,\n obs_tp1_ph: next_obs_batch,\n done_mask_ph: done_batch,\n learning_rate: optimizer_spec.lr_schedule.value(t)\n })\n writer.add_summary(error, t)\n\n # 3.d periodically update the target network\n if t % target_update_freq == 0:\n session.run(update_target_fn)\n num_param_updates += 1\n\n ### 4. Log progress\n episode_rewards = env.get_episode_rewards()\n episode_lengths = env.get_episode_lengths()\n\n if len(episode_rewards) > 0 and len(episode_rewards) <= 50:\n mean_episode_reward = np.mean(episode_rewards)\n mean_episode_length = np.mean(episode_lengths)\n\n max_episode_reward = np.max(episode_rewards)\n min_episode_length = np.min(episode_lengths)\n\n min_episode_reward = np.min(episode_rewards)\n max_episode_length = np.max(episode_lengths)\n\n elif len(episode_rewards) > 50:\n mean_episode_reward = np.mean(episode_rewards[-50:])\n mean_episode_length = np.mean(episode_lengths[-50:])\n\n max_episode_reward = np.max(episode_rewards[-50:])\n min_episode_length = np.min(episode_lengths[-50:])\n\n min_episode_reward = np.min(episode_rewards[-50:])\n max_episode_length = np.max(episode_lengths[-50:])\n\n best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)\n\n if t % LOG_EVERY_N_STEPS == 0 and model_initialized:\n print(\"Timestep %d\" % (t,))\n print(\"mean reward (50 episodes) %f\" % mean_episode_reward)\n print(\"mean length (50 episodes) %f\" % mean_episode_length)\n print(\"max_episode_reward (50 episodes) %f\" % max_episode_reward)\n print(\"min_episode_length (50 episodes) %f\" % min_episode_length)\n print(\"min_episode_reward (50 episodes) %f\" % min_episode_reward)\n print(\"max_episode_length (50 episodes) %f\" % max_episode_length)\n print(\"best mean reward %f\" % best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % exploration.value(t))\n print(\"learning_rate %f\" % optimizer_spec.lr_schedule.value(t))\n print(\"\\n\")\n sys.stdout.flush()\n\n meta_graph_def = tf.train.export_meta_graph(filename=exp_dir + '/saved_model/graph.ckpt.meta',\n export_scope=scope_name)\n save_path = saver.save(session, exp_dir + 
'/saved_model/graph.ckpt', write_meta_graph=False)\n print(\"Model saved in path: %s\" % save_path)\n writer.add_graph(session.graph)\n writer.close()\n"
},
{
"alpha_fraction": 0.4777265787124634,
"alphanum_fraction": 0.5048643350601196,
"avg_line_length": 30.24799919128418,
"blob_id": "5e03469d77753fec0e8b1a50a289ad81f01ce28d",
"content_id": "a46254f011c8f3228c78b5bf3b199bac2c7f76c3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3906,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 125,
"path": "/environments/grid_maze_env/grid_maze_generator.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom copy import deepcopy\nfrom queue import Queue\n\n\ndef generate_pattern(pattern_id):\n assert (0 <= pattern_id < 2 ** 13)\n pattern = np.zeros(shape=(5, 5)).astype(int)\n pattern[0, 0] = pattern[0, 1] = pattern[1, 0] = 1\n pattern[4, 0] = pattern[3, 0] = pattern[4, 1] = 1\n pattern[0, 4] = pattern[0, 3] = pattern[1, 4] = 1\n pattern[4, 4] = pattern[3, 4] = pattern[4, 3] = 1\n\n bit_cnt = 1\n for i in range(5):\n for j in range(5):\n if pattern[i][j] == 1:\n continue\n if (pattern_id & bit_cnt) > 0:\n pattern[i][j] = 1\n bit_cnt *= 2\n return pattern\n\n\ndef draw_maze(maze):\n cmap = colors.ListedColormap(['white', 'darkgreen', \"red\", \"blue\"])\n bounds = [0, 1, 2, 3, 40]\n norm = colors.BoundaryNorm(bounds, cmap.N)\n fig, ax = plt.subplots()\n ax.imshow(maze, cmap=cmap, norm=norm)\n plt.axis('off')\n plt.show()\n\n\ndef generate_maze(blocks, size_x, size_y, seed=314159265, options = False):\n np.random.seed(seed)\n pattern_no_to_id = {}\n maze = None\n for i in range(size_y):\n r0 = np.random.choice(len(blocks), size=1, replace=False)[0]\n row = blocks[r0]\n pattern_no_to_id[i*size_x] = r0\n for j in range(1, size_x):\n r1 = np.random.choice(len(blocks), 1, replace=False)[0]\n pattern_no_to_id[i*size_x+j] = r1\n row = np.concatenate((row, blocks[r1]), axis=1)\n maze = np.concatenate((maze, row), axis=0) if maze is not None else row\n\n if options:\n return maze, pattern_no_to_id\n else:\n return maze\n\n\ndef prepare_maze(maze):\n maze_size_x, maze_size_y = len(maze), len(maze[0])\n for i in range(maze_size_x):\n maze[i][0] = maze[i][maze_size_y - 1] = 1\n for i in range(maze_size_y):\n maze[0][i] = maze[maze_size_x - 1][i] = 1\n\n return maze\n\n\ndef bfs(start_x, start_y, maze):\n maze_size_x, maze_size_y = len(maze), len(maze[0])\n\n # max value\n MX = (maze_size_x + 10) * (maze_size_y + 10)\n maze[maze == 0] = MX\n q = Queue()\n q.put((start_x, start_y))\n maze[start_x, start_y] = 0\n while not q.empty():\n x, y = q.get()\n assert (maze[x][y] != -1)\n for dx in range(-1, 2):\n for dy in range(-1, 2):\n if dx * dy != 0 or (dx == 0 and dy == 0):\n continue\n nx, ny = x + dx, y + dy\n if maze[nx][ny] == -1:\n continue\n if maze[nx][ny] == MX:\n q.put((nx, ny))\n maze[nx][ny] = maze[x][y] + 1\n maze[maze == MX] = 0\n\n\ndef place_start_finish(maze):\n mz = deepcopy(maze)\n maze_size_x, maze_size_y = len(mz), len(mz[0])\n mz[mz > 0] = -1\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n if mz[i][j] == 0:\n bfs(start_x=i, start_y=j, maze=mz)\n max_dist = int(np.max(mz))\n\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n if mz[i][j] == max_dist:\n start_x, start_y = i, j\n mz[mz > 0] = 0\n bfs(start_x=start_x, start_y=start_y, maze=mz)\n max_dist = int(np.max(mz))\n for ii in range(maze_size_x):\n for jj in range(maze_size_y):\n if mz[ii][jj] == max_dist:\n finish_x, finish_y = ii, jj\n maze[start_x, start_y] = 2\n maze[finish_x, finish_y] = 3\n return maze\n raise ValueError\n\n\ndef generate_maze_please(size_x=3, size_y=4):\n t = place_start_finish(prepare_maze(generate_maze(blocks=[generate_pattern(64)], size_x=size_x, size_y=size_y)))\n return t\n\n\nif __name__ == \"__main__\":\n print(generate_maze_please())\n"
},
{
"alpha_fraction": 0.6391258239746094,
"alphanum_fraction": 0.6567164063453674,
"avg_line_length": 35.7843132019043,
"blob_id": "1672a0cb9a864780481dd12f05f3f56b01f74d6a",
"content_id": "aec421a99082b82a56337ebc51480e7fd0ed2d12",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1876,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 51,
"path": "/utils_dqn/plotting.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom collections import namedtuple\nfrom matplotlib import pyplot as plt\n\nEpisodeStats = namedtuple(\"Stats\", [\"episode_lengths\", \"episode_rewards\"])\n\n\ndef plot_episode_stats(stats, smoothing_window=10, save_fig=False, fig_name=None, no_show=False, fig_dir=None):\n # Plot the episode length over time\n fig1 = plt.figure(figsize=(10, 5))\n plt.plot(stats.episode_lengths)\n plt.xlabel(\"Episode\", fontsize=13)\n plt.ylabel(\"Episode Length\", fontsize=13)\n plt.title(\"Episode Length over Time\", fontsize=14)\n if no_show:\n plt.close(fig1)\n else:\n if save_fig:\n plt.savefig(fig_dir + fig_name + \"length.png\")\n plt.show(fig1)\n\n # Plot the episode reward over time\n fig2 = plt.figure(figsize=(10, 5))\n rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()\n plt.plot(rewards_smoothed)\n plt.xlabel(\"Episode\", fontsize=13)\n plt.ylabel(\"Episode Reward (Smoothed)\", fontsize=13)\n plt.title(\"Episode Reward over Time (Smoothed over window size {})\".format(smoothing_window), fontsize=14)\n if no_show:\n plt.close(fig2)\n else:\n if save_fig:\n plt.savefig(fig_dir + fig_name + \"reward.png\")\n plt.show(fig2)\n\n\ndef plot_multi_test(curve_to_draw=None, smoothing_window=10, x_label=\"X\", y_label=\"Y\", labels=None, fig_dir=None,\n fig_name=None):\n fig2 = plt.figure(figsize=(10, 5))\n\n t = []\n for index, elem in enumerate(curve_to_draw):\n rewards_smoothed = pd.Series(elem).rolling(smoothing_window, min_periods=smoothing_window).mean()\n p, = plt.plot(rewards_smoothed)\n t.append(p)\n plt.legend(t, labels) if labels else plt.legend(t)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n plt.savefig(fig_dir + fig_name + \"multi_test.png\")\n plt.show(fig2)\n"
},
{
"alpha_fraction": 0.5790955424308777,
"alphanum_fraction": 0.6012669205665588,
"avg_line_length": 32.63313674926758,
"blob_id": "8ad2cbacc566181387928a6b4ec2c158a398d7b2",
"content_id": "afd02440e24f36e2370dbca4c4e29d9a84411284",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5683,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 169,
"path": "/DQN&Options end-to-end/test_task.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import argparse\nimport gym\nfrom gym import wrappers\nimport os.path as osp\nimport random\nimport numpy as np\nimport itertools\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nimport os\nimport sys\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n# from utils import plotting\n\n# from DQN import dqn\n# from DQN.dqn_utils import *\nfrom environments_dqn.arm_env_dqn import ArmEnvDQN\nfrom environments_dqn.arm_env_dqn_go_down import ArmEnvDQN_1\nfrom environments_dqn.arm_env_dqn_lift_cube import ArmEnvDQN_2\n\ndef encode_observation(frame):\n img_h, img_w = frame.shape[1], frame.shape[2]\n return frame.transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n\n\ndef conv_model(input_data, scope, flatten=True, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n if flatten:\n out = layers.flatten(out)\n return out\n\n\ndef mlp_model(input_data, output_len, scope, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = input_data\n out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=output_len, activation_fn=None)\n return out\n\n\n\ndef main():\n env1 = ArmEnvDQN(episode_max_length=200,\n size_x=4,\n size_y=3,\n cubes_cnt=3,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=3)\n\n env2 = ArmEnvDQN_1(episode_max_length=200,\n size_x=4,\n size_y=3,\n cubes_cnt=3,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=3)\n\n env3 = ArmEnvDQN_2(episode_max_length=200,\n size_x=4,\n size_y=3,\n cubes_cnt=3,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=3)\n # print(env.reset())\n\n # First let's load meta graph and restore weights\n # saver = tf.train.import_meta_graph('option_lift_cube.ckpt.meta')\n\n # saver2 = tf.train.import_meta_graph('/tmp/option_lift_cube.ckpt.meta')\n # saver.restore(session, tf.train.latest_checkpoint('./'))\n frame_history_len = 1\n img_h, img_w, img_c = env1.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c) # size_x, size_y,\n num_actions = env1.action_space.n\n\n # # placeholder for current observation (or state)\n # obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))\n # # casting to float on GPU ensures lower data transfer times.\n # obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0\n\n\n\n # pred_q = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n # pred_ac = tf.argmax(pred_q, axis=1)\n # graph = tf.get_default_graph()\n\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n\n\n conv = conv_model(obs_t_float, scope=\"convolution\", reuse=False)\n pred_q = mlp_model(conv, num_actions, scope=\"task2\", reuse=False)\n pred_ac = tf.argmax(pred_q, axis=1, name=\"pred_ac\")\n\n # obs_t_float2 = graph.get_tensor_by_name(\"obs_t_ph_lift:0\")\n\n ## How to access saved operation\n # pred_ac2 = graph.get_tensor_by_name(\"pred_ac_lift:0\")\n\n 
episode_reward = 0\n episode_length = 0\n last_obs = env3.reset()\n\n session = tf.Session()\n\n # saver2.restore(session, \"/tmp/option_lift_cube.ckpt\")\n saver1 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"convolution\"))\n saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"task2\"))\n\n saver1.restore(session, '../experiments/DQN&Options end-to-end/experiment task0/saved_model/conv_graph.ckpt')\n saver2.restore(session, '../experiments/DQN&Options end-to-end/experiment task2/saved_model/graph.ckpt')\n\n for t in itertools.count():\n\n env3.render()\n obs = encode_observation(np.array([last_obs]))\n action = session.run(pred_ac, {obs_t_float: [obs]})[0]\n\n next_obs, reward, done, info = env3.step(action)\n\n episode_reward += reward\n episode_length += 1\n\n if done or episode_length == 100:\n env3.render()\n break\n\n last_obs = next_obs\n print(episode_reward, episode_length)\n\n\n# episode_reward = 0\n# episode_length = 0\n# last_obs = env2.reset()\n# for t in itertools.count():\n\n# env2.render()\n# obs = encode_observation(np.array([last_obs]))\n# action = session.run(pred_ac2, {obs_t_float2: [obs]})[0]\n\n# next_obs, reward, done, info = env2.step(action)\n\n# episode_reward += reward\n# episode_length += 1\n\n# if done or episode_length == 500:\n# env2.render()\n# break\n\n# last_obs = next_obs\n# print(episode_reward, episode_length)\n\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5970868468284607,
"alphanum_fraction": 0.609451949596405,
"avg_line_length": 41.03083801269531,
"blob_id": "1db916205fb3b7759018c2b70b379642b14ae471",
"content_id": "71796ea2fab79f784610a9b30e4a9d352e27e5a2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9543,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 227,
"path": "/DQN with Options/dqn_with_options.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import sys\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom utils_dqn.dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\n\ndef learn(env,\n options,\n q_func,\n optimizer_spec,\n session,\n scope_name,\n exploration=LinearSchedule(300000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=10000,\n batch_size=32,\n gamma=0.99,\n learning_starts=5000,\n learning_freq=1,\n frame_history_len=1,\n target_update_freq=1000,\n grad_norm_clipping=10):\n\n\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n if len(env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. RAM)\n input_shape = env.observation_space.shape\n else:\n img_h, img_w, img_c = env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c) # size_x, size_y,\n\n # taking into account options\n num_actions = env.action_space.n + len(options)\n\n ### 1. Set up placeholders\n with tf.variable_scope(scope_name):\n # with tf.variable_scope(\"obs_t_ph\"):\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_t_ph\")\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.realdiv(tf.cast(obs_t_ph, tf.float32), 255.0, name='obs_t_float')\n\n pred_q = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n pred_ac = tf.argmax(pred_q, axis=1, name=\"pred_ac\")\n\n # placeholder for current action\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"act_t_ph\")\n\n # placeholder for current reward\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"rew_t_ph\")\n\n with tf.variable_scope(\"obs_tp1_ph\"):\n # placeholder for next observation (or state)\n obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape), name=\"obs_tp1_ph\")\n obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0\n\n # placeholder for end of episode mask\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done_mask_ph\")\n\n # placeholder for the time the option took\n opt_steps = tf.placeholder(tf.float32, [None], name=\"opt_steps\")\n\n with tf.variable_scope(\"pred_q_a\"):\n pred_q_a = tf.reduce_sum(pred_q * tf.one_hot(act_t_ph, depth=num_actions), axis=1, name='pred_q_a')\n\n target_q = q_func(obs_tp1_float, num_actions, scope=\"target_q_func\", reuse=False)\n\n with tf.variable_scope(\"target_q_a\"):\n target_q_a = rew_t_ph + (1 - done_mask_ph) * tf.pow(gamma, opt_steps) * tf.reduce_max(target_q, axis=1)\n\n with tf.variable_scope(\"Compute_bellman_error\"):\n total_error = tf.reduce_sum(huber_loss(pred_q_a - tf.stop_gradient(target_q_a)), name='total_error')\n\n with tf.variable_scope(\"Hold_the_var\"):\n # Hold all of the variables of the Q-function network and target network, respectively.\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name + '/q_func')\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')\n\n # construct optimization op (with gradient clipping)\n learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n with tf.variable_scope(\"Optimizer\"):\n optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)\n train_fn = minimize_and_clip(optimizer, total_error,\n 
var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n\n with tf.variable_scope(\"Update_target_fn\"):\n update_target_fn = tf.group(*update_target_fn, name='update_target_fn')\n\n # construct the replay buffer with options\n replay_buffer = ReplayBufferOptions(replay_buffer_size, frame_history_len)\n\n ###############\n # RUN ENV #\n ###############\n model_initialized = False\n num_param_updates = 0\n mean_episode_reward = -float('nan')\n best_mean_episode_reward = -float('inf')\n last_obs = env.reset()\n LOG_EVERY_N_STEPS = 500\n\n # saver = tf.train.Saver()\n saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name))\n\n for t in itertools.count():\n ### 1. Check stopping criterion\n if stopping_criterion is not None and stopping_criterion(env, t):\n break\n\n ### 2. Step the env and store the transition\n\n # Store the latest observation that was recorded from the simulator.\n idx = replay_buffer.store_frame(last_obs)\n\n # Epsilon greedy exploration\n if not model_initialized or random.random() < exploration.value(t):\n action = random.randint(0, num_actions - 1)\n else:\n obs = replay_buffer.encode_recent_observation()\n action = session.run(pred_ac, {obs_t_ph: [obs]})[0]\n\n if action < env.action_space.n:\n next_obs, reward, done, info = env.step(action)\n opt_steps_n = 1\n else:\n # here the execution of the option\n next_obs, reward, done, opt_steps_n, info = options[action - env.action_space.n].step(env, isoption=True)\n env._episode_length += 1\n\n # Store the outcome\n replay_buffer.store_effect(idx, action, reward, done, opt_steps_n)\n last_obs = env.reset() if done else next_obs\n\n ### 3. Perform experience replay and train the network.\n\n if (t > learning_starts and t % learning_freq == 0 and\n replay_buffer.can_sample(batch_size)):\n\n # 3.a sample a batch of transitions\n obs_batch, act_batch, rew_batch, next_obs_batch, done_batch, opt_steps_batch = replay_buffer.sample(\n batch_size)\n\n # 3.b initialize the model if haven't\n if not model_initialized:\n initialize_interdependent_variables(session, tf.global_variables(), {\n obs_t_ph: obs_batch,\n obs_tp1_ph: next_obs_batch,\n })\n session.run(update_target_fn)\n model_initialized = True\n\n # 3.c train the model\n _, error = session.run([train_fn, total_error], {\n obs_t_ph: obs_batch,\n act_t_ph: act_batch,\n rew_t_ph: rew_batch,\n obs_tp1_ph: next_obs_batch,\n opt_steps: opt_steps_batch,\n done_mask_ph: done_batch,\n learning_rate: optimizer_spec.lr_schedule.value(t)\n })\n\n # 3.d periodically update the target network\n if t % target_update_freq == 0:\n session.run(update_target_fn)\n num_param_updates += 1\n\n ### 4. 
Log progress\n episode_rewards = env.get_episode_rewards()\n episode_lengths = env.get_episode_lengths()\n\n if len(episode_rewards) > 0 and len(episode_rewards) <= 50:\n mean_episode_reward = np.mean(episode_rewards)\n mean_episode_length = np.mean(episode_lengths)\n\n max_episode_reward = np.max(episode_rewards)\n min_episode_length = np.min(episode_lengths)\n\n min_episode_reward = np.min(episode_rewards)\n max_episode_length = np.max(episode_lengths)\n\n elif len(episode_rewards) > 50:\n mean_episode_reward = np.mean(episode_rewards[-50:])\n mean_episode_length = np.mean(episode_lengths[-50:])\n\n max_episode_reward = np.max(episode_rewards[-50:])\n min_episode_length = np.min(episode_lengths[-50:])\n\n min_episode_reward = np.min(episode_rewards[-50:])\n max_episode_length = np.max(episode_lengths[-50:])\n\n best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)\n\n if t % LOG_EVERY_N_STEPS == 0 and model_initialized:\n print(\"Timestep %d\" % (t,))\n print(\"mean reward (50 episodes) %f\" % mean_episode_reward)\n print(\"mean length (50 episodes) %f\" % mean_episode_length)\n print(\"max_episode_reward (50 episodes) %f\" % max_episode_reward)\n print(\"min_episode_length (50 episodes) %f\" % min_episode_length)\n print(\"min_episode_reward (50 episodes) %f\" % min_episode_reward)\n print(\"max_episode_length (50 episodes) %f\" % max_episode_length)\n print(\"best mean reward %f\" % best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % exploration.value(t))\n print(\"learning_rate %f\" % optimizer_spec.lr_schedule.value(t))\n print(\"\\n\")\n sys.stdout.flush()\n\n meta_graph_def = tf.train.export_meta_graph(filename=scope_name + '/dqn_graph.ckpt.meta', export_scope=scope_name)\n save_path = saver.save(session, scope_name + '/dqn_graph.ckpt', write_meta_graph=False)\n print(\"Model saved in path: %s\" % save_path)\n\n\n"
},
{
"alpha_fraction": 0.748235285282135,
"alphanum_fraction": 0.7858823537826538,
"avg_line_length": 31.69230842590332,
"blob_id": "a00cb60205129d26d9fd1a2ea404b2ce7189ada4",
"content_id": "73d890aef193fbad87d5c22e183209757127c39e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 425,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 13,
"path": "/DQN&Options end-to-end/README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "# DQN&Options end-to-end\n\nThe work on the united architecture\n\nThe learning is step by step.\n1) Train the task0 with train_task0.py\n2) Train the task1 with train_task1.py\n3) Train the task2 with train_task2.py\n4) Train the checker1 with checker1_train.py\n5) Train the checker2 with checker2_train.py\n\nAfter that we can load all the trained variables into the graph graph2.py\nand train the manager with Graph_train_manager.py\n"
},
{
"alpha_fraction": 0.5528913736343384,
"alphanum_fraction": 0.5641748905181885,
"avg_line_length": 24.321428298950195,
"blob_id": "03f518cb87340973baeeaafa7e4adada5fd666a9",
"content_id": "017812337486450636583d835da9447b9747bf76",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 28,
"path": "/utils/graph_drawer.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import pygraphviz as pgv\n\n\ndef draw_graph(file_name, graph):\n \"\"\"\n drawing png graph from the list of edges\n :param file_name: file_name\n :param graph: graph file with format: (left_edge, right_edge) or (left_edge, right_edge, label)\n :return: None\n \"\"\"\n g_out = pgv.AGraph(strict=False, directed=True)\n for i in graph:\n g_out.add_edge(i[0], i[1], color='black')\n edge = g_out.get_edge(i[0], i[1])\n\n if len(i) > 2:\n edge.attr['label'] = i[2]\n\n g_out.layout(prog='dot')\n g_out.draw(path=\"{file_name}.svg\".format(**locals()))\n\n\ndef main():\n draw_graph(file_name=\"test\", graph=[(\"A\", \"B\", 'test'), (2, 3)])\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4020857512950897,
"alphanum_fraction": 0.42989569902420044,
"avg_line_length": 23,
"blob_id": "f9b4a883bbc905242f29de393bc2c357fe183e29",
"content_id": "41d0963947d4b555bc410f5e056e6437eb0f6d3d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 863,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 36,
"path": "/environments/arm_env/console_interact.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from DQN.arm_env_dqn import ArmEnvDQN\n\nif __name__ == \"__main__\":\n\n print(\"\\n\" * 100)\n\n c_env = ArmEnvDQN(episode_max_length=200,\n size_x=5,\n size_y=4,\n cubes_cnt=3,\n scaling_coeff=3,\n action_minus_reward=-1,\n finish_reward=200,\n tower_target_size=3)\n _, reward, done = (None, None, None)\n done = False\n while not done:\n print('\\n' * 100)\n c_env.render()\n\n print(reward, done)\n\n print(\"0 LEFT\")\n print(\"1 UP\")\n print(\"2 RIGHT\")\n print(\"3 DOWN\")\n print(\"4 TOGGLE\")\n # print(\"5 OFF\")\n\n while True:\n try:\n act = int(input())\n break\n except ValueError:\n pass\n _, reward, done, _ = c_env.step(act)"
},
{
"alpha_fraction": 0.591050922870636,
"alphanum_fraction": 0.5951956510543823,
"avg_line_length": 39.50180435180664,
"blob_id": "094356ba07a674230d3fde4d326d833f6bd00d77",
"content_id": "a3603a636f97a1059a4b58391401d5b9d1b08d95",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22438,
"license_type": "permissive",
"max_line_length": 160,
"num_lines": 554,
"path": "/HAM/HAM_core.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import random\nimport operator\nimport sys\nfrom collections import defaultdict\n\nfrom environments.arm_env.arm_env import ArmEnv\nfrom utils import plotting\n\n\nclass HAMParams:\n def __init__(self,\n q_value,\n env,\n current_state,\n eps,\n gamma,\n alpha,\n string_prefix_of_machine,\n accumulated_discount,\n accumulated_rewards,\n previous_machine_choice_state,\n env_is_done,\n logs,\n on_model_transition_id_function\n ):\n self.q_value = q_value\n self.env = env\n self.current_state = current_state\n self.eps = eps\n self.gamma = gamma\n self.alpha = alpha\n self.string_prefix_of_machine = string_prefix_of_machine\n self.accumulated_discount = accumulated_discount\n self.accumulated_rewards = accumulated_rewards\n self.previous_machine_choice_state = previous_machine_choice_state\n self.env_is_done = env_is_done\n self.logs = logs\n self.on_model_transition_id_function = on_model_transition_id_function\n\n\nclass MachineGraph:\n def get_vertex_from_transitions(self):\n res = set(_.left for _ in self.transitions).union(set(_.right for _ in self.transitions))\n\n return res\n\n def get_vertex_mapping(self):\n res = defaultdict(lambda: [])\n for transition in self.transitions:\n res[transition.left].append(transition)\n return res\n # return {_: [i for i in self.transitions if i.left == _] for _ in self.vertices}\n\n def get_vertex_reverse_mapping(self):\n res = defaultdict(lambda: [])\n for transition in self.transitions:\n res[transition.right].append(transition)\n return res\n # return {_: [i for i in self.transitions if i.right == _] for _ in self.vertices}\n\n def get_action_vertex_label_mapping(self):\n return {_: {__.label: __ for __ in self.get_vertex_mapping()[_]} for _ in\n set([i.left for i in self.transitions if isinstance(i.left, Action)])}\n\n def get_special_vertices(self, special_vertex_class):\n return list(filter(lambda x: isinstance(x, special_vertex_class), self.vertices))\n\n def get_start(self):\n res = self.get_special_vertices(Start)\n assert (len(res) == 1)\n return res[0]\n\n def get_stop(self):\n res = self.get_special_vertices(Stop)\n assert (len(res) == 1)\n return res[0]\n\n def __init__(self, transitions, vertices=None):\n self.transitions = transitions\n self.vertices = vertices if vertices is not None else sorted(self.get_vertex_from_transitions(), key=lambda x: x.id)\n self.vertex_mapping = self.get_vertex_mapping()\n self.vertex_reverse_mapping = self.get_vertex_reverse_mapping()\n\n self.choice_relations = {__.left: {_.id: _ for _ in self.vertex_mapping[__.left]} for __ in transitions if isinstance(__.left, Choice)}\n self.action_vertex_label_mapping = {_: {__.label: __ for __ in self.vertex_mapping[_]} for _ in self.get_special_vertices(Action)}\n\n\nclass AbstractMachine:\n free_id = 1\n\n def __init__(self, graph: MachineGraph):\n self.graph = graph\n\n self.params = None\n self.get_on_model_transition_id = None\n\n self.previous_choice_state = None\n self.accumulated_discount = 1\n self.accumulated_rewards = 0\n\n # set unique id for AbstractMachine object\n self.id, AbstractMachine.free_id = AbstractMachine.free_id, AbstractMachine.free_id + 1\n\n def run(self, params: HAMParams):\n t = filter(lambda x: isinstance(x.left, Start), self.graph.transitions)\n try:\n current_vertex = t.__next__().left\n except StopIteration:\n raise Exception(\"No start vertex in graph\")\n try:\n t.__next__()\n raise Exception(\"More than one start vertex in graph\")\n except StopIteration:\n pass\n\n self.params = params\n # shortcut lambda for on_model 
function\n self.get_on_model_transition_id = lambda: self.params.on_model_transition_id_function(self.params.env)\n while not isinstance(current_vertex, Stop):\n current_vertex = current_vertex.run(self)\n\n def get_graph_to_draw(self, action_to_name_mapping=None, already_added_machines=None, no_edges_with_exit_f=None):\n if already_added_machines is None:\n already_added_machines = []\n graph = []\n for i in self.graph.transitions:\n if no_edges_with_exit_f and isinstance(i.left, Action) and i.label == 1:\n continue\n\n def get_str_with_special_for_actions(vertex):\n if isinstance(vertex, Action) and action_to_name_mapping is not None:\n res = str(vertex)\n\n for action_name, action_id in action_to_name_mapping.items():\n res = res.replace(\"({action_id})\".format(**locals()), \"({action_name})\".format(**locals()))\n return res\n else:\n return str(vertex)\n\n left_vertex = get_str_with_special_for_actions(i.left)\n right_vertex = get_str_with_special_for_actions(i.right)\n\n graph.append((left_vertex, right_vertex, \"f(E)=\" + str(i.label) if i.label is not None and no_edges_with_exit_f is None else \"\"))\n\n for i in self.graph.transitions:\n\n if isinstance(i.right, Call):\n if i.right not in already_added_machines:\n already_added_machines.append(i.right)\n if i.right.machine_to_call is not None:\n graph = graph + i.right.machine_to_call.get_graph_to_draw(already_added_machines=already_added_machines,\n action_to_name_mapping=action_to_name_mapping)\n return graph\n\n def __str__(self):\n return \"{self.__class__.__name__}{self.id}\".format(**locals())\n\n\nclass RootMachine(AbstractMachine):\n def __init__(self, machine_to_invoke):\n start = Start()\n call = Call(machine_to_invoke)\n choice = Choice()\n stop = Stop()\n transitions = (\n MachineRelation(left=start, right=call),\n MachineRelation(left=call, right=choice),\n MachineRelation(left=choice, right=stop)\n )\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass LoopInvokerMachine(AbstractMachine):\n def __init__(self, machine_to_invoke):\n start = Start()\n call = Call(machine_to_invoke)\n stop = Stop()\n empty_action = Action()\n transitions = (\n MachineRelation(left=start, right=call),\n MachineRelation(left=call, right=empty_action),\n MachineRelation(left=empty_action, right=call, label=0),\n MachineRelation(left=empty_action, right=stop, label=1),\n )\n super().__init__(graph=MachineGraph(transitions=transitions))\n\n\nclass RandomMachine(AbstractMachine):\n @staticmethod\n def create_random_vertex(env, machines_to_call=()):\n vertex_to_add_list = [Action(action=i) for i in sorted(env.get_actions_as_dict().values())]\n vertex_to_add_list += [Choice(), Choice()]\n vertex_to_add_list += [Call(machine_to_call=i) for i in machines_to_call]\n return random.choice(sorted(vertex_to_add_list, key=lambda x: x.id))\n\n @staticmethod\n def get_vertex_from_transitions(transitions):\n res = set(_.left for _ in transitions).union(set(_.right for _ in transitions))\n # remove auxiliary empty(None) vertex\n # if None in res:\n # res.remove(None)\n return res\n\n @staticmethod\n def check_graph(graph):\n # TODO implement this\n\n # checking for duplicates\n for index_i, item_i in enumerate(graph.transitions):\n for index_j, item_j in enumerate(graph.transitions):\n if index_i >= index_j:\n continue\n if item_i == item_j:\n return False\n\n for vertex in graph.vertices:\n if isinstance(vertex, Call):\n if len(graph.vertex_mapping[vertex]) > 1:\n return False\n\n elif isinstance(vertex, Action):\n # check for only 
single edge with definite label (on_model value)\n if len(graph.vertex_mapping[vertex]) > len(set(_.label for _ in graph.vertex_mapping[vertex])):\n return False\n elif isinstance(vertex, Choice):\n pass\n elif isinstance(vertex, Stop):\n # no edges from Stop instance\n if len(graph.vertex_mapping[vertex]) > 0:\n return False\n elif isinstance(vertex, Start):\n # no input edges for Start instance\n if len(graph.vertex_reverse_mapping[vertex]) > 0:\n return False\n # single outer edge from Start instance\n if len(graph.vertex_mapping[vertex]) > 1:\n return False\n # if len(graph.vertex_mapping[vertex]) == 1 and isinstance(graph.vertex_mapping[vertex][0].right, Stop):\n # return False\n else:\n raise KeyError\n\n # p = AbstractMachine.get_action_vertex_label_mapping(transitions=transitions)\n return True\n\n @staticmethod\n def dfs_get_reachable_vertices(graph, vertex, reachable=None):\n if reachable is None:\n reachable = []\n if vertex in reachable:\n return reachable\n reachable.append(vertex)\n for relation in graph.vertex_mapping[vertex]:\n RandomMachine.dfs_get_reachable_vertices(graph=graph, vertex=relation.right, reachable=reachable)\n return reachable\n\n def get_new_possible_relation(self):\n # vertices = self.graph.vertices\n machine_relation_to_add = []\n\n # simple algorithm with complexity O(N^4) [one can done that with 0(N^2) complexity], but complexity is likely not an bottleneck in this case\n reachable_vertices = RandomMachine.dfs_get_reachable_vertices(graph=self.graph, vertex=self.graph.get_start())\n for index_i, left in enumerate(reachable_vertices):\n for index_j, right in enumerate(self.graph.vertices):\n new_machine_relation = MachineRelation(left=left, right=right, label=0) if isinstance(left, Action) else MachineRelation(left=left, right=right)\n if RandomMachine.check_graph(graph=MachineGraph(transitions=self.graph.transitions + [new_machine_relation], vertices=self.graph.vertices)):\n machine_relation_to_add.append(new_machine_relation)\n\n assert (len(machine_relation_to_add) > 0)\n return random.choice(machine_relation_to_add)\n\n def __init__(self, graph=None):\n if graph is None:\n graph = MachineGraph(transitions=[], vertices=[Start(), Stop()])\n super().__init__(graph=graph)\n\n def with_new_vertex(self, env, machines_to_call=()):\n new_vertex = self.create_random_vertex(env=env, machines_to_call=machines_to_call)\n return RandomMachine(graph=MachineGraph(transitions=self.graph.transitions, vertices=self.graph.vertices + [new_vertex]))\n\n def with_new_relation(self):\n # res = MachineGraph(transitions=self.graph.transitions + [self.get_new_possible_relation()], vertices=self.graph.vertices)\n res = MachineGraph(transitions=self.graph.transitions, vertices=self.graph.vertices)\n stop = res.get_stop()\n # added to Action vertices link to the Stop with on_model env_done\n # TODO don't create links between unused vertex and Stop\n for vertex in res.get_special_vertices(Action):\n # print(\"::\", res.graph.action_vertex_label_mapping[vertex])\n if not res.vertex_mapping[vertex] and not res.vertex_reverse_mapping[vertex]:\n continue\n if 1 not in res.action_vertex_label_mapping[vertex]:\n res.transitions.append(MachineRelation(left=vertex, right=stop, label=1))\n\n return RandomMachine(graph=MachineGraph(transitions=res.transitions, vertices=res.vertices))\n\n\nclass AutoBasicMachine(RootMachine):\n def __init__(self, env):\n start = Start()\n choice_one = Choice()\n actions = [Action(action=_) for _ in env.get_actions_as_dict().values()]\n stop = Stop()\n\n 
transitions = [MachineRelation(left=start, right=choice_one), ]\n for action in actions:\n transitions.append(MachineRelation(left=choice_one, right=action))\n transitions.append(MachineRelation(left=action, right=stop, label=0))\n transitions.append(MachineRelation(left=action, right=stop, label=1))\n\n super().__init__(machine_to_invoke=LoopInvokerMachine(AbstractMachine(MachineGraph(transitions=transitions))))\n\n\nclass MachineVertex:\n free_id = 1\n\n def __init__(self):\n # set unique id for MachineVertex object\n self.id, MachineVertex.free_id = MachineVertex.free_id, MachineVertex.free_id + 1\n\n def __str__(self):\n return \"{self.__class__.__name__}{self.id}\".format(**locals())\n\n def run(self, *args, **kwargs):\n raise NotImplementedError\n\n def get_name(self):\n if isinstance(self, Start):\n return \"Start\"\n elif isinstance(self, Stop):\n return \"Stop\"\n elif isinstance(self, Choice):\n return \"Choice\"\n elif isinstance(self, Call):\n return \"Call\"\n elif isinstance(self, Action):\n return \"Action\", self.action\n else:\n raise TypeError\n\n def __lt__(self, other):\n def get_vertex_id(vertex):\n if isinstance(vertex, Start):\n return 0\n elif isinstance(vertex, Stop):\n return 1\n elif isinstance(vertex, Choice):\n return 2\n elif isinstance(vertex, Call):\n return 3\n elif isinstance(vertex, Action):\n return 4\n else:\n raise TypeError\n\n if isinstance(self, Action) and isinstance(other, Action):\n return self.action < other.action\n return get_vertex_id(self) < get_vertex_id(other)\n\n\nclass Start(MachineVertex):\n def run(self, own_machine: AbstractMachine):\n # return next vertex\n return own_machine.graph.vertex_mapping[self][0].right\n\n def __str__(self):\n return \"{self.__class__.__name__}{self.id}\".format(**locals())\n\n\nclass Stop(MachineVertex):\n def run(self, own_machine: AbstractMachine):\n pass\n\n\nclass Choice(MachineVertex):\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def get_e_greedy(q_choices: dict, eps: float):\n if random.random() < eps:\n return random.choice(list(q_choices.keys()))\n else:\n return max(q_choices.items(), key=operator.itemgetter(1))[0]\n\n def run(self, own_machine: AbstractMachine):\n combined_state = own_machine.id, self.id, own_machine.params.env.get_current_state()\n\n if combined_state not in own_machine.params.q_value:\n own_machine.params.q_value[combined_state] = {_: 0 for _ in own_machine.graph.choice_relations[self].keys()}\n\n if own_machine.params.previous_machine_choice_state is not None:\n q = own_machine.params.q_value[own_machine.params.previous_machine_choice_state][own_machine.params.previous_machine_choice]\n v = own_machine.params.q_value[combined_state][self.get_e_greedy(own_machine.params.q_value[combined_state], eps=0)]\n delta = own_machine.params.alpha * (own_machine.params.accumulated_rewards + own_machine.params.accumulated_discount * v - q)\n q += delta\n own_machine.params.q_value[own_machine.params.previous_machine_choice_state][own_machine.params.previous_machine_choice] = q\n\n action = self.get_e_greedy(own_machine.params.q_value[combined_state], eps=own_machine.params.eps)\n own_machine.params.previous_machine_choice_state = combined_state\n own_machine.params.previous_machine_choice = action\n\n own_machine.params.accumulated_rewards = 0\n own_machine.params.accumulated_discount = 1\n\n return own_machine.graph.choice_relations[self][action].right\n\n\nclass Call(MachineVertex):\n def __init__(self, machine_to_call: AbstractMachine):\n self.machine_to_call = 
machine_to_call\n super().__init__()\n\n def run(self, own_machine: AbstractMachine):\n self.machine_to_call.run(own_machine.params)\n\n # return next vertex\n return own_machine.graph.vertex_mapping[self][0].right\n\n def __str__(self):\n return super().__str__() + \"[{self.machine_to_call}]\".format(**locals())\n\n\nclass Action(MachineVertex):\n def __init__(self, action=None):\n self.action = action\n super().__init__()\n\n def __str__(self):\n return super(Action, self).__str__() + \"(\" + str(self.action) + \")\"\n\n def run(self, own_machine: AbstractMachine):\n if self.action is not None:\n state, reward, done, _ = own_machine.params.env.step(self.action)\n own_machine.params.logs[\"reward\"] += reward\n if done:\n own_machine.params.logs[\"ep_rewards\"].append(own_machine.params.logs[\"reward\"])\n own_machine.params.logs[\"reward\"] = 0\n\n own_machine.params.accumulated_rewards += reward * own_machine.params.accumulated_discount\n own_machine.params.accumulated_discount *= own_machine.params.gamma\n own_machine.params.eps *= 0.9999\n if \"gif\" not in own_machine.params.logs:\n own_machine.params.logs[\"gif\"] = [[]]\n if done:\n own_machine.params.logs[\"gif\"].append([])\n own_machine.params.logs[\"gif\"][-1].append(own_machine.params.env.get_grid())\n # return next vertex\n return own_machine.graph.action_vertex_label_mapping[self][own_machine.get_on_model_transition_id()].right\n\n\nclass MachineRelation:\n free_id = 1\n\n def __init__(self, left, right, label=None):\n assert not (not isinstance(left, Action) and label is not None), \"Action state vertex doesn't have specified label\"\n assert not (isinstance(left, Action) and label is None), \"Non action state vertex has specified label\"\n\n self.left = left\n self.right = right\n self.label = label\n\n # set unique id for MachineRelation object\n self.id, MachineRelation.free_id = MachineRelation.free_id, MachineRelation.free_id + 1\n\n def __eq__(self, other):\n if self.id == other.id:\n return True\n if self.right == other.right and self.left == other.left and self.label == other.label:\n return True\n return False\n\n def __str__(self):\n return str(self.left) + \" -> \" + str(self.right)\n\n\ndef main():\n env = ArmEnv(episode_max_length=300,\n size_x=5,\n size_y=3,\n cubes_cnt=4,\n action_minus_reward=-1,\n finish_reward=100,\n tower_target_size=4)\n\n params = HAMParams(q_value={},\n env=env,\n current_state=None,\n eps=0.1,\n gamma=0.9,\n alpha=0.1,\n string_prefix_of_machine=None,\n accumulated_discount=1,\n accumulated_rewards=0,\n previous_machine_choice_state=None,\n env_is_done=None,\n logs={\"reward\": 0, \"ep_rewards\": []},\n on_model_transition_id_function=lambda env_: 1 if env_.is_done() else 0,\n )\n\n start = Start()\n choice_one = Choice()\n left = Action(action=env.get_actions_as_dict()[\"LEFT\"])\n right = Action(action=env.get_actions_as_dict()[\"RIGHT\"])\n up = Action(action=env.get_actions_as_dict()[\"UP\"])\n down = Action(action=env.get_actions_as_dict()[\"DOWN\"])\n on = Action(action=env.get_actions_as_dict()[\"ON\"])\n off = Action(action=env.get_actions_as_dict()[\"OFF\"])\n\n stop = Stop()\n simple_machine = AbstractMachine(\n MachineGraph(transitions=(\n MachineRelation(left=start, right=choice_one),\n MachineRelation(left=choice_one, right=left),\n MachineRelation(left=choice_one, right=right),\n MachineRelation(left=choice_one, right=up),\n MachineRelation(left=choice_one, right=down),\n MachineRelation(left=choice_one, right=on),\n MachineRelation(left=choice_one, right=off),\n\n 
MachineRelation(left=left, right=stop, label=0),\n MachineRelation(left=right, right=stop, label=0),\n MachineRelation(left=up, right=stop, label=0),\n MachineRelation(left=down, right=stop, label=0),\n MachineRelation(left=on, right=stop, label=0),\n MachineRelation(left=off, right=stop, label=0),\n\n MachineRelation(left=left, right=stop, label=1),\n MachineRelation(left=right, right=stop, label=1),\n MachineRelation(left=up, right=stop, label=1),\n MachineRelation(left=down, right=stop, label=1),\n MachineRelation(left=on, right=stop, label=1),\n MachineRelation(left=off, right=stop, label=1),\n ), )\n )\n\n root = RootMachine(machine_to_invoke=LoopInvokerMachine(machine_to_invoke=simple_machine))\n num_episodes = 1500\n for i_episode in range(num_episodes):\n env.reset()\n root.run(params)\n if i_episode % 10 == 0:\n print(\"\\r{root} episode {i_episode}/{num_episodes}.\".format(**locals()), end=\"\")\n sys.stdout.flush()\n plotting.plot_multi_test(smoothing_window=30,\n x_label=\"episode\",\n y_label=\"smoothed rewards\",\n curve_to_draw=[params.logs[\"ep_rewards\"]\n ],\n labels=[\"HAM_basic\"]\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5185782313346863,
"alphanum_fraction": 0.5344256162643433,
"avg_line_length": 34.913185119628906,
"blob_id": "85cc77bf266b792971ecb814ff2447f4ae35aa49",
"content_id": "73d30b72783343850ca890c096d14a970267fa59",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11169,
"license_type": "permissive",
"max_line_length": 179,
"num_lines": 311,
"path": "/subgoals discovery/two_rooms_env.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from time import sleep\nimport pandas as pd\nimport numpy as np\nfrom collections import namedtuple, defaultdict\nfrom sklearn.preprocessing import MinMaxScaler\nimport sys\nfrom gym.envs.toy_text import discrete\nfrom sklearn.cluster import AgglomerativeClustering\nfrom tqdm import tqdm\n\nfrom HAM.HAM_core import LoopInvokerMachine, AutoBasicMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, PlotParams, plot_multi\nfrom utils.plotting import plot_multi_test\nfrom workshop.main import AutoMachineSimple\n\n\ndef arg_max_action(q_dict, state, action_space):\n result_action = 0\n for action_to in range(action_space):\n if q_dict[state, action_to] > q_dict[state, result_action]:\n result_action = action_to\n return result_action\n\n\nclass TwoRooms(discrete.DiscreteEnv):\n metadata = {'render.modes': ['human', 'ansi']}\n\n ACTIONS = namedtuple(\"ACTIONS\", [\"LEFT\", \"UP\", \"RIGHT\", \"DOWN\", ])(\n UP=0,\n RIGHT=1,\n DOWN=2,\n LEFT=3,\n )\n\n CONSTANTS = namedtuple(\"CONSTANTS\", [\"FREE_CELL\", \"OBSTACLE\", \"TARGET\"])(\n FREE_CELL=0,\n OBSTACLE=1,\n TARGET=2,\n )\n\n MOVES = [ACTIONS.UP, ACTIONS.RIGHT, ACTIONS.DOWN, ACTIONS.LEFT]\n MOVES_X_Y = {ACTIONS.UP: (0, -1), ACTIONS.RIGHT: (1, 0), ACTIONS.DOWN: (0, 1), ACTIONS.LEFT: (-1, 0)}\n\n def get_actions_as_dict(self):\n return {_: getattr(self.ACTIONS, _) for _ in self.ACTIONS._fields}\n\n def get_current_state(self):\n return self.s\n\n def is_done(self):\n return self.done\n\n def _step(self, a):\n res = next_s, reward, done, _ = super(TwoRooms, self)._step(a)\n self.done = done\n return res\n\n def __init__(self):\n\n finish_reward = 1\n\n co = self.CONSTANTS\n\n self.code_middle = 2 ** 7\n\n action_mapping = {\n self.ACTIONS.RIGHT: np.array([0, 1]),\n self.ACTIONS.LEFT: np.array([0, -1]),\n self.ACTIONS.UP: np.array([-1, 0]),\n self.ACTIONS.DOWN: np.array([1, 0]),\n }\n\n self.mark = []\n\n # building 2-rooms maze\n self._maze = np.full(shape=(12, 16), fill_value=co.FREE_CELL).astype(np.int32)\n # feel boundaries of room with obstacles\n self._maze[0, :] = self._maze[:, 0] = co.OBSTACLE\n self._maze[self._maze.shape[0] - 1, :] = co.OBSTACLE\n self._maze[:, self._maze.shape[1] - 1] = co.OBSTACLE\n\n # separate rooms\n self._maze[:, self._maze.shape[1] // 2] = co.OBSTACLE\n\n # clear obstacles for door\n self._maze[self._maze.shape[0] // 2, self._maze.shape[1] // 2] = 0\n self._maze[self._maze.shape[0] // 2 - 1, self._maze.shape[1] // 2] = 0\n\n # placing target at the lower right corner of the right hand room\n self._maze[self._maze.shape[0] - 2, self._maze.shape[1] - 2] = co.TARGET\n\n prob = {}\n\n def append_transitions_from_cell(a_x, a_y, p):\n state = self.encode(a_x, a_y)\n p[state] = {a: [] for a in range(len(self.ACTIONS))}\n for a in self.ACTIONS:\n for a2 in self.ACTIONS:\n dx, dy = action_mapping[a2]\n a_n_x, a_n_y = a_x + dx, a_y + dy\n if self._maze[a_n_x][a_n_y] == co.OBSTACLE:\n new_state = state\n else:\n new_state = self.encode(a_n_x, a_n_y)\n done = self._maze[a_n_x, a_n_y] == co.TARGET\n reward = finish_reward if self._maze[a_n_x, a_n_y] == co.TARGET else -0.01\n probability = 0.7 if a == a2 else 0.1\n p[state][a].append((probability, new_state, reward, done))\n\n for agent_x1 in range(self._maze.shape[0]):\n for agent_y1 in range(self._maze.shape[1]):\n if self._maze[agent_x1][agent_y1] == co.OBSTACLE:\n continue\n append_transitions_from_cell(agent_x1, agent_y1, prob)\n\n isd = []\n for x in range(1, self._maze.shape[0] - 1):\n for y in range(1, self._maze.shape[1] // 2):\n 
isd.append(self.encode(x, y))\n isd = np.array(isd)\n super(TwoRooms, self).__init__(self.encode(self._maze.shape[0] - 1, self._maze.shape[1] - 1), len(self.ACTIONS),\n prob, isd)\n\n def _reset(self):\n self.done = False\n self.s = np.random.choice(self.isd, size=1)[0]\n return self.s\n\n def encode(self, x, y):\n # checking constraints for x,y coordinates\n assert 0 <= x < self._maze.shape[0] and 0 <= y < self._maze.shape[1]\n # checking constraints for shape[1]\n assert self._maze.shape[1] < self.code_middle\n\n return x * self.code_middle + y\n\n def decode(self, state):\n return state // self.code_middle, state % self.code_middle\n\n def _render(self, mode='human', close=False, mark=None):\n if close:\n return\n\n outfile = sys.stdout\n\n maze_size_x = len(self._maze)\n maze_size_y = len(self._maze[0])\n output = \"\\n\"\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n output += \" \"\n if self.encode(i, j) in self.mark:\n output += self.mark[self.encode(i, j)]\n elif self.s == self.encode(i, j):\n output += \"x\"\n else:\n if self._maze[i][j] == 0:\n output += \".\"\n if self._maze[i][j] == 1:\n output += \"H\"\n if self._maze[i][j] == 2:\n output += \"F\"\n if self._maze[i][j] == 3:\n output += \"F\"\n output += \" \"\n output += '\\n'\n outfile.write(output)\n\n\ndef q_learning(env, num_episodes, eps=0.1, alpha=0.1, gamma=0.9):\n to_plot = []\n\n q_table = defaultdict(lambda: 0)\n bns_count = defaultdict(lambda: 0)\n V = defaultdict(lambda: None)\n\n for _ in tqdm(range(num_episodes)):\n ep_reward = 0\n eps *= 0.9\n s = env.reset()\n\n bn_added = {}\n while True:\n if np.random.rand(1) < eps:\n action = np.random.choice(env.action_space.n, size=1)[0]\n else:\n action = arg_max_action(q_dict=q_table, state=s, action_space=env.action_space.n)\n\n next_s, reward, done, _ = env.step(action)\n a = arg_max_action(q_dict=q_table, state=s, action_space=env.action_space.n)\n # noinspection PyTypeChecker\n V[s] = (*env.decode(s), q_table[s, a])\n # making +1 to bn_counts once for each episode\n if not bn_added.get(s, False):\n bns_count[s] += 1\n bn_added[s] = True\n q_table[s, action] = (1 - alpha) * q_table[s, action] + alpha * (reward + gamma * q_table[next_s, a])\n\n ep_reward += reward\n if done:\n break\n\n s = next_s\n to_plot.append(ep_reward)\n sleep(0.1)\n\n def get_clusters(V, n_clusters, affinity):\n states = sorted(V.keys())\n ss = {\"state\": states}\n # noinspection PyTypeChecker\n for i in range(len(V[states[0]])):\n ss[str(i)] = [V[_][i] for _ in states]\n df = pd.DataFrame(ss).set_index(\"state\")\n sc = MinMaxScaler()\n df = df.rename(index=str, columns={\"0\": \"x\", \"1\": \"y\", \"2\": 'V'})\n X = df[[\"x\", \"y\", \"V\"]]\n X[[\"V\"]] *= 0.5\n sc.fit(np.vstack((df[[\"x\"]], df[[\"y\"]])))\n df[[\"x\", \"y\"]] = sc.transform(df[[\"x\", \"y\"]])\n ag = AgglomerativeClustering(n_clusters=n_clusters, affinity=affinity)\n clustered = list(ag.fit_predict(X))\n cluster_state_mapping = {}\n for i in range(len(states)):\n cluster_state_mapping[states[i]] = clustered[i]\n return cluster_state_mapping\n\n # all_states = V.keys()\n n_clusters = 4\n map_state_to_cluster = get_clusters(V=V, n_clusters=n_clusters, affinity=\"euclidean\")\n\n def get_bns_in_increasing_order(bns_count):\n state_count_pairs = sorted([(bns_count[_], _) for _ in bns_count], reverse=True)\n return list(map(lambda x: x[1], state_count_pairs, ))\n\n def get_mapping_for_cluster_to_sorted_bns(sorted_bns, map_state_to_cluster):\n res = defaultdict(lambda: list())\n for state in sorted_bns:\n 
res[map_state_to_cluster[state]].append(state)\n return res\n\n # bns = bottlenecks\n sorted_bns = get_bns_in_increasing_order(bns_count=bns_count)\n map_cluster_to_sorted_bns = get_mapping_for_cluster_to_sorted_bns(sorted_bns=sorted_bns,\n map_state_to_cluster=map_state_to_cluster)\n\n env.mark = {}\n\n for current_state in map_state_to_cluster:\n env.mark[current_state] = str(map_state_to_cluster[current_state])\n\n class colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n COLOR_LIST = [HEADER, OKBLUE, OKGREEN, WARNING, FAIL]\n\n # draw best bns for clusters\n BNS_FOR_CLUSTER = 10\n for q in map_cluster_to_sorted_bns:\n for j in map_cluster_to_sorted_bns[q][:BNS_FOR_CLUSTER]:\n env.mark[j] = colors.COLOR_LIST[q % len(colors.COLOR_LIST)] + str(q) + colors.ENDC\n env.render()\n env.mark = {}\n\n def runner(hams, num_episodes, env):\n for i_episode in range(1, num_episodes + 1):\n env.reset()\n while not env.is_done():\n for ham in hams:\n if env.s in ham.states_in_my_cluster:\n while not env.is_done() and env.s not in ham.bns:\n ham.machine.run(params)\n while not env.is_done() and env.s in ham.states_in_my_cluster:\n ham.machine.run(params)\n\n if i_episode % 10 == 0:\n print(\"\\r{ham} episode {i_episode}/{num_episodes}.\".format(**locals()), end=\"\")\n sys.stdout.flush()\n\n class BnsMachine:\n def __init__(self, params, cluster_index, list_of_bns, states_in_my_cluster):\n self.machine = AutoMachineSimple(env)\n self.cluster_index = cluster_index\n self.bns = set(list_of_bns)\n self.states_in_my_cluster = states_in_my_cluster\n self.params = params\n\n params = HAMParamsCommon(env)\n hams = [BnsMachine(params=params, cluster_index=_, list_of_bns=map_cluster_to_sorted_bns[_][:BNS_FOR_CLUSTER], states_in_my_cluster=set(map_cluster_to_sorted_bns[_])) for _ in\n map_cluster_to_sorted_bns]\n\n\n runner(hams = hams,\n num_episodes=2000,\n env=env,\n )\n to_plot = list()\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_with_pull_up\"))\n plot_multi(to_plot)\n # print(params.logs[\"ep_rewards\"])\n return to_plot, q_table\n\n\nq_s, q_t = q_learning(TwoRooms(), 5000)\n# plot_multi_test([q_s, ])\n"
},
{
"alpha_fraction": 0.6132691502571106,
"alphanum_fraction": 0.6253007650375366,
"avg_line_length": 35.822784423828125,
"blob_id": "4de9e6e8b77d27a773af8ed42c2cd4f4dfe0f945",
"content_id": "626cbafc3305bc039908aa322ad2eb778d1c9bf6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2909,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 79,
"path": "/environments/arm_env/arm_env_on_model.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import sys\nfrom random import randrange\n\nfrom HAM.HAM_core import RootMachine, Start, Choice, Action, Stop, MachineRelation, LoopInvokerMachine, AbstractMachine, MachineGraph\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, PlotParams, plot_multi\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\n\n\ndef main():\n def get_on_model(self):\n return self.get_arm_x(), self.is_cube_graped()\n\n def get_arm_x(self):\n return self._size_x - self._arm_x\n\n def is_cube_graped(self):\n cube_dx, cube_dy = self.MOVE_ACTIONS[self.ACTIONS.DOWN]\n cube_x, cube_y = self._arm_x + cube_dx, self._arm_y + cube_dy\n return self._magnet_toggle and self.ok(cube_x, cube_y) and self._grid[cube_x][cube_y] == 1\n\n ArmEnvToggleTopOnly.get_arm_x = get_arm_x\n ArmEnvToggleTopOnly.is_cube_graped = is_cube_graped\n ArmEnvToggleTopOnly.get_on_model = get_on_model\n\n env = ArmEnvToggleTopOnly(size_x=5, size_y=5, cubes_cnt=4, episode_max_length=600, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n\n params = HAMParamsCommon(env)\n runner(ham=AutoMachineNoLoop(env),\n num_episodes=2000,\n env=env,\n params=params,\n # no_output=True\n )\n to_plot = []\n to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"HAM_with_pull_up\"))\n plot_multi(to_plot)\n\n\ndef runner(ham, num_episodes, env, params, no_output=None):\n ham2 = AutoMachineNoLoop(env)\n params2 = HAMParamsCommon(env)\n for i_episode in range(1, num_episodes + 1):\n\n env.reset()\n print(\"****\" * 10)\n while not env.is_done():\n print(env.get_on_model())\n if i_episode % 10 >= 5:\n ham.run(params)\n else:\n pass\n ham2.run(params2)\n # print(params.previous_machine_choice_state)\n env.render()\n assert env.is_done(), \"The machine is STOPPED before STOP(done) of the environment\"\n if i_episode % 10 == 0:\n if no_output is None:\n print(\"\\r{ham} episode {i_episode}/{num_episodes}.\".format(**locals()), end=\"\")\n sys.stdout.flush()\n\n\nclass AutoMachineNoLoop(RootMachine):\n def __init__(self, env):\n start = Start()\n choice_one = Choice()\n actions = [Action(action=_) for _ in env.get_actions_as_dict().values()]\n stop = Stop()\n\n transitions = [MachineRelation(left=start, right=choice_one), ]\n for action in actions:\n transitions.append(MachineRelation(left=choice_one, right=action))\n transitions.append(MachineRelation(left=action, right=stop, label=0))\n transitions.append(MachineRelation(left=action, right=stop, label=1))\n\n super().__init__(machine_to_invoke=AbstractMachine(MachineGraph(transitions=transitions)))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5382674336433411,
"alphanum_fraction": 0.5519644618034363,
"avg_line_length": 35.50438690185547,
"blob_id": "a6df484475b1a40d50f91a395b5729aa867ebf41",
"content_id": "351804f23ea766b10460d3a6517f3035a2e9b8ca",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8323,
"license_type": "permissive",
"max_line_length": 162,
"num_lines": 228,
"path": "/HAM/HAM_experiments/experiment_05_HAM_NET/experiment_05_HAM_NET.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import random\nfrom collections import namedtuple, defaultdict\n\nfrom gym import spaces\n\nfrom HAM.HAM_core import RandomMachine, MachineGraph, Start, Stop, Action, AutoBasicMachine, MachineRelation, Choice, Call, AbstractMachine, LoopInvokerMachine, \\\n RootMachine\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, ham_runner, plot_multi, PlotParams, super_runner\nfrom HAM.HAM_experiments.experiment_04_auto_random_HAM_on_maze_env.experiment_04 import is_it_machine_runnable\nfrom environments.arm_env.arm_env import ArmEnv\nfrom environments.env_core import CoreEnv\nfrom environments.env_utils import EnvForTesting\nfrom environments.weak_methods import q_learning\nfrom utils.graph_drawer import draw_graph\n\n# maze = generate_maze_please(size_x=2, size_y=2)\n# env = MazeWorldEpisodeLength(maze=maze,finish_reward=1000)\nfrom utils.plotting import plot_multi_test\n\n\n\n\n\nclass StupidMachine(AbstractMachine):\n def __init__(self):\n action = Action(action=0)\n transition = (\n MachineRelation(left=Start(), right=action),\n MachineRelation(left=action, right=action, label=0),\n MachineRelation(left=action, right=Stop(), label=1),\n\n )\n super().__init__(graph=MachineGraph(transitions=transition))\n\n\nclass HAMsNet(CoreEnv):\n ACTIONS = namedtuple(\"ACTIONS\",\n [\"SEED_PLUS_0\",\n \"SEED_PLUS_1\",\n \"SEED_PLUS_2\",\n \"SEED_PLUS_3\",\n \"SEED_PLUS_4\",\n \"DELETE_TRANSITION_TO_STOP\"])(\n SEED_PLUS_0=0,\n SEED_PLUS_1=1,\n SEED_PLUS_2=2,\n SEED_PLUS_3=3,\n SEED_PLUS_4=4,\n DELETE_TRANSITION_TO_STOP=5,\n\n )\n\n def __init__(self, env, num_of_episodes, max_vertex_to_add, max_edges_to_add, init_seed=0):\n self.init_seed = init_seed\n self._reset()\n self.env = env\n self.num_of_episodes = num_of_episodes\n\n self.max_vertex_to_add = max_vertex_to_add\n self.max_edges_to_add = max_edges_to_add\n\n self.dp = {}\n\n def _reset(self):\n self.seed = self.init_seed\n self.machine = RandomMachine()\n self.state = tuple()\n self.last_reward = 0\n self.action_space = spaces.Discrete(len(self.ACTIONS))\n # TODO implement done\n self._done = False\n\n self.vertex_added = 0\n self.edges_added = 0\n\n def change_graph(self):\n if self.vertex_added < self.max_vertex_to_add:\n self.machine = self.machine.with_new_vertex(env=self.env)\n self.vertex_added += 1\n else:\n try:\n self.machine = self.machine.with_new_relation()\n # TODO rewrite catching assertion to ErorObject\n except AssertionError:\n pass\n self.edges_added += 1\n\n def _step(self, action):\n self.state = self.state + tuple([action])\n old_seed = random.seed\n if action == self.ACTIONS.SEED_PLUS_1:\n self.seed += 1\n random.seed(self.seed)\n\n # CODE -------------------------------\n self.change_graph()\n\n elif action == self.ACTIONS.SEED_PLUS_2:\n self.seed += 2\n random.seed(self.seed)\n\n # CODE -------------------------------\n self.change_graph()\n elif action == self.ACTIONS.SEED_PLUS_3:\n self.seed += 3\n random.seed(self.seed)\n\n # CODE -------------------------------\n self.change_graph()\n\n elif action == self.ACTIONS.SEED_PLUS_4:\n self.seed += 4\n random.seed(self.seed)\n\n # CODE -------------------------------\n self.change_graph()\n elif action == self.ACTIONS.SEED_PLUS_0:\n if self.vertex_added < self.max_vertex_to_add:\n self.vertex_added += 1\n else:\n self.edges_added += 1\n elif action == self.ACTIONS.DELETE_TRANSITION_TO_STOP:\n if self.vertex_added < self.max_vertex_to_add:\n self.vertex_added += 1\n else:\n self.edges_added += 1\n new_transitions = []\n for transition in 
self.machine.graph.transitions:\n if not isinstance(transition.right, Stop):\n new_transitions.append(transition)\n self.machine = RandomMachine(graph=MachineGraph(transitions=new_transitions, vertices=self.machine.graph.vertices))\n else:\n raise KeyError\n\n random.seed(old_seed)\n self.ham = RootMachine(LoopInvokerMachine(machine_to_invoke=super_runner(self.machine, self.env)))\n # self.ham = RootMachine(LoopInvokerMachine(machine_to_invoke=self.machine))\n\n reward = None\n if is_it_machine_runnable(self.machine):\n\n params = HAMParamsCommon(self.env)\n try:\n if self.state not in self.dp:\n ham_runner(ham=self.ham,\n num_episodes=self.num_of_episodes,\n env=self.env, params=params,\n no_output=True\n )\n reward = sum(params.logs[\"ep_rewards\"])\n if len(self.machine.graph.transitions) > 3:\n draw_graph(\"pics/\" + str(reward).rjust(10, \"0\") + str(self.state) + \" \" + str(self.init_seed),\n self.machine.get_graph_to_draw(action_to_name_mapping=self.env.get_actions_as_dict()))\n self.dp[self.state] = reward\n else:\n reward = self.dp[self.state]\n except KeyError:\n pass\n # print(\"keyError\", end=\"\")\n # except AssertionError:\n # pass\n # print(\"assertion\", end=\"\")\n except BlockingIOError:\n pass\n observation = self.state\n\n if reward is not None:\n self.last_reward = reward\n else:\n if None not in self.dp:\n params = HAMParamsCommon(self.env)\n ham_runner(ham=RootMachine(LoopInvokerMachine(machine_to_invoke=super_runner(StupidMachine(), self.env))),\n num_episodes=self.num_of_episodes,\n env=self.env, params=params,\n no_output=True\n )\n reward = sum(params.logs[\"ep_rewards\"])\n self.dp[None] = reward\n else:\n reward = self.dp[None]\n if self.vertex_added == self.max_vertex_to_add:\n print(self.state, reward)\n info = None\n assert (self.vertex_added <= self.max_vertex_to_add)\n assert (self.edges_added <= self.max_edges_to_add)\n if self.vertex_added == self.max_vertex_to_add and self.edges_added == self.max_edges_to_add:\n self._done = True\n return observation, reward, self._done, info\n\n def get_current_state(self):\n return self.state\n\n def is_done(self):\n return self._done\n\n def get_actions_as_dict(self):\n return {_: getattr(self.ACTIONS, _) for _ in self.ACTIONS._fields}\n\n def _render(self, mode='human', close=False):\n pass\n\n\ndef graph_me(steps):\n env = ArmEnv(size_x=6, size_y=3, cubes_cnt=3, episode_max_length=300, finish_reward=200, action_minus_reward=-1, tower_target_size=2)\n net = HAMsNet(env=env, num_of_episodes=300, max_vertex_to_add=7, max_edges_to_add=6)\n for i in steps:\n net.step(i)\n draw_graph(\"ololo\", net.machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n\n\ndef main():\n env_obj = EnvForTesting()\n net = HAMsNet(env=env_obj.env, num_of_episodes=env_obj.episodes, max_vertex_to_add=8, max_edges_to_add=7, init_seed=random.randrange(1021321321))\n\n q_table = defaultdict(lambda: 0)\n to_plot = []\n for i in range(10):\n q_stats, q_table = q_learning(env=net, num_episodes=100, gamma=1, eps=1 - i * 10 / 10, q_table=q_table, alpha=0.5)\n to_plot += q_stats\n print(\":::::\" * 10)\n print(1 - i * 10 / 100)\n\n plot_multi_test([to_plot])\n\n\nif __name__ == '__main__':\n # graph_me(steps=(1, 0, 1, 3, 0, 3, 1, 1, 2, 1, 0, 2))\n main()\n"
},
{
"alpha_fraction": 0.48584625124931335,
"alphanum_fraction": 0.49731823801994324,
"avg_line_length": 34.326316833496094,
"blob_id": "6577f83b65d1621f57cc2dd74640a5294570d31c",
"content_id": "709e9277e0aa6ec030320d94aea815fbb074d263",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6712,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 190,
"path": "/environments/grid_maze_env/maze_world_env.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\nimport numpy as np\n\nimport sys\nfrom gym.envs.toy_text import discrete\n\nfrom environments.env_core import CoreEnv\n\n\nclass MazeWorldEpisodeLength(CoreEnv, discrete.DiscreteEnv):\n metadata = {'render.modes': ['human', 'ansi']}\n\n ACTIONS = namedtuple(\"ACTIONS\", [\"LEFT\", \"UP\", \"RIGHT\", \"DOWN\", ])(\n UP=0,\n RIGHT=1,\n DOWN=2,\n LEFT=3,\n )\n\n MOVES = [ACTIONS.UP, ACTIONS.RIGHT, ACTIONS.DOWN, ACTIONS.LEFT]\n MOVES_X_Y = {ACTIONS.UP: (0, -1), ACTIONS.RIGHT: (1, 0), ACTIONS.DOWN: (0, 1), ACTIONS.LEFT: (-1, 0)}\n\n # noinspection PyUnresolvedReferences\n @staticmethod\n def categorical_sample(prob_n, np_random):\n \"\"\"\n Sample from categorical distribution\n Each row specifies class probabilities\n \"\"\"\n prob_n = np.asarray(prob_n)\n cs_prob_n = np.cumsum(prob_n)\n\n # cs_prob_n > np_random.rand() the same with:\n # np.apply_along_axis(lambda x: x > np_random.rand(), 0, cs_prob_n)))\n return (cs_prob_n > np_random.rand()).argmax()\n\n def is_done(self):\n return self._is_done\n\n def get_actions_as_dict(self):\n return {_: getattr(self.ACTIONS, _) for _ in self.ACTIONS._fields}\n\n def get_current_state(self):\n return self._current_state\n\n def __init__(self, maze, finish_reward=100, wall_minus_reward=-5, action_minus_reward=-1, episode_max_length=100):\n number_of_actions = 4\n\n prob = {}\n\n state_id = 1\n maze_size_x = len(maze)\n maze_size_y = len(maze[0])\n state_id_table = np.zeros(shape=(maze_size_x, maze_size_y), dtype=np.int64)\n start_id = None\n\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n if maze[i][j] == 1:\n continue\n\n state_id_table[i][j] = state_id\n if maze[i][j] == 2:\n start_id = state_id_table[i][j]\n state_id += 1\n max_state_id = state_id\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n state_id = state_id_table[i][j]\n if maze[i][j] == 1:\n continue\n\n prob[state_id] = {a: [] for a in range(number_of_actions)}\n\n for move in MazeWorldEpisodeLength.MOVES:\n x, y = MazeWorldEpisodeLength.MOVES_X_Y[move]\n x += i\n y += j\n\n new_state = state_id_table[x][y]\n\n # if we are trying to go into the wall then ...\n if maze[x][y] == 1:\n reward = wall_minus_reward\n new_state = state_id_table[i][j]\n # if agents on the finish cell\n elif maze[x][y] == 3:\n reward = finish_reward\n new_state = state_id_table[i][j]\n # if agents on the start cell\n elif maze[x][y] == 0 or maze[x][y] == 2:\n reward = action_minus_reward\n else:\n raise ValueError\n # [probability, state, reward, done]\n prob[state_id][move] = [(1.0, new_state, reward, maze[x][y] == 3)]\n\n isd = np.zeros(max_state_id)\n isd[start_id] = 1.0\n\n # uncomment for on-model\n # self.P = P\n\n self._is_done = False\n\n # will only be used for our own render method\n self._episode_max_length = episode_max_length\n self._episode_length = 0\n self._state_id_table = state_id_table\n self._maze = maze\n\n n = 0\n k = 0\n self.state_to_state_no = {}\n self.state_to_pattern_no = {}\n for i in range(int(len(self._state_id_table) / 5)):\n for j in range(int(len(self._state_id_table[0]) / 5)):\n for q in range(5):\n for z in range(5):\n if self._state_id_table[5 * i + q, 5 * j + z] != 0:\n self.state_to_state_no[self._state_id_table[5 * i + q, 5 * j + z]] = n\n self.state_to_pattern_no[self._state_id_table[5 * i + q, 5 * j + z]] = k\n n += 1\n k += 1\n n = 0\n\n super(MazeWorldEpisodeLength, self).__init__(max_state_id, number_of_actions, prob, isd)\n\n def _step(self, a):\n assert (not self._is_done), \"Environment 
already finished\"\n\n\n transitions = self.P[self._current_state][a]\n i = self.categorical_sample([t[0] for t in transitions], self.np_random)\n p, s, r, d = transitions[i]\n self._current_state = s\n self._episode_length += 1\n if self._episode_length == self._episode_max_length or transitions[0][3]:\n self._is_done = True\n assert (self._episode_length <= self._episode_max_length)\n return s, r, self._is_done, {\"prob\": p}\n\n def _reset(self):\n self._current_state = self.categorical_sample(self.isd, self.np_random)\n self._episode_length = 0\n self._is_done = False\n return self._current_state\n\n def _render(self, mode='human', close=False):\n if close:\n return\n\n outfile = sys.stdout\n\n maze_size_x = len(self._maze)\n maze_size_y = len(self._maze[0])\n output = \"\\n\"\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n if self._current_state == self._state_id_table[i][j]:\n output += \" x \"\n else:\n if self._maze[i][j] == 0:\n output += \" . \"\n if self._maze[i][j] == 1:\n output += \" O \"\n if self._maze[i][j] == 2:\n output += \" S \"\n if self._maze[i][j] == 3:\n output += \" F \"\n output += '\\n'\n outfile.write(output)\n\n def get_agent_x_y(self):\n maze_size_x = len(self._maze)\n maze_size_y = len(self._maze[0])\n\n for i in range(maze_size_x):\n for j in range(maze_size_y):\n if self._current_state == self._state_id_table[i][j]:\n return i, j\n\n\nclass MazeWorld(MazeWorldEpisodeLength):\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, maze, finish_reward=100, wall_minus_reward=-5, action_minus_reward=-1):\n super(MazeWorld, self).__init__(maze=maze, finish_reward=finish_reward, wall_minus_reward=wall_minus_reward, action_minus_reward=action_minus_reward,\n episode_max_length=2 ** 128)\n"
},
{
"alpha_fraction": 0.5505922436714172,
"alphanum_fraction": 0.5585842728614807,
"avg_line_length": 32.05188751220703,
"blob_id": "73faee49213fcbc8c3ab75b21f439a71edc796eb",
"content_id": "93bdd728ae24f2f7e036690243d3d5255843a452",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7007,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 212,
"path": "/MAX-Q/max-q.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import random\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom tqdm import tqdm\n\nimport pandas as pd\n\nprimitive_actions = LEFT, RIGHT, UP, DOWN, PICK_UP, DROP_OFF = list(range(6))\ncomposite_actions = GO_TO_SOURCE, PUT, GET, ROOT, GO_TO_DESTINATION = list(range(6, 11))\n\n\nclass Agent:\n\n def __init__(self, env, alpha, gamma, eps):\n \"\"\"\n Agent initialisation\n :param env: used environment (taxi domain)\n :param alpha: learning rate\n :param gamma: discount rate\n :param eps: rate for e-greedy policy\n \"\"\"\n self.env = env\n action_size = len(primitive_actions) + len(composite_actions)\n self.V = np.zeros((action_size, env.observation_space.n))\n self.C = np.zeros((action_size, env.observation_space.n, action_size))\n\n self.graph = [[] for _ in range(len(primitive_actions) + len(composite_actions))]\n\n self.graph[GO_TO_SOURCE] = self.graph[GO_TO_DESTINATION] = [LEFT, RIGHT, UP, DOWN]\n self.graph[PUT] = [DROP_OFF, GO_TO_DESTINATION]\n self.graph[GET] = [PICK_UP, GO_TO_SOURCE]\n self.graph[ROOT] = [PUT, GET]\n\n self.alpha = alpha\n self.gamma = gamma\n self.r_sum = 0\n self.new_s = self.env.s\n self.done = False\n self.eps = eps\n\n @staticmethod\n def in_car(pass_index):\n \"\"\"\n checking what the passenger in car\n :param pass_index: pass_index from env.decode(self.env.s)\n :return: boolean\n \"\"\"\n return pass_index == 4\n\n def is_terminal(self, node):\n \"\"\"\n checking current node of tree for termination\n :param node: current node\n :return:\n \"\"\"\n taxi_row, taxi_col, pass_idx, destination = list(self.env.decode(self.env.s))\n if node == ROOT:\n return self.done\n elif node == GET:\n return self.in_car(pass_idx)\n elif node == PUT:\n return not self.in_car(pass_idx) or self.done\n elif node == GO_TO_SOURCE:\n return self.env.locs[pass_idx] == (taxi_row, taxi_col)\n elif node == GO_TO_DESTINATION:\n return self.env.locs[destination] == (taxi_row, taxi_col)\n\n def evaluate(self, node, s):\n \"\"\"\n evaluating best node for transition, implementation of evaluate function from Dietrich's paper\n :param node: current node\n :param s: state of the environment\n :return: best value for step from current node and index of that edge\n \"\"\"\n if node in primitive_actions:\n return self.V[node, s], node\n elif node in composite_actions:\n # TODO to reassign variables with more clear names\n j_arg_max, cur_max = None, None\n for j, a in enumerate(self.graph[node]):\n v, _ = self.evaluate(a, s)\n if cur_max is None or v + self.C[node, s, a] > cur_max:\n j_arg_max = j\n cur_max = v + self.C[node, s, a]\n return cur_max, j_arg_max\n else:\n raise KeyError\n\n def greed_act(self, node, s):\n \"\"\"\n choosing greedy transition on tree\n :param node: current node\n :param s: current environment state\n :return: action index\n \"\"\"\n # TODO rewrite this code\n q = np.arange(0)\n for a2 in self.graph[node]:\n q = np.concatenate((q, [self.V[a2, s] + self.C[node, s, a2]]))\n max_arg = np.argmax(q)\n possible_a = np.array(list(self.graph[node]))\n if np.random.rand(1) < self.eps:\n return np.random.choice(possible_a)\n else:\n return possible_a[max_arg]\n\n def is_parent_terminates(self, node):\n \"\"\"\n checking for parents termination does not implemented for max_q_0 implementation for Taxi domain\n since its redundantly. 
So now it always returns False\n :param node: current node of tree\n :return: boolean value\n \"\"\"\n return False\n\n def max_q_0(self, node, s):\n \"\"\"\n max_q_0 algorithm\n :param node: current node of tree\n :param s: current state of environment\n :return: number of applied primitive actions\n \"\"\"\n self.done = False\n if node in primitive_actions:\n _, r, self.done, _ = self.env.step(node)\n self.r_sum += r\n self.V[node, s] += self.alpha * (r - self.V[node, s])\n self.eps *= 0.999\n return 1\n elif node in composite_actions:\n\n count = 0\n while not self.is_terminal(node) and not self.is_parent_terminates(node):\n a = self.greed_act(node, s)\n # TODO move logic to greed act and choose only across possible transitions\n # TODO if there is no such ones raise an error\n for _ in range(100):\n if not self.is_terminal(a):\n break\n a = random.choice(list(self.graph[node]))\n else:\n raise ValueError(\"can't choose next vertex which doesn't terminates on current state\")\n self.alpha *= 0.99999\n n = self.max_q_0(a, s)\n obs = self.env.s\n v, _ = self.evaluate(node, obs)\n self.C[node, s, a] += self.alpha * (self.gamma ** n * v - self.C[node, s, a])\n\n count += n\n s = obs\n return count\n else:\n raise KeyError\n\n def reset(self):\n \"\"\"\n resetting current environment and special variables\n :return: None\n \"\"\"\n self.env.reset()\n self.r_sum = 0\n self.done = False\n\n\ndef run_max_q(episodes):\n \"\"\"\n launches max_q\n :param episodes: number of episodes\n :return: list of rewards for episodes\n \"\"\"\n env = gym.make('Taxi-v2').env\n taxi = Agent(env=env, alpha=0.8, gamma=0.99, eps=0.2)\n rewards_for_episodes = []\n for _ in tqdm(range(episodes), postfix=\"MAX_Q_0\"):\n taxi.reset()\n taxi.max_q_0(ROOT, env.s)\n rewards_for_episodes.append(taxi.r_sum)\n return rewards_for_episodes\n\n\ndef run_q_learning(episodes):\n \"\"\"\n launches q-learning algorithm\n :param episodes: number of episodes\n :return: list of rewards for episodes\n \"\"\"\n from environments.weak_methods import q_learning\n env = gym.make('Taxi-v2').env\n to_plot, _ = q_learning(env=env, num_episodes=episodes, eps=0.1, alpha=0.1, gamma=0.9)\n return to_plot\n\n\ndef main():\n \"\"\"\n :return: None\n \"\"\"\n sns.set(palette=\"Set2\")\n episodes = 100\n tests = 5\n stack = np.dstack([[p(episodes) for _ in range(tests)] for p in [run_max_q, run_q_learning]])\n name = pd.Series([\"MAX-Q\", \"Q-learning\"], name=\"\")\n sns.tsplot(stack, condition=name, value=\"position\")\n\n # If you want to save:\n # plt.savefig(\"MAXQ_vs_Q.png\", dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.8048780560493469,
"alphanum_fraction": 0.8048780560493469,
"avg_line_length": 34.14285659790039,
"blob_id": "042756e64d0156eb9fb064f42fb44712764dc2e8",
"content_id": "bad974572b31d1a7c1e559be3e50269fe4fe1aee",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 246,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 7,
"path": "/DQN/README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "# Pain DQN\n\nImplementation of the plain DQN algorithm\n\nTo make an experiment run the file run_dqn.py , where the environment, network and learning parameters can be modified;\n\nThe file dqn.py contains the implementation of the learning algorithm\n"
},
{
"alpha_fraction": 0.48547837138175964,
"alphanum_fraction": 0.5270501375198364,
"avg_line_length": 34.836734771728516,
"blob_id": "d317d718c248881143df6a2673d4800b5de8ba77",
"content_id": "0c700e1a9398401e97684cda2bee34d426e3f1b3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3512,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 98,
"path": "/HAM/HAM_experiments/experiment_00_slam_input/experiment_slam_input.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from HAM_old.machines_on_grid_maze import *\nfrom HAM_old.utils import ham_learning\nfrom environments.grid_maze_env.grid_maze_generator import *\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\nfrom utils import plotting\nimport imageio\n\n\ndef pil_to_list(img):\n return np.array(img.getdata(), np.uint8).reshape(img.size[1], img.size[0], 3)\n\n\ndef experiment_slam_input():\n from PIL import Image, ImageDraw\n im = Image.open('robots_map.jpg')\n img_drawer = ImageDraw.Draw(im)\n block_sizes = [1, 2, 3, 4, 5, 6, 8, 10, 12, 15, 16, 20, 24, 30, 32, 40, 48, 60, 64, 80, 96, 120, 160, 192, 240, 320,\n 480, 960]\n block_size = block_sizes[3]\n n, m = im.height, im.width\n ss = set()\n for i in range(n):\n for j in range(m):\n q = sum(im.getpixel((i, j))) // 3\n offset = 253\n if q > offset:\n img_drawer.point((i, j), fill=(0, 0, 0))\n elif q > 50:\n img_drawer.point((i, j), fill=(255, 255, 255))\n else:\n img_drawer.point((i, j), fill=(0, 0, 0))\n\n N, M = n // block_size, m // block_size\n maze = np.zeros(shape=(N, M)).astype(int)\n\n for i in range(n // block_size):\n for j in range(m // block_size):\n colors_sum = 0\n x, y = i, j\n for ii in range(x * block_size, x * block_size + block_size):\n for jj in range(y * block_size, y * block_size + block_size):\n colors_sum += sum(im.getpixel((ii, jj))) // 3\n\n colors_sum /= block_size * block_size\n ss.add(colors_sum)\n for ii in range(x * block_size, x * block_size + block_size):\n for jj in range(y * block_size, y * block_size + block_size):\n if colors_sum > 240:\n maze[j][i] = 0\n else:\n maze[j][i] = 1\n if colors_sum > 240:\n img_drawer.point((ii, jj), fill=(255, 255, 255))\n else:\n img_drawer.point((ii, jj), fill=(0, 0, 0))\n\n # TODO rewrite with new HAM\n\n maze = place_start_finish(prepare_maze(maze))\n\n episode_max_length = 1000\n env = MazeWorldEpisodeLength(maze=maze, finish_reward=1000000, episode_max_length=episode_max_length)\n env.render()\n params = {\n \"env\": env,\n \"num_episodes\": 800,\n \"machine\": L2Interesting,\n \"alpha\": 0.1,\n \"epsilon\": 0.1,\n \"discount_factor\": 1,\n \"path\": []\n }\n Q1, stats1 = ham_learning(**params)\n plotting.plot_multi_test(curve_to_draw=[stats1.episode_rewards], smoothing_window=10)\n\n im = Image.open('robots_map.jpg')\n\n d = params[\"path\"][-episode_max_length:]\n images = []\n for index, item in enumerate(d):\n img_drawer = ImageDraw.Draw(im)\n y, x = item\n for ii in range(x * block_size, x * block_size + block_size):\n for jj in range(y * block_size, y * block_size + block_size):\n img_drawer.point((ii, jj), fill=(240, 13, 13))\n\n images.append(pil_to_list(im))\n\n for ii in range(x * block_size, x * block_size + block_size):\n for jj in range(y * block_size, y * block_size + block_size):\n img_drawer.point((ii, jj), fill=(255, 255, 0))\n # if index > 100:\n # break\n imageio.mimsave('movie.gif', images)\n\n\nif __name__ == \"__main__\":\n experiment_slam_input()\n"
},
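The experiment_slam_input script in the record above builds its occupancy grid by averaging every pixel of each block_size x block_size tile with four nested Python loops and then thresholding the mean brightness. As a note on an alternative design, the same block-averaging idea can be expressed with a numpy reshape instead of per-pixel loops. The sketch below is only an illustration of that idea; the function name block_average_to_maze, the array shapes, and the 240 brightness threshold are assumptions borrowed from the script, not part of the repository's code.

```python
import numpy as np

def block_average_to_maze(gray, block_size, threshold=240):
    """Downsample a grayscale image (2-D uint8 array) into a 0/1 maze grid.

    Each block_size x block_size tile is averaged; bright tiles (mean above
    the threshold) become free cells (0), dark tiles become walls (1).
    """
    n, m = gray.shape
    # Crop so both dimensions divide evenly by the block size.
    gray = gray[: n - n % block_size, : m - m % block_size]
    # View as (rows, block, cols, block) and average over each tile.
    tiles = gray.reshape(gray.shape[0] // block_size, block_size,
                         gray.shape[1] // block_size, block_size)
    means = tiles.mean(axis=(1, 3))
    return (means <= threshold).astype(int)

if __name__ == "__main__":
    demo = np.full((8, 8), 255, dtype=np.uint8)
    demo[0:4, 0:4] = 0                     # one dark quadrant becomes a wall
    print(block_average_to_maze(demo, 4))  # [[1 0]
                                           #  [0 0]]
```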
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6793708205223083,
"avg_line_length": 32.060001373291016,
"blob_id": "d4933640da9559d5d8ca14b00840871dbce02baa",
"content_id": "5c8d1f7d53b2a12af084c608c190f6140206e5ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1653,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 50,
"path": "/utils/plotting.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom collections import namedtuple\n# from matplotlib import pyplot as plt\nimport matplotlib\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nEpisodeStats = namedtuple(\"Stats\", [\"episode_lengths\", \"episode_rewards\"])\n\n\ndef plot_episode_stats(stats, smoothing_window=10, no_show=False):\n # Plot the episode length over time\n fig1 = plt.figure(figsize=(10, 5))\n plt.plot(stats.episode_lengths)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Length\")\n plt.title(\"Episode Length over Time\")\n if no_show:\n plt.close(fig1)\n else:\n plt.show(fig1)\n\n # Plot the episode reward over time\n fig2 = plt.figure(figsize=(10, 5))\n rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()\n plt.plot(rewards_smoothed)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Reward (Smoothed)\")\n plt.title(\"Episode Reward over Time (Smoothed over window size {})\".format(smoothing_window))\n if no_show:\n plt.close(fig2)\n else:\n plt.show(fig2)\n\n\ndef plot_multi_test(curve_to_draw=None, smoothing_window=10, x_label=\"X\", y_label=\"Y\", labels=None, filename=None):\n fig2 = plt.figure(figsize=(10, 5))\n\n t = []\n for index, elem in enumerate(curve_to_draw):\n rewards_smoothed = pd.Series(elem).rolling(smoothing_window, min_periods=smoothing_window).mean()\n p, = plt.plot(rewards_smoothed)\n t.append(p)\n plt.legend(t, labels) if labels else plt.legend(t)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n plt.savefig(\"diagram.png\" if filename is None else filename + \".png\")\n plt.show(fig2)\n"
},
{
"alpha_fraction": 0.6385836601257324,
"alphanum_fraction": 0.6410256624221802,
"avg_line_length": 27.241378784179688,
"blob_id": "5b9f05acf2188e11ffd9a73472b3357aab40ebe5",
"content_id": "0e1017325f8ba55b0f8ed2e73a57e9c51af7d2f7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 819,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 29,
"path": "/article_experiments/01_q-learning/q_learning.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from article_experiments.global_envs import MazeEnvArticle, MazeEnvArticleSpecial, ArmEnvArticle, EnvironmentsArticle, get_cumulative_rewards\nfrom environments.weak_methods import q_learning\n\nname = \"01_table_q-learning\"\n\n\n\n\n\ndef run(global_env):\n full_name = name + \"_\" + global_env.__class__.__name__\n rewards, _ = q_learning(env=global_env.env, num_episodes=global_env.episodes_count)\n\n # with open(full_name + \" cumulative_reward.txt\", \"w\") as w:\n # for out in get_cumulative_rewards(rewards=rewards):\n # w.write(str(out) + '\\n', )\n\n with open(full_name + \" reward.txt\", \"w\") as w:\n for out in rewards:\n w.write(str(out) + '\\n', )\n\n\ndef main():\n for global_env in EnvironmentsArticle().environments:\n run(global_env)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6270548701286316,
"alphanum_fraction": 0.6322044134140015,
"avg_line_length": 44.486488342285156,
"blob_id": "6ecf3f60932af3dac38aecb35ce3720ca02805aa",
"content_id": "bd49e7a304e0b23a4078e327b306936fb1a79537",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5049,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 111,
"path": "/HAM/HAM_experiments/experiment_04_auto_random_HAM_on_maze_env/experiment_04.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "import random\n\nfrom HAM.HAM_core import Action, RootMachine, \\\n LoopInvokerMachine, RandomMachine, MachineGraph\nfrom HAM.HAM_experiments.HAM_utils import HAMParamsCommon, plot_multi, ham_runner, PlotParams, super_runner\nfrom article_experiments.global_envs import MazeEnvArticleSpecial, MazeEnvArticle, ArmEnvArticle\nfrom environments.grid_maze_env.grid_maze_generator import generate_pattern, generate_maze, place_start_finish, \\\n prepare_maze, generate_maze_please\nfrom environments.grid_maze_env.maze_world_env import MazeWorldEpisodeLength\nfrom utils.graph_drawer import draw_graph\n\nto_plot = []\n\n\ndef dfs_check_graph_for_no_action_loops(graph: MachineGraph, current_vertex, visited, ok, action_vertex_was_visited):\n if current_vertex not in visited or (action_vertex_was_visited and current_vertex not in ok):\n visited.append(current_vertex)\n if isinstance(current_vertex, Action):\n action_vertex_was_visited = True\n if action_vertex_was_visited:\n ok.append(current_vertex)\n for relation in graph.vertex_mapping[current_vertex]:\n to = relation.right\n dfs_check_graph_for_no_action_loops(graph, to, visited, ok, action_vertex_was_visited)\n\n\ndef dfs_distinct_from_start(graph: MachineGraph, vertex, visited, reversed_order=None):\n if vertex in visited:\n return visited\n visited.append(vertex)\n mapping = graph.vertex_mapping if reversed_order is None else graph.vertex_reverse_mapping\n for relation in mapping[vertex]:\n to = relation.right if reversed_order is None else relation.left\n dfs_distinct_from_start(graph=graph, vertex=to, visited=visited, reversed_order=reversed_order)\n return visited\n\n\ndef is_it_machine_runnable(machine):\n ok = []\n dfs_check_graph_for_no_action_loops(graph=machine.graph, current_vertex=machine.graph.get_start(),\n visited=[], ok=ok,\n action_vertex_was_visited=False)\n if machine.graph.get_stop() not in ok:\n return False\n\n x = dfs_distinct_from_start(graph=machine.graph, vertex=machine.graph.get_start(), visited=[])\n y = dfs_distinct_from_start(graph=machine.graph, vertex=machine.graph.get_stop(), visited=[],\n reversed_order=True)\n\n if set(x) != set(y):\n return False\n return True\n\n\ndef create_random_machine(maximal_number_of_vertex, maximal_number_of_edges, random_seed, env):\n random.seed(random_seed)\n number_of_vertex = random.randrange(1, maximal_number_of_vertex)\n number_of_edges = random.randrange(1, maximal_number_of_edges)\n new_machine = RandomMachine().with_new_vertex(env=env)\n for _ in range(number_of_vertex):\n new_machine = new_machine.with_new_vertex(env=env)\n for __ in range(number_of_edges):\n try:\n new_machine = new_machine.with_new_relation()\n except AssertionError:\n break\n return new_machine\n\n\ndef main(begin_seed=0):\n for seed in range(begin_seed, begin_seed + 5000):\n # maze = maze_world_input_special()\n # maze = generate_maze_please(size_x=2, size_y=2)\n # env = MazeWorldEpisodeLength(maze=maze)\n # global_env, save_folder = MazeEnvArticleSpecial(), \"laby_spec/\"\n global_env, save_folder = MazeEnvArticle(), \"laby/\"\n # global_env, save_folder = ArmEnvArticle(), \"arm/\"\n\n env, num_episodes = global_env.env, global_env.episodes_count\n\n new_machine = create_random_machine(maximal_number_of_vertex=6, maximal_number_of_edges=6, random_seed=seed,\n env=env)\n\n if is_it_machine_runnable(new_machine):\n params = HAMParamsCommon(env)\n try:\n ham_runner(\n ham=RootMachine(LoopInvokerMachine(machine_to_invoke=super_runner(new_machine, env))),\n num_episodes=num_episodes,\n env=env, 
params=params,\n no_output=True\n )\n ham_runner(ham=RootMachine(machine_to_invoke=LoopInvokerMachine(new_machine)),\n num_episodes=num_episodes,\n env=env, params=params, no_output=True)\n\n # to_plot.append(PlotParams(curve_to_draw=params.logs[\"ep_rewards\"], label=\"Random\" + str(seed + 1)))\n reward = sum(params.logs[\"ep_rewards\"])\n draw_graph(save_folder + str(reward) + \":::\" + str(seed),\n new_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n # draw_graph(\"pics/\" + str(reward).rjust(10, \"0\"),\n # new_machine.get_graph_to_draw(action_to_name_mapping=env.get_actions_as_dict()))\n except KeyError:\n print(\"keyError\", end=\"\")\n except AssertionError:\n print(\"assertion\", end=\"\")\n plot_multi(to_plot)\n\n\nif __name__ == '__main__':\n main(begin_seed=random.randrange(1, 2000000000))\n"
},
{
"alpha_fraction": 0.6200084090232849,
"alphanum_fraction": 0.6300966739654541,
"avg_line_length": 33.985294342041016,
"blob_id": "bb8b5ec7c9dd670b38e9c6b29ecac224f7bb9feb",
"content_id": "e281568dfdeba013ea0210250ebbe34059f15924",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2379,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 68,
"path": "/workshop/generate_combination.py",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\n\nfrom HAM.HAM_core import Start, Stop, Action, Call, Choice\nfrom environments.arm_env.arm_env import ArmEnvToggleTopOnly\n\n\ndef vertex_list_to_str(vertex_list):\n ans = \"\"\n for i in vertex_list:\n ans = ans + \" \" + str(i)\n return ans\n\n\ndef generate_list_of_vertexes(vertex_types, vertex_of_each_type_max_count, max_vertex_count, current_list=None, deep=0, ans=None):\n if current_list is None:\n current_list = []\n if ans is None:\n ans = []\n if max_vertex_count == len(current_list):\n ans.append(deepcopy(current_list))\n return ans\n\n if deep < len(vertex_types):\n for add_times in range(0, vertex_of_each_type_max_count + 1):\n for _ in range(add_times):\n current_list.append(vertex_types[deep])\n generate_list_of_vertexes(vertex_types, vertex_of_each_type_max_count, max_vertex_count, current_list, deep=deep + 1, ans=ans)\n for _ in range(add_times):\n current_list.pop()\n return ans\n\n\ndef vertex_combination(vertex_types, max_vertex_count):\n start, stop = None, None\n for vertex in vertex_types:\n if isinstance(vertex, Start):\n start = vertex\n if isinstance(vertex, Stop):\n stop = vertex\n\n assert start is not None and stop is not None, \"Start and Stop vertex should be presented\"\n assert isinstance(vertex_types[0], Start), \"Start vertex should be sorted as first\"\n assert isinstance(vertex_types[1], Stop), \"Stop vertex should be sorted as second\"\n\n return generate_list_of_vertexes(vertex_types=vertex_types, vertex_of_each_type_max_count=1, max_vertex_count=max_vertex_count, current_list=[start, stop],\n deep=2)\n\n\ndef main():\n env = ArmEnvToggleTopOnly(size_x=5, size_y=5, cubes_cnt=4, episode_max_length=600, finish_reward=100, action_minus_reward=-0.001, tower_target_size=4)\n vertex_types = sorted([\n Stop(),\n Start(),\n Action(env.ACTIONS.LEFT),\n Action(env.ACTIONS.RIGHT),\n Action(env.ACTIONS.UP),\n Action(env.ACTIONS.DOWN),\n\n Call(None),\n Choice(),\n ])\n res = generate_list_of_vertexes(vertex_types=vertex_types, vertex_of_each_type_max_count=3, max_vertex_count=5)\n for i in res:\n print(vertex_list_to_str(i))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7854251265525818,
"alphanum_fraction": 0.7935222387313843,
"avg_line_length": 48.29999923706055,
"blob_id": "4938020ea033da8cdad04c7908ce72b0b29de6ff",
"content_id": "56fdb8c65ee20d67392c06e741a3685e648f9a2e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 494,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 10,
"path": "/DQN with Options/README.md",
"repo_name": "cog-isa/HRL-grid",
"src_encoding": "UTF-8",
"text": "# DQN with Options\n\nUsing DQN to represent every option and a separate DQN over options\n\nTo run the DQN algorithms with options:\n 1) Firstly the options must be trained running the files train_option1(go down).py and train_option2(lift cube).py\n 2) After that to train the DQN over options run the file train_over_options.py\n\nThe learning process for the DQN over options is implemented in the file dqn_with_options.py\nThe file option_class.py contains the implementation of the option's work\n\n"
}
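The "DQN with Options" README above describes giving every option its own DQN and training a separate DQN over options. As a rough illustration of how such an option is usually executed at run time (its internal policy runs until a termination predicate fires, then control returns to the meta-policy, which is credited with the accumulated reward), here is a small framework-free sketch. The Option class, the policy and is_terminated callables, and the gym-style four-value env.step return are illustrative assumptions, not the repository's actual option_class.py API.

```python
class Option:
    """Minimal semi-MDP option: an internal policy plus a termination test."""

    def __init__(self, policy, is_terminated, max_steps=50):
        self.policy = policy                # state -> primitive action
        self.is_terminated = is_terminated  # state -> bool
        self.max_steps = max_steps

    def run(self, env, state):
        """Execute the internal policy until termination, a step cap, or episode end."""
        total_reward, steps, done = 0.0, 0, False
        while steps < self.max_steps and not self.is_terminated(state) and not done:
            action = self.policy(state)
            state, reward, done, _ = env.step(action)  # gym-style step assumed
            total_reward += reward
            steps += 1
        return state, total_reward, steps, done
```

A meta-policy would then choose among several such Option objects exactly as a flat agent chooses among primitive actions, using total_reward (discounted over steps) as the return for that choice.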
] | 53 |
Spandan-Madan/sample_images | https://github.com/Spandan-Madan/sample_images | 57811ca2583819339dcbbc33da6ec41d1f298626 | 00ffad23f715149a4d8fd8a87f725e6baee97f60 | 45b7e7f809813be49d5f6a483e5732d053df4c0f | refs/heads/master | 2023-01-06T17:26:37.709509 | 2020-11-11T00:15:12 | 2020-11-11T00:15:12 | 311,755,306 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5700934529304504,
"alphanum_fraction": 0.6028037667274475,
"avg_line_length": 36.7529411315918,
"blob_id": "20cbc276a98a6205daae03379bf104b950470a79",
"content_id": "5247b5a98ca36f4c825e7645df79e3ff2f1bfaed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3210,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 85,
"path": "/generate_static.py",
"repo_name": "Spandan-Madan/sample_images",
"src_encoding": "UTF-8",
"text": "import os \nimport shutil\n\n\ndata_folder = \"/home/smadan/data_differentiable_interpretability/rendered_train_data/jpgs\"\nserver_folder = \"/home/smadan/sample_images/images/\"\n\n\nprint(\"Removing existing files....\")\nfor im in os.listdir(server_folder):\n os.remove(\"%s/%s\"%(server_folder, im))\n\nprint(\"Copying new files....\")\nfor im in os.listdir(data_folder):\n shutil.copyfile(\"%s/%s\"%(data_folder, im), \"%s/%s\"%(server_folder, im))\n\nprint(\"building template....\")\nwith open('index_template.html','r') as F:\n content = F.readlines()\n \nlines = []\n\n\nimage_paths = [\"images/%s\"%i for i in os.listdir('./images') if '.jpg' in i]\nnum_columns = 4\nnum_rows = int(len(image_paths)/num_columns)\n\nfor c in content:\n if \"Photo Grid\" in c:\n lines.append('<div>\\n')\n lines.append('<div class=\"row\">\\n')\n for col_num in range(num_columns):\n lines.append('<div class=\"column\">\\n')\n for row_num in range(num_rows):\n im_path = image_paths[row_num*num_columns + col_num]\n im_string = '<img src=\"%s\" '%im_path + 'style=\"width:100%\">\\n'\n lines.append(im_string)\n lines.append('</div>\\n')\n lines.append('</div>\\n')\n else:\n lines.append(c)\n\nwith open('index.html','w') as F:\n F.writelines(lines)\n\nprint('Updating on git')\nos.system('git add -A')\nos.system('git commit -m \"adding new images\"')\nos.system('git push')\nprint(\"Completed, please visit - http://spandan-madan.github.io/sample_images/\")\n# <div> \n# <div class=\"column\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/ocean.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/wedding.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/mountainskies.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/rocks.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# </div>\n# <div class=\"column\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/ocean.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/wedding.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/mountainskies.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/rocks.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# </div>\n# <div class=\"column\">\n# <img src=\"../w3images/wedding.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/rocks.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/falls2.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/paris.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/nature.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/mist.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/paris.jpg\" style=\"width:100%\">\n# </div>\n# <div class=\"column\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/ocean.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/wedding.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/mountainskies.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/rocks.jpg\" style=\"width:100%\">\n# <img src=\"../w3images/underwater.jpg\" style=\"width:100%\">\n# </div>\n# </div>\n\n"
}
] | 1 |
linkel/nand2tetris | https://github.com/linkel/nand2tetris | 4f9a3c7ed1b633ef09abfe1494ceb8957ad5c9dd | 1864e1dd48c5e64b3ddc6c5a20f3e910343902b0 | aba51c1850329852332f3e485686809a28126080 | refs/heads/master | 2023-05-02T22:07:03.266058 | 2023-04-26T03:40:18 | 2023-04-26T03:40:18 | 157,652,492 | 0 | 0 | null | 2018-11-15T04:33:29 | 2020-09-27T22:46:00 | 2020-10-01T17:27:21 | Hack | [
{
"alpha_fraction": 0.7881041169166565,
"alphanum_fraction": 0.7908921837806702,
"avg_line_length": 96.7272720336914,
"blob_id": "ce8cd3f2472a0bee8e45d8d6727e0085605330d6",
"content_id": "6f5da058e6d2349dcdfb871b653e6c1804321199",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1076,
"license_type": "no_license",
"max_line_length": 326,
"num_lines": 11,
"path": "/09/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Project for Chapter 09\n\nTo familiarize myself with the Jack language in prepartaion for writing the compiler and the OS, I'm to write a program in Jack. \n\nEnded up going with a modification of the existing square game. Now there are other squares on the screen and going over them destroys them, since they don't redraw onto the screen if they don't move. Made squares smaller and the movement 1 pixel instead so it is smoother. \n\nIf I were wanting to spend more time on this, the next step would be to make some collision detection. Could check to see if your square has overlapped with another square, and if it does, then delete the whole overlapped square and increment your square's size. This would have to be constantly checked in the main game loop.\n\nThough if I had a lot of consumables, it does feel like I'd have to loop through all of them checking against the player square? How do real videogames do this? \n\nCould go simpler and have the squares you can eat be in fixed positions so I would just check for collision between player square and fixed locations. \n"
},
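The project 09 notes above ask how to detect when the player's square overlaps a consumable square. For axis-aligned squares the standard answer is a bounding-box overlap test: two boxes intersect unless one lies entirely to the left of, right of, above, or below the other. A small sketch of that check follows, written in Python for readability even though the game itself is in Jack; the function name and the (x, y, size) tuple representation are assumptions made for the example.

```python
def squares_overlap(ax, ay, a_size, bx, by, b_size):
    """Axis-aligned overlap test for two squares given top-left corner and size."""
    if ax + a_size <= bx or bx + b_size <= ax:   # separated horizontally
        return False
    if ay + a_size <= by or by + b_size <= ay:   # separated vertically
        return False
    return True

# In the main game loop, check the player against every remaining consumable
# square and delete (and stop redrawing) any that collide.
player = (10, 10, 8)
food = [(8, 8, 4), (100, 40, 4)]
eaten = [f for f in food if squares_overlap(*player, *f)]
print(eaten)  # [(8, 8, 4)] - only the nearby square overlaps the player
```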
{
"alpha_fraction": 0.7396940588951111,
"alphanum_fraction": 0.7744361162185669,
"avg_line_length": 127.56666564941406,
"blob_id": "35dabd2e12b23eab8bb340ba34fb3295b9b6f947",
"content_id": "6424eb1500d169d04a9e5125987e06dddc2b67af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3857,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 30,
"path": "/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# The Elements of Computing Systems (nand2tetris)\nNand2tetris is a really cool course available completely free on the internet. The first half of the textbook is freely available. It's nuts! I am currently working on project 5 and stunned by the quality of the course materials and how fun it is to work through the assignments. \n\nIt's about starting with a NAND-gate and then building the rest of the logical chips, building an ALU, sequential chips, learning the Hack computer's assembly language, and exploring computer architecture until you've built enough parts to write Tetris and run it on the computer you wrote. \n\n## Progress\n\n- Nov 2018: Started course end of Nov 2018.\n- Dec 2018: Became super stumped by sequential chips in Project 3 and also took a break to focus on career change. \n- Aug 2019: Resumed the course and understood sequential chips much better (data flip flop, register/bits, tick tock)\n- Aug 24, 2019: Finished project 4, the intro to Hack assembly language.\n- Mar 2020: After a long break due to switching gears to Skiena, I have returned! Got the Memory chip working.\n- Mar 19, 2020: The CPU chip is very buggy right now.\n - Note: I can specify bus pins on the out by writing `out[0..14]=thingthatis15wide`. I didn't know (or forgot) I could do it on the left side too.\n- Mar 20, 2020: The CPU chip passes the test script! Also, have completed joining the three parts into the Computer chip.\n- Mar 23, 2020: I have an assembler program that works for label-free code (tested with the three provided programs).\n- Mar 24, 2020: I have completed the assembler. It works for code with labels. Successfully played Pong translated from assembly to binary via the assembler I wrote.\n- Mar 31, 2020: Working slowly on chapter 7, the VM. Writing something to convert from an intermediate language into assembly.\n - Qualms: Is it really okay for me to have 6 - 8 assembly instructions per VM command? Feels like unknown territory.\n- Apr 3, 2020: Done with Stage I of Chapter 7 project, which is to implement the VM arithmetic and logical commands with the constant push.\n- Apr 19, 2020: Took a break since I started my new job. Now resuming work on the memory access, Stage II of the Chapter 7 project.\n - I got the Basic Test to work, but still need to implement popping of the pointer segment as well as the static segment's push and pop. \n- Apr 24, 2020: Completed all the segments for Stage II. Nice! Think that means I can go on to Chapter 8!\n- Aug 29, 2020: Refreshing myself on what I did previously, trying to write commands for the function call. \n- Sep 6, 2020: Got the VM Translator working with SimpleFunction. Still need to handle the init stuff, and correct how I am setting up the function label parts. I think I need to store state on what function I'm in. \n- Sep 19, 2020: I think I have the NestedCall working, without the bootstrap code. I have to think about how the bootstrap code works. \n- Sep 27, 2020: Got the FibonacciElement test working after implementing bootstrap code and compiling multiple vm files into asm file. Verified that NestedCall also works with bootstrap code (last time tested without bootstrap) and just fixed up the code around push and pop of static variables to make StaticsTest work! Hurray! Feels good.\n- Apr 15, 2023: Whoa! Been a long time since I updated this readme. Restarted some effort on the syntax analyzer. 
I know I touched this a little in 2021 as well, even if I didn't update this timeline.\n- Apr 23, 2023: Making progress on the compilation engine. Thinking I need to get all the XML creation process going which will also assist with debugging. And it would be nice to get the indentation on the output pretty. \n- Apr 25, 2023: It completes the parse of SquareGame.jack! I will test it on a few other files and consider improving the indentation of the xml file. "
},
{
"alpha_fraction": 0.781731903553009,
"alphanum_fraction": 0.7971529960632324,
"avg_line_length": 63.82692337036133,
"blob_id": "c72e6bc51e1790dc66bac1be847519bea745dea1",
"content_id": "537d4ad9211693b89e8b8f2b2397b221cfdc97a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3372,
"license_type": "no_license",
"max_line_length": 426,
"num_lines": 52,
"path": "/06/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Background\n\nSymbolic vs binary machine language. \n\nBinary codes represent actual machine instructions.\n`110111010010` etc. Depending on the logic design and the machine language, the pattern can cause the hardware to do something like \"load contents of Memory[7] into register R3\". Lots of opcodes, lots of memory addressing modes, different instruction formats exist for modern machine languages. \n\nWe could document these instructions using a syntax like `LOAD R3, 7` instead. This would then require translation from this symbolic notation into the binary code. Symbolic language is called `assembly` and the translator program is an `assembler`. \n\nThe assemblers parse each command into what it is in binary. \n\nOn the assembly level, writing something like LOAD R3, weight could be loading the contents of the weight variable to r3. Here we have used a variable to represent that memory location. \n\nThere can also be special labels for a location in the program and we can then do a `goto` to go to it. So Variables and Labels introduce symbols into assembly. \n\nAssemblers then are more complicated than just text processing. Translating the symbols into the binary codes is not complicated. But mapping user-defined variable names and symbolic labels is not trivial! \n\n## Symbol Resolution\n\nExample rules:\nTranslated code will be stored in the computer's memory starting at address 0. Variables will be allocated to memory locations starting at address 1024. \n\nNow we build a symbol table. For each new symbol xxx in the source code, we add a line (xxx,n) to the symbol table where n is the memory address associated with the symbol. Now we translate the program into the symbol-less version. \n\nThings to consider:\n\n1. This variable allocation assumption limits us to 1024 instructions-long programs. \n2. Each source command is not always mappable on one word. Some assembly commands may be multiple machine instructions (if i=101 goto end) and will need several memory locations. You'd then track how many words each command generates and update the instruction memory counter accordingly. \n3. Each variable is represented by a single memory location, which is naive. There are variables of different types which will occupy different memory spaces. short for 16-bit, double for 64-bits, for example. So then the 16-bit will occupy one memory address on a 16-bit machine and a block of 4 addresses for the 64-bit number. Translator's got to take into account the data types and the word width of the target hardware. \n\n## Assembler Requirements\n\nFollowing the machine language specification, this assembler must:\n\n1. Parse the symbolic command into its underlying fields.\n2. For each field, generate the bits in machine language.\n3. Replace symbolic references with numeric addresses for memory locations.\n4. Assemble the binary codes into a machine instruction.\n\n# Specification for Hack .asm to .hack (assembly to binary)\n\nInput: text file program.asm. \nOutput: text file program.hack. \nYou'd run it by typing in the cmdline, `Assembler prog.asm`\n\nFour modules proposal:\n1. Parser module to parse input\n2. Code module to provide the binary codes of all the mnemonics\n3. SymbolTable module that handles symbols\n4. Main program.\n\nThis is the first in a series of ive projects that build out the hierarchy of translators--assembler, virtual machine, and compiler.\n\n"
},
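The chapter 6 README above describes resolving labels and variables before generating binary code. A common way to organise that step is two passes: the first pass records each (LABEL) with the ROM address of the instruction that follows it, the second pass allocates fresh RAM slots (from address 16 upward on the Hack platform) to symbols it has not seen before. The sketch below illustrates only that symbol-resolution step for A-instructions; it is a simplified illustration with an assumed function name, not the full assembler in this repository.

```python
PREDEFINED = {"SP": 0, "LCL": 1, "ARG": 2, "THIS": 3, "THAT": 4,
              "SCREEN": 16384, "KBD": 24576}
PREDEFINED.update({f"R{i}": i for i in range(16)})

def resolve_symbols(lines):
    """Return the instructions with every symbolic @xxx replaced by a number."""
    table = dict(PREDEFINED)
    code, rom_address = [], 0
    # Pass 1: a (LABEL) points at the address of the next real instruction.
    for line in lines:
        if line.startswith("(") and line.endswith(")"):
            table[line[1:-1]] = rom_address
        else:
            code.append(line)
            rom_address += 1
    # Pass 2: unknown symbols in A-instructions become RAM variables from 16 up.
    next_ram, out = 16, []
    for line in code:
        if line.startswith("@") and not line[1:].isdigit():
            symbol = line[1:]
            if symbol not in table:
                table[symbol] = next_ram
                next_ram += 1
            line = "@" + str(table[symbol])
        out.append(line)
    return out

print(resolve_symbols(["@i", "M=1", "(LOOP)", "@LOOP", "0;JMP"]))
# ['@16', 'M=1', '@2', '0;JMP']
```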
{
"alpha_fraction": 0.5120718479156494,
"alphanum_fraction": 0.5524985790252686,
"avg_line_length": 36.89361572265625,
"blob_id": "d85271d30f37cc394494a6c2e46eb388790486bb",
"content_id": "31e6ad8cc9cd31dd2d24eb9463362c03fe50a167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1781,
"license_type": "no_license",
"max_line_length": 466,
"num_lines": 47,
"path": "/03/a/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Notes on Project 3a\n\n## Sequential Chips\n\nI had to revisit this to understand what was happening. Through plain imitation of what was in the textbook, I was able to get the Bit chip going, but I didn't really get what was happening. The register and the RAM in part a were fine since the Bit chip was already doing the confusing DFF sequential work, but I neglected implementing the PC chip and took a long break from this course to work on some other topics (and complete my job searching back in January). \n\nLooking at it again, I understand it a little better. \n\nFor the Bit, you can draw out a truth table of what happens at each combination of values in a and b and having the Mux's sel on or off (loaded or not for the chip), and then draw out what happens at t=t+1 if nothing changes, or if you modify what's coming in or modify the load.\n\nFor example,\n\n```\nAt t=0\n\nback (a), in (b), load (sel), result (which will be the back for t=1)\n0 0 0 0\n0 0 1 0\n0 1 0 0\n0 1 1 1\n\nAt t=1\n\nIf we don't change anything:\n\nback (a), in (b), load (sel), result (which will be the back for t=2)\n0 0 0 0\n0 0 1 0\n0 1 0 0\n1 1 1 1\n\nIf we set all our loads to false:\n\nback (a), in (b), load (sel), result (which will be the back for t=2)\n0 0 0 0\n0 0 0 0\n0 1 0 0\n1 1 0 1\n\nIf we set all the loads to true:\n\nback (a), in (b), load (sel), result (which will be the back for t=2)\n0 0 1 0\n0 0 1 0\n0 1 1 1\n1 1 1 1\n```\n"
},
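The truth tables in the project 3a notes above trace how the Bit chip's Mux-plus-DFF loop behaves across clock ticks. The same behaviour can be stated as one rule per tick: the next stored value is the input when load is asserted, and the previous value otherwise. A tiny Python simulation of that rule, useful for sanity-checking the tables, is sketched below; the function name and the tick-by-tick list format are just illustrative, not part of the HDL project.

```python
def simulate_bit(initial, inputs):
    """Simulate the 1-bit register: out(t+1) = in(t) if load(t) else out(t).

    `inputs` is a list of (in_bit, load_bit) pairs, one per clock tick;
    the returned list holds the stored value observed after each tick.
    """
    stored, outputs = initial, []
    for in_bit, load in inputs:
        stored = in_bit if load else stored
        outputs.append(stored)
    return outputs

# Mirrors the tables in the notes: the value only changes on ticks where load=1.
print(simulate_bit(0, [(1, 0), (1, 1), (0, 0), (0, 1)]))  # [0, 1, 1, 0]
```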
{
"alpha_fraction": 0.6755037307739258,
"alphanum_fraction": 0.6839872598648071,
"avg_line_length": 41.8636360168457,
"blob_id": "17db531b24c480d23bde41c98a448a8aec584fd2",
"content_id": "bd85a1e02e5ebd361fca834a74fea7fd2ea2531a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 943,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 22,
"path": "/10/JackAnalyzer.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nfrom JackTokenizer import JackTokenizer\nfrom CompilationEngine import CompilationEngine\n\n# JackAnalyzer, the top level driver\n# 1. creates JackTokenizer from the input.jack file\n# 2. create an output file that will contain the xml (technically being done by CompilationEngine here, TODO?)\n# 3. use CompilationEngine to compile input into the xml\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"usage: JackAnalyzer.py source\")\n path = sys.argv[1]\n with open(path) as f:\n data = f.read()\n tokenizer = JackTokenizer(data)\n # tests correctness of the token generation\n # tree = CompilationEngine(tokenizer, f'{os.path.splitext(path)[0]).read_file_and_build_xml_tokens_only(tokenizer)\n tree = CompilationEngine(\n tokenizer, f\"{os.path.splitext(path)[0]}_output.xml\"\n ).write()\n # tree.write(f'{os.path.splitext(path)[0]}_token_output.xml')\n"
},
{
"alpha_fraction": 0.7751980423927307,
"alphanum_fraction": 0.7847743630409241,
"avg_line_length": 65.87096405029297,
"blob_id": "ca5e91704c8a0de6ef0b21073e62479f98dfaf55",
"content_id": "d469c4877c9744733a4a1914fb7dcd675e3e4f58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14515,
"license_type": "no_license",
"max_line_length": 782,
"num_lines": 217,
"path": "/05/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Notes\n\n## August 25, 2019\n\nStarted reading the course material for the intro to the Hack computer architecture. At this point I've written the processing devices (arithmetic logic unit) and the storage devices (RAM from the data flip flop sequential chip stuff). Now I'm going to get to put it together to make a CPU! \n\n## Stored Programs, Von Neumann Architecture\n\nSo there's a really neat concept called \"stored program\".\n\nPrior to 1930 mechanical computers had their program code embedded in the hardware. But instead of doing that, what if you could load in the program code and store it temporarily in the computer memory? What if you made it...SOFTWARE? \n\nNow you can use the same fixed hardware platform but load in a program to be temporarily stored and run, allowing your hardware platform to do totally different things depending on what it's loaded with. \n\nSo if we have a CPU that has a memory device that can get data from an input device and send data to an output device, that's actually a Von Neumann machine. It's the conceptual blueprint of all computers today. \n\n### Memory\n\nMemory is a sequence of addressable registers. Each one has a unique address and it stores a value. The value is some kind of fixed-size word of information. \n\nThere's one area dedicated to storing data (the arrays, variables, objects to do stuff with), and one area dedicated to storing the program instructions. If they're not in the same physical memory unit this is known as Harvard architecture, which is what the Hack computer is set up with. \n\nTo do stuff with a register, you've got to select the register by supplying an address. This lets you access it immediately. The term Random Access Memory is used to note that each randomly selected register can be reached in the same access time, regardless of the memory size and the register's location in it. \n\nData Memory, like I said earlier, stores variables, arrays, and objects. When you select a register, the contents of it can be either read or written to. \n\nWriting to a register will overwrite the previous value. \n\nInstruction Memory holds the programs that the computer will execute step by step. \n\n1. CPU fetches instruction from register\n2. CPU decodes it\n3. CPU executes the instruction\n4. CPU figures out which instruction to fetch next. \n\n### Central Processing Unit\n\nThe CPU, in order to execute programs, makes use of three parts:\n\n1. The arithmetic logic unit\n2. Registers,\n3. Control unit\n\nThe ALU can perform actions like adding numbers, computing bitwise ANDS, comparison, and more depending on implementation. \n\nThe registers boost performance by storing intermediate results rather than storing them in a separtae RAM chip. So CPUs usually have 2 to 32 high speed registers that an each hold a word. \n\nThe control unit decodes the instructions. Computer instructions are represented as binary code, usually 16, 32, or 64 bits wide. \n\nSo now with these three pieces together, the CPU operation will \n\n1. decode the current instruction\n2. execute it\n3. figure out which to execute next\n4. repeat.\n\nThis is sometimes called the \"fetch-execute cycle.\"\n\n### Registers\n\nAnything that can store a chunk of bits that represents a value like a variable, instruction, or address is usually referred to as a register. \n\nIn this discussion we are focusing on CPU-resident registers--the registers that sit inside the CPU. 
If we didn't have those, then any time the CPU needs to do i/o operations, it would have to access the RAM.\n\nIn this theoretical situation: \n\n1. CPU would send an address value from the CPU to the RAM's address input. \n2. The RAM's direct-access logic would use the address to select a specific memory register.\n3. Register's contetnts now travel back to the CPU if it was a read op, or another value from the CPU replaces it if it was a write op. \n\nThis uses at least 2 chips, an address bus, and a data bus. \n\nNow compare that to the ALU. The ALU is super fast and calculating stuff--but now it'd depend on a slow data storage. Now this is called starvation, which is when a processor is denied the resources it needs to complete its work. \n\nIf we had registers in the CPU itself, we'd save ourselves a lot of time! \n\nWhen you specify an instruction that includes a memory register, like M[address] = value, we have to supply the address of that memory! Consequently this would use a lot of bits. In our platform, the Hack computer, we have to use 2 machine instructions, 2 clock cycles even for mundane shit like Memory[address] = 0. \n\nBecause there's fewer CPU resident registers, this means that identifying one just uses a couple of bits. So it'd only need one machine instruction. \n\nSo there's Data Registers in the CPU that hold data, and Address Registers that specify the address. \n\nValues placed in an address register usually selects that meomry register. This the A thing that was inside of the assembly project, or the @100 setting A to 100. \n\nLastly, when the CPU executes a program, it must keep track of the address of the instruction that's gotta be fetched and executed next. The address is usually kept in a register called the program counter, or PC. \n\n### Input and Output\n\nComputers, to interact with stuff outside, need to use I/O devices. We don't pay attention much to the low-level architecture of these devices because the contract of these devices will (hopefully) make them all look the same to our computer. Memory-mapped I/O is when we make that device look like it's just a regular memory segment to the CPU. Now it gets allocated a spot in memory, which is the memory map for that device. For example, on the keyboard that I used last project, the memory map continually reflects the state of the keyboard--when a user was pressing the keyboard, the value representing the key was in the memory map for that duration. For the screen, the screen would continuously reflect what its memory map had--the 1's would blacken the corresponding pixel. \n\nThese get refreshed several times per second so that the user feels the response is instant. So any computer program could access any I/O device by manipulating the registers in those memory area.s \n\nStandards are what make sure the contracts for what codes do what on a keyboard, or how a device will interact with the computer. \n\nSo you can design a CPU and platform to be independent of the number or nature or make of these I/O devices! You just allocate a new memory map to that device, pay attention to its base address, and now you just have to manipulate registers in that map according to the contract/protocol to interact with it. \n\n## The Hack CPU\n\nSo the Hack platform consists of:\n\n1. CPU\n2. Instruction Memory\n3. Data Memory\n4. Screen\n5. Keyboard\n\nThe Hack CPU consists of:\n\n1. ALU \n2. Data Register (D)\n3. Address Register (A)\n4. Program Counter (PC)\n\nThese are 16-bit registers. 
The D stores data values, the A does three different things (store an inputted value to be saved, or points to where to jump next in instruction memory, or points to an address in the data memory to make use of). \n\nHack CPU executes instructions in the 16 bit format of \"ixxaccccccdddjjj\". \n\ni-bit is the opcode, or operation code, which tells it what the instruction type is. 0 for A-instruction, 1 for C-instruction. \n\nIf it's an A instruction, then the instruction is treated as a 16-bit binary value that's loaded into the A register. If it's a C-instruction, then we use that formatting that I just typed up there, the ixxaccccccdddjjj stuff. Each one of those characters represents a sequence of control bits that tells it what to do. \n\nThe Hack Instruction Memory consists of:\nA direct-access read-only memory device, also called ROM. IT's 32K addressable 16-bit registers. I think we get this pre-made...\n\nThe Input/Output devices interact with the memory-mapped buffers as mentioned earlier. \n\nThe Data Memory is created by a chip called Memory. This chip is three 16-bit storage devices--a RAM (16K registers), a Screen (8K registers), and a Keyboard (1 register). \n\nSo it's positions 0 to 16383, then 16384 to 24575, then 24576 on the keyboard. \n\nThe topmost chip is a Computer chip that is the CPU, instruction memory, and data memory.\n\nWe have to come up with a logic gate architecture that can execute instructions and determine which instruction to be fetched next. We already have the building blocks all created in the previous projects--now we gotta arrange them and connect them correctly. \n\n## Instruction Decoding\n\nA continuation of the ixxaccccccdddjjj stuff I mentioned earlier. \n\nThe a and c bits code the comp part of the instruction, and the d and j bits code the destination and jump parts of the instruction, and the x is unused for the C-instruction. \n\nAll these fields get routed to different parts simultaneously of the CPU architecture and different chip-parts will take it in and do what they are made to do to execute the instruction. For example, a C instruction's single a-bit determines whether the ALU will operate on the A register input or the M input, and then the six c-bits decide which function the ALU will compute. The d bits determine which registers \"accept\" ALU output, and the three j bits then branch control. \n\n## Instruction Fetching\n\nAfter executing the current instruction, the CPU determines the address of the next one. The Program Counter always stores the address of the next instruction. We have to connect the PC output of the CPU into the address input of the instruction memory. \n\nBy connecting the PC output into the instruction memory's input, the instruction memory will always emit the instruction that needs to be fetched and executed next. The output of the instruction memory gets connected to the instruction input of the CPU, and then we are done with that fetch-execute cycle. \n\nSo for the Hack computer, the current program gets stored in the instruction memory at address 0. If we want to start or restart execution of a program, we would set the PC to 0. So the CPU's reset input goes straight to the reset input of the PC chip. Asserting the bit on the CPU will affect PC to turn PC = 0 and then we'll go to that first instruction. After execution of the first instruction, we want to do the next, so then PC increments. PC++. 
\n\nIn other cases, we want to jump to an address of an instruction instead of mindlessly always executing the next line. So then we'd want to write a way to set PC = A if we jump. \n\nNow we can see that there's two things going on for the PC, at least:\n\n1. If jump, PC = A\n2. Otherwise PC++\n\nWhether we are jumping or not depends on the j-bits of the instruction and on the ALU's output. \n\nSo the output of the A register must feed into the input of the PC register. The PC chip has a load-bit that enables it to take in a new input value, or else it'll keep hanging on to what it has. So we can assert the load-bit to execute our PC = A instead of default PC++, and now we can figure out the jump boolean part. \n\n1. j-bits of current instruction specify the jump condition (our JEQ, JLE stuff). \n2. ALU output bits zr and ng, which can determine if the specified condition got satisfied.\n\nThe above are all excellent hints as to how to proceed. Hmmmm. \n\n## Memory \n\nAs mentioned before, the memory chip is a combo of three lower-level chips. \n\n1. RAM16K\n2. Screen\n3. Keyboard\n\nUsers of the memory chip will see a single address space from 0 to 24576 (which is 0x6000 in hexadecimal). \n\nSo we will have to split up those address inputs right so that we can select 16394 in the memory address and access address 0 in the Screen chip. We did that in chapter 3 to link sections of memory to other sections. \n\n## Computer\n\nCombining the \n\n1. CPU\n2. Data Memory: RAM16K/Screen/Keyboard\n3. Instruction Memory: ROM32K\n\nwill allow us to make our computer! \n\n## General Perspective\n\nGeneral-purpose computers can execute lots of programs. Dedicated computers are usually embedded in devices like cell phones, game consoles, cameras, weapons, factory equipment, consumer electronics, etc. For those specific applications, a single program is burned into the dedicated computer's Random Access Memory, and it'll only execute that. But otherwise these computers all share similar architecture:\n\n1. Stored programs\n2. Fetch-decode-execute logic\n3. CPU\n4. Registers\n5. Program Counters\n\nMost general-purpose computers, unlike Hack, use a single address space for storing both data and instructions. The address for the instruction and for the data must e fed into the same destination of the single address input of the shared address space. \n\nUsually there's two cycle logic that does this.\n\n1. Fetch Cycle: instruction address is fed to the address input of memory, it emits current instruction, and stores in instruction register.\n2. Execute Cycle: decode the instruction, then the optional data address that you get from it is fed to the memory's address input, so that the instruction can manipulate the selected memory location. \n\nThe Hack computer separates the instructions and data into two separate parts so the single cycle logic can be done--however, programs cannot be changed dynamically as a result. Not the clearest on what that means...\n\nOur Hack computer here is also simpler--we don't have to worry about connecting to printers, disks, and networks. We also don't have control over brightness levels per pixel or colors in our screen. Most modern computers allow the CPU to send high-level graphics instructions to a graphics card that then controls the screen. So the CPU doesn't have to draw figures and polygons directly. The GPU would do it with its own embedded chip-set that does it better and more efficiently. 
It can do very many instances of simple and repetitive math, whereas the CPU can focus hard on a few instances of complex action. Something like that. \n\nMore academic/educational/complex courses/efforts about designing computer hardward is focused around better performance. Stuff like memory hierarchies (cache), I/O device access, pipelining, parallelism, instruction prefetching, and other optimization isn't covered in nand2tetris. \n\nSounds like there's two main schools of hardware design.\n\n1. Complex Instruction Set Computing (CISC) - get better performance by providing rich and complex instruction sets. \n2. Reduced Instrucion Set Computing (RISC) - use simple instruction sets to promote faster hardware implementation. \n\nHack features neither a strong instruction set nor special hardware acceleration techniques. \n\n\n\n"
},
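The chapter 5 notes above reduce instruction fetching to a small decision: on reset the program counter goes to 0, on a satisfied jump it loads the A register, and otherwise it increments. The sketch below restates that next-PC rule in Python, including the jump test driven by the three j-bits and the ALU's zr/ng flags; the function names and argument layout are my own illustration, not the CPU.hdl wiring.

```python
def jump_taken(j_bits, zr, ng):
    """j_bits = (j1, j2, j3) from the C-instruction: jump if out<0, out==0, out>0."""
    j_lt, j_eq, j_gt = j_bits
    positive = not zr and not ng
    return (j_lt and ng) or (j_eq and zr) or (j_gt and positive)

def next_pc(pc, a_register, reset, is_c_instruction, j_bits, zr, ng):
    """Fetch logic: reset wins, then a satisfied jump loads A, else increment."""
    if reset:
        return 0
    if is_c_instruction and jump_taken(j_bits, zr, ng):
        return a_register
    return pc + 1

# Example: the ALU output was zero and the instruction ends in JEQ (j_bits 0,1,0),
# so the next instruction is fetched from the address held in A.
print(next_pc(pc=7, a_register=42, reset=False,
              is_c_instruction=True, j_bits=(0, 1, 0), zr=True, ng=False))  # 42
```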
{
"alpha_fraction": 0.74556964635849,
"alphanum_fraction": 0.7616033554077148,
"avg_line_length": 32.38028335571289,
"blob_id": "b9060ef0e1bd854e92cdcbebe16a31e04c759667",
"content_id": "65ac30602be3dfdc665b1fcaa2522212767f3362",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2370,
"license_type": "no_license",
"max_line_length": 549,
"num_lines": 71,
"path": "/10/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "## Tokenizer\n\nRun by using `python JackAnalyzer.py Square/SquareGame.jack` from within this folder.\n\n## Notes\n\n4/25/2023\nLook at how time flies! Took me 2 years to get around to progressing this. My goodness. \nIt successfully runs through the SquareGame.jack file. My first recursive descent LL(1) syntax analyzer! And I basically wrote it all myself, with the exception of looking at the Wikipedia article. Didn't Google, didn't look at Stack Overflow, didn't look at Github, didn't ask ChatGPT anything. That said, I have been writing a grammar for an LR parser at work, so in these long 2 years I've actually put some work into parser-adjacent stuff. \n\nWell, I say this with lots of excitement, but I still need to check it against a few more files and also consider improving the indentation so I can easily diff against the provided xml examples. \n\n4/1/2021\n\nCompiler can be broken into syntax analysis and code generation.\n\nSyntax analysis can be broken into tokenizing and parsing.\n\nThere's a set of rules, called a context-free grammar, that describes how to properly analyze the syntax of some code.\n\nTokenizing is when you group the text file's text into chunks, or tokens, like (, or 'a string' or 324234 or &.\n\n### Ponderances\n\n4/16/2021\nI notice that when I break the file (as a string) down into the word chunks, for example:\n\n```\n/**\nDisposes\nthis\ngame.\n*/\n\nmethod\nvoid\ndispose()\n{\n\ndo\nsquare.dispose();\n\ndo\nMemory.deAlloc(this);\n\nreturn;\n\n}\n```\n\nI can see in the above I have to deal with comments.\n\n```\nmethod\nvoid\nmoveSquare()\n{\n\nif\n(direction\n=\n1)\n```\n\nAnd in this snippet I can see that there are sometimes parentheses next to some words, and I will have to peel off the parentheses from the identifier.\n\n4/1/2021\n\nI noticed that a line that is a let statement in the Jack language, like `let direction = 0;` gets correctly compiled by the provided compiler in the tools section of the project. I was thinking that what I'd write would expect that there's a whitespace between the identifier and the equals sign, so it'd fail at `let direction=0`... I guess if I were to write something that was robust it should be able to know, oh, when it's no longer letters and numbers or underscores for the identifier, end it and see if the current character is a new token.\n\nHence why there's two extra folders right now where I was playing around with that.\n"
},
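The README above describes tokenizing as breaking the source into chunks and then peeling symbols such as parentheses off identifiers. Below is a minimal sketch of that idea, assuming a whitespace-then-symbol split; the symbol set is abbreviated and string constants and comments are not handled, so this is an illustration rather than the repo's JackTokenizer.

```python
# Minimal Jack-style tokenizer sketch: split on whitespace, then peel
# punctuation symbols off each chunk. Illustrative only; the symbol set
# is abbreviated and string constants / comments are not handled.
SYMBOLS = set("{}()[].,;+-*/&|<>=~")

def tokenize(source: str):
    tokens = []
    for chunk in source.split():
        start = 0
        for i, ch in enumerate(chunk):
            if ch in SYMBOLS:
                if start < i:               # identifier/number sitting before the symbol
                    tokens.append(chunk[start:i])
                tokens.append(ch)           # the symbol itself is a token
                start = i + 1
        if start < len(chunk):              # trailing identifier/number
            tokens.append(chunk[start:])
    return tokens

print(tokenize("if (direction = 1) { do square.dispose(); }"))
# ['if', '(', 'direction', '=', '1', ')', '{', 'do', 'square', '.',
#  'dispose', '(', ')', ';', '}']
```

The same two-level split (strip comments, split on whitespace, then split again on symbols) is what the repo's `_process_text` method in JackTokenizer.py does.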
{
"alpha_fraction": 0.7861152291297913,
"alphanum_fraction": 0.7899556756019592,
"avg_line_length": 50.272727966308594,
"blob_id": "7b8beb7e0228a4db4a4b7af65d91f530d6005d44",
"content_id": "f5fc30f3407e49fca14c866f83b24bd7fe3431f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3385,
"license_type": "no_license",
"max_line_length": 363,
"num_lines": 66,
"path": "/07/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Virtual Machine\n\nWhen you write a program, there generally needs to be a way to get it to the binary machine language so that the computer can read it.\nThe thing that turns the written program into binary language is the compiler. \n\nThere's a two-tier translation model that is present in some languages like Java and C# where first the program is translated into an intermediate language (Java bytecode, the instruction set of the JVM, and for C# intermediate language, which runs on the CLR, the common language runtime). Then the intermediate language is translated into the machine language. \nThis idea is nice because different CPUs have different implementations of how it understands the binary instructions. By having an intermediate language, now you can run the intermediate language on many different platforms via the CPU running a virtual machine emulating a specific computer. \n\nFor personal clarity, it sounds like assembly language to binary is done via the assembler, and that the compiler will probably directly turn the high level language into binary.\n\n## VM Language for the book\n\nThe VM language here consists of four types of commands:\n\n1. Arithmetic\n2. Memory access\n3. Program flow\n4. Subroutine calling commands.\n\nThis chapter has to do with building a VM translator, which can turn the arithmetic and memory commands into binary.\n\n## The Stack\n\nStack is super useful for arithmetic. Load into the stack, if you have an add command, pop one, save it, pop the other, and add the two together then put it back in the stack.\nThus high-level arithmetic and boolean operations can be turned into sequences of stack commands (ch 10-11).\n\n## VM Specification\n\nArithmetic commands perform arithmetic and logical operations on the stack.\n\nMemory access transfers info between stack and the virtual memory segments. \n\nProgram flow controls conditional and unconditional branching operations.\n\nFunction calling controls calling subroutines and then returning from them. \n\n### Program Flow\n\n- label - label declaration\n- goto - unconditional branching\n- if-goto - conditional branching\n- function functionName nLocals - function declaration with number of locals it's got \n- call Function nArgs - call the function with the number of arguments it has\n- return - switch control back to the outer scope\n\n# Compiler and VM Translator\n\nSo at the top level, the Jack language will have classes with methods. Foo.jack, Bar.jack.\nAfter getting compiled it'll become Foo.vm, Bar.vm. \nAfter getting translated by the VM Translator, each method in the class will go from YYY class XXX method to YYY.XXX, making\nvarious multiple files. \n\n# Thoughts\n\nI think for incrementing the stack pointer I'd want to do:\n```\n@SP\nM=M+1\n```\n\nInterestingly when pushing to a segment offset it's no big deal to grab the offset, store into D register, go to the offset address, then grab the info there,\nand then go to the stack pointer's address and save it in. But it's not possible to POP with just one D register like that--you need to hold the info on the offset address, otherwise you\nlose it when you switch addresses. So I am using the R13 register as storage for the address offset. \n\nThe way this thing handles the static push and pop is weirding me out a bit. So we're just making unique labels which get stored in the RAM addresses\nstarting at 16 and onwards, and then it just works? \n"
},
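The notes above mention incrementing SP with `@SP` / `M=M+1` and stashing the computed segment address in R13 when popping. Below is a small sketch, assuming the standard Hack mnemonics, of the assembly a translator might emit for `push constant i` and `pop local i`; it is not the repo's CodeWriter, just the same idea in miniature.

```python
# Sketch of the Hack assembly a VM translator might emit for two of the
# commands discussed above. Simplified for illustration.

def push_constant(i: int) -> str:
    # *SP = i; SP = SP + 1
    return (f"@{i}\n"
            "D=A\n"
            "@SP\nA=M\nM=D\n"     # write the constant to the top of the stack
            "@SP\nM=M+1\n")       # increment the stack pointer

def pop_local(i: int) -> str:
    # addr = LCL + i; *addr = pop()
    return (f"@{i}\n"
            "D=A\n"
            "@LCL\nD=D+M\n"       # compute the target address LCL + i
            "@R13\nM=D\n"         # stash it in R13 so it survives the next steps
            "@SP\nM=M-1\nA=M\nD=M\n"   # pop the top of the stack into D
            "@R13\nA=M\nM=D\n")   # go to the saved address and store D there

print(push_constant(7))
print(pop_local(2))
```

Push gets away with only the D register because the value flows one way; pop needs R13 because the target address would otherwise be lost when A is repointed at the stack, which is exactly the issue the notes describe.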
{
"alpha_fraction": 0.6243842244148254,
"alphanum_fraction": 0.6342364549636841,
"avg_line_length": 37.47618865966797,
"blob_id": "f1a44918d47465183cbb94b40860fa92a41226eb",
"content_id": "f4f290a76e609b9cf79c441ab0d5fc461c96a741",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 812,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 21,
"path": "/06/testassembler.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom .assembler import Parser\n\n\nclass ParserTestCase(unittest.TestCase):\n def setUp(self) -> None:\n test_lines = ['@123','M=1','@456','M=0','D;JGT','D=D-A;JNE']\n self.parser = Parser(test_lines)\n\n def test_command_type(self):\n self.assertEqual(self.parser.command_type(), 'A_COMMAND')\n self.parser.advance()\n self.assertEqual(self.parser.command_type(), 'C_COMMAND')\n self.parser.advance()\n self.assertEqual(self.parser.command_type(), 'A_COMMAND')\n self.parser.advance()\n self.assertEqual(self.parser.command_type(), 'C_COMMAND')\n self.parser.advance()\n self.assertEqual(self.parser.command_type(), 'C_COMMAND')\n self.parser.advance()\n self.assertEqual(self.parser.command_type(), 'C_COMMAND')\n\n\n\n\n"
},
{
"alpha_fraction": 0.5141621828079224,
"alphanum_fraction": 0.5142912268638611,
"avg_line_length": 34.54816436767578,
"blob_id": "df1e7f90e6fec31d3f8220eb5f24afcd73a9970c",
"content_id": "c929e3ca696cca0ea9c42fe0555e0dc67821dda4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15499,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 436,
"path": "/10/CompilationEngine.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import os\nimport xml.etree.ElementTree as ET\nfrom JackTokenizer import JackTokenizer, TokenType\n\nstatement_keywords = {\"let\", \"do\", \"if\", \"while\", \"return\"}\n\n\nclass CompilationEngine:\n def __init__(self, tokenizer: JackTokenizer, outputFilename):\n self.tree = None\n self.node = None\n self.tokenizer = tokenizer\n self.filename = outputFilename\n\n self.compile_class()\n\n def _match_specific_or_any_token_of_type(\n self, curr_tok, curr_type: TokenType, tok, any_tok_of_type\n ):\n if any_tok_of_type == curr_type and len(tok) == 0:\n self.tokenizer.advance()\n el = ET.SubElement(self.node, curr_type.value)\n el.text = f\" {curr_tok} \"\n el.tail = \"\\n\"\n return True\n elif curr_tok == tok:\n self.tokenizer.advance()\n el = ET.SubElement(self.node, curr_type.value)\n el.text = f\" {curr_tok} \"\n el.tail = \"\\n\"\n return True\n return False\n\n def _accept(self, tok, any_tok_of_type=None):\n \"\"\"If the current token is equal to the input, move the tokenizer along\"\"\"\n curr_type = self.tokenizer.token_type()\n if curr_type == TokenType.identifier:\n curr_tok = self.tokenizer.identifier()\n return self._match_specific_or_any_token_of_type(\n curr_tok, curr_type, tok, any_tok_of_type\n )\n elif curr_type == TokenType.keyword:\n curr_tok = self.tokenizer.keyword()\n return self._match_specific_or_any_token_of_type(\n curr_tok, curr_type, tok, any_tok_of_type\n )\n elif curr_type == TokenType.symbol:\n curr_tok = self.tokenizer.symbol()\n return self._match_specific_or_any_token_of_type(\n curr_tok, curr_type, tok, any_tok_of_type\n )\n elif curr_type == TokenType.int_const:\n curr_tok = self.tokenizer.intVal()\n return self._match_specific_or_any_token_of_type(\n curr_tok, curr_type, tok, any_tok_of_type\n )\n elif curr_type == TokenType.string_const:\n curr_tok = self.tokenizer.stringVal()\n return self._match_specific_or_any_token_of_type(\n curr_tok, curr_type, tok, any_tok_of_type\n )\n else:\n raise Exception(\"unknown type found in accept method\")\n\n def _expect(self, tok, any_tok_of_type=None):\n if self._accept(tok, any_tok_of_type):\n return True\n self.tree.write(f\"{os.path.splitext(self.filename)[0]}_error.xml\")\n raise Exception(\n f\"Expected {tok} but received {self.tokenizer.current_token}. 
Cursor at {self.tokenizer.cursor}.\"\n )\n\n def compile_class(self):\n root_node = ET.Element(\"class\")\n root_node.text = \"\\n\"\n self.tree = ET.ElementTree(root_node)\n self.node = root_node\n self.tokenizer.advance()\n self._expect(\"class\")\n self._expect(\"\", TokenType.identifier)\n self._expect(\"{\")\n while self.tokenizer.token_type() == TokenType.keyword and (\n self.tokenizer.keyword() == \"static\" or self.tokenizer.keyword() == \"field\"\n ):\n self.compile_class_var_dec()\n\n while self.tokenizer.token_type() == TokenType.keyword and (\n self.tokenizer.keyword() == \"constructor\"\n or self.tokenizer.keyword() == \"method\"\n or self.tokenizer.keyword() == \"function\"\n ):\n self.compile_subroutine()\n self._expect(\"}\")\n return self.tree\n\n def compile_class_var_dec(self):\n \"\"\"Compiles a static declaration or a field declaration.\"\"\"\n parent = self.node\n el = ET.SubElement(self.node, \"classVarDec\")\n self.node = el\n if self._accept(\"static\") or self._accept(\"field\"):\n self.compile_type()\n self._expect(\"\", TokenType.identifier) # varName\n while self._accept(\",\"):\n self._expect(\"\", TokenType.identifier)\n self._expect(\";\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_subroutine(self):\n \"\"\"Compiles a complete method, function, or constructor\"\"\"\n parent = self.node\n el = ET.SubElement(self.node, \"subroutineDec\")\n self.node = el\n if (\n self._accept(\"constructor\")\n or self._accept(\"method\")\n or self._accept(\"function\")\n ):\n if not self._accept(\"void\"):\n self.compile_type()\n self._expect(\"\", TokenType.identifier) # subroutineName\n self._expect(\"(\")\n self.compile_parameter_list()\n self._expect(\")\")\n\n # subroutine body\n el = ET.SubElement(self.node, \"subroutineBody\")\n self.node = el\n self._expect(\"{\")\n while self.tokenizer.token_type() == TokenType.keyword and (\n self.tokenizer.keyword() == \"var\"\n ):\n self.compile_var_dec()\n self.compile_statements()\n self._expect(\"}\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_type(self, optional=False):\n \"\"\"Compiles a type, I added this contract for my own ease. 
It should not result in a new XML wrapper node.\"\"\"\n if not (self._accept(\"int\") or self._accept(\"char\") or self._accept(\"boolean\")):\n if optional:\n self._accept(\"\", True)\n else:\n self._expect(\"\", TokenType.identifier)\n\n def compile_parameter_list(self):\n \"\"\"Compiles a parameter list, not including the enclosing ()\"\"\"\n parent = self.node\n el = ET.SubElement(self.node, \"parameterList\")\n self.node = el\n if self.compile_type(True): # bool to show it's optional\n self._expect(\"\", TokenType.identifier)\n while self._accept(\",\"):\n self.compile_type()\n self._expect(\"\", TokenType.identifier)\n # else it's an empty param list\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_var_dec(self):\n \"\"\"Compiles a var declaration\"\"\"\n parent = self.node\n el = ET.SubElement(self.node, \"varDec\")\n self.node = el\n self._expect(\"var\")\n self.compile_type()\n self._expect(\"\", TokenType.identifier)\n while self._accept(\",\"):\n self._expect(\"\", TokenType.identifier)\n self._expect(\";\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_statements(self):\n \"\"\"Compiles a sequence of statements, not including the enclosing ()\"\"\"\n parent = self.node\n el = ET.SubElement(self.node, \"statements\")\n self.node = el\n while self.tokenizer.token_type() == TokenType.keyword:\n if self.tokenizer.keyword() == \"do\":\n self.compile_do()\n elif self.tokenizer.keyword() == \"let\":\n self.compile_let()\n elif self.tokenizer.keyword() == \"while\":\n self.compile_while()\n elif self.tokenizer.keyword() == \"if\":\n self.compile_if()\n elif self.tokenizer.keyword() == \"return\":\n self.compile_return()\n else:\n break\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_subroutine_call(self):\n \"\"\"Should not create a new XML wrapper node.\"\"\"\n self._expect(\"\", TokenType.identifier)\n if self._accept(\"(\"): # go straight to expression list route\n self.compile_expression_list()\n self._expect(\")\")\n else:\n # self._expect(\"\", TokenType.identifier) # foo.bar then expression list route\n self._expect(\".\")\n self._expect(\"\", TokenType.identifier)\n self._expect(\"(\")\n self.compile_expression_list()\n self._expect(\")\")\n\n def compile_do(self):\n parent = self.node\n el = ET.SubElement(self.node, \"doStatement\")\n self.node = el\n self._expect(\"do\")\n self.compile_subroutine_call()\n self._expect(\";\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_let(self):\n parent = self.node\n el = ET.SubElement(self.node, \"letStatement\")\n self.node = el\n self._expect(\"let\")\n self._expect(\"\", TokenType.identifier)\n if self._accept(\"[\"):\n self.compile_expression()\n self._expect(\"]\")\n self._expect(\"=\")\n self.compile_expression()\n self._expect(\";\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_while(self):\n parent = self.node\n el = ET.SubElement(self.node, \"whileStatement\")\n self.node = el\n self._expect(\"while\")\n self._expect(\"(\")\n self.compile_expression()\n self._expect(\")\")\n self._expect(\"{\")\n self.compile_statements()\n self._expect(\"}\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_return(self):\n parent = self.node\n el = ET.SubElement(self.node, \"returnStatement\")\n self.node = el\n self._expect(\"return\")\n if (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \";\"\n ):\n self._expect(\";\")\n else:\n self.compile_expression()\n self._expect(\";\")\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_if(self):\n 
parent = self.node\n el = ET.SubElement(self.node, \"ifStatement\")\n self.node = el\n self._expect(\"if\")\n self._expect(\"(\")\n self.compile_expression()\n self._expect(\")\")\n self._expect(\"{\")\n self.compile_statements()\n self._expect(\"}\")\n if self._accept(\"else\"):\n self._expect(\"{\")\n self.compile_statements()\n self._expect(\"}\")\n el.tail = \"\\n\"\n self.node = parent\n\n def is_op(self, keyword):\n return keyword in {\"+\", \"-\", \"*\", \"/\", \"&\", \"|\", \"<\", \">\", \"=\"}\n\n # I think I need a lookahead here according to the textbook.\n def compile_expression(self):\n parent = self.node\n el = ET.SubElement(self.node, \"expression\")\n self.node = el\n self.compile_term()\n while self.tokenizer.token_type() == TokenType.symbol and self.is_op(\n self.tokenizer.symbol()\n ):\n if (\n self._accept(\"+\")\n or self._accept(\"-\")\n or self._accept(\"*\")\n or self._accept(\"/\")\n or self._accept(\"&\")\n or self._accept(\"|\")\n or self._accept(\"<\")\n or self._accept(\">\")\n or self._accept(\"=\")\n ):\n self.compile_term()\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_term(self):\n parent = self.node\n el = ET.SubElement(self.node, \"term\")\n self.node = el\n if self.tokenizer.token_type() == TokenType.identifier:\n self.tokenizer.advance() # look ahead\n # it is a varName [ expression ]\n if (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \"[\"\n ):\n self.tokenizer.retreat()\n self._expect(\"\", TokenType.identifier)\n self._expect(\"[\")\n self.compile_expression()\n self._expect(\"]\")\n # it is a subroutineCall\n elif (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \"(\"\n ):\n self.tokenizer.retreat()\n self.compile_subroutine_call()\n # also a subroutineCall\n elif (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \".\"\n ):\n self.tokenizer.retreat()\n self.compile_subroutine_call()\n # just a varName\n else:\n self.tokenizer.retreat()\n self._expect(\"\", TokenType.identifier)\n # ( expression )\n elif (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \"(\"\n ):\n self._expect(\"(\")\n self.compile_expression()\n self._expect(\")\")\n\n elif not (\n self._accept(\"\", TokenType.int_const)\n or self._accept(\"\", TokenType.string_const)\n or self._accept(\"\", TokenType.keyword)\n ):\n # if it's none of the above then it is a unaryOp term\n if self._accept(\"-\") or self._accept(\"~\"):\n self.compile_term()\n el.tail = \"\\n\"\n self.node = parent\n\n def compile_expression_list(self):\n parent = self.node\n el = ET.SubElement(self.node, \"expressionList\")\n self.node = el\n if (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \")\"\n ):\n el.tail = \"\\n\"\n self.node = parent\n else:\n self.compile_expression()\n while (\n self.tokenizer.token_type() == TokenType.symbol\n and self.tokenizer.symbol() == \",\"\n ):\n self._expect(\",\")\n self.compile_expression()\n el.tail = \"\\n\"\n self.node = parent\n\n # TODO: Unused\n def is_keyword_constant(self, token):\n return token in {\"true\", \"false\", \"null\", \"this\"}\n\n def write(self) -> ET:\n self.tree.write\n self.tree.write(self.filename)\n\n def read_file_and_build_xml_tokens_only(self, tokenizer) -> ET:\n root = ET.Element(\"tokens\")\n root.text = \"\\n\"\n while tokenizer.has_more_tokens():\n tokenizer.advance()\n tok = tokenizer.current_token\n if tokenizer.token_type() == TokenType.keyword:\n 
keyword = tokenizer.keyword()\n sub_el = ET.SubElement(root, \"keyword\")\n sub_el.text = f\" {tok} \"\n sub_el.tail = \"\\n\"\n elif tokenizer.token_type() == TokenType.symbol:\n symbol = tokenizer.symbol()\n sub_el = ET.SubElement(root, \"symbol\")\n sub_el.text = f\" {tok} \"\n sub_el.tail = \"\\n\"\n\n elif tokenizer.token_type() == TokenType.identifier:\n identifier = tokenizer.identifier()\n sub_el = ET.SubElement(root, \"identifier\")\n sub_el.text = f\" {tok} \"\n sub_el.tail = \"\\n\"\n\n elif tokenizer.token_type() == TokenType.int_const:\n constant = tokenizer.intVal()\n sub_el = ET.SubElement(root, \"integerConstant\")\n sub_el.text = f\" {tok} \"\n sub_el.tail = \"\\n\"\n\n elif tokenizer.token_type() == TokenType.string_const:\n constant = tokenizer.stringVal()\n sub_el = ET.SubElement(root, \"stringConstant\")\n sub_el.text = f\" {tok} \"\n sub_el.tail = \"\\n\"\n\n else:\n raise TypeError(\n f\"{tokenizer.token_type} does not match any existing token type.\"\n )\n root.tail = \"\\n\"\n\n tree = ET.ElementTree(root)\n return tree\n"
},
{
"alpha_fraction": 0.7818599343299866,
"alphanum_fraction": 0.8048220276832581,
"avg_line_length": 95.77777862548828,
"blob_id": "19d2a246ce05be1debc8eea7d7d07453df5dccd7",
"content_id": "6f479e929084d1bec480b0222941876629d4cc9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 543,
"num_lines": 9,
"path": "/04/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "# Using the Hack Assembly Language\n\nThis section had two assignments--writing a multiplication program in assembly and writing a program that would turn the screen fully black when a key was pressed. \n\nIt was very interesting having to think about how I wanted to use the registers and how I wanted to jump from label to label. The multiplication program was pretty easy but the fill program got me stumped for a bit because of how the screen was actually being used. There were 16 bits (1111111111111111) available in each memory location and so each mem location for the screen range of RAM was representing 16 pixels of space on the screen. Hence initially I had just one line colored for each memory location and was wondering why that was. \n\nI also was forgetting to reset the pointer to the screen's start location after the first loop. \n\nGood learning experience! "
},
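The note above about each screen word covering 16 pixels is easy to make concrete. Here is a small sketch assuming the standard Hack screen layout (512x256 pixels, base address 16384, 32 words per row) that maps a pixel to its RAM word and bit.

```python
# Map a (row, col) pixel to its word address and bit in the Hack screen
# memory map. Assumes the standard layout: 512x256 pixels, base address
# 16384, and 32 sixteen-bit words per row.
SCREEN_BASE = 16384
WORDS_PER_ROW = 512 // 16  # 32 words cover one row of pixels

def pixel_location(row: int, col: int):
    word_address = SCREEN_BASE + row * WORDS_PER_ROW + col // 16
    bit = col % 16                 # which bit inside that word
    return word_address, bit

print(pixel_location(0, 0))    # (16384, 0)
print(pixel_location(0, 17))   # (16385, 1)
print(pixel_location(1, 0))    # (16416, 0)
```

Writing -1 (all sixteen bits set) to a single word therefore blackens a 16-pixel horizontal strip, which is why one store appears to color a whole little line segment rather than one pixel.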
{
"alpha_fraction": 0.3357253670692444,
"alphanum_fraction": 0.34359967708587646,
"avg_line_length": 39.598426818847656,
"blob_id": "0234ca53599a2a5e785ac55f0240c4fc650a6227",
"content_id": "420cc2c709ee4d67a2915a2da21adb31862bc46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25780,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 635,
"path": "/08/VMtranslator_ch8.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\n\nclass Parser:\n \"\"\"Parses a single vm file.\"\"\"\n def __init__(self, lines):\n self.lines = lines\n self.curr_line = lines[0]\n self.split_line = self.curr_line.split()\n self.idx = 0\n self.c_arithmetic = {\"add\", \"sub\", \"neg\", \"eq\", \"gt\", \"lt\", \"and\", \"or\", \"not\"}\n\n def has_more_commands(self):\n \"\"\"Return True if there are more commands left for this VM file.\"\"\"\n return self.idx < len(self.lines) - 1\n\n def advance(self):\n \"\"\"Step forward to the next line.\"\"\"\n self.idx += 1\n self.curr_line = self.lines[self.idx]\n self.split_line = self.curr_line.split()\n\n def command_type(self):\n \"\"\"Returns one of: C_ARITHMETIC, C_PUSH, C_POP, C_LABEL, C_GOTO, C_IF, C_FUNCTION, C_RETURN, C_CALL\"\"\"\n if self.split_line[0] in self.c_arithmetic:\n return \"C_ARITHMETIC\"\n elif self.split_line[0] == \"push\":\n return \"C_PUSH\"\n elif self.split_line[0] == \"pop\":\n return \"C_POP\"\n elif self.split_line[0] == \"label\":\n return \"C_LABEL\"\n elif self.split_line[0] == \"goto\":\n return \"C_GOTO\"\n elif self.split_line[0] == \"if-goto\":\n return \"C_IF\"\n elif self.split_line[0] == \"function\":\n return \"C_FUNCTION\"\n elif self.split_line[0] == \"return\":\n return \"C_RETURN\"\n elif self.split_line[0] == \"call\":\n return \"C_CALL\"\n else:\n raise ValueError\n\n def arg1(self):\n \"\"\"Returns the first argument of the current command\"\"\"\n return self.split_line[0]\n\n def arg2(self):\n \"\"\"Returns the second argument of the current command\"\"\"\n return self.split_line[1]\n\n def arg3(self):\n \"\"\"Returns the third argument of the current command\"\"\"\n return self.split_line[2]\n\n\nclass CodeWriter:\n def __init__(self):\n self.file = None\n self.filename = None\n self.label_number = 0\n self.curr_function = None\n\n def set_filename(self, filename: str):\n \"\"\"Tells CodeWriter that the translation of a new VM file has begun.\"\"\"\n self.file = open('{}'.format(filename), 'a+')\n self.filename = os.path.basename(filename)\n\n def write_init(self):\n # Initialize stack pointer (SP) to 256\n self.file.write('@256\\n'\n 'D=A\\n'\n '@SP\\n'\n 'M=D\\n')\n self.write_call('Sys.init', 0)\n\n def write_label(self, label):\n \"\"\"Write a label belonging to parent function 'function'\"\"\"\n if not self.curr_function:\n print('No current function!')\n raise RuntimeError\n self.file.write('({}${})\\n'.format(self.curr_function, label))\n\n def write_goto(self, label):\n \"\"\"Write a goto to a label inside a function.\"\"\"\n if self.curr_function:\n self.file.write('@{}${}\\n'\n '0;JMP\\n'.format(self.curr_function, label))\n else:\n self.file.write('@{}\\n'\n '0;JMP\\n'.format(label))\n\n def write_if(self, label):\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@{}${}\\n'\n 'D;JNE\\n'.format(self.curr_function, label))\n\n def _push_contents_of_address(self, address):\n self.file.write('@{}\\n'\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(address))\n\n def write_call(self, function, param_count):\n return_address = self._generate_label()\n self.write_pushpop('C_PUSH', 'constant', return_address)\n self._push_contents_of_address('LCL')\n self._push_contents_of_address('ARG')\n self._push_contents_of_address('THIS')\n self._push_contents_of_address('THAT')\n\n # set ARG to SP - n - 5 where n is param_count\n self._push_contents_of_address('SP')\n self.write_pushpop('C_PUSH', 'constant', str(param_count))\n self.write_arithmetic('sub')\n 
self.write_pushpop('C_PUSH', 'constant', '5')\n self.write_arithmetic('sub')\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@ARG\\n'\n 'M=D\\n')\n # reposition LCL (local) to SP (where we are now on stack)\n self.file.write('@SP\\n'\n 'D=M\\n'\n '@LCL\\n'\n 'M=D\\n')\n # TODO: I need to be able to go to the function, as well as the \n # if the function has any labels inside of it, to go to its function$label? \n # Doublecheck correctness of this.\n self.file.write('@{}\\n'\n '0;JMP\\n'.format(function))\n\n self.file.write('({})\\n'.format(return_address))\n\n def write_function(self, function_name, k):\n \"\"\"Declare a function function_name that has k local variables\"\"\"\n self.file.write('({})\\n'.format(function_name))\n for i in range(int(k)):\n self.write_pushpop('C_PUSH', 'constant', '0')\n\n def write_return(self):\n # save LCL in R15, which is 'FRAME' in Fig 8.5\n self.file.write('@LCL\\n'\n 'D=M\\n'\n '@R15\\n' \n 'M=D\\n')\n # push contents of R15 to stack and increment stack pointer\n self.file.write('@R15\\n'\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n')\n self.write_pushpop('C_PUSH', 'constant', '5')\n self.write_arithmetic('sub')\n # R14 is 'RETURN', set RETURN to *(FRAME-5)\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@R14\\n'\n 'M=D\\n')\n # TODO: The following line is for *ARG = pop() - I don't quite understand this. Need to review it.\n # It seems like I need to bring the stack pointer back one before I do the following?\n # I did this earlier so that my code worked for SimpleFunction but I'm suspecting that I needed to move it back\n # earlier here, for the set RETURN to *(FRAME-5) code I added before. I think maybe I now don't need to do it here. \n\n # self.file.write('@SP\\n'\n # 'M=M-1\\n')\n self.write_pushpop('C_POP', 'argument', '0')\n # Restore SP, THAT, THIS, ARG, LCL for the caller\n self.file.write('@ARG\\n'\n 'D=M+1\\n'\n '@SP\\n'\n 'M=D\\n'\n '@R15\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@THAT\\n'\n 'M=D\\n'\n '@R15\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@THIS\\n'\n 'M=D\\n'\n '@R15\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@ARG\\n'\n 'M=D\\n'\n '@R15\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@LCL\\n'\n 'M=D\\n')\n # Goto the return address previously saved in R14\n # R14 contains the address on the stack which contains the ROM line number to jump back to. 
\n self.file.write('@R14\\n'\n 'A=M\\n'\n '0;JMP\\n')\n \n def write_arithmetic(self, command: str):\n \"\"\"Writes the assembly code that is the translation of the arithmetic command.\"\"\"\n if command == \"add\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=D+M\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif command == \"sub\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=M-D\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif command == \"neg\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=-M\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif command == 'eq':\n label_eq = self._generate_label()\n label_neq = self._generate_label()\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M-D\\n'\n '@{}\\n'\n 'D;JEQ\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=0\\n'\n '@{}\\n'\n 'D;JNE\\n'\n '({})\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=-1\\n'\n '({})\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(label_eq, label_neq, label_eq, label_neq))\n elif command == 'gt':\n label_gt = self._generate_label()\n label_ngt = self._generate_label()\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M-D\\n'\n '@{}\\n'\n 'D;JGT\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=0\\n'\n '@{}\\n'\n 'D;JLE\\n'\n '({})\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=-1\\n'\n '({})\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(label_gt, label_ngt, label_gt, label_ngt))\n elif command == 'lt':\n label_lt = self._generate_label()\n label_nlt = self._generate_label()\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M-D\\n'\n '@{}\\n'\n 'D;JLT\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=0\\n'\n '@{}\\n'\n 'D;JGE\\n'\n '({})\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=-1\\n'\n '({})\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(label_lt, label_nlt, label_lt, label_nlt))\n elif command == \"and\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=D&M\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif command == \"or\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=D|M\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif command == \"not\":\n self.file.write('@SP\\n'\n 'M=M-1\\n'\n 'A=M\\n'\n 'M=!M\\n'\n '@SP\\n'\n 'M=M+1\\n')\n else:\n return NotImplemented\n\n def write_pushpop(self, command: str, segment: str, index: str):\n \"\"\"Writes the assembly code that is the translation of the given command push or pop.\"\"\"\n if command == \"C_PUSH\":\n if segment == \"constant\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(index))\n elif segment == \"argument\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@ARG\\n'\n 'A=D+M\\n' # add offset\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(index))\n elif segment == \"local\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@LCL\\n'\n 'A=D+M\\n' # add offset\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(index))\n elif segment == \"this\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@THIS\\n'\n 'A=D+M\\n' # add offset\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(index))\n elif segment == \"that\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@THAT\\n'\n 'A=D+M\\n' # add offset\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(index))\n elif segment == \"pointer\":\n if index == '0':\n self.file.write('@THIS\\n'\n 'D=M\\n' \n '@SP\\n'\n 
'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n')\n elif index == '1':\n self.file.write('@THAT\\n'\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n')\n else:\n raise Exception(\"Index is out of range for pointer segment, only 0 or 1 permitted\")\n elif segment == \"temp\":\n if int(index) < 0 or int(index) > 7:\n raise Exception(\"Out of range for temp segment\")\n self.file.write('@{}\\n'\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(str(int(index) + 5)))\n elif segment == \"static\":\n self.file.write('@{}\\n'\n 'D=M\\n'\n '@SP\\n'\n 'A=M\\n'\n 'M=D\\n'\n '@SP\\n'\n 'M=M+1\\n'.format(self.curr_function + '.' + index))\n else:\n raise Exception(\"Segment not matching any of expected\")\n elif command == \"C_POP\":\n if segment == \"argument\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@ARG\\n'\n 'D=D+M\\n'\n '@R13\\n' # Save the segment offset address in R13 register\n 'M=D\\n'\n '@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@R13\\n'\n 'A=M\\n' # Set address to what was saved in R13\n 'M=D\\n'.format(index))\n elif segment == \"local\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@LCL\\n'\n 'D=D+M\\n'\n '@R13\\n' # Save the segment offset address in R13 register\n 'M=D\\n'\n '@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@R13\\n'\n 'A=M\\n' # Set address to what was saved in R13\n 'M=D\\n'.format(index))\n elif segment == \"this\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@THIS\\n'\n 'D=D+M\\n'\n '@R13\\n' # Save the segment offset address in R13 register\n 'M=D\\n'\n '@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@R13\\n'\n 'A=M\\n' # Set address to what was saved in R13\n 'M=D\\n'.format(index))\n elif segment == \"that\":\n self.file.write('@{}\\n'\n 'D=A\\n'\n '@THAT\\n'\n 'D=D+M\\n'\n '@R13\\n' # Save the segment offset address in R13 register\n 'M=D\\n'\n '@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@R13\\n'\n 'A=M\\n' # Set address to what was saved in R13\n 'M=D\\n'.format(index))\n elif segment == \"pointer\":\n if index == '0':\n self.file.write('@SP\\n'\n 'M=M-1\\n' \n 'A=M\\n'\n 'D=M\\n'\n '@THIS\\n'\n 'M=D\\n')\n elif index == '1':\n self.file.write('@SP\\n'\n 'M=M-1\\n' \n 'A=M\\n'\n 'D=M\\n'\n '@THAT\\n'\n 'M=D\\n')\n else:\n print(index)\n raise Exception(\"Index is out of range for pointer segment, only 0 or 1 permitted\")\n elif segment == \"temp\":\n if int(index) < 0 or int(index) > 7:\n raise Exception(\"Out of range for temp segment\")\n self.file.write('@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@{}\\n' # Go to temp segment offset and store within\n 'M=D\\n'.format(str(int(index) + 5)))\n elif segment == \"static\":\n self.file.write('@SP\\n' # Pop from stack\n 'M=M-1\\n'\n 'A=M\\n'\n 'D=M\\n'\n '@{}\\n' # Go to the static segment offset and store within\n 'M=D\\n'.format(self.curr_function + \".\" + index))\n else:\n raise Exception(\"Segment not matching any of expected\")\n else:\n raise ValueError\n\n def close(self):\n \"\"\"Close the output file.\"\"\"\n self.file.close()\n\n def _generate_label(self) -> str:\n label_number = self.label_number\n self.label_number += 1\n return 'label' + self.curr_function + str(label_number)\n\n\ndef run_file(f, filename):\n lines = f.readlines()\n lines = [line.partition('//')[0].strip() for line in lines]\n lines = [line for line in lines if line]\n parser = Parser(lines)\n code_writer = CodeWriter()\n code_writer.set_filename(os.path.splitext(filename)[0])\n code_writer.curr_function = 
os.path.splitext(filename)[0]\n code_writer.write_init()\n while True:\n command_type = parser.command_type()\n if command_type == 'C_ARITHMETIC':\n code_writer.write_arithmetic(parser.arg1())\n elif command_type == 'C_PUSH' or command_type == 'C_POP':\n code_writer.write_pushpop(command_type, parser.arg2(), parser.arg3())\n elif command_type == 'C_LABEL':\n code_writer.write_label(parser.arg2())\n elif command_type == 'C_GOTO':\n code_writer.write_goto(parser.arg2())\n elif command_type == 'C_IF':\n code_writer.write_if(parser.arg2())\n elif command_type == 'C_FUNCTION':\n code_writer.write_function(parser.arg2(), parser.arg3())\n elif command_type == 'C_RETURN':\n code_writer.write_return()\n elif command_type == 'C_CALL':\n code_writer.write_call(parser.arg2(), parser.arg3())\n if not parser.has_more_commands():\n break\n parser.advance()\n\n\ndef run_directory(f, filename):\n lines = f.readlines()\n lines = [line.partition('//')[0].strip() for line in lines]\n lines = [line for line in lines if line]\n parser = Parser(lines)\n code_writer = CodeWriter()\n # For a directory, we want to set the filename as the directory and the current function as the filename\n code_writer.set_filename(os.path.splitext(filename)[0].split('/')[-2] + '.asm')\n code_writer.curr_function = os.path.splitext(filename)[0].split('/')[-1]\n code_writer.write_init()\n while True:\n command_type = parser.command_type()\n if command_type == 'C_ARITHMETIC':\n code_writer.write_arithmetic(parser.arg1())\n elif command_type == 'C_PUSH' or command_type == 'C_POP':\n code_writer.write_pushpop(command_type, parser.arg2(), parser.arg3())\n elif command_type == 'C_LABEL':\n code_writer.write_label(parser.arg2())\n elif command_type == 'C_GOTO':\n code_writer.write_goto(parser.arg2())\n elif command_type == 'C_IF':\n code_writer.write_if(parser.arg2())\n elif command_type == 'C_FUNCTION':\n code_writer.write_function(parser.arg2(), parser.arg3())\n elif command_type == 'C_RETURN':\n code_writer.write_return()\n elif command_type == 'C_CALL':\n code_writer.write_call(parser.arg2(), parser.arg3())\n if not parser.has_more_commands():\n break\n parser.advance()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"usage: VMtranslator.py source\")\n path = sys.argv[1]\n if os.path.isdir(path):\n for directory, subdirectories, files in os.walk(path):\n for file in files:\n if file.endswith('.vm'):\n filepath = os.path.join(path, file)\n with open(filepath) as f:\n run_directory(f, filepath)\n\n elif os.path.isfile(path) and os.path.splitext(path)[1] == '.vm':\n with open(path) as f:\n run_file(f, path)\n"
},
{
"alpha_fraction": 0.4930555522441864,
"alphanum_fraction": 0.5243055820465088,
"avg_line_length": 10.5600004196167,
"blob_id": "7410c41961daf1b024099c99c4a802ecdbe9621b",
"content_id": "7ef61674dcb440b263000ee54d2186cdaef1cd06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 25,
"path": "/07/scratch.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "line = \"push constant 6\"\n\nprint(line.split())\nfilename=\"test.vm\"\nprint('{}'.format(filename))\n\n\nex1 = 'dear' \\\n 'god' \\\n 'help' \\\n\n\nex2 = \"\"\"dear\ngod\nhelp\"\"\"\n\nex3 = \"python why {}\".format(\"god\")\n\nex4 = '@SP\\n' \\\n 'AM=M+1\\n' \\\n 'M=A\\n'\n\nprint(ex1)\nprint(ex2)\nprint(ex4)"
},
{
"alpha_fraction": 0.7711077332496643,
"alphanum_fraction": 0.7785941958427429,
"avg_line_length": 82.8720932006836,
"blob_id": "2aa3b00c316fabb20a9b17ff059bbac13849aaad",
"content_id": "7d0192eb60d244800251b5f020fde85701acde5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7213,
"license_type": "no_license",
"max_line_length": 739,
"num_lines": 86,
"path": "/08/README.md",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "## High level languages\n\nHigh level languages let us abstract away the machine execution. \nThe low level will manage the calling subroutine, and the called subroutines. \nWhat the heck does that mean? So if I were writing something like a math equation, each function I use\nlike power() or sqrt() is a subroutine and somehow all of these as we go along gets executed with the inputs it gets\nand all of this gets nicely terminated and returns the flow of control to the next command.\n\nAll the assembly stuff I've been is super sequential, and can only just be read and made to jump to different lines. \n\nIf we want a nice high level language to write in, the low level is handling all these details of the subroutines!\n\n- pass params from the caller to the called subroutine\n- saving the state of the caller before switching to execute the called subroutine\n- allocate space for the local variables of the subroutine\n- jump to execute the subroutine\n- return values from the subroutine back to the caller\n- recycle the memory space occupied by the subroutine\n- reinstate that saved state of the caller\n- jump to execute code of the caller following the spot where it was left\n\nLook at that! That's a crapload of stuff to track just for my math equation. \n\n## Program Flow\n\nLike previously mentioned and seen, the default execution of computer programs is linear. Sequential until broken by branching commands. \nBranching logic uses a goto to tell where to go, or to give a label that is an address to jump to. \n\nJump if a given boolean condition is true? Where do we get the boolean expression? We get it from the top of the stack, doing a comparison.\n\nYou can see that the VM commands label, goto label, and if-goto label will have something to do with this branching. We will express these commands \nusing the branching logic of the assembly language. \n\n## Labeling\n\nHere, the function labels will all be in the format (function_name$label) so that they're unique. \n\nThis means I should save what function scope I'm in since if there's a need to jump to a label there, I need that function name, right?\n \n## Other Notes\n\nSeems like all the folders now for function calls in Proj 8 have Sys.vm files that will create a Sys.init function. So in this implementation of the VM translator, I can always\njust commit to expecting that it exists given that the bootstrapper code will always call it. Meaning my previous proj 7 stuff won't have Sys.init and that program isn't compatible (unless I\ndo something with an argument or using it only if it exists).\n\n## Program Flow\n\nFigure 8.5 in the book details what to do when calling a function, declaring a function, and returning from a function. Will try to implement that. It feels like I could use the write pushpop functions I have already written to simplify some of the pushing. For example, for the call f n command listed in the book, it says:\n\n```\npush return-address\npush LCL\npush ARG\npush THIS\npush THAT\nARG = SP - n - 5\nLCL = SP\ngoto f\n(return-address)\n```\n\nAnd it seems like I could use push constant giving it the special address, and use the subtraction I have already written in chapter 7 to do the part that calculates what ARG is now. From a clean code perspective it feels a little janky to use a push constant and give it the label instead--feels like that should be another function, or should leave a comment about the function's overloadedness. Not 100% sure if it works yet though so will play with it. 
\n\n## Pointers\n\nStill not 100% instant knowledge for me when it comes to storing, for example, LCL in a temp variable. LCL is a fixed location in memory meant to store the address to the start of local variables. So if I am storing the address to which LCL points to then that's TEMP = LCL? Versus storing the actual memory fixed location of LCL, which would be TEMP = *LCL? I'd probably never be intending to do the latter. Gonna have to double check my call and return function to make sure I'm not conflating these two things in my generated assembly code.\n\n## *ARG = pop()\nI don't think I completely understand this step. We're popping off the top of the stack into ARG. \n\n## Bugs\n\nNoticed that I made a mistake restoring the variables for THAT, THIS, ARG, and LCL. Instead I was just stuffing the literal integers into them because I didn't access the memory stored at the location they're pointing to via the `A=M` then `D=M` stuff. Remember, every time you set A equal to M, you're now changing what M was. A and M change together--don't get confused by overloading of what the equals assignment operator means to you in other code. \n\nI also was jumping to code line 14 via using R14 literally, instead of getting the content that R14 had stored in its memory and jumping to what the content was. \n\nTrying to get it to work with NestedCall. Finding lots of problems. 1. R14 stored the address that stored the line number in the ROM to go to. I accidentally set it up to go to the line number that R14 had stored. 2. Subtraction takes a step back in the stack to work with what's there, then after it gets the new value it steps forward. So I have to make sure I step back in the stack if I want to pop that item, otherwise I'm off by one. Lots of off by one problems... 3. My function calls and returns were off. Still are a bit off. Currently debugging.\n\nActually, I discovered that for #1 in the paragraph above, I shouldn't store the address that contains the line number in R14! I should just store the damn line number in R14 in the first place, because the address that contains the line number is at risk of being overwritten by stuff while the program is running. So instead of fixing it by going to the address that stores the line number at the end of the RETURN code, I should just make sure I store that line number in R14 directly, at the beginning when I finish calculating the *(FRAME - 5) part. \n\nSep 27 2020\nI wrote up code to compile multiple vm files into one asm file, which my code previously wasn't doing (it was compiling one by one if a directory was specified) and got that working with the bootstrap code. One insidious problem I fixed was that the labels I was generating weren't unique enough if I was doing it by directory--so if I made label01 for a file1 and then label01 for file2 obviously there are problems here with the overlap. Instead I gave the labels the name file1label01 so file2label01 is unique too.\n\nStaticsTest fails, though...Gotta figure this out. \n\nHey, turns out it had to do with the push and pop on the static segment! Chapter 7 mentions this about representing each static variable number j in a VM file f as the symbol f.j. I was previously using the filename since I hadn't implemented that yet, which works for static variables that are in a single vm file, but for multiple vm files they need to be unique so I should use the current function I'm in. I guess naming-wise I call it \"function\" when it's pretty much a separate file. 
So I've contributed to the confusion here. Ex: the class1.vm could have multiple functions in it, but I'd be saying in CodeWriter that I'm in function 'class1' when I do labels related to it. I should probably have thought of a better naming system? "
},
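The `call f n` sequence quoted above (push return-address, LCL, ARG, THIS, THAT, then ARG = SP - n - 5 and LCL = SP) can be modeled with plain lists to make the pointer arithmetic concrete. The sketch below is a simulation of the bookkeeping only, with made-up placeholder pointer values; it is not the translator's emitted assembly.

```python
# Tiny simulation of the "call f n" frame setup quoted above, using a plain
# list as the stack and a dict for the segment pointers. The pointer values
# are placeholders; the point is the ARG = SP - n - 5 arithmetic.

def call(stack, ptrs, return_address, n_args):
    stack.append(return_address)       # push return-address
    for seg in ("LCL", "ARG", "THIS", "THAT"):
        stack.append(ptrs[seg])        # save the caller's pointers
    sp = len(stack)
    ptrs["ARG"] = sp - n_args - 5      # ARG now indexes the first pushed argument
    ptrs["LCL"] = sp                   # LCL marks the base of the callee's frame
    return stack, ptrs

stack = [10, 20]                        # the caller already pushed two arguments
ptrs = {"LCL": 300, "ARG": 400, "THIS": 3000, "THAT": 3010}
stack, ptrs = call(stack, ptrs, return_address="RET_0", n_args=2)
print(ptrs["ARG"], ptrs["LCL"])         # 0 7 -> ARG points at stack[0], the value 10
```

With two arguments on the stack and five saved words, SP lands seven slots up, so ARG = SP - 2 - 5 points back at the first argument; that is exactly the position the return sequence later relies on when it does *ARG = pop().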
{
"alpha_fraction": 0.4313725531101227,
"alphanum_fraction": 0.5098039507865906,
"avg_line_length": 12,
"blob_id": "c201639c3dfbe9860d172d46d474651db7ab2d55",
"content_id": "3ceedb43d127f85e874bbf29e34dba3aaa097c9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 4,
"path": "/10/Square/scratch.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "val = 10\nwhile val > 0:\n val -= 1\n print(val)"
},
{
"alpha_fraction": 0.5154857635498047,
"alphanum_fraction": 0.5235468745231628,
"avg_line_length": 26.4069766998291,
"blob_id": "96625b4c5607689ac60ef4ae2171d9d47514aaf8",
"content_id": "49581c2e834e1d9767d29b3e6ef246341dee8a89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4714,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 172,
"path": "/10/JackTokenizer.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import re\nfrom typing import List\nimport xml.etree.ElementTree as ET\nfrom enum import Enum\n\n\nclass TokenType(Enum):\n keyword = \"keyword\"\n symbol = \"symbol\"\n identifier = \"identifier\"\n int_const = \"int_const\"\n string_const = \"string_const\"\n\n\nsymbol_set = {\n \"{\",\n \"}\",\n \"(\",\n \")\",\n \"[\",\n \"]\",\n \".\",\n \",\",\n \";\",\n \"+\",\n \"*\",\n \"/\",\n \"&\",\n \"|\",\n \"<\",\n \">\",\n \"=\",\n \"~\",\n}\n\nkeyword_set = {\n \"class\",\n \"constructor\",\n \"function\",\n \"method\",\n \"field\",\n \"static\",\n \"var\",\n \"int\",\n \"char\",\n \"boolean\",\n \"void\",\n \"true\",\n \"false\",\n \"null\",\n \"this\",\n \"let\",\n \"do\",\n \"if\",\n \"else\",\n \"while\",\n \"return\",\n}\n\ndigits_set = {\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"}\n\n\nclass JackTokenizer:\n def __init__(self, file_as_string):\n self.file = file_as_string\n self.token_list: List[str] = self._process_text(file_as_string)\n self.cursor = -1\n self.current_token = \"\"\n # print(self.token_list)\n\n def has_more_tokens(self) -> bool:\n \"\"\"Return True if there are more tokens in this file.\"\"\"\n if self.cursor >= len(self.token_list) - 1:\n return False\n return True\n\n # Four passes, inefficient.\n def _process_text(self, text: str) -> str:\n intermediate_tokens = self._remove_comments(text).split()\n final_tokens: List[str] = []\n for i, token in enumerate(intermediate_tokens):\n s_start = 0\n c_idx = 0\n # we have already split based on whitespace so now we \"split\" based on symbols\n while c_idx < len(token):\n if token[c_idx] in symbol_set:\n if token[s_start:c_idx] != \"\":\n final_tokens.append(token[s_start:c_idx])\n if token[c_idx] != \"\":\n final_tokens.append(token[c_idx])\n c_idx += 1\n s_start = c_idx\n else:\n c_idx += 1\n if token[s_start:c_idx] != \"\":\n final_tokens.append(token[s_start:c_idx])\n return final_tokens\n\n def _remove_comments(self, text: str) -> str:\n rgx_list = [\"\\/\\/.*\\n\", \"\\/\\*(.|\\n)*?\\*\\/\"]\n new_text = text\n for rgx_match in rgx_list:\n new_text = re.sub(rgx_match, \"\", new_text)\n # print(new_text)\n return new_text\n\n def advance(self):\n self.cursor += 1\n if self.cursor < len(self.token_list):\n self.current_token = self.token_list[self.cursor]\n\n \"\"\"Can I implement lookahead with this?\"\"\"\n\n def retreat(self):\n self.cursor -= 1\n self.current_token = self.token_list[self.cursor]\n\n def token_type(self):\n \"\"\"Returns the type of the current token:\n keyword, symbol, identifier, int_const, string_const\n \"\"\"\n tok = self.current_token\n if tok in symbol_set:\n return TokenType.symbol\n elif tok in keyword_set:\n return TokenType.keyword\n elif tok[0] == '\"':\n return (\n TokenType.string_const\n ) # might want to make sure the string doesn't contain \" or \\n edge case\n elif tok[0] in digits_set:\n return TokenType.int_const\n else:\n return TokenType.identifier\n\n def keyword(self):\n \"\"\"When token type is keyword, call this to\n return the type of keyword that is the current token:\n class, method, function, constructor, int, boolean, char, void,\n var, static, field, let, do, if, else, while, return,\n true, false, null, this\n \"\"\"\n return self.current_token\n\n def symbol(self):\n \"\"\"Returns the character which is the current token.\n Call this when token type is a symbol.\n \"\"\"\n return self.current_token\n\n def identifier(self):\n \"\"\"Returns the identifier which is the current token.\n Call this when token type is an identifier\n 
\"\"\"\n return self.current_token\n\n def intVal(self):\n \"\"\"Returns the integer value of the current token.\n Call this when token type is an int_const\n \"\"\"\n val = int(self.current_token)\n if val > 32767 or val < 0:\n print(f\"{val} is out of range of 0 to 32767\")\n raise Exception(f\"{val} is out of range of 0 to 32767\")\n return int(self.current_token)\n\n def stringVal(self):\n \"\"\"Returns the string value of the current token,\n without the double quotes. Call this when the\n token type is string_const\n \"\"\"\n return self.current_token.replace('\"', \"\")\n"
},
{
"alpha_fraction": 0.3469437062740326,
"alphanum_fraction": 0.38970765471458435,
"avg_line_length": 33.344398498535156,
"blob_id": "46d8d20f7536366d199a7dc4f430e1000c973c6a",
"content_id": "76c9a41de09911d425857d07192ccdea714ab9e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8278,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 241,
"path": "/06/assembler.py",
"repo_name": "linkel/nand2tetris",
"src_encoding": "UTF-8",
"text": "import sys\n\n\nclass Parser:\n def __init__(self, lines: []):\n self.lines = lines\n self.idx = 0\n self.curr_line = lines[0]\n self.ctriple = None\n self.symbol_table = {'SP': 0,\n 'LCL': 1,\n 'ARG': 2,\n 'THIS': 3,\n 'THAT': 4,\n 'R0': 0,\n 'R1': 1,\n 'R2': 2,\n 'R3': 3,\n 'R4': 4,\n 'R5': 5,\n 'R6': 6,\n 'R7': 7,\n 'R8': 8,\n 'R9': 9,\n 'R10': 10,\n 'R11': 11,\n 'R12': 12,\n 'R13': 13,\n 'R14': 14,\n 'R15': 15,\n 'SCREEN': 16384,\n 'KBD': 24576 }\n\n self._first_pass()\n self._second_pass()\n\n def _first_pass(self):\n \"\"\"Builds the symbol table's label/ROM address pair from any labels (LABEL)\"\"\"\n rom_address = 0\n lines_copy = []\n while True:\n if self.command_type() == 'L_COMMAND':\n self.symbol_table[self.curr_line[1:len(self.curr_line)-1]] = rom_address\n else:\n lines_copy.append(self.curr_line)\n rom_address += 1\n if not self.has_more_commands():\n break\n self.advance()\n\n self.idx = 0\n self.lines = lines_copy\n self.curr_line = self.lines[0]\n\n def _second_pass(self):\n \"\"\"Matches symbolic A-instruction with ROM location or creates new entry in RAM\"\"\"\n ram_address = 16\n while True:\n if self.command_type() == 'A_COMMAND':\n s = self.symbol()\n try:\n int(s)\n except ValueError:\n if s in self.symbol_table:\n self.lines[self.idx] = '@' + str(s)\n else:\n self.symbol_table[s] = ram_address\n ram_address += 1\n if not self.has_more_commands():\n break\n self.advance()\n\n self.idx = 0\n self.curr_line = self.lines[0]\n\n def has_more_commands(self):\n \"\"\"Return True if there are more commands left\"\"\"\n return self.idx < len(self.lines) - 1\n\n def advance(self):\n \"\"\"Reads the next command\"\"\"\n self.idx += 1\n self.curr_line = self.lines[self.idx]\n self.ctriple = None\n\n def command_type(self):\n \"\"\"Returns the type of the current command\"\"\"\n if self.curr_line[0] == \"@\":\n return \"A_COMMAND\"\n elif '=' in self.curr_line or ';' in self.curr_line:\n return \"C_COMMAND\"\n else:\n return \"L_COMMAND\"\n\n def symbol(self):\n \"\"\"Returns the symbol or decimal of the current command\"\"\"\n cmd_type = self.command_type()\n if cmd_type == \"A_COMMAND\":\n return self.curr_line[1:]\n if cmd_type == \"L_COMMAND\":\n raise self.symbol_table[self.curr_line[1:]]\n\n def _cinstruction(self):\n if self.command_type() != \"C_COMMAND\":\n raise Exception(\"Not a c-instruction\")\n equal_idx = None\n semicolon_idx = None\n dest = ''\n comp = ''\n jump = ''\n for i in range(len(self.curr_line)):\n if self.curr_line[i] == \"=\":\n equal_idx = i\n if self.curr_line[i] == \";\":\n semicolon_idx = i\n if equal_idx and semicolon_idx:\n dest = self.curr_line[0:equal_idx]\n comp = self.curr_line[equal_idx + 1: semicolon_idx]\n jump = self.curr_line[semicolon_idx + 1:]\n elif equal_idx and not semicolon_idx:\n dest = self.curr_line[0:equal_idx]\n comp = self.curr_line[equal_idx + 1:]\n elif not equal_idx and semicolon_idx:\n comp = self.curr_line[0:semicolon_idx]\n jump = self.curr_line[semicolon_idx + 1:]\n self.ctriple = (dest, comp, jump)\n\n def dest(self):\n \"\"\"Returns the destination mnemonic for the current C-command\"\"\"\n if self.ctriple:\n return self.ctriple[0]\n self._cinstruction()\n return self.ctriple[0]\n\n def comp(self):\n \"\"\"Returns the comp mnemonic for the current C-command\"\"\"\n if self.ctriple:\n return self.ctriple[1]\n self._cinstruction()\n return self.ctriple[1]\n\n def jump(self):\n \"\"\"Returns the jump mnemonic for the current C-command\"\"\"\n if self.ctriple:\n return self.ctriple[2]\n self._cinstruction()\n 
return self.ctriple[2]\n\n\nclass Code:\n def __init__(self):\n self.dest_map = {'': '000',\n 'M': '001',\n 'D': '010',\n 'A': '100',\n 'MD': '011',\n 'AM': '101',\n 'AD': '110',\n 'AMD': '111'}\n\n self.comp_map = {'0': '0101010',\n '1': '0111111',\n '-1': '0111010',\n 'D': '0001100',\n 'A': '0110000',\n '!D': '0001101',\n '!A': '0110001',\n '-D': '0001111',\n '-A': '0110011',\n 'D+1': '0011111',\n 'A+1': '0110111',\n 'D-1': '0001110',\n 'A-1': '0110010',\n 'D+A': '0000010',\n 'D-A': '0010011',\n 'A-D': '0000111',\n 'D&A': '0000000',\n 'D|A': '0010101',\n 'M': '1110000',\n '!M': '1110001',\n '-M': '1110011',\n 'M+1': '1110111',\n 'M-1': '1110010',\n 'D+M': '1000010',\n 'D-M': '1010011',\n 'M-D': '1000111',\n 'D&M': '1000000',\n 'D|M': '1010101'}\n\n self.jump_map = {'': '000',\n 'JGT': '001',\n 'JEQ': '010',\n 'JGE': '011',\n 'JLT': '100',\n 'JNE': '101',\n 'JLE': '110',\n 'JMP': '111'}\n\n def dest(self, s):\n return self.dest_map[s]\n\n def comp(self, s):\n return self.comp_map[s]\n\n def jump(self, s):\n return self.jump_map[s]\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"usage: assembler.py filename.txt\")\n else:\n with open(sys.argv[1]) as f:\n lines = f.readlines()\n lines = [line.partition('//')[0].strip() for line in lines]\n lines = [line for line in lines if line]\n parser = Parser(lines)\n\n code = Code()\n with open('Prog.hack', 'w') as f:\n while True:\n if parser.command_type() == 'A_COMMAND':\n s = parser.symbol()\n try:\n int(s)\n except ValueError: # it's a labelled @\n s = parser.symbol_table[s]\n bin_val = bin(int(s))\n value = str(bin_val)[2:]\n while len(value) < 15:\n value = '0' + value\n instruction = '0' + str(value)\n elif parser.command_type() == 'C_COMMAND':\n instruction = '111' + code.comp_map[parser.comp()] + code.dest_map[parser.dest()] + code.jump_map[\n parser.jump()]\n if instruction:\n f.write(instruction + '\\n')\n\n if not parser.has_more_commands():\n break\n parser.advance()\n\n"
}
] | 17 |
solrac7/docker-app | https://github.com/solrac7/docker-app | e519945d1d5ae435086a19c7c1b7b870e51a3209 | 5c76a13f19a323b9e311672e7358a51a7465b027 | 634fd82d4b22effee94d3897bb810eede0807209 | refs/heads/master | 2020-03-25T07:54:10.878881 | 2016-12-26T18:06:06 | 2016-12-26T18:06:06 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7244516015052795,
"alphanum_fraction": 0.7273943424224854,
"avg_line_length": 21.5180721282959,
"blob_id": "c5542d62c1080568a9002a980fccbc5082f6a9f6",
"content_id": "6aa8f80f55fc0e73191aee63f25a23e300b8e2af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3738,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 166,
"path": "/README.md",
"repo_name": "solrac7/docker-app",
"src_encoding": "UTF-8",
"text": "# docker-app\n\nEase your flow with docker-compose.\n\n# Description\n\ndocker-app is a small python script builded as a wrapper of docker-compose and its only goal is to reduce your typing and save your precious time.\n\n# Installation\n\nThe easy way (using pip):\n\n```\npip install docker-app\n```\n\nIf everything goes ok, you should have a *docker-app* command\n\nYou also can install the command line utility directly using setuptools:\n\n```\ngit clone https://github.com/diegoacuna/docker-app.git\ncd docker-app\npython setup.py install\n```\n\n## Use cases\n\nBy default (as in docker-compose), the name of the main container of an app is assumed to be the name of the actual working directory. You can change this behavior configuring the attribute *main* of the docker-app.yml file:\n\n```yaml\nmain: 'name_of_container'\n```\n\nThe file docker-app.yml should be placed on the root directory of your app. You can use docker-app without having a docker-app.yml file, docker-app is going to work with the default configuration (as explained above).\n\n### A set of dependent apps\n\nSuppose you have several docker-compose apps that interacts with each other:\n\n * my_app_1\n * my_app_2\n * my_app_3 => this app depends on my_app_2 and my_app_1\n\n**Without docker-app**\n\n```\n# on directory of my_app_1\ndocker-compose up -d\n# now on directory of my_app_2\ndocker-compose up -d\n# now on directory of my_app_3\ndocker-compose up -d\n```\n\n**With docker-app**\n\nYou define a set of dependencies in my_app_3 using a docker-app.yml file:\n\n```yaml\ndependencies:\n - my_app_1\n - my_app_2\n```\n\nand now:\n\n```\ndocker-app up\n```\n\ndocker-app is going to do a 'up -d' in all dependencies (in order of appearance) and then in the actual app.\n\nYou can also do:\n\n```\ndocker-app stop\n```\n\nBy default, this command only stops the containers from the docker-compose.yml file in the actual application. If you want to stop the actual app with all the containers in its dependencies, run:\n\n```\ndocker-app stop --all\n```\n\nThe same is valid for the restart command:\n\n```\ndocker-app restart # or restart --all for all the dependencies\n```\n\n### Executing stuff on containers\n\nYou can use the *exec* command of docker-app:\n\n```\ndocker-app exec command\ndocker-app exec command -c another_container # run the command in another container\n```\n\n### Launching a bash console in a container\n\n**Without docker-app**\n\n```\ndocker-compose exec container_name bash\n```\n\n**With docker-app**\n\n```\ndocker-app bash\n```\n\nThe last example is going to assume that the container to use has the same name that the actual directory where you executed docker-app. 
If you want to specify a specific container, you can use the *-c* (o **--container**) flag:\n\n```\ndocker-app bash -c other_container_in_docker_compose\n```\n\nNOTE: if you want to launch a command using exec bash (as in the original docker-compose) you need to use the exec wrapper of docker-app (because the -c flag on bash is going to interfere with the -c flag on docker-app):\n\n```\ndocker-app exec bash \"-c bundle exec rails console\"\ndocker-app exec bash \"-c bundle exec rails console\" -c another_container\n```\n\n### Ruby/Rails integration\n\ndocker-app detects if the current app has a Gemfile and allows for a more concise syntax when using bundler or rails:\n\n**Without docker-app**\n\n```\ndocker-compose exec container_name bundle install\n```\n\n**With docker-app**\n\n```\ndocker-app bundle install\n```\n\n**Without docker-app**\n\n```\ndocker-compose exec container_name bundle exec rails console\n```\n\n**With docker-app**\n\n```\ndocker-app rails console\n```\n\nNote that every rails command is executed using *bundle exec*.\n\nThe *-c* (*--console*) flag is available to the bundle/rails shortcut also:\n\n```\ndocker-app rails console -c another_container\n```\n\n# TODO\n\n - test!\n"
},
{
"alpha_fraction": 0.6536984443664551,
"alphanum_fraction": 0.6573718190193176,
"avg_line_length": 43.02941131591797,
"blob_id": "827ab0e631dc1428d46b2e794dc1a4586149d3f5",
"content_id": "cdb9caf68e60537596395621101d22683f7bbb50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5989,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 136,
"path": "/bin/docker-app",
"repo_name": "solrac7/docker-app",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n docker-app v0.1.2\n author: [email protected]\n\n docker-app allows to define external docker-compose dependencies\n on a docker-compose app. Also, detects (for now) ruby/rails environments\n so an user can write rails command in a container using a more concise syntax.\n\"\"\"\nfrom contextlib import contextmanager\nfrom argparse import RawTextHelpFormatter\nimport os\nimport sys\nimport yaml\nimport logging\nimport argparse\nimport subprocess\n\ndef execute_cmd(cmd, options=None):\n params_list = [cmd] if options is None else [cmd] + options\n p = subprocess.Popen(params_list, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error = p.communicate()\n return output, error, p.returncode\n\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n\ndef execute_in_containers(cmd_params, all_containers=True):\n if all_containers:\n if 'dependencies' in parsed:\n for service in parsed['dependencies']:\n dwd = pwd.replace(dc_name, service)\n with cd(dwd):\n output, error, rcode = execute_cmd('docker-compose', cmd_params)\n if rcode == 0:\n logging.info(\"ON SERVICE {0}:\\n{1}\\n{2}\".format(service, output, error))\n else:\n logging.info(error)\n sys.exit(-1)\n # now we execute the command in the current dir\n return execute_cmd('docker-compose', cmd_params)\n\n# cons values\nCONF_FILE = 'docker-app.yml'\nUP_CMD = ['up', '-d']\nSTOP_CMD = ['stop']\nRESTART_CMD = ['restart']\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n # valid arguments for command line parsing\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, epilog=\"Available Actions:\\n\" + \\\n \"- up: start the container with all their dependencies (in background mode, no need to specify --all)\\n\" + \\\n \"- stop: stop the container (with all their dependencies if --all is specified)\\n\" + \\\n \"- restart: restart the container (with all their dependencies if --all is specified)\\n\" + \\\n \"- rails: if the software detects a rails app, execute the given rails command\\n\" + \\\n \"- bash: start the bash console in the main container\")\n parser.add_argument(\"action\", help=\"Action to execute\")\n parser.add_argument('command', nargs='?', default='console', help=\"Aditional command to execute (for example using the rails action)\")\n parser.add_argument('-a', '--all', action='store_true', help='Apply the action to all containers (including dependencies)')\n parser.add_argument('-c', '--container', help='Container where the action is going to be executed (for actions like bash or rails). 
' + \\\n 'If it is not specified, then the main container (from config file or directory name) is used.')\n\n # by default we want to show argparse help messages\n if len(sys.argv)==1:\n parser.print_help()\n sys.exit(1)\n # get cmd arguments\n args = parser.parse_args()\n\n pwd = os.getcwd()\n # this is the name of the actual service, if no main is specified in the config\n # file, then we assume that this is the main container in the docker-compose file\n dc_name = pwd.split(\"/\")[-1]\n\n parsed = {} # by default we have no properties\n # we look for a docker-app.yml file\n if os.path.isfile(CONF_FILE):\n with open(CONF_FILE) as f:\n content = f.read()\n try:\n parsed = yaml.load(content)\n except yaml.scanner.ScannerError as e:\n logging.error(\"ERROR: {0}\".format(e).format(e))\n # if we have a main property, then that is the main container in the docker-compose file\n main_container = parsed['main'] if 'main' in parsed else dc_name\n # proccess the current action\n if args.action == 'up':\n output, error, rcode = execute_in_containers(UP_CMD)\n logging.info(\"ON SERVICE {0}:\\n{1}\\n{2}\".format(dc_name, output, error))\n elif args.action == 'stop':\n output, error, rcode = execute_in_containers(STOP_CMD, all_containers=args.all)\n logging.info(\"ON SERVICE {0}:\\n{1}\\n{2}\".format(dc_name, output, error))\n elif args.action == 'restart':\n output, error, rcode = execute_in_containers(RESTART_CMD, all_containers=args.all)\n logging.info(\"ON SERVICE {0}:\\n{1}\\n{2}\".format(dc_name, output, error))\n elif args.action == 'bash':\n if args.container:\n main_container = args.container\n sys.exit(subprocess.call(['docker-compose', 'exec', main_container, 'bash']))\n elif args.action in ['bundle', 'rails']:\n # first we check for a Gemfile\n if os.path.isfile('Gemfile'):\n logging.debug(\"Found a bundler configuration!\")\n if args.container:\n main_container = args.container\n base_cmd = ['docker-compose', 'exec', main_container]\n if args.action == 'bundle':\n params = base_cmd + [args.action] if args.command is None else base_cmd + [args.action, args.command]\n logging.info(\"EXECUTING: \" + \" \".join(params))\n sys.exit(subprocess.call(params))\n if args.action == 'rails':\n params = base_cmd + ['bundle', 'exec', args.action] if args.command is None else base_cmd + ['bundle', 'exec', args.action, args.command]\n logging.info(\"EXECUTING: \" + \" \".join(params))\n sys.exit(subprocess.call(params))\n else:\n logging.info(\"No Gemfile in the actual directory.\")\n elif args.action == 'exec':\n # this is the general exec command, its only a wrapper to the original exec in docker-compose\n if not args.command:\n logging.error(\"ERROR: You need to specify a command to execute\")\n else:\n if args.container:\n main_container = args.container\n params = ['docker-compose', 'exec', main_container, args.command]\n logging.info(\"EXECUTING: \" + \" \".join(params))\n sys.exit(subprocess.call(params))\n else:\n logging.error(\"Command not found!\")\n\n"
}
] | 2 |
zhenxiforever/note | https://github.com/zhenxiforever/note | 21cb30831c2bafe2b29c0cf0c9d58dca60a7ca6e | ceb210646f3bde6fc43e59846e97a8fb0b411c1c | 74e484297af3488de7b99ee1f774b0b320c4b16d | refs/heads/master | 2020-04-10T19:09:16.275282 | 2016-04-13T07:17:22 | 2016-04-13T07:17:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6230068206787109,
"alphanum_fraction": 0.625284731388092,
"avg_line_length": 23.38888931274414,
"blob_id": "17181e75eb5d1852a94877934f886307fab7f0d2",
"content_id": "1419b3091840639775cc8bdd7bd0411667542ae5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 878,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 36,
"path": "/annotation/src/main/java/com/utils/UtilsDao.java",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "package com.utils;\n\nimport java.util.List;\nimport java.util.Map;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.jdbc.core.JdbcTemplate;\n\npublic class UtilsDao {\n \n private Logger logger = LoggerFactory.getLogger(UtilsDao.class);\n private JdbcTemplate jdbcTemplate;\n \n public List<Map<String, Object>> getTables() {\n try {\n String sql = \"show tables\";\n return jdbcTemplate.queryForList(sql);\n } catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n logger.debug(\"getTables failed.\" + e);\n }\n \n return null;\n }\n \n \n public void setJdbcTemplate(JdbcTemplate jdbcTemplate) {\n this.jdbcTemplate = jdbcTemplate;\n }\n \n public JdbcTemplate getJdbcTemplate() {\n return jdbcTemplate;\n }\n}\n"
},
{
"alpha_fraction": 0.4360465109348297,
"alphanum_fraction": 0.4767441749572754,
"avg_line_length": 20.375,
"blob_id": "953434ee5f9a320914362068137b237beea4ada7",
"content_id": "d38f56bf988d4f9a746f67690b924158a9fed40e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/python/show9_9.py",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/evn python\n# -*- coding: UTF-8 -*-\n\nfor i in range(1, 10):\n line = ''\n for j in range(1, i + 1):\n line = line + \"%d*%d = %2d \" % (j, i, j*i)\n print line\n\n"
},
{
"alpha_fraction": 0.6304985284805298,
"alphanum_fraction": 0.6304985284805298,
"avg_line_length": 20.0625,
"blob_id": "47513af1cdf787027f76080cfccd7df1042499b7",
"content_id": "8932251d193a61688f97d4022866b7a6aab50396",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/annotation/README.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "# Java Conn MySQL \n\n#### Dev \n\n``` \n$ git clone https://github.com/FOuR-/conn_mysql_common_exec.git \nconfig\n$ cd conn_mysql_common_exec\n$ vim src/main/resources/applicationContext.xml \n``` \n#### Run \n\n``` \n$ mvn clean compile package \n$ java -cp target/conn_mysql_common_exec-dist.jar com.application.AppTest \n``` \n\n\n"
},
{
"alpha_fraction": 0.8276056051254272,
"alphanum_fraction": 0.8309859037399292,
"avg_line_length": 27.580644607543945,
"blob_id": "317b5ff80190559faabd4e95af1d6c29ebb32568",
"content_id": "f65b81955755f155d644cf6271a9938eecbb0e1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2991,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 62,
"path": "/QA/Java.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "### Java 基础\n\n* String类为什么是final的 \n``` \n1. 安全: \n2. 性能: 不可变的数据使得线程安全高可用\n``` \n\n* HashMap的源码,实现原理,底层结构\n\n\n* 说说你知道的几个Java集合类:list、set、queue、map实现类\n\n\n* 描述一下ArrayList和LinkedList各自实现和区别\n\n* Java中的队列都有哪些,有什么区别\n\n* 反射中,Class.forName和classloader的区别\n``` \n共同点:两者都会将用户指定的类加载到内存中,供用户使用 \n不同点:\nClass的装载分了三个阶段,loading,linking和initializing\n\nClass.forName有两个重载的方法\nClass.forName方法有要不要初始static变量的参数,而ClassLoader.loadClass()没有\n\n\nClass.forName(className) 实际上是调用Class.forName(className, true, this.getClass().getClassLoader())。注意第二个参数,是指Class被loading后是不是必须被初始化。\nClassLoader.loadClass(className)实际上调用的是ClassLoader.loadClass(name, false),第二个参数指出Class是否被link\n一般情况下,这两个方法效果一样,都能装载Class。但如果程序依赖于Class是否被初始化,就必须用Class.forName(name)了 \n```\n\n* Java内存泄露的问题调查定位:jmap,jstack的使用等等\n\n* string、stringbuilder、stringbuffer区别\n```\n1. 可变与不可变:\nString类中使用字符数组保存字符串,如下就是,因为有“final”修饰符,所以可以知道string对象是不可变的。\n private final char value[];\nStringBuilder与StringBuffer都继承自AbstractStringBuilder类,在AbstractStringBuilder中也是使用字符数组保存字符串,如下就是,可知这两种对象都是可变的。\n char[] value; \n2. 是否多线程安全\nString中的对象是不可变的,也就可以理解为常量,显然线程安全\nAbstractStringBuilder是StringBuilder与StringBuffer的公共父类,定义了一些字符串的基本操作,如expandCapacity、append、insert、indexOf等公共方法\nStringBuffer对方法加了同步锁或者对调用的方法加了同步锁,所以是线程安全的, 看如下源码:\npublic synchronized StringBuffer reverse() {\n super.reverse();\n return this;\n}\n\npublic int indexOf(String str) {\n return indexOf(str, 0); //存在 public synchronized int indexOf(String str, int fromIndex) 方法\n}\n\nStringBuilder并没有对方法进行加同步锁,所以是非线程安全的\n\n3. StringBuilder与StringBuffer共同点:\nStringBuilder与StringBuffer有公共父类AbstractStringBuilder(抽象类)\n抽象类与接口的其中一个区别是:抽象类中可以定义一些子类的公共方法,子类只需要增加新的功能,不需要重复写已经存在的方法;而接口中只是对方法的申明和常量的定义\nStringBuilder、StringBuffer的方法都会调用AbstractStringBuilder中的公共方法,如super.append(...) 只是StringBuffer会在方法上加synchronized关键字,进行同步\n```\n\n\n\n"
},
{
"alpha_fraction": 0.3415760099887848,
"alphanum_fraction": 0.6122651100158691,
"avg_line_length": 43.588233947753906,
"blob_id": "cef8cd2f5b25c77709d0027f47f653cffedc0121",
"content_id": "dc8a3f80c878c396b9dcfac52125d20c821da578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3191,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 68,
"path": "/script/邮件发送问题排查.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "邮件发送失败问题排查 \n==================== \n\n\n* jstack 查看堆栈信息(RUNNABLE) \n\n```bash \n$ jstack 85869 | less \n$ printf \"%d\\n\" 0x1450a -- 对应进程ID(83210) \n```\n\n* 查看TCP相关信息 \n\n```bash \n$ lsof -p 85869 | grep TCP \n$ ss -atnp | grep :25 (查看邮件相关网络请求) \n$ ss -atnp | grep :25 | grep 223.252.214\nESTAB 0 69120 10.126.97.98:39905 223.252.214.64:25 users:((\"java\",85869,195))\nESTAB 0 288000 10.126.97.98:30519 223.252.214.64:25 users:((\"java\",85869,185))\nESTAB 0 43835 10.126.97.98:58921 223.252.214.65:25 users:((\"java\",85869,59))\nESTAB 0 201600 10.126.97.98:50990 223.252.214.65:25 users:((\"java\",85869,183))\nESTAB 0 276480 10.126.97.98:51453 223.252.214.65:25 users:((\"java\",85869,186))\nESTAB 0 243360 10.126.97.98:31148 223.252.214.64:25 users:((\"java\",85869,181))\n\n$ lsof -p 85869 | grep TCP | grep smtp \njava 85869 root 59u IPv4 96189338 0t0 TCP tjtx-97-98.58os.org:58921->mail0401.icoremail.net:smtp (ESTABLISHED)\njava 85869 root 181u IPv4 96062679 0t0 TCP tjtx-97-98.58os.org:31148->mail0402.icoremail.net:smtp (ESTABLISHED)\njava 85869 root 183u IPv4 96036545 0t0 TCP tjtx-97-98.58os.org:50990->mail0401.icoremail.net:smtp (ESTABLISHED)\njava 85869 root 185u IPv4 96047561 0t0 TCP tjtx-97-98.58os.org:30519->mail0402.icoremail.net:smtp (ESTABLISHED)\njava 85869 root 186u IPv4 96047309 0t0 TCP tjtx-97-98.58os.org:51453->mail0401.icoremail.net:smtp (ESTABLISHED)\njava 85869 root 195u IPv4 96240773 0t0 TCP tjtx-97-98.58os.org:39905->mail0402.icoremail.net:smtp (ESTABLISHED)\n\n$ tcpdump -A -nn host 223.252.214.65 and port 25\n$ tcpdump -i any -A -nn host 223.252.214.65 and dst|src port 25\n\n```\n\n\n* 查看子进程详情信息\n\n```bash \n$ lsof -p 83210 \n$ strace -p 83210 \nsendto(195, \"7JOIG5jMqlSTGQo1\\r\\ncdnx0tHxAWoauG\"..., 6570, 0, NULL, 0) = 6570\nsendto(195, \"jFpOF0zfxyF3I8Zd/v/TtH+vJhng1yiP\"..., 8190, 0, NULL, 0) = 8190\nread(196, \"\\223\\32\\0Hm\\36\\227\\230\\35\\0R\\233\\307\\225\\377\\310\\1\\2446\\217K\\314/\\0\\351)\\36\\227\\350\\346!H\"..., 8192) = 8192\nsendto(195, \"95EWryyG0blXjukGJwjLFb38OFTO0R5r\"..., 8190, 0, NULL, 0) = 8190\nread(196, \"\\10\\342\\271\\360?a{\\201\\7p\\37\\272\\330\\36\\240kj\\177\\225\\303\\325\\364}U\\335\\177\\236\\22\\240k\\272\\375\"..., 8192) = 8192\nsendto(195, \"HxyjfNV3Yz2RBmgq7JZxCMUJ+rDu/C6t\"..., 8190, 0, NULL, 0 \n\n$ lsof -p 85869 | grep TCP | grep 195\njava 85869 root 195u IPv4 96240773 0t0 TCP tjtx-97-98.58os.org:39905->mail0402.icoremail.net:smtp (ESTABLISHED) \n\n$ lsof -p 85869 | grep 196\njava 85869 root 196r REG 8,10 14009262 18481291 /data1/dw_monitor/mini/excel/太平洋数据.xlsx\n\n$ tcpdump -i any port 39905 \n\n```\n\n* 配置代理转发 \n\n```bash \nroot@cp:~# /usr/lib/autossh/autossh -M 0 -L 0.0.0.0:25:stmp.xxx.com:25 localhost\n修改hosts\n\n10.249.14.41 smtp.xxx.com //如果配置后又要注释,看看是否有nscd服务吃住缓存\n```\n\n"
},
{
"alpha_fraction": 0.7329699993133545,
"alphanum_fraction": 0.7329699993133545,
"avg_line_length": 32.3636360168457,
"blob_id": "95d80ea50722bbf5fb64931285efbf7e65aed2a5",
"content_id": "8ee5936163456e7a92c011382d39ab2d1fa44846",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 11,
"path": "/cloudera/uninstall.sh",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n\n/usr/share/cmf/uninstall-cloudera-manager.sh\n\napt-get purge 'cloudera-manager-*'\n\numount cm_processes\nrm -Rf /usr/share/cmf /var/lib/cloudera* /var/cache/yum/cloudera* /var/log/cloudera* /var/run/cloudera*\n\nrm -Rf /var/lib/flume-ng /var/lib/hadoop* /var/lib/hue /var/lib/navigator /var/lib/oozie /var/lib/solr /var/lib/sqoop* /var/lib/zookeeper\n"
},
{
"alpha_fraction": 0.6833333373069763,
"alphanum_fraction": 0.6833333373069763,
"avg_line_length": 11,
"blob_id": "53c324f8d4c4ace1fa325f327372d0f0734d3213",
"content_id": "fa178f496bf60ef7590af62757db521e96a31f0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/supervisor/README.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "## Local supervisor config\n\n\n* supervisord.conf \n* conf.d/\n"
},
{
"alpha_fraction": 0.6350998878479004,
"alphanum_fraction": 0.6620330214500427,
"avg_line_length": 16.953125,
"blob_id": "a4ae8d08b3be02bb7d01ce77339a07cf2ac6ac67",
"content_id": "b35b57215be2dad5e7965d22e911179d58880885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1163,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 64,
"path": "/script/vps.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "* vps config\n\n```bash\n$ dpkg-reconfigure tzdata //时区\n// 字符编码\n$ apt-get install locales\n$ \n```\n\n\n* prepare \n\n```bash \n$ apt-get install vim\n\n$ grep -v ^PasswordAuthentication /etc/ssh/sshd_config | grep -v Port > /tmp/sshd_config\n$ echo \"Port 50070\" >> /tmp/sshd_config\n$ echo \"PasswordAuthentication no\" >> /tmp/sshd_config\n$ service ssh restart\n\n$ apt-get install python-pip\n$ pip install shadowsocks \n$ apt-get install supervisor\n```\n\n* config server\n\n```bash \n$ vim /etc/supervisor/conf.d/ss.conf \n\n[program:ss]\ncommand=/usr/local/bin/ssserver -c /root/ss.conf -vv --user nobody\nstdout_logfile=/var/log/ss.log\nstderr_logfile=/var/log/ss.err\nstdout_logfile_maxbytes=50MB\nstderr_logfile_maxbytes=50MB\nautorestart=true\nautostart=true\n\n$ vim /root/ss.conf\n\n{\n \"server_port\":8080,\n \"password\":\"{$yourpassword}\",\n \"method\":\"aes-256-cfb\"\n}\n``` \n\n* config local\n\n``` \n$ sudo apt-get install shadowscoks\n$ sslocal -c ss.conf -b 0.0.0.0 -d start --pid-file /tmp/ss.pid --log-file /tmp/ss.log\n\n$ cat ss.conf\n{\n \"server\":\"${serverip}\",\n \"server_port\":8080,\n \"local_port\":1080,\n \"password\":\"${yourpassword}\",\n \"method\":\"aes-256-cfb\"\n}\n\n```\n\n\n"
},
{
"alpha_fraction": 0.5600794553756714,
"alphanum_fraction": 0.5829195380210876,
"avg_line_length": 19.742267608642578,
"blob_id": "7f83a29972321ac572185b4dc2e41ec3acad1e0b",
"content_id": "ab87f916ded8b8fadf9db55f77ca78484bf348f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2264,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 97,
"path": "/script/Linux.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "* 文件查找 \n\n``` \n$ find ~/dwetl/ -name \"*.sh\" -exec grep \"dw_hadoop_summary_touch_city_sd\" -l {} \\;\n$ find ~/dwetl/ -name \"*.mxm\" -exec grep \"dw_hadoop_summary_touch\" -l {} \\;\n``` \n\n* 查看端口 \n\n```\n$ lsof -n -P | grep 8018\n$ netstat -an | grep 80 \n$ netstat -nltp \n``` \n\n* sftp向服务器上传文件 \n\n``` \n$ sftp host@ip\n> put file \n``` \n\n* 查看JVM 情况\n\n```\n$ jstat -gcutil $pid 1s\n看看 E 那一列是否一直 hang 在100, 或者hang在一个值不变\n如果hang住,去看看 jvm 的进程\n```\n\n* 查找文件并删除 \n\n``` \n$ [sudo] find /path/ -name \".DS_Store\" -depth -exec rm {} \\;\n$ [sudo] find . -name \"*.DS_Store\" -type f -delete\n``` \n\n* 配置ssh \n\n```\n$ ssh-keygen -t rsa //生成key\n$ ssh-add //秘钥加入ssh-agent \n$ ssh-add -l //查看\n$ cat ~/.ssh/id_rsa.pub >> authorized_keys\n$ vim ~/.ssh/config\nHost mid\n User saboloh //配置默认用户\n``` \n\n* 查看占用空间 \n\n``` \nls / | grep -vP '(proc)' | awk '{print \"du -s -h /\" $1}' | bash \n``` \n\n* 查看源 \n\n```bash \n$ ldd /usr/bin/nginx\n$ ldconfig -p | grep odbc\n$ vim /etc/ld.so.conf.d/usr.conf \n\n# 编译\n$ export CFLAGS=-m32 LDFLAGS=-m32 CXXFLAGS=-m32\n$ ./configure\n$ vim libtool\n$ make -j 4 \n$ make install \n```\n\n# 同步目录\n\n```bash \n$ rsync -a -f\"+ */\" -f\"- *\" /data/ [email protected]:/data/\n``` \n\n# wget 下载JDK\n\n```bash \n$ wget --no-check-certificate --no-cookies --header \"Cookie: oraclelicense=accept-securebackup-cookie\" http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz\n```\n\n# JDK环境变量 \n\n```bash \n$ sudo vim /etc/profile.d/jdk7.sh\n#!/bin/bash\nexport JAVA_HOME=/usr/lib/jvm/java-7-oracle-cloudera\nexport JRE_HOME=$JAVA_HOME/jre\nexport PATH=$JAVA_HOME/bin:$PATH\nexport CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib/rt.jar\n\n//或者采用update-alternatives\n$ update-alternatives --display|--config java //可查看已有的信息(列表,优先级)\n$ update-alternatives --install /usr/bin/java java /usr/lib/jvm/java-7-oracle-cloudera/bin/java 316 // 316-优先级\n$ update-alternatives --config java // 回车通过序列号选择\n```\n\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 13.399999618530273,
"blob_id": "b1b282f4bd98e177ade760edf6e374fbc9fdf8fb",
"content_id": "1e8f18e0a5f0e20eaef8ecd0d77dcf890e03f6dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 5,
"path": "/fluentd/README.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "# Local config fluentd\n\n* /bin/fluentd --step .\n* fluent.conf \n* plugin\n"
},
{
"alpha_fraction": 0.5253164768218994,
"alphanum_fraction": 0.5991561412811279,
"avg_line_length": 16.481481552124023,
"blob_id": "a41f4f9f7a52c59eaff3a6bccbb283560899e969",
"content_id": "d347495f61e9fab499fc2a349831899c1d5e922a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 54,
"path": "/script/socks.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "Create proxy \n============\n\n\n* use ssh \n```bash \nssh -f -N -D 0.0.0.0:8081 localhost \n``` \n\n* use node.js \n * app.js\n * index.html\n```js \nvar express = require('express');\nvar path = require('path');\nvar app = express();\n\n\napp.use(express.static(path.join(__dirname, 'public')));\n\napp.listen(3000, function () {\n console.log('Example app listening on port 3000!');\n});\n\n``` \n```js\nfunction FindProxyForURL(url, host) {\n if (host.indexOf('10.') == 0 ||\n host.indexOf('tjtx-') == 0 ||\n host.indexOf('db20-') == 0 ||\n host.indexOf('ods10-') == 0\n ) {\n return \"SOCKS 10.249.14.41:8081;\";\n }\n return \"SOCKS 10.249.14.41:8081;\";\n} \n```\n\n* use supervisor\n\n```bash \n[program:app]\ndirectory = /home/saboloh/dwetl/socks\ncommand = /home/saboloh/developer/node-v5.1.0-linux-x64/bin/node app.js\nuser = root\nstdout_logfile = /home/saboloh/dwetl/socks/log\nredirect_stderr = true\n```\n\n* done & use\n\n```\nhttp://10.249.14.41:3000\n```\n\n\n\n\n"
},
{
"alpha_fraction": 0.4910461902618408,
"alphanum_fraction": 0.5063148140907288,
"avg_line_length": 32.15625,
"blob_id": "7f6c59ef957bd7b5ef87d00c1b725c47c5213299",
"content_id": "101964469a2d62e6ee4e2b9b7c3494c4e9b4fb7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Maven POM",
"length_bytes": 5305,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 160,
"path": "/annotation/pom.xml",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n \n <groupId>conn_mysql_common_exec</groupId>\n <artifactId>conn_mysql_common_exec</artifactId>\n <version>0.0.1-SNAPSHOT</version>\n \n <name>conn_mysql_common_exec Maven Webapp</name>\n <url>http://maven.apache.org</url>\n \n <properties>\n <spring.version>4.1.4.RELEASE</spring.version>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n </properties>\n\n <dependencies>\n <dependency>\n <groupId>org.springframework</groupId>\n <artifactId>spring-core</artifactId>\n <version>${spring.version}</version>\n </dependency>\n\n <dependency>\n <groupId>commons-logging</groupId>\n <artifactId>commons-logging</artifactId>\n <version>1.1.1</version>\n </dependency>\n\n <dependency>\n <groupId>org.springframework</groupId>\n <artifactId>spring-context</artifactId>\n <version>${spring.version}</version>\n </dependency>\n\n <dependency>\n <groupId>org.springframework</groupId>\n <artifactId>spring-jdbc</artifactId>\n <version>${spring.version}</version>\n </dependency> \n\n <dependency>\n <groupId>ch.qos.logback</groupId>\n <artifactId>logback-classic</artifactId>\n <version>1.0.7</version>\n </dependency>\n\n <dependency>\n <groupId>javax.servlet</groupId>\n <artifactId>servlet-api</artifactId>\n <version>2.5</version>\n <scope>provided</scope>\n </dependency>\n\n <dependency>\n <groupId>mysql</groupId>\n <artifactId>mysql-connector-java</artifactId>\n <version>5.1.34</version>\n </dependency>\n\n <dependency>\n <groupId>org.slf4j</groupId>\n <artifactId>slf4j-api</artifactId>\n <version>1.7.10</version>\n </dependency>\n \n <dependency>\n <groupId>org.json</groupId>\n <artifactId>json</artifactId>\n <version>20090211</version>\n </dependency>\n <dependency>\n <groupId>net.sf.json-lib</groupId>\n <artifactId>json-lib</artifactId>\n <version>2.3</version>\n <classifier>jdk15</classifier>\n </dependency>\n \n <dependency>\n <groupId>javax.persistence</groupId>\n <artifactId>persistence-api</artifactId>\n <version>1.0</version>\n </dependency>\n \n <dependency>\n <groupId>c3p0</groupId>\n <artifactId>c3p0</artifactId>\n <version>0.9.1.2</version>\n </dependency>\n \n <dependency>\n <groupId>commons-dbcp</groupId>\n <artifactId>commons-dbcp</artifactId>\n <version>1.4</version>\n </dependency>\n \n <dependency>\n <groupId>commons-pool</groupId>\n <artifactId>commons-pool</artifactId>\n <version>1.6</version>\n </dependency>\n <dependency>\n <groupId>commons-collections</groupId>\n <artifactId>commons-collections</artifactId>\n <version>3.2</version>\n </dependency>\n <dependency>\n <groupId>org.apache.commons</groupId>\n <artifactId>commons-exec</artifactId>\n <version>1.3</version>\n </dependency>\n \n <dependency>\n <groupId>junit</groupId>\n <artifactId>junit</artifactId>\n <version>3.8.1</version>\n <scope>test</scope>\n </dependency>\n </dependencies>\n \n <build>\n <finalName>conn_mysql_common_exec</finalName>\n <plugins>\n \n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-compiler-plugin</artifactId>\n <configuration>\n <source>1.6</source>\n <target>1.6</target>\n </configuration>\n </plugin>\n \n <plugin>\n <artifactId>maven-assembly-plugin</artifactId>\n <version>2.4</version>\n <executions>\n <execution>\n <id>dist</id>\n <phase>package</phase>\n <goals>\n <goal>single</goal>\n </goals>\n 
<configuration>\n <descriptors>\n <descriptor>assembly.xml</descriptor>\n </descriptors>\n <archive>\n <manifest>\n <addClasspath>true</addClasspath>\n <mainClass>com.application.App</mainClass>\n </manifest>\n </archive>\n </configuration>\n </execution>\n </executions>\n </plugin>\n </plugins>\n </build>\n</project>\n"
},
{
"alpha_fraction": 0.6358078718185425,
"alphanum_fraction": 0.6934497952461243,
"avg_line_length": 26.214284896850586,
"blob_id": "cc75e9b4d70c2eadacf0f222549f1cc626e0aa32",
"content_id": "c7c4b71f49f9930fe2613a2aeff1a07880169a3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2462,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 84,
"path": "/cloudera/cloudera-manager-install.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "[Cloudera Manager Install](http://www.cloudera.com/documentation/enterprise/latest/topics/cm_ig_install_path_a.html#cmig_topic_6_5_unique_2)\n==========================\n\n* 删除已有[cloudera manager](http://www.cloudera.com/documentation/enterprise/5-5-x/topics/cm_ig_uninstall_cm.html)\n\n* 查看系统版本 \n\n```\n$ lsb_release -a\nNo LSB modules are available.\nDistributor ID: Ubuntu\nDescription: Ubuntu 12.04.4 LTS\nRelease: 12.04\nCodename: precise\n```\n\n* 下载对应包并且配置本地源(避免使用cloudera-manager-installer.bin直接安装总是获取最新版CDH) \n\n```\n# 下载好tarball\n$ wget http://archive.cloudera.com/cm5/repo-as-tarball/5.4.7/cm5.4.7-ubuntu12-04.tar.gz\n$ tar -zxvf cm5.4.7-ubuntu12-04.tar.gz\n$ cp -r cm /var/spool/apt-mirror/mirror/archive.cloudera.com/\n\n# 使用nginx配置\n$ apt-get install nginx \n$ vim /etc/nginx/nginx.conf\n# 访问本地目录列表\nserver {\n listen 80;\n server_name dev-001;\n\n location / {\n root /var/spool/apt-mirror/mirror/archive.cloudera.com/;\n index index.html index.htm;\n autoindex on;\n autoindex_exact_size off;\n autoindex_localtime on;\n }\n\n error_page 500 502 503 504 /50x.html;\n location = /50x.html {\n root html;\n }\n}\n``` \n\n* 浏览确认是否能访问 \n\n![](https://raw.githubusercontent.com/jianle/note/master/cloudera/filelist.png)\n\n* 配置apt source\n\n```\n$ vim /etc/apt/source.list.d/my-private-cloudera-repo.list\n# Packages for Clouderas Distribution for Hadoop, Version 4, on Ubuntu 12.04 x86_64\ndeb [arch=amd64] http://dev-001/cm precise-cm5 contrib\ndeb-src http://dev-001/cm precise-cm5.4.7 contrib\n```\n\n* 下载对应[parcel](http://archive.cloudera.com/cdh5/parcels/)\n\n```\n找到对应自己系统的parcels\n$ wget http://archive.cloudera.com/cdh5/parcels/5.4.7/CDH-5.4.7-1.cdh5.4.7.p0.3-precise.parcel\n$ wget wget http://archive.cloudera.com/cdh5/parcels/5.4.7/CDH-5.4.7-1.cdh5.4.7.p0.3-precise.parcel.sha1\n$ wget http://archive.cloudera.com/cdh5/parcels/5.4.7/manifest.json\n\nsha1需修改成sha\n$ mv CDH-5.4.7-1.cdh5.4.7.p0.3-precise.parcel.sha1 CDH-5.4.7-1.cdh5.4.7.p0.3-precise.parcel.sha\n```\n\n* 下载安装[Cloudera Manager](https://archive.cloudera.com/cm5/installer/)\n\n``` \n$ wget https://archive.cloudera.com/cm5/installer/5.4.7/cloudera-manager-installer.bin\n```\n\n* 安装\n\n```\n$ chmod +x cloudera-manager-installer.bin\n$ ./cloudera-manager-installer.bin --skip_repo_package=1\n```\n\n\n\n\n"
},
{
"alpha_fraction": 0.7765012979507446,
"alphanum_fraction": 0.7785900831222534,
"avg_line_length": 26.724637985229492,
"blob_id": "5d71a480282bec048f3bb8880dc39f52f72ffc45",
"content_id": "75f268886072e4cdb60fa7634a313fc288f1c0e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3193,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 69,
"path": "/script/Linux环境变量.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "bash环境变量问题\n-----------\n\n* [bash手册](http://www.gnu.org/software/bash/manual/bashref.html#Bash-Startup-Files)\n\n\n如果是login的shell,那么bash在运行先执行 `/etc/profile` ;然后依次查找 `~/.bash_profile`, `~/.bash_login`, `~/.profile`;如果找到其中一个文件就执行之,后面的就不执行了.\n\n如果是interactive的非login shell,则先会执行 `/etc/bash.bashrc`,如果`~/.bashrc`存在的话,接着执行`~/.bashrc`.\n\n如果是非interactive的,且非login shell,则执行 $BASH_ENV 对应的shell.\n\nssh到远端的机器,远端机器上的bash会执行什么呢?如果是login shell,不用说,和第1种一样;如果是`ssh remote-machine 'cmd'`这种的(也就是我们一般shell脚本里面调用形式),则会执行`~/.bashrc`(远端机器上的bash在编译bash的时候define了SSH_SOURCE_BASHRC ).\n\n有些问题\n什么是interactive shell,什么是login shell\n\ninteractive shell就是顾名思义,有用户交互的,检查方法就是在命令行下执行 `echo $-`,如果有i这个选项,则就是interactive shell 了.\n\nlogin shell简单来说就是bash启动的时候加了 -l选项.\n\n举几个实际的例子:\n\ninteractive & login shell:\n\nlinux登录到真实的终端tty1\n\ninteractive & non-login shell:\n\n打开终端模拟器,在终端模拟器里面执行/bin/bash -i cmdfile\n\nnon-interactive & login shell\n\n在终端模拟器里面执行/bin/bash -l cmdfile\n\nnon-interactive & non-login shell:\n\n在终端模拟器里面执行/bin/bash cmdfile\n\n为什么我在手动在机器上可以ssh到远端机器的,但是在shell脚本里面不行?\n\n这种情形出现在ssh到远端机器的private key是有passphase 的,简单来说,ssh 到远端机器的时候,你还需要输入这个pass phase的,那为什么你一般ssh到远端机器的时候不要输入passphase呢,这个就是ssh-agent的功劳了,ssh通过环境变量中的SSH_AUTH_SOCK和SSH_AGENT_PID获得真实的private key。\n\n简单来说,因为这个时候有了SSH相关环境变量。这些环境变量一般在通过keychain设置的,而keychain一般是startup脚本里面执行的。 解决办法就是bash -l.\n\n骗人吧,按你的说法,ssh过去要执行~/.bashrc的,但是我~/.bashrc里面的命令没有执行\n\n应该.bashrc里面有一段话,[ -z \"$PS1\" ] && return 也就是说,如果是非interactive的时候,PS1不会被设置,然后就return了,后面的命令当然没有执行.\n\ninteractive & login 的shell怎么也执行 ~/.bashrc 了?\n\n看看 `~/.bash_profile`, `~/.bash_login`, `~/.profile` 这几个文件中,应该有写 \n\n```\nif [ -f \"$HOME/.bashrc\" ]; then\n . \"$HOME/.bashrc\"\nfi \n``` \nssh remote-machine 'cmd' 为什么不行啊?\n\n一样的道理,PATH环境变量不是在 ~/.bashrc里面设置的.\n\nssh remote-mache 'cmd' 的时候不执行~/.bashrc啊?\n\n编译bash的时候没有define SSH_SOURCE_BASHRC.\n\n我的cmdfile在shebang部分里面已经声明了#!/bin/bash -l 了,但是它表现的不是login shell\n\n不要使用 /bin/bash cmdfile 这种方式运行,要么使用cmdfile直接运行,要么使用/bin/bash -l cmdfile运行.\n\n\n"
},
{
"alpha_fraction": 0.6830313801765442,
"alphanum_fraction": 0.6948555111885071,
"avg_line_length": 18.729877471923828,
"blob_id": "19d7a7ec45644312502050f127ecb0030d6f2500",
"content_id": "a54036e0b5518c5826903f8e5b71360b0f5acf65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24187,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 733,
"path": "/git/MarkDown.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "MarkDown\n========\nMarkDown 的目标是易读易写。\n\nMarkdown 强调可读性高于一切。一份Markdown格式的文档应该能直接以纯文本方式发布,而不致一眼看过去满眼都是标签和格式化指令。Markdown的语法确实受了几种现有的text转HTML过滤器影响--包括 `Setext` , `atx`,` Textile`, `reStructuredText`,`Grutatext`, 和`EtText` -- 其中对Markdown语法影响最大的单一来源是纯文本的Email格式。\n\n为实现这一目标,Markdown的语法几乎全部由标点符号构成,这些标点符号都是精心挑选而来,尽量做到能望文生义。如星号括着一个单词(Markdown中表示强调)看上去就像 * 强调 *。Markdown的列表看上去就像列表;Markdown的引文就象引文,和你使用email时的感觉一样。\n\n###内嵌HTML\nMarkdown的语法为“方便地在网上写作”这一目标而生。\n\nMarkdown不是HTML替代品,也不是为了终接HTML。它的语法非常简单,只相当于HTML标签的一个非常非常小的子集。它并非是为了更容易输入HTML标签而创造一种新语法。在我看来,HTML标签已经够容易书写的了。Markdown的目标是让(在网上)让读文章、写文章、修改文章更容易。HTML是一种适合发表的格式;而Markdown是一种书写格式。正因如此,Markdown的格式化语法仅需解决用纯文本表达的问题。\n\n对Markdown语法无法支持的格式,你可以直接用HTML。你不需要事先声明或者使用什么定界符来告诉Markdown要写HTML了,你直接写就是了。\n\n唯一的限制是那些块级HTML元素 -- 如**`<div>`**,**`<table>`**,**`<pre>`**,**`<p>`**等等 -- 必须使用空行与相邻内容分开,并且块元素的开始和结束标签之前不要留有空格或TAB。Markdown足够聪明不会添加额外的(也是不必要的)**`<p>`**标签包住这些块元素标签。\n\n下面这个例子,在一篇Markdown文章中添加了一个HTML表格:\n \n ```\n 这是一个普通的段落。\n \n <table> \n <tr>\n <td>Foo</td>\n </tr>\n </table>\n ```\n \n 注意一点,不要在块级HTML元素内使用Markdown格式化命令,Markdown不会处理它们。比如你不要在一个HTML块中使用 ` *emphasis*`这样的Markdown格式化命令。\n \n 行内HTML标签 -- 如 **`<span>`**, **`<cite>`**, 或 **`<del>`** -- 在一个Markdown段落里、列表中、或者标题中--随便用。 如果需要,你甚至可以用HTML标签代替Markdown格式化命令。比方你可以直接用HTML标签 **`<a>`** 或 **`<img>`** 而不使用Markdown的链接和图片语法,随你的便。\n\n不同于这些块级HTML元素,在HTML行内元素内的Markdown语法标记会被正确处理\n\n###自动转换特殊字符\n\n在HTML中,有两个字符需要特殊对待:``<``和 ``&``。``<``用于标签开始,``&``用于标识HTML实体。如果打算把它们当成普通字符,你必须使用反引号转义它们,如``<``和``&``。\n\n对一些互联网作家来说,&符号特别使人烦恼。如果你打算写'AT&T',你就得写 'AT&T'。甚至在URL中也得想着转义&符号。比方你打算写:\n\n```\nhttp://images.google.com/images?num=30&q=larry+bird\n````\n\n你就得在A标签中把href属性中的URL编码成:\n\n```\nhttp://images.google.com/images?num=30&q=larry+bird\n```\n\n不用说,这很容易忘。这往往是那些良构HTML站点中最容易出错的地方。\n\n在Markdown中,你尽管自然的使用这些字符,只需要关心那些必要的转义。如果使用在HTML实体中使用&符号,它会保持不变;而在其它场合,它会转换成``&``。\n\n所以,如果你打算在文章中书写版权符号,你可以这样写:\n\n```\n©\n```\nMarkdown不会碰它。然而如果你书写\n\n```\nAT&T\n```\nMarkdown就会把它翻译成:\n\n```\nAT&T\n```\n类似的,既然Markdown支持内嵌HTML,如果你使用``<``作为HTML标签定界符,Markdown就会把它们当成HTML标签定界符。可是如果你书写:\n\n```\n4 < 5\n```\nMarkdown就会把它翻译成:\n\n```\n4<5\n```\n然而,在Mardown代码行内标记和块级标记之中,``<``和``&``始终会被自动编码。这使得在Markdown文件中书写HTML代码更容易.(相对于纯HTML。如果想在纯在纯HTML里贴一段HTML代码,那才是糟糕透顶,必须对代码中的每一个``<``和``&``都转义才成。)\n\n----\n\n##块级元素\n\n\n####段落和换行 \n\n一个段落由一行或多个相关文本行构成。段落之间用一个或多个空行分隔。(一个空行就是一个看上去什么也没有的行--如果一行什么也没有或者只有空格和TAB都会被视为空行)正常的段落不要以空白或TAB字符开始。\n\n一行或多个相关文本行意味着Markdown支持“硬折行”。这一点与其它text转HTML的程序完全不同(包括Moveable Type的“Convert Line Breaks”选项),它们会将段落中的每一个换行符转换成``<br />``标签。\n\n如果你确实需要使用Markdown插入一个``<br />``换行符,只需要在每一行的末尾以两个或更多个空格符号结束,然后再打回车键。\n\n没错,在Markdown里生成一个``<br />``稍稍有一点麻烦,但那种简单的“把每一个换行符都转换``<br />``规则”并不适用于Markdown。Markdown Email风格的 blockquoting 和 multi-paragraph list items更好用 -- 并且更美观 -- 在你用换行符对其格式化时。\n\n####标题\n\n\n\n<br>\nMarkdown 支持两种风格的标题,``Setext`` 和 ``atx``.\n\nSetext-风格的一级标题下面一行使用等号符号,二级标题下面使用连字符符号,例如:\n\n```\n这是一个一级标题\n=============\n\n这是一个二级标题\n-------------\n```\n\n至少有一个__``=``__ 和 __``-``__就能正常工作。\n\n\nAtx-风格的标题在每行的开头使用1-6个井号字符,分别对应标题级别1-6。例如:\n\n```\n# 这是一级标题\n\n## 这是二级标题\n\n###### 这是六级标题\n```\n如果愿意, 你也可以 \"结束\" atx-风格的标题。这纯粹是美观考虑--如果你觉得这样会看上更舒服些的话。结束用的井号个数随便,不必与起始井号数量相同 (起始井号的数量决定标题级别):\n\n```\n# 这是一级标题 #\n\n## 这是二级标题 ##\n\n###### 这是六级标题 ######\n```\n####引用块\n<br>\nMarkdown使用Email风格的 ``>`` 字符引用块。如果你熟悉Email中的引用块,你就知道在Markdown中如何使用引用块。如果每一行你都使用硬换行并在行首放一个``>``符号,看上去会很美观:\n\n```\n> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,\n> consectetuer adipiscing elit. 
Aliquam hendrerit mi posuere lectus.\n> Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.\n>\n> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse\n> id sem consectetuer libero luctus adipiscing.\n```\n(如果觉得每行写一个``>``太累,)Markdown允许你偷懒,你只需在硬换行段落的第一行之前放一个``>``号:\n\n```\n> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,\nconsectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus.\nVestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.\n\n> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse\nid sem consectetuer libero luctus adipiscing.\n```\n\n只需要多加一个``>``,就得到嵌套的引用块(即引用块中的引用块):\n\n```\n> This is the first level of quoting.\n>\n> > This is nested blockquote.\n>\n> Back to the first level.\n```\n\n引用块中可包含其它Markdown元素,如标题、列表和代码块:\n\n```\n> ## This is a header.\n>\n> 1. This is the first list item.\n> 2. This is the second list item.\n>\n> Here's some example code:\n>\n> return shell_exec(\"echo $input | $markdown_script\");\n```\n是个象样的文本编辑器都能实现Email风格的引用。比如在BBEdit里,你就可以选中一些文字之后从Text菜单里选择引用级别。\n<br>\n####列表\n\n\nMarkdown 支持有序列表和无序列表\n\n无序列表可使用星号、加号和连字符(这几个符号是等价的,你喜欢哪个就用哪个)作为列表标记:\n\n```\n* Red\n* Green\n* Blue\n```\n等同于:\n \n```\n + Red\n + Green\n + Blue\n```\n也等同于:\n\n```\n- Red\n- Green\n- Blue\n```\n有序列表则使用数字加英文句点:\n\n```\n1. Bird\n2. McHale\n3. Parish\n```\n有一点需要注意,你在列表中输入的标记数字并不会反映到Markdown输出的HTML之中。上面这个列表Markdown会输出为:\n\n```\n <ol>\n <li>Bird</li>\n <li>McHale</li>\n <li>Parish</li>\n </ol>\n```\n即使你写成下面这样:\n\n```\n1. Bird\n1. McHale\n1. Parish\n```\n甚至这样:\n\n```\n3. Bird\n1. McHale\n8. Parish\n```\n都会得到一模一样(但正确的)输出。要点在于,如果你愿意,就在你的Markdown有序列表里顺序使用数字(这样源代码里的顺序和生成的顺序会一致),如果你希望省点儿事,你就不用费心(去手工编号)。\n\n如果你打算偷懒,记住列表的第一行使用数字 1。以后Markdown或许会支持有序列表从任意数字开始(译者注:这儿和前面的例子有点矛盾,原文如此)。\n\n列表标记通常从左边界开始,至多可以有三个空格的缩进。列表标记之后至少要跟一个空格或TAB。\n\n为了让列表看起来美观,你可以使用TAB缩进列表项内容,使其整齐:\n\n```\n* Lorem ipsum dolor sit amet, consectetuer adipiscing elit.\n Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi,\n viverra nec, fringilla in, laoreet vitae, risus.\n* Donec sit amet nisl. Aliquam semper ipsum sit amet velit.\n Suspendisse id sem consectetuer libero luctus adipiscing.\n```\n不过如果你很懒,下面这样也行:\n\n```\n* Lorem ipsum dolor sit amet, consectetuer adipiscing elit.\nAliquam hendrerit mi posuere lectus. Vestibulum enim wisi,\nviverra nec, fringilla in, laoreet vitae, risus.\n* Donec sit amet nisl. Aliquam semper ipsum sit amet velit.\nSuspendisse id sem consectetuer libero luctus adipiscing.\n```\n如果列表项之间用空行分隔,Markdown就会在HTML输出中使用``<p>``标签包裹列表项。比如:\n\n```\n* Bird\n* Magic\n```\n生成的HTML如下:\n\n```\n <ul>\n <li>Bird</li>\n <li>Magic</li>\n </ul>\n```\n而这个:\n\n```\n* Bird\n\n* Magic\n```\n生成的HTML是这样:\n\n```\n <ul>\n <li><p>Bird</p></li>\n <li><p>Magic</p></li>\n </ul>\n```\n列表项有可能由多个段落组成,列表项的每个后续段落必须缩进至少4个空格或者一个TAB:\n\n```\n1. This is a list item with two paragraphs. Lorem ipsum dolor\n sit amet, consectetuer adipiscing elit. Aliquam hendrerit\n mi posuere lectus.\n\n Vestibulum enim wisi, viverra nec, fringilla in, laoreet\n vitae, risus. Donec sit amet nisl. Aliquam semper ipsum\n sit amet velit.\n\n2. Suspendisse id sem consectetuer libero luctus adipiscing.\n```\n像上面这样缩进后续段落的每一行看起来很美观(但稍有些麻烦),如果你比较懒(和我一样),没问题,Markdown支持你:\n\n```\n* This is a list item with two paragraphs.\n\n This is the second paragraph in the list item. You're only required to indent the \n first line. 
Lorem ipsum dolor sit amet, consectetuer adipiscing elit.\n\n* Another item in the same list.\n```\n要在列表项中使用引用,引用定界符 ``>`` 需要缩进:\n\n```\n* A list item with a blockquote:\n\n > This is a blockquote\n > inside a list item.\n```\n要在列表项中使用代码块,代码块需要缩进两次 -- 8个空格或者两个TAB:\n\n```\n* A list item with a code block:\n\n <code goes here>\n```\n有时候不小心会触发一个有序列表,比方在写类似下面这样的东西时:\n\n```\n1986. What a great season.\n```\n换言之, 以数字+句点+空格 序列起始的行会触发有序列表。为避免此情况,要对句点符号进行转义:\n\n```\n1986\\. What a great season.\n```\n#####代码块\n\n我们经常在写有关编程或标记语言源代码时用到预格式化的代码块。不像格式化普通段落,代码块中的行会按字面进行解释。Markdown对代码块同时使用``<pre>``和``<code>``标签包裹:\n\n在Markdown中要生成一个代码块,只需要在代码块内容的每一行缩进至少四个空格或者一个TAB。比如:\n\n```\nThis is a normal paragraph:\n\n This is a code block.\n```\nMarkdown会生成:\n\n```\n <p>This is a normal paragraph:</p>\n <pre><code>This is a code block.\n </code></pre>\n```\nMarkdown会从生成的代码块中删除一级缩进 -- 4个空格或者1个TAB。看下面这个例子:\n\n```\nHere is an example of AppleScript:\n\n tell application \"Foo\"\n beep\n end tell\n```\n会得到:\n\n```\n <p>Here is an example of AppleScript:</p>\n \n <pre><code>tell application \"Foo\"\n beep\n end tell\n </code></pre>\n```\n代码块在遇到没有缩进的一行,或者文件末尾时自动结束。\n\n在代码块中,`&`符号和`<`、`>`会自动转换成HTML实体。因此在Markdown中包含HTML源代码只是小菜一碟--粘贴进去,缩进一下。剩下的脏活累活Markdown自会处理。看下面这个例子:\n\n```\n <div class=\"sample_footer\">\n © 2004 Foo Corporation\n </div>\n```\nMarkdown会生成:\n\n```\n © 2004 Foo Corporation \n```\nMarkdown不会解析代码块中的Markdown标记。如代码块中的星号就是星号,失去了它原来的Markdown含义。这意味着你能够使用Markdown编写Markdown自己的语法教程。(就象这篇文章一样)。\n\n######水平线\n\n如果在一行里只放三个或更多个连字符,或星号或下划线,你就会得到一个水平线标记(`<hr />`)。下面每一行都会得到一个水平线:\n\n```\n* * *\n\n***\n\n*****\n\n- - -\n\n---------------------------------------\n```\n***\n\n###行内元素\n\n####链接\n\nMarkdown 支持两种风格的链接: 行内链接 和 引用链接.\n\n两种风格的链接,链接文本都放在中括号之内[square brackets]。\n\n要生成一个行内链接,在链接文本之后紧跟用一对小括号。小括号里放链接地址和可选的的链接title。如果提供链接title的话,链接title要用引号包起来。例如:\n\n```\n这是一个 [an example](http://example.com/ \"Title\") 行内链接。\n\n[这个链接](http://example.net/) 没有title属性。\n```\nMarkdown会生成:\n\n```\n <p>This is <a href=\"http://example.com/\" title=\"Title\">\n an example</a> inline link.</p>\n\n <p><a href=\"http://example.net/\">This link</a> has no\n title attribute.</p>\n```\n如果你打算引用一个本地资源或者同一站点的资源,可以使用相对路径:\n\n```\n如果想进一步了解我,请参阅我的 [关于我](/about/) 页。\n```\n引用风格的链接,在链接文本之后紧跟又一对中括号。这对中括号里放的是该链接的标识符(可以理解为别名):\n\n```\n这是一个引用型链接 [示例][id]。\n```\n如果你嫌弃两对中括号过于亲密,Markdown允许你在两对中括号之间放一个空格:\n\n```\n这是一个引用型链接 [示例] [id]。\n```\n然后,我们可以在文档的任意位置,像下面这样定义链接标识与链接的对应关系(一行一个链接):\n\n```\n [id] : http://example.com/ \"Optional Title Here\"\n```\n即:\n\n* 中括号内放链接标识符(行前可选缩进,至多不超过三个空格);\n* 之后紧跟一个冒号;\n* 再后面是一个或多个空格(TAB也行);\n* 接下来是链接URL;\n* 最后面是可选的用双引号或单引号或小括号括起来的链接title。\n\n下面三种链接定义方式是等价的:\n\n```\n[foo] : http://example.com/ \"Optional Title Here\"\n[foo] : http://example.com/ 'Optional Title Here'\n[foo] : http://example.com/ (Optional Title Here)\n```\n__注意: Markdown.pl 1.0.1__ 版本有一个已知的bug,用单引号作为链接title的定界符会出问题。\n\n至于链接URL,还支持使用一对可选的尖括号包裹起来:\n\n```\n [id] : <http://example.com/> \"Optional Title Here\"\n```\n你也可以将链接的title属性放在下一行并使用额外的空格或TAB填充,这样较长的URL会比较美观:\n\n```\n[id] : http://example.com/longish/path/to/resource/here\n \"Optional Title Here\"\n```\n链妆定义仅供Markdown解析器使用。最终输出的HTML当中不会包含链接定义。\n\n链接标识符可以由字母、数字、空格和标点符号组成--不区分大小写。下面这两个链接:\n\n```\n[link text][a]\n[link text][A]\n```\n是等价的。\n\n隐式链接标识 允许我们省略链接标识,这时链接文本本身就是链接标识。在链接文本之后加一对空的中括号--例如,使用\"Google\"文本链接到google.com站点,可以这样写:\n\n```\n[Google][]\n```\n然后这样定义它的链接:\n\n```\n[Google] : http://google.com/\n```\n链接名字有可能包含空格,不过没问题,这种情况照样正常工作:\n\n```\nVisit [Daring Fireball][] for more 
information.\n```\n然后这样定义这个链接:\n\n```\n[Daring Fireball] : http://daringfireball.net/\n```\n链接定义可放于Markdown文档的任意位置。我建议把它们就近放到最先使用它的段落之后。不过如果你更喜欢放到文档末尾,当成某种形式的尾注,随你的便。\n\n下面是一些引用链接的例子:\n\n```\nI get 10 times more traffic from [Google] [11] than from\n[Yahoo] [12] or [MSN] [13].\n\n [1] : http://google.com/ \"Google\"\n [2] : http://search.yahoo.com/ \"Yahoo Search\"\n [3] : http://search.msn.com/ \"MSN Search\"\n```\n换成隐式链接标识,也可以这么写:\n\n```\nI get 10 times more traffic from [Google][] than from\n[Yahoo][] or [MSN][].\n\n [google] : http://google.com/ \"Google\"\n [yahoo] : http://search.yahoo.com/ \"Yahoo Search\"\n [msn] : http://search.msn.com/ \"MSN Search\"\n```\n上面两种写法最终得到HTML输出是一样的:\n\n```\n <p>I get 10 times more traffic from <a href=\"http://google.com/ \n title=\"Google\">Google</a> than from\n <a href=\"http://search.yahoo.com/\" title=\"Yahoo Search\">Yahoo</a> \n or <a href=\"http://search.msn.com/\" title=\"MSN Search\">MSN</a>.</p>\n```\n作为比较,下面这个段落使用Markdown的行内链接风格编写:\n\n```\nI get 10 times more traffic from [Google](http://google.com/ \"Google\")\nthan from [Yahoo](http://search.yahoo.com/ \"Yahoo Search\") or\n[MSN](http://search.msn.com/ \"MSN Search\").\n```\n引用型链接的亮点并不在于它更容易书写,而在于引用型链接让你的文档可读性更好。看看上面的例子:使用引用型链接,段落本身仅81个字符;而使用行内链接的例子,是176个字符。最终输出的HTML则有234个字符。纯HTML中标记字符甚至超过了文本本身。\n\n使用Markdown的引用型链接,源文档更接近于最终的浏览器输出效果。再加上Markdown允许将标记有关的元数据移到段落之外,你尽管添加链接,而不必担心打断文件的故事情节。\n\n####强调\n\nMarkdown使用星号(`*`)和下划线(`_`)作为表示强调。用一个 `*`或 `_` 包裹的文本会使用 HTML <`em`> 标签包裹; 用两个 `*` 或 `_`包裹的文本会使用HTML`<strong>` 标签包裹。如:\n\n```\n*single asterisks*\n\n_single underscores_\n\n**double asterisks**\n\n__double underscores__\n```\n将会输出为:\n\n```\n <em>single asterisks</em>\n\n <em>single underscores</em>\n\n <strong>double asterisks</strong>\n\n <strong>double underscores</strong>\n```\n你喜欢哪一种风格就用哪一种,唯一的限制就是起始字符与关闭字符必须一致。\n\n强调符号可用于一个单词的一部分:\n\n```\nun*frigging*believable\n```\n不过如果你用空格包裹单独的 `*` 或 `_`,它们就失去了强调的含义,而成为字面上的星号或下划线。\n\n如果不想让Markdown解释这两个元字符,就转义它:\n\n```\n\\*this text is surrounded by literal asterisks\\*\n```\n####代码\n\n要在行内表示部分代码,用反引号( ` )包住它。与预格式代码块不同和,行内代码用于段落之内。例如:\n\n```\nUse the `printf()` function.\n```\n会生成:\n\n```\n <p>Use the <code>printf()</code> function.</p>\n```\n要在一个行内代码中使用反引号(`)本身,用多个反引号作为定界符包住它:\n\n```\n``There is a literal backtick (`) here.``\n```\n这样就会得到:\n\n```\n <p><code>There is a literal backtick (`) here.</code></p>\n```\n包住行内代码的反引号定界符可以包括空格--即在起始反引号之后,结束反引号之前可以有一个空格。这使得我们能够在行内代码的开始或结束处使用反引号:\n\n```\nA single backtick in a code span: `` ` ``\n\nA backtick-delimited string in a code span: `` `foo` ``\n```\n会生成:\n\n```\n <p>A single backtick in a code span: <code>`</code></p>\n\n <p>A backtick-delimited string in a code span: \n <code>`foo`</code></p>\n```\n在行内代码中,&和<和>会自动编码为HTML实体,以方便包含HTML标签。Markdown会把下面这行:\n\n```\nPlease don't use any `<blink>` tags.\n```\n转换为:\n\n```\n <p>Please don't use any <code><blink></code> tags.</p>\n```\n你也可以这样写:\n\n```\n`—` is the decimal-encoded equivalent of `—`.\n```\n会得到:\n\n```\n <p><code>—</code> is the decimal-encoded \n equivalent of <code>—</code>.</p>\n```\n####图片\n\n必须承认,要以“自然的”语法把一个图片放到一个纯文本文档之中,确实是一个挑战。\n\nMarkdown使用了类似链接语法来表示图片,同样有两种风格:行内图片和引用图片。\n\n行内图片语法示例:\n\n```\n ![Alt text](/path/to/img.jpg)\n\n ![Alt text](/path/to/img.jpg \"Optional title\")\n```\n即:\n\n* 一个感叹号`!`开头;\n* 其后紧跟一对中括号,中括号内存放图片的`alt`属性;\n* 其后紧跟一对小括号,小括号内存放图片的URL或路径,及可选的用双引号或单引号或小括号括起来的图片`title`\n\n引用图片语法如下:\n\n```\n ![Alt text][id]\n```\n这里 \"id\" 是图片引用标识。图片引用定义的语法与链接定义完全相同:\n\n```\n [id] : url/to/image \"Optional title 
attribute\"\n```\n在写这篇文章时,Markdown还没有语法指定图片的大小,如果这一点对你特别重要,你可以直接使用``<img>``标签。\n\n###杂七杂八\n\n####自动链接\n\nMarkdown提供了一种快捷方式\"自动地\"定义链接和Email地址:直接用一对尖括号把URL或Email地址包住。这表示链接文本就是URL本身,Email文本就是Email本身。这样你就得到了一个可点击的链接,如:\n\n```\n<http://example.com/>\n```\nMarkdown会将它转换为:\n\n```\n <a href=\"http://example.com/\">http://example.com/</a>\n```\n自动Email地址工作方式相似,只有一点不同。Markdown自动的用一些十进制和十六进制数字表示你的Email,以防止遭遇垃圾邮件袭击。 例如:\n\n```\n<[email protected]>\n```\n会被转换为:\n\n```\n<a href=\"mailto:addre\nss@example.co\nm\">address@exa\nmple.com</a>\n```\n浏览器会将它渲染为一个可点击的链接,并正确显示\"[email protected]\"。\n\n(这种实体编码的小方法可以骗过一些收集邮件地址的机器人,不过它确实无法骗过所有的机器人。有总比没有强,聊胜于无。能阻止一点就阻止一点好了。)\n\n####反斜线转义\n\nMarkdown允许你使用反斜线转义那些Markdown元字符,让它们失去原有的“魔力”。举个例子,如果你确实想用星号包住一个词组(而不是想得到`<em>`标签),就可以在星号之前使用反斜线将其转义。即:\n\n```\n\\*literal asterisks\\*\n```\nMarkdown中,以下字符支持使用反斜线转义:\n\n```\n\\ 反斜线\n` 反引号\n* 星号\n_ 下划线\n{} 大括号\n[] 中括号\n() 小括号\n# 井号\n+ 加号\n- 减号(连字符)\n. 句点\n! 感叹号\n```\n"
},
{
"alpha_fraction": 0.6270566582679749,
"alphanum_fraction": 0.6288848519325256,
"avg_line_length": 22.69565200805664,
"blob_id": "225580580769be90e31d438ca72a4b94b52a5ea2",
"content_id": "6a94a5e19e03574ca99315c61d17b4c326b70c67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 23,
"path": "/script/html.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "# Table tr swap \n\n```js \nvar swapRows = function(a, b) {\n var columnsT = document.getElementById(\"columnsTable\");\n var rows = columnsT.rows;\n\n var row = rows[a],\n sibling = row.previousElementSibling,\n anchor = row.nextElementSibling,\n parent = row.parentNode;\n if (b == \"down\") {\n var row = rows[a+1],\n sibling = row.previousElementSibling,\n anchor = row.nextElementSibling,\n parent = row.parentNode;\n parent.insertBefore(row, sibling);\n } else {\n parent.insertBefore(row, sibling);\n }\n\n} \n``` \n\n"
},
{
"alpha_fraction": 0.501483678817749,
"alphanum_fraction": 0.7121661901473999,
"avg_line_length": 20,
"blob_id": "abc8c277d48f7f398aca65c4796875d4c08040ac",
"content_id": "76a34a88cc2d7ad8f43433569505d40beab982cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 16,
"path": "/script/ssh.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "\n* ssh 端口转发 \n\n```bash\n$ /usr/bin/ssh -f -N -Llocalhost:10000:10.126.215.131:10000 [email protected]\n$ ssh -fNg -L 3309:10.126.81.130:3306 [email protected]\n$ /usr/bin/ssh -f -N -D 0.0.0.0:8081 localhost\n\n本地查看端口:\n$ telnet localhost 10000\n\n如果服务器报错:\nopen failed: administratively prohibited: open failed\n\n这是由于代理机器的ssh服务部支持转发 换过一台机子\n\n```\n"
},
{
"alpha_fraction": 0.579741358757019,
"alphanum_fraction": 0.5818965435028076,
"avg_line_length": 12.880000114440918,
"blob_id": "806f698c2a7a921be9ea7d14ed5f17174680a99a",
"content_id": "c4841e23577e0f61d9a584d3a749edf6a6a936ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1526,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 100,
"path": "/git/README.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "[Git Help](http://www.git-scm.com/docs) \n========= \n\n[文档](http://git-scm.com/book/zh/v1) \n=========\n\n### git config \n\n``` \ngit config --global user.name \"[name]\" \ngit config --global user.email \"[email address]\" \ngit config --global color.ui auto \n美化git log\ngitlog='git log --graph --pretty=format:'\\''%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset'\\'' --abbrev-commit --date=relative' \n``` \n\n### ssk key \n \n``` \nssh-keygen -t rsa -P \"\" \n连续键入Enter即可 \ncat ~/.ssh/id_rsa.pub \nconfig to online \n``` \n\n### make changes \n\n* 查看当前状态 \n* 查看修改 \n* 添加&提交&回退 \n\n``` \ngit status \ngit diff \ngit add [file|-A]\ngit diff --staged \ngit reset [file] \ngit commit -m \"[descriptive message]\" \n```\n\n### gitignore \n\n* 忽略所有,仅仅提交.sh文件: \n \n``` \n*\n!.gitignore\n!*.sh\n!*/ \n```\n\n### branch \n\n* 创建分支 \n* 切换分支 \n* 分支查看 \n* 分支合并 \n* 分支删除 \n\n\n``` \ngit checkout -b test \ngit branch -a \ngit checkout master \ngit merge test\ngit branch -d test \n```\n\n### remote \n\n* 查看remote \n* 添加remote\n* 删除remote\n* 修改名称 \n\n``` \ngit remote -v \ngit remote add [name] [url] \ngit remote remove origin \ngit remote rename [old] [new]\n```\n\n### auto push \n\n``` \n#!/bin/bash\n\nCOMMIT_DESC=\"$1\"\nif [ -z \"$COMMIT_DESC\" ]; then \n echo \"Usage: ./git.sh <commit_desc>\"\n exit 1\nfi \n\ngit stash\ngit pull --rebase\ngit stash pop\ngit add -A\ngit commit -m \"$COMMIT_DESC\"\ngit push origin master \n```\n\n\n\n\n"
},
{
"alpha_fraction": 0.5489566326141357,
"alphanum_fraction": 0.5634028911590576,
"avg_line_length": 14.923076629638672,
"blob_id": "671b7021d50262e0b8ecba73fb744f751ba9cb07",
"content_id": "1308f8534c1591b7e8bc54d278428e40eb18383a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 623,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 39,
"path": "/script/Linux_Example.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "Some Examples \n=============\n\n\n* Fix Daily & Hourly Job\n\n```bash \n#!/usr/bin/env bash\n\n# set -x\n\nif [ $# -ne 2 ]; then\n echo \"Usage ./hourly_fix.sh <startDate> <endDate>\"\n exit 1\nfi\n\nstartDate=\"$1\"\nendDate=\"$2\"\n\nfunction runDailyJob {\n\n dealDate=\"$1\"\n for ((j=0; j<=23; j++)); do\n echo \"Run hourly job\" \n done\n echo \"Run daily job\"\n}\n\necho \"start date $startDate\"\necho \"end date $endDate\"\n\nhasDates=$(($(date -d $endDate +%j)-$(date -d $startDate +%j)))\n\nfor ((i=0; i<=$hasDates; i++)); do\n dealDate=$(date -d \"$startDate +$i day\" +%Y-%m-%d)\n #echo $dealDate\n runDailyJob $dealDate\ndone \n``` \n\n"
},
{
"alpha_fraction": 0.7103610634803772,
"alphanum_fraction": 0.7268446087837219,
"avg_line_length": 34.33333206176758,
"blob_id": "6065c8becaa66dd3978f3788b6dd09b749328a8b",
"content_id": "d660ee0d0678454f52aeabeb0d26404fd5014c0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1682,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 36,
"path": "/cloudera/Hadoop机房迁移相关问题.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "# Hadoop 机房迁移相关问题 \n\n### 更改机器的 ip 参考\n\n* CDH 4 的时候需要修改数据库\n\n * `/etc/cloudera-scm-server/db.properties` 这个文件记录 cm 用的 DB 的信息 \n ``` \n# psql -h localhost -U scm -p 7432\nscm=> \\dt # show tables in pg\nscm=> \\list\nscm=> select * from hosts; # show hostinfo in hosts\n```\n * `/etc/cloudera-scm-agent/config.ini` 每个 agent 里面有记录 cm 的地址,需要修改一下`server_host=10.xx.xx.xx`\n\n* CDH 5 的时候使用 uuid 来标识每台机器,看起来,只需要修改一下 `/etc/cloudera-scm-agent/config.ini` 里面对应的 server_host 即可 \n\n* 相关参考 :\n * [https://groups.google.com/a/cloudera.org/forum/#!mydiscussions/scm-users/gM-DtmxvkJ8](https://groups.google.com/a/cloudera.org/forum/#!mydiscussions/scm-users/gM-DtmxvkJ8)\n * [http://shulhi.com/change-ip-address-for-existing-nodes-in-cdh-5-3/](http://shulhi.com/change-ip-address-for-existing-nodes-in-cdh-5-3/)\n\n\n* 除了上面写的,还需要保证:\n * dns 解析正确,或者 `/etc/hosts` 里面配置正确,否则 `cloudera-scm-server` 会hang住,导致 7182 这个服务无法连接。`cloudera-scm-agent` 连不上\n * 需要`deploy client config`\n * namenode ha 启动会有问题,原因是之前 zk 里面保存的 hostname 发生改变,停掉 failover controller 去 initiale zookeeper 都不会失败\n\n\n需要手动去 \n```\nHADOOP_USER_NAME=hdfs \nhdfs --config /var/run/cloudera-scm-agent/process/150-failover-controller-initialize-znode zkfc -formatZK\n```\n提示我们 stop all hdfs service,输入 Y 确认格式掉 zookeeper 相关数据(注意,不是格式化 namenode)\n\n后面重启就好了\n\n\n"
},
{
"alpha_fraction": 0.7088122367858887,
"alphanum_fraction": 0.7716475129127502,
"avg_line_length": 35.22222137451172,
"blob_id": "50071310901043863f6e6cf1c1447c259e997556",
"content_id": "3c6f8c5b87b3b9f901786a74a742b835fff151ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1665,
"license_type": "no_license",
"max_line_length": 298,
"num_lines": 36,
"path": "/script/Hive_some_question.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "#### map join 的字段不一致,主要是 string 和 int 的列做 join\n\n日志里面看 job 长期 hang 在以下位置:\n\n\n\n查看你 map 的时候,map 的状态为 FAILED, 查看 map 的日志,最后有以下类似的信息:\n``` \n 2015-12-11 04:26:06,119 INFO [main] org.apache.hadoop.hive.ql.exec.MapJoinOperator: Load back 1 hashtable file from tmp file uri:file:/hadoop3/yarn/nm/usercache/hadoop/appcache/application_1449142557781_73597/container_e11_1449142557781_73597_01_000002/Stage-9.tar.gz/MapJoin-mapfile11--.hashtable\n``` \n\n<b>解决方法</b>: 保证用于 join 的列一致,cast 或者直接修改表的结构。\n\n如果还不能解决,先手动禁用掉 map join :\n\n```sql\nset hive.auto.convert.join=false;\n```\n\n\n#### hive 使用的新的序列化方法后偶尔出现的问题。\n\n这种问题一般是代码里面设置了 `set hive.exec.parallel=true;` 运行的时候,偶尔会报以下错误,重跑有可能成功:\n``` \n aliasToWork (org.apache.hadoop.hive.ql.plan.MapWork)\n at org.apache.hive.com.esotericsoftware.kryo.util.DefaultClassResolver.readClass(DefaultClassResolver.java:119)\n at org.apache.hive.com.esotericsoftware.kryo.Kryo.readClass(Kryo.java:656)\n at org.apache.hive.com.esotericsoftware.kryo.serializers.ObjectField.read(ObjectField.java:99)\n at org.apache.hive.com.esotericsoftware.kryo.serializers.FieldSerializer.read(FieldSerializer.java:507)\n at org.apache.hive.com.esotericsoftware.kryo.Kryo.readClassAndObject(Kryo.java:776)\n``` \n\n如果有这种情况,解决的方法是先更换序列化的方法,即加入:\n```sql\nset hive.plan.serialization.format=javaXML;\n``` \n"
},
{
"alpha_fraction": 0.7300469279289246,
"alphanum_fraction": 0.7300469279289246,
"avg_line_length": 20.25,
"blob_id": "77d4fb3a3128911a5c8a84b735fb3eb55a811aa3",
"content_id": "36be69269976513f969781b62a3be99835f5c3b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 542,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/script/Hive脚本中的LZO压缩.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "#### 对于配置了LZO压缩的表,在使用“项目开发”的“Hive脚本”任务时,需要做以下工作 \n\n* 生成数据时增加三个配置参数 \n\n``` \nset mapred.output.compress=true;\nset mapred.output.compression.codec=com.hadoop.compression.lzo.LzopCodec;\nset hive.exec.compress.output=true;\n\nINSERT OVERWRITE TABLE dw_stage.page_staytime\nPARTITION (cal_dt = ${dealDate})\nSELECT ...\n``` \n\n* 使用以下语句进行索引 \n\n``` \nLZOINDEXER\n/user/hive/warehouse/dw_stage.db/page_staytime/cal_dt=${outFileSuffix}/; \n```\n\n"
},
{
"alpha_fraction": 0.6920750141143799,
"alphanum_fraction": 0.6987295746803284,
"avg_line_length": 25.14285659790039,
"blob_id": "604443bb3b51ee37e52df66922408ba3e667ce97",
"content_id": "e99eeb9b95411ea6d67498ce671a4fb7bc609555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1743,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 63,
"path": "/script/MySQL.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "\n* MySQL 企业版安装后,root密码自动生成在: `/root/.mysql_secret` \n\n* mysqldump 导出表结构到指定库 \n\n```bash \n$ mysqldump -hIP -uroot -ppassword --compact --skip-lock-tables --no-data database tableName > mysqldumpTmp.sql\n$ mysql -hIP -uroot -ppassword -Ddatabase < mysqldumpTemp.sql\n$ mysqldump -hIP -uroot -ppassword --compact --skip-lock-tables --no-data database tableName | mysql -uroot -ppassword databases \n``` \n\n* MySQL 创建指定用户,并授权 \n\n```bash \n$ CREATE USER 'dw_tools'@'localhost' IDENTIFIED BY 'dw_tools';\n$ GRANT ALL ON *.* TO 'dw_tools'@'localhost';\n$ FLUSH PRIVILEGES;\n``` \n\n* 查看进程信息\n\n```sql \n$ SELECT * FROM information_schema.processlist WHERE state = 'locked';\n``` \n\n* 切换MySQL 文件目录 \n\n1 . Stop MySQL using the following command: \n\n```bash \nsudo /etc/init.d/mysql stop\n```\n2 . Copy the existing data directory (default located in `/var/lib/mysql`) using the following command: \n\n```bash\nsudo cp -R -p /var/lib/mysql /newpath\n```\n3 . edit the MySQL configuration file with the following command: \n\n```bash\nsudo gedit /etc/mysql/my.cnf\n```\n4 . Look for the entry for datadir, and change the path (which should be `/var/lib/mysql`) to the new data directory.\n\n5 . In the terminal, enter the command: \n\n```bash\nsudo gedit /etc/apparmor.d/usr.sbin.mysqld\n```\n6 . Look for lines beginning with `/var/lib/mysql`. Change `/var/lib/mysql` in the lines with the new path.\n\n7 . Save and close the file.\n\n8 . Restart the AppArmor profiles with the command: \n\n```bash \nsudo /etc/init.d/apparmor reload\n```\n9 . Restart MySQL with the command: \n\n```bash \nsudo /etc/init.d/mysql restart\n```\n10 . Now login to MySQL and you can access the same databases you had before.\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5596330165863037,
"alphanum_fraction": 0.60550457239151,
"avg_line_length": 20.600000381469727,
"blob_id": "d7ce33e3d9ee2176b7d3a8d5b1a42b47ec0eee88",
"content_id": "cc871a5299b52dc23db9931cc59d1fbb47d5c319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 5,
"path": "/script/Java.md",
"repo_name": "zhenxiforever/note",
"src_encoding": "UTF-8",
"text": "* exec jar \n\n```java \n$ /usr/local/java/jdk1.7.0_80/bin/jar -cp Main.jar;commons.jar com.App <args> \n``` \n"
}
] | 24 |
kabirsrivastava3/binary-search-project | https://github.com/kabirsrivastava3/binary-search-project | 6f8f43a019d006075ed58866882e40b953c815b1 | 380b06d081051f5040bb6a72e4c6e5cc32394fe7 | 0b3d28f33c8dd86e4602ec3c69e6e3f5eb6cf289 | refs/heads/main | 2023-06-30T13:12:09.782526 | 2021-08-02T20:55:06 | 2021-08-02T20:55:06 | 391,858,927 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7421731352806091,
"alphanum_fraction": 0.7421731352806091,
"avg_line_length": 32.875,
"blob_id": "eb91eb39f2512158ac2c03e850756893c2482754",
"content_id": "2d7b94caa027b7c3c79a2cd82c2618be11807f1d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 543,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 16,
"path": "/README.md",
"repo_name": "kabirsrivastava3/binary-search-project",
"src_encoding": "UTF-8",
"text": "# binary-search-project\n\n# Implementation of binary search algorithm!!\n\n# We will prove that binary search is faster than linear search!\n\n\n# Essence of binary search:\n# If you have a sorted list and you want to search this array for something,\n# You could go through each item in the list and ask, is this equal to what we're looking for?\n# But we can make this *faster* by leveraging the fact that our array is sorted!\n# Searching trough a list? Never check every item.\n\n\n# Time Complexity:\n# Binary search ~ O(log(n)), linear search ~ O(n)\n\n"
},
{
"alpha_fraction": 0.6152084469795227,
"alphanum_fraction": 0.6385707855224609,
"avg_line_length": 30.128570556640625,
"blob_id": "c1140d868e64e60e9da01140dc36a89e32360e26",
"content_id": "fa3fc268d10440ef1086dbed5abc5625af40b546",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2183,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 70,
"path": "/binary-search-project.py",
"repo_name": "kabirsrivastava3/binary-search-project",
"src_encoding": "UTF-8",
"text": "\nimport time\nimport random\n\n# In these two examples, numList is a list in ascending order, and target is something that we're looking for:\n# Return -1 if not found\n\n\n# Linear Search: scan entire list and ask if its equal to the target.\n# If Yes, return the index\n# If No, then return -1\n\ndef linearSearch(numList, target):\n # example numList = [1, 2, 9, 11]\n for index in range(len(numList)):\n if numList[index] == target:\n return index\n return -1\n\n# Binary Search uses Divide & Conquer!\n# We will leverage the fact that our list is already SORTED.\n\ndef binarySearch(numList, target, start = None, end = None):\n if start is None:\n start = 0\n if end is None:\n end = len(numList) - 1\n\n if end < start:\n return -1\n \n # Example numList = [1, 2, 4, 9, 11] # should return 2\n midpoint = (start + end) // 2 # 2\n\n # We'll check if numList[midpoint] == target, and if not, we can find out if\n # target will be to the left or right of midpoint\n # We know everything to the left of midpoint is smaller than the midpoint\n # and everything to the right is larger\n if numList[midpoint] == target:\n return midpoint\n elif target < numList[midpoint]:\n return binarySearch(numList, target, start, midpoint-1)\n else:\n # target > numList[midpoint]\n return binarySearch(numList, target, midpoint+1, end)\n\n\nif __name__=='__main__':\n # numberList = [1, 2, 4, 9, 11]\n # target = 9\n # print(linearSearch(numberList, target))\n # print(binarySearch(numberList, target))\n\n length = 10000\n # build a sorted list of length 10000\n sortedList = set()\n while len(sortedList) < length:\n sortedList.add(random.randint(-3*length, 3*length)) # range of -30,000 to 30,000\n sortedList = sorted(list(sortedList))\n\n start = time.time()\n for target in sortedList:\n linearSearch(sortedList, target)\n end = time.time()\n print(\"Linear search time: \", (end - start), \"seconds\")\n\n start = time.time()\n for target in sortedList:\n binarySearch(sortedList, target)\n end = time.time()\n print(\"Binary search time: \", (end - start), \"seconds\")\n\n\n\n"
}
] | 2 |
iloveyby/pyActionScriptTest | https://github.com/iloveyby/pyActionScriptTest | 8d96f9c977b468250faaaa77b4a0ebf1cc43cdec | 78338866e6b893b89ec0fc900f621ebf64df1724 | 65934a605c831ffb068e0473562a9b4ad3b4b72e | refs/heads/main | 2023-02-05T08:24:26.807182 | 2020-12-31T08:38:41 | 2020-12-31T08:38:41 | 318,040,378 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7352941036224365,
"alphanum_fraction": 0.7352941036224365,
"avg_line_length": 10.333333015441895,
"blob_id": "9686a2ba4188c4277b6b5ce2f3055665190032a5",
"content_id": "401a0b09c65c0633fb9e6424510db1a46d0ab7ea",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 34,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 3,
"path": "/test.py",
"repo_name": "iloveyby/pyActionScriptTest",
"src_encoding": "UTF-8",
"text": "import time\n\nprint(\"Hello World\")\n"
}
] | 1 |
YUNMI2/multi_process_and_thread | https://github.com/YUNMI2/multi_process_and_thread | 4845fa48bbe27a83b54f43c2e35475f5b97e6ed3 | 1d2c15d1c341ae628a5452253b395b0e9934cecb | 5cc6add0516b422b399e95b8377a26ea69d25779 | refs/heads/master | 2020-03-14T22:05:30.957373 | 2018-05-05T13:18:22 | 2018-05-05T13:18:22 | 131,813,281 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.523734986782074,
"alphanum_fraction": 0.5284298658370972,
"avg_line_length": 28.173229217529297,
"blob_id": "638d37a7a87e3ff6d16572b1593a84e19ea19c10",
"content_id": "0b4015272ade22b1066de7c186ed978c885a6a77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3930,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 127,
"path": "/convert_seg_to_nnseg.py",
"repo_name": "YUNMI2/multi_process_and_thread",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport os\r\nimport os.path\r\nimport gzip\r\nimport sys\r\nimport threading\r\nimport multiprocessing\r\nimport codecs\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\ndef Attach_Tag(cur_char,pre_char,next_char):\r\n assert (cur_char != \" \")\r\n\r\n if pre_char == \" \" or pre_char == \"#START#\":\r\n if next_char == \"#END#\" or next_char == \" \":\r\n return \"s-seg\"\r\n else:\r\n return \"b-seg\"\r\n else:\r\n if next_char == \"#END#\" or next_char == \" \":\r\n return \"e-seg\"\r\n else:\r\n return \"m-seg\"\r\n\r\ndef Get_Bichar(cur_char, pre_char, pre_two_char,line,file_name):\r\n prefix_tag = \"[T1]\"\r\n bichar = cur_char\r\n assert(cur_char != \" \")\r\n\r\n if pre_char == \" \":\r\n assert (pre_two_char != \" \")\r\n bichar += pre_two_char\r\n else:\r\n bichar += pre_char\r\n return prefix_tag + bichar\r\n\r\ndef extract_info_and_write(file_name):\r\n file_name_out = file_name.replace(\"hwc.seg\",\"bichar.feats\")\r\n with codecs.open(file_name, \"r\",encoding=\"utf-8\") as fo:\r\n with codecs.open(file_name_out, \"w\", encoding=\"utf-8\") as fw:\r\n for line in fo.readlines():\r\n line = line.strip()\r\n if not line:\r\n continue\r\n line = \" \".join(line.split()) # 语料里面不是严格意义的每个词之间是一个空格,先split然后join一个空格\r\n for i in range(len(line)):\r\n if line[i] != \" \":\r\n cur_char = line[i]\r\n pre_char = line[i-1] if i > 0 else \"#START#\"\r\n pre_two_char = line[i-2] if i > 1 else \"#START#\"\r\n next_char = line[i+1] if i < len(line)-1 else \"#END#\"\r\n cur_char_tag = Attach_Tag(cur_char,pre_char,next_char)\r\n cur_bichar = Get_Bichar(cur_char,pre_char,pre_two_char,line,file_name)\r\n fw.write(\" \".join([cur_char,cur_bichar,cur_char_tag]) + \"\\n\")\r\n fw.write(\"\\n\")\r\n print file_name,\"end reading!\"\r\n\r\n\r\ndef load_dir_file(dir_path, list_file_name):\r\n parents = os.listdir(dir_path)\r\n\r\n for parent in parents:\r\n child = os.path.join(dir_path, parent)\r\n if os.path.isdir(child):\r\n load_dir_file(child, list_file_name)\r\n else:\r\n if child.endswith(\".hwc.seg\"):\r\n list_file_name.append(child)\r\n\r\n\r\ndef deal_with_multi_thread(list_file):\r\n threads = []\r\n\r\n for one_file in list_file:\r\n print \"start reading file: \", one_file\r\n one_thread = threading.Thread(target=extract_info_and_write, args=(one_file,))\r\n threads.append(one_thread)\r\n\r\n for one_thread in threads:\r\n one_thread.start()\r\n\r\n for one_thread in threads:\r\n one_thread.join()\r\n\r\n\r\ndef deal_with_multi_process(list_file, num_thread_in_one_process):\r\n processs = []\r\n groups = []\r\n one_group = []\r\n\r\n for i in range(len(list_file)):\r\n one_group.append(list_file[i])\r\n if i % num_thread_in_one_process == num_thread_in_one_process - 1:\r\n if one_group != []:\r\n groups.append(one_group)\r\n one_group = []\r\n if one_group != []:\r\n groups.append(one_group)\r\n\r\n for one_group in groups:\r\n each_process = multiprocessing.Process(target=deal_with_multi_thread, args=(one_group,))\r\n processs.append(each_process)\r\n\r\n for one_process in processs:\r\n one_process.start()\r\n\r\n for one_process in processs:\r\n one_process.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t'''\r\n\tinput file: 我 是 中国人\r\n\toutput file:\r\n\t我 [T1]我#START# s-seg\r\n\t是 [T1]是我 s-seg\r\n\t中 [T1]中是 b-seg\r\n 国 [T1]国中 m-seg\r\n\t人 [T1]人国 e-seg \r\n\t'''\r\n list_file_name = []\r\n load_dir_file(\"./\", list_file_name)\r\n deal_with_multi_process(list_file_name, 5)\r\n\r\n"
},
{
"alpha_fraction": 0.6055788993835449,
"alphanum_fraction": 0.610077977180481,
"avg_line_length": 21.624113082885742,
"blob_id": "76dd6f693c43ea3788e273d5d018945dc4253351",
"content_id": "f16c3c201aa77c7a73a01bd453f3d41ea46aa0be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3516,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 141,
"path": "/read_gz_file_with_multi_process_and_multi_thread.py",
"repo_name": "YUNMI2/multi_process_and_thread",
"src_encoding": "UTF-8",
"text": "import os\r\nimport os.path\r\nimport gzip\r\nimport sys\r\nimport thread\r\nimport threading\r\nimport multiprocessing\r\nfrom bs4 import BeautifulSoup\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\ndef read_gz_file(path):\r\n if os.path.exists(path):\r\n with gzip.open(path, 'r') as pf:\r\n for line in pf:\r\n yield line\r\n else:\r\n print('the path [{}] is not exist!'.format(path))\r\n\r\ndef readJsonfile(file_name):\r\n\t'''\r\n\t# load json file\r\n\t'''\r\n\tJson_dict = {}\r\n\tif file_name != \"\":\r\n\t\timport json\r\n\t\twith open(file_name, \"r\") as fo:\r\n\t\t\tJson_dict = json.load(fo)\r\n\treturn Json_dict\r\n\r\ndef is_kong(list_in):\r\n\tfor one in list_in:\r\n\t\tif not one.text.strip():\r\n\t\t\treturn 1\r\n\treturn 0\r\n\r\n\r\ndef extract_info_and_write(gz_file):\r\n\t'''\r\n\t# 按行读取.gz文件\r\n\t# 按照html标签筛选信息\r\n\t# 将筛选的信息写进文件\r\n\t'''\r\n\tcon = read_gz_file(gz_file)\r\n\tnum_index = 0\r\n\r\n\tout_file = gz_file.replace(\"gz\",\"out_v2\")\r\n\ttmp_file = gz_file.replace(\"gz\",\"tmp_v2\")\r\n\r\n\tif getattr(con, '__iter__', None):\r\n\t\tfw = open(out_file,\"w\")\r\n\t\tfor line in con:\r\n\t\t\tnum_index += 1\r\n\t\t\tif num_index%5 == 1:\r\n\t\t\t\tfo = open( tmp_file, \"w\")\t\r\n\t\t\t\r\n\t\t\tfo.write(line)\r\n\t\t\t\r\n\t\t\tif num_index % 5 == 0:\r\n\t\t\t\tfo.close()\r\n\t\t\t\tdict_html = readJsonfile(tmp_file)\r\n\t\t\t\tsoup = BeautifulSoup(dict_html[\"html\"], 'html.parser')\r\n\t\t\t\tcontent_list = soup.find_all(\"div\",attrs={\"class\":\"para\"})\r\n\t\t\t\tfor content in content_list:\r\n\t\t\t\t\tkey_list = content.find_all(\"a\", attrs={'target':\"_blank\"})\r\n\t\t\t\t\tif len(key_list) > 0 and not is_kong(key_list):\r\n\t\t\t\t\t\tfw.write(content.text + \"\\n\")\r\n\t\t\t\t\t\tfor one in content.find_all(\"a\", attrs={'target':\"_blank\"}):\r\n\t\t\t\t\t\t\tfw.write(one.text + \"\\n\")\r\n\t\t\t\t\t\tfw.write(\"\\n\\n\")\r\n\t\t\t\t\t\t\r\n\tif os.path.exists(tmp_file):\r\n\t\tos.remove(tmp_file) #删除临时文件\r\n\tfw.close()\r\n\r\ndef load_dir_file(dir_path,list_file_name):\r\n\t'''\r\n\t# 递归读取一个文件夹下面的.gz文件\r\n\t'''\r\n\tparents = os.listdir(dir_path)\r\n\tfor parent in parents:\r\n\t\tchild = os.path.join(dir_path,parent)\r\n\t\tif os.path.isdir(child):\r\n\t\t\tload_dir_file(child,list_file_name)\r\n\t\telse:\r\n\t\t\tif \".gz\" in child and child.endswith(\".gz\"):\r\n\t\t\t\tlist_file_name.append(child)\r\n\r\n\t\t\t\r\ndef deal_with_multi_thread(list_file):\r\n\t'''\r\n\t# 多线程,每个文件一个线程,python多线程只能使用一个cpu\r\n\t'''\r\n\tthreads = []\r\n\tfor one_file in list_file:\r\n\t\tprint \"start reading file: \", one_file\r\n\t\tone_thread = threading.Thread(target=extract_info_and_write,args=(one_file,))\r\n\t\tthreads.append(one_thread)\r\n\tfor one_thread in threads:\r\n\t\tone_thread.start()\r\n\tfor one_thread in threads:\r\n\t\tone_thread.join()\r\n \r\n\r\n\r\ndef deal_with_multi_process(list_file, num_thread_in_one_process):\r\n\t'''\r\n\t# 多进程,使用多核cpu,因为python 多线程只能使用一个cpu,效率不高\r\n\t'''\r\n\tprocesss = []\r\n\t\r\n\tgroups = []\r\n\t\r\n\tone_group = []\r\n\t\r\n\tfor i in range(len(list_file)):\r\n\t\tone_group.append(list_file[i])\r\n\t\tif i % num_thread_in_one_process == num_thread_in_one_process-1:\r\n\t\t\tif one_group != []:\r\n\t\t\t\tgroups.append(one_group)\r\n\t\t\tone_group = []\r\n\tif one_group != []:\r\n\t\tgroups.append(one_group)\r\n\t\r\n\tfor one_group in groups:\r\n\t\teach_process = multiprocessing.Process(target=deal_with_multi_thread,args=(one_group,))\r\n\t\tprocesss.append(each_process)\r\n\t\r\n\tfor 
one_process in processs:\r\n\t\tone_process.start()\r\n\t\r\n\tfor one_process in processs:\r\n\t\tone_process.join()\r\n\t\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tlist_file_name = []\r\n\tload_dir_file(\"./\",list_file_name)\r\n\tdeal_with_multi_process(list_file_name,5)\t\r\n\r\n"
},
{
"alpha_fraction": 0.7074829936027527,
"alphanum_fraction": 0.725623607635498,
"avg_line_length": 16.639999389648438,
"blob_id": "b5f504bf2979c70c2f7feebf1b753c4e0e3a10cf",
"content_id": "a7e8cf37a813d1b26b1dd0f6282c1a3418fb29e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 25,
"path": "/README.md",
"repo_name": "YUNMI2/multi_process_and_thread",
"src_encoding": "UTF-8",
"text": "# To use multi thread and multi process\n\n### Why\n- python 多线程只能使用一个cpu效率不高\n- 为了利用多核cpu,使用多进程\n- 其中每个进程,利用多线程\n\n\n### How\n- threading\n- multiprocessing\n\n### Reference\n- [多进程 multiprocessing 模块](https://blog.csdn.net/cityzenoldwang/article/details/78584175)\n\n### Introduction\n- read_gz_file_with_multi_process_and_multi_thread.py\n\t- extract info from .json.gz \n\n- convert_seg_to_nnseg.py\n\t- convert seg corpus to nnseg \n\n\n### Run\n- python xxx.py\n"
},
{
"alpha_fraction": 0.4662875831127167,
"alphanum_fraction": 0.4707554876804352,
"avg_line_length": 35.287879943847656,
"blob_id": "19f177d020201331e855cbbadb5b162ac41bcad1",
"content_id": "8dfedbc29c3131ed452e2cfa59466df6283eaea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4957,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 132,
"path": "/split_baike_data.py",
"repo_name": "YUNMI2/multi_process_and_thread",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport os\r\nimport os.path\r\nimport sys\r\nimport threading\r\nimport multiprocessing\r\nimport codecs\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\n\r\ndef split_baike_data_to_2_kinds(file_name = \"\"):\r\n file_name_normal = file_name + \".normal\"\r\n file_name_unnormal = file_name + \".unnormal\"\r\n num_normal_sen = 0\r\n num_normal_word = 0\r\n num_unnormal_sen = 0\r\n num_unnormal_word = 0\r\n with codecs.open(file_name, \"r\", encoding=\"utf-8\") as fo:\r\n with codecs.open(file_name_normal, \"w\", encoding=\"utf-8\") as fw_normal:\r\n with codecs.open(file_name_unnormal, \"w\", encoding=\"utf-8\") as fw_unnormal:\r\n list_sen_and_word = []\r\n flag_normal = True\r\n for line in fo.readlines():\r\n if line.startswith(\"sentence:\") and list_sen_and_word != []:\r\n for i in range(len(list_sen_and_word)):\r\n if \"- Data © NavInfo & CenNavi & 道道通\" in list_sen_and_word[i] or \"张)\" in list_sen_and_word[i] : #表示有地图的或者有剧照的\r\n flag_normal = False\r\n break\r\n if flag_normal:\r\n start_ix = 0\r\n end_ix = 0\r\n for i in range(len(list_sen_and_word)):\r\n if list_sen_and_word[i].startswith(\"key word:\"):\r\n end_ix = i\r\n break\r\n sen = \"\"\r\n while start_ix < end_ix:\r\n sen += list_sen_and_word[start_ix].strip()\r\n start_ix += 1\r\n fw_normal.write(sen + \"\\n\")\r\n num_normal_sen += 1\r\n while end_ix < len(list_sen_and_word):\r\n if list_sen_and_word[end_ix].strip() and list_sen_and_word[end_ix].strip() != \"key word:\":\r\n fw_normal.write(list_sen_and_word[end_ix].strip() + \"\\n\")\r\n num_normal_word += 1\r\n end_ix += 1\r\n fw_normal.write(\"\\n\")\r\n\r\n else:\r\n for one in list_sen_and_word:\r\n fw_unnormal.write(one)\r\n if one.startswith(\"sentence:\"):\r\n num_unnormal_sen += 1\r\n elif one.strip().startswith(\"key word:\"):\r\n num_unnormal_word += 1\r\n fw_unnormal.write(\"\\n\")\r\n\r\n list_sen_and_word = [line]\r\n flag_normal = True\r\n else:\r\n list_sen_and_word.append(line)\r\n print \"\\n\"\r\n print file_name, \"static info:\"\r\n print \"\\t\",\"num_normal_sen: \",num_normal_sen\r\n print \"\\t\", \"num_normal_word: \", num_normal_word\r\n print \"\\t\", \"num_unnormal_sen: \", num_unnormal_sen\r\n print \"\\t\", \"num_unnormal_word: \", num_unnormal_word\r\n print \"end reading!\"\r\n print \"\\n\"\r\n\r\n\r\ndef load_dir_file(dir_path, list_file_name):\r\n parents = os.listdir(dir_path)\r\n\r\n for parent in parents:\r\n child = os.path.join(dir_path, parent)\r\n if os.path.isdir(child):\r\n load_dir_file(child, list_file_name)\r\n else:\r\n if child.endswith(\".json.out_v2\"):\r\n list_file_name.append(child)\r\n\r\n\r\ndef deal_with_multi_thread(list_file):\r\n threads = []\r\n\r\n for one_file in list_file:\r\n print \"start reading file: \", one_file\r\n one_thread = threading.Thread(target=split_baike_data_to_2_kinds, args=(one_file,))\r\n threads.append(one_thread)\r\n\r\n for one_thread in threads:\r\n one_thread.start()\r\n\r\n for one_thread in threads:\r\n one_thread.join()\r\n\r\n\r\ndef deal_with_multi_process(list_file, num_thread_in_one_process):\r\n processs = []\r\n groups = []\r\n one_group = []\r\n\r\n for i in range(len(list_file)):\r\n one_group.append(list_file[i])\r\n if i % num_thread_in_one_process == num_thread_in_one_process - 1:\r\n if one_group != []:\r\n groups.append(one_group)\r\n one_group = []\r\n if one_group != []:\r\n groups.append(one_group)\r\n\r\n for one_group in groups:\r\n each_process = 
multiprocessing.Process(target=deal_with_multi_thread, args=(one_group,))\r\n processs.append(each_process)\r\n\r\n for one_process in processs:\r\n one_process.start()\r\n\r\n for one_process in processs:\r\n one_process.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n list_file_name = []\r\n stat_info_list = []\r\n load_dir_file(\"./\", list_file_name)\r\n deal_with_multi_process(list_file_name, 2)\r\n\r\n"
}
] | 4 |
adgray09/spd2.31 | https://github.com/adgray09/spd2.31 | bed284ea9f946ce4278cd0ffd72263dce6a00604 | 654ba301e4c15daec703be0b3b645a6adedcfd61 | fb1299fe38cffdb94f9d554b643091db0f73d26f | refs/heads/main | 2023-03-05T05:12:40.805039 | 2021-02-18T20:36:17 | 2021-02-18T20:36:17 | 339,525,006 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6163707971572876,
"alphanum_fraction": 0.6163707971572876,
"avg_line_length": 24.350000381469727,
"blob_id": "599e52f09681e86fb086e298904351e998b14e2f",
"content_id": "bead06c3633283c55d0c6aaa76ac05d98995f0f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1014,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 40,
"path": "/simplifying-conditional-expressions/consolidate_conditional.py",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "# by Kami Bigdely\n# Consolidate conditional expressions\ndef dice(ingredients):\n print(\"diced all ingredients.\")\ndef mix_all(diced_ingredients):\n print(\"mixed all.\")\ndef add_salt():\n print('added salt.')\ndef pour(liquid):\n print('poured', liquid + '.',)\n \n \ndef lacks_ingredients(ingredients):\n \n necessary_ingredients = {\n 'cucumber': False,\n 'tomato': False,\n 'onion': False,\n 'lemon juice': False\n }\n \n for ingredient in ingredients:\n necessary_ingredients[ingredient] = True\n for _, value in necessary_ingredients.items():\n if not value:\n return True\n return False\n\ndef make_shirazi_salad(ingredients):\n if lacks_ingredients(ingredients):\n print('lacks ingredients')\n return\n dice(ingredients)\n mix_all(ingredients)\n add_salt()\n pour('lemon juice')\n print('Your yummy shirazi salad is ready!')\n\nif __name__ == \"__main__\":\n make_shirazi_salad(['cucumber', 'tomato', 'lemon juice', 'onion'])\n"
},
{
"alpha_fraction": 0.7430604696273804,
"alphanum_fraction": 0.7480427026748657,
"avg_line_length": 38.02777862548828,
"blob_id": "9d908d79201be8121aab2d651d9deec20d8d8ec2",
"content_id": "a1efe18e6b64d22bac86046e373c802d8ff27f82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1405,
"license_type": "no_license",
"max_line_length": 263,
"num_lines": 36,
"path": "/SPD-2.3-Debugging-Techniques-Lab-main/debug_log.md",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "# Debug Log\n\n**Explain how you used the the techniques covered (Trace Forward, Trace Backward, Divide & Conquer) to uncover the bugs in each exercise. Be specific!**\n\nIn your explanations, you may want to answer:\n\n- What is the expected vs. actual output?\n- If there is a stack trace, what useful information does it contain?\n- Which technique did you use, on which line numbers?\n- What assumptions did you have about each line of code, and which ones were proven to be wrong?\n\n_Example: I noticed that the program should show pizza orders once a new order is made, and that it wasn't showing any. So, I used the trace forward technique starting on line 13. I discovered the bug on line 27 was caused by a typo of 'pzza' instead of 'pizza'._\n\n_Then I noticed another bug ..._\n\n## Exercise 1\n\n[[Your answer goes here!]]\n\n# expected output was that all the pizzas there were made were going to be displayed.\n# always gave that there were no pizzas\n# figured something was wrong with how the db was set up\n\n## Exercise 2\n\n[[Your answer goes here!]]\n\n# was expecting to get the output of the weather of a city that you put in\n# got an error in the get request. It had something to do with how the logic was set up\n\n## Exercise 3\n\n# was assuming you would get the search result instead got a index out of range.\n# assumed that the logic wouldnt go out of the boundaries of the list\n\n[[Your answer goes here!]]\n"
},
{
"alpha_fraction": 0.5656565427780151,
"alphanum_fraction": 0.6042240858078003,
"avg_line_length": 31.058822631835938,
"blob_id": "c44784fd07d5c19715801d44878b95aff3e73dc4",
"content_id": "7fcee847c63ea4ffb199a954bf94cd5af5e15d90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 34,
"path": "/composing-methods2/split_temp_variable.py",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "# by Kami Bigdely\n# Split temporary variable\n\npatty = 70 # [gr]\npickle = 20 # [gr]\ntomatoes = 25 # [gr]\nlettuce = 15 # [gr]\nbuns = 95 # [gr]\nsandwich_weight = (2 * patty + 4 * pickle + 3 * tomatoes + 2 * lettuce + 2 * buns)\nprint(\"NY Burger Weight\", sandwich_weight)\n\nkimchi = 30 # [gr]\nmayo = 5 # [gr]\ngolden_fried_onion = 20 # [gr]\nsandwich_weight = (2 * patty + 4 * pickle + 3 * tomatoes + kimchi + mayo + golden_fried_onion + 2 * buns)\nprint(\"Seoul Kimchi Burger Weight\", sandwich_weight)\n\noptions = {\"patty\": 70, \"pickle\": 20, \"tomatoes\": 25, \"lettuce\": 15, \"buns\": 95, \"Kimchi\": 30, \"mayo\": 5, \"golden_fried_onions\": 20}\n\n\ndef make_sandwhich(dict):\n name_of_sandwhich = input(\"name of your sandwhich: \")\n total_weight = 0 \n for k,v in options.items():\n num_of_that_option = int(input(\"how many {}: \".format(k)))\n if num_of_that_option >= 1:\n total_weight += (num_of_that_option * v)\n # print(total_weight)\n elif num_of_that_option == 0:\n continue\n \n return name_of_sandwhich, total_weight\n\nprint(make_sandwhich(options))"
},
{
"alpha_fraction": 0.5998013615608215,
"alphanum_fraction": 0.6176762580871582,
"avg_line_length": 27.799999237060547,
"blob_id": "e2ead61cb73d20de02e67e8afe486e4211e49d64",
"content_id": "49cc729b6710f8db73b868f0f50de52c52366018",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1007,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 35,
"path": "/other-techniques/extract_class3.py",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "# by Kami Bigdely\n# Extract class\nWELL_DONE = 3000\nMEDIUM = 2500\nCOOKED_CONSTANT = 0.05\n\nclass Cooking:\n def __init__(self, time, temperature, pressure, desired_state):\n self.time = time\n self.temperature = temperature\n self.pressure = pressure\n self.desired_state = desired_state\n \n def is_criteria_satisfied(self):\n return self.is_medium() or self.is_well_done()\n \n def is_well_done(self):\n return self.desired_state == \"well-done\" and self.cooking_progress() >= WELL_DONE\n \n def cooking_progress(self):\n return self.time * self.temperature * self.pressure * COOKED_CONSTANT\n \n def is_medium(self):\n return self.desired_state == \"medium\" and self.cooking_progress() >= MEDIUM\n \n def is_done_cooking(self):\n if self.is_criteria_satisfied():\n print('cooking is done')\n else:\n print('keep cooking')\n \n \nalex_steak = Cooking(30, 100, 20, \"well-done\")\n\nalex_steak.is_done_cooking()"
},
{
"alpha_fraction": 0.6181102395057678,
"alphanum_fraction": 0.6338582634925842,
"avg_line_length": 20.08333396911621,
"blob_id": "ce4c36f8035755135426a92dedc6caccc65a50a1",
"content_id": "f95b5457a5b42006d41786b4cf643df121835687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 12,
"path": "/composing-methods2/split_temp_variable2.py",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "# By Kami Bigdely\n# Split temp variable\n\ndef save_into_db():\n user_input = input('Please enter your username: ')\n age = 2020 - user_input\n print(\"saved into database\")\n print(\"You are\",age, \"years old.\")\n return age,user_input\n\n\nsave_into_db()\n\n"
},
{
"alpha_fraction": 0.5381526350975037,
"alphanum_fraction": 0.5477911829948425,
"avg_line_length": 31.736841201782227,
"blob_id": "9a7ec560c4e20bc9bef850bba7df7bfcecb68e6c",
"content_id": "2e1d97bdd735b068f24395c1ab1e6b6a7b2b7cab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1245,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 38,
"path": "/other-techniques/extract_class2.py",
"repo_name": "adgray09/spd2.31",
"src_encoding": "UTF-8",
"text": "movies = [['Tenet', 'Vita & Virgina', 'Guardians of the Galexy', 'The Great Gatsby'],\\\n ['Ace Ventura', 'The Mask', 'Dubm and Dumber', 'The Truman Show', 'Yes Man']]\n\n# def send_hiring_email(email):\n# print(\"email sent to: \", email)\n \n# for i, value in enumerate(emails):\n# if birth_year[i] > 1985:\n# print(first_names[i], last_names[i])\n# print('Movies Played: ', end='')\n# for m in movies[i]:\n# print(m, end=', ')\n# print()\n# send_hiring_email(value)\n\nclass Person:\n def __init__(self, first_name, last_name, birth_year, email, movies):\n self.first_name = first_name\n self.last_name = last_name\n self.birth_year = birth_year\n self.email = email\n self.movies = movies\n \n def send_hiring_email(self, email):\n print(\"email sent to \", self.email)\n \n def check_movies(self):\n if self.birth_year > 1985:\n print(self.first_name, self.last_name)\n print('Movies Played: ', end='')\n for m in self.movies:\n print(m, end=', ')\n print()\n self.send_hiring_email(self.email)\n\nAlex = Person(\"Alex\", \"Gray\", 1999, \"[email protected]\", movies)\n\nAlex.check_movies()\n\n"
}
] | 6 |
GitOverHere/TicTacToe | https://github.com/GitOverHere/TicTacToe | 8696030b983b6c8ed8e6af839a1341093d074b2f | 50997e413c5ea403c07b379fa3e8434a3c057d0e | c0ab85d1bee8453b464e4c973517118eea41a6eb | refs/heads/master | 2022-12-04T19:14:11.513439 | 2020-08-01T05:36:23 | 2020-08-01T05:36:23 | 284,191,846 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4418068826198578,
"alphanum_fraction": 0.47268956899642944,
"avg_line_length": 24.809524536132812,
"blob_id": "ca7f43bed671a2f7370044a63be38f0aa435ec7e",
"content_id": "d27ba38999ae082c6ee3ad5e8fa6f3d1c71a85a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4339,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 168,
"path": "/main.py",
"repo_name": "GitOverHere/TicTacToe",
"src_encoding": "UTF-8",
"text": "import replit\nimport random\nprint('Welcome to Tic-Tac-Toe')\ngameStarted = 0\ngrid = [[\"[]\", \"[]\", \"[]\"], [\"[]\", \"[]\", \"[]\"], [\"[]\", \"[]\", \"[]\"]]\ngamemode = \"1\"\nselection = 0\ngameFinished = 0\nrunning = True\n\ndef aiMove():\n\n if(gamemode == \"1\"):\n pos1 = random.randrange(0,2)\n pos2 = random.randrange(0,2)\n while(grid[pos1][pos2] == \"X\" or grid[pos1][pos2] == \"X\"):\n pos1 = random.randrange(0,2)\n pos2 = random.randrange(0,2)\n return str(pos1)+\",\"+str(pos2)\n\n\n if(gamemode == \"2\" or gamemode == \"3\"):\n pass\n \n if(gamemode == \"4\"):\n pass\n\ndef allFilled():\n count = 0\n for a in range(len(grid)):\n for b in len(grid[a]):\n if(grid[a][b]==\"X\" or grid[a][b]==\"O\"):\n count += 1\n if(count == 9):\n print(\"It's a draw!\")\n return True\n else:\n return False\n\ndef drawGrid():\n print (\"\\a\")\n for i in grid:\n print(i)\n\ndef gameOver():\n winner = whoWon()\n if(winner == 1 or winner == 2 or allFilled == True):\n return True\n else:\n return False\n\n\ndef whoWon():\n player1Pieces = 0\n player2Pieces = 0\n for h in range(len(grid)):\n for i in range(len(grid[h])):\n if (grid[h][i] == piece1):\n player1Pieces += 1\n elif (grid[h][i] == piece2):\n player2Pieces += 1\n if (player1Pieces >= 3):\n return 1\n elif (player2Pieces >= 3):\n return 2\n else:\n player1Pieces = 0\n player2Pieces = 0\n for h in range(len(grid)):\n for i in range(len(grid[h])):\n if (grid[i][h] == piece1):\n player1Pieces += 1\n elif (grid[i][h] == piece2):\n player2Pieces += 1\n if (player1Pieces >= 3):\n return 1\n elif (player2Pieces >= 3):\n return 2\n else:\n player1Pieces = 0\n player2Pieces = 0\n for i in range(len(grid)-1):\n if (grid[i][i] == piece1):\n player1Pieces += 1\n elif (grid[i][i] == piece2):\n player2Pieces += 2\n if (player1Pieces >= 3):\n return 1\n elif (player2Pieces >= 3):\n return 2\n \n else:\n return 0\n\n\nwhile(running == True):\n while (gameStarted == 0):\n while(selection == 0):\n print(\"Please select a gamemode\")\n print(\"1. Easy \\n 2. Medium \\n 3. Hard \\n 4. Impossible \\n 5. Play with a friend \\n 0. 
Quit \\n\")\n gamemode = input()\n if(gamemode == \"0\"):\n exit()\n elif(int(gamemode) > 5 or int(gamemode) < 0 ):\n print('Invalid gamemode.')\n else:\n selection = 1\n \n\n print(\"Do you want to use 'X' or 'O'\")\n piece = input()\n replit.clear()\n if (piece == \"X\"):\n player = 1\n piece1 = \"X\"\n piece2 = \"O\"\n\n else:\n player = 2\n piece1 = \"O\"\n piece2 = \"X\" \n gameStarted = 1\n\n while (gameOver()==False):\n replit.clear()\n invalid = 1\n print(\"You are \"+piece1)\n print('\\n Your oponent is'+ piece2) \n drawGrid();\n print(\"It is player \" + str(player) + \"'s turn.\")\n while(invalid == 1):\n print('Enter X and Y cordinates like so \"2,2\"')\n if(gamemode == \"5\"):\n cords = input()\n else:\n cords = aiMove()\n if(len(cords)==3 and (0<=int(cords[0])<=2) and (0<=int(cords[2])<=2)):\n if(cords[0].isnumeric()):\n if(cords[1]==','):\n if(cords[2].isnumeric()):\n invalid = 0\n if(grid[int(cords[0])][int(cords[2])]=='[]'):\n if(player == 1):\n print('moved player'+str(player)+'s piece')\n grid[int(cords[0])][int(cords[2])]= piece1;\n if(gameOver() == False):\n player = 2\n \n elif(player == 2):\n print('moved player'+str(player)+'s piece')\n grid[int(cords[0])][int(cords[2])]= piece2;\n if(gameOver() == False):\n player = 1\n \n else:\n print(\"That space is already occupied!\")\n \n else:\n invalid = 1\n drawGrid()\n print(\"\\n Game Over: \"+\"Player \"+ str(player)+\"wins!!!\")\n print(\"Press '1' to start a new game. \\n Press '0' to exit \\n\")\n i = input()\n if(i == \"0\"):\n exit()\n else:\n grid = [[\"[]\", \"[]\", \"[]\"], [\"[]\", \"[]\", \"[]\"], [\"[]\", \"[]\", \"[]\"]]\n replit.clear()\n\n\n\n"
}
] | 1 |
herbertizidro/captcha_ocr | https://github.com/herbertizidro/captcha_ocr | 3494674fc53628e4dd864be240105d37a7c5a197 | e2c881aab28565fef80f9fb4c382fa2eb84838bb | 693d38ceeb522c00e5479e9cee267c1f4e5049f2 | refs/heads/master | 2021-07-12T08:06:01.148111 | 2020-10-10T04:58:09 | 2020-10-10T04:58:09 | 205,964,570 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6269797086715698,
"alphanum_fraction": 0.6420719027519226,
"avg_line_length": 32.75471878051758,
"blob_id": "d4c57b7b1c91c1696af0a5f5c01e9ee8ec7ce259",
"content_id": "bb6e0df18cb1e910a405e4320cc54dd5eb547671",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5378,
"license_type": "permissive",
"max_line_length": 190,
"num_lines": 159,
"path": "/ocr_captcha_pytesseract.py",
"repo_name": "herbertizidro/captcha_ocr",
"src_encoding": "UTF-8",
"text": "import os\nimport cv2\nimport PIL\nimport time\nimport numpy as np\nimport pytesseract\nfrom PIL import Image\nfrom io import BytesIO\nfrom selenium import webdriver\n\n\nwhile True:\n #acessa o site e maximiza a tela\n browser = webdriver.Chrome(r\"C:\\Users\\<-USER->\\Downloads\\chromedriver_win32\\chromedriver.exe\")\n browser.get(\"<-SITE->\")\n browser.maximize_window()\n \n \n #login\n cpf = \"<-CPF->\"\n cpf_input = browser.find_element_by_xpath('<-XPATH->')\n for i in cpf:\n time.sleep(0.2)\n cpf_input.send_keys(i)\n senha_input = browser.find_element_by_xpath('<-XPATH->')\n senha_input.send_keys(\"<-SENHA->\")\n #login\n\n\n #localiza a imagem e recorta de acordo com coordenadas obtidas\n captcha = browser.find_element_by_xpath('<-XPATH->')\n localizacao = captcha.location\n tamanho = captcha.size\n \n png = browser.get_screenshot_as_png()\n img = Image.open(BytesIO(png)) #abre a imagem na memória\n \n esquerda = localizacao[\"x\"]\n superior = localizacao[\"y\"]\n direita = localizacao[\"x\"] + tamanho[\"width\"]\n inferior = localizacao[\"y\"] + tamanho[\"height\"]\n \n img = img.crop((esquerda, superior, direita, inferior)) #corte\n img.save(\"screenshot.png\")\n\n\n #redimensiona\n altura = 400\n img = Image.open('screenshot.png')\n a_percent = (altura / float(img.size[1]))\n largura = int((float(img.size[0]) * float(a_percent)))\n img = img.resize((largura, altura), Image.ANTIALIAS)\n img.save('screenshot.png')\n\n\n #binariza\n img = cv2.imread(\"screenshot.png\", 0)\n _, thresh = cv2.threshold(img, 150, 255, cv2.THRESH_BINARY)\n thresh = cv2.bilateralFilter(thresh, 11, 17, 17)\n cv2.imwrite(\"screenshot.png\", thresh)\n\n\n #tratamento para encontrar os contornos\n img = cv2.imread(\"screenshot.png\")\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n _,thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV) #o fundo deve ser preto e o objeto branco\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) #https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html\n dilatacao = cv2.dilate(thresh, kernel, iterations = 13)\n contornos, h = cv2.findContours(dilatacao, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n\n #organiza as coordenadas de acordo com a posição de cada caractere no eixo X\n #ou seja, não importa a posição do caractere no eixo Y, o que estiver\n #mais à esquerda será o primeiro\n coordenadas = []\n posicao_x = [] #array para o X de cada coordenada\n for c in contornos:\n x, y, w, h = cv2.boundingRect(c)\n coordenadas.append([x, y, w, h])\n posicao_x.append(x)\n posicao_x = sorted(posicao_x, key=int) #ordena o array\n #ordena as coordenadas de acordo com a ordem do array \"posicao_x\"\n coordenadas_ordenadas = []\n for pos in posicao_x:\n for coor in coordenadas:\n if pos == coor[0]:\n coordenadas_ordenadas.append(coor)\n\n\n #recorta as letras e salva na pasta \"letras\"\n if os.path.isdir(\"./letras\") == False:\n os.mkdir(\"./letras\")\n if len(coordenadas_ordenadas) == 3: #nesse caso serão sempre 3 caracteres\n aux = 0 \n for coor in coordenadas_ordenadas:\n aux += 1\n x, y, w, h = coor\n recorte_letra = img[y:y + h, x:x + w]\n cv2.imwrite(\"./letras/\" + str(aux) + \".png\", recorte_letra)\n else:\n browser.quit()\n continue\n\n\n #OCR\n txt_captcha = []\n letras_dir = os.listdir(\"./letras\")\n for letra in letras_dir:\n letra = os.getcwd() + \"\\\\letras\\\\\" + letra\n pytesseract.pytesseract.tesseract_cmd = \"C:\\\\Program Files 
(x86)\\\\Tesseract-OCR\\\\tesseract.exe\"\n ocr = pytesseract.image_to_string(Image.open(letra), config = \"--psm 10\")\n txt_captcha.append(ocr)\n\n\n #limpa o resultado\n txt_captcha = [i for i in txt_captcha if i.isalnum()] #só o que for letra ou número\n txt_captcha = str(txt_captcha).strip(\"[]\").replace(\",\", \"\").replace(\"'\", \"\").replace(\" \", \"\")\n \n\n #input captcha\n captcha_txt_input = browser.find_element_by_xpath('<-XPATH->')\n captcha_txt_input.send_keys(txt_captcha.upper())\n\n \n #clica em 'entrar'\n browser.find_element_by_xpath('<-XPATH->').click()\n browser.implicitly_wait(60)\n\n\n try:\n mensagem_modal = browser.find_element_by_class_name(\"<-CLASS_NAME->\").text\n browser.quit()\n except:\n #\n # MAIS ALGUMA COISA AQUI ...\n #\n browser.quit()\n break\n \n\n\n\n'''\nPage segmentation modes(psm):\n 0 Orientation and script detection (OSD) only.\n 1 Automatic page segmentation with OSD.\n 2 Automatic page segmentation, but no OSD, or OCR.\n 3 Fully automatic page segmentation, but no OSD. (Default)\n 4 Assume a single column of text of variable sizes.\n 5 Assume a single uniform block of vertically aligned text.\n 6 Assume a single uniform block of text.\n 7 Treat the image as a single text line.\n 8 Treat the image as a single word.\n 9 Treat the image as a single word in a circle.\n 10 Treat the image as a single character.\n 11 Sparse text. Find as much text as possible in no particular order.\n 12 Sparse text with OSD.\n 13 Raw line. Treat the image as a single text line, bypassing hacks that are Tesseract-specific.\n'''\n"
},
{
"alpha_fraction": 0.7189324498176575,
"alphanum_fraction": 0.7489574551582336,
"avg_line_length": 36.46875,
"blob_id": "fd9a36c66152dff32ef7244f7020320a9f59f60e",
"content_id": "2f7c0e03e15d7aeb5d43977030fb0bd81e4b67a0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1212,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 32,
"path": "/teste_fontes_definidas.py",
"repo_name": "herbertizidro/captcha_ocr",
"src_encoding": "UTF-8",
"text": "import PIL\nimport cv2\nimport pytesseract\nimport numpy as np\nfrom PIL import Image\n\n\naltura = 150\nimg = Image.open(\"imgTeste.jpg\")\na_percent = (altura / float(img.size[1]))\nlargura = int((float(img.size[0]) * float(a_percent)))\nimg = img.resize((largura, altura), Image.ANTIALIAS)\nimg.save(\"imgTeste.png\")\n\n\nimg = cv2.imread(\"imgTeste.png\", 0)\nlimiar, img_limiar = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)\nimg_limiar = cv2.bilateralFilter(img_limiar, 11, 17, 17)\n#img_limiar = cv2.GaussianBlur(img_limiar,(5,5), cv2.BORDER_DEFAULT)\ncv2.imwrite(\"imgTeste.png\", img_limiar)\n\n\n#OCR\npytesseract.pytesseract.tesseract_cmd = \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe\"\ntxt_captcha = pytesseract.image_to_string(\"imgTeste.jpg\", lang = \"Arial+Comic\")\n\nprint(txt_captcha)\n\n#com a adição de Arial.traineddata e Comic.traineddata em C:\\Program Files (x86)\\Tesseract-OCR\\tessdata\n#observei uma melhora significativa na precisão, porém O ainda é confundido com 0 e Q com O, por exemplo.\n#se preciso reconhecer \"7BS0\" em uma imagem, obtenho \"7BSO\" como resultado. a adição de modelos ao método\n#image_to_string é uma boa opção quando sabemos quais são as fontes utilizadas na imagem em questão.\n"
},
{
"alpha_fraction": 0.6433680057525635,
"alphanum_fraction": 0.6527958512306213,
"avg_line_length": 28.576923370361328,
"blob_id": "7c61148b926438439211892d52bdb217d625fb82",
"content_id": "933c41e775f9c731930e0b67c84a9558102f6792",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3079,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 104,
"path": "/ocr_captcha_google_api.py",
"repo_name": "herbertizidro/captcha_ocr",
"src_encoding": "UTF-8",
"text": "import io\nimport os\nimport PIL\nimport cv2\nimport time\n#import pytesseract\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nfrom selenium import webdriver\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\nwhile True:\n\n browser = webdriver.Chrome(r\"C:\\Users\\<USER>\\Downloads\\chromedriver_win32\\chromedriver.exe\")\n browser.get(\"<SITE>\")\n browser.maximize_window()\n\n\n #login\n cpf_input = browser.find_element_by_xpath('<XPATH>')\n for i in \"<CPF>\":\n time.sleep(1) #evitar problemas com - e . adicionados automaticamente no input\n cpf_input.send_keys(i)\n\n senha_input = browser.find_element_by_xpath('<XPATH>')\n senha_input.send_keys(\"<SENHA>\")\n #login\n\n\n captcha = browser.find_element_by_xpath('<XPATH>')\n localizacao = captcha.location\n tamanho = captcha.size\n png = browser.get_screenshot_as_png()\n\n img = Image.open(BytesIO(png)) #abre a imagem na memória\n\n #corta a imagem de acordo com coordenadas obtidas\n esquerda = localizacao[\"x\"]\n superior = localizacao[\"y\"]\n direita = localizacao[\"x\"] + tamanho[\"width\"]\n inferior = localizacao[\"y\"] + tamanho[\"height\"]\n\n img = img.crop((esquerda, superior, direita, inferior)) #corte\n img.save(\"screenshot.png\")\n\n\n #redimensiona\n altura = 150\n img = Image.open(\"screenshot.png\")\n a_percent = (altura / float(img.size[1]))\n largura = int((float(img.size[0]) * float(a_percent)))\n img = img.resize((largura, altura), Image.ANTIALIAS)\n img.save(\"screenshot.png\")\n\n\n #binariza\n img = cv2.imread(\"screenshot.png\", 0)\n limiar, img_limiar = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)\n #img_limiar = cv2.bilateralFilter(img_limiar, 11, 17, 17)\n cv2.imwrite(\"screenshot.png\", img_limiar)\n\n\n #VISION API\n #https://cloud.google.com/vision/?hl=pt-br\n #https://googleapis.github.io/google-cloud-python/latest/vision/index.html\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"<...>.json\"\n client = vision.ImageAnnotatorClient()\n file_name = os.path.join(os.path.dirname(__file__), \"screenshot.png\")\n with io.open(file_name, \"rb\") as image_file:\n content = image_file.read()\n image = types.Image(content = content)\n response = client.text_detection(image = image)\n\n aux = []\n for text in response.text_annotations:\n aux.append(text.description)\n #VISION API\n\n \n #limpa o resultado\n txt_captcha = [i for i in aux if i.isalnum()] #só o que for letra ou número\n txt_captcha = str(txt_captcha).strip(\"[]\").replace(\",\", \"\").replace(\"'\", \"\").replace(\" \", \"\")\n\n\n #input captcha\n captcha_txt_input = browser.find_element_by_xpath('<XPATH>')\n captcha_txt_input.send_keys(txt_captcha.upper())\n\n #clicar em 'entrar'\n browser.find_element_by_xpath('<XPATH>').click()\n browser.implicitly_wait(60)\n\n\n try:\n mensagem_erro_modal = browser.find_element_by_class_name(\"<CLASS>\").text\n browser.quit()\n except:\n #\n # MAIS ALGUMA COISA AQUI\n #\n browser.quit()\n break\n"
},
{
"alpha_fraction": 0.78899085521698,
"alphanum_fraction": 0.78899085521698,
"avg_line_length": 26.25,
"blob_id": "6ca00173a4f43fe5385d9f1f97a499f9786913fa",
"content_id": "82763991b1cf8114fbed88211e18279d555f461c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 4,
"path": "/README.md",
"repo_name": "herbertizidro/captcha_ocr",
"src_encoding": "UTF-8",
"text": "# captcha_ocr\n script principal: ocr_captcha_pytesseract.py\n \n script alternativo: ocr_captcha_google_api.py\n"
}
] | 4 |
rowhit/magma | https://github.com/rowhit/magma | b388db4c94f94310cfcdd6431a69bc3c26b8b8fa | 37e9a63da42b03848fc8201bdd1639823331a19a | fa9e680b72492893218a2a86055c5a596f3a2909 | refs/heads/master | 2020-03-19T07:12:26.139576 | 2018-06-04T23:07:44 | 2018-06-04T23:07:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5563910007476807,
"alphanum_fraction": 0.5563910007476807,
"avg_line_length": 6.705882549285889,
"blob_id": "9348f51c1268cdd379dedd7fd0bfa461c501b160",
"content_id": "6005c6188af365304290bcdd891593900754a034",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 17,
"path": "/doc/types.md",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "## Magma Types, Functions and Operators\n\n### Bit\n\n### Array\n\n### Tuple\n\n### Bits\n\n### UInt\n\n### SInt\n\n### Functions\n\n### Operators\n\n\n"
},
{
"alpha_fraction": 0.5871632099151611,
"alphanum_fraction": 0.6022186875343323,
"avg_line_length": 33.10810852050781,
"blob_id": "821785991a9e63d3f98edf1627e7b502fe221f25",
"content_id": "137858d7f725a666484b93f871f44fbd695c6554",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1262,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 37,
"path": "/examples/adder.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from full_adder import FullAdder\n\nimport magma as m\nfrom magma.bitutils import int2seq\n\ndef DefineAdder(N):\n T = m.UInt(N)\n class Adder(m.Circuit):\n name = \"Adder{}\".format(N)\n IO = [\"a\", m.In(T), \"b\", m.In(T), \"cin\", m.In(m.Bit),\n \"out\", m.Out(T), \"cout\", m.Out(m.Bit)]\n @classmethod\n def definition(io):\n adders = [FullAdder() for _ in range(N)]\n circ = m.braid(adders, foldargs={\"cin\":\"cout\"})\n m.wire(io.a, circ.a)\n m.wire(io.b, circ.b)\n m.wire(io.cin, circ.cin)\n m.wire(io.cout, circ.cout)\n m.wire(io.out, circ.out)\n return Adder\n\n\nif __name__ == \"__main__\":\n from magma.simulator.python_simulator import PythonSimulator\n from magma.bit_vector import BitVector\n\n Adder4 = DefineAdder(4)\n simulator = PythonSimulator(Adder4)\n simulator.set_value(Adder4.a, BitVector(2, num_bits=4))\n simulator.set_value(Adder4.b, BitVector(3, num_bits=4))\n simulator.set_value(Adder4.cin, True)\n simulator.evaluate()\n #assert simulator.get_value(Adder4.out) == int2seq(6, 4)\n assert simulator.get_value(Adder4.out) == BitVector(6, num_bits=4)\n assert simulator.get_value(Adder4.cout) == False\n print(\"Success!\")\n"
},
{
"alpha_fraction": 0.557208240032196,
"alphanum_fraction": 0.5697940587997437,
"avg_line_length": 29.10344886779785,
"blob_id": "b73fa41d20f0cd19eed80f8894db4b844dbf957b",
"content_id": "5d9ff845783bca9e7b18f1e776feabfb39f15a16",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 874,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 29,
"path": "/tests/test_wire/test_errors.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import *\n\n\ndef test_input_as_output(capsys):\n Buf = DeclareCircuit('Buf', \"I\", In(Bit), \"O\", Out(Bit))\n\n main = DefineCircuit(\"main\", \"I\", In(Bit), \"O\", Out(Bit))\n\n buf = Buf()\n wire(main.O, buf.I)\n out, err = capsys.readouterr()\n err_lines = err.splitlines()\n assert err_lines[-1] == \"=\" * 80\n assert err_lines[-2] == \"Error: using an input as an output main.O\"\n assert err_lines[-3] == \" wire(main.O, buf.I)\"\n\n\ndef test_output_as_input(capsys):\n A = DeclareCircuit('A', \"I\", In(Bit), \"O\", Out(Bit))\n\n main = DefineCircuit(\"main\", \"I\", In(Bit), \"O\", Out(Bit))\n\n a = A()\n wire(main.I, a.O)\n out, err = capsys.readouterr()\n err_lines = err.splitlines()\n assert err_lines[-1] == \"=\" * 80\n assert err_lines[-2] == \"Error: using an output as an input inst0.O\"\n assert err_lines[-3] == \" wire(main.I, a.O)\"\n\n"
},
{
"alpha_fraction": 0.5778443217277527,
"alphanum_fraction": 0.5928143858909607,
"avg_line_length": 21.200000762939453,
"blob_id": "c54367e7a720b2906542ff79ea345a68b356aa2b",
"content_id": "45d9d9b5619f38a07aee9c9d85eef550facd4cb2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 30,
"path": "/tests/test_wire/test_const.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import *\nfrom magma.testing import check_files_equal\n\n\ndef test_const0():\n Buf = DeclareCircuit('Buf', \"I\", In(Bit), \"O\", Out(Bit))\n\n main = DefineCircuit(\"main\", \"O\", Out(Bit))\n\n buf = Buf()\n\n wire(0, buf.I)\n wire(buf.O, main.O)\n\n compile(\"build/const0\", main)\n assert check_files_equal(__file__, \"build/const0.v\", \"gold/const0.v\")\n\n\ndef test_const1():\n Buf = DeclareCircuit('Buf', \"I\", In(Bit), \"O\", Out(Bit))\n\n main = DefineCircuit(\"main\", \"O\", Out(Bit))\n\n buf = Buf()\n\n wire(1, buf.I)\n wire(buf.O, main.O)\n\n compile(\"build/const1\", main)\n assert check_files_equal(__file__, \"build/const1.v\", \"gold/const1.v\")\n\n\n"
},
{
"alpha_fraction": 0.6046948432922363,
"alphanum_fraction": 0.6046948432922363,
"avg_line_length": 18.01785659790039,
"blob_id": "53923cf8cd6912b45211f370a2cff55465a65672",
"content_id": "0f0d4e2c3b1c50ef27c3fa6ffb31a05a176688be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1065,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 56,
"path": "/magma/operator.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import BitType, BitsType, UIntType, SIntType\n\nclass OperatorMantleNotImportedError(RuntimeError):\n def __init__(self):\n self.message = \"Operator overloading not activated, please import mantle to use this feature\"\n\ndef default(self, other):\n raise OperatorMantleNotImportedError()\n\ndef unary_default(self):\n raise OperatorMantleNotImportedError()\n\nunary_ops = [\n \"__invert__\"\n]\n\nfor op in unary_ops:\n setattr(BitType, op, unary_default)\n setattr(BitsType, op, unary_default)\n\nbitwise_ops = [\n \"__and__\",\n \"__or__\",\n \"__xor__\",\n # \"__eq__\"\n]\n\nfor op in bitwise_ops:\n setattr(BitType, op, default)\n setattr(BitsType, op, default)\n\nshift_ops = [\n \"__lshift__\",\n \"__rshift__\",\n]\n\nfor op in shift_ops:\n setattr(BitsType, op, default)\n\narithmetic_ops = [\n \"__add__\",\n \"__sub__\",\n \"__mul__\",\n \"__div__\"\n]\n\nrelational_ops = [\n \"__lt__\",\n \"__le__\",\n \"__gt__\",\n \"__ge__\",\n]\n\nfor op in arithmetic_ops + relational_ops:\n setattr(UIntType, op, default)\n setattr(SIntType, op, default)\n"
},
{
"alpha_fraction": 0.6556876301765442,
"alphanum_fraction": 0.6556876301765442,
"avg_line_length": 24.38793182373047,
"blob_id": "3a897812d005d267c6ebd5791f00aae296b23c96",
"content_id": "302a7a995db3b6e256af2a9b1bd8301e72ae5a5f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2945,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 116,
"path": "/tests/test_type/test_clock.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import In, Out, Flip, \\\n Clock, ClockType, ClockKind, \\\n Reset, ResetType, ResetKind, \\\n Enable, EnableType, EnableKind\n\ndef test_clock():\n assert isinstance(Clock, ClockKind)\n assert Clock == Clock\n assert str(Clock) == 'Clock'\n\ndef test_clock_flip():\n ClockOut = Out(Clock)\n assert isinstance(ClockOut, ClockKind)\n assert str(ClockOut) == 'Out(Clock)'\n\n ClockIn = In(Clock)\n assert isinstance(ClockIn, ClockKind)\n assert str(ClockIn) == 'In(Clock)'\n\n clockin = In(ClockIn)\n clockout = Out(ClockIn)\n assert clockout == ClockOut\n assert clockin == ClockIn\n\n clockin = In(ClockOut)\n clockout = Out(ClockOut)\n assert clockout == ClockOut\n assert clockin == ClockIn\n\n clockin = Flip(ClockOut)\n clockout = Flip(ClockIn)\n assert clockout == ClockOut\n assert clockin == ClockIn\n\ndef test_clock_val():\n b = Clock(name=\"a\")\n assert str(b) == \"a\"\n assert isinstance(b, ClockType)\n assert isinstance(b, Clock)\n assert not b.isinput()\n assert not b.isoutput()\n\ndef test_reset():\n assert isinstance(Reset, ResetKind)\n assert Reset == Reset\n assert str(Reset) == 'Reset'\n\ndef test_reset_flip():\n ResetOut = Out(Reset)\n assert isinstance(ResetOut, ResetKind)\n assert str(ResetOut) == 'Out(Reset)'\n\n ResetIn = In(Reset)\n assert isinstance(ResetIn, ResetKind)\n assert str(ResetIn) == 'In(Reset)'\n\n resetin = In(ResetIn)\n resetout = Out(ResetIn)\n assert resetout == ResetOut\n assert resetin == ResetIn\n\n resetin = In(ResetOut)\n resetout = Out(ResetOut)\n assert resetout == ResetOut\n assert resetin == ResetIn\n\n resetin = Flip(ResetOut)\n resetout = Flip(ResetIn)\n assert resetout == ResetOut\n assert resetin == ResetIn\n\ndef test_reset_val():\n b = Reset(name=\"a\")\n assert str(b) == \"a\"\n assert isinstance(b, ResetType)\n assert isinstance(b, Reset)\n assert not b.isinput()\n assert not b.isoutput()\n assert not b.isinout()\n\ndef test_enable():\n assert isinstance(Enable, EnableKind)\n assert Enable == Enable\n assert str(Enable) == 'Enable'\n\ndef test_enable_flip():\n EnableOut = Out(Enable)\n assert isinstance(EnableOut, EnableKind)\n assert str(EnableOut) == 'Out(Enable)'\n\n EnableIn = In(Enable)\n assert isinstance(EnableIn, EnableKind)\n assert str(EnableIn) == 'In(Enable)'\n\n enablein = In(EnableIn)\n enableout = Out(EnableIn)\n assert enableout == EnableOut\n assert enablein == EnableIn\n\n enablein = In(EnableOut)\n enableout = Out(EnableOut)\n assert enableout == EnableOut\n assert enablein == EnableIn\n\n enablein = Flip(EnableOut)\n enableout = Flip(EnableIn)\n assert enableout == EnableOut\n assert enablein == EnableIn\n\ndef test_enable_val():\n b = Enable(name=\"a\")\n assert str(b) == \"a\"\n assert isinstance(b, EnableType)\n assert isinstance(b, Enable)\n assert not b.isinput()\n assert not b.isoutput()\n"
},
{
"alpha_fraction": 0.7875000238418579,
"alphanum_fraction": 0.7875000238418579,
"avg_line_length": 21.85714340209961,
"blob_id": "efb7b7f3096141fd0f2f1580a755e457f95363c5",
"content_id": "ee4c6c3626dcfd2fcf5900accb1b58e477689216",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/tests/test_coreir/test_coreir.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import pytest\ncoreir = pytest.importorskip(\"coreir\")\nfrom magma import *\nfrom magma.testing import check_files_equal\n\n\n# FIXME: Write some generic coreir tests\n"
},
{
"alpha_fraction": 0.6536756157875061,
"alphanum_fraction": 0.6543757319450378,
"avg_line_length": 28.95804214477539,
"blob_id": "bc6d58de59621a03a6943f27b4989b3cec2dc101",
"content_id": "e5f0baa2b79be1b5045d0d6d3bbfb37e4369bbe8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4285,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 143,
"path": "/magma/fromverilog.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom collections import namedtuple\n\nfrom mako.template import Template\nfrom pyverilog.vparser.parser import VerilogParser, Node, Input, Output, ModuleDef, Ioport, Port, Decl\nimport pyverilog.vparser.parser as parser\nfrom pyverilog.dataflow.visit import NodeVisitor\n\nfrom .t import In, Out, InOut\nfrom .bit import Bit\nfrom .array import Array\nfrom .circuit import DeclareCircuit, DefineCircuit, EndDefine\n\n__all__ = ['DeclareFromVerilog']\n__all__ += ['DeclareFromVerilogFile']\n__all__ += ['DeclareFromTemplatedVerilog']\n__all__ += ['DeclareFromTemplatedVerilogFile']\n__all__ += ['DefineFromVerilog']\n__all__ += ['DefineFromVerilogFile']\n__all__ += ['DefineFromTemplatedVerilog']\n__all__ += ['DefineFromTemplatedVerilogFile']\n\n\nclass ModuleVisitor(NodeVisitor):\n def __init__(self):\n self.nodes = []\n\n def visit_ModuleDef(self, node):\n self.nodes.append(node)\n return node\n\ndef get_type(io):\n if isinstance(io, Input):\n direction = In\n elif isinstance(io, Output):\n direction = Out\n else:\n direction = InOut\n\n if io.width is None:\n type_ = Bit\n else:\n msb = int(io.width.msb.value)\n lsb = int(io.width.lsb.value)\n\n type_ = Array(msb-lsb+1, Bit)\n return direction(type_)\n\n\ndef ParseVerilogModule(node):\n args = []\n ports = []\n for port in node.portlist.ports:\n if isinstance(port, Ioport):\n io = port.first\n args.append(io.name)\n args.append(get_type(io))\n elif isinstance(port, Port):\n ports.append(port.name)\n else:\n raise NotImplementedError(type(port))\n\n if ports:\n assert not args, \"Can we have mixed declared and undeclared types in a Verilog module?\"\n for port in ports:\n for child in node.children():\n if isinstance(child, Decl):\n first_child = child.children()[0]\n if isinstance(first_child, (parser.Input, parser.Output, parser.Inout)) and \\\n first_child.name == port:\n args.append(first_child.name)\n args.append(get_type(first_child))\n break\n else:\n raise Exception(f\"Could not find type declaration for port {port}\")\n\n return node.name, args\n\ndef FromVerilog(source, func):\n parser = VerilogParser()\n\n ast = parser.parse(source)\n #ast.show()\n\n v = ModuleVisitor()\n v.visit(ast)\n\n if func == DefineCircuit:\n # only allow a single verilog module\n assert len(v.nodes) == 1\n modules = []\n for node in v.nodes:\n name, args = ParseVerilogModule(node)\n circuit = func(name, *args)\n if func == DefineCircuit:\n # inline source\n circuit.verilogFile = source\n EndDefine()\n modules.append(circuit)\n return modules\n\ndef FromVerilogFile(file, func):\n if file is None:\n return None\n verilog = open(file).read()\n return FromVerilog(verilog, func)\n\ndef FromTemplatedVerilog(templatedverilog, func, **kwargs):\n verilog = Template(templatedverilog).render(**kwargs)\n return FromVerilog(verilog, func)\n\ndef FromTemplatedVerilogFile(file, func, **kwargs):\n if file is None:\n return None\n templatedverilog = open(file).read()\n return FromTemplatedVerilog(templatedverilog, func, **kwargs)\n\n\ndef DeclareFromVerilog(source):\n return FromVerilog(source, DeclareCircuit)\n\ndef DeclareFromVerilogFile(file):\n return FromVerilogFile(file, DeclareCircuit)\n\ndef DeclareFromTemplatedVerilog(source, **kwargs):\n return FromTemplatedVerilog(source, DeclareCircuit, **kwargs)\n\ndef DeclareFromTemplatedVerilogFile(file, **kwargs):\n return FromTemplatedVerilogFile(file, DeclareCircuit, **kwargs)\n\n\ndef DefineFromVerilog(source):\n return FromVerilog(source, 
DefineCircuit)\n\ndef DefineFromVerilogFile(file):\n return FromVerilogFile(file, DefineCircuit)\n\ndef DefineFromTemplatedVerilog(source, **kwargs):\n return FromTemplatedVerilog(source, DefineCircuit, **kwargs)\n\ndef DefineFromTemplatedVerilogFile(file, **kwargs):\n return FromTemplatedVerilogFile(file, DefineCircuit, **kwargs)\n\n"
},
{
"alpha_fraction": 0.5734078884124756,
"alphanum_fraction": 0.5763280391693115,
"avg_line_length": 29.83333396911621,
"blob_id": "6f30ef4c486978eca160cd29db4afbd004c6d566",
"content_id": "878ca4c113d089f0080c85abe5431ea02cf8cbc7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16095,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 522,
"path": "/magma/circuit.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import sys\nimport six\nimport inspect\nfrom functools import wraps\nif sys.version_info > (3, 0):\n from functools import reduce\ntry:\n from functools import lru_cache\nexcept ImportError:\n from backports.functools_lru_cache import lru_cache\nimport operator\nfrom collections import namedtuple\nfrom .interface import *\nfrom .wire import *\nfrom .t import Flip\nfrom .array import ArrayType\nfrom .tuple import TupleType\nfrom .bit import VCC, GND\nfrom .debug import get_callee_frame_info\nfrom .logging import warning\n\n__all__ = ['AnonymousCircuitType']\n__all__ += ['AnonymousCircuit']\n\n__all__ += ['CircuitType']\n__all__ += ['Circuit']\n__all__ += ['DeclareCircuit']\n__all__ += ['DefineCircuit', 'EndDefine', 'EndCircuit']\n\n__all__ += ['isdefinition']\n__all__ += ['isprimitive']\n__all__ += ['CopyInstance']\n__all__ += ['circuit_type_method']\n__all__ += ['circuit_generator']\n\n\ncircuit_type_method = namedtuple('circuit_type_method', ['name', 'definition'])\n\ndef circuit_to_html(cls):\n if isdefinition(cls):\n # Avoid circular dependency so dot backend can use passes\n from .backend.dot import to_html\n return to_html(cls)\n else:\n return repr(cls)\n\n# create an attribute for each port\ndef setports(self, ports):\n #print('setports', ports)\n for name, port in ports.items():\n #print(self, port, type(port))\n if isinstance(name, str):\n setattr(self, name, port)\n\n#\n# Metaclass for creating circuits\n#\nclass CircuitKind(type):\n\n def __new__(metacls, name, bases, dct):\n #print('CircuitKind new:', name)\n\n # override circuit class name\n if 'name' not in dct:\n dct['name'] = name\n name = dct['name']\n\n if 'primitive' not in dct:\n dct['primitive'] = False\n\n if 'coreir_lib' not in dct:\n dct['coreir_lib'] = \"global\"\n\n # create a new circuit class\n cls = type.__new__(metacls, name, bases, dct)\n\n for method in dct.get('circuit_type_methods', []):\n setattr(cls, method.name, method.definition)\n\n # create interface for this circuit class\n if hasattr(cls, 'IO') and not isinstance(cls.IO, InterfaceKind):\n # turn IO attribite into an Interface\n cls.IO = DeclareInterface(*cls.IO)\n cls.interface = cls.IO\n\n return cls\n\n def __call__(cls, *largs, **kwargs):\n #print('CircuitKind call:', largs, kwargs)\n debug_info = get_callee_frame_info()\n self = super(CircuitKind, cls).__call__(*largs, **kwargs)\n self.set_debug_info(debug_info)\n\n # instance interface for this instance\n if hasattr(cls, 'IO'):\n self.setinterface(cls.IO(inst=self))\n\n return self\n\n def __str__(cls):\n return cls.__name__\n\n def __repr__(cls):\n\n name = cls.__name__\n args = str(cls.IO)\n if hasattr(cls,\"instances\"):\n s = '{} = DefineCircuit(\"{}\", {})\\n'.format(name, name, args)\n\n # emit instances\n for instance in cls.instances:\n s += repr(instance) + '\\n'\n\n # emit wires from instances\n for instance in cls.instances:\n s += repr(instance.interface)\n\n # for input in cls.interface.inputs():\n s += repr( cls.interface )\n\n s += \"EndCircuit()\"\n else:\n s = '{} = DeclareCircuit(\"{}\", {})'.format(name, name, args)\n\n return s\n\n # def _repr_html_(cls):\n # return circuit_to_html(cls)\n\n def find(cls, defn):\n name = cls.__name__\n if not isdefinition(cls):\n return defn\n for i in cls.instances:\n type(i).find(defn)\n if name not in defn:\n defn[name] = cls\n return defn\n\n#\n# Abstract base class for circuits\n#\[email protected]_metaclass(CircuitKind)\nclass AnonymousCircuitType(object):\n\n def __init__(self, *largs, **kwargs):\n self.largs = largs\n 
self.kwargs = kwargs\n if hasattr(self, 'default_kwargs'):\n for key in self.default_kwargs:\n if key not in kwargs:\n self.kwargs[key] = self.default_kwargs[key]\n\n self.name = kwargs['name'] if 'name' in kwargs else \"\"\n\n self.loc = kwargs['loc'] if 'loc' in kwargs else None\n if self.loc and len(self.loc) == 2:\n self.loc = (self.loc[0], self.loc[1], 0)\n\n self.interface = None\n self.defn = None\n self.used = False\n self.is_instance = True\n\n self.filename = None\n self.lineno = None\n\n def set_debug_info(self, debug_info):\n self.filename = debug_info[0] # TODO: Change debug_info to a namedtuple\n self.lineno = debug_info[1]\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n args = []\n for k, v in self.interface.ports.items():\n args.append('\"{}\"'.format(k))\n args.append(repr(v))\n return '{}({})'.format(str(type(self)), ', '.join(args))\n\n #return '{} = {}({}) # {} {}'.format(str(self), str(type(self)),\n # ', '.join(args), self.filename, self.lineno)\n\n # def _repr_html_(self):\n # return circuit_to_html(self)\n\n def __getitem__(self, key):\n return self.interface[key]\n\n # wire a list of outputs to the circuit's inputs\n def wireoutputs(self, outputs, debug_info):\n inputs = self.interface.inputs()\n ni = len(inputs)\n no = len(outputs)\n if ni != no:\n warning(\"Warning: number of inputs is not equal to the number of outputs\")\n warning(\"Warning: only %d of the %d arguments will be wired\" % (ni, no))\n for i in range(min(ni,no)):\n wire(outputs[i], inputs[i], debug_info)\n\n # wire a single output to the circuit's inputs\n def wire(self, output, debug_info):\n\n if hasattr(output, 'interface'):\n # wire the circuit's outputs to this circuit's inputs\n self.wireoutputs(output.interface.outputs(), debug_info)\n else:\n # wire the output to this circuit's input (should only have 1 input)\n inputs = self.interface.inputs()\n ni = len(inputs)\n if ni == 0:\n warning(\"Warning: wiring an output to a circuit with no input arguments\")\n return\n if ni != 1:\n warning(\"Warning: wiring an output to a circuit with more than one input argument\")\n inputs[0].wire( output, debug_info )\n\n def __call__(input, *outputs, **kw):\n debug_info = get_callee_frame_info()\n\n no = len(outputs)\n if len(outputs) == 1:\n input.wire(outputs[0], debug_info)\n else:\n input.wireoutputs(outputs, debug_info)\n\n # wire up extra arguments, name to name\n #\n # this code should be changed to use clock types ...\n #\n for key, value in kw.items():\n if key == 'enable': key = 'CE'\n if key == 'reset': key = 'RESET'\n if key == 'set': key = 'SET' # NYI\n if key == 'ce': key = 'CE' # depreciated\n if hasattr(input, key):\n i = getattr(input, key)\n wire( value, getattr(input, key), debug_info)\n else:\n warning('Warning: circuit does not have {}'.format(key))\n\n o = input.interface.outputs()\n return o[0] if len(o) == 1 else tuple(o)\n\n def setinterface(self, interface):\n setports(self, interface.ports)\n self.interface = interface\n return self\n\n def on(self):\n self.used = True\n return self\n\n def off(self):\n self.used = False\n return self\n\n def rename(self, name):\n self.name = name\n return self\n\n def isclocked(self):\n return self.interface.isclocked()\n\n def clockargs(self):\n return self.interface.clockargs()\n\n def inputargs(self):\n return self.interface.inputargs()\n\n def outputargs(self):\n return self.interface.outputargs()\n\n#\n# AnonymousCircuits are like macros - the circuit instances are not placed\n#\ndef AnonymousCircuit(*decl):\n if 
len(decl) == 1:\n decl = decl[0]\n return AnonymousCircuitType().setinterface(Interface(decl))\n\n\n#\n# Placed circuit - instances placed in a definition\n#\nclass CircuitType(AnonymousCircuitType):\n def __init__(self, *largs, **kwargs):\n super(CircuitType, self).__init__(*largs, **kwargs)\n\n # Circuit instances are placed if within a definition\n global currentDefinition\n if currentDefinition:\n currentDefinition.place(self)\n\n def __repr__(self):\n args = []\n for k, v in self.kwargs.items():\n if isinstance(v, tuple):\n # { # Format identifier\n # 0: # first parameter\n # # # use \"0x\" prefix\n # 0 # fill with zeroes\n # {1} # to a length of n characters (including 0x), defined by the second parameter\n # x # hexadecimal number, using lowercase letters for a-f\n # } # End of format identifier\n if len(v) == 2:\n v = \"{0:#0{1}x}\".format(v[0], v[1] // 4)\n else:\n v = '\"{}\"'.format(v)\n args.append(\"%s=%s\"%(k, v))\n return '{} = {}({})'.format(str(self), str(type(self)), ', '.join(args))\n #return '{} = {}({}) # {} {}'.format(str(self), str(type(self)),\n # cls.filename, cls.lineno)\n\n# DeclareCircuit Factory\ndef DeclareCircuit(name, *decl, **args):\n dct = dict(\n IO=decl,\n is_definition=False,\n primitive=args.get('primitive', True),\n stateful=args.get('stateful', False),\n simulate=args.get('simulate'),\n firrtl_op=args.get('firrtl_op'),\n circuit_type_methods=args.get('circuit_type_methods', []),\n coreir_lib=args.get('coreir_lib', \"global\"),\n coreir_name=args.get('coreir_name', name),\n coreir_genargs=args.get('coreir_genargs', None),\n coreir_configargs=args.get('coreir_configargs', {}),\n verilog_name=args.get('verilog_name', name),\n default_kwargs=args.get('default_kwargs', {})\n )\n return CircuitKind( name, (CircuitType,), dct )\n\n\n\n# Maintain a current definition and stack of nested definitions\n\ncurrentDefinition = None\ncurrentDefinitionStack = []\n\ndef pushDefinition(defn):\n global currentDefinition\n if currentDefinition:\n currentDefinitionStack.append(currentDefinition)\n currentDefinition = defn\n\ndef popDefinition():\n global currentDefinition\n if len(currentDefinitionStack) > 0:\n currentDefinition = currentDefinitionStack.pop()\n else:\n currentDefinition = None\n\n# A circuit is a definition if it has instances\ndef isdefinition(circuit):\n 'Return whether a circuit is a module definition'\n return circuit.is_definition\n\ndef isprimitive(circuit):\n return circuit.primitive\n\n\nclass DefineCircuitKind(CircuitKind):\n def __new__(metacls, name, bases, dct):\n\n if 'name' not in dct:\n # Check if we are a subclass of something other than Circuit\n for base in bases:\n if base is not Circuit:\n if not issubclass(base, Circuit):\n raise Exception(\"Must subclass from Circuit or a \"\n \"subclass of Circuit. 
{}\".format(base))\n # If so, we will inherit the name of the first parent\n dct['name'] = base.name\n break\n else:\n dct['name'] = name\n name = dct['name']\n\n self = CircuitKind.__new__(metacls, name, bases, dct)\n\n self.verilog = None\n self.verilogFile = None\n self.verilogLib = None\n\n self.verilog_name = dct.get('verilog_name', name)\n self.coreir_name = dct.get('coreir_name', name)\n self.coreir_genargs = dct.get('coreir_genargs', None)\n self.coreir_configargs = dct.get('coreir_configargs', {})\n self.default_kwargs = dct.get('default_kwargs', {})\n\n self.firrtl = None\n\n self._instances = []\n self._is_definition = dct.get('is_definition', False)\n self.is_instance = False\n\n if hasattr(self, 'IO'):\n # instantiate interface\n self.interface = self.IO(defn=self)\n setports(self, self.interface.ports)\n\n # create circuit definition\n if hasattr(self, 'definition'):\n pushDefinition(self)\n self.definition()\n self._is_definition = True\n EndCircuit()\n\n return self\n\n @property\n def is_definition(self):\n return self._is_definition or self.verilog or self.verilogFile\n\n @property\n def instances(self):\n return self._instances\n\n #\n # place a circuit instance in this definition\n #\n def place(cls, inst):\n if not inst.name:\n inst.name = 'inst' + str(len(cls.instances))\n # osnr's suggested name\n #inst.name = 'inst' + str(len(cls.instances)) + '_' + inst.__class__.name\n #print('naming circuit instance', inst.name)\n #print('placing', inst, 'in', cls)\n inst.defn = cls\n inst.stack = inspect.stack()\n cls.instances.append(inst)\n\n\n# Register graphviz repr if running in IPython.\n# There's a bug in IPython which breaks visual reprs\n# on types.\ntry:\n ip = get_ipython()\n html_formatter = ip.display_formatter.formatters['text/html']\n html_formatter.for_type(DefineCircuitKind, circuit_to_html)\n html_formatter.for_type(CircuitKind, circuit_to_html)\nexcept NameError:\n # Not running in IPython right now?\n pass\n\n\[email protected]_metaclass(DefineCircuitKind)\nclass Circuit(CircuitType):\n pass\n\n\n# DefineCircuit Factory\ndef DefineCircuit(name, *decl, **args):\n debug_info = get_callee_frame_info()\n global currentDefinition\n if currentDefinition:\n currentDefinitionStack.append(currentDefinition)\n\n dct = dict(IO = decl,\n is_definition = True,\n primitive = args.get('primitive', False),\n stateful = args.get('stateful', False),\n simulate = args.get('simulate'),\n filename = debug_info[0],\n lineno = debug_info[1],\n verilog_name = args.get('verilog_name', name),\n coreir_name = args.get('coreir_name', name),\n coreir_lib = args.get('coreir_lib', \"global\"),\n coreir_genargs = args.get('coreir_genargs', None),\n coreir_configargs = args.get('coreir_configargs', None),\n default_kwargs = args.get('default_kwargs', {}))\n\n currentDefinition = DefineCircuitKind( name, (Circuit,), dct)\n return currentDefinition\n\ndef EndDefine():\n if currentDefinition:\n debug_info = get_callee_frame_info()\n currentDefinition.end_circuit_filename = debug_info[0]\n currentDefinition.end_circuit_lineno = debug_info[1]\n popDefinition()\n\nEndCircuit = EndDefine\n\ndef CopyInstance(instance):\n circuit = type(instance)\n new_instance = circuit()\n new_instance.kwargs = instance.kwargs\n return new_instance\n\ndef hex(i):\n if i < 10: return chr(ord('0')+i)\n else: return chr(ord('A')+i-10)\n\n\ndef hstr(init, nbits):\n bits = 1 << int(nbits)\n format = \"0x\"\n nformat = []\n for i in range(bits//4):\n nformat.append(init%16)\n init //= 16\n nformat.reverse()\n if 
nformat:\n return format + reduce(operator.add, map(hex, nformat))\n return format\n\n\nGeneratorArguments = namedtuple('GeneratorArguments', ['args', 'kwargs'])\n\n\ndef circuit_generator(func):\n @lru_cache(maxsize=None)\n @wraps(func)\n def wrapped(*args, **kwargs):\n result = func(*args, **kwargs)\n # Store arguments to generate the circuit\n result._generator_arguments = GeneratorArguments(args, kwargs)\n return result\n return wrapped\n"
},
{
"alpha_fraction": 0.6001494526863098,
"alphanum_fraction": 0.6098654866218567,
"avg_line_length": 28.733333587646484,
"blob_id": "cee811b96237c96ba4c7edddbca0ff9f3cccd084",
"content_id": "636b1ecdeba3fdb7c4edb57bfe546b25f8569807",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1338,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 45,
"path": "/magma/logging.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport traceback\nimport inspect\nimport sys\n\nlog = logging.getLogger(\"magma\")\n\n\ndef get_original_wire_call_stack_frame():\n for frame in inspect.stack():\n if sys.version_info < (3, 5):\n function = inspect.getframeinfo(frame[0]).function\n else:\n function = frame.function\n if function not in [\"wire\", \"connect\",\n \"get_original_wire_call_stack_frame\",\n \"error\", \"warn\"]:\n break\n if sys.version_info < (3, 5):\n return frame[0]\n else:\n return frame.frame\n\n\ndef info(message, *args, **kwargs):\n log.info(message, *args, **kwargs)\n\n\ndef warning(message, *args, **kwargs):\n log.warning(message, *args, **kwargs)\n\n\ndef error(message, include_wire_traceback=False, *args, **kwargs):\n if include_wire_traceback:\n sys.stderr.write(\"=\"*80 + \"\\n\")\n stack_frame = get_original_wire_call_stack_frame()\n traceback.print_stack(f=stack_frame, limit=10, file=sys.stderr)\n # FIXME: In python 2, log.error doesn't go to stderr by default\n # log.error(message, *args, **kwargs)\n print(message, file=sys.stderr, *args, **kwargs)\n if include_wire_traceback:\n sys.stderr.write(\"=\"*80 + \"\\n\")\n"
},
{
"alpha_fraction": 0.7285714149475098,
"alphanum_fraction": 0.7285714149475098,
"avg_line_length": 19,
"blob_id": "9a3d417681925c830b17d80fbc941c43313e6656",
"content_id": "fb468a5dc67d22c52c86028212e5a356c76a78b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 7,
"path": "/conftest.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import pytest\n\n\[email protected](autouse=True)\ndef magma_test():\n import magma.config\n magma.config.set_compile_dir('callee_file_dir')\n"
},
{
"alpha_fraction": 0.5646888613700867,
"alphanum_fraction": 0.5646888613700867,
"avg_line_length": 27.55371856689453,
"blob_id": "daba6613d073cd2f191fb39a2553f45b55871115",
"content_id": "bdd615e71adab3e8b01498a9c1fe93cebd28ab80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3455,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 121,
"path": "/magma/bits.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from .compatibility import IntegerTypes\nfrom .ref import AnonRef\nfrom .bit import Bit, BitOut, VCC, GND, BitType, BitKind\nfrom .array import ArrayType, ArrayKind\nfrom .bit_vector import BitVector\n\n__all__ = ['Bits', 'BitsType', 'BitsKind']\n__all__ += ['UInt', 'UIntType', 'UIntKind']\n__all__ += ['SInt', 'SIntType', 'SIntKind']\n\nclass BitsType(ArrayType):\n def __repr__(self):\n if not isinstance(self.name, AnonRef):\n return repr(self.name)\n ts = [repr(t) for t in self.ts]\n return 'bits([{}])'.format(', '.join(ts))\n\n def bits(self):\n if not self.const():\n raise Exception(\"Not a constant\")\n def convert(x):\n if x is VCC:\n return True\n assert x is GND\n return False\n return [convert(x) for x in self.ts]\n\n def __int__(self):\n if not self.const():\n raise Exception(\"Can't call __int__ on a non-constant\")\n return BitVector(self.bits()).as_int()\n\n\nclass BitsKind(ArrayKind):\n def __str__(cls):\n if cls.isinput(): return \"In(Bits({}))\".format(cls.N)\n if cls.isoutput(): return \"Out(Bits({}))\".format(cls.N)\n return \"Bits({})\".format(cls.N)\n\n def qualify(cls, direction):\n if cls.T.isoriented(direction):\n return cls\n return Bits(cls.N, cls.T.qualify(direction))\n\n def flip(cls):\n return Bits(cls.N, cls.T.flip())\n\n\ndef Bits(N, T=None):\n if T is None:\n T = Bit\n assert isinstance(N, IntegerTypes)\n name = 'Bits({})'.format(N)\n return BitsKind(name, (BitsType,), dict(N=N, T=T))\n\n\nclass UIntType(BitsType):\n def __repr__(self):\n if not isinstance(self.name, AnonRef):\n return repr(self.name)\n ts = [repr(t) for t in self.ts]\n return 'uint([{}])'.format(', '.join(ts))\n\n\nclass UIntKind(BitsKind):\n def __str__(cls):\n if cls.isinput(): return \"In(UInt({}))\".format(cls.N)\n if cls.isoutput(): return \"Out(UInt({}))\".format(cls.N)\n return \"UInt({})\".format(cls.N)\n\n def qualify(cls, direction):\n if cls.T.isoriented(direction):\n return cls\n return UInt(cls.N, cls.T.qualify(direction))\n\n def flip(cls):\n return UInt(cls.N, cls.T.flip())\n\n\ndef UInt(N, T=None):\n if T is None:\n T = Bit\n assert isinstance(N, IntegerTypes)\n name = 'UInt({})'.format(N)\n return UIntKind(name, (UIntType,), dict(N=N, T=T))\n\n\nclass SIntType(BitsType):\n def __repr__(self):\n if not isinstance(self.name, AnonRef):\n return repr(self.name)\n ts = [repr(t) for t in self.ts]\n return 'sint([{}])'.format(', '.join(ts))\n\n def __int__(self):\n if not self.const():\n raise Exception(\"Can't call __int__ on a non-constant\")\n return BitVector(self.bits(), signed=True).as_int()\n\n\nclass SIntKind(BitsKind):\n def __str__(cls):\n if cls.isinput(): return \"In(SInt({}))\".format(cls.N)\n if cls.isoutput(): return \"Out(SInt({}))\".format(cls.N)\n return \"SInt({})\".format(cls.N)\n\n def qualify(cls, direction):\n if cls.T.isoriented(direction):\n return cls\n return SInt(cls.N, cls.T.qualify(direction))\n\n def flip(cls):\n return SInt(cls.N, cls.T.flip())\n\n\ndef SInt(N, T=None):\n if T is None:\n T = Bit\n assert isinstance(N, IntegerTypes)\n name = 'SInt({})'.format(N)\n return SIntKind(name, (SIntType,), dict(N=N, T=T))\n"
},
{
"alpha_fraction": 0.6171428561210632,
"alphanum_fraction": 0.6242856979370117,
"avg_line_length": 26.959999084472656,
"blob_id": "26e56c38acb803874f09bd37bc5aad9e20048f8b",
"content_id": "0474cbdfa365b35e370492ab37911748cce746c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 25,
"path": "/magma/debug.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import inspect\nimport sys\n\n\ndef get_callee_frame_info():\n callee_frame = inspect.stack()[2]\n if sys.version_info < (3, 5):\n debug_info = callee_frame[1], callee_frame[2]\n else:\n debug_info = callee_frame.filename, callee_frame.lineno\n return debug_info\n\n\ndef debug_wire(fn):\n \"\"\"\n Automatically populates the `debug_info` argument for a wire call if it's\n not already passed as an argument\n \"\"\"\n # TODO: We could check that fn has the correct interface \n # wire(i, o, debug_info)\n def wire(i, o, debug_info=None):\n if debug_info is None:\n debug_info = get_callee_frame_info()\n return fn(i, o, debug_info)\n return wire\n\n"
},
{
"alpha_fraction": 0.7141255736351013,
"alphanum_fraction": 0.7141255736351013,
"avg_line_length": 17.54166603088379,
"blob_id": "c1abf88321f645a318dd819d41e38e9106151435",
"content_id": "2a61e4b56c7895ab62258e5827f5366808c38768",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 48,
"path": "/magma/__init__.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import os\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n from backports.functools_lru_cache import lru_cache\n\ndef cache_definition(fn):\n return lru_cache(maxsize=None)(fn)\n\n\n# lowest-level wiring abstraction\nfrom .port import *\n\n# types\nfrom .t import *\nfrom .bit import *\n#from .bitutils import *\nfrom .array import *\nfrom .bits import *\nfrom .tuple import *\nfrom .clock import *\nfrom .conversions import *\nfrom .interface import *\n\nfrom .circuit import *\nfrom .braid import *\n\nfrom .wire import *\n\n# verilog\nfrom .fromverilog import *\nfrom .backend.verilog import *\nfrom .compile import *\n\n#from .tests import *\n\n#print('import magma')\n\nfrom .logging import warning\n\n\nmantle_target = None\ndef set_mantle_target(t):\n global mantle_target\n if mantle_target is not None:\n warning('setting mantle target again', mantle_target, t )\n mantle_target = t\n \n"
},
{
"alpha_fraction": 0.607279360294342,
"alphanum_fraction": 0.6094631552696228,
"avg_line_length": 28.074073791503906,
"blob_id": "a562cc0b6a5be29e31f266949eac7cb9e276fc52",
"content_id": "e676aa8fcea013e1fe75bccfdf805f5217f773c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5495,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 189,
"path": "/magma/conversions.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from collections import Sequence, Mapping, OrderedDict\nfrom .compatibility import IntegerTypes\nfrom .t import In, Out\nfrom .bit import _BitKind, _BitType, Bit, BitKind, BitType, VCC, GND\nfrom .clock import ClockType, Clock, \\\n Reset, ResetType, \\\n Enable, EnableType\nfrom .array import ArrayType, Array\nfrom .bits import BitsType, Bits, UIntType, UInt, SIntType, SInt\nfrom .tuple import TupleType, Tuple\nfrom .bitutils import int2seq\n\n__all__ = ['bit']\n__all__ += ['clock', 'reset', 'enable']\n\n__all__ += ['array']\n__all__ += ['bits', 'uint', 'sint']\n\n__all__ += ['tuple_']\n\n__all__ += ['concat', 'repeat']\n__all__ += ['sext', 'zext']\n\ndef convertbit(value, totype, T):\n if isinstance(value, totype):\n return value\n\n if not isinstance(value, (_BitType, ArrayType, TupleType, IntegerTypes)):\n raise ValueError(\n \"bit can only be used on a Bit, an Array, or an int; not {}\".format(type(value)))\n\n if isinstance(value, (ArrayType, TupleType)):\n if len(value) != 1:\n raise ValueError(\n \"bit can only be used on arrays and tuples of length 1; not {}\".format(len(value)))\n value = value[0]\n if not isinstance(value, _BitType):\n raise ValueError(\n \"bit can only be used on arrays and tuples of bits; not {}\".format(type(value)))\n\n assert isinstance(value, (IntegerTypes, _BitType))\n\n if isinstance(value, IntegerTypes):\n value = VCC if value else GND\n\n if value.isinput(): b = In(T)()\n elif value.isoutput(): b = Out(T)()\n else: b = T()\n b.port = value.port\n return b\n\n\ndef bit(value):\n return convertbit(value, BitType, Bit)\n\ndef clock(value):\n return convertbit(value, ClockType, Clock)\n\ndef reset(value):\n return convertbit(value, ResetType, Reset)\n\ndef enable(value):\n return convertbit(value, EnableType, Enable)\n\n\n\ndef convertbits(value, n, totype, totypeconstructor, checkbit):\n if isinstance(value, totype):\n return value\n\n if not isinstance(value, (_BitType, TupleType, ArrayType, IntegerTypes, Sequence)):\n raise ValueError(\n \"bits can only be used on a Bit, an Array, a Tuple, an int, or a Sequence; not : {}\".format(type(value)))\n\n if isinstance(value, IntegerTypes):\n if n is None:\n n = max(value.bit_length(),1)\n ts = int2seq(value, n)\n elif isinstance(value, Sequence):\n ts = list(value)\n elif isinstance(value, _BitType):\n if n is None:\n ts = [value]\n else:\n ts = n*[value]\n else:\n ts = [value[i] for i in range(len(value))]\n\n # create list of types\n Ts = []\n for t in ts:\n T = type(t)\n if T in IntegerTypes:\n T = Out(Bit)\n Ts.append(T)\n\n # check that they are all the same\n for t in Ts:\n # this should be converted to error()\n if checkbit:\n if not isinstance(t, _BitKind):\n raise ValueError(\n \"bits can only be used on Arrays or Tuples containing bits, not : {}\".format(type(value)))\n if t != T:\n raise ValueError(\"All fields in a Array or a Tuple must be the same type\")\n\n assert len(Ts)\n\n return totypeconstructor(len(Ts), T)(*ts)\n\ndef array(value, n=None):\n return convertbits(value, n, ArrayType, Array, False)\n\ndef bits(value, n=None):\n return convertbits(value, n, BitsType, Bits, True)\n\ndef uint(value, n=None):\n if isinstance(value, SIntType):\n raise ValueError( \"uint cannot convert SInt\" )\n return convertbits(value, n, UIntType, UInt, True)\n\ndef sint(value, n=None):\n if isinstance(value, UIntType):\n raise ValueError( \"uint cannot convert SInt\" )\n return convertbits(value, n, SIntType, SInt, True)\n\n\n#\n# convert value to a tuple\n# *value = tuple from positional arguments\n# 
**kwargs = tuple from keyword arguments\n#\ndef tuple_(value, n=None):\n if isinstance(value, TupleType):\n return value\n\n if not isinstance(value, (_BitType, ArrayType, IntegerTypes, Sequence, Mapping)):\n raise ValueError(\n \"bit can only be used on a Bit, an Array, or an int; not {}\".format(type(value)))\n\n decl = OrderedDict()\n args = []\n\n if isinstance(value, IntegerTypes):\n if n is None:\n n = max(value.bit_length(),1)\n value = int2seq(value, n)\n elif isinstance(value, _BitType):\n value = [value]\n elif isinstance(value, ArrayType):\n value = [value[i] for i in range(len(value))]\n\n if isinstance(value, Sequence):\n ts = list(value)\n for i in range(len(ts)):\n args.append(ts[i])\n decl[i] = type(ts[i])\n elif isinstance(value, Mapping):\n for k, v in value.items():\n args.append(v)\n decl[k] = type(v)\n\n return Tuple(decl)(*args)\n\n\ndef concat(*arrays):\n ts = [t for a in arrays for t in a.ts] # flatten\n return array(ts)\n\ndef repeat(value, n):\n if isinstance(value, BitType):\n repeats = bits(n*[value])\n else:\n repeats = array(n*[value])\n return repeats\n\ndef zext(value, n):\n assert isinstance(value, (UIntType, SIntType, BitsType))\n if isinstance(value, UIntType):\n zeros = uint(0,n)\n elif isinstance(value, SIntType):\n zeros = sint(0,n)\n elif isinstance(value, BitsType):\n zeros = bits(0,n)\n return concat(zeros,value)\n\ndef sext(value, n):\n assert isinstance(value, SIntType)\n return sint(concat(array(value[-1], n), array(value)))\n"
},
{
"alpha_fraction": 0.6653639674186707,
"alphanum_fraction": 0.6917440295219421,
"avg_line_length": 37.61320877075195,
"blob_id": "b83c1291c5a658e6d12dd2cad5520be583578d44",
"content_id": "c92bec1c07cadcd67db6b0d56c5880dd1b271145",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4094,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 106,
"path": "/tests/test_type/test_conversions.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom collections import OrderedDict\nfrom magma import \\\n GND, VCC, \\\n bit, BitType, \\\n clock, ClockType, \\\n reset, ResetType, \\\n enable, EnableType, \\\n array, ArrayType, \\\n bits, BitsType, \\\n uint, UIntType, \\\n sint, SIntType, \\\n tuple_, TupleType\n\ndef test_bit():\n assert isinstance(bit(0), BitType)\n assert isinstance(bit(1), BitType)\n assert isinstance(bit(VCC), BitType)\n assert isinstance(bit(GND), BitType)\n assert isinstance(bit(bit(0)), BitType)\n assert isinstance(bit(clock(0)), BitType)\n assert isinstance(bit(reset(0)), BitType)\n assert isinstance(bit(enable(0)), BitType)\n assert isinstance(bit(bits(0,1)), BitType)\n assert isinstance(bit(uint(0,1)), BitType)\n assert isinstance(bit(sint(0,1)), BitType)\n\ndef test_enable():\n assert isinstance(enable(0), EnableType)\n assert isinstance(enable(1), EnableType)\n assert isinstance(enable(VCC), EnableType)\n assert isinstance(enable(GND), EnableType)\n assert isinstance(enable(bit(0)), EnableType)\n assert isinstance(enable(clock(0)), EnableType)\n assert isinstance(enable(reset(0)), EnableType)\n assert isinstance(enable(enable(0)), EnableType)\n assert isinstance(enable(bits(0,1)), EnableType)\n assert isinstance(enable(uint(0,1)), EnableType)\n assert isinstance(enable(sint(0,1)), EnableType)\n\ndef test_reset():\n assert isinstance(reset(0), ResetType)\n assert isinstance(reset(1), ResetType)\n assert isinstance(reset(VCC), ResetType)\n assert isinstance(reset(GND), ResetType)\n assert isinstance(reset(bit(0)), ResetType)\n assert isinstance(reset(clock(0)), ResetType)\n assert isinstance(reset(enable(0)), ResetType)\n assert isinstance(reset(reset(0)), ResetType)\n assert isinstance(reset(bits(0,1)), ResetType)\n assert isinstance(reset(uint(0,1)), ResetType)\n assert isinstance(reset(sint(0,1)), ResetType)\n\ndef test_clock():\n assert isinstance(clock(0), ClockType)\n assert isinstance(clock(1), ClockType)\n assert isinstance(clock(VCC), ClockType)\n assert isinstance(clock(GND), ClockType)\n assert isinstance(clock(bit(0)), ClockType)\n assert isinstance(clock(clock(0)), ClockType)\n assert isinstance(clock(reset(0)), ClockType)\n assert isinstance(clock(enable(0)), ClockType)\n assert isinstance(clock(bits(0,1)), ClockType)\n assert isinstance(clock(uint(0,1)), ClockType)\n assert isinstance(clock(sint(0,1)), ClockType)\n\ndef test_array():\n assert isinstance(array(1,4), ArrayType)\n assert isinstance(array([1,0,0,0]), ArrayType)\n assert isinstance(array(VCC), ArrayType)\n assert isinstance(array(array(1,4)), ArrayType)\n assert isinstance(array(uint(1,4)), ArrayType)\n assert isinstance(array(sint(1,4)), ArrayType)\n\ndef test_bits():\n assert isinstance(bits(1,4), BitsType)\n assert isinstance(bits([1,0,0,0]), BitsType)\n assert isinstance(bits(VCC), BitsType)\n assert isinstance(bits(array(1,4)), BitsType)\n assert isinstance(bits(uint(1,4)), BitsType)\n assert isinstance(bits(sint(1,4)), BitsType)\n\ndef test_uint():\n assert isinstance(uint(1,4), UIntType)\n assert isinstance(uint([1,0,0,0]), UIntType)\n assert isinstance(uint(VCC), UIntType)\n assert isinstance(uint(array(1,4)), UIntType)\n assert isinstance(uint(bits(1,4)), UIntType)\n #assert isinstance(uint(sint(1,4)), UIntType)\n\ndef test_sint():\n assert isinstance(sint(1,4), SIntType)\n assert isinstance(sint([1,0,0,0]), SIntType)\n assert isinstance(sint(VCC), SIntType)\n assert isinstance(sint(array(1,4)), SIntType)\n assert isinstance(sint(bits(1,4)), SIntType)\n #assert isinstance(sint(sint(1,4)), 
SIntType)\n\ndef test_tuple():\n assert isinstance(tuple_(OrderedDict(x=0, y=1)), TupleType)\n assert isinstance(tuple_([0,1]), TupleType)\n assert isinstance(tuple_(VCC), TupleType)\n assert isinstance(tuple_(array(1,4)), TupleType)\n assert isinstance(tuple_(bits(1,4)), TupleType)\n assert isinstance(tuple_(sint(1,4)), TupleType)\n assert isinstance(tuple_(uint(1,4)), TupleType)\n\n"
},
{
"alpha_fraction": 0.514157235622406,
"alphanum_fraction": 0.5163224339485168,
"avg_line_length": 25.56637191772461,
"blob_id": "7745980c9cca55473a18ab6efd0d804475a83031",
"content_id": "26c6d6bc0f8d9f49bcaaeca75332204a37c68cb0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6004,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 226,
"path": "/magma/array.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from collections import Sequence\nfrom .logging import error\nfrom .ref import AnonRef, ArrayRef\nfrom .t import Type, Kind\nfrom .compatibility import IntegerTypes\nfrom .bit import Bit, BitOut, VCC, GND, BitType, BitKind\nfrom .bitutils import int2seq, seq2int\nfrom .debug import debug_wire, get_callee_frame_info\n\n__all__ = ['ArrayType', 'ArrayKind', 'Array']\n\n#\n# Create an Array\n#\nclass ArrayType(Type):\n def __init__(self, *largs, **kwargs):\n\n Type.__init__(self, **kwargs)\n\n if isinstance(largs, Sequence) and len(largs) > 0:\n assert len(largs) == self.N\n self.ts = []\n for t in largs:\n if isinstance(t, IntegerTypes):\n t = VCC if t else GND\n assert type(t) == self.T\n self.ts.append(t)\n else:\n self.ts = []\n for i in range(self.N):\n T = self.T\n t = T(name=ArrayRef(self,i))\n self.ts.append(t)\n\n def __eq__(self, rhs):\n if not isinstance(rhs, ArrayType): return False\n return self.ts == rhs.ts\n\n __hash__ = Type.__hash__\n\n def __repr__(self):\n if not isinstance(self.name, AnonRef):\n return repr(self.name)\n ts = [repr(t) for t in self.ts]\n return 'array([{}])'.format(', '.join(ts))\n\n\n def __len__(self):\n return self.N\n\n def __getitem__(self, key):\n if isinstance(key, ArrayType) and all(t in {VCC, GND} for t in key.ts):\n key = seq2int([0 if t is GND else 1 for t in key.ts])\n if isinstance(key,slice):\n return array([self[i] for i in range(*key.indices(len(self)))])\n else:\n if not (-self.N <= key and key < self.N):\n raise IndexError\n\n return self.ts[key]\n\n def __add__(self, other):\n other_len = other.N\n total = self.N + other_len\n res_bits = []\n for i in range(total):\n res_bits.append(self[i] if i < self.N else other[i - self.N])\n return array(res_bits)\n\n def __call__(self, o):\n return self.wire(o, get_callee_frame_info())\n\n @classmethod\n def isoriented(cls, direction):\n return cls.T.isoriented(direction)\n\n def as_list(self):\n return [self[i] for i in range(len(self))]\n\n\n @debug_wire\n def wire(i, o, debug_info):\n # print('Array.wire(', o, ', ', i, ')')\n\n if not isinstance(o, ArrayType):\n error('Wiring Error: wiring {} ({}) to {} ({})'.format(repr(o), type(o), repr(i), type(i)), include_wire_traceback=True)\n return\n\n if i.N != o.N:\n error('Wiring Error: Arrays must have the same length {} != {}'.format(i.N, o.N), include_wire_traceback=True)\n return\n\n for k in range(len(i)):\n i[k].wire(o[k], debug_info)\n\n def driven(self):\n for t in self.ts:\n if not t.driven():\n return False\n return True\n\n def wired(self):\n for t in self.ts:\n if not t.wired():\n return False\n return True\n\n # test whether the values refer a whole array\n def iswhole(self, ts):\n\n n = len(ts)\n\n for i in range(n):\n if ts[i].anon():\n #print('not an inst or defn')\n return False\n\n for i in range(n):\n # elements must be an array reference\n if not isinstance(ts[i].name, ArrayRef):\n #print('not an array ref')\n return False\n\n for i in range(1,n):\n # elements must refer to the same array\n if ts[i].name.array is not ts[i-1].name.array:\n return False\n\n if n > 0 and n != ts[0].name.array.N:\n # must use all of the elements of the base array\n return False\n\n for i in range(n):\n # elements should be numbered consecutively\n if ts[i].name.index != i:\n return False\n\n return True\n\n\n def trace(self):\n ts = [t.trace() for t in self.ts]\n\n for t in ts:\n if t is None:\n return None\n\n if self.iswhole(ts):\n return ts[0].name.array\n\n return array(ts)\n\n def value(self):\n ts = [t.value() for t in self.ts]\n\n for t 
in ts:\n if t is None:\n return None\n\n if self.iswhole(ts):\n return ts[0].name.array\n\n return array(ts)\n\n def const(self):\n for t in self.ts:\n if not t.const():\n return False\n\n return True\n\n def flatten(self):\n return sum([t.flatten() for t in self.ts], [])\n\n\nclass ArrayKind(Kind):\n def __init__(cls, name, bases, dct):\n Kind.__init__( cls, name, bases, dct)\n\n def __str__(cls):\n s = \"Array(%d,%s)\" % (cls.N, cls.T)\n #if cls.isinput(): s = 'In({})'.format(s)\n #if cls.isoutput(): s = 'Out({})'.format(s)\n #if cls.isinout(): s = 'InOut({})'.format(s)\n return s\n\n def __eq__(cls, rhs):\n if not isinstance(rhs, ArrayKind): return False\n\n if cls.N != rhs.N: return False\n if cls.T != rhs.T: return False\n\n return True\n\n __ne__ = Kind.__ne__\n __hash__ = Kind.__hash__\n\n def __len__(cls):\n return cls.N\n\n def __getitem__(cls, key):\n if isinstance(key,slice):\n return array([cls[i] for i in xrange(*key.indices(len(cls)))])\n else:\n if not (0 <= key and key < cls.N):\n raise IndexError\n\n return cls.ts[key]\n\n def qualify(cls, direction):\n if cls.T.isoriented(direction):\n return cls\n return Array(cls.N, cls.T.qualify(direction))\n\n def flip(cls):\n return Array(cls.N, cls.T.flip())\n\n\ndef Array(N,T):\n assert isinstance(N, IntegerTypes)\n assert isinstance(T, Kind)\n name = 'Array(%d,%s)' % (N,str(T))\n return ArrayKind(name, (ArrayType,), dict(N=N, T=T))\n\n\nfrom .conversions import array\n"
},
{
"alpha_fraction": 0.8108108043670654,
"alphanum_fraction": 0.8108108043670654,
"avg_line_length": 36,
"blob_id": "0a22afc1e69615dcb87090f54c56eff75eaa7407",
"content_id": "cd7a5db3e826a4b3109db0f678e0be8a66209def",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 1,
"path": "/magma/testing/__init__.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from .utils import check_files_equal\n"
},
{
"alpha_fraction": 0.5776930451393127,
"alphanum_fraction": 0.5815061926841736,
"avg_line_length": 29.55339813232422,
"blob_id": "f967a0b80497c43c4c2d0d45dfc3d76469039dd9",
"content_id": "a3339090fb36f8a56fe67fb8aee903498ccc65c8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3147,
"license_type": "permissive",
"max_line_length": 220,
"num_lines": 103,
"path": "/magma/testing/verilator.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from .function import testvectors\nimport magma.config as config\nimport inspect\nimport os\nimport subprocess\n\n__all__ = ['harness', 'compile']\n\ndef harness(circuit,tests):\n\n assert len(circuit.interface.ports.keys()) == len(tests[0])\n\n source = '''\\\n#include \"V{name}.h\"\n#include \"verilated.h\"\n#include <cassert>\n#include <iostream>\n\nint main(int argc, char **argv, char **env) {{\n Verilated::commandArgs(argc, argv);\n V{name}* top = new V{name};\n'''.format(name=circuit.__name__)\n\n source += '''\n unsigned int tests[{}][{}] = {{\n'''.format(len(tests), len(tests[0]))\n\n for test in tests:\n testvector = ', '.join([t.as_binary_string() for t in test])\n #testvector += ', {}'.format(int(func(*test[:nargs])))\n source += '''\\\n {{ {} }}, \n'''.format(testvector)\n source += '''\\\n };\n'''\n\n source += '''\n for(int i = 0; i < {}; i++) {{\n unsigned int* test = tests[i];\n'''.format(len(tests))\n\n i = 0\n for name, port in circuit.interface.ports.items():\n if port.isoutput():\n source += '''\\\n top->{} = test[{}];\n'''.format(name,i)\n i += 1\n\n source += '''\\\n top->eval();\n'''\n\n i = 0\n for name, port in circuit.interface.ports.items():\n if port.isinput():\n source += '''\\\n assert(top->{} == test[{}]);\n'''.format(name,i)\n i += 1\n source += '''\\\n }\n'''\n\n source += '''\n delete top;\n std::cout << \"Success\" << std::endl;\n exit(0);\n}'''\n\n return source\n\ndef compile(basename, circuit, tests, input_ranges=None):\n if config.get_compile_dir() == 'callee_file_dir':\n (_, filename, _, _, _, _) = inspect.getouterframes(inspect.currentframe())[1]\n file_path = os.path.dirname(filename)\n filename = os.path.join(file_path, basename)\n else:\n filename = basename\n\n if callable(tests):\n tests = testvectors(circuit, tests, input_ranges)\n verilatorcpp = harness(circuit, tests)\n\n with open(filename, \"w\") as f:\n f.write(verilatorcpp)\n\ndef run_verilator_test(verilog_file_name, driver_name, top_module, verilator_flags=\"\", build_dir=None):\n if isinstance(verilator_flags, list):\n if not all(isinstance(flag, str) for flag in verilator_flags):\n raise ValueError(\"verilator_flags should be a str or list of strs\")\n verilator_flags = \" \".join(verilator_flags)\n if build_dir is None:\n if config.get_compile_dir() == 'callee_file_dir':\n (_, filename, _, _, _, _) = inspect.getouterframes(inspect.currentframe())[1]\n file_path = os.path.dirname(filename)\n build_dir = os.path.join(file_path, 'build')\n else:\n build_dir = \"build\"\n assert not subprocess.call('verilator -Wall -Wno-INCABSPATH -Wno-DECLFILENAME {} --cc {}.v --exe {}.cpp --top-module {}'.format(verilator_flags, verilog_file_name, driver_name, top_module), cwd=build_dir, shell=True)\n assert not subprocess.call('make -C obj_dir -j -f V{0}.mk V{0}'.format(top_module), cwd=build_dir, shell=True)\n assert not subprocess.call('./obj_dir/V{}'.format(top_module), cwd=build_dir, shell=True)\n"
},
{
"alpha_fraction": 0.5544760823249817,
"alphanum_fraction": 0.5739070177078247,
"avg_line_length": 33.30952453613281,
"blob_id": "f0685ffc55f895eff540462ae46370bd9bf582a7",
"content_id": "d2a9a9da665334b1cebf77a36e66123b726d3c61",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1441,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 42,
"path": "/tests/test_type/test_type_errors.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import *\n\n\ndef test_array_lengths(capsys):\n Buf = DeclareCircuit('Buf', \"I\", In(Array(8, Bit)), \"O\", Out(Array(8, Bit)))\n\n main = DefineCircuit(\"main\", \"I\", In(Bit), \"O\", Out(Array(7, Bit)))\n\n buf = Buf()\n wire(main.O, buf.I)\n out, err = capsys.readouterr()\n err_lines = err.splitlines()\n assert err_lines[-1] == \"=\" * 80\n assert err_lines[-2] == \"Wiring Error: Arrays must have the same length 8 != 7\"\n assert err_lines[-3] == \" wire(main.O, buf.I)\"\n\n\ndef test_array_to_bit(capsys):\n Buf = DeclareCircuit('Buf', \"I\", In(Array(8, Bit)), \"O\", Out(Array(8, Bit)))\n\n main = DefineCircuit(\"main\", \"I\", In(Bit), \"O\", Out(Bit))\n\n buf = Buf()\n wire(main.O, buf.I)\n out, err = capsys.readouterr()\n err_lines = err.splitlines()\n assert err_lines[-1] == \"=\" * 80\n assert err_lines[-2] == \"Wiring Error: wiring main.O (In(Bit)) to inst0.I (Array(8,In(Bit)))\"\n assert err_lines[-3] == \" wire(main.O, buf.I)\"\n\ndef test_bit_to_array(capsys):\n Buf = DeclareCircuit('Buf', \"I\", In(Bit), \"O\", Out(Array(8, Bit)))\n\n main = DefineCircuit(\"main\", \"I\", In(Bit), \"O\", Out(Array(7, Bit)))\n\n buf = Buf()\n wire(buf.I, main.O)\n out, err = capsys.readouterr()\n err_lines = err.splitlines()\n assert err_lines[-1] == \"=\" * 80\n assert err_lines[-2] == \"Wiring Error: wiring inst0.I (In(Bit)) to main.O (Array(7,In(Bit)))\"\n assert err_lines[-3] == \" wire(buf.I, main.O)\"\n"
},
{
"alpha_fraction": 0.647398829460144,
"alphanum_fraction": 0.6485549211502075,
"avg_line_length": 31.037036895751953,
"blob_id": "4ba7c8168f9766acd37ce2e2f2d1cd19edea0a7c",
"content_id": "1e9fff11d3a443881b0573db41c38ca056e77573",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/tests/test_verilog/test_from_file.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import magma as m\nimport magma.testing\nimport os\n\ndef check_port(definition, port, type, direction):\n assert hasattr(definition, port)\n port = getattr(definition, port)\n assert isinstance(port, type)\n if direction == \"input\":\n assert port.isoutput()\n elif direction == \"output\":\n assert port.isinput()\n else:\n raise NotImplementedError(direction)\n\ndef test():\n file_path = os.path.dirname(__file__)\n RXMOD = m.DefineFromVerilogFile(os.path.join(file_path, \"rxmod.v\"))[0]\n\n check_port(RXMOD, \"RX\", m.BitType, \"input\")\n check_port(RXMOD, \"CLK\", m.BitType, \"input\")\n check_port(RXMOD, \"data\", m.ArrayType, \"output\")\n check_port(RXMOD, \"valid\", m.BitType, \"output\")\n\n m.compile(\"build/test_rxmod\", RXMOD)\n assert m.testing.check_files_equal(__file__, \"build/test_rxmod.v\",\n \"gold/test_rxmod.v\")\n"
},
{
"alpha_fraction": 0.5856181383132935,
"alphanum_fraction": 0.5873419642448425,
"avg_line_length": 42.82014465332031,
"blob_id": "c812d0245fae85449b241c61a8b9e7b4a48a0815",
"content_id": "5c845cfde225f9fb570ff85162d6397618f5191b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12182,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 278,
"path": "/magma/backend/coreir_.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict\nimport os\nfrom ..bit import VCC, GND, BitType, BitIn, BitOut, BitKind\nfrom ..array import ArrayKind, ArrayType, Array\nfrom ..tuple import TupleKind, TupleType, Tuple\nfrom ..clock import wiredefaultclock, ClockType, Clock, ResetType\nfrom ..bitutils import seq2int\nfrom ..backend.verilog import find\nfrom ..logging import error\nimport coreir\nfrom ..ref import ArrayRef, DefnRef\nfrom ..passes import InstanceGraphPass\nfrom ..t import In\n\nfrom collections import defaultdict\n\nclass keydefaultdict(defaultdict):\n # From https://stackoverflow.com/questions/2912231/is-there-a-clever-way-to-pass-the-key-to-defaultdicts-default-factory\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError( key )\n else:\n ret = self[key] = self.default_factory(key)\n return ret\n\ndef magma_port_to_coreir(port):\n select = repr(port)\n\n name = port.name\n while isinstance(name, ArrayRef):\n name = name.array.name\n if isinstance(name, DefnRef):\n if name.defn.name != \"\":\n select = select.replace(name.defn.name, \"self\")\n\n return select.replace(\"[\", \".\").replace(\"]\", \"\")\n\nclass CoreIRBackend:\n def __init__(self, context=None):\n if context is None:\n context = coreir.Context()\n self.context = context\n self.libs = keydefaultdict(self.context.get_lib)\n self.__constant_cache = {}\n self.__unique_concat_id = -1\n\n def check_interface(self, definition):\n # for now only allow Bit, Array, or Record\n def check_type(portType, errorMessage=\"\"):\n if isinstance(portType, ArrayKind):\n check_type(portType.T, errorMessage.format(\"Array({}, {})\").format(\n str(portType.N, \"{}\")))\n elif isinstance(portType, TupleKind):\n for (k, t) in zip(port.Ks, port.Ts):\n check_type(t, errorMessage.format(\"Record({}:{})\".format(k, \"{}\")))\n elif isinstance(portType, BitKind):\n return\n else:\n error(errorMessage.format(str(port)))\n for name, port in definition.interface.ports.items():\n check_type('Error: Argument {} must be a Bit, Array, or Record')\n\n def get_type(self, port, is_input):\n if isinstance(port, (ArrayType, ArrayKind)):\n _type = self.context.Array(port.N, self.get_type(port.T, is_input))\n elif isinstance(port, (TupleType, TupleKind)):\n _type = self.context.Record({k:self.get_type(t, is_input)\n for (k,t) in zip(port.Ks, port.Ts)})\n elif is_input:\n if isinstance(port, ClockType):\n _type = self.context.named_types[(\"coreir\", \"clk\")]\n # FIXME: We need to distinguish between synchronous and\n # asynchronous resets\n # elif isinstance(port, ResetType):\n # _type = self.context.named_types[(\"coreir\", \"rst\")]\n else:\n _type = self.context.Bit()\n else:\n if isinstance(port, ClockType):\n _type = self.context.named_types[(\"coreir\", \"clkIn\")]\n # FIXME: We need to distinguish between synchronous and\n # asynchronous resets\n # elif isinstance(port, ResetType):\n # _type = self.context.named_types[(\"coreir\", \"rstIn\")]\n else:\n _type = self.context.BitIn()\n return _type\n\n coreirNamedTypeToPortDict = {\n \"clk\": Clock\n }\n\n def get_ports(self, coreir_type):\n if (coreir_type.kind == \"Bit\"):\n return BitOut\n elif (coreir_type.kind == \"BitIn\"):\n return BitIn\n elif (coreir_type.kind == \"Array\"):\n return Array(len(coreir_type), self.get_ports(coreir_type.element_type))\n elif (coreir_type.kind == \"Record\"):\n elements = {}\n for item in coreir_type.items():\n # replace the in port with I as can't reference that\n name = \"I\" if (item[0] == \"in\") else item[0]\n # exception to 
handle clock types, since other named types not handled\n if item[1].kind == \"Named\" and name in self.coreirNamedTypeToPortDict:\n elements[name] = In(self.coreirNamedTypeToPortDict[name])\n else:\n elements[name] = self.get_ports(item[1])\n # save the renaming data for later use\n if item[0] == \"in\":\n elements[name].origPortName = \"in\"\n return Tuple(**elements)\n elif (coreir_type.kind == \"Named\"):\n raise NotImplementedError(\"named types not supported yet\")\n else:\n raise NotImplementedError(\"Trying to convert unknown coreir type to magma type\")\n\n def get_ports_as_list(self, ports):\n return [item for i in range(ports.N) for item in [ports.Ks[i], ports.Ts[i]]]\n\n def convert_interface_to_module_type(self, interface):\n args = OrderedDict()\n for name, port in interface.ports.items():\n if not port.isinput() and not port.isoutput():\n raise NotImplementedError()\n args[name] = self.get_type(port, port.isinput())\n return self.context.Record(args)\n\n def compile_instance(self, instance, module_definition):\n name = instance.__class__.coreir_name\n lib = self.libs[instance.coreir_lib]\n if instance.coreir_genargs is None:\n if hasattr(instance, \"wrappedModule\"):\n module = instance.wrappedModule\n else:\n module = lib.modules[name]\n args = {}\n for name, value in instance.kwargs.items():\n if name in {\"name\", \"loc\"}:\n continue # Skip\n elif isinstance(value, tuple):\n args[name] = coreir.BitVector(value[1], value[0])\n else:\n args[name] = value\n args = self.context.new_values(args)\n return module_definition.add_module_instance(instance.name, module, args)\n else:\n generator = lib.generators[name]\n config_args = {}\n for name, value in instance.coreir_configargs.items():\n config_args[name] = value\n config_args = self.context.new_values(config_args)\n gen_args = {}\n for name, value in type(instance).coreir_genargs.items():\n gen_args[name] = value\n gen_args = self.context.new_values(gen_args)\n return module_definition.add_generator_instance(instance.name,\n generator, gen_args, config_args)\n\n def add_output_port(self, output_ports, port):\n output_ports[port] = magma_port_to_coreir(port)\n if isinstance(port, ArrayType):\n for bit in port:\n self.add_output_port(output_ports, bit)\n\n def compile_definition_to_module_definition(self, definition, module_definition):\n output_ports = {}\n for name, port in definition.interface.ports.items():\n if port.isoutput():\n self.add_output_port(output_ports, port)\n\n for instance in definition.instances:\n wiredefaultclock(definition, instance)\n coreir_instance = self.compile_instance(instance, module_definition)\n for name, port in instance.interface.ports.items():\n if port.isoutput():\n self.add_output_port(output_ports, port)\n\n\n def get_select(value):\n if value in [VCC, GND]:\n return self.get_constant_instance(value, None, module_definition)\n else:\n return module_definition.select(output_ports[value])\n\n for instance in definition.instances:\n for name, port in instance.interface.ports.items():\n if port.isinput():\n self.connect(module_definition, port, port.value(), output_ports)\n for input in definition.interface.inputs():\n output = input.value()\n if not output:\n error(repr(definition))\n raise Exception(f\"Output {input} of {definition.name} not connected.\".format(input))\n self.connect(module_definition, input, output, output_ports)\n\n def compile_definition(self, definition):\n self.check_interface(definition)\n module_type = self.convert_interface_to_module_type(definition.interface)\n 
coreir_module = self.context.global_namespace.new_module(definition.coreir_name, module_type)\n module_definition = coreir_module.new_definition()\n self.compile_definition_to_module_definition(definition, module_definition)\n coreir_module.definition = module_definition\n return coreir_module\n\n def connect(self, module_definition, port, value, output_ports):\n self.__unique_concat_id\n if value is None:\n raise Exception(\"Got None for port: {}, is it connected to anything?\".format(port))\n elif isinstance(value, coreir.Wireable):\n source = value\n\n elif value.anon() and isinstance(value, ArrayType):\n for p, v in zip(port, value):\n self.connect(module_definition, p, v, output_ports)\n return\n elif isinstance(value, ArrayType) and all(x in {VCC, GND} for x in value):\n source = self.get_constant_instance(value, len(value),\n module_definition)\n elif value is VCC or value is GND:\n source = self.get_constant_instance(value, None, module_definition)\n else:\n source = module_definition.select(output_ports[value])\n module_definition.connect(\n source,\n module_definition.select(magma_port_to_coreir(port)))\n\n\n __unique_constant_id = -1\n def get_constant_instance(self, constant, num_bits, module_definition):\n if module_definition not in self.__constant_cache:\n self.__constant_cache[module_definition] = {}\n if constant not in self.__constant_cache[module_definition]:\n self.__unique_constant_id += 1\n\n bit_type_to_constant_map = {\n GND: 0,\n VCC: 1\n }\n if constant in bit_type_to_constant_map:\n value = bit_type_to_constant_map[constant]\n elif isinstance(constant, ArrayType):\n value = seq2int([bit_type_to_constant_map[x] for x in constant])\n else:\n raise NotImplementedError(value)\n if num_bits is None:\n config = self.context.new_values({\"value\": bool(value)})\n name = \"bit_const_{}\".format(constant)\n corebit_const_module = self.libs['corebit'].modules[\"const\"]\n module_definition.add_module_instance(name, corebit_const_module, config)\n else:\n gen_args = self.context.new_values({\"width\": num_bits})\n config = self.context.new_values({\"value\": value})\n # name = \"const_{}_{}\".format(constant, self.__unique_constant_id)\n name = \"const_{}\".format(constant)\n instantiable = self.get_instantiable(\"const\", \"coreir\")\n module_definition.add_generator_instance(name, instantiable, gen_args, config)\n # return module_definition.select(\"{}.out\".format(name))\n self.__constant_cache[module_definition][constant] = module_definition.select(\"{}.out\".format(name))\n return self.__constant_cache[module_definition][constant]\n\n\n def compile(self, defn):\n modules = {}\n pass_ = InstanceGraphPass(defn)\n pass_.run()\n for key, _ in pass_.tsortedgraph:\n if key.is_definition:\n modules[key.name] = self.compile_definition(key)\n return modules\n\ndef compile(main, file_name=None, context=None):\n modules = CoreIRBackend(context).compile(main)\n if file_name is not None:\n return modules[main.coreir_name].save_to_file(file_name)\n else:\n return modules[main.coreir_name]\n"
},
{
"alpha_fraction": 0.6629778742790222,
"alphanum_fraction": 0.6780683994293213,
"avg_line_length": 32.13333511352539,
"blob_id": "715cea590cb72e1b3e31d4ccd3cd52e98b969309",
"content_id": "8498b7e9c4c4f0f8b01135873f3d7adcc7316021",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 994,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 30,
"path": "/.travis/install_coreir.sh",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -e\n\nif [ \"$TRAVIS_BRANCH\" == \"coreir-dev\" ]; then\n sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test\n sudo apt-get update\n sudo apt-get install g++-4.9\n mkdir deps;\n mkdir deps/bin;\n mkdir deps/lib;\n mkdir deps/include;\n cd deps;\n git clone -b dev https://github.com/rdaly525/coreir.git;\n cd coreir;\n export COREIRCONFIG=\"g++-4.9\";\n export COREIR=$PWD;\n make install prefix=$TRAVIS_BUILD_DIR/deps;\n cd ..;\n cd ..;\n export PATH=$TRAVIS_BUILD_DIR/deps/bin:$PATH;\n export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/deps/lib:$LD_LIBRARY_PATH;\n pip install git+git://github.com/leonardt/pycoreir.git@dev;\nelse\n wget https://github.com/rdaly525/coreir/releases/download/v0.0.11/coreir.tar.gz;\n mkdir coreir_release;\n tar -xf coreir.tar.gz -C coreir_release --strip-components 1;\n export PATH=$TRAVIS_BUILD_DIR/coreir_release/bin:$PATH;\n export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/coreir_release/lib:$LD_LIBRARY_PATH;\nfi\n"
},
{
"alpha_fraction": 0.507905125617981,
"alphanum_fraction": 0.5158102512359619,
"avg_line_length": 18.461538314819336,
"blob_id": "ca771f1fa120c71a87dd3414fdcae28bc1b14873",
"content_id": "42469fc2572deff5b9b2237b362197366ef305de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 26,
"path": "/setup.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\nimport sys\n\nsetup(\n name='magma',\n version='0.1',\n description='A circuit wiring language for programming FPGAs',\n scripts=['bin/magma'],\n packages=[\n \"magma\",\n \"magma.backend\",\n \"magma.passes\",\n \"magma.simulator\",\n \"magma.testing\"\n ],\n install_requires=[\n \"six\",\n \"mako\",\n \"pyverilog\",\n \"numpy\",\n \"graphviz\",\n \"coreir\",\n \"bit_vector\"\n ],\n python_requires='>=3.6'\n)\n"
},
{
"alpha_fraction": 0.4871794879436493,
"alphanum_fraction": 0.5192307829856873,
"avg_line_length": 28.714284896850586,
"blob_id": "773e9a15e461df8a944c4a7abe95ff02c3c92241",
"content_id": "ffbd641768a708bb63d55bbe1057ca8cf8674ddc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 42,
"path": "/examples/full_adder.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import magma as m\nimport mantle\n\nclass FullAdder(m.Circuit):\n IO = [\"a\", m.In(m.Bit), \"b\", m.In(m.Bit), \"cin\", m.In(m.Bit),\n \"out\", m.Out(m.Bit), \"cout\", m.Out(m.Bit)]\n @classmethod\n def definition(io):\n # Generate the sum\n a_xor_b = io.a ^ io.b\n m.wire(a_xor_b ^ io.cin, io.out)\n # Generate the carry\n a_and_b = io.a & io.b\n b_and_cin = io.b & io.cin\n a_and_cin = io.a & io.cin\n m.wire(a_and_b | b_and_cin | a_and_cin, io.cout)\n\n\nif __name__ == \"__main__\":\n from magma.simulator.python_simulator import PythonSimulator\n\n simulator = PythonSimulator(FullAdder)\n test_vectors = [\n [0, 0, 0, 0, 0],\n [0, 1, 0, 1, 0],\n [1, 0, 0, 1, 0],\n [1, 1, 0, 0, 1],\n [0, 1, 1, 0, 1],\n [1, 0, 1, 0, 1],\n [1, 1, 0, 0, 1],\n [1, 1, 1, 1, 1]\n ]\n\n for a, b, cin, out, cout in test_vectors:\n simulator.set_value(FullAdder.a, bool(a))\n simulator.set_value(FullAdder.b, bool(b))\n simulator.set_value(FullAdder.cin, bool(cin))\n simulator.evaluate()\n assert simulator.get_value(FullAdder.out) == bool(out)\n assert simulator.get_value(FullAdder.cout) == bool(cout)\n\n print(\"Success!\")\n"
},
{
"alpha_fraction": 0.5541666746139526,
"alphanum_fraction": 0.5592592358589172,
"avg_line_length": 33.83871078491211,
"blob_id": "d812fd2187a018b2226a3626d41ab6ddc6b95afb",
"content_id": "20ae0ef539281a9dd2dfcf4b648eee15318d915b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2160,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 62,
"path": "/magma/testing/function.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import BitType, ArrayType, SIntType\nfrom magma.bit_vector import BitVector\nimport sys\nif sys.version_info < (3, 3):\n from funcsigs import signature\nelse:\n from inspect import signature\nfrom itertools import product\nimport pytest\n\n# check that number of function arguments equals number of circuit inputs\ndef check(circuit, func):\n sig = signature(func)\n nfuncargs = len(sig.parameters)\n\n # count circuit inputs\n ncircargs = 0\n for name, port in circuit.interface.ports.items():\n if port.isoutput():\n ncircargs += 1\n assert nfuncargs == ncircargs\n\[email protected](reason=\"Not a test\")\ndef testvectors(circuit, func, input_ranges=None, mode='complete'):\n check(circuit, func)\n\n args = []\n for i, (name, port) in enumerate(circuit.interface.ports.items()):\n if port.isoutput():\n if isinstance(port, BitType):\n args.append([BitVector(0),BitVector(1)])\n elif isinstance(port, ArrayType):\n num_bits = type(port).N\n if isinstance(port, SIntType):\n if input_ranges is None:\n start = -2**(num_bits - 1)\n end = 2**(num_bits - 1) # We don't subtract one because range end is exclusive\n input_range = range(start, end)\n else:\n input_range = input_ranges[i]\n args.append([BitVector(x, num_bits=num_bits, signed=True) for x in input_range])\n else:\n if input_ranges is None:\n input_range = range(1<<num_bits)\n else:\n input_range = input_ranges[i]\n args.append([BitVector(x, num_bits=num_bits) for x in input_range])\n else:\n assert True, \"can't test Tuples\"\n\n nargs = len(args)\n tests = []\n for test in product(*args):\n test = list(test)\n result = func(*test)\n if isinstance(result, tuple):\n test.extend(result)\n else:\n test.append(result)\n tests.append(test)\n\n return tests\n"
},
{
"alpha_fraction": 0.5830508470535278,
"alphanum_fraction": 0.5864406824111938,
"avg_line_length": 27.071428298950195,
"blob_id": "b05b2f9fbb2b7ea9b94ae8c0a28c5356f4e7565f",
"content_id": "60c05e36a01d8511feec192cdfa3448173542fcc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 42,
"path": "/magma/wire.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import inspect\nfrom collections import Sequence\nfrom .port import INPUT, OUTPUT, INOUT\nfrom .compatibility import IntegerTypes\nfrom .t import Type\nfrom .debug import debug_wire\nfrom .logging import info, warning, error\n\n__all__ = ['wire']\n\n\n@debug_wire\ndef wire(o, i, debug_info):\n\n # Wire(o, Circuit)\n if hasattr(i, 'interface'):\n i.wire(o, debug_info)\n return\n\n # replace output Circuit with its output (should only be 1 output)\n if hasattr(o, 'interface'):\n # if wiring a Circuit to a Port\n # then circuit should have 1 output \n o = o.interface.outputs()\n if len(o) != 1:\n error('Wiring Error: wiring {} (Sequence of length={}) to {} ({})'.format(o, len(o), i, type(i)))\n return\n o = o[0]\n\n # if o is an input\n if not isinstance(o, IntegerTypes) and o.isinput():\n # if i is not an input\n if isinstance(i, IntegerTypes) or not i.isinput():\n # flip i and o\n i, o = o, i\n\n #if hasattr(i, 'wire'):\n # error('Wiring Error: The input must have a wire method - {} to {}'.format(o, i))\n # return\n\n # Wire(o, Type)\n i.wire(o, debug_info)\n\n"
},
{
"alpha_fraction": 0.5997521877288818,
"alphanum_fraction": 0.6022304892539978,
"avg_line_length": 27.821428298950195,
"blob_id": "86acd67d76f5390dafb349c395675e6c35b59c2c",
"content_id": "f31ffac1d3654391965b9f313c7c0aee30e28282",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 28,
"path": "/tests/test_verilog/test_simple.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import DeclareFromVerilog\nimport inspect\nimport os\nimport magma as m\n\ndef test_simple():\n file_path = os.path.dirname(__file__)\n file_name = os.path.join(file_path, \"simple.v\")\n with open(file_name, 'r') as f:\n s = f.read()\n\n v = DeclareFromVerilog(s)\n top = v[0]\n assert top.name == \"top\"\n assert repr(top.IO) == \"Interface(a, In(Bit), b, Out(Bit), c, Bit)\"\n\ndef test_small():\n file_path = os.path.dirname(__file__)\n file_name = os.path.join(file_path, \"small.v\")\n small = m.DeclareFromVerilogFile(file_name)[0]\n for name in small.IO():\n assert name in [\"in\", \"out\"]\n\n for name in small.interface:\n assert name in [\"in\", \"out\"]\n\n for item in small.interface.items():\n assert item in [(\"in\", m.In(m.Bit)), (\"out\", m.Out(m.Bit))]\n"
},
{
"alpha_fraction": 0.6695906519889832,
"alphanum_fraction": 0.6754385828971863,
"avg_line_length": 30.090909957885742,
"blob_id": "04356463e14d374a632772a65e52bf09e7a61628",
"content_id": "088e583d2301a8cb0d992ed63d5573f5c4f5404c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 11,
"path": "/tests/test_operator.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import *\nfrom magma.operator import OperatorMantleNotImportedError\n\n\ndef test_error():\n circ = DefineCircuit(\"test\", \"a\", In(Bits(4)), \"b\", Out(Bits(4)))\n try:\n b = ~circ.a\n assert False, \"Operator should throw an error since mantle is not imported\"\n except OperatorMantleNotImportedError as e:\n pass\n"
},
{
"alpha_fraction": 0.6245710253715515,
"alphanum_fraction": 0.6256005764007568,
"avg_line_length": 32.4942512512207,
"blob_id": "f0dda89b005c1ea968066f9647903d798aade781",
"content_id": "469ce81f984617f025cd2a9127beb8875e30f1f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2914,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 87,
"path": "/magma/compile.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "import os\nimport inspect\n\nfrom .passes import DefinitionPass\nfrom .backend import verilog, blif, firrtl, dot\nfrom .config import get_compile_dir\nfrom .logging import error\nfrom .circuit import isdefinition\n\n__all__ = ['compile']\n\n\ndef write_file(file_name, extension, code):\n with open(\"{}.{}\".format(file_name, extension), 'w') as file:\n file.write(code)\n\n\nclass MultipleDefinitionException(Exception):\n pass\n\n\nclass CheckDefinitionUniquenessPass(DefinitionPass):\n def __init__(self, main):\n super(CheckDefinitionUniquenessPass, self).__init__(main)\n self.seen = {}\n\n def __call__(self, definition):\n if definition.name not in self.seen:\n self.seen[definition.name] = set()\n self.seen[definition.name].add(definition)\n\n def _run(self, definition):\n for instance in definition.instances:\n instancedefinition = type(instance)\n if isdefinition(instancedefinition):\n self._run( instancedefinition )\n\n self(definition)\n\n def run(self):\n super(CheckDefinitionUniquenessPass, self).run()\n duplicated = []\n #print(self.seen)\n for name, definitions in self.seen.items():\n if len(definitions) > 1:\n duplicated.append((name, definitions))\n error(\"Found multiple definitions for {}\".format(name))\n\n if len(duplicated):\n raise MultipleDefinitionException([name for name, _ in duplicated])\n\n\ndef check_definitions_are_unique(circuit):\n CheckDefinitionUniquenessPass(circuit).run()\n\n\ndef compile(basename, main, output='verilog', origin=None, include_coreir=False, vendor=None):\n check_definitions_are_unique(main)\n if get_compile_dir() == 'callee_file_dir':\n (_, filename, _, _, _, _) = inspect.getouterframes(inspect.currentframe())[1]\n file_path = os.path.dirname(filename)\n file_name = os.path.join(file_path, basename)\n else:\n file_name = basename\n\n if output == 'verilog':\n write_file(file_name, 'v', verilog.compile(main, include_coreir))\n elif output == 'blif':\n write_file(file_name, 'blif', blif.compile(main))\n elif output == 'firrtl':\n write_file(file_name, 'fir', firrtl.compile(main))\n elif output == 'coreir':\n # underscore so our coreir module doesn't conflict with coreir bindings\n # package\n from .backend import coreir_\n coreir_.compile(main, file_name + \".json\")\n elif output == 'dot':\n write_file(file_name, 'dot', dot.compile(main))\n\n if hasattr(main, 'fpga'):\n fpga = main.fpga\n if vendor == 'altera':\n write_file(file_name, 'qsf', fpga.qsf(basename.split('/')[-1]))\n elif vendor == 'xilinx':\n write_file(file_name, 'ucf', fpga.ucf())\n elif vendor == 'lattice' or vendor == 'silego':\n write_file(file_name, 'pcf', fpga.pcf())\n"
},
{
"alpha_fraction": 0.5537634491920471,
"alphanum_fraction": 0.5596774220466614,
"avg_line_length": 29.491804122924805,
"blob_id": "526009c6bbcdb81297a82ff4671f8705eaf34ca3",
"content_id": "69b9a9bcb2cfb4753d6a9711afb183492d564bc7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1860,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 61,
"path": "/magma/testing/newfunction.py",
"repo_name": "rowhit/magma",
"src_encoding": "UTF-8",
"text": "from magma import BitType, ArrayType, SIntType\nfrom magma.bit_vector import BitVector\nimport sys\nif sys.version_info < (3, 3):\n from funcsigs import signature\nelse:\n from inspect import signature\nfrom itertools import product\nimport pytest\n\n# check that number of function arguments equals number of circuit inputs\ndef check(circuit, func):\n sig = signature(func)\n nfuncargs = len(sig.parameters)\n\n # count circuit inputs\n ncircargs = 0\n for name, port in circuit.interface.ports.items():\n if port.isoutput():\n ncircargs += 1\n\n assert nfuncargs == ncircargs\n\[email protected](reason=\"Not a test\")\ndef testvectors(circuit, func, input_ranges=None, mode='complete'):\n check(circuit, func)\n\n args = []\n for i, (name, port) in enumerate(circuit.interface.ports.items()):\n if port.isoutput():\n if isinstance(port, BitType):\n args.append([0,1])\n elif isinstance(port, ArrayType):\n num_bits = type(port).N\n if isinstance(port, SIntType):\n if input_ranges is None:\n input_range = range(-2**(num_bits-1), 2**(num_bits-1))\n else:\n input_range = input_ranges[i]\n else:\n if input_ranges is None:\n input_range = range(1<<num_bits)\n else:\n input_range = input_ranges[i]\n args.append(input_range)\n else:\n assert True, \"can't test Tuples\"\n\n nargs = len(args)\n\n tests = []\n for test in product(*args):\n test = list(test)\n result = func(*test)\n if isinstance(result, tuple):\n test.extend(result)\n else:\n test.append(result)\n tests.append(test)\n\n return tests\n"
}
] | 31 |
mvenkatreddy/git_c | https://github.com/mvenkatreddy/git_c | 76722c5867f0a47507665100783b4c38ff381a03 | 3a6116402c0532f14c38215f1ae9d7490b48084b | 7d79041951850247244d1f184887bba7b56fef9a | refs/heads/master | 2020-03-17T00:53:20.369575 | 2018-05-18T03:22:05 | 2018-05-18T03:22:05 | 133,133,450 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 3.200000047683716,
"blob_id": "425abaff2351ac8e0d4398cffda08f187369b61e",
"content_id": "55fac6eaebe43eafcce6c3599f35835c00d24a0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 20,
"path": "/README.md",
"repo_name": "mvenkatreddy/git_c",
"src_encoding": "UTF-8",
"text": "# git_c\nThe git commands: \n\nclone\n\nstatus\n\nconfig\n\ninit\n\nadd\n\npull\n\npush\n\nlog\n\ndiff\n"
},
{
"alpha_fraction": 0.5853658318519592,
"alphanum_fraction": 0.5853658318519592,
"avg_line_length": 9.25,
"blob_id": "75d69782d3aa0fa2a618ef4c32fa603f79cb7378",
"content_id": "2d1076601cd5194339ce59212833914b084de1ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 4,
"path": "/test.py",
"repo_name": "mvenkatreddy/git_c",
"src_encoding": "UTF-8",
"text": "def display():\n print \"Hi\"\n\ndisplay()\n"
}
] | 2 |
jagan25/Team10-SDWAN | https://github.com/jagan25/Team10-SDWAN | 662843260bba6a644f22cff519dabf4bfc695239 | f8dc046a4501561c26d6300ad797af273c04de4a | a5487e72401bead2ff741fe06a37d9db89f534d2 | refs/heads/master | 2020-08-29T18:59:01.879248 | 2019-12-14T17:06:23 | 2019-12-14T17:06:23 | 218,138,714 | 0 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.5814592838287354,
"alphanum_fraction": 0.5887056589126587,
"avg_line_length": 42.021507263183594,
"blob_id": "9ddf9224818eaa10f6d4cb61c37a8644d11ea41b",
"content_id": "d036fc6eb77f3334bf9b9a45ef887eb4d71c161d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4002,
"license_type": "no_license",
"max_line_length": 297,
"num_lines": 93,
"path": "/M3/src/logic/transit.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nCREATE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_ns.yaml\"\nDELETE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_ns.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createTransit(config, logFile):\n logging.info('Creating transit VPC')\n if config['tenant']['hypervisorType'] == 'primary':\n hypervisorIP = str(config['tenant']['tenant_id'])+ \".0.0.1\"\n transitIP = str(config['tenant']['tenant_id'])+ \".0.0.2\"\n elif config['tenant']['hypervisorType'] == 'secondary':\n hypervisorIP = str(config['tenant']['tenant_id'])+ \".255.0.1\"\n transitIP = str(config['tenant']['tenant_id'])+ \".255.0.2\"\n\n transitNS = str(config['tenant']['tenant_name'])+'_transit'#+str(config['tenant'])#['transit_id'])\n tunnel_name = \"gre_\"+config['tenant']['tenant_name']\n command = \"sudo ansible-playbook \" + CREATE_NS_SCRIPT + \" -e ns_name=\"+transitNS+\" -e hypervisorIP=\"+hypervisorIP+\" -e option=transit -e transitIP=\"+transitIP+\" -e hypervisor=\"+config['tenant']['hypervisorType']+\" -e tunnel_name=\"+tunnel_name+\" -e remoteIP=\"+str(config['tenant']['remote_ip'])\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Transit VPC : \" + command + \"\\n\")\n\ndef deleteTransit(config, logFile):\n logging.info('Deleting transit VPC')\n transitNS = str(config['tenant']['tenant_name'])+'_transit'#+str(config['tenant']['transit_id'])\n command = \"sudo ansible-playbook \" + DELETE_NS_SCRIPT + \" -e ns_name=\"+transitNS+\" -e hypervisor=\"+config['tenant']['hypervisorType']\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Transit VPC : \" + command + \"\\n\")\n\ndef checkYaml(yFile):\n if 'tenant' in yFile:\n if 'tenant_id' in yFile['tenant'] and 'tenant_name' in yFile['tenant'] and 'hypervisorType' in yFile['tenant']:\n return 0\n return 1\n\ndef main():\n \n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments :\"+ str(sys.argv)+\"\\n\")\n exit(0)\n else:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFile = read_yaml_data(CONFIG_FOLDER_PATH + yFileName)\n flag = checkYaml(yFile)\n if flag == 1:\n logging.error(\"Incompatible YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Incompatible YAML File :\"+ str(sys.argv)+\"\\n\")\n exit(0)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n logging.info(\"Performing delete operation depending upon the file\")\n deleteTransit(yFile, logFile)\n\n elif str(sys.argv[1]).lower() == \"create\":\n logging.info(\"Performing create operation depending upon the file\")\n createTransit(yFile, logFile)\n else:\n logging.error(\"ERROR: Unrecognized Command!!!\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"ERROR: No yaml/yml file found!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No YAML found :\"+ 
str(sys.argv)+\"\\n\")\n exit(0)\n logFile.close()\n\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.7096773982048035,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 14.5,
"blob_id": "99537a2ae8f59ea97fec8ce74e1fe2bdf57daaf8",
"content_id": "95fad54a2afa08c60ec5e743a47c44bea8b31e97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 4,
"path": "/M2/var/scripts/old_scripts/deleteVM.sh",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "#!/bin/bashsudo\n\nsudo virsh destroy $1\nsudo virsh undefine $1\n"
},
{
"alpha_fraction": 0.7245509028434753,
"alphanum_fraction": 0.7485029697418213,
"avg_line_length": 17.55555534362793,
"blob_id": "960159c9ac392593cb3e6a48d40a15c5e783f39f",
"content_id": "22e19de28e4ea25f302e558b1bc04858714f6661",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 9,
"path": "/M2/var/scripts/old_scripts/deleteNet.sh",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "#!/bin/bashsudo\n\n# destroy and undefine network\nsudo virsh net-destroy $1\nsudo virsh net-undefine $1\n\n# delete bridge\nsudo ip link set dev $2 down\nsudo brctl delbr $2\n"
},
{
"alpha_fraction": 0.583052396774292,
"alphanum_fraction": 0.5868832468986511,
"avg_line_length": 45.942447662353516,
"blob_id": "2302cb01eaaf3a73905fd8ee3ad125421620b38b",
"content_id": "c4a997553912c8579529cd2acdea96fd6907de7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6526,
"license_type": "no_license",
"max_line_length": 268,
"num_lines": 139,
"path": "/M2/src/logic/providerEdge.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_vm.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_vm.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createPE(yFile, yFileName, logFile):\n print(\"creating pe\")\n print(yFile)\n if \"IPrange\" in yFile:\n print(\" Creating PE network\")\n # log\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating IP Ranges for PE : \"+ str(yFile[\"tenant_name\"])+\"\\n\")\n\n # variables\n bridge_name = yFile['tenant_name']+'_PT_br'\n network_name = yFile['tenant_name']+'_PT_net'\n veth1 = yFile['tenant_name']+'PT1'\n veth2 = yFile['tenant_name']+'PT2'\n transit = yFile['tenant_name']+'_transit'\n\n # create l2 bridge\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e bridge_name=\"+bridge_name+\" -e network_name=\"+network_name\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n\n # connect bridge with transit namespace\n command = \"sudo ansible-playbook \" + CREATE_BRNS_CONN_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge_name+\" -e namespace=\"+transit\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n\n\n if \"vms\" in yFile:\n print(\"test\")\n network_name = yFile['tenant_name']+'_PT_net'\n controller_net = str(yFile['tenant_id'])+'controller_net'\n for vm in yFile[\"vms\"]:\n print(\"Creating VM : \"+str(vm['PE_name']))\n\n # create provider vm\n command = \"sudo ansible-playbook \" + CREATE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e vm_name=\"+str(yFile['tenant_name'])+str(vm['PE_name']) +\" -e mem=\"+str(vm['mem'])+\" -e vcpu=\"+str(vm['vcpu']) +\" -e network=\"+ controller_net +\" -vvv\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Provider Edge VM : \" + command + \"\\n\")\n\n # attach provider edge to PE-Transit Network\n command = \"sudo virsh attach-interface --domain \" + str(vm['PE_name']) + \" --type network \" + network_name + \" --model virtio --config --live\"\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Attaching Provider Edge to PE-Transit Network : \" + command + \"\\n\")\n\ndef deletePE(yFile, logFile):\n if 'vms' in yFile:\n for vm in yFile['vms']:\n # delete PE VM\n command = \"sudo ansible-playbook \" + DELETE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e vm_name=\" + str(vm[\"PE_name\"])\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE : \" + command + \"\\n\")\n\n if 'IPrange' in yFile:\n network_name = yFile['tenant_name']+'_PT_net'\n bridge_name = 
yFile['tenant_name']+'_PT_br'\n veth1 = yFile['tenant_name']+'PT1'\n\n # delete l2 network\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e network_name=\"+ network_name +\" -e bridge_name=\"+ bridge_name\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE-Transit network : \" + command + \"\\n\")\n\n # delete veth pair\n command = \"sudo ansible-playbook \" + DELETE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e veth1=\"+ veth1\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE-Transit connection : \" + command + \"\\n\")\n\ndef checkYaml(yFile, logFile):\n if 'PE' not in yFile:\n print(\"ERROR!!! Missing 'PE' key in YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Missing 'PE' key in YAML file : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n if not 'IPrange' in yFile['PE'] and not 'vms' in yFile['PE']:\n print(\"ERROR!!! Cannot process the given YAML file. MISSING KEYS!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Cannot process the given YAML file. MISSING KEYS!!! : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\n else:\n yFileName = sys.argv[2]\n # print(yFileName)\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFile = read_yaml_data(yFileName)\n #print(yFile)\n checkYaml(yFile, logFile)\n print(\"test\")\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n print(\"Performing delete operation depending upon the file\")\n deletePE(yFile['PE'], logFile)\n elif str(sys.argv[1]).lower() == \"create\":\n print(\"Performing create operation depending upon the file\")\n createPE(yFile['PE'], yFileName, logFile)\n except:\n print(\"ERROR!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n else:\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n \n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.5931347608566284,
"alphanum_fraction": 0.5991923213005066,
"avg_line_length": 37.843135833740234,
"blob_id": "3dfb9b7380a6d4b3804a33f78ae1f2f015507c9d",
"content_id": "d1df72496c5f976c293b529cdc01451d7ad5753b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1981,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 51,
"path": "/M2/src/logic/createSite.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\n\ndef createFunc(yFile,yFileName):\n if(yFile['tenant']['changeNS'].lower() == 'y'):\n subprocess.call([\"sudo ansible-playbook createSiteNS.yaml -e file=\" + yFileName + \" -vvv\"],shell=True)\n if('router' in yFile):\n subprocess.call([\"sudo ansible-playbook createInternalRouter.yaml -e file=\" + yFileName + \" -vvv\"],shell=True)\n\ndef deleteFunc(yFile,yFileName):\n if('router' in yFile):\n subprocess.call([\"sudo ansible-playbook deleteInternalRouter.yaml -e file=\" + yFileName + \" -vvv\"],shell=True)\n if(yFile['tenant']['changeNS'].lower() == 'y'):\n subprocess.call([\"sudo ansible-playbook deleteSiteNS.yaml -e file=\" + yFileName + \" -vvv\"],shell=True)\n\ndef checkYAML(yaml):\n if 'tenant' not in yaml:\n print(\"ERROR!!! Not in correct format!!!\")\n exit(0)\n\n\nif(len(sys.argv)<2):\n logging.error(\"\\nERROR: less than 2 arguments given!!! Require 2 arguments to run\")\n exit(0)\nelse:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n #open the yaml file\n with open(yFileName,'r') as file:\n yFile = yaml.load(file)\n checkYAML(yFile)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower()==\"delete\":\n print(\"\\nPerforming delete operation depending upon the file\")\n deleteFunc(yFile,yFileName)\n elif str(sys.argv[1]).lower()==\"create\":\n logging.info(\"\\nPerforming create operation depending upon the file\")\n createFunc(yFile,yFileName)\n else:\n logging.error(\"\\nERROR: Unrecognized Command!!!\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"\\nERROR: No yaml/yml file found!!!\")\n exit(0)\n"
},
{
"alpha_fraction": 0.592901885509491,
"alphanum_fraction": 0.5981211066246033,
"avg_line_length": 50.97674560546875,
"blob_id": "4e1decf75794c39b3010a5860173b576c1ab7b61",
"content_id": "444712d1dca022edc348ebc642eff0f12cbb4ae0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6706,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 129,
"path": "/M2/src/logic/create_site.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nCREATE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_ns.yaml\"\nCREATE_L2_BRIDGE_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_vm.yaml\"\nCREATE_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_conn.yaml\"\nDELETE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_ns.yaml\"\nDELETE_L2_BRIDGE_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_vm.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createFunc(yFile,yFileName):\n print(\"create\")\n hypervisor = yFile['tenant']['hypervisor']\n ns_name = yFile['tenant']['tenant_name']+yFile['tenant']['site']\n if yFile['tenant']['change_ns'].lower()=='y':\n # playbook to create NS\n command = 'sudo ansible-playbook ' + CREATE_NS_SCRIPT + ' -e hypervisor='+hypervisor+' -e ns_name='+ns_name+' -e hypervisorIP='+yFile['tenant']['site_ip_ext']+' -e transitIP='+yFile['tenant']['site_ip_int']\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating namespace : \" + command + \"\\n\")\n\n # playbook to create networks\n if yFile['tenant']['change_net'].lower()=='y':\n for net in yFile['tenant']['networks']:\n net_name = ns_name+net\n command = 'sudo ansible-playbook ' + CREATE_L2_BRIDGE_SCRIPT + ' -e hypervisor='+hypervisor+' -e bridge_name='+net_name+' -e network_name='+net_name\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n \n # playbook to attach network bridges to namespace\n veth1 = str(yFile['tenant']['tenant_id'])+'_'+str(yFile['tenant']['site_id'])+'_'+net+'1'\n veth2 = str(yFile['tenant']['tenant_id'])+'_'+str(yFile['tenant']['site_id'])+'_'+net+'2'\n command = 'sudo ansible-playbook ' + CREATE_BRNS_CONN_SCRIPT + ' -e hypervisor='+hypervisor+\" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+net_name+\" -e namespace=\"+ns_name\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n\n # playbook to create vms\n if yFile['tenant']['change_vm'].lower()=='y':\n for vm in yFile['router']:\n command = \"sudo ansible-playbook \" + CREATE_VM_SCRIPT + \" -e hypervisor=\" + hypervisor +\" -e vm_name=\"+str(vm['name']) +\" -e mem=\"+str(vm['mem'])+\" -e vcpu=\"+str(vm['vcpu']) +\" -e network=\"+ns_name+vm['networks'][0]\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller VM : \" + command + \"\\n\")\n \n # playbook to attach to other networks\n if len(vm['networks'])>1:\n for i in range(1,len(vm['networks'])):\n command = \"sudo ansible-playbook \" + CREATE_CONN_SCRIPT + \" -e vm=\"+vm['name']+\" -e network=\"+ns_name+vm['networks'][i]+ \" -e hypervisor=\"+yFile['hypervisorType']\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating connection : \" + command + \"\\n\")\n\ndef deleteFunc(yFile,yFileName):\n print(\"delete\")\n hypervisor = 
yFile['tenant']['hypervisor']\n ns_name = yFile['tenant']['tenant_name']+yFile['tenant']['site']\n # playbook to delete NS\n if yFile['tenant']['change_ns'].lower()=='y':\n command = 'sudo ansible-playbook ' + DELETE_NS_SCRIPT + ' -e hypervisor='+hypervisor+' -e ns_name='+ns_name\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting namespace : \" + command + \"\\n\")\n\n # playbook to delete networks\n if yFile['tenant']['change_net'].lower()=='y':\n for net in yFile['tenant']['networks']:\n net_name = ns_name+net\n command = 'sudo ansible-playbook ' + DELETE_L2_BRIDGE_SCRIPT + ' -e hypervisor='+hypervisor+' -e bridge_name='+net_name+' -e network_name='+net_name\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting L2 Bridge : \" + command + \"\\n\")\n\n # playbook to delete veth pair bridge and namespace\n veth1 = str(yFile['tenant']['tenant_id'])+'_'+str(yFile['tenant']['site_id'])+'_'+net+'1'\n command = 'sudo ansible-playbook ' + DELETE_BRNS_CONN_SCRIPT + ' -e hypervisor='+hypervisor+\" -e veth1=\"+veth1\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Bridge to namespace connection : \" + command + \"\\n\")\n\n # playbook to delete vms\n if yFile['tenant']['change_vm'].lower()=='y':\n for vm in yFile['router']:\n command = \"sudo ansible-playbook \" + DELETE_VM_SCRIPT + \" -e hypervisor=\" + hypervisor +\" -e vm_name=\"+str(vm['name'])\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Controller VM : \" + command + \"\\n\")\n \ndef checkYAML(yaml):\n if 'tenant' not in yaml:\n print(\"ERROR!!! Not in correct format!!!\")\n exit(0)\n\n\nif(len(sys.argv)<2):\n logging.error(\"\\nERROR: less than 2 arguments given!!! Require 2 arguments to run\")\n exit(0)\nelse:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n yFile = read_yaml_data(yFileName)\n checkYAML(yFile)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower()==\"delete\":\n print(\"\\nPerforming delete operation depending upon the file\")\n deleteFunc(yFile,yFileName)\n elif str(sys.argv[1]).lower()==\"create\":\n logging.info(\"\\nPerforming create operation depending upon the file\")\n createFunc(yFile,yFileName)\n else:\n logging.error(\"\\nERROR: Unrecognized Command!!!\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"\\nERROR: No yaml/yml file found!!!\")\n exit(0)\n\n"
},
{
"alpha_fraction": 0.5364187955856323,
"alphanum_fraction": 0.5440060496330261,
"avg_line_length": 47.3577995300293,
"blob_id": "7f098072dc5d2552038bd201238e94fdd7425f0a",
"content_id": "2e85daf8a6fe40e5bd427b7a9dbbd72d73e5054b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5272,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 109,
"path": "/M2/etc/config/startSetup.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os\nimport sys\nimport subprocess\n\nLOGIC_DIR = '/logic/'\nCONFIG_DIR = '/etc/config/'\n\ndef showCreate():\n try:\n option = input(\"\\n\\n1. Create transit \\n2. Create Controller \\n3. Create PE \\n4. Create CE \\n5. Create CE-PE Connection \\n\\n\")\n if option=='1':\n #create transit file\n config_file = input(\"Enter the transit NS configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'transit.py create '+ CONFIG_DIR+config_file] ,shell = True)\n elif option=='2':\n #create controller file\n config_file = input(\"Enter the controller configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'controller.py create '+ CONFIG_DIR+config_file], shell = True)\n elif option=='3':\n #create PE file\n config_file = input(\"Enter the PE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'providerEdge.py create '+ CONFIG_DIR+config_file], shell = True)\n elif option=='4':\n #create CE file\n config_file = input(\"Enter the CE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'CustomerEdge.py create '+ CONFIG_DIR+config_file], shell = True)\n elif option=='5':\n #create CE_PE connection file\n config_file = input(\"Enter the CE-PE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'createCEPENet.py create '+ CONFIG_DIR+config_file], shell = True)\n else:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showCreate()\n except Exception:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showCreate()\n\ndef showDelete():\n try:\n option = input(\"\\n\\n1. Delete transit \\n2. Delete Controller \\n3. Delete PE \\n4. Delete CE \\n5. Delete CE-PE Connection \\n\\n\")\n if option=='1':\n #delete transit\n config_file = input(\"Enter the transit NS configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'transit.py delete '+ CONFIG_DIR+config_file] ,shell = True)\n elif option=='2':\n #delete Controller file\n config_file = input(\"Enter the controller configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'controller.py delete '+ CONFIG_DIR+config_file], shell = True)\n elif option=='3':\n #delete PE file\n config_file = input(\"Enter the PE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'providerEdge.py delete '+ CONFIG_DIR+config_file], shell = True)\n elif option=='4':\n #delete CE file\n config_file = input(\"Enter the CE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'CustomerEdge.py delete '+ CONFIG_DIR+config_file], shell = True)\n elif option=='5':\n #delete CE-PE CONNECTION\n config_file = input(\"Enter the CE-PE configuration file name: \")\n subprocess.call(['sudo python3 ' + LOGIC_DIR + 'createCEPENet.py delete '+ CONFIG_DIR+config_file], shell = True)\n else:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showDelete()\n except Exception:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showCreate()\n\ndef showConfigure():\n try:\n option= input(\"\\n\\nHave you loaded the configurations in the iptableconfig.yaml?\\nPlease Enter 1 for Yes / 0 for No \\n\\n\")\n if option=='1':\n print(\"\\nYour Configurations will be loaded and updated!\\n\")\n #call suchu file\n elif option=='0':\n print(\"\\nPlease save your configurations in the iptableconfig.yaml and then execute this! \\n\")\n sys.exit()\n else:\n print(\"INVALID OPTION! 
Please enter a valid option! \\n\\n\")\n showConfigure()\n\n except Exception:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showConfigure()\n\ndef showOptions():\n try:\n option = input(\"1. Create (transitNS/CE/PE/Controller/CE-PE Connection) \\n2. Delete (transitNS/CE/PE/Controller/CE-PE Connection) \\n3. Configure IP tables \\n\\n\")\n\n if option=='1':\n showCreate()\n elif option=='2':\n showDelete()\n elif option=='3':\n showConfigure()\n else:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showOptions()\n except Exception:\n print(\"INVALID OPTION! Please enter a valid option! \\n\\n\")\n showOptions()\n\ndef main():\n showOptions()\n\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.5695216655731201,
"alphanum_fraction": 0.6151279211044312,
"avg_line_length": 34.880001068115234,
"blob_id": "b25b6d2a72e65952a646fde78ffa088b22abc8f5",
"content_id": "7c1bf298014dfe4e2a952f45eccfd78da7a4c705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 899,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 25,
"path": "/M3/src/logic/exec_command.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import paramiko\nimport time\n\ndef exec_cmd(hypervisor, command, flag):\n username = 'ece792'\n if hypervisor == 'primary':\n ip, password = '192.168.122.178','Avent@2506'\n elif hypervisor == 'secondary':\n ip, password = '192.168.122.197','Avent@2504'\n handler = paramiko.SSHClient()\n handler.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n handler.connect(ip, username = username, password = password, look_for_keys = False, allow_agent = False)\n time.sleep(2)\n shell = handler.invoke_shell()\n output = shell.recv(1000)\n shell.send(command)\n time.sleep(5)\n if flag == 0:\n data = shell.recv(10000).decode('utf-8')\n data = data.split(\"\\n\")\n for line_number in range(len(data)):\n if \"sudo docker inspect\" in data[line_number]:\n var = data[line_number+1]\n handler.close()\n return(var)\n\n\n"
},
{
"alpha_fraction": 0.5701483488082886,
"alphanum_fraction": 0.5740111470222473,
"avg_line_length": 46.93333435058594,
"blob_id": "ae071f34e9791a99350d88e31541cbffb163a408",
"content_id": "d4970b3972f94ca5bdc332361640c5e63f562e80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6472,
"license_type": "no_license",
"max_line_length": 242,
"num_lines": 135,
"path": "/M2/src/logic/customerEdge.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_vm.yaml\"\nCREATE_CONN = ANSIBLE_FOLDER_PATH+\"create_conn.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_vm.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createCE(yFile, yFileName, logFile):\n print(\"creating ce\")\n print(yFile)\n if \"vms\" in yFile:\n print(\" Creating CE network\")\n\n # variables\n transit = yFile['tenant_name']+yFile['site']\n controller_net = str(yFile['tenant_id'])+'controller_net'\n\n for vm in yFile[\"vms\"]:\n #variables\n bridge_name = transit+vm['CE_name']\n network_name = transit+vm['CE_name']\n veth1 = transit+vm['CE_name']+'1'\n veth2 = transit+vm['CE_name']+'2'\n \n # create l2 bridge\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e bridge_name=\"+bridge_name+\" -e network_name=\"+network_name\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n \n # connect bridge with transit namespace\n command = \"sudo ansible-playbook \" + CREATE_BRNS_CONN_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge_name+\" -e namespace=\"+transit\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n \n print(\"Creating Customer Edge VM : \"+str(vm['CE_name']))\n\n # create customer edge vm\n command = \"sudo ansible-playbook \" + CREATE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e vm_name=\"+str(vm['CE_name']) +\" -e mem=\"+str(vm['mem'])+\" -e vcpu=\"+str(vm['vcpu']) +\" -e network=\"+ controller_net +\" -vvv\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Customer Edge VM : \" + command + \"\\n\")\n\n # attach customer edge to Site-Bridge Network\n command = \"sudo ansible-playbook \"+ CREATE_CONN + \" -e vm=\"+vm['CE_name']+\" -e network=\"+network_name+ \" -e hypervisor=\"+yFile['hypervisorType']\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Attaching Customer Edge to CE-Site Network : \" + command + \"\\n\")\n\ndef deleteCE(yFile, logFile):\n if 'vms' in yFile:\n for vm in yFile['vms']:\n bridge_name = yFile['tenant_name']+vm['CE_name']+'_br'\n network_name = yFile['tenant_name']+vm['CE_name']+'_net'\n veth1 = yFile['tenant_name']+vm['CE_name']+'1'\n \n # delete CE VM\n command = \"sudo ansible-playbook \" + DELETE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e vm_name=\" + str(vm[\"CE_name\"])\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting CE : \" + command + \"\\n\")\n \n # delete l2 network\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e 
hypervisor=\" + yFile['hypervisorType']+\" -e network_name=\"+ network_name +\" -e bridge_name=\"+ bridge_name\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting CE-Site network : \" + command + \"\\n\")\n \n # delete veth pair\n command = \"sudo ansible-playbook \" + DELETE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e veth1=\"+ veth1\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting CE-Site connection : \" + command + \"\\n\")\n\ndef checkYaml(yFile, logFile):\n if 'CE' not in yFile:\n print(\"ERROR!!! Missing 'CE' key in YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Missing 'CE' key in YAML file : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n if not 'vms' in yFile['CE']:\n print(\"ERROR!!! Cannot process the given YAML file. MISSING KEYS!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Cannot process the given YAML file. MISSING KEYS!!! : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\n else:\n yFileName = sys.argv[2]\n # print(yFileName)\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFile = read_yaml_data(yFileName)\n #print(yFile)\n checkYaml(yFile, logFile)\n print(\"test\")\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n print(\"Performing delete operation depending upon the file\")\n deleteCE(yFile['CE'], logFile)\n elif str(sys.argv[1]).lower() == \"create\":\n print(\"Performing create operation depending upon the file\")\n createCE(yFile['CE'], yFileName, logFile)\n except:\n print(\"ERROR!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n else:\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n \n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.6302890181541443,
"alphanum_fraction": 0.6342196464538574,
"avg_line_length": 36.27586364746094,
"blob_id": "2d71574eeb94e1b6cb085c95ca60f4cbb5ad33bf",
"content_id": "1ee79203333ff0661a0cfaf5ae00baf9a7dd4825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8650,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 232,
"path": "/M2/src/logic/dynamicRouting.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os \nimport ansible_runner\nimport operator\nimport time\nimport json\n#nproc\n#ip- load, cpus. flag\nproviderEdgeChange = {}\n\n#ip- load, cpus\ncustomerEdgeChange = {}\n\ncustomerProviderMapping = {}\n\navailableProvider = []\nheavyProvider = []\n\nIP_FILE = \"/var/scripts/hostCPUUsageVars.yaml\"\nROUTE_FILE = \"/var/scripts/changeRouteVars.yaml\"\nTRANSIT_FILE = \"/var/scripts/changeTransitVars.yaml\"\nROUTE_CHANGE_SCRIPT = \"changeDefaultRoute.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = \"provider_edges_config.yaml\"\nCUSTOMER_EDGES_CONFIG_FILE = \"customer_edges_config.yaml\"\n\n\ndef run_ansible_script(logFile):\n # Refer https://ansible-runner.readthedocs.io/en/latest/python_interface.html for ansible doc\n provider_data = read_yaml_data(\"/etc/config/provider_edges_config.yaml\")\n customer_data = read_yaml_data(\"/etc/config/customer_edges_config.yaml\")\n transit_data = read_yaml_data(\"/etc/config/transit_edges_config.yaml\")\n hypervisor_data = read_yaml_data(\"/etc/config/hypervisor_config.yaml\")\n providerList = {}\n customerEdgeList = {}\n customerRouters = {}\n\n\n for provider in provider_data[\"ProviderEdges\"]:\n providerList[provider] = provider_data[\"ProviderEdges\"][provider][\"ip\"]\n if(len(provider_data[\"ProviderEdges\"][provider][\"customer_edges\"]))>0:\n for i in provider_data[\"ProviderEdges\"][provider][\"customer_edges\"]:\n customerProviderMapping[i] = provider\n \n\n for customer in customer_data[\"CustomerEdges\"]:\n customerEdgeList[customer] = customer_data[\"CustomerEdges\"][customer][\"ip\"]\n for cn in customer_data[\"CustomerEdges\"][customer][\"cnetwork\"]:\n customerRouters[cn] = customer_data[\"CustomerEdges\"][customer][\"cnetwork\"][cn]\n\n print(providerList)\n print(customerEdgeList)\n \n customerLoad = {}\n providerLoad = {}\n\n playbook_path = os.getcwd()\n changeFlag = False\n\n # r = ansible_runner.run(private_data_dir=playbook_path, playbook='ansible.yaml')\n # out = r.get_fact_cache(\"50.0.0.217\")\n # print(out['output'])\n\n #playbook_path = playbook_path.replace(\"north_bound\", \"ansible_scripts\")\n for name in providerList:\n #store in that file\n ip_data = {\"host\": providerList[name]}\n write_yaml_data(ip_data, IP_FILE)\n\n providerEdgeChange[name]={}\n r = ansible_runner.run(private_data_dir=playbook_path, playbook='/var/scripts/cpuUsage.yaml')\n out = r.get_fact_cache(providerList[name])\n retVal = out['output'].split(\" \")\n providerEdgeChange[name]['load'] = float(retVal[1])\n providerEdgeChange[name]['cpus'] = int(retVal[0])\n providerEdgeChange[name]['flag'] = retVal[2]\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" PE(load): \"+ name+\" :\"+str(retVal[1])+\"\\n\")\n if retVal[2]==\"TRUE\":\n changeFlag = True\n \n if changeFlag:\n\n transitCustomerEdgeMap = {}\n \n for i in transit_data[\"Transit_Edges\"]:\n transitCustomerEdgeMap[i] = transit_data[\"Transit_Edges\"][i]\n\n print(\"TRANSIT MAP\" + str(transitCustomerEdgeMap))\n\n for name in customerEdgeList:\n #store in that file\n customerEdgeChange[name]={}\n\n ip_data = {\"host\": customerEdgeList[name]}\n write_yaml_data(ip_data, IP_FILE)\n\n r = ansible_runner.run(private_data_dir=playbook_path, playbook='/var/scripts/cpuUsage.yaml')\n out = r.get_fact_cache(customerEdgeList[name])\n retVal = out['output'].split(\" \")\n customerEdgeChange[name]['load'] = float(retVal[1])\n customerEdgeChange[name]['cpus'] = int(retVal[0])\n customerEdgeChange[name]['flag'] = retVal[2]\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" 
CE(load): \"+ name+\" :\"+str(retVal[1])+\"\\n\")\n\n\n for i in customerEdgeChange:\n\n customerLoad[i] = customerEdgeChange[i]['load'] * customerEdgeChange[i]['cpus']\n\n customerLoad = dict(sorted(customerLoad.items(), key=operator.itemgetter(1), reverse = True))\n\n\n for i in providerEdgeChange:\n providerLoad[i] = (100-providerEdgeChange[i]['load']) * providerEdgeChange[i]['cpus']\n if providerEdgeChange[i]['flag'] == \"FALSE\":\n availableProvider.append(i)\n else:\n heavyProvider.append(i) \n\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n\n # # for prov in heavyProvider:\n # #check customerEdge:\n # customerEdges = customerProviderMapping[prov]\n #sort customer Edges\n print(\"availableProvider\" + str(availableProvider))\n print(\"heavyProvider\" + str(heavyProvider))\n\n for cust in customerLoad:\n # only for pe changes\n \n if customerProviderMapping[cust] in heavyProvider:\n\n cLoad = customerLoad[cust] \n\n for prov in providerLoad:\n #check available provider\n if prov in availableProvider:\n\n if cLoad < ((providerEdgeChange[prov]['cpus']*0.6*100)-(providerEdgeChange[prov]['cpus']*100-providerLoad[prov])):\n #logic to add to provider - call ssh \n\n existing_if = customer_data[\"CustomerEdges\"][cust][\"provider_edges\"][customerProviderMapping[cust]]\n new_if = customer_data[\"CustomerEdges\"][cust][\"provider_edges\"][prov]\n \n #add the things to file\n\n route_data = {}\n route_data['ip'] = customerEdgeList[cust]\n route_data['routing'] = []\n data={}\n allRouters = list(customerRouters.keys())\n otherCustomerRouters = list(set(allRouters) - set(customer_data[\"CustomerEdges\"][cust][\"cnetwork\"]))\n for cn in otherCustomerRouters:\n #cNetworks.append(customer_data[\"CustomerEdges\"][cust][\"cnetwork\"][cn])\n \n data[\"source\"] = customerRouters[cn]\n data[\"oldInterface\"] = existing_if\n data[\"newInterface\"] = new_if\n route_data['routing'].append(data)\n\n #route_data = {\"ip\": customerEdgeList[cust], \"oldInterface\": existing_if, \"newInterface\": new_if}\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Transit PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n write_yaml_data(route_data, ROUTE_FILE)\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='/var/scripts/changeDefaultRoute.yaml')\n\n #change in transit route as well\n existing_if = transitCustomerEdgeMap[customerProviderMapping[cust]]\n new_if = transitCustomerEdgeMap[prov]\n\n\n route_data = {}\n route_data['hypervisorIP'] = hypervisor_data['hypervisorIP']\n route_data['transit'] = []\n data={}\n for cn in customer_data[\"CustomerEdges\"][cust][\"cnetwork\"]:\n \n data[\"source\"] = customer_data[\"CustomerEdges\"][cust][\"cnetwork\"][cn]\n data[\"oldInterface\"] = existing_if\n data[\"newInterface\"] = new_if\n data[\"netnsName\"] = transit_data[\"TransitName\"]\n route_data['transit'].append(data)\n\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n #write_yaml_data(route_data, TRANSIT_FILE)\n with open(TRANSIT_FILE, 'w+') as file:\n file.write(json.dumps(route_data))\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='/var/scripts/changeTransitRoute.yaml')\n\n #change in config files\n provider_data[\"ProviderEdges\"][customerProviderMapping[cust]][\"customer_edges\"].remove(cust)\n 
provider_data[\"ProviderEdges\"][prov][\"customer_edges\"].append(cust)\n providerLoad[prov] = (providerLoad[prov] - cLoad)\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n # change customerProviderMapping\n customerProviderMapping[cust] = prov\n break\n\n write_yaml_data(provider_data, \"/etc/config/provider_edges_config.yaml\")\n logFile.close()\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w+') as outfile:\n yaml.dump(data, outfile)\n\n\ndef createVPC(file):\n data = read_yaml_data(file)\n print(data)\n\n\n\ndef main():\n #file = sys.argv[1]\n #createVPC(file)\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n run_ansible_script(logFile)\n\n\nif __name__ == '__main__':\n main()\n\n\n"
},
{
"alpha_fraction": 0.6602191925048828,
"alphanum_fraction": 0.6615086793899536,
"avg_line_length": 33.07692337036133,
"blob_id": "6f4e720ab9ace63c95ebb7d65d70fedd30e67e22",
"content_id": "640d372d9817d03d7258bb3b6d2f37e18972c55c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3102,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 91,
"path": "/M3/src/logic/security.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os \nimport ansible_runner\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nIP_FILE = ANSIBLE_FOLDER_PATH+\"hostVars.yaml\"\nIP_ROUTE_COMMANDS_FILE = ANSIBLE_FOLDER_PATH+\"ipRouteCommandVars.yaml\"\nIP_ROUTE_SCRIPT = ANSIBLE_FOLDER_PATH+\"addIpTableRules.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"PEConfig.txt\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"CEConfig.txt\"\nALLOWED_NETWORK_LIST_FILE = CONFIG_FOLDER_PATH+\"allowed_network_list.yaml\"\n\ndef run_ansible_script(logFile):\n provider_data = read_txt_data(PROVIDER_EDGES_CONFIG_FILE)\n customer_data = read_txt_data(CUSTOMER_EDGES_CONFIG_FILE)\n network_data = read_yaml_data(ALLOWED_NETWORK_LIST_FILE)\n\n command_list = [\"iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j ACCEPT\",\n \"iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j LOG --log-prefix PING-DROP\",\n \"iptables -A INPUT -p icmp -j DROP\"]\n\n for nw in network_data[\"AllowedNetworks\"]:\n allow_nw_cmd = \"iptables -A INPUT -s \"+nw+\" -j ACCEPT\"\n command_list.append(allow_nw_cmd)\n\n cmd_data = {\"IPTableCommands\": command_list}\n write_yaml_data(cmd_data, IP_ROUTE_COMMANDS_FILE)\n print(cmd_data)\n\n Writing to log file\n for cmd in command_list:\n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for pr in provider:\n providerEdgeList[provider[pr][\"ip\"]] = pr\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"PE IPTABLE UPDATE: \" + pr + \"COMMAND: \" + cmd + \"\\n\"\n logFile.write(l)\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n customerEdgeList[customer[cr][\"ip\"]] = cr\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"CE IPTABLE UPDATE: \" + cr + \"COMMAND: \" + cmd + \"\\n\"\n logFile.write(l)\n\n \n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for pr in provider:\n provider_ip = provider[pr][\"ip\"]\n ip_data = {\"host\": provider_ip}\n write_yaml_data(ip_data, IP_FILE)\n \n r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)\n out = r.get_fact_cache(provider_ip)\n\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n customer_ip = customer[cr][\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n\n r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef read_txt_data(f_name):\n data = None\n with open(f_name) as stream:\n data = json.load(stream)\n return data\n\ndef main():\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n \n run_ansible_script(logFile)\n\n logFile.close()\n\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.6448673605918884,
"alphanum_fraction": 0.6495242118835449,
"avg_line_length": 29.677019119262695,
"blob_id": "3e24b8c144d38c1138aa702f317288edf91ef756",
"content_id": "9905526ad29c0352ed3ec3b76f65c56dcc577cba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4939,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 161,
"path": "/M2/src/logic/configure_script.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os \nimport ansible_runner\nimport time\n\ndef get_config_folder_path():\n path = os.getcwd()\n path = path.split(\"/\")\n path = path[:len(path)-2]\n return \"/\".join(path)\n\nCONFIG_FOLDER_PATH = get_config_folder_path()\nIP_FILE = \"hostVars.yaml\"\nIP_ROUTE_COMMANDS_FILE = \"ipRouteCommandVars.yaml\"\nIP_ROUTE_SCRIPT = \"addIpTableRules.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"/config/provider_edges_config.yaml\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"/config/customer_edges_config.yaml\"\nNETWORK_LIST_FILE = CONFIG_FOLDER_PATH+\"/config/network_rule_config.yaml\"\nGET_IP_TABLE_RULES_SCRIPT = \"get_ip_tables_rules.yaml\"\n\nLINE_NUMBER_INDEX = 0\nACTION_INDEX = 1\nPROTOCOL_INDEX = 2\nSOURCE_INDEX = 4\nDESTINATION_INDEX = 5\nCOMMENT_INDEX = 7\n\ndef get_commands(nw_list):\n pass\n\ndef get_customer_edges(site, customer_data):\n customer_list = []\n for customer in customer_data[\"CustomerEdges\"]:\n if customer.startswith(site):\n customer_list.append(customer)\n\n return customer_list\n\ndef get_current_ip_tables_rules(customer):\n customer_ip = customer[\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n\n r = ansible_runner.run(private_data_dir=os.getcwd(), playbook=GET_IP_TABLE_RULES_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n rules = str(out[\"output\"]).split(\"Chain \")\n rules = rules[1]\n rule_array = rules.split(\"\\n\")\n\n new_rule_array = []\n for row in rule_array:\n ip = \" \".join(row.split())\n if ip != '':\n new_rule_array.append(ip)\n\n new_rule_array.pop(0)\n new_rule_array.pop(0)\n\n return new_rule_array\n\ndef find_conflicting_rules(current_rules, rule):\n for current_rule in current_rules:\n c_rule = current_rule.split(\" \")\n temp_source = \"anywhere\"\n if \"SOURCE\" in rule:\n temp_source = rule[\"SOURCE\"]\n temp_destination = \"anywhere\"\n if \"DESTINATION\" in rule:\n temp_destination = rule[\"DESTINATION\"]\n if temp_source == \"0.0.0.0/0\":\n temp_source = \"anywhere\"\n if temp_destination == \"0.0.0.0/0\":\n temp_destination = \"anywhere\"\n if c_rule[SOURCE_INDEX] == temp_source and c_rule[DESTINATION_INDEX] == temp_destination:\n if rule[\"PROTOCOL\"] == \"SSH\":\n if c_rule[COMMENT_INDEX].contains(rule[\"PROTOCOL\"]):\n if c_rule[ACTION_INDEX] != rule[\"ACTION\"]:\n return int(c_rule[LINE_NUMBER_INDEX])\n else:\n return False\n else:\n if c_rule[PROTOCOL_INDEX] == rule[\"PROTOCOL\"]:\n if c_rule[ACTION_INDEX] != rule[\"ACTION\"]:\n return int(c_rule[LINE_NUMBER_INDEX])\n else:\n return False\n return True\n\ndef construct_rule(rule):\n s = \"iptables -I INPUT 1 -p \" + rule[\"PROTOCOL\"]\n if \"SOURCE\" in rule:\n s += \" -s \" + rule[\"SOURCE\"]\n if \"DESTINATION\" in rule:\n s += \" -d \" + rule[\"DESTINATION\"]\n\n s += \" -j \" + rule[\"ACTION\"] \n return s\n\ndef get_delete_rules(line_nums):\n delete_rules = []\n for i, num in enumerate(line_nums):\n delete_rules.append(\"iptables -D INPUT \" + str(line_nums[i]-i))\n\n return delete_rules\n\ndef run_ansible_script():\n provider_data = read_yaml_data(PROVIDER_EDGES_CONFIG_FILE)\n customer_data = read_yaml_data(CUSTOMER_EDGES_CONFIG_FILE)\n network_data = read_yaml_data(NETWORK_LIST_FILE)\n\n command_list = []\n\n for site in network_data:\n customer_edges_list = get_customer_edges(site, customer_data)\n conflicitng_rules_line_numbers = []\n new_rules = []\n if len(customer_edges_list) > 0:\n current_ip_table_rules = 
get_current_ip_tables_rules(customer_data[\"CustomerEdges\"][customer_edges_list[0]])\n for rule in network_data[site]:\n res = find_conflicting_rules(current_ip_table_rules, rule)\n\n if type(res) is int:\n conflicitng_rules_line_numbers.append(res)\n elif res == True:\n new_rules.append(construct_rule(rule))\n\n delete_rules = get_delete_rules(conflicitng_rules_line_numbers)\n new_rules = delete_rules + new_rules\n cmd_data = {\"IPTableCommands\": new_rules}\n write_yaml_data(cmd_data, IP_ROUTE_COMMANDS_FILE)\n for customer in customer_edges_list:\n for rule in new_rules:\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"CE IPTABLE UPDATE: \" + customer + \"COMMAND: \" + rule + \"\\n\"\n logFile.write(l)\n\n customer_ip = customer_data[\"CustomerEdges\"][customer][\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n\n r = ansible_runner.run(private_data_dir=os.getcwd(), playbook=IP_ROUTE_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef main():\n fileName = \"/tmp/logs/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n run_ansible_script()\n logFile.close()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6000417470932007,
"alphanum_fraction": 0.6056749224662781,
"avg_line_length": 54.404624938964844,
"blob_id": "aca37222145ef360c77f3947073778a2e1ecc33a",
"content_id": "32b837af2a538cb500c87cbbc50eeebea2292994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9586,
"license_type": "no_license",
"max_line_length": 380,
"num_lines": 173,
"path": "/M3/src/logic/controller.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l3net.yaml\"\nCREATE_NETWORK_SCRIPT2 = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_container.yaml\"\nCREATE_DOC_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_doc_conn.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l3net.yaml\"\nDELETE_NETWORK_SCRIPT2 = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_container.yaml\"\nDELETE_DOC_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_doc_conn.yaml\"\nRUN_DHCLIENT_SCRIPT = ANSIBLE_FOLDER_PATH+\"run_dhclient.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createFunc(yFile, logFile):\n bridgeName = str(yFile[\"tenantName\"])+\"cont_br\"\n if yFile['controllerNet'].lower()==\"y\":\n print(\"Creating ControllerNet \")\n\n # variables\n networkName = str(yFile[\"tenantName\"])+\"cont_net\"\n br_ip = str(yFile['tenantID'])+'.0.1.1'\n start_ip = str(yFile['tenantID'])+'.0.1.2'\n end_ip = str(yFile['tenantID'])+'.0.1.254'\n\n # create controller network\n if yFile['hypervisorType']==\"primary\":\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e br_ip=\" +br_ip+\" -e start_ip=\"+start_ip+\" -e end_ip=\"+end_ip+\" -e network_name=\"+networkName+\" -e bridge_name=\"+bridgeName+\" -e option=create_vxlan -e id=\"+ str(yFile['tenantID'])+\" -e vxlan_name=\"+str(yFile['tenantName'])+\" -e remoteIP=\"+yFile['remoteIP']\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller in the primary hupervisor : \" + command + \"\\n\")\n\n elif yFile['hypervisorType']==\"secondary\":\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT2 + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e bridge_name=\"+ bridgeName +\" -e network_name=\" + networkName + \" -e vxlan_name=\" + str(yFile['tenantName']) +\" -e id=\" + str(yFile['tenantID']) + \" -e remoteIP=\"+ yFile['remoteIP'] + \" -e option=create_vxlan\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller in the secondary hypervisor : \" + command + \"\\n\")\n\n print(\"Creating Bridge to namespace connection \")\n\n # create controller to transit ns connection\n veth1 = str(yFile[\"tenantID\"])+\"_1\"\n veth2 = str(yFile[\"tenantID\"])+\"_2\"\n namespace = str(yFile['tenantName'])+\"_transit\"\n command = \"sudo ansible-playbook \" + CREATE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" + veth1 +\" -e veth2=\"+ veth2 +\" -e namespace=\"+namespace+\" -e bridge_name=\"+bridgeName+ \" -e option=run_dhclient\"\n subprocess.call([command],shell=True)\n\n #ommand = \"sudo ip netns exec \"+namespace+ \" dhclient \" +veth2\n #subprocess.call([command],shell=True\n \n if \"containers\" in yFile:\n for c in yFile[\"containers\"]:\n print(\"Creating Controller container : \"+str(c['cName']))\n # create controller container\n container = 
str(c['cName'])\n command = \"sudo ansible-playbook \" + CREATE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e container=\"+ container + \" -e image=\"+str(c['image']) + \" -e tid=\"+str(yFile['tenantID'])\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller Container : \" + command + \"\\n\")\n \n # connect to controller network\n print(\"Connecting the container to controller bridge\")\n veth1 = 't' + str(yFile['tenantID']) + '_' + str(c['cID']) + 'cbr1'\n veth2 = 't' + str(yFile['tenantID']) + '_' + str(c['cID']) + 'cbr2'\n\n command = \"sudo ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +veth1+ \" -e veth2=\"+veth2+\" -e bridge_name=\"+bridgeName + \" -e container=\" + str(container) + \" -e option=run_dhclient -e option2=none -e tid=\"+str(yFile['tenantID'])\n\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting to Controller network : \" + command + \"\\n\")\n\n #Run dhclient on container\n #command = \"sudo ansible-playbook \"+ RUN_DHCLIENT_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e container=\"+container\n #subprocess.call([command],shell=True)\n #logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Running dhclient on controller : \" + command + \"\\n\")\n\n\ndef deleteFunc(yFile, logFile):\n #for net - hypervisor, net_name, bridge_name\n #for vm - hypervsr, vm_name\n #for bridge - hyp, veth1\n networkName = str(yFile[\"tenantName\"])+\"cont_net\"\n bridgeName = str(yFile[\"tenantName\"])+\"cont_br\"\n if \"containers\" in yFile:\n for c in yFile[\"containers\"]:\n veth = 't' + str(yFile['tenantID']) + '_' + str(c['cID']) + 'cbr2'\n print(\"Deleting container \"+str(c[\"cName\"]))\n command = \"sudo ansible-playbook \" + DELETE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e container=\"+str(c[\"cName\"])+\" -e veth=\"+veth\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Controller at Primary: \" + command + \"\\n\")\n\n if yFile['controllerNet'].lower()==\"y\":\n if yFile['hypervisorType'] == 'primary':\n print(\"Deleting Controller net at primary\")\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e network_name=\"+networkName+ \" -e bridge_name=\"+bridgeName+\" -e option=delete_vxlan -e vxlan_name=\"+yFile['tenantName']\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting ControllerNet at primary : \" + command + \"\\n\")\n\n elif yFile['hypervisorType'] == 'secondary':\n command = \"sudo ansible-playbook \"+ DELETE_NETWORK_SCRIPT2 + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e bridge_name=\" + bridgeName+ \" -e network_name=\" + networkName + \" -e vxlan_name=\" + yFile['tenantName'] + \" -e option=delete_vxlan\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting ControllerNet at secondary : \" + command + \"\\n\")\n\n print(\"Deleting Bridge to namespace connection\")\n command = \"sudo ansible-playbook \" + DELETE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +str(yFile[\"tenantID\"])+\"_1\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Bridge : \" + command + \"\\n\")\n\n\n\n\ndef checkYAML(yFile, logFile):\n if not 
\"tenantInfo\" in yFile:\n logging.error(\"\\nERROR: Cannot perform create operation!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Tenant Id missing in yaml file : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n if not (\"vms\" in yFile['tenantInfo'] or \"controllerNet\" in yFile['tenantInfo']):\n logging.error(\"\\nERROR: Cannot perform create operation!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Wrong config in yaml file : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"\\nERROR: less than 2 arguments given!!! Require 2 arguments to run\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Argument Length Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n else:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n #open the yaml file\n yFile = read_yaml_data(CONFIG_FOLDER_PATH+yFileName)\n checkYAML(yFile, logFile)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower()==\"delete\":\n print(\"\\nPerforming delete operation depending upon the file\")\n deleteFunc(yFile[\"tenantInfo\"], logFile)\n \n elif str(sys.argv[1]).lower()==\"create\":\n logging.info(\"\\nPerforming create operation depending upon the file\")\n createFunc(yFile[\"tenantInfo\"], logFile)\n else:\n logging.error(\"\\nERROR: Unrecognized Command!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Wrong Command : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"\\nERROR: No yaml/yml file found!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No yaml/yml file found : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.6455764770507812,
"alphanum_fraction": 0.6493836045265198,
"avg_line_length": 29.98314666748047,
"blob_id": "1d3096d028420b5fe98da5f5f928cd9220e19d23",
"content_id": "03c930aac604e5f31af05e980f1a5e519fb29202",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5516,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 178,
"path": "/M3/src/logic/configure.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os \nimport ansible_runner\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nIP_FILE = ANSIBLE_FOLDER_PATH+\"hostVars.yaml\"\nIP_ROUTE_COMMANDS_FILE = ANSIBLE_FOLDER_PATH+\"ipRouteCommandVars.yaml\"\nIP_ROUTE_SCRIPT = ANSIBLE_FOLDER_PATH+\"addIpTableRules.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"PEConfig.txt\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"CEConfig.txt\"\nNETWORK_LIST_FILE = CONFIG_FOLDER_PATH+\"network_rule_config.yaml\"\nGET_IP_TABLE_RULES_SCRIPT = ANSIBLE_FOLDER_PATH+\"get_ip_tables_rules.yaml\"\nIPTABLE_COMMANDS_FILE = ANSIBLE_FOLDER_PATH+\"ipTableCommandVars.yaml\"\n\nLINE_NUMBER_INDEX = 0\nACTION_INDEX = 1\nPROTOCOL_INDEX = 2\nSOURCE_INDEX = 4\nDESTINATION_INDEX = 5\nCOMMENT_INDEX = 7\n\ndef get_commands(nw_list):\n pass\n\ndef get_customer_edges(site, customer_data):\n customer_list = []\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cr.startswith(site):\n customer_list.append(cr)\n\n return customer_list\n\ndef get_current_ip_tables_rules(customer):\n customer_ip = customer[\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n cmd_data = {\"IPTableCommand\": \"iptables --line-numbers -L\"}\n write_yaml_data(cmd_data, IPTABLE_COMMANDS_FILE)\n\n r = ansible_runner.run(private_data_dir=os.getcwd(), playbook=GET_IP_TABLE_RULES_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n print(out)\n rules = str(out[\"output\"]).split(\"Chain \")\n rules = rules[1]\n rule_array = rules.split(\"\\n\")\n\n new_rule_array = []\n for row in rule_array:\n ip = \" \".join(row.split())\n if ip != '':\n new_rule_array.append(ip)\n\n new_rule_array.pop(0)\n new_rule_array.pop(0)\n\n return new_rule_array\n\ndef find_conflicting_rules(current_rules, rule):\n for current_rule in current_rules:\n c_rule = current_rule.split(\" \")\n temp_source = \"anywhere\"\n if \"SOURCE\" in rule:\n temp_source = rule[\"SOURCE\"]\n temp_destination = \"anywhere\"\n if \"DESTINATION\" in rule:\n temp_destination = rule[\"DESTINATION\"]\n if temp_source == \"0.0.0.0/0\":\n temp_source = \"anywhere\"\n if temp_destination == \"0.0.0.0/0\":\n temp_destination = \"anywhere\"\n if c_rule[SOURCE_INDEX] == temp_source and c_rule[DESTINATION_INDEX] == temp_destination:\n if rule[\"PROTOCOL\"] == \"SSH\":\n if c_rule[COMMENT_INDEX].contains(rule[\"PROTOCOL\"]):\n if c_rule[ACTION_INDEX] != rule[\"ACTION\"]:\n return int(c_rule[LINE_NUMBER_INDEX])\n else:\n return False\n else:\n if c_rule[PROTOCOL_INDEX] == rule[\"PROTOCOL\"]:\n if c_rule[ACTION_INDEX] != rule[\"ACTION\"]:\n return int(c_rule[LINE_NUMBER_INDEX])\n else:\n return False\n return True\n\ndef construct_rule(rule):\n s = \"iptables -I INPUT 1 -p \" + rule[\"PROTOCOL\"]\n if \"SOURCE\" in rule:\n s += \" -s \" + rule[\"SOURCE\"]\n if \"DESTINATION\" in rule:\n s += \" -d \" + rule[\"DESTINATION\"]\n\n s += \" -j \" + rule[\"ACTION\"] \n return s\n\ndef get_delete_rules(line_nums):\n delete_rules = []\n for i, num in enumerate(line_nums):\n delete_rules.append(\"iptables -D INPUT \" + str(line_nums[i]-i))\n\n return delete_rules\n\ndef run_ansible_script(logFile):\n provider_data = read_txt_data(PROVIDER_EDGES_CONFIG_FILE)\n customer_data = read_txt_data(CUSTOMER_EDGES_CONFIG_FILE)\n network_data = read_yaml_data(NETWORK_LIST_FILE)\n\n command_list = []\n\n for site in network_data:\n customer_edges_list = get_customer_edges(site, customer_data)\n 
conflicitng_rules_line_numbers = []\n new_rules = []\n if len(customer_edges_list) > 0:\n ip_cr = None\n\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cr in customer_edges_list:\n ip_cr = customer[cr]\n\n current_ip_table_rules = get_current_ip_tables_rules(ip_cr)\n for rule in network_data[site]:\n res = find_conflicting_rules(current_ip_table_rules, rule)\n\n if type(res) is int:\n conflicitng_rules_line_numbers.append(res)\n elif res == True:\n new_rules.append(construct_rule(rule))\n\n delete_rules = get_delete_rules(conflicitng_rules_line_numbers)\n new_rules = delete_rules + new_rules\n cmd_data = {\"IPTableCommands\": new_rules}\n write_yaml_data(cmd_data, IP_ROUTE_COMMANDS_FILE)\n for customer_h in customer_edges_list:\n for rule in new_rules:\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"CE IPTABLE UPDATE: \" + customer + \"COMMAND: \" + rule + \"\\n\"\n logFile.write(l)\n\n customer_ip = None\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cr == customer_h:\n customer_ip = customer[cr][\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n\n r = ansible_runner.run(private_data_dir=os.getcwd(), playbook=IP_ROUTE_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef read_txt_data(f_name):\n data = None\n with open(f_name) as stream:\n data = json.load(stream)\n return data\n\ndef main():\n fileName = \"/tmp/logs/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n run_ansible_script(logFile)\n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.7090300917625427,
"alphanum_fraction": 0.7290970087051392,
"avg_line_length": 22,
"blob_id": "bd29c0ebf1c07f4f6c81e2b1de09274ea97e34e1",
"content_id": "df705151bb597f39e25fe008d636223563adf091",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 13,
"path": "/M2/var/scripts/old_scripts/deletePENet.sh",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "#!/bin/bashsudo\n\n# destroy and undefine network\nsudo virsh net-destroy \"$1_PEnet\"\nsudo virsh net-undefine \"$1_PEnet\"\n\n# delete bridge\nsudo ip link set dev \"$1_PEnet\" down\nsudo brctl delbr \"$1_PEnet\"\n\n# delete veth pair to transit\nsudo ip link set dev \"$1PEveth\" down\nsudo ip link del dev \"$1PEveth\"\n"
},
{
"alpha_fraction": 0.5941511988639832,
"alphanum_fraction": 0.5994293689727783,
"avg_line_length": 46.68027114868164,
"blob_id": "be6c83b6eab63068f1b2375f5f85cb9ab56b3f8c",
"content_id": "d77a4d0a9a2d622e4cbaba5a0e0be2b0f750e7a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7010,
"license_type": "no_license",
"max_line_length": 281,
"num_lines": 147,
"path": "/M2/src/logic/controller.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l3net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_vm.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l3net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_VM_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_vm.yaml\"\nRUN_DHCLIENT_SCRIPT = ANSIBLE_FOLDER_PATH+\"run_dhclient.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createFunc(yFile, logFile):\n if yFile['controllerNet'].lower()==\"y\":\n\n #vm = hypervisor, template, vm_name - create_vm.yaml, controllerTemplate.xml.j2, mem, vcpu, net\n #start_ip, bridge_name, start_ip, end_ip, network_name\n #bridge = hyp, bridge_name, namespace(tenantNAme_transit), veth1, veth2\n\n print(\"Creating ControllerNet \")\n\n # variables\n networkName = str(yFile[\"tenantID\"])+\"controller_net\"\n bridgeName = str(yFile[\"tenantID\"])+\"controller_br\"\n br_ip = str(yFile['tenantID'])+'.0.0.1'\n start_ip = str(yFile['tenantID'])+'.0.0.2'\n end_ip = str(yFile['tenantID'])+'.0.0.254'\n\n # create controller network\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e br_ip=\" +br_ip+\" -e start_ip=\"+start_ip+\" -e end_ip=\"+end_ip+\" -e network_name=\"+networkName+\" -e bridge_name=\"+bridgeName\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller : \" + command + \"\\n\")\n\n print(\"Creating Bridge to namespace connection \")\n\n # create controller to transit ns connection\n comand = \"sudo ansible-playbook \" + CREATE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +str(yFile[\"tenantID\"])+\"_1\"+\" -e veth2=\"+str(yFile[\"tenantID\"])+\"_2\"+\" -e namespace=\"+str(yFile['tenantName'])+\"_transit\"+\" -e bridge_name=\"+bridgeName\n subprocess.call([command],shell=True)\n \n # run dhclient on the namespace\n command = \"sudo ansible-playbook \" + RUN_DHCLIENT_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e namespace=\" + str(yFile['tenantName'] + \"_transit -e veth=\" + str(yFile[\"tenantID\"])+\"_2\"\n \n subprocess.call([command],shell=True)\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n\n if \"vms\" in yFile:\n for vm in yFile[\"vms\"]:\n print(\"Creating VM : \"+str(vm['vmName']))\n\n # create controller vm\n command = \"sudo ansible-playbook \" + CREATE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e vm_name=\"+str(vm['vmName']) +\" -e mem=\"+str(vm['mem'])+\" -e vcpu=\"+str(vm['vcpu']) +\" -e network=\"+networkName\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Controller VM : \" + command + \"\\n\")\n\ndef deleteFunc(yFile, logFile):\n #for net - hypervisor, net_name, bridge_name\n #for vm - hypervsr, vm_name\n #for bridge - hyp, veth1\n networkName = str(yFile[\"tenantID\"])+\"controller_net\"\n bridgeName = str(yFile[\"tenantID\"])+\"controller_br\"\n if \"vms\" in yFile:\n for vm in yFile[\"vms\"]:\n 
print(\"Deleting VM \"+str(vm[\"vmName\"]))\n\n command = \"sudo ansible-playbook \" + DELETE_VM_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e vm_name=\"+str(vm[\"vmName\"])+\" -e network=\"+networkName\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting Controller : \" + command + \"\\n\")\n\n if yFile['controllerNet'].lower()==\"y\":\n print(\"Deleting Controller \")\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e network_name=\"+networkName+ \" -e bridge_name=\"+bridgeName\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting ControllerNet : \" + command + \"\\n\")\n\n print(\"Deleting Bridge \")\n command = \"sudo ansible-playbook \" + DELETE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +str(yFile[\"tenantID\"])+\"_1\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge : \" + command + \"\\n\")\n\n\ndef checkYAML(yFile, logFile):\n if not \"tenantInfo\" in yFile:\n logging.error(\"\\nERROR: Cannot perform create operation!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Tenant Id missing in yaml file : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n if not (\"vms\" in yFile['tenantInfo'] or \"controllerNet\" in yFile['tenantInfo']):\n logging.error(\"\\nERROR: Cannot perform create operation!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Wrong config in yaml file : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"\\nERROR: less than 2 arguments given!!! Require 2 arguments to run\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Argument Length Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n else:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n #open the yaml file\n yFile = read_yaml_data(yFileName)\n checkYAML(yFile, logFile)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower()==\"delete\":\n print(\"\\nPerforming delete operation depending upon the file\")\n deleteFunc(yFile[\"tenantInfo\"], logFile)\n \n elif str(sys.argv[1]).lower()==\"create\":\n logging.info(\"\\nPerforming create operation depending upon the file\")\n createFunc(yFile[\"tenantInfo\"], logFile)\n else:\n logging.error(\"\\nERROR: Unrecognized Command!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Wrong Command : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"\\nERROR: No yaml/yml file found!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No yaml/yml file found : \"+ str(sys.argv)+\"\\n\")\n exit(0)\n logFile.close()\n\n\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.55033940076828,
"alphanum_fraction": 0.5616215467453003,
"avg_line_length": 47.19355010986328,
"blob_id": "69d50becf03dedb5f5e5bc8451f8a712ab5ede1a",
"content_id": "5f225e61f1a04d096b8f20da350637fefbf599e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10459,
"license_type": "no_license",
"max_line_length": 338,
"num_lines": 217,
"path": "/M3/src/logic/providerEdge.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\nimport re\nimport json\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_DOC_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_doc_conn.yaml\"\nCREATE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_container.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_container.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createPE(yFile, logFile):\n print(\"creating pe\")\n print(yFile)\n if yFile['change_net'].lower()=='y':\n print(\" Creating PE network\")\n # log\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating IP Ranges for PE : \"+ str(yFile[\"tenant_name\"])+\"\\n\")\n\n # variables\n bridge_name = yFile['tenant_name']+'_PT_br'\n network_name = yFile['tenant_name']+'_PT_net'\n veth1 = yFile['tenant_name']+'PT1'\n veth2 = yFile['tenant_name']+'PT2'\n transit = yFile['tenant_name']+'_transit'\n\n # create l2 bridge\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e option=none -e bridge_name=\"+bridge_name+\" -e network_name=\"+network_name\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n\n # connect bridge with transit namespace\n if yFile['hypervisorType'] == 'primary':\n o_ip = str(yFile['tenant_id'])+\".0.2.254/24\"\n else:\n o_ip = str(yFile['tenant_id'])+\".255.2.254/24\"\n command = \"sudo ansible-playbook \" + CREATE_BRNS_CONN_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge_name+\" -e namespace=\"+transit +\" -e option=assign_ip -e ip=\"+o_ip\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n\n # assign ip address\n #command = \"sudo ip netns exec \"+transit +\" ip add a \"+ str(yFile['tenant_id'])+\".0.2.254/24 dev \"+veth2 \n\n\n\n #print(command)\n #subprocess.call([command], shell=True)\n #logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Assigning IP to transit to PE connection : \" + command + \"\\n\")\n\n if \"containers\" in yFile:\n controller_br = str(yFile['tenant_name'])+'cont_br'\n bridge_name = yFile['tenant_name']+'_PT_br'\n for c in yFile[\"containers\"]:\n print(\"Creating VM : \"+str(c['PE_name']))\n # create provider vm\n container = str(yFile['tenant_name'])+str(c['PE_name'])\n #print(container)\n command = \"sudo ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + CREATE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e container=\"+ container + \" -e image=\" + c['image'] + \" -e tid=\"+str(yFile['tenant_id'])\n #print(command)\n check_output = subprocess.check_output([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Provider Edge Container : \" + command + \"\\n\")\n \n # connect to controller network\n print(\"Connecting the container to controller 
bridge\")\n veth1 = container + 'cbr1'\n veth2 = container + 'cbr2'\n command = \"sudo ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +veth1+ \" -e veth2=\"+veth2+\" -e bridge_name=\"+ controller_br + \" -e container=\" + str(container) + \" -e option=run_dhclient -e option2=none -e tid=\"+str(yFile['tenant_id'])\n \n\n check_output = subprocess.check_output([command],shell=True)\n print(check_output)\n r1 = re.search(r\"(([0-2]?[0-9]?[0-9]\\.)([0]\\.)([1]\\.)([0-2]?[0-9]?[0-9]))\",str(check_output))\n print(r1)\n with open('/etc/config/container/aliveStatus.txt',\"r+\") as f:\n fileData = json.load(f)\n pe = {}\n print(r1.group(0))\n pe['ip'] = r1.group(0)\n pe['name'] = container\n pe['1astPing'] = time.time()\n fileData['status'].append(pe)\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n print(fileData)\n f.close()\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting to Controller network : \" + command + \"\\n\")\n\n with open('/etc/config/container/PEConfig.txt',\"r+\") as f:\n fileData = json.load(f)\n pe = {}\n pe[container]={}\n pe[container]['ip'] = r1.group(0)\n fileData['ProviderEdges'].append(pe)\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n with open('/etc/ansible/hosts','a+') as f:\n f.write(r1.group(0) + \" ansible_user=root ansible_ssh_private_key_file=/root/.ssh/id_rsa\")\n f.close()\n\n\n # attach provider edge to PE-Transit Network\n if yFile['hypervisorType'] == 'primary':\n c_ip = str(yFile['tenant_id'])+'.0.2.'+str(c['id'])+\"/24\"\n default_ip = str(yFile['tenant_id'])+'.0.2.254'\n else:\n c_ip = str(yFile['tenant_id'])+'.255.2.'+str(c['id'])+\"/24\"\n default_ip = str(yFile['tenant_id'])+'.255.2.254'\n veth1 = 'veth_t'+str(yFile['tenant_id'])+'_p'+str(c['id'])+'_1'\n veth2 = 'veth_t'+str(yFile['tenant_id'])+'_p'+str(c['id'])+'_2'\n command = \"sudo ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e container=\"+ container + \" -e bridge_name=\" + bridge_name + \" -e veth1=\" + veth1 + \" -e veth2=\" + veth2+\" -e option=assign_ip -e option2=default -e ip=\"+ c_ip +\" -e default_ip=\"+default_ip\n #print(command)\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Attaching Provider Edge Container to PE-transit Bridge : \" + command + \"\\n\")\n\n with open('/etc/config/container/TPE_config.txt', \"r+\") as f:\n fileData = json.load(f)\n fileData['TransitEdges'][container] = c_ip[:-3]\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n\ndef deletePE(yFile, logFile):\n if 'containers' in yFile:\n for c in yFile['containers']:\n container = str(yFile['tenant_name'])+str(c['PE_name'])\n # delete PE Container\n command = \"sudo ansible-playbook \" + DELETE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e container=\" + container\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE : \" + command + \"\\n\")\n\n if yFile['change_net'].lower()=='y':\n network_name = yFile['tenant_name']+'_PT_net'\n bridge_name = yFile['tenant_name']+'_PT_br'\n veth1 = yFile['tenant_name']+'PT1'\n\n # delete l2 network\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e option=none -e network_name=\"+ network_name +\" -e bridge_name=\"+ bridge_name\n subprocess.call([command], shell=True)\n 
logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE-Transit network : \" + command + \"\\n\")\n\n # delete veth pair\n command = \"sudo ansible-playbook \" + DELETE_BRNS_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e veth1=\"+ veth1\n subprocess.call([command], shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting PE-Transit connection : \" + command + \"\\n\")\n\ndef checkYaml(yFile, logFile):\n if 'PE' not in yFile:\n print(\"ERROR!!! Missing 'PE' key in YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Missing 'PE' key in YAML file : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n if not 'change_net' in yFile['PE'] and not 'containers' in yFile['PE']:\n print(\"ERROR!!! Cannot process the given YAML file. MISSING KEYS!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Cannot process the given YAML file. MISSING KEYS!!! : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\n else:\n yFileName = sys.argv[2]\n # print(yFileName)\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFilePath = CONFIG_FOLDER_PATH + yFileName\n yFile = read_yaml_data(yFilePath)\n checkYaml(yFile, logFile)\n print(\"test\")\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n print(\"Performing delete operation depending upon the file\")\n deletePE(yFile['PE'], logFile)\n elif str(sys.argv[1]).lower() == \"create\":\n print(\"Performing create operation depending upon the file\")\n createPE(yFile['PE'], logFile)\n except:\n print(\"ERROR!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n else:\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n \n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.5996354818344116,
"alphanum_fraction": 0.6046982407569885,
"avg_line_length": 34.6534309387207,
"blob_id": "b0541c4efe13718b12686a8c70e4daa238325b63",
"content_id": "0614227bd9136da2bd18573d76f652666338b61a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19752,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 554,
"path": "/M3/src/logic/cpuLB.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os\nimport json\nimport ansible_runner\nimport operator\nimport time\nimport subprocess\n#nproc\n#ip- load, cpus. flag\nproviderEdgeChange = {}\n\n#ip- load, cpus\ncustomerEdgeChange = {}\n\ncustomerProviderMapping = {}\n\navailableProvider = []\nheavyProvider = []\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"PEConfig.txt\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"CEConfig.txt\"\nTRANSIT_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"TPE_config.txt\"\nSTATS_LOADER_FILE = CONFIG_FOLDER_PATH+\"statsLoader.txt\"\nROUTE_FILE = ANSIBLE_FOLDER_PATH+\"changeRouteVars.yaml\"\nTRANSIT_FILE = ANSIBLE_FOLDER_PATH+\"changeTransitVars.yaml\"\nROUTE_CHANGE_SCRIPT = ANSIBLE_FOLDER_PATH+\"changeDefaultRoute.yaml\"\n\n# See if PE has to be changed\n# If PE is changed, then think about CE\n# IF PE is not changed, think only about auto scaling\n\n# this is because CE and PE can have different configurations\n\ndef readStatsFile():\n filepath = 'STATS_LOADER_FILE'\n statsMap = {}\n with open(filepath) as fp:\n line = fp.readline()\n while line:\n line = line.split(\" \")\n statsMap[name] = line[0]\n statsMap[name]['cpus'] = int(line[1])\n statsMap[name]['load'] = float(line[2])\n statsMap[name]['flag'] = line[3]\n \n return statsMap\n\n\ndef run_ansible_script(logFile):\n # Refer https://ansible-runner.readthedocs.io/en/latest/python_interface.html for ansible doc\n \n provider_data = read_yaml_data(\"PROVIDER_EDGES_CONFIG_FILE\")\n customer_data = read_yaml_data(\"CUSTOMER_EDGES_CONFIG_FILE\")\n transit_data = read_yaml_data(\"TRANSIT_EDGES_CONFIG_FILE\")\n providerList = {}\n customerEdgeList = {}\n customerEdgeCount = {}\n providerIdList = {}\n\n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for pr in provider:\n providerList[pr] = provider[pr][\"ip\"]\n if \"customer_edges\" in provider[\"pr\"] and len(customer[\"cr\"][\"customer_edges\"])>0:\n for cr in provider[\"pr\"][\"customer_edges\"]:\n customerProviderMapping[cr] = pr\n\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n customerEdgeList[cr] = customer[cr][\"ip\"]\n\n for key, customer in enumerate(customer_data[\"CECount\"]):\n for cr in customer:\n customerEdgeCount[cr] = {}\n customerEdgeCount[cr]['max'] = customer[cr]['max']\n customerEdgeCount[cr]['min'] = customer[cr]['min']\n\n\n print(providerList)\n print(customerEdgeList)\n \n customerLoad = {}\n providerLoad = {}\n\n playbook_path = os.getcwd()\n providerChangeFlag = False\n customerChangeFlag = False\n \n ##read stats file\n statsMap = readStatsFile()\n\n # r = ansible_runner.run(private_data_dir=playbook_path, playbook='ansible.yaml')\n # out = r.get_fact_cache(\"50.0.0.217\")\n # print(out['output'])\n\n #playbook_path = playbook_path.replace(\"north_bound\", \"ansible_scripts\")\n for name in providerList:\n #store in that file\n #ip_data = {\"host\": providerList[name]}\n #write_yaml_data(ip_data, IP_FILE)\n\n providerEdgeChange[name]={}\n #r = ansible_runner.run(private_data_dir=playbook_path, playbook='ansi.yaml')\n #out = r.get_fact_cache(providerList[name])\n #retVal = out['output'].split(\" \")\n providerEdgeChange[name]['load'] = statsMap[name]['load']\n providerEdgeChange[name]['cpus'] = statsMap[name]['cpus']\n providerEdgeChange[name]['flag'] = statsMap[name]['flag']\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" PE(load): \"+ name+\" 
:\"+str(statsMap[name]['load'])+\"\\n\")\n \n providerLoad[name] = (100-providerEdgeChange[i]['load']) * providerEdgeChange[i]['cpus']\n\n if statsMap[name]['flag']==\"TRUE\":\n \n providerChangeFlag = True\n availableProvider.append(name) \n \n else:\n \n heavyProvider.append(name) \n\n \n for name in customerEdgeList:\n #store in that file\n customerEdgeChange[name]={}\n\n #ip_data = {\"host\": customerEdgeList[name]}\n #write_yaml_data(ip_data, IP_FILE)\n\n #r = ansible_runner.run(private_data_dir=playbook_path, playbook='cpuUsage.yaml')\n #out = r.get_fact_cache(customerEdgeList[name])\n #retVal = out['output'].split(\" \")\n customerEdgeChange[name]['load'] = statsMap[name]['load']\n customerEdgeChange[name]['cpus'] = statsMap[name]['cpus']\n customerEdgeChange[name]['flag'] = statsMap[name]['flag']\n \n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" CE(load): \"+ name+\" :\"+str(statsMap[name]['load'])+\"\\n\")\n customerLoad[name] = customerEdgeChange[name]['load'] * customerEdgeChange[name]['cpus']\n \n if statsMap[name]['flag']==\"TRUE\":\n customerChangeFlag = True\n\n\n # downgrade - intermittent VM\n for name in customerEdgeChange:\n\n if customerEdgeChange[name][\"load\"] < 20 and len({key for key in customerEdgeList.items() \n if key.startswith(customer.split(\"CE\")[0])}) > 1:\n\n #auto scale\n currentPE = customerProviderMapping[name]\n #give this PE to customer\n\n deleteCEContainer(customer.split(\"CE\")[0],currentPE, customer.split(\"CE\")[1])\n output = subprocess.check_output(\"sudo python customerEdge.py create CEConfVar.yaml\", shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Down Scaling CE: \"+name+\" PE = \"+currentPE+\"\\n\")\n \n # remove from customerredgelist\n customerEdgeList.remove(name)\n\n\n\n\n\n if providerChangeFlag == True or customerChangeFlag == True:\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n customerLoad = dict(sorted(customerLoad.items(), key=operator.itemgetter(1), reverse = True))\n\n if providerChangeFlag == False and customerChangeFlag == True:\n\n #only auto scaling\n for customer in customerEdgeChange:\n\n if customerEdgeChange[customer]['flag'] == \"TRUE\":\n # for autoscaling, we can give \n if len({key for key in customerEdgeList.items() \n if key.startswith(customer.split(\"CE\")[0])}) < customerEdgeCount[customer.split(\"CE\")[0]]['max']:\n\n #auto scale\n currentPE = customerProviderMapping[currentContainers[0]]\n #give this PE to customer\n\n createCEContainer(customer.split(\"CE\")[0],currentPE)\n output = subprocess.check_output(\"sudo python customerEdge.py create CEConfVar.yaml\", shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Auto Scaling CE: \"+customer.split(\"CE\")[0]+\" PE = \"+currentPE+\"\\n\")\n\n \n\n #call sandeep function. 
When the return is success - add it to provider Mac\n \n \n\n \n\n\n elif providerChangeFlag == True and customerChangeFlag == True:\n\n #both have to be done\n transitCustomerEdgeMap = {}\n \n for i in transit_data[\"Transit_Edges\"]:\n transitCustomerEdgeMap[i] = transit_data[\"Transit_Edges\"][i]\n\n for customer in customerEdgeChange:\n\n if customerEdgeChange[customer]['flag'] == \"TRUE\":\n #auto scaling if possible\n\n currentContainers = {key for key in customerEdgeList.items() \n if key.startswith(customer.split(\"CE\")[0])}\n\n if len(currentContainers) < customerEdgeCount[customer.split(\"CE\")[0]]['max']:\n #auto scale - else chuck\n\n #find how many autoscaling can be done\n numberOfAutoScaling = 1\n reduceLoad = (customerEdgeChange[customer]['load'] - 60) * len(currentContainers)\n while(reduceLoad)\n currentCELoad = 0\n for container in currentContainers:\n customerEdgeChange[name]['load'] = currentLoad[container] * ((len(currentContainers))/(len(currentContainers)+1))\n currentLoad[container] = currentLoad[container] * ((len(currentContainers))/len(currentContainers)+1)\n currentCELoad = customerEdgeChange[name]['load']\n\n customerLoad = dict(sorted(customerLoad.items(), key=operator.itemgetter(1), reverse = True))\n\n #now find a suitable PE for load\n currentPE = customerProviderMapping[currentContainers[0]]\n for pe in providerLoad:\n if pe in availableProvider and providerLoad[pe]>currentCELoad:\n currentPE = pe \n providerLoad[pe] -= currentCELoad\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n break;\n\n #autoscale with currentPE and create a new instance\n \n #create CE and autoscale\n \n # add it to provider Edge file and customerEdge file\n createCEContainer(customer.split(\"CE\")[0],currentPE)\n output = subprocess.check_output(\"sudo python customerEdge.py create CEConfVar.yaml\", shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Auto Scaling CE: \"+customer.split(\"CE\")[0]+\" PE = \"+currentPE+\"\\n\")\n\n \n \n\n # divide customerLoad\n\n #customerLoad[customer] = customerLoad[customer] - {{load}}\n #change it \n\n #-----PE----------Dynamic Routing Change\n\n for cust in customerLoad:\n # only for pe changes\n \n if customerProviderMapping[cust] in heavyProvider:\n\n cLoad = customerLoad[cust] \n\n for prov in providerLoad:\n #check available provider\n if prov in availableProvider:\n\n if cLoad < ((providerEdgeChange[prov]['cpus']*0.6*100)-(providerEdgeChange[prov]['cpus']*100-providerLoad[prov])):\n #logic to add to provider - call ssh \n\n existing_if = None\n new_if = None\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cust==cr:\n for pe in customer[cr][\"provider_edges\"]:\n if pe==customerProviderMapping[cust]:\n existing_if = customer[cr][\"provider_edges\"][pe]\n if pe==prov:\n new_if = customer[cr][\"provider_edges\"][pe]\n \n #add the things to file\n route_data = {\"ip\": customerEdgeList[name], \"oldInterface\": existing_if, \"newInterface\": new_if}\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Transit PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n write_yaml_data(route_data, ROUTE_FILE)\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='changeDefaultRoute.yaml')\n\n #change in transit route as well\n existing_if = transitCustomerEdgeMap[customerProviderMapping[cust]]\n new_if = transitCustomerEdgeMap[prov]\n 
cNetworks=[]\n\n\n route_data = {}\n count=0\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cr==cust:\n for cn in customer[cr][\"cnetwork\"]:\n route_data[count] = {}\n route_data[count][\"source\"] = customer[cr][\"cnetwork\"][cn]\n route_data[count][\"oldInterface\"] = existing_if\n route_data[count][\"newInterface\"] = new_if\n route_data[count][\"netnsName\"] = cr.split(\"ns\")[0]+\"_transit\"\n count+=1\n\n print(route_data)\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n write_yaml_data(route_data, TRANSIT_FILE)\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='changeTransitRoute.yaml')\n\n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for i in range(len(provider)):\n\n if provider_data[\"ProviderEdges\"][i]==customerProviderMapping[cust]:\n provider_data[\"ProviderEdges\"][i][customerProviderMapping[cust]][\"customer_edges\"].remove(cust)\n\n if provider_data[\"ProviderEdges\"][i]==prov:\n provider_data[\"ProviderEdges\"][i][prov][\"customer_edges\"].append(cust)\n\n\n\n\n #change in config files\n # provider_data[\"ProviderEdges\"][customerProviderMapping[cust]][\"customer_edges\"].remove(cust)\n # provider_data[\"ProviderEdges\"][prov][\"customer_edges\"].append(cust)\n \n\n providerLoad[prov] = (providerLoad[prov] - cLoad)\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n # change customerProviderMapping\n customerProviderMapping[cust] = prov\n break\n\n \n\n else:\n\n #only PE Change\n\n for cust in customerLoad:\n # only for pe changes\n \n if customerProviderMapping[cust] in heavyProvider:\n\n cLoad = customerLoad[cust] \n\n for prov in providerLoad:\n #check available provider\n if prov in availableProvider:\n\n if cLoad < ((providerEdgeChange[prov]['cpus']*0.6*100)-(providerEdgeChange[prov]['cpus']*100-providerLoad[prov])):\n #logic to add to provider - call ssh \n\n existing_if = None\n new_if = None\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cust==cr:\n for pe in customer[cr][\"provider_edges\"]:\n if pe==customerProviderMapping[cust]:\n existing_if = customer[cr][\"provider_edges\"][pe]\n if pe==prov:\n new_if = customer[cr][\"provider_edges\"][pe]\n\n #existing_if = customer_data[\"CustomerEdges\"][cust][\"provider_edges\"][customerProviderMapping[cust]]\n #new_if = customer_data[\"CustomerEdges\"][cust][\"provider_edges\"][prov]\n \n #add the things to file\n route_data = {\"ip\": customerEdgeList[name], \"oldInterface\": existing_if, \"newInterface\": new_if}\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Transit PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n write_yaml_data(route_data, ROUTE_FILE)\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='changeDefaultRoute.yaml')\n\n #change in transit route as well\n existing_if = transitCustomerEdgeMap[customerProviderMapping[cust]]\n new_if = transitCustomerEdgeMap[prov]\n\n\n route_data = {}\n count=0\n\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n if cr==cust:\n for cn in customer[cr][\"cnetwork\"]:\n route_data[count] = {}\n route_data[count][\"source\"] = customer[cr][\"cnetwork\"][cn]\n route_data[count][\"oldInterface\"] = 
existing_if\n route_data[count][\"newInterface\"] = new_if\n route_data[count][\"netnsName\"] = cr.split(\"ns\")[0]+\"_transit\"\n count+=1\n\n print(route_data)\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" PE-CE(update): CE= \"+ cust+\" OLD PE= \"+ customerProviderMapping[cust]+\" NEW PE= \"+prov+ \" COMMAND = \"+str(route_data)+\"\\n\")\n write_yaml_data(route_data, TRANSIT_FILE)\n \n r = ansible_runner.run(private_data_dir=playbook_path, playbook='changeTransitRoute.yaml')\n \n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for i in range(len(provider)):\n\n if provider_data[\"ProviderEdges\"][i]==customerProviderMapping[cust]:\n provider_data[\"ProviderEdges\"][i][customerProviderMapping[cust]][\"customer_edges\"].remove(cust)\n\n if provider_data[\"ProviderEdges\"][i]==prov:\n provider_data[\"ProviderEdges\"][i][prov][\"customer_edges\"].append(cust)\n\n\n\n\n #change in config files\n # provider_data[\"ProviderEdges\"][customerProviderMapping[cust]][\"customer_edges\"].remove(cust)\n # provider_data[\"ProviderEdges\"][prov][\"customer_edges\"].append(cust)\n providerLoad[prov] = (providerLoad[prov] - cLoad)\n providerLoad = dict(sorted(providerLoad.items(), key=operator.itemgetter(1), reverse = True))\n # change customerProviderMapping\n customerProviderMapping[cust] = prov\n break\n\n write(provider_data, \"provider_edges_config.yaml\")\n #save provider file\n with open(PROVIDER_EDGES_CONFIG_FILE,\"r+\") as f:\n f.seek(0)\n f.truncate()\n json.dump(provider_data, f)\n \n #truncate stats loader file as well\n f = open(\"STATS_LOADER_FILE\", \"w\")\n f.close()\n\n logFile.close()\n\n\ndef createCEContainer(sitePrefix, PE):\n #t50ns1\n sitePrefix = sitePrefix.split(\"ns\")\n site = \"ns\"+str(sitePrefix[1])\n varPE = {}\n varPE['CE'] = {}\n varPE['CE']['tenant_id'] = sitePrefix[0][1:]\n varPE['CE']['site_id'] = sitePrefix[1]\n varPE['hypervisorType'] = \"primary\"\n varPE['change_container'] = 'n'\n varPE['container'] = {}\n varPE['container']['image'] = \"edge_sd\"\n varPE['container']['change_link'] = 'n'\n currPE = 0\n peList = []\n \n for provider in providerList:\n if provider==PE:\n currPE = int(providerList[provider])\n else:\n peList.append(int(providerList[provider]))\n \n peList.append(currPE)\n varPE['PE'] = peList\n \n ceList = {key for key in customerEdgeList.items() \n if key.startswith(customer.split(\"CE\")[0])}\n currCE = 0\n \n for ce in ceList:\n \n if currCE < int(ce.split(\"CE\")[1]):\n currCE = int(ce.split(\"CE\")[1])\n\n varPE['continer']['id'] = currCE+1\n\n write_yaml_data(varPE,'/etc/config/container/CEConfVar.yaml')\n\ndef deleteCEContainer(sitePrefix, PE, containerId):\n #t50ns1\n sitePrefix = sitePrefix.split(\"ns\")\n site = \"ns\"+str(sitePrefix[1])\n varPE = {}\n varPE['CE'] = {}\n varPE['CE']['tenant_id'] = sitePrefix[0][1:]\n varPE['CE']['site_id'] = sitePrefix[1]\n varPE['hypervisorType'] = \"primary\"\n varPE['change_container'] = 'y'\n varPE['container'] = {}\n varPE['container']['image'] = \"edge_sd\"\n varPE['container']['id'] = \n varPE['container']['change_link'] = 'y'\n currPE = 0\n peList = []\n \n for provider in providerList:\n if provider==PE:\n currPE = int(providerList[provider])\n else:\n peList.append(int(providerList[provider]))\n \n peList.append(currPE)\n varPE['PE'] = peList\n\n varPE['continer']['id'] = containerId \n\n write_yaml_data(varPE,'/etc/config/container/CEConfVar.yaml')\n\n\n# ---\n# CE:\n# tenant_id: 50\n# tenant_name: t50\n# site: ns2\n# site_id: 2\n# hypervisorType: primary\n# 
delete_container: 'y'\n# container:\n# - image: hw4\n# id: 1\n# delete_link: 'y'\n# PE: [1, 2]\n\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = json.load(stream)\n return data\n\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\n\ndef createVPC(file):\n data = read_yaml_data(file)\n print(data)\n #vpc_data = read_yaml_data(VPC_FILE)\n\n #valid = validateInput(data, vpc_data)\n\n\ndef main():\n #file = sys.argv[1]\n #createVPC(file)\n fileName = \"/tmp/logs/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n run_ansible_script(logFile)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.569609522819519,
"alphanum_fraction": 0.5725806355476379,
"avg_line_length": 42.62036895751953,
"blob_id": "450a58126a2b21aaea51c29e62318d6b0a47f277",
"content_id": "939650b04229861652f46ddbe8c64902faa752fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4712,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 108,
"path": "/M2/src/logic/createCEPEnet.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_CONN = ANSIBLE_FOLDER_PATH+\"create_conn.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createCEPECONN(yFile, yFileName, logFile):\n print(\"creating cepe connection\")\n print(yFile)\n if \"PEs\" in yFile:\n print(\" Creating CEPE connection\")\n\n for pe in yFile[\"PEs\"]:\n #variables\n bridge_name = yFile['tenant_name']+yFile['CE_name']+pe+'_br'\n network_name = yFile['tenant_name']+yFile['CE_name']+pe+'_net'\n \n # create l2 bridge\n command = \"sudo ansible-playbook \" + CREATE_NETWORK_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e bridge_name=\"+bridge_name+\" -e network_name=\"+network_name\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n\n #Create CE connection\n command = \"sudo ansible-playbook \"+ CREATE_CONN + \" -e vm=\"+yFile['CE_name']+\" -e network=\"+network_name+ \" -e hypervisor=\"+yFile['hypervisorType']\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating CE connection : \" + command + \"\\n\")\n\n #Create PE connection\n command = \"sudo ansible-playbook \"+ CREATE_CONN + \" -e vm=\"+pe+\" -e network=\"+network_name+ \" -e hypervisor=\"+yFile['hypervisorType']\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating CE connection : \" + command + \"\\n\")\n\ndef deleteCEPECONN(yFile, logFile):\n if 'PEs' in yFile:\n for pe in yFile['PEs']:\n #variables\n bridge_name = yFile['tenant_name']+yFile['CE_name']+pe+'_br'\n network_name = yFile['tenant_name']+yFile['CE_name']+pe+'_net'\n \n # delete l2 bridge\n command = \"sudo ansible-playbook \" + DELETE_NETWORK_SCRIPT + \" -e hypervisor=\"+yFile['hypervisorType']+\" -e bridge_name=\"+bridge_name+\" -e network_name=\"+network_name\n subprocess.call([command], shell = True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting L2 Bridge : \" + command + \"\\n\")\n \ndef checkYaml(yFile, logFile):\n if 'CEPE' not in yFile:\n print(\"ERROR!!! Missing 'CEPE' key in YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Missing 'CE' key in YAML file : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n if not 'PEs' in yFile['CEPE']:\n print(\"ERROR!!! Cannot process the given YAML file. MISSING KEYS!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Cannot process the given YAML file. MISSING KEYS!!! 
: \"+ str(sys.argv) +\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\n else:\n yFileName = sys.argv[2]\n # print(yFileName)\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFile = read_yaml_data(yFileName)\n #print(yFile)\n checkYaml(yFile, logFile)\n print(\"test\")\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n print(\"Performing delete operation depending upon the file\")\n deleteCEPECONN(yFile['CEPE'], logFile)\n elif str(sys.argv[1]).lower() == \"create\":\n print(\"Performing create operation depending upon the file\")\n createCEPECONN(yFile['CEPE'], yFileName, logFile)\n except:\n print(\"ERROR!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n else:\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n \n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.599053144454956,
"alphanum_fraction": 0.6005098223686218,
"avg_line_length": 28.212766647338867,
"blob_id": "ed2c8014cf1339866839639cc52e9e28da7ef027",
"content_id": "bf50b8fe9b36add6312788a56c3fa6b725158135",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2746,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 94,
"path": "/M3/etc/config/aliveRunner.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport os \nimport subprocess\n\n#import ansible_runner\nimport operator\nimport time\nimport requests\nimport yaml\nimport json\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nHOSTS_FILE = CONFIG_FOLDER_PATH+\"hosts\"\nCONTAINER_STATUS_SCRIPT = ANSIBLE_FOLDER_PATH+\"ansibleVM.yaml\"\nDOCKER_RESTART_SCRIPT = ANSIBLE_FOLDER_PATH+\"ansibleDockerStart.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"PEConfig.txt\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"CEConfig.txt\"\nALIVE_STATUS_FILE = CONFIG_FOLDER_PATH+\"aliveStatus.txt\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = json.load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef checkStatus(logFile):\n\n providerEdgeList = {}\n customerEdgeList = {}\n\n provider_data = read_yaml_data(PROVIDER_EDGES_CONFIG_FILE)\n customer_data = read_yaml_data(CUSTOMER_EDGES_CONFIG_FILE)\n\n \n for key, provider in enumerate(provider_data[\"ProviderEdges\"]):\n for pr in provider:\n providerEdgeList[provider[pr][\"ip\"]] = pr\n\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n customerEdgeList[customer[cr][\"ip\"]] = cr\n\n\n with open(ALIVE_STATUS_FILE,\"r+\") as f:\n fileData = json.load(f)\n for p in fileData['status']:\n \n if float(p['lastPing']) < time.time()-60:\n print(\"CHANGE \" + str(p['ip']))\n #check with the name for the CE\n \n\n #subprocess and check if it is down\n\n output = subprocess.check_output(\"ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + CONTAINER_STATUS_SCRIPT + \" -e 'container=+'p['name']\", shell=True)\n #print(output)\n if \"true\" in str(output):\n #see if new vms have to be spun up\n print(\"O\")\n\n\n else:\n #restart the docker\n output = subprocess.check_output(\"ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + DOCKER_RESTART_SCRIPT + \" -e 'container='+p['name']\", shell=True)\n print(output)\n #the instance is down\n\n #after making it up\n #write to log file as well\n p['lastPing'] = time.time()\n\n \n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n\n f.close() \n\n\ndef main():\n #file = sys.argv[1]\n #createVPC(file)\n fileName = \"/tmp/logs/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n checkStatus(logFile)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6710663437843323,
"alphanum_fraction": 0.6741924285888672,
"avg_line_length": 33.68674850463867,
"blob_id": "3cdfdde17e999191f558dea16b9da1540f3a09a2",
"content_id": "745a2edf4d1bd9bd5cf809ec35961df7dad18ef9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2879,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 83,
"path": "/M2/src/logic/security_script.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import yaml\nimport sys\nimport os \nimport ansible_runner\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/'\nANSIBLE_FOLDER_PATH = '/var/scripts/'\nIP_FILE = ANSIBLE_FOLDER_PATH+\"hostVars.yaml\"\nIP_ROUTE_COMMANDS_FILE = ANSIBLE_FOLDER_PATH+\"ipRouteCommandVars.yaml\"\nIP_ROUTE_SCRIPT = \"addIpTableRules.yaml\"\nPROVIDER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"provider_edges_config.yaml\"\nCUSTOMER_EDGES_CONFIG_FILE = CONFIG_FOLDER_PATH+\"customer_edges_config.yaml\"\nALLOWED_NETWORK_LIST_FILE = CONFIG_FOLDER_PATH+\"allowed_network_list.yaml\"\n\nfileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\nlogFile = open(fileName, 'a+')\n\ndef run_ansible_script():\n provider_data = read_yaml_data(PROVIDER_EDGES_CONFIG_FILE)\n customer_data = read_yaml_data(CUSTOMER_EDGES_CONFIG_FILE)\n network_data = read_yaml_data(ALLOWED_NETWORK_LIST_FILE)\n\n command_list = [\"iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j ACCEPT\",\n \"iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j LOG --log-prefix PING-DROP\",\n \"iptables -A INPUT -p icmp -j DROP\"]\n\n for nw in network_data[\"AllowedNetworks\"]:\n allow_nw_cmd = \"iptables -A INPUT -s \"+nw+\" -j ACCEPT\"\n command_list.append(allow_nw_cmd)\n\n deny_all_cmd = \"iptables -A INPUT -s 0.0.0.0/0 -j DROP\"\n command_list.append(deny_all_cmd)\n cmd_data = {\"IPTableCommands\": command_list}\n write_yaml_data(cmd_data, IP_ROUTE_COMMANDS_FILE)\n\n # Writing to log file\n for cmd in command_list:\n for provider_name in provider_data[\"ProviderEdges\"]:\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"PE IPTABLE UPDATE: \" + provider_name + \"COMMAND: \" + cmd + \"\\n\"\n logFile.write(l)\n for customer_name in customer_data[\"CustomerEdges\"]:\n l = time.strftime(\"%Y%m%d-%H%M%S\") + \"CE IPTABLE UPDATE: \" + customer_name + \"COMMAND: \" + cmd + \"\\n\"\n logFile.write(l)\n\n\n # for provider_name in provider_data[\"ProviderEdges\"]:\n # provider_ip = provider_data[\"ProviderEdges\"][provider_name][\"ip\"]\n # ip_data = {\"host\": provider_ip}\n # write_yaml_data(ip_data, IP_FILE)\n\n # r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)\n # out = r.get_fact_cache(provider_ip)\n\n for customer_name in customer_data[\"CustomerEdges\"]:\n customer_ip = customer_data[\"CustomerEdges\"][customer_name][\"ip\"]\n ip_data = {\"host\": customer_ip}\n write_yaml_data(ip_data, IP_FILE)\n\n print(command_list)\n\n r = ansible_runner.run(private_data_dir=ANSIBLE_FOLDER_PATH, playbook=IP_ROUTE_SCRIPT)\n out = r.get_fact_cache(customer_ip)\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\n\ndef main():\n run_ansible_script()\n logFile.close()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4924924969673157,
"alphanum_fraction": 0.5001876950263977,
"avg_line_length": 52.099666595458984,
"blob_id": "88a9eeb47fb779f19f1ad537fbb8bf8a4ff51694",
"content_id": "8fd361ea06d9d76b9b6006888d77d0a6b1f80414",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15984,
"license_type": "no_license",
"max_line_length": 484,
"num_lines": 301,
"path": "/M3/src/logic/customerEdge.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport json\nimport yaml\nimport logging\nimport subprocess\nimport time\nimport re\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nCREATE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_container.yaml\"\nCREATE_DOC_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_doc_conn.yaml\"\nDELETE_NETWORK_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_container.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createCE(yFile, yFileName, logFile):\n print(\"creating ce\")\n print(yFile)\n if yFile['change_container'] == 'y':\n print(\" Creating CE network\")\n\n # variables\n transit = yFile['tenant_name']+yFile['site']\n controller_net = str(yFile['tenant_id'])+'controller_net'\n\n with open('/etc/config/container/CEConfig.txt',\"r+\") as f:\n fileData = json.load(f)\n print(fileData)\n if len(fileData['CECount'])>0:\n for i in range(len(fileData['CECount'])):\n if not transit in fileData['CECount'][i]:\n cc = {}\n cc[transit]={}\n cc[transit]['min'] = yFile['autoscale_min']\n cc[transit]['max'] = yFile['autoscale_max']\n fileData['CECount'].append(cc)\n else:\n fileData['CECount'][i][transit]['min'] = yFile['autoscale_min']\n fileData['CECount'][i][transit]['max'] = yFile['autoscale_max']\n else:\n cc = {}\n cc[transit]={}\n cc[transit]['min'] = yFile['autoscale_min']\n cc[transit]['max'] = yFile['autoscale_max']\n fileData['CECount'].append(cc)\n\n\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n\n for cont in yFile[\"container\"]:\n #variables\n bridge_name = transit+'CE_net'\n controller_br = yFile[\"tenant_name\"]+'cont_br'\n \n print(\"Creating Customer Edge container : CE\"+str(cont['id']))\n\n # create customer edge container\n command = \"sudo ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + CREATE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] +\" -e container=\"+transit+\"CE\"+ str(cont['id']) + \" -e image=\"+str(cont['image'])+ \" -e tid=\"+str(yFile[\"tenant_id\"])\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Customer Edge Container : \" + command + \"\\n\")\n\n # create connection to controller br\n print(\"Connecting the container to controller bridge\")\n veth1 = transit+\"CE\"+ str(cont['id']) + 'cbr1'\n veth2 = transit+\"CE\"+ str(cont['id']) + 'cbr2'\n default_ip = str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(cont['PE'][0])+\".254\"\n command = \"sudo ANSIBLE_STDOUT_CALLBACK=oneline ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e option2=none -e veth1=\" +veth1+ \" -e veth2=\"+veth2+\" -e bridge_name=\"+ controller_br + \" -e container=\" + transit+\"CE\"+ str(cont['id']) + \" -e option=run_dhclient -e tid=\"+str(yFile[\"tenant_id\"])\n\n check_output = subprocess.check_output([command],shell=True)\n print(check_output)\n r1 = re.search(r\"(([0-2]?[0-9]?[0-9]\\.)([0]\\.)([1]\\.)([0-2]?[0-9]?[0-9]))\",str(check_output))\n print(r1.group(0))\n with open('/etc/config/container/aliveStatus.txt',\"r+\") 
as f:\n fileData = json.load(f)\n ce = {}\n ce['ip'] = r1.group(0)\n ce['name'] = transit+\"CE\"+ str(cont['id'])\n ce['lastPing'] = time.time()\n fileData['status'].append(ce)\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting to Controller network : \" + command + \"\\n\")\n\n with open('/etc/ansible/hosts','a+') as f:\n f.write(r1.group(0) + \" ansible_user=root ansible_ssh_private_key_file=/root/.ssh/id_rsa\")\n f.close()\n\n\n # attach customer edge to Site-Bridge Network\n print(\"Connecting CE to CE network\")\n veth1 = transit+'CE'+str(cont['id'])+'1'\n veth2 = transit+'CE'+str(cont['id'])+'2'\n ns_name = yFile['tenant_name']+yFile['site']\n command = \"sudo ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType'] + \" -e veth1=\" +veth1+ \" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge_name + \" -e container=\" +transit+\"CE\"+ str(cont['id'])+ \" -e option=assign_ip -e option2=assign_default -e ns_name=\"+ ns_name +\" -e ip=\"+str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".0.\"+str(cont['id'])+\"/24\"+\" -e ip2=\"+str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".0.\"+str(cont['id'])\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting to Site-Bridge network : \" + command + \"\\n\")\n\n # create connection to PE network\n for p in cont['PE']:\n print(\"Connecting CE to PE network\")\n veth1 = transit+'CE'+str(cont['id'])+'PE'+str(p)+'1'\n veth2 = transit+'CE'+str(cont['id'])+'PE'+str(p)+'2'\n bridge = transit+'PE'+str(p)\n command = \"sudo ansible-playbook \"+ CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\"+ yFile['hypervisorType'] + \" -e option2=default -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge+\" -e container=\"+transit+\"CE\"+ str(cont['id'])+\" -e option=assign_ip -e ip=\"+str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".\"+str(cont['id'])+\"/24 -e default_ip=\"+default_ip\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting CE to PE network : \" + command + \"\\n\")\n\n # saving config in CE\n with open('/etc/config/container/CEConfig.txt',\"r+\") as f:\n fileData = json.load(f)\n flag = 0\n for i in range(len(fileData['CustomerEdges'])):\n if transit+\"CE\"+ str(cont['id']) in fileData['CustomerEdges'][i]:\n flag = 1\n fileData['CustomerEdges'][i][transit+\"CE\"+ str(cont['id'])]['provider_edges'][ns_name+'PE'+str(p)]= str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".254\"\n if flag ==0:\n ce = {}\n ce[transit+\"CE\"+ str(cont['id'])]={}\n ce[transit+\"CE\"+ str(cont['id'])]['ip']=r1.group(0)\n ce[transit+\"CE\"+ str(cont['id'])]['provider_edges']={}\n ce[transit+\"CE\"+ str(cont['id'])]['provider_edges'][ns_name+'PE'+str(p)]= str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".254\"\n fileData[\"CustomerEdges\"].append(ce)\n f.seek(0)\n f.truncate()\n print(fileData)\n json.dump(fileData, f)\n f.close()\n\n\n with open('/etc/config/container/PEConfig.txt',\"r+\") as f:\n fileData = json.load(f)\n for i in range(len(fileData['ProviderEdges'])):\n for key in fileData['ProviderEdges'][i]:\n print(key)\n if key == yFile['tenant_name']+'PE'+str(cont['PE'][0]):\n print(\"debug1\")\n if 'CustomerEdges' in fileData['ProviderEdges'][i][key]:\n print(\"debug2\")\n fileData['ProviderEdges'][i][key]['CustomerEdges'].append(transit+\"CE\"+ str(cont['id']))\n else:\n print(\"debug2a\")\n 
fileData['ProviderEdges'][i][key]['CustomerEdges']=[]\n fileData['ProviderEdges'][i][key]['CustomerEdges'].append(transit+\"CE\"+ str(cont['id']))\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n\n else:\n for c in yFile['container']:\n if c['change_link'] == 'y':\n for p in cont['PE']:\n print(\"Connecting CE to PE network\")\n veth1 = transit+'CE'+str(cont['id'])+'PE'+str(p)+'1'\n veth2 = transit+'CE'+str(cont['id'])+'PE'+str(p)+'2'\n bridge = transit+'PE'+str(p)\n command = \"sudo ansible-playbook \"+ CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\"+ yFile['hypervisorType'] + \" -e option=none -e option2=none -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge+\" -e container=\"+transit+\"CE\"+ str(cont['id'])+\" -e option=assign_ip -e ip=\"+str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".\"+str(cont['id'])+\"/24\"\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting CE to PE network : \" + command + \"\\n\")\n \n with open('/etc/config/container/CEConfig_test.txt',\"r+\") as f:\n fileData = json.load(f)\n flag = 0\n for i in range(len(fileData['CustomerEdges'])):\n if transit+\"CE\"+ str(cont['id']) in fileData['CustomerEdges'][i]:\n flag = 1\n fileData['CustomerEdges'][i][transit+\"CE\"+ str(cont['id'])]['provider_edges'][ns_name+'PE'+str(p)]= str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".254\"\n if flag ==0:\n ce = {}\n ce[transit+\"CE\"+ str(cont['id'])]={}\n ce[transit+\"CE\"+ str(cont['id'])]['ip']=r1.group(0)\n ce[transit+\"CE\"+ str(cont['id'])]['provider_edges']={}\n ce[transit+\"CE\"+ str(cont['id'])]['provider_edges'][ns_name+'PE'+str(p)]= str(yFile['tenant_id'])+\".\"+str(yFile['site_id'])+\".\"+str(p)+\".254\"\n fileData[\"CustomerEdges\"].append(ce)\n \n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n\n\n ### save to pe_config\n with open('/etc/config/container/PEConfig.txt',\"r+\") as f:\n fileData = json.load(f)\n for i in range(len(fileData['ProviderEdges'])):\n for key in fileData['ProviderEdges'][i]:\n print(key)\n if key == yFile['tenant_name']+'PE'+str(cont['PE'][0]):\n print(\"debug1\")\n if 'containers' in fileData['ProviderEdges'][i][key]:\n print(\"debug2\")\n fileData['ProviderEdges'][i][key]['containers'].append(transit+\"CE\"+ str(cont['id']))\n else:\n print(\"debug2a\")\n fileData['ProviderEdges'][i][key]['containers']=[]\n fileData['ProviderEdges'][i][key]['containers'].append(transit+\"CE\"+ str(cont['id']))\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n f.close()\n \n\ndef deleteCE(yFile, logFile):\n \n if yFile['change_container']=='y':\n for cont in yFile['container']:\n # delete CE Container\n transit = yFile['tenant_name']+yFile['site']\n print(\"deleting container\")\n command = \"sudo ansible-playbook \" + DELETE_CONTAINER_SCRIPT + \" -e hypervisor=\" + yFile['hypervisorType']+\" -e container=\"+transit+\"CE\"+ str(cont['id'])\n subprocess.call([command],shell=True)\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Deleting CE : \" + command + \"\\n\")\n print(\"Deleting link between CE and CE network\")\n veth = transit+'CE'+str(cont['id'])+'1'\n command = 'sudo ansible-playbook ' + DELETE_BRNS_CONN_SCRIPT + ' -e hypervisor='+yFile['hypervisorType']+' -e veth1='+veth\n subprocess.call([command],shell=True)\n for p in cont['PE']:\n print(\"Deleting link between CE and PE\")\n veth = transit+'CE'+str(cont['id'])+'PE'+str(p)+'1'\n command = 'sudo ansible-playbook ' + DELETE_BRNS_CONN_SCRIPT + ' -e 
hypervisor='+yFile['hypervisorType']+\" -e veth1=\"+veth\n subprocess.call([command],shell=True)\n\n else:\n for c in yFile['container']:\n if c['change_link']=='y':\n for p in c['PE']:\n print(\"Deleting link between CE and PE\")\n veth = transit+'CE'+str(c['id'])+'PE'+str(p)+'1'\n command = 'sudo ansible-playbook ' + DELETE_BRNS_CONN_SCRIPT + ' -e hypervisor='+yFile['hypervisorType']+\" -e veth1=\"+veth\n subprocess.call([command],shell=True)\n\n \ndef checkYaml(yFile, logFile):\n if 'CE' not in yFile:\n print(\"ERROR!!! Missing 'CE' key in YAML file\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Missing 'CE' key in YAML file : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n if not 'container' in yFile['CE']:\n print(\"ERROR!!! Cannot process the given YAML file. MISSING KEYS!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" ERROR!!! Cannot process the given YAML file. MISSING KEYS!!! : \"+ str(sys.argv) +\"\\n\")\n exit(0)\n\ndef main():\n\n fileName = \"/var/log/log_\"+time.strftime(\"%Y%m%d\")+\".txt\"\n logFile = open(fileName, 'a+')\n\n if(len(sys.argv)<2):\n logging.error(\"ERROR: No arguments given\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" No Arguments Error: \"+ str(sys.argv)+\"\\n\")\n exit(0)\n\n else:\n yFileName = sys.argv[2]\n # print(yFileName)\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n # open the yaml file\n yFile = read_yaml_data(CONFIG_FOLDER_PATH + yFileName)\n #print(yFile)\n checkYaml(yFile, logFile)\n print(\"test\")\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower() == \"delete\":\n print(\"Performing delete operation depending upon the file\")\n deleteCE(yFile['CE'], logFile)\n elif str(sys.argv[1]).lower() == \"create\":\n print(\"Performing create operation depending upon the file\")\n createCE(yFile['CE'], yFileName, logFile)\n except:\n print(\"ERROR!!!\")\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n else:\n logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Error In Executing Command : \"+ str(sys.argv) +\"\\n\")\n \n logFile.close()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.5491525530815125,
"alphanum_fraction": 0.5796610116958618,
"avg_line_length": 21.69230842590332,
"blob_id": "b13b2dc31d2e62dc0cff11dec0c28db3e5cd2811",
"content_id": "04e865b2a1be0fc7ac1cb59cde538b1db882c386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/M3/etc/config/ping.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import requests\nimport math\n\npingUrl = 'http://' + sys.argv[1] + ':5000/alive'\nstatus = {'status': 'alive'}\n\n# @app.route('/stats', methods=['POST','GET'])\ndef sendStats():\n x = requests.post(pingUrl, data = status) \n\nif __name__ == '__main__':\n # app.run(host='0.0.0.0')\n sendStats()\n"
},
{
"alpha_fraction": 0.48670801520347595,
"alphanum_fraction": 0.4968985319137573,
"avg_line_length": 41.18691635131836,
"blob_id": "55293ae16d86ca8a2a1e5fcd998097f6a7112dda",
"content_id": "0bca3a0c0f9edda98fa80343106c597fee11382d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4514,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 107,
"path": "/M3/src/logic/assign_ip_route.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\nimport time\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nSITE_NET_LIST = 'site_net_list.yaml'\nNS_LIST = 'ns_list.yaml'\nASSIGN_IPROUTE = ANSIBLE_FOLDER_PATH + \"assign_ip_route.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef transit(tid, hypervisor, network, flag):\n if hypervisor == 'primary':\n if flag == 'i':\n nh = str(tid)+\".0.2.1\"\n else:\n nh = \"gre_t\"+str(tid) \n else:\n if flag == 'i':\n nh = str(tid)+\".255.2.1\"\n else:\n nh = \"gre_t\"+str(tid)\n for n in network:\n if nh.startswith(\"gre\"):\n command = \"sudo ansible-playbook \" + ASSIGN_IPROUTE + \" -e hypervisor=\"+hypervisor+\" -e option1=transit -e option2=gre -e transit_name=t\"+str(tid)+\"_transit -e network=\"+n+\" -e nh=\"+nh\n else:\n command = \"sudo ansible-playbook \" + ASSIGN_IPROUTE + \" -e hypervisor=\"+hypervisor+\" -e option1=transit -e option2=normal -e transit_name=t\"+str(tid)+\"_transit -e network=\"+n+\" -e nh=\"+nh\n print(command)\n #subprocess.call([command], shell=True)\n\n\ndef ce(tid, sid, hypervisor, network, flag, ce_name):\n for n in network:\n if flag == \"i\":\n nh = str(tid)+\".\"+str(sid)+\".0.254\"\n command = \"sudo ansible-playbook \" + ASSIGN_IPROUTE + \" -e hypervisor=\"+hypervisor+\" -e option1=normal -e option2=normal -e container=\"+ce_name +\" -e network=\"+n+\" -e nh=\"+nh\n print(command)\n #subprocess.call([command], shell=True)\n else:\n print(n+\"via default\")\n\ndef pe(tid,sid,pid, hypervisor, network, flag, pe_name):\n for n in network:\n if flag == \"i\":\n for i in range(len(pid)):\n if pe_name[5:] == str(pid[i]):\n nh = str(tid)+\".\"+str(sid)+\".\"+str(pid[i])+\".1\"\n command = \"sudo ansible-playbook \" + ASSIGN_IPROUTE + \" -e hypervisor=\"+hypervisor+\" -e option1=normal -e option2=normal -e container=\"+pe_name +\" -e network=\"+n+\" -e nh=\"+nh\n print(command)\n #subprocess.call([command], shell=True)\n else:\n print(n + \" via default\")\n\ndef main():\n ns_list = read_yaml_data(CONFIG_FOLDER_PATH + NS_LIST)\n site_net_list = read_yaml_data(CONFIG_FOLDER_PATH + SITE_NET_LIST)\n \n \n for ns in ns_list['list']:\n # go inside the ns and add routes\n for nets in site_net_list['site']:\n if ns['hypervisor']=='primary':\n if nets['hypervisor']=='primary':\n if 'transit' in ns['name']:\n transit(50, \"primary\", nets['network'], \"i\")\n elif 'CE' in ns['name']:\n ce(50,nets[\"id\"], \"primary\", nets['network'], \"i\", ns['name'])\n elif 'PE' in ns['name']:\n pe(50, nets[\"id\"],nets[\"pid\"],\"primary\", nets['network'], \"i\", ns['name'])\n else:\n if 'transit' in ns['name']:\n transit(50, \"primary\", nets['network'], \"o\")\n elif 'CE' in ns['name']:\n ce(50,nets[\"id\"], \"primary\", nets['network'], \"o\", ns['name'])\n elif 'PE' in ns['name']:\n pe(50,nets[\"id\"],nets[\"pid\"], \"primary\", nets['network'], \"o\", ns['name'])\n\n else:\n if nets['hypervisor']=='primary':\n if 'transit' in ns['name']:\n transit(50, \"secondary\", nets['network'], \"o\")\n elif 'CE' in ns['name']:\n ce(50,nets[\"id\"], \"secondary\", nets['network'], \"o\", ns['name'])\n elif 'PE' in ns['name']:\n pe(50,nets[\"id\"],nets[\"pid\"], \"secondary\", nets['network'], \"o\", ns['name'])\n\n else:\n if 'transit' in ns['name']:\n transit(50, \"secondary\", nets['network'], 
\"i\")\n elif 'CE' in ns['name']:\n ce(50,nets[\"id\"], \"secondary\", nets['network'], \"i\", ns['name'])\n elif 'PE' in ns['name']:\n pe(50,nets[\"id\"],nets[\"pid\"], \"secondary\", nets['network'], \"i\", ns['name'])\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.586517870426178,
"alphanum_fraction": 0.5939562916755676,
"avg_line_length": 53.31312942504883,
"blob_id": "b3340d3fae65d25a27fb2ea3bdebef842ae48de1",
"content_id": "3083d264fc510bbc24a76c4f56ed80216f3d20fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10755,
"license_type": "no_license",
"max_line_length": 377,
"num_lines": 198,
"path": "/M3/src/logic/site.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "import sys\nimport yaml\nimport logging\nimport subprocess\n\nCONFIG_FOLDER_PATH = '/etc/config/container/'\nANSIBLE_FOLDER_PATH = '/var/scripts/container/'\nCREATE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_ns.yaml\"\nCREATE_L2_BRIDGE_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_l2net.yaml\"\nCREATE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_brns_conn.yaml\"\nCREATE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_container.yaml\"\nCREATE_DOC_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"create_doc_conn.yaml\"\nDELETE_NS_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_ns.yaml\"\nDELETE_L2_BRIDGE_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_l2net.yaml\"\nDELETE_BRNS_CONN_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_brns_conn.yaml\"\nDELETE_CONTAINER_SCRIPT = ANSIBLE_FOLDER_PATH+\"delete_container.yaml\"\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name) as stream:\n data = yaml.safe_load(stream)\n return data\n\ndef write_yaml_data(data, f_name):\n with open(f_name, 'w') as outfile:\n yaml.dump(data, outfile)\n\ndef createFunc(yFile,yFileName):\n print(\"create\")\n hypervisor = yFile['tenant']['hypervisorType']\n ns_name = yFile['tenant']['tenant_name']+yFile['tenant']['site']\n if yFile['tenant']['change_site'].lower()=='y':\n # playbook to create NS\n print(\"Creating site namespace\")\n extra_vars = {\"hypervisor\": hypervisor, \"ns_name\": ns_name, \"hypervisorIP\": yFile['tenant']['site_ip_ext'], \"transitIP\": yFile['tenant']['site_ip_int'], \"option\": 'none'}\n command = 'sudo ansible-playbook ' + CREATE_NS_SCRIPT + ' --extra-vars \"'+str(extra_vars)+'\"'\n subprocess.call([command],shell=True)\n\n # Create Customer Egde router's bridge\n print(\"Creating CE network\")\n bridge = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'CE_net'\n command = \"sudo ansible-playbook \"+ CREATE_L2_BRIDGE_SCRIPT + \" -e hypervisor=\"+ hypervisor+\" -e option=none -e bridge_name=\"+bridge+ \" -e network_name=\" +bridge\n subprocess.call([command],shell=True)\n\n veth1 = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'CE1'\n veth2 = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'CE2'\n #Connect site to CE's bridge\n print(\"Connecting CE network to site namespace\")\n command = \"sudo ansible-playbook \"+ CREATE_BRNS_CONN_SCRIPT+ \" -e hypervisor=\"+hypervisor+ \" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge+\" -e namespace=\"+ns_name+\" -e option=assign_ip -e ip=\"+str(yFile['tenant']['tenant_id'])+\".\"+str(yFile['tenant']['site_id'])+\".0.254/24\"\n subprocess.call([command],shell=True)\n\n for p in yFile['tenant']['PE']:\n # Create bridges for each PE\n print(\"Creating PE network for site\")\n bridge = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'PE'+str(p)\n command = \"sudo ansible-playbook \"+CREATE_L2_BRIDGE_SCRIPT+ \" -e hypervisor=\"+hypervisor+ \" -e option=none -e bridge_name=\"+bridge+ \" -e network_name=\"+bridge\n subprocess.call([command],shell=True)\n \n #Connect bridge to PE\n print(\"Connecting PE network to PE\")\n veth1 = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'PE'+str(p)+'1'\n veth2 = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'PE'+str(p)+'2'\n command = \"sudo ansible-playbook \"+ CREATE_DOC_CONN_SCRIPT+ \" -e hypervisor=\"+hypervisor+ \" -e option=none -e option2=none -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+bridge+\" -e container=\"+yFile['tenant']['tenant_name']+\"PE\"+str(p)+\" -e option=assign_ip -e 
ip=\"+str(yFile['tenant']['tenant_id'])+\".\"+str(yFile['tenant']['site_id'])+\".\"+str(p)+\".254/24\"\n subprocess.call([command],shell=True)\n\n # playbook to create networks\n if yFile['tenant']['change_net'].lower()=='y':\n for net in range(len(yFile['tenant']['networks'])):\n net_name = ns_name+'net'+str(yFile['tenant']['networks'][net])\n print(\"Creating network inside the site\")\n command = 'sudo ansible-playbook ' + CREATE_L2_BRIDGE_SCRIPT + ' -e hypervisor='+hypervisor+' -e option=none -e bridge_name='+net_name+' -e network_name='+net_name\n subprocess.call([command],shell=True)\n # logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating L2 Bridge : \" + command + \"\\n\")\n \n # playbook to attach network bridges to namespace\n veth1 = net_name+'1'\n veth2 = net_name+'2'\n print(\"Connecting network to the site namespace\")\n command = 'sudo ansible-playbook ' + CREATE_BRNS_CONN_SCRIPT + ' -e hypervisor='+hypervisor+\" -e veth1=\"+veth1+\" -e veth2=\"+veth2+\" -e bridge_name=\"+net_name+\" -e namespace=\"+ns_name+ \" -e option=assign_ip -e ip=\"+yFile['tenant']['site_ip'][net]\n subprocess.call([command],shell=True)\n \n # logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Bridge to namespace connection : \" + command + \"\\n\")\n\n # playbook to create container\n if yFile['tenant']['change_container'].lower()=='y':\n for c in yFile['router']:\n container = ns_name+'router'+str(c['container_id'])\n print(\"Creating router container inside namespace\")\n command = \"sudo ansible-playbook \" + CREATE_CONTAINER_SCRIPT + \" -e hypervisor=\" + hypervisor +\" -e container=\"+container + \" -e image=\"+c['image']+ \" -e tid=\"+ str(yFile['tenant']['tenant_id'])\n subprocess.call([command],shell=True)\n # logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Creating Customer Edge Container : \" + command + \"\\n\")\n \n # playbook to attach to other networks\n if len(c['networks'])>0:\n for i in range(len(c['networks'])):\n # attach provider edge to Bridge Network\n print(\"Connecting router container to network\")\n net_name = ns_name+'net'+str(c['networks'][i])\n veth1 = net_name+'r'+str(c['container_id'])+'1'\n veth2 = net_name+'r'+str(c['container_id'])+'2'\n command = \"sudo ansible-playbook \" + CREATE_DOC_CONN_SCRIPT + \" -e hypervisor=\" + hypervisor + \" -e option=none -e option2=none -e veth1=\" +veth1+ \" -e veth2=\"+veth2+\" -e bridge_name=\"+net_name + \" -e container=\" + container+\" -e option=assign_ip -e ip=\"+ c['ip'][i]\n subprocess.call([command],shell=True)\n # logFile.write(time.strftime(\"%Y%m%d-%H%M%S\")+\" Connecting Container to bridge network : \" + command + \"\\n\")\n \ndef deleteFunc(yFile,yFileName):\n print(\"delete\")\n hypervisor = yFile['tenant']['hypervisorType']\n ns_name = yFile['tenant']['tenant_name']+yFile['tenant']['site']\n # Deleting the site\n if yFile['tenant']['change_site'].lower()=='y':\n for c in yFile['router']:\n container = ns_name+'router'+str(c['container_id'])\n delete_container(hypervisor, container)\n for net in range(1, len(yFile['tenant']['networks'])+1):\n net_name = ns_name+'net'+str(net)\n veth = net_name+'1'\n delete_net(hypervisor, net_name, net_name)\n delete_vethpair(hypervisor, veth)\n delete_ns(hypervisor, ns_name)\n bridge = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'CE_net'\n delete_net(hypervisor, bridge, bridge)\n for p in yFile['tenant']['PE']:\n bridge = yFile['tenant']['tenant_name']+yFile['tenant']['site']+'PE'+str(p)\n delete_net(hypervisor, bridge, bridge)\n \n else: \n if 
yFile['tenant']['change_container'].lower()=='y':\n for c in yFile['router']:\n container = ns_name+'router'+str(c['container_id'])\n delete_container(hypervisor, container)\n\n else:\n for c in yFile['router']:\n if c['change_link'].lower()=='y':\n for net in c['networks']:\n net_name = ns_name+'net'+str(net)\n veth1 = net_name+'r'+str(c['container_id'])+'1'\n delete_vethpair(hypervisor, veth1)\n\n if yFile['tenant']['change_net'].lower()=='y':\n for net in range(1, len(yFile['tenant']['networks'])+1):\n net_name = ns_name+'net'+str(net)\n veth = net_name+'1'\n delete_net(hypervisor, net_name, net_name)\n delete_vethpair(hypervisor, veth)\n for c in yFile['router']:\n if net in c['networks']:\n veth1 = net_name+'r'+str(c['container_id'])+'1'\n delete_vethpair(hypervisor, veth1)\n\ndef delete_ns(hypervisor, ns_name):\n command = 'sudo ansible-playbook ' + DELETE_NS_SCRIPT + ' -e hypervisor='+hypervisor+' -e ns_name='+ns_name\n subprocess.call([command],shell=True)\n\ndef delete_net(hypervisor, bridge_name, network_name):\n command = 'sudo ansible-playbook ' + DELETE_L2_BRIDGE_SCRIPT + ' -e hypervisor='+hypervisor+' -e option=none -e bridge_name='+bridge_name+' -e network_name='+network_name\n subprocess.call([command],shell=True)\n\ndef delete_container(hypervisor, container):\n command = \"sudo ansible-playbook \" + DELETE_CONTAINER_SCRIPT + \" -e hypervisor=\" + hypervisor +\" -e container=\"+container\n subprocess.call([command],shell=True)\n\ndef delete_vethpair(hypervisor, veth):\n command = 'sudo ansible-playbook ' + DELETE_BRNS_CONN_SCRIPT + ' -e hypervisor='+hypervisor+\" -e veth1=\"+veth\n subprocess.call([command],shell=True)\n\ndef checkYAML(yaml):\n if 'tenant' not in yaml:\n print(\"ERROR!!! Not in correct format!!!\")\n exit(0)\n\n\nif(len(sys.argv)<2):\n logging.error(\"\\nERROR: less than 2 arguments given!!! Require 2 arguments to run\")\n exit(0)\nelse:\n yFileName = sys.argv[2]\n # check if yaml file is passed\n if yFileName.endswith(\".yml\") or yFileName.endswith(\".yaml\"):\n try:\n yFile = read_yaml_data(CONFIG_FOLDER_PATH + yFileName)\n checkYAML(yFile)\n # check for the 1st argument i.e., create or delete\n if str(sys.argv[1]).lower()==\"delete\":\n print(\"\\nPerforming delete operation depending upon the file\")\n deleteFunc(yFile,yFileName)\n elif str(sys.argv[1]).lower()==\"create\":\n logging.info(\"\\nPerforming create operation depending upon the file\")\n createFunc(yFile,yFileName)\n else:\n logging.error(\"\\nERROR: Unrecognized Command!!!\")\n exit(0)\n except Exception as ex:\n logging.error(str(ex))\n exit(0)\n else:\n logging.error(\"\\nERROR: No yaml/yml file found!!!\")\n exit(0)\n\n"
},
{
"alpha_fraction": 0.6061588525772095,
"alphanum_fraction": 0.6077795624732971,
"avg_line_length": 25.144067764282227,
"blob_id": "2449bedf63c0aaea2b394dfdd5d107b73293ceaf",
"content_id": "468556c7effdb1a07ac2f18bad9b875240fe6dd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3085,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 118,
"path": "/M3/etc/config/controllerStats.py",
"repo_name": "jagan25/Team10-SDWAN",
"src_encoding": "UTF-8",
"text": "from flask import Flask, url_for, request\nfrom flask import Response, make_response\nimport sys\nimport json\nimport requests\nimport datetime\nfrom datetime import date, datetime\nimport os\nimport math\nimport time\nimport yaml\n\napp = Flask(__name__)\n\ndef read_yaml_data(f_name):\n data = None\n with open(f_name,\"r+\") as stream:\n data = json.load(stream)\n return data\n\n\[email protected]('/', methods=['POST','GET'])\ndef hello():\n return \"Hello, Controller here!\"\n\[email protected]('/stats', methods=['POST','GET'])\ndef receiveStats():\n \n providerEdgeList = {}\n customerEdgeList = {}\n\n provider_data = read_yaml_data(\"PEConfig.txt\")\n customer_data = read_yaml_data(\"CEConfig.txt\")\n\n for provider in provider_data[\"ProviderEdges\"]:\n providerEdgeList[provider_data[\"ProviderEdges\"][provider][\"ip\"]] = provider\n \n\n for customer in customer_data[\"CustomerEdges\"]:\n customerEdgeList[customer_data[\"CustomerEdges\"][customer][\"ip\"]] = customer\n\n\n #open everytime\n f = open(\"statsLoader.txt\", \"a+\")\n\n data = request.json\n data['ip'] = request.remote_addr\n \n name = None\n \n if data['ip'] in customerEdgeList:\n print(\"TRUE, IN CUSTOMER EDGE LIST\")\n name = customerEdgeList[data['ip']]\n \n elif data['ip'] in providerEdgeList:\n name = providerEdgeList[data['ip']]\n\n if name is not None:\n f.write(name + \" \" + str(data['cpus']) + \" \" + str(data['load']) + \" \" + data['flag'])\n print(name + \" \" + str(data['cpus']) + \" \" + str(data['load']) + \" \" + data['flag'])\n\n f.close()\n return \"NULL\"\n\[email protected]('/alive', methods=['GET','POST'])\ndef receiveAliveStats():\n \n providerEdgeList = {}\n customerEdgeList = {}\n\n provider_data = read_yaml_data(\"PEConfig.txt\")\n customer_data = read_yaml_data(\"CEConfig.txt\")\n print(provider_data)\n print(customer_data)\n for key,provider in enumerate(provider_data[\"ProviderEdges\"]):\n for pr in provider:\n providerEdgeList[provider[pr][\"ip\"]] = pr\n for key, customer in enumerate(customer_data[\"CustomerEdges\"]):\n for cr in customer:\n customerEdgeList[customer[cr][\"ip\"]] = cr\n \n #print(request.form)\n # print(request.remote_addr)\n # #open everytime\n # print(request.headers)\n # print(request)\n #data = json.loads(request.form[\"payload\"])\n #print(data)\n\n name = None\n hostIP = request.remote_addr\n\n if hostIP in customerEdgeList:\n print(\"IN CUSTOMER EDGE LIST, RE\")\n name = customerEdgeList[hostIP]\n \n elif hostIP in providerEdgeList:\n name = providerEdgeList[hostIP]\n\n if name is not None:\n \n #fileData = read_yaml_data('aliveStatus.yaml')\n with open('aliveStatus.txt',\"r+\") as f:\n fileData = json.load(f) \n \n for p in fileData['status']:\n if p['name'] == name and p['ip']== hostIP:\n p['lastPing'] = time.time()\n f.seek(0)\n f.truncate()\n json.dump(fileData, f)\n break;\n f.close()\n\n return \"NULL\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n"
}
] | 26 |
graffjr/Jeremiah-Graff-MS-ADS-Portfolio | https://github.com/graffjr/Jeremiah-Graff-MS-ADS-Portfolio | 2948ee4a71fdbc1a6ee5ce05ce9f09899b2396ce | e6b5f3525b1bdf178aa9ffd6dbd7e2ccdd48a919 | 7e74c5f92f1de55318cf57361ddcec4ac1d52cde | refs/heads/master | 2022-12-09T12:13:11.609313 | 2020-09-04T05:42:50 | 2020-09-04T05:42:50 | 284,176,241 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7369641065597534,
"alphanum_fraction": 0.7763615250587463,
"avg_line_length": 25.15151596069336,
"blob_id": "29d879a2deeff5255a2c6a0f14e04699688b99b4",
"content_id": "2bb865403d53a65615e4809e4b560011564715fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 863,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 33,
"path": "/Portfolio_README.md",
"repo_name": "graffjr/Jeremiah-Graff-MS-ADS-Portfolio",
"src_encoding": "UTF-8",
"text": "# Jeremiah Graff Portfolio\n\nHello,\nThis is my portfolion for my MS Applied Data Science from Syracuse University. Please feel free to reach out to me with any questions. Thanks!\n\nJeremiah Graff\n\[email protected]\n\n209-485-8026\n\nSUID: 256450200\n\n\n# Table of Contents:\n\nIST 687: Intro to Data Science Project Code (R) - This can be opened into R Studio\n\nIST 719: Information Visualization Project Code (Word document with R code copied onto it) - This can be copied and pasted into R\n\nIST 719: Information Visualization Project PDF - Finished Product - This was built using the R code above & also in Adobe Illustrator\n\nIST 736: Text Mining Project Code (python file) - This was run in Anaconda Spyder\n\nIST 718: Big Data Analytics Lab Code (python file) - This was run in Anaconda Spyder\n\nProfessional Resume\n\nPortfolio Write Up\n\nPortfolio Presentation\n\nPortfolio Presentation Video\n"
},
{
"alpha_fraction": 0.6994739174842834,
"alphanum_fraction": 0.7279365658760071,
"avg_line_length": 59.230770111083984,
"blob_id": "8f43e1f58464299856b6bc82ac98b1312403ca4a",
"content_id": "2a50d2680ec8864fb081542be816199113591992",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25472,
"license_type": "no_license",
"max_line_length": 328,
"num_lines": 416,
"path": "/IST 718 Lab 1 Code.py",
"repo_name": "graffjr/Jeremiah-Graff-MS-ADS-Portfolio",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 9 20:17:01 2020\r\n\r\n@author: GRAFFJE\r\n\"\"\"\r\n\r\n# import packages for analysis and modeling\r\nimport pandas as pd # data frame operations\r\n\r\nimport numpy as np # arrays and math functions\r\nfrom scipy.stats import uniform # for training-and-test split\r\n#import statsmodels.api as sm # statistical models (including regression)\r\nimport statsmodels.formula.api as smf # R-like model specification\r\nimport matplotlib.pyplot as plt # 2D plotting\r\nimport seaborn as sns # PLOTTING\r\nfrom scipy import stats\r\n\r\n\r\n# read in data and create data frame (first one is the online file, second is csv file that i added additional info to from online sources)\r\n# coaches3 = pd.read_csv(\"https://raw.githubusercontent.com/2SUBDA/IST_718/master/Coaches9.csv\")\r\ncoaches = pd.read_csv(\"H:/Jeremiah Graff/0-Jeremiah Master's Degree/6-July 2020 Classes/IST 718/Labs/Lab1/coaches.csv\")\r\n# the file below changes AAC to Big East to answer one of the questions in assignment\r\n# coaches2 = pd.read_csv(\"H:/Jeremiah Graff/0-Jeremiah Master's Degree/6-July 2020 Classes/IST 718/Labs/Lab1/coachesv2.csv\")\r\n\r\n# i hard coded the info below after loading the text file from online into a csv file.\r\n# stadium info from: https://en.wikipedia.org/wiki/List_of_American_football_stadiums_by_capacity & https://en.wikipedia.org/wiki/List_of_NCAA_Division_I_FBS_football_stadiums\r\n# strength of schedule from: http://powerrankingsguru.com/college-football/strength-of-schedule.php\r\n# record data from: https://www.espn.com/college-football/standings\r\n# academic data from: https://www.icpsr.umich.edu/icpsrweb/NCAA/studies/30022/datadocumentation\r\n\r\n# just to see if it works\r\nprint(coaches)\r\n\r\n# just so i don't include it in my model and be one of those folks :)\r\ncoaches = coaches.drop('SchoolPay',1)\r\n\r\n# all cells below are not needed\r\ncoaches = coaches.drop('AssistantPay',1)\r\ncoaches = coaches.drop('Bonus',1)\r\ncoaches = coaches.drop('BonusPaid',1)\r\ncoaches = coaches.drop('Buyout',1)\r\n\r\nprint(coaches.info())\r\n# https://stackoverflow.com/questions/38516481/trying-to-remove-commas-and-dollars-signs-with-pandas-in-python\r\ncoaches['TotalPay'] = coaches['TotalPay'].replace({'\\$': '', ',': '', ' ':'','--':''}, regex=True)\r\nprint(coaches)\r\n\r\n# changing the columns below to a numeric object\r\ncoaches[\"TotalPay\"] = pd.to_numeric(coaches['TotalPay'], errors = 'coerce')\r\n\r\nprint(coaches.info())\r\n\r\n\r\n\r\n# https://stackoverflow.com/questions/29314033/drop-rows-containing-empty-cells-from-a-pandas-dataframe\r\n# dropping rows that have an empty value for TotalPay, includes: Baylor, BYU, Rice, SMU\r\ncoaches['TotalPay'].replace('', np.nan, inplace=True)\r\ncoaches.dropna(subset=['TotalPay'], inplace=True)\r\n\r\n# checking to make sure that the 4 rows dropped as hoped...should end up with 125 entries\r\nprint(coaches.info())\r\n\r\ncoaches['Conference2'] = coaches['Conference'].replace({'AAC': '2AAC', 'ACC': '3ACC', 'Big 12':'4Big 12','Big Ten':'5Big Ten'\r\n ,'C-USA':'6C-USA','Ind.':'10Ind.','MAC':'7MAC','Mt. West':'8Mt. 
West'\r\n ,'Pac-12':'9Pac-12','SEC':'1SEC','Sun Belt':'11Sun Belt'}, regex=True)\r\n\r\nsns.boxplot(y=\"TotalPay\", data=coaches)\r\nsns.boxplot(x= \"Conference\", y = \"TotalPay\", data = coaches).set_title('Conference TotalPay Boxplot')\r\n# going horizontal with the boxplot to make conferences more readable: https://python-graph-gallery.com/31-horizontal-boxplot-with-seaborn/\r\nsns.boxplot(y= \"Conference\", x = \"TotalPay\", data = coaches).set_title('Conference TotalPay Boxplot')\r\n\r\n# https://seaborn.pydata.org/generated/seaborn.violinplot.html\r\nsns.violinplot(x= \"Conference\", y = \"TotalPay\", data = coaches).set_title('Conference TotalPay Violinplot')\r\n# going horizontal with the boxplot to make conferences more readable\r\nsns.violinplot(y= \"Conference\", x = \"TotalPay\", data = coaches).set_title('Conference TotalPay Violinplot')\r\n\r\n# https://stackoverflow.com/questions/38309729/count-unique-values-with-pandas-per-groups\r\nConfCount = coaches.groupby('Conference')['Coach'].nunique()\r\n#print(ConfCount)\r\n# below should equal 125 to match the number of coaches\r\n#print(ConfCount.sum())\r\nConfSum = coaches.groupby('Conference')['TotalPay'].sum()\r\n#print(ConfSum)\r\n# below should equal 302132595 to match the number of coaches\r\n#print(ConfSum.sum())\r\nConfAvgPay = ConfSum/ConfCount\r\n# https://stackoverflow.com/questions/3387655/safest-way-to-convert-float-to-integer-in-python\r\nConfAvgPay = ConfAvgPay.astype(int)\r\n#print(ConfAvgPay)\r\n\r\n# https://stackoverflow.com/questions/28503445/assigning-column-names-to-a-pandas-series\r\nConfAvgPayDF = pd.DataFrame({\"Conference\":ConfAvgPay.index, \"AvgPay\":ConfAvgPay.values})\r\n#print(ConfAvgPayDF)\r\n\r\n# https://stackoverflow.com/questions/24988873/python-sort-descending-dataframe-with-pandas\r\nSortConfAvgPayDF = ConfAvgPayDF.sort_values('AvgPay', ascending=False)\r\n#print(SortConfAvgPayDF)\r\nsns.barplot(x=\"AvgPay\", y=\"Conference\", color = 'Blue', data=SortConfAvgPayDF).set_title('Avg Conference TotalPay')\r\n\r\n# conference attendance capabilities\r\nConfAttSum = coaches.groupby('Conference')['StadiumSize'].sum()\r\n#print(ConfAttSum)\r\n# https://stackoverflow.com/questions/3387655/safest-way-to-convert-float-to-integer-in-python\r\nConfAvgAtt = ConfAttSum/ConfCount.astype(int)\r\n#print(ConfAvgAtt)\r\n\r\nConfAvgAttDF = pd.DataFrame({\"Conference\":ConfAvgAtt.index, \"StadiumSize\":ConfAvgAtt.values})\r\n#print(ConfAvgAttDF)\r\n\r\nSortConfAvgAttDF = ConfAvgAttDF.sort_values('StadiumSize', ascending=False)\r\n#print(SortConfAvgAttDF)\r\nsns.barplot(x=\"StadiumSize\", y=\"Conference\", color = 'Blue', data=SortConfAvgAttDF).set_title('Avg Conference Stadium Size')\r\n\r\n\r\n# https://stackoverflow.com/questions/43859416/finding-top-10-in-a-dataframe-in-pandas\r\n# https://seaborn.pydata.org/tutorial/categorical.html?highlight=color%20bar\r\nTop10Salaries = coaches.sort_values(\"TotalPay\", ascending = False).head(10)\r\n#print(Top10Salaries)\r\n\r\n# run both lines together\r\nsns.barplot(x=\"TotalPay\", y=\"Coach\", hue = 'Conference', data=Top10Salaries).set_title('Top 10 Coach Salaries')\r\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\nsns.catplot(x=\"TotalPay\", y=\"Coach\", hue = 'Conference', data=Top10Salaries)\r\n\r\nConfTop10Count = Top10Salaries.groupby('Conference')['TotalPay'].nunique()\r\n#print(ConfTop10Count)\r\nConfTop10CountDF = pd.DataFrame({\"Conference\":ConfTop10Count.index, \"Count\":ConfTop10Count.values})\r\nSortConfTop10Count = 
ConfTop10CountDF.sort_values('Count', ascending=False)\r\n\r\nsns.barplot(x=\"Count\", y=\"Conference\", color = 'Blue', data=SortConfTop10Count).set_title('# of Coaches in top 10 highest paid')\r\n\r\n# https://seaborn.pydata.org/generated/seaborn.scatterplot.html & https://stackoverflow.com/questions/30490740/move-legend-outside-figure-in-seaborn-tsplot \r\n# run both lines together\r\nsns.scatterplot(x=\"WinPct\", y=\"TotalPay\", hue=\"Conference\", data=coaches).set_title('TotalPay Compared to WinPct')\r\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n\r\n# correlation: https://stackoverflow.com/questions/29432629/plot-correlation-matrix-using-pandas\r\n# run all 4 lines together\r\nf, ax = plt.subplots(figsize=(10, 8))\r\ncorr = coaches.corr()\r\nsns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),\r\n square=True, ax=ax)\r\n\r\n\r\n\r\n# https://seaborn.pydata.org/generated/seaborn.regplot.html\r\nsns.regplot(x=\"WinPct\", y=\"TotalPay\", data=coaches).set_title('TotalPay & WinPct Regression Line Plot')\r\n\r\n# https://stackoverflow.com/questions/25579227/seaborn-implot-with-equation-and-r2-text\r\n# https://stackoverflow.com/questions/60358228/how-to-set-title-on-seaborn-jointplot\r\ndef r2(x,y):\r\n return stats.pearsonr(x,y)[0] **2\r\n\r\n# run all 3 lines below together\r\npWinPct = sns.jointplot(x=\"WinPct\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npWinPct.fig.suptitle(\"TotalPay Relationship with WinPct\")\r\npWinPct.fig.subplots_adjust(top=0.95)\r\n# run all 3 lines below together\r\npGuruRank = sns.jointplot(x=\"guruRank\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npGuruRank.fig.suptitle(\"TotalPay Relationship with guruRank\")\r\npGuruRank.fig.subplots_adjust(top=0.95)\r\n# run all 3 lines below together\r\npStadiumSize = sns.jointplot(x=\"StadiumSize\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npStadiumSize.fig.suptitle(\"TotalPay Relationship with StadiumSize\")\r\npStadiumSize.fig.subplots_adjust(top=0.95)\r\n# run all 3 lines below together\r\npOppoRank = sns.jointplot(x=\"OppoRank\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npOppoRank.fig.suptitle(\"TotalPay Relationship with OppoRank\")\r\npOppoRank.fig.subplots_adjust(top=0.95)\r\n# run all 3 lines below together\r\npStudAth = sns.jointplot(x=\"StudAth\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npStudAth.fig.suptitle(\"TotalPay Relationship with StudAth\")\r\npStudAth.fig.subplots_adjust(top=0.95)\r\n# run all 3 lines below together\r\npGradSuccRate = sns.jointplot(x=\"GradSuccRate\", y=\"TotalPay\", data=coaches, kind=\"reg\",stat_func = r2)\r\npGradSuccRate.fig.suptitle(\"TotalPay Relationship with GradSuccRate\")\r\npGradSuccRate.fig.subplots_adjust(top=0.95)\r\n\r\n# StadiumSize r^2 = .64\r\n# OppoRank r^2 = .58\r\n# guruRank r^2 = .51\r\n# GradSuccRate r^2 = .13\r\n# WinPct r^2 = .12\r\n# StudAth r^2 = .027\r\n\r\n# totalpay also pops with stadium size in a particularly strong way, strongest r^2 value\r\n# guruRank has a strong negative correlation, this is intuitive because the lower you are, the better your team is\r\n# lastly, TotalPay has a strong negative correlation when measured against OppoRank, which makes sense because the better teams will also be rated lower (meaning more difficult)...the better the team you are, the tougher your schedule will be\r\n\r\n\r\n#################################################\r\n############ 
starting to build model ############\r\n#################################################\r\n# employ training-and-test regimen for model validation\r\n\r\nnp.random.seed(1234)\r\ncoaches['runiform'] = uniform.rvs(loc = 0, scale = 1, size = len(coaches))\r\ncoaches_train = coaches[coaches['runiform'] >= 0.33]\r\ncoaches_test = coaches[coaches['runiform'] < 0.33]\r\n# check training data frame\r\nprint('\\ncoaches_train data frame (rows, columns): ',coaches_train.shape)\r\nprint(coaches_train.head())\r\n# check test data frame\r\nprint('\\ncoaches_test data frame (rows, columns): ',coaches_test.shape)\r\nprint(coaches_test.head())\r\n\r\n# specify a simple model with bobblehead entered last\r\nmy_model_1 = str('TotalPay ~ StadiumSize + guruRank + OppoRank + WinPct + StudAth + GradSuccRate + Conference2')\r\n\r\n# fit the model to the training set\r\ntrain_model_fit_1 = smf.ols(my_model_1, data = coaches_train).fit()\r\n# summary of model fit to the training set\r\nprint(train_model_fit_1.summary())\r\n# training set predictions from the model fit to the training set\r\ncoaches_train['predict_totalPay'] = train_model_fit_1.fittedvalues\r\n\r\n# test set predictions from the model fit to the training set\r\ncoaches_test['predict_totalPay'] = train_model_fit_1.predict(coaches_test)\r\nprint(coaches_test)\r\nprint(coaches_train)\r\n\r\n# https://stackoverflow.com/questions/30787901/how-to-get-a-value-from-a-pandas-dataframe-and-not-the-index-and-object-type\r\nSyracuseSuggestedSalary = coaches_test[coaches_test.School == \"Syracuse\"][\"predict_totalPay\"].item()\r\n\r\n# https://stackoverflow.com/questions/44176475/printing-float-number-as-integer-in-python/44176556\r\n# https://stackoverflow.com/questions/60610101/string-formatting-dollar-sign-in-python\r\n# https://stackoverflow.com/questions/5180365/python-add-comma-into-number-string\r\nprint(\"The Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary)))\r\n\r\n# compute the proportion of response variance\r\n# accounted for when predicting out-of-sample\r\nprint('\\nProportion of Test Set Variance Accounted for: ', round(np.power(coaches_test['TotalPay'].corr(coaches_test['predict_totalPay']),2),3))\r\n\r\n# use the full data set to obtain an estimate of the increase in Salary for each of the variables\r\nmy_model_fit_1 = smf.ols(my_model_1, data = coaches).fit()\r\nprint(my_model_fit_1.summary())\r\nprint(my_model_fit_1.params)\r\nprint('\\nIntercept of TotalPay is: ${:,}'.format(int(my_model_fit_1.params[0]))) # Intercept\r\nprint('\\nEstimated Effect on Coach Salary from Sun Belt on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[1])))\r\nprint('\\nEstimated Effect on Coach Salary from SEC on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[2]))) \r\nprint('\\nEstimated Effect on Coach Salary from AAC on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[3]))) # Big East\r\nprint('\\nEstimated Effect on Coach Salary from ACC on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[4]))) # ACC, Syracuse\r\nprint('\\nEstimated Effect on Coach Salary from Big 12 on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[5]))) \r\nprint('\\nEstimated Effect on Coach Salary from Big Ten on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[6]))) \r\nprint('\\nEstimated Effect on Coach Salary from C-USA on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[7]))) \r\nprint('\\nEstimated Effect on Coach Salary from MAC on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[8]))) 
\r\nprint('\\nEstimated Effect on Coach Salary from Mt. West on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[9]))) \r\nprint('\\nEstimated Effect on Coach Salary from Pac-12 on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[10]))) \r\nprint('\\nEstimated Effect on Coach Salary from Stadium Size on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[11]))) \r\nprint('\\nEstimated Effect on Coach Salary from guruRank on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[12]))) \r\nprint('\\nEstimated Effect on Coach Salary from OppoRank on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[13]))) \r\nprint('\\nEstimated Effect on Coach Salary from WinPct on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[14]))) \r\nprint('\\nEstimated Effect on Coach Salary from Student Athlete on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[15]))) \r\nprint('\\nEstimated Effect on Coach Salary from Grad Success Rate on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[16]))) \r\n\r\n#################################################\r\n############ starting to build 2nd model ########\r\n#################################################\r\n# employ training-and-test regimen for model validation\r\nnp.random.seed(1234)\r\ncoaches_train2 = coaches[coaches['runiform'] >= 0.33]\r\ncoaches_test2 = coaches[coaches['runiform'] < 0.33]\r\n# check training data frame\r\nprint('\\ncoaches_train2 data frame (rows, columns): ',coaches_train2.shape)\r\nprint(coaches_train2.head())\r\n# check test data frame\r\nprint('\\ncoaches_test2 data frame (rows, columns): ',coaches_test2.shape)\r\nprint(coaches_test2.head())\r\n\r\n# specify a simple model with bobblehead entered last\r\nmy_model_2 = str('TotalPay ~ StadiumSize + guruRank + OppoRank + WinPct + Conference2')\r\n\r\n# fit the model to the training set\r\ntrain_model_fit_2 = smf.ols(my_model_2, data = coaches_train2).fit()\r\n# summary of model fit to the training set\r\nprint(train_model_fit_2.summary())\r\n# training set predictions from the model fit to the training set\r\ncoaches_train2['predict_totalPay'] = train_model_fit_2.fittedvalues\r\n\r\n# test set predictions from the model fit to the training set\r\ncoaches_test2['predict_totalPay'] = train_model_fit_2.predict(coaches_test2)\r\n#print(coaches_test2)\r\n#print(coaches_train2)\r\n\r\n# https://stackoverflow.com/questions/30787901/how-to-get-a-value-from-a-pandas-dataframe-and-not-the-index-and-object-type\r\nSyracuseSuggestedSalary2 = coaches_test2[coaches_test2.School == \"Syracuse\"][\"predict_totalPay\"].item()\r\n\r\n# https://stackoverflow.com/questions/44176475/printing-float-number-as-integer-in-python/44176556\r\n# https://stackoverflow.com/questions/60610101/string-formatting-dollar-sign-in-python\r\n# https://stackoverflow.com/questions/5180365/python-add-comma-into-number-string\r\nprint(\"The Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary2)))\r\n\r\n# compute the proportion of response variance\r\n# accounted for when predicting out-of-sample\r\nprint('\\nProportion of Test Set Variance Accounted for: ', round(np.power(coaches_test2['TotalPay'].corr(coaches_test2['predict_totalPay']),2),3))\r\n\r\n# use the full data set to obtain an estimate of the increase in Salary for each of the variables\r\nmy_model_fit_2 = smf.ols(my_model_2, data = coaches).fit()\r\nprint(my_model_fit_2.summary())\r\nprint(my_model_fit_2.params)\r\nprint('\\nIntercept of TotalPay is: ${:,}'.format(int(my_model_fit_2.params[0]))) # 
Intercept\r\nprint('\\nEstimated Effect on Coach Salary from Sun Belt on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[1])))\r\nprint('\\nEstimated Effect on Coach Salary from SEC on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[2]))) \r\nprint('\\nEstimated Effect on Coach Salary from AAC on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[3]))) # Big East\r\nprint('\\nEstimated Effect on Coach Salary from ACC on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[4]))) # ACC, Syracuse\r\nprint('\\nEstimated Effect on Coach Salary from Big 12 on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[5]))) \r\nprint('\\nEstimated Effect on Coach Salary from Big Ten on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[6]))) \r\nprint('\\nEstimated Effect on Coach Salary from C-USA on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[7]))) \r\nprint('\\nEstimated Effect on Coach Salary from MAC on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[8]))) \r\nprint('\\nEstimated Effect on Coach Salary from Mt. West on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[9]))) \r\nprint('\\nEstimated Effect on Coach Salary from Pac-12 on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[10]))) \r\nprint('\\nEstimated Effect on Coach Salary from Stadium Size on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[11]))) \r\nprint('\\nEstimated Effect on Coach Salary from guruRank on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[12]))) \r\nprint('\\nEstimated Effect on Coach Salary from OppoRank on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[13]))) \r\nprint('\\nEstimated Effect on Coach Salary from WinPct on TotalPay is: ${:,}'.format(int(my_model_fit_2.params[14]))) \r\n\r\n\r\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.reset_index.html\r\nmodelparams = pd.DataFrame(data = my_model_fit_1.params).reset_index(0)\r\nmodelparams.columns = [\"Variable\",\"Model_1_Value\"]\r\nmodel2params = pd.DataFrame(my_model_fit_2.params).reset_index(0)\r\nmodel2params.columns = [\"Variable\",\"Model_2_Value\"]\r\n\r\n# https://stackoverflow.com/questions/58774232/is-there-a-vlookup-function-in-python-that-allows-you-to-return-a-value-from-a-c\r\nmodelparams_merged = modelparams.merge(model2params, left_on = ['Variable'], right_on = ['Variable'], how = \"left\")\r\n\r\n# https://datascience.stackexchange.com/questions/45314/dataframe-has-no-column-names-how-to-add-a-header\r\n#print(modelparams)\r\n#print(model2params)\r\n#print(modelparams_merged)\r\nmodelparams_merged.dropna(inplace=True)\r\n#print(modelparams_merged)\r\n\r\n# https://stackoverflow.com/questions/45393123/adding-calculated-column-in-pandas\r\n# https://www.tutorialspoint.com/How-to-calculate-absolute-value-in-Python\r\nmodelparams_merged[\"Model_Abs_Difference\"] = abs(modelparams_merged.Model_1_Value - modelparams_merged.Model_2_Value)\r\n#print(modelparams_merged)\r\n# https://stackoverflow.com/questions/21291259/convert-floats-to-ints-in-pandas\r\nmodelparams_merged = modelparams_merged.astype(int, errors = 'ignore')\r\nprint(modelparams_merged)\r\n\r\nsns.barplot(y= \"Variable\", x = \"Model_Abs_Difference\", data = modelparams_merged).set_title('Variable Difference Boxplot')\r\n\r\n\r\n\r\n# answers to questions\r\n# What is the recommended salary for the Syracuse football coach?\r\nprint(\"Model 1 says the Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary)))\r\nprint(\"Model 2 says the Suggested Salary for the 
next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary2)))\r\nprint(\"The range for Models 1 & 2 is: ${:,}\".format(int(SyracuseSuggestedSalary - SyracuseSuggestedSalary2)))\r\n\r\n# What would his salary be if we were still in the Big East? \r\n# Model 1\r\nprint('\\nBig East (AAC) minus ACC coefficients = ${:,}'.format(int(my_model_fit_1.params[3] - my_model_fit_1.params[4]))) # this number shows difference of Big East - ACC intercept...we add this number to the suggested salary for model 1 to see what the Syracuse coach should make in the Big East\r\nprint(\"\\nThe Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary)))\r\n# the sum of the 2 above numbers\r\nprint(\"\\nModel 1 says the Suggested Salary for the next Syracuse Football coach in the Big East is: ${:,}\".format(int(SyracuseSuggestedSalary + (my_model_fit_1.params[3] - my_model_fit_1.params[4]))))\r\n\r\n\r\n# Model 2\r\nprint('\\nBig East (AAC) minus ACC coefficients = ${:,}'.format(int(my_model_fit_2.params[3] - my_model_fit_2.params[4]))) # this number shows difference of Big East - ACC intercept...we add this number to the suggested salary for model 2 to see what the Syracuse coach should make in the Big East\r\nprint(\"\\nThe Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary2)))\r\n# the sum of the 2 above numbers\r\nprint(\"\\nModel 2 says the Suggested Salary for the next Syracuse Football coach in the Big East is: ${:,}\".format(int(SyracuseSuggestedSalary2 + (my_model_fit_2.params[3] - my_model_fit_2.params[4]))))\r\n\r\n\r\n# What if we went to the Big Ten?\r\n# Model 1: param 6 references big 10, param 3 references ACC\r\nprint('\\nBig 10 minus ACC coefficients = ${:,}'.format(int(my_model_fit_1.params[6] - my_model_fit_1.params[4]))) # this number shows difference of Big 10 - ACC intercept...we add this number to the suggested salary for model 1 to see what the Syracuse coach should make in the Big 10\r\nprint(\"\\nThe Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary)))\r\n# the sum of the 2 above numbers\r\nprint(\"\\nModel 1 says the Suggested Salary for the next Syracuse Football coach in the Big 10 is: ${:,}\".format(int(SyracuseSuggestedSalary + (my_model_fit_1.params[6] - my_model_fit_1.params[4]))))\r\n\r\n\r\n# Model 2: param 6 references big 10, param 3 references ACC\r\nprint('\\nBig 10 minus ACC coefficients = ${:,}'.format(int(my_model_fit_2.params[6] - my_model_fit_2.params[4]))) # this number shows difference of Big 10 - ACC intercept...we add this number to the suggested salary for model 2 to see what the Syracuse coach should make in the Big 10\r\nprint(\"\\nThe Suggested Salary for the next Syracuse Football coach is: ${:,}\".format(int(SyracuseSuggestedSalary2)))\r\n# the sum of the 2 above numbers\r\nprint(\"\\nModel 2 says the Suggested Salary for the next Syracuse Football coach in the Big 10 is: ${:,}\".format(int(SyracuseSuggestedSalary2+(my_model_fit_2.params[6] - my_model_fit_2.params[4]))))\r\n\r\n\r\n# What schools did we drop from our data, and why?\r\n# The schools that were dropped from the data were Baylor, BYU, Rice & SMU because they had no data for TotalPay in the original data file. 
Lines 56 & 57 above show the code that was used to drop these schools (any schools that had NA values).\r\n\r\n\r\n# What effect does graduation rate have on the projected salary?\r\n# Graduation Rate has an r^2 value of 0.13 when regressed against TotalPay (as seen in lines 170-172), which means that 13% of TotalPay is accounted for by using Grad Success Rate as a predictive variable.\r\n# This is not a high r^2 value (highest is 1) as it does not account for very much of the TotalPay variable.\r\n# Additionally, the p-value of Grad Success Rate was higher than 0.05 in the 2 regression models that it was included for, which means it is not a significant variable in the TotalPay variable prediction.\r\n# Lastly, the output of the line of code below suggests that each point of Grad Success Rate is worth $9,133.\r\nprint('\\nEstimated Effect on Coach Salary from Grad Success Rate on TotalPay is: ${:,}'.format(int(my_model_fit_1.params[16]))) # Grad Success Rate\r\n\r\n\r\n# How good is our model?\r\n# Model 1\r\nprint(my_model_fit_1.summary())\r\n# Adjusted R Squared of 0.784\r\n\r\n# Model 2\r\nprint(my_model_fit_2.summary())\r\n# Adjusted R Squared of 0.787\r\n\r\n\r\n# The two models have a similar adjust r^2 of ~0.785. This suggests that 78% of variance is accounted for within the models. These are moderately good results.\r\n\r\n# What is the single biggest impact on salary size?\r\n# Outside of the Intercept, Models 1 & 2 both suggest that being a coach in the SEC makes the largest impact on salary size. The coefficient for the SEC had the largest value (~$76,000 higher than being a coach in the Big 10 for model 1, ~$10,000 higher than being a coach in the Big 10 for model 2) of the predictive variables.\r\nprint(modelparams_merged)\r\nmodelparamsimpact = modelparams_merged.drop([0])\r\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.max.html\r\n# The values from line 390 need to be compared to the list in line 391 to find which variable they correspond with. The max values for Models 1 & 2 both show that the SEC is the largest variable.\r\nprint(modelparamsimpact.max())\r\nprint(modelparamsimpact)\r\n"
},
{
"alpha_fraction": 0.6760807633399963,
"alphanum_fraction": 0.6920093297958374,
"avg_line_length": 41.39215850830078,
"blob_id": "3a1235d7b4bac4722e57554a52819045125e564c",
"content_id": "e92e85cbdaa76693953eed1ed1c52c4a0c161578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 26556,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 612,
"path": "/IST 687 Final Project Code.R",
"repo_name": "graffjr/Jeremiah-Graff-MS-ADS-Portfolio",
"src_encoding": "UTF-8",
"text": "# Course: IST 687\r\n# Name: Jeremiah Graff\r\n# Final Project\r\n# Due Date: 9/16/19\r\n# Date Submitted: 9/15/19\r\n\r\n#-----------------------------------------\r\nlibrary(readxl)\r\nlibrary(tidyverse)\r\nlibrary(dplyr)\r\nlibrary(sqldf)\r\nlibrary(ggplot2)\r\nlibrary(data.table)\r\nlibrary(openintro)\r\nlibrary(imputeTS)\r\nlibrary(\"arules\")\r\nlibrary(arulesViz)\r\nlibrary(kernlab)\r\nlibrary(maps)\r\nlibrary(ggmap)\r\n\r\n# the RStudio Import Wizard helped generate the code below to import excel file\r\nfpjDF <- read_excel(\"H:/Jeremiah Graff/0-Jeremiah Master's Degree/2-July 2019 Classes/IST 687 - intro to data science/Final Project/finalprojectIST687.xlsx\")\r\n\r\n# finding the # of NAs in the data\r\nsum(length(which(is.na(fpjDF))))\r\n\r\n# finding the columns with NAs\r\ncolnames(fpjDF)[colSums(is.na(fpjDF)) > 0 ]\r\n\r\n# https://stackoverflow.com/questions/8161836/how-do-i-replace-na-values-with-zeros-in-an-r-dataframe\r\n# filling NAs with 0\r\nfpjDF[is.na(fpjDF)] <- 0\r\n\r\n# checking for anymore NAs\r\nsum(length(which(is.na(fpjDF))))\r\n\r\nstr(fpjDF)\r\nsummary(fpjDF)\r\n\r\n#----------------------------\r\n# everything below will help me determine where i want to focus my efforts on this project\r\n#----------------------------\r\n\r\n# creating a data frame to show the avg sat score by airline\r\navgSatByAirline<-data.frame(tapply(fpjDF$Satisfaction,fpjDF$`Airline Name`,mean))\r\n\r\n# renaming the column for AvgSatScore by Airline\r\n# help from https://www.datanovia.com/en/lessons/rename-data-frame-columns-in-r/\r\nnames(avgSatByAirline)[1]<-\"AvgSatScore\"\r\n\r\n# trying to see total flight counts to find % of flights by airline\r\navgSatByAirline$flightCount <- length(fpjDF$`Airline Name`)\r\n\r\n# finding the count of flights scheduled by airline\r\navgSatByAirline$flightsScheduled <- tapply(fpjDF$Satisfaction,fpjDF$`Airline Name`,length)\r\n\r\n# finding the % of flights schedule by airline and adding \r\navgSatByAirline$percentOfFlightsScheduled <- avgSatByAirline$flightsScheduled/avgSatByAirline$flightCount\r\n\r\n# finding the # of successful flights flights\r\navgSatByAirline$SuccessfulFlights <- sqldf('select count(\"Flight Cancelled\") as SuccessulFlights from fpjDF where \"Flight Cancelled\" = \"No\" group by \"Airline Name\"')\r\n\r\n# help from https://stackoverflow.com/questions/12384071/how-to-coerce-a-list-object-to-type-double\r\n# to convert int to numeric\r\navgSatByAirline$SuccessfulFlights <- as.numeric(unlist(avgSatByAirline$SuccessfulFlights))\r\n\r\n# calculating % of flights successfully flown\r\navgSatByAirline$SuccessfulFlightsPercentage <- avgSatByAirline$SuccessfulFlights/avgSatByAirline$flightsScheduled\r\navgSatByAirline\r\n\r\n# showing avg price sensitivity of airline instances\r\n# below shows a range of price sensitivity with range of 1.256 to 1.292 for all 14 airlines...no outliers in terms of avg satisfaction scores\r\n# don't need the code below but it was helpful to see little variation amongst the airlines\r\n# sqldf('select \"Airline Name\",avg(\"Price Sensitivity\") as AvgPriceSensitivity from fpjDF group by \"Airline Name\"')\r\n\r\n# sorting data frame by % of flights scheduled\r\n# help from https://www.guru99.com/r-sort-data-frame.html\r\navgSatByAirlineSorted <- avgSatByAirline[order(-avgSatByAirline$percentOfFlightsScheduled),]\r\navgSatByAirlineSorted\r\n\r\n# sorting by successful flight %\r\navgSatByAirlineSorted2 <- 
avgSatByAirline[order(-avgSatByAirline$SuccessfulFlightsPercentage),]\r\navgSatByAirlineSorted2\r\n\r\nsetDT(avgSatByAirline, keep.rownames = \"Airline\")[]\r\nstr(avgSatByAirline)\r\n\r\n# plot 1\r\nggplot(avgSatByAirline, aes(x=SuccessfulFlightsPercentage, y=percentOfFlightsScheduled, colour=SuccessfulFlightsPercentage\r\n )) + geom_point(aes(size=percentOfFlightsScheduled)) + geom_text(aes(label=Airline),hjust=0,vjust=0\r\n ) + ggtitle(\"% Successful Flights by % of Flights\")\r\n\r\n# plot 2\r\nggplot(avgSatByAirline, aes(x=SuccessfulFlightsPercentage, y=AvgSatScore, colour=SuccessfulFlightsPercentage\r\n)) + geom_point(aes(size=percentOfFlightsScheduled)) + geom_text(aes(label=Airline),hjust=0,vjust=0\r\n) + ggtitle(\"% Successful Flights & % of Flights by Avg Sat Score\")\r\n\r\n# after looking through the data shown in code above, i plan to focus on FlyFast Airways due to the fact that they are \r\n# considerably lower than the next lowest airline in terms of successful flights (95.7% compared to 96.4% - most are in 98% range)\r\n# with the combination of being the 3rd most frequently flown airline...to me, they are a good target to bring about sow ROI on\r\n# in a consultative relationship\r\n\r\n#-----------------------------------------\r\n# below will start seeing my narrowed focus on FlyFastAirWays\r\n#-----------------------------------------\r\n\r\n# creating a new data frame that only contains FlyFast Airway Inc. Data\r\nFlyFastData <- subset(fpjDF, fpjDF$'Airline Name' == \"FlyFast Airways Inc.\")\r\nstr(FlyFastData)\r\n\r\nFFOriginCityCounts<-data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Orgin City` ,length))\r\nFFOriginCityCounts$FlightArriving <- tapply(FlyFastData$Satisfaction,FlyFastData$`Destination City` ,length)\r\n\r\n# help from https://stackoverflow.com/questions/29511215/convert-row-names-into-first-column\r\nsetDT(FFOriginCityCounts, keep.rownames = \"City_State\")[]\r\ncolnames(FFOriginCityCounts) <- c(\"City_State\", \"Flights Leaving\", \"Flights Arriving\")\r\nFFOriginCityCounts$cityStateLower <- tolower(FFOriginCityCounts$City_State)\r\nFFOriginCityCounts$State <- FFOriginCityCounts$City_State\r\n\r\n# help from https://stackoverflow.com/questions/7963898/extracting-the-last-n-characters-from-a-string-in-r\r\nsubstrRight <- function(x, n){\r\n substr(x, nchar(x)-n+1, nchar(x))\r\n}\r\n\r\nFFOriginCityCounts$State <- substrRight(FFOriginCityCounts$State,2)\r\nFFOriginCityCounts$StateFull <- abbr2state(FFOriginCityCounts$State)\r\nFFOriginCityCounts$StateFullLower <- tolower(FFOriginCityCounts$StateFull)\r\n\r\nFFOriginCityCounts\r\ndim(FFOriginCityCounts)\r\nFFOriginCityCounts$`Flights Leaving` <- as.numeric(FFOriginCityCounts$`Flights Leaving`)\r\nFFOriginCityCounts$`Flights Arriving` <- as.numeric(FFOriginCityCounts$`Flights Arriving`)\r\nstr(FFOriginCityCounts)\r\n\r\n\r\nStateFlightCounts <- data.frame(tapply(FFOriginCityCounts$`Flights Arriving`,FFOriginCityCounts$StateFull,sum))\r\nStateFlightCounts$FlightsLeaving <- tapply(FFOriginCityCounts$`Flights Leaving`,FFOriginCityCounts$StateFull,sum)\r\ncolnames(StateFlightCounts) <- c(\"FlightsArriving\",\"FlightsLeaving\")\r\nsetDT(StateFlightCounts, keep.rownames = \"State\")[]\r\nStateFlightCounts$stateLower <- tolower(StateFlightCounts$State)\r\nStateFlightCounts\r\nstr(StateFlightCounts)\r\n\r\nLeavingFlightSorted <- StateFlightCounts[order(-StateFlightCounts$FlightsLeaving),]\r\nLeavingFlightSorted\r\n\r\nArrivingFlightSorted <- 
StateFlightCounts[order(-StateFlightCounts$FlightsArriving),]\r\nArrivingFlightSorted\r\n\r\nus <- map_data(\"state\")\r\n\r\n# plot 3\r\n# map showing the flight leaving focus\r\nmap.NumOfFlightsLeaving <- ggplot(StateFlightCounts,aes(map_id=stateLower))\r\nmap.NumOfFlightsLeaving <- map.NumOfFlightsLeaving + geom_map(map =us,aes(fill=StateFlightCounts$FlightsLeaving))\r\nmap.NumOfFlightsLeaving <- map.NumOfFlightsLeaving + expand_limits(x=us$long,y=us$lat)\r\nmap.NumOfFlightsLeaving <- map.NumOfFlightsLeaving + coord_map() + ggtitle(\"Leading Flight Departure States\")\r\nmap.NumOfFlightsLeaving \r\n\r\n# plot 4\r\n# map showing the flight arriving focus\r\nmap.NumOfFlightsArriving <- ggplot(StateFlightCounts,aes(map_id=stateLower))\r\nmap.NumOfFlightsArriving <- map.NumOfFlightsArriving + geom_map(map =us,aes(fill=StateFlightCounts$FlightsArriving))\r\nmap.NumOfFlightsArriving <- map.NumOfFlightsArriving + expand_limits(x=us$long,y=us$lat)\r\nmap.NumOfFlightsArriving <- map.NumOfFlightsArriving + coord_map() + ggtitle(\"Leading Flight Arrival States\")\r\nmap.NumOfFlightsArriving \r\n\r\n# ----------------------------------------------------------\r\n# bringing in NewLatLon Script elements\r\nlibrary(jsonlite)\r\nlibrary(tidyverse)\r\n\r\nnominatim_osm <- function(address = NULL)\r\n{\r\n if(suppressWarnings(is.null(address)))\r\n return(data.frame())\r\n tryCatch(\r\n d <- jsonlite::fromJSON( \r\n gsub('\\\\@addr\\\\@', gsub('\\\\s+', '\\\\%20', address), \r\n 'http://nominatim.openstreetmap.org/search/@addr@?format=json&addressdetails=0&limit=1')\r\n ), error = function(c) return(data.frame())\r\n )\r\n if(length(d) == 0) return(data.frame())\r\n return(data.frame(lon = as.numeric(d$lon), lat = as.numeric(d$lat)))\r\n}\r\n\r\n\r\n\r\nNewLatLon<-function(addresses){\r\n d <- suppressWarnings(lapply(addresses, function(address) {\r\n #set the elapsed time counter to 0\r\n t <- Sys.time()\r\n #calling the nominatim OSM API\r\n api_output <- nominatim_osm(address)\r\n #get the elapsed time\r\n t <- difftime(Sys.time(), t, 'secs')\r\n #return data.frame with the input address, output of the nominatim_osm function and elapsed time\r\n return(data.frame(address = address, api_output, elapsed_time = t))\r\n }) %>%\r\n #stack the list output into data.frame\r\n bind_rows() %>% data.frame())\r\n #output the data.frame content into console\r\n return(d)\r\n}\r\n\r\n# https://stackoverflow.com/questions/6347356/creating-a-comma-separated-vector\r\n\r\naddys <- c(\"abilene, tx\", \"akron, oh\", \"albany, ga\", \"albany, ny\", \"albuquerque, nm\", \r\n \"alexandria, la\", \"allentown, pa\", \"amarillo, tx\", \r\n \"appleton, wi\", \"asheville, nc\", \"atlanta, ga\", \"augusta, ga\", \r\n \"austin, tx\", \"baltimore, md\", \"bangor, me\", \"baton rouge, la\", \r\n \"beaumont, tx\", \"billings, mt\", \"birmingham, al\", \r\n \"bismarck, nd\", \"bloomington, il\", \"boston, ma\", \r\n \"bristol, tn\", \"brownsville, tx\", \"brunswick, ga\", \r\n \"buffalo, ny\", \"burlington, vt\", \"cedar rapids, ia\", \r\n \"charleston, sc\", \"charleston, wv\", \"charlotte, nc\", \"charlottesville, va\", \r\n \"chattanooga, tn\", \"chicago, il\", \"cincinnati, oh\", \"cleveland, oh\", \r\n \"college station, tx\", \"colorado springs, co\", \"columbia, sc\", \r\n \"columbus, ga\", \"columbus, ms\", \"columbus, oh\", \"corpus christi, tx\", \r\n \"dallas, tx\", \"dayton, oh\", \"denver, co\", \r\n \"des moines, ia\", \"detroit, mi\", \"dickinson, nd\", \"dothan, al\", \r\n \"durango, co\", \"el paso, tx\", \"elmira, 
ny\", \"evansville, in\", \r\n \"fargo, nd\", \"fayetteville, ar\", \"fayetteville, nc\", \"flint, mi\", \r\n \"fort myers, fl\", \"fort smith, ar\", \"fort wayne, in\", \"gainesville, fl\", \r\n \"grand junction, co\", \"grand rapids, mi\", \"green bay, wi\", \"greensboro, nc\", \r\n \"greer, sc\", \"gulfport, ms\", \"gunnison, co\", \"harlingen, tx\", \r\n \"harrisburg, pa\", \"hartford, ct\", \"hobbs, nm\", \"houston, tx\", \r\n \"huntsville, al\", \"indianapolis, in\", \"jackson, ms\", \r\n \"jacksonville, fl\", \"jacksonville, nc\", \"kansas city, mo\", \r\n \"key west, fl\", \"killeen, tx\", \"knoxville, tn\", \"lafayette, la\", \r\n \"lake charles, la\", \"lansing, mi\", \"laredo, tx\", \"lexington, ky\", \r\n \"lincoln, ne\", \"little rock, ar\", \"louisville, ky\", \"lubbock, tx\", \r\n \"madison, wi\", \"manchester, nh\", \"memphis, tn\", \"miami, fl\", \r\n \"midland, tx\", \"milwaukee, wi\", \"minneapolis, mn\", \"minot, nd\", \r\n \"mission, tx\", \"mobile, al\", \"moline, il\", \"monroe, la\", \r\n \"montgomery, al\", \"montrose, co\", \"mosinee, wi\", \"myrtle beach, sc\", \r\n \"nashville, tn\", \"new bern, nc\", \"new orleans, la\", \r\n \"new york, ny\", \"newark, nj\", \"newport news, va\", \r\n \"norfolk, va\", \"oklahoma city, ok\", \"omaha, ne\", \"orlando, fl\", \r\n \"panama city, fl\", \"pensacola, fl\", \"peoria, il\", \"philadelphia, pa\", \r\n \"pittsburgh, pa\", \"portland, me\", \"providence, ri\", \"raleigh, nc\", \r\n \"rapid city, sd\", \"richmond, va\", \"roanoke, va\", \"rochester, ny\", \r\n \"salt lake city, ut\", \"san angelo, tx\", \"san antonio, tx\", \"santa fe, nm\", \r\n \"savannah, ga\", \"scranton, pa\", \"shreveport, la\", \r\n \"sioux falls, sd\", \"south bend, in\", \"springfield, mo\", \"st. louis, mo\", \r\n \"state college, pa\", \"syracuse, ny\", \"tallahassee, fl\", \"tampa, fl\", \r\n \"topeka, ks\", \"traverse city, mi\", \"tucson, az\", \"tulsa, ok\", \r\n \"tyler, tx\", \"valdosta, ga\", \"valparaiso, fl\", \"washington, dc\", \r\n \"west palm beach, fl\", \"white plains, ny\", \"wichita falls, tx\", \r\n \"wichita, ks\", \"williston, nd\", \"wilmington, nc\")\r\n\r\n\r\n\r\nmap(\"state\")\r\n\r\n# based on this map, why don't they fly to the west/CA?\r\n# plot 5\r\nfor (i in 1:length(addys)) {\r\n g.codes <- NewLatLon(addys[i]) \r\n #print(addys[i])\r\n print(g.codes)\r\n points(g.codes$lon, g.codes$lat, col = \"red\", cex = 1.5, pch = 16)\r\n}\r\n\r\nNewLatLon(addys)\r\nlatlon<-NewLatLon(addresses)\r\n\r\n# --------------------------------------------------------------------\r\n# exploring the data\r\n# plot 6\r\nSatByGender <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$Gender,mean))\r\ncolnames(SatByGender) <- \"AvgSat\"\r\nsetDT(SatByGender, keep.rownames = \"Gender\")[]\r\n\r\n\r\nggplot(SatByGender, aes(x=Gender,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n ) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByGender\")\r\n\r\n# plot 7\r\nSatByAge<-data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$Age ,mean))\r\ncolnames(SatByAge) <- \"AvgSat\"\r\nsetDT(SatByAge, keep.rownames = \"Age\")[]\r\n\r\n\r\nggplot(SatByAge, aes(x=Age,y=AvgSat,group=1)) + geom_line(\r\n )+ theme(axis.text.x = element_text(size = 10)) + ggtitle(\"AvgSatByAge\")\r\n\r\n# price sensitivity plot\r\nSatByPriceSens <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Price Sensitivity` ,mean))\r\ncolnames(SatByPriceSens) <- \"AvgSat\"\r\nsetDT(SatByPriceSens, keep.rownames = 
\"PriceSensitivity\")[]\r\n\r\nggplot(SatByPriceSens, aes(x=PriceSensitivity,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,5)) + ggtitle(\"AvgSatByPriceSensitivity\")\r\n\r\n\r\n# plot \r\nSatByStatus <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Airline Status` ,mean))\r\ncolnames(SatByStatus) <- \"AvgSat\"\r\nsetDT(SatByStatus, keep.rownames = \"Status\")[]\r\n\r\nggplot(SatByStatus, aes(x=Status,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByStatus\")\r\n\r\n# plot \r\nSatByTravelType <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Type of Travel` ,mean))\r\ncolnames(SatByTravelType) <- \"AvgSat\"\r\nsetDT(SatByTravelType, keep.rownames = \"TravelType\")[]\r\n\r\nggplot(SatByTravelType, aes(x=TravelType,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByTravelType\")\r\n\r\n# the mean is not as drastic of impact on satisfaction as i would've figured\r\ntapply(FlyFastData$Satisfaction,FlyFastData$`Flight cancelled` ,mean)\r\n\r\n# the mean is not as drastic of impact on satisfaction as i would've figured\r\ntapply(FlyFastData$Satisfaction,FlyFastData$`Arrival Delay greater 5 Mins` ,mean)\r\n\r\n# plot \r\nSatByDestState <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Destination State` ,mean))\r\ncolnames(SatByDestState) <- \"AvgSat\"\r\nsetDT(SatByDestState, keep.rownames = \"DestState\")[]\r\n\r\nggplot(SatByDestState, aes(x=DestState,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByDestState\") +theme(\r\n axis.text.x = element_text(color = \"grey20\", size = 12, angle = 90, hjust = .5, vjust = .5, face = \"plain\"\r\n))\r\n\r\n# plot \r\nSatByOriginState <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Origin State` ,mean))\r\ncolnames(SatByOriginState) <- \"AvgSat\"\r\nsetDT(SatByOriginState, keep.rownames = \"OriginState\")[]\r\n\r\nggplot(SatByOriginState, aes(x=OriginState,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByOriginState\") +theme(\r\n axis.text.x = element_text(color = \"grey20\", size = 12, angle = 90, hjust = .5, vjust = .5, face = \"plain\"\r\n ))\r\n\r\n# plot: ok plot, nothing special\r\nSatByDay <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Day of Month` ,mean))\r\ncolnames(SatByDay) <- \"AvgSat\"\r\nsetDT(SatByDay, keep.rownames = \"Day\")[]\r\nSatByDay$Day <- as.numeric(SatByDay$Day)\r\nstr(SatByDay)\r\n\r\nggplot(SatByDay, aes(x=Day,y=AvgSat)) + geom_col(color=\"white\",fill=\"black\"\r\n) + scale_y_continuous(limits = c(0,4)) + ggtitle(\"AvgSatByDay\") +theme(\r\n axis.text.x = element_text(color = \"grey20\", size = 12, angle = 90, hjust = .5, vjust = .5, face = \"plain\"\r\n ))\r\n\r\nggplot(SatByDay, aes(x=Day,y=AvgSat,group=1)) + geom_line(\r\n)+ theme(axis.text.x = element_text(size = 10)) + ggtitle(\"AvgSatByDay\")\r\n\r\n# plot: strong trend as x axis increases\r\nSatByNFPA <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`No of Flights p.a.` ,mean))\r\ncolnames(SatByNFPA) <- \"AvgSat\"\r\nsetDT(SatByNFPA, keep.rownames = \"NFPA\")[]\r\nSatByNFPA$NFPA <- as.numeric(SatByNFPA$NFPA)\r\nstr(SatByNFPA)\r\n\r\nggplot(SatByNFPA, aes(x=NFPA,y=AvgSat,group=1)) + geom_line()+ theme(axis.text.x = element_text(size = 15),axis.text.y = element_text(size = 15)) + 
ggtitle(\"AvgSatByNFPA\")\r\n\r\n# good plot\r\nSatByTravelDist <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Flight Distance` ,mean))\r\ncolnames(SatByTravelDist) <- \"AvgSat\"\r\nsetDT(SatByTravelDist, keep.rownames = \"TravelDist\")[]\r\nSatByTravelDist$TravelDist <- as.numeric(SatByTravelDist$TravelDist)\r\nstr(SatByNFPA)\r\n\r\nggplot(SatByTravelDist, aes(x=TravelDist,y=AvgSat,group=1)) + geom_line(\r\n)+ theme(axis.text.x = element_text(size = 25)) + ggtitle(\"AvgSatByTravelDist\")\r\n\r\n# plot: create line chart\r\nSatByShopAmnt <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Shopping Amount at Airport` ,mean))\r\ncolnames(SatByShopAmnt) <- \"AvgSat\"\r\nsetDT(SatByShopAmnt, keep.rownames = \"ShopAmnt\")[]\r\n\r\nSatByShopAmnt$ShopAmnt <- as.numeric(SatByShopAmnt$ShopAmnt)\r\n\r\nggplot(SatByShopAmnt, aes(x=ShopAmnt,y=AvgSat,group=1)) + geom_line(\r\n)+ theme(axis.text.x = element_text(size = 15),axis.text.y=element_text(size = 15)) + ggtitle(\"AvgSatByShopAmnt\")\r\n\r\n# plot: create line chart\r\nSatByEatDrink <- data.frame(tapply(FlyFastData$Satisfaction,FlyFastData$`Eating and Drinking at Airport` ,mean))\r\ncolnames(SatByEatDrink) <- \"AvgSat\"\r\nsetDT(SatByEatDrink, keep.rownames = \"EatDrink\")[]\r\n\r\nSatByEatDrink$EatDrink <- as.numeric(SatByEatDrink$EatDrink)\r\n\r\nggplot(SatByEatDrink, aes(x=EatDrink,y=AvgSat,group=1)) + geom_line(\r\n)+ theme(axis.text.x = element_text(size = 15),axis.text.y=element_text(size = 15)) + ggtitle(\"AvgSatByEatDrink\")\r\n\r\n# everything below is aimed at creating numeric values to run through linear model\r\n# to find statistical signifcance of the data in reference to customer satisfaction\r\nFlyFastData2 <- FlyFastData\r\n\r\nstr(FlyFastData2)\r\n\r\n# 1 = Female, 0 = male\r\nFlyFastData2$Gender2 <- ifelse(FlyFastData2$Gender == \"Female\",1,0) \r\n\r\n# learned about factoring and then applied as.numeric to see if below would work and it did!\r\n# blue = 1\r\n# gold = 2\r\n# platinum = 3\r\n# silver = 4\r\nFlyFastData2$AirlineStatus2 <- as.numeric(factor(FlyFastData2$`Airline Status`))\r\n\r\n# business travel =1\r\n# mileage tickets =2\r\n# personal travel =3\r\nFlyFastData2$TypeOfTravel2 <- as.numeric(factor(FlyFastData2$`Type of Travel`))\r\n\r\n# business =1\r\n# eco =2\r\n# eco plus =3\r\nFlyFastData2$Class2 <- as.numeric(factor(FlyFastData2$Class))\r\n\r\n#1 Alabama \r\n#2 Arizona \r\n#3 Arkansas \r\n#4 Colorado \r\n#5 Connecticut \r\n #6 District of Columbia\r\n#6 Florida \r\n#7 Georgia \r\n#8 Illinois \r\n#9 Indiana \r\n#10 Iowa \r\n#11 Kansas \r\n#12 Kentucky \r\n#13 Louisiana \r\n#14 Maine \r\n#15 Maryland \r\n#16 Massachusetts \r\n#17 Michigan \r\n#18 Minnesota \r\n#19 Mississippi \r\n#20 Missouri \r\n#21 Montana \r\n#22 Nebraska \r\n#23 New Hampshire \r\n#24 New Jersey \r\n#25 New Mexico \r\n#26 New York \r\n#27 North Carolina \r\n#28 North Dakota \r\n#29 Ohio \r\n#30 Oklahoma \r\n#31 Pennsylvania \r\n#32 Rhode Island \r\n#33 South Carolina \r\n#34 South Dakota \r\n#35 Tennessee \r\n#36 Texas \r\n#37 Utah \r\n#38 Vermont \r\n#39 Virginia \r\n#40 West Virginia\r\n#41 Wisconsin \r\n\r\n#as.factor(FlyFastData2$`Destination State`)\r\nFlyFastData2$DestinationState2 <- as.numeric(factor(FlyFastData2$`Destination State`))\r\n\r\n#as.factor(FlyFastData2$`Origin State`)\r\nFlyFastData2$OriginState2 <- as.numeric(factor(FlyFastData2$`Origin State`))\r\n\r\n# yes = 1, no = 0\r\n#as.factor(FlyFastData2$`Flight cancelled`)\r\nFlyFastData2$FlightCancelled2 <- ifelse(FlyFastData2$`Flight cancelled` == 
\"Yes\",1,0)\r\n\r\n# yes = 1, no = 0\r\n#as.factor(FlyFastData2$`Arrival Delay greater 5 Mins`)\r\nFlyFastData2$ArrDelayGrt5Min2 <- ifelse(FlyFastData2$`Arrival Delay greater 5 Mins` == \"Yes\",1,0)\r\n\r\nFlyFastData2 <- FlyFastData2[,-2]\r\nFlyFastData2 <- FlyFastData2[,-3]\r\nFlyFastData2 <- FlyFastData2[,-7]\r\nFlyFastData2 <- FlyFastData2[,-10]\r\nFlyFastData2 <- FlyFastData2[,-12:-15]\r\nFlyFastData2 <- FlyFastData2[,-12:-13]\r\nFlyFastData2 <- FlyFastData2[,-15]\r\nFlyFastData2 <- FlyFastData2[,-17]\r\n# removing date due to lack of significance\r\nFlyFastData2 <- FlyFastData2[,-11]\r\n\r\nstr(FlyFastData2)\r\n\r\n# linear model time!!!\r\n# starting to predict values\r\n# create the training & test data sets\r\ndim(FlyFastData2)\r\nFlyFastData2[1:5,]\r\nrandIndex <- sample(1:dim(FlyFastData2)[1])\r\nsummary(randIndex)\r\nlength(randIndex)\r\nhead(randIndex)\r\ncutPoint2_3 <- floor(2 * dim(FlyFastData2)[1]/3)\r\ncutPoint2_3\r\ntrainData <- FlyFastData2[randIndex[1:cutPoint2_3],]\r\ndim(trainData)\r\nhead(trainData)\r\ntestData <- FlyFastData2[randIndex[(cutPoint2_3+1):dim(FlyFastData2)[1]],]\r\ndim(testData)\r\nhead(testData)\r\nstr(testData)\r\n\r\n# lm equation output\r\n# https://www.dataquest.io/blog/statistical-learning-for-predictive-modeling-r/\r\n# parsimonious code to find statistically significant inputs for predicting Satisfaction\r\n# this code will be used throughout linear modeling\r\nparsModel=lm(formula = Satisfaction ~ .,data=FlyFastData2)\r\nstep(parsModel, data=FlyFastData2, direction=\"backward\")\r\n\r\n\r\n# ksvm model\r\n# ksvm values code was from parsimonious model above\r\nksvmOutput <- ksvm(Satisfaction ~ Age + `Price Sensitivity` + `Year of First Flight` + \r\n `No of Flights p.a.` + `Shopping Amount at Airport` + `Departure Delay in Minutes` + \r\n `Arrival Delay in Minutes` + `Flight time in minutes` + `Flight Distance` + \r\n Gender2 + AirlineStatus2 + TypeOfTravel2 + Class2 + FlightCancelled2, data = trainData)\r\n\r\nksvmOutput\r\n\r\n# creating ksvm predictions from model above\r\nksvmPred <- predict(ksvmOutput, testData, type=\"votes\")\r\n\r\n# comparison dataframe that shows actual vs predicted\r\nKSVMcompTable <- data.frame(testData[,1],ksvmPred[,1])\r\ncolnames(KSVMcompTable) <- c(\"true\",\"pred\")\r\nhead(KSVMcompTable)\r\nKSVMcompTable\r\n\r\n# compute root mean squared error\r\nksvmRMSE <- sqrt(mean((KSVMcompTable$true - KSVMcompTable$pred)^2))\r\nksvmRMSE\r\n\r\n# compute absolute error for each case\r\nKSVMcompTable$error <- abs(KSVMcompTable$true - KSVMcompTable$pred)\r\nhead(KSVMcompTable)\r\n\r\n# create new dataframe for plot\r\nksvmPlot <- data.frame(KSVMcompTable$error,testData$Satisfaction, testData$`No of Flights p.a.`, testData$Age , testData$AirlineStatus2, testData$TypeOfTravel2)\r\ncolnames(ksvmPlot) <- c(\"error\",\"Satisfaction\",\"NoOfFlights\", \"Age\",\"Status\",\"TravelType\")\r\n\r\n# plot for ksvm model with errors shown\r\n# color help from http://www.sthda.com/english/wiki/ggplot2-colors-how-to-change-colors-automatically-and-manually\r\nplt1 <- ggplot(ksvmPlot,aes(x=Age,y=NoOfFlights)) + geom_point(aes(size=Satisfaction,color=error)) + ggtitle(\"ksvm\") + scale_color_gradientn(colours = rainbow(5))\r\nplt1\r\n\r\n#svm output\r\n# https://cran.r-project.org/web/packages/e1071/vignettes/svmdoc.pdf\r\n# creating svm prediction model\r\nlibrary(rpart)\r\nsvmModel <- rpart(Satisfaction ~ Age + `Price Sensitivity` + `Year of First Flight` + \r\n `No of Flights p.a.` + `Shopping Amount at Airport` + `Departure Delay in 
Minutes` + \r\n `Arrival Delay in Minutes` + `Flight time in minutes` + `Flight Distance` + \r\n Gender2 + AirlineStatus2 + TypeOfTravel2 + Class2 + FlightCancelled2, data = trainData)\r\n\r\n# actual predictions\r\nsvmPred <- predict(svmModel,testData)\r\n\r\n# comparison dataframe that shows actual vs predicted\r\nSVMcompTable <- data.frame(pred=svmPred,true=testData$Satisfaction)\r\nhead(SVMcompTable)\r\n\r\n# compute root mean squared error for svm model to see the spread of residuals\r\nsvmRMSE <- sqrt(mean((SVMcompTable$true - SVMcompTable$pred)^2))\r\nsvmRMSE\r\n\r\n# compute absolute error for each case\r\nSVMcompTable$error <- abs(SVMcompTable$true - SVMcompTable$pred)\r\nhead(SVMcompTable)\r\n\r\n# create new dataframe to build plot\r\nsvmPlot <- data.frame(SVMcompTable$error,testData$Satisfaction, testData$`No of Flights p.a.`, testData$Age , testData$AirlineStatus2, testData$TypeOfTravel2)\r\ncolnames(svmPlot) <- c(\"error\",\"Satisfaction\",\"NoOfFlights\", \"Age\",\"Status\",\"TravelType\")\r\n\r\n# plot for svm predictions along with error value\r\nplt2 <- ggplot(svmPlot,aes(x=Age,y=NoOfFlights)) + geom_point(aes(size=Satisfaction,color=error)) + ggtitle(\"svm\") + scale_color_gradientn(colours = rainbow(5))\r\nplt2\r\n\r\n# lm output\r\nlmOutput <- lm(formula = Satisfaction ~ Age + `Price Sensitivity` + `Year of First Flight` + \r\n `No of Flights p.a.` + `Shopping Amount at Airport` + `Departure Delay in Minutes` + \r\n `Arrival Delay in Minutes` + `Flight time in minutes` + `Flight Distance` + \r\n Gender2 + AirlineStatus2 + TypeOfTravel2 + Class2 + FlightCancelled2, data = testData)\r\nsummary(lmOutput)\r\n\r\n# creating the predictions based on model above\r\npredValue <- data.frame(Satisfaction = testData$Satisfaction, Age = testData$Age, NoOfFlights = testData$`No of Flights p.a.`)\r\npredValue$Pred <- predict(lmOutput,pred=predValue)\r\npredValue$error <- abs(predValue$Satisfaction - predValue$Pred)\r\n\r\n# compute root mean squared error for lm\r\nlmRMSE <- sqrt(mean((predValue$Satisfaction - predValue$Pred)^2))\r\nlmRMSE\r\n\r\n# plot for lm predictions of actual values along with the error\r\nplt3<-ggplot(predValue,aes(x=Age,y=NoOfFlights)) + geom_point(aes(size=Satisfaction,color=error)) + ggtitle(\"lm\") + scale_color_gradientn(colours = rainbow(5))\r\nplt3\r\n\r\n# extraGrid for actual value predictions\r\n#https://stackoverflow.com/questions/35634736/consistent-plotting-panel-width-height-when-using-gridextra\r\n#https://cran.r-project.org/web/packages/egg/vignettes/Ecosystem.html\r\nlibrary(gridExtra)\r\ngrid.arrange(plt1,plt2,plt3,nrow=2)\r\n\r\n# joining the RMSE's into df for step 6\r\nRMSEdf <- data.frame(ksvmRMSE,svmRMSE,lmRMSE)\r\nRMSEdf\r\n"
}
] | 3 |
torrerod/volume_calculator | https://github.com/torrerod/volume_calculator | 637fae2323a34aa962e91a8359a4b36eeed0d1b9 | 7f91979ebfc505b77f5e3d06b7e6bb1a3d62435c | b336ec4c3677046b12e34fdd8fcdc3ccb6a432ec | refs/heads/main | 2023-06-17T04:18:13.829409 | 2021-07-12T20:08:45 | 2021-07-12T20:08:45 | 383,233,246 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7184115648269653,
"alphanum_fraction": 0.7292418479919434,
"avg_line_length": 17.46666717529297,
"blob_id": "28d26587741860fc1c8ed8bd6287a6604a7af4a0",
"content_id": "79c556ad2c1355be73dcfe645fab2d6cce4538c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 15,
"path": "/Dockerfile",
"repo_name": "torrerod/volume_calculator",
"src_encoding": "UTF-8",
"text": "FROM python:3.7.4\n\nLABEL maintenance=\"Rodrigo\"\n\nRUN pip install --upgrade pip\n\nWORKDIR /usr/src/app\nCOPY requirements.txt ./\nRUN pip install -r requirements.txt\n\nCOPY . .\nWORKDIR /usr/src/app/volume_calculator\n\n# command to run on container start\nCMD [\"python\", \"__init__.py\"]\n"
},
{
"alpha_fraction": 0.6412742137908936,
"alphanum_fraction": 0.6634349226951599,
"avg_line_length": 27.8799991607666,
"blob_id": "a6a3574f205b95a5791807320b187380429482cf",
"content_id": "29b1cd7ff508b81d33b81df91e0453918fbce593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 722,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 25,
"path": "/volume_calculator/__init__.py",
"repo_name": "torrerod/volume_calculator",
"src_encoding": "UTF-8",
"text": "from flask import Flask, Response, request, url_for, render_template\nfrom forms.volume_form import Volume\n\napp=Flask(__name__)\n\napp.config['SECRET_KEY']='hidden-key'\n\[email protected](\"/\")\ndef main():\n return render_template(\"index.html\")\n\[email protected](\"/volume\", methods=['GET', 'POST'])\ndef volume():\n form=Volume()\n if form.validate():\n diameter=float(request.form['diameter'])\n height=float(request.form['height'])\n calculated_volume=round(3.1415*((diameter/2)**2)*height,ndigits=2)\n return render_template(\"volume.html\",form=form,volume=calculated_volume)\n return render_template(\"volume.html\",form=form)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0',port=5000)\n"
},
{
"alpha_fraction": 0.801369845867157,
"alphanum_fraction": 0.801369845867157,
"avg_line_length": 53.75,
"blob_id": "04cf0889c4ba5921c67f52a344608761d5d830b1",
"content_id": "bc8a34862faf469c36a606aa003f9289192c8021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 8,
"path": "/volume_calculator/forms/volume_form.py",
"repo_name": "torrerod/volume_calculator",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, IntegerField, DecimalField, BooleanField, SubmitField\nfrom wtforms.validators import DataRequired\n\nclass Volume(FlaskForm):\n diameter= DecimalField(label=\"Diameter in mm\",validators=[DataRequired(message='please enter the diameter in mm')])\n height=DecimalField(label=\"Cylinder height mm\",validators=[DataRequired()])\n submit=SubmitField(label=\"calculate volume\")\n"
},
{
"alpha_fraction": 0.5072992444038391,
"alphanum_fraction": 0.7031630277633667,
"avg_line_length": 16.125,
"blob_id": "5036556698173d37d47d7156f63ecd17754c861c",
"content_id": "8fb4e64e463c19d3979d115368f99e4d87e09bb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 822,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 48,
"path": "/requirements.txt",
"repo_name": "torrerod/volume_calculator",
"src_encoding": "UTF-8",
"text": "alembic==0.9.9\nasync-generator==1.10\nblinker==1.4\nchardet==3.0.4\nclick==6.7\ncycler==0.10.0\nentrypoints==0.3\nenum34==1.1.10\nFlask==1.0.2\nFlask-Dance==0.14.0\nFlask-DebugToolbar==0.10.1\nFlask-Login==0.4.1\nFlask-Migrate==2.1.1\nFlask-OAuth==0.12\nFlask-OAuthlib==0.9.4\nFlask-SQLAlchemy==2.3.2\nFlask-WTF==0.14.2\nhttplib2==0.11.3\nidna==2.6\nitsdangerous==0.24\njedi==0.17.0\nkiwisolver==1.3.1\nlazy==1.3\nMako==1.0.7\nmatplotlib==3.3.4\nmistune==0.8.4\nnumpy==1.19.5\noauth2==1.9.0.post1\noauthlib==2.0.7\nopencv-python==4.5.1.48\npandas==1.1.5\nPillow==8.1.2\npython-editor==1.0.3\npytz==2021.1\npyueye==4.90.0.0\npyzmq==20.0.0\nrequests==2.18.4\nrequests-oauthlib==0.8.0\nSQLAlchemy==1.2.6\nSQLAlchemy-Utils==0.33.2\nterminado==0.9.4\ntraitlets==4.3.3\nurllib3==1.22\nURLObject==2.4.3\nwebencodings==0.5.1\nWerkzeug==0.14.1\nwincertstore==0.2\nWTForms==2.1\n"
}
] | 4 |
jmaloyjr/Pong | https://github.com/jmaloyjr/Pong | eb1aee0deedb3a1e03a1ea2c1e3335c79a7ce202 | aca7e44cb9ad8a97d44e67cee5622e2ca1e40a1f | 3f7834186620baecff9bb8a3607a284a36475489 | refs/heads/master | 2020-04-04T17:50:56.695785 | 2018-11-05T18:20:09 | 2018-11-05T18:20:09 | 156,138,598 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5741444826126099,
"alphanum_fraction": 0.5950570106506348,
"avg_line_length": 16.5,
"blob_id": "c3225151539e78d27abfca80a9d661ab689f9208",
"content_id": "b0ca65a253c069c50a112cd2d835e3fe9b17be8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1578,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 90,
"path": "/Pong.py",
"repo_name": "jmaloyjr/Pong",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Pong Game (might make it cooler)\n Author: Jack Maloy\n Date: 11/4/18\n\"\"\"\nimport sys\nimport os\nimport time\nimport turtle\n\n#----- Global Variables -----#\n\nplayerSpeed = 15\ncomputerSpeed = 10\n\n#----- VISUALS NEEDED -----#\n\n# Going to create a background for pong to be played\nscreen = turtle.Screen()\nscreen.bgcolor(\"black\")\nscreen.title(\"Pong\")\n\n# Player turtle\nplayer = turtle.Turtle()\nplayer.color(\"green\")\nplayer.shape(\"square\")\nplayer.shapesize(.5, 4)\nplayer.tilt(90)\nplayer.penup()\nplayer.speed(0)\nplayer.setposition(300, 0)\n\n# Computer Turtle\ncomp = turtle.Turtle()\ncomp.color(\"green\")\ncomp.shape(\"square\")\ncomp.shapesize(.5, 4)\ncomp.tilt(90)\ncomp.penup()\ncomp.speed(0)\ncomp.setposition(-300, 0)\n\n\n#----- Player movements -----#\n\ndef moveDown():\n if player.ycor() > -300:\n y = player.ycor()\n y -= playerSpeed\n player.sety(y)\n\ndef moveUp():\n if player.ycor() < 300:\n y = player.ycor()\n y += playerSpeed\n player.sety(y)\n \n\nturtle.onkey(moveDown, \"Down\")\nturtle.onkey(moveUp, \"Up\")\nturtle.listen()\n\ndef updateComputer():\n \n if comp.ycor() > player.ycor():\n y = comp.ycor()\n y -= computerSpeed\n comp.sety(y)\n elif comp.ycor() < player.ycor():\n y = comp.ycor()\n y += computerSpeed\n comp.sety(y)\n else:\n #Dont really need to do anything\n y = comp.ycor()\n comp.sety(y)\n \n\n#----- Main Loop -----#\n\nwhile True:\n\n # Implement Computer following ball\n updateComputer()\n \n \n \n\n\ndelay = raw_input(\"Press enter to finish\")\n\n\n\n"
}
] | 1 |
tomkielczewski/BoatScraper | https://github.com/tomkielczewski/BoatScraper | eb9b0f17151d8431863350653ac1e1e506b66536 | f813bcefe02d035c249aef4d3b4bbfbc26ba12d7 | e8498356ee4f736f3eed7bccb6b77db88c677dd0 | refs/heads/main | 2023-06-11T13:47:44.840748 | 2021-06-30T19:37:34 | 2021-06-30T19:37:34 | 372,268,120 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8048780560493469,
"alphanum_fraction": 0.8048780560493469,
"avg_line_length": 19.5,
"blob_id": "3f10afdd1dcd88654dce5ad81cb04ed8a898eee8",
"content_id": "b3191cfc083fc877b498d5b03877ed35a4000ee6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/README.md",
"repo_name": "tomkielczewski/BoatScraper",
"src_encoding": "UTF-8",
"text": "# BoatScraper\nWeb scraper for yacht data\n"
},
{
"alpha_fraction": 0.5704789757728577,
"alphanum_fraction": 0.5869012475013733,
"avg_line_length": 38.9609375,
"blob_id": "c122e8b730f04791ddf91baf3e6cefde75321578",
"content_id": "561f40ef2174c138008a2f815fdeaa1679062add",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5138,
"license_type": "no_license",
"max_line_length": 282,
"num_lines": 128,
"path": "/scraper.py",
"repo_name": "tomkielczewski/BoatScraper",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport time\nimport pandas as pd\n\ndef download_pages(url1, url2, path1, path2):\n for i in range(1,15):\n # url = \"https://www.clickandboat.com/pl/czarter-jacht%C3%B3w/szukaj?where=Polska&_nbKm=1000&_tri=Selection&TypeNavigation=With%20or%20without%20captain&LongueurMin=0&LongueurMax=60&PrixJourMin=0&PrixJourMax=1005&ProduitTypeId=Sailboat;&_page=\"+ str(i) +\"&hasDiscount=0\"\n url = url1 + str(i) + url2\n page = urlopen(url)\n html = page.read().decode(\"utf-8\")\n soup = BeautifulSoup(html, \"html.parser\")\n \n # file_name = \"clickandboat/clickandboat_polska_zaglowe_\" + str(i) + \".html\"\n file_name = path1 + str(i) + path2\n with open(file_name, 'w', encoding = 'utf-8') as file:\n file.write(str(soup))\n print('Downloaded: ', file_name)\n time.sleep(10)\n\ndef download_product(url, path):\n url = url\n page = urlopen(url)\n html = page.read().decode(\"utf-8\")\n soup = BeautifulSoup(html, \"html.parser\")\n\n file_name = path\n with open(file_name, 'w', encoding = 'utf-8') as file:\n file.write(str(soup))\n print('Downloaded: ', file_name)\n time.sleep(10)\n\n#TODO:\ndef verify_details(columns):\n for column in columns:\n pass\n \ndef scrap_data(offer_url):\n dataframe = pd.DataFrame(columns = ['Producent', 'Model', 'Rok', 'Pojemność', 'Liczba kabin', 'Liczba koi', 'Ilość kabin łazienkowych', 'Długość', 'Szerokość', 'Zanurzenie', 'Moc', 'Lokalizacja', 'Wyposażenie'])\n row = {}\n with open(offer_url, 'r', encoding = 'utf-8') as file:\n\n soup = BeautifulSoup(file, \"html.parser\")\n boatDetails = soup.find(\"div\", {\"class\": \"boatDetails__list\"})\n children = boatDetails.findChildren(\"div\" , recursive=False)\n for child in children:\n column_name = child.contents[0].strip().replace(':', '')\n span = child.find(\"span\")\n value = span.contents[0].strip()\n row[column_name] = value\n\n localisation = soup.find(class_='map__text')\n span = localisation.find(\"span\")\n row['Lokalizacja'] = span.contents[0].strip()\n\n equipment = soup.find(\"div\", {\"class\": \"itemsList\"})\n children = equipment.find_all(\"div\", {\"class\": \"itemsList__text\"})\n equipment = ''\n for child in children:\n equipment = equipment + child.contents[0].strip() + ', '\n row['Wyposażenie'] = equipment\n\n dataframe = dataframe.append(row, ignore_index=True)\n return dataframe\n\ndef scrap_offers():\n pass\n\ndef scrap_porduct_links(url):\n links = []\n with open(url, 'r', encoding = 'utf-8') as file:\n soup = BeautifulSoup(file, \"html.parser\")\n a_elems = soup.find_all(\"a\", {\"class\": \"product-link\"})\n for a in a_elems:\n links.append(a['href'])\n return links\n\nif __name__ == \"__main__\":\n \n # print(scrap_porduct_links('clickandboat/clickandboat_polska_zaglowe_1.html'))\n df = pd.DataFrame(columns = ['Producent', 'Model', 'Rok', 'Pojemność', 'Liczba kabin', 'Liczba koi', 'Ilość kabin łazienkowych', 'Długość', 'Szerokość', 'Zanurzenie', 'Moc', 'Lokalizacja', 'Wyposażenie'])\n for i in range(1, 15):\n # links = links + scrap_porduct_links('clickandboat/clickandboat_polska_zaglowe_' + i + '.html')\n links = scrap_porduct_links('clickandboat/clickandboat_polska_zaglowe_' + str(i) + '.html')\n j = 0\n for link in links:\n j += 1\n product_path = 'clickandboat/products/product_' + str(i*10 + j - 10) + '.html'\n # download_product(link, product_path)\n # df = df.append(scrap_data(product_path))\n try:\n download_product(link, product_path)\n df = df.append(scrap_data(product_path))\n except:\n print('Error during 
scraping data from: ' + link)\n \n df.to_excel(\"CnBData.xlsx\") \n\n # children = boatDetails.findChildren(\"div\" , recursive=False)\n # df = scrap_data('clickandboat_example_page.html')\n # print(df)\n\n # for a in a_elems:\n # print(a['href'])\n # url = a_elems[0]['href']\n # page = urlopen(url)\n # html = page.read().decode(\"utf-8\")\n # soup = BeautifulSoup(html, \"html.parser\")\n # file_name = \"clickandboat_example_page.html\"\n # with open(file_name, 'w', encoding = 'utf-8') as file:\n # file.write(str(soup))\n\n \n\n\n\n \n\n\n # url = url1 + str(i) + url2\n # page = urlopen(url)\n # html = page.read().decode(\"utf-8\")\n # soup = BeautifulSoup(html, \"html.parser\")\n # download_pages(\n # \"https://www.clickandboat.com/pl/czarter-jacht%C3%B3w/szukaj?where=Polska&_nbKm=1000&_tri=Selection&TypeNavigation=With%20or%20without%20captain&LongueurMin=0&LongueurMax=60&PrixJourMin=0&PrixJourMax=1005&ProduitTypeId=Sailboat;&_page=\"\n # , \"&hasDiscount=0\"\n # , \"clickandboat/clickandboat_polska_zaglowe_\"\n # ,\".html\")\n"
}
] | 2 |
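The `scrap_data` function in the scraper above reads each attribute of the `boatDetails__list` block by taking the text node before the `<span>` as the column name and the `<span>` contents as the value. A minimal sketch of that parsing pattern, run against a stand-in HTML fragment (the real clickandboat markup is assumed, not reproduced here):

```python
# Sketch of the scrap_data parsing pattern; the HTML fragment below is a
# made-up stand-in, not the real clickandboat page structure.
from bs4 import BeautifulSoup

html = """
<div class="boatDetails__list">
  <div>Producent: <span>Beneteau</span></div>
  <div>Rok: <span>2014</span></div>
</div>
"""

soup = BeautifulSoup(html, "html.parser")
details = soup.find("div", {"class": "boatDetails__list"})

row = {}
for child in details.findChildren("div", recursive=False):
    column_name = child.contents[0].strip().replace(":", "")  # text before the <span>
    value = child.find("span").contents[0].strip()            # text inside the <span>
    row[column_name] = value

print(row)  # {'Producent': 'Beneteau', 'Rok': '2014'}
```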
mojomortgages/coding-challenges | https://github.com/mojomortgages/coding-challenges | c7840f6ba789ca7a8a60e317a27f7cbcbd8b3ea1 | a67c4ffbd21c529eda63a0d5b34202495d88a3db | cbc10a5ed908e32827d0049ad12566a10a539d13 | refs/heads/master | 2020-05-16T12:37:38.767973 | 2019-04-23T16:09:02 | 2019-04-23T16:09:02 | 183,050,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5780525207519531,
"alphanum_fraction": 0.5850077271461487,
"avg_line_length": 31.630252838134766,
"blob_id": "0dd9b19cf1ea522730c1013df5a17142ec884559",
"content_id": "1b53fb9a64f3ae52b786fd8b6d0cc5ef783041c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3882,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 119,
"path": "/Challenge3/startGame.js",
"repo_name": "mojomortgages/coding-challenges",
"src_encoding": "UTF-8",
"text": "var express = require('express');\nvar app = express();\nvar { performance } = require('perf_hooks');\nconst bodyParser = require('body-parser');\napp.use(bodyParser.json());\n\n//require map coords\nconst map = require('./map1.json');\n\nlet playerStartTimes = {};\nlet playerEndTimes = {};\n\napp.get('/start/:userName', (req, res) => {\n const userStartTime = performance.now();\n const userName = req.params.userName;\n playerStartTimes[userName] = userStartTime;\n console.log(`${userName} Has joined the game..`);\n\n res.send(shuffle(map));\n});\n\napp.get('/submit/:userName', (req, res) => {\n const userEndTime = performance.now();\n const userName = req.params.userName;\n playerEndTimes[userName] = userEndTime;\n\n let results = {};\n results.timeAccrued = playerEndTimes[userName] - playerStartTimes[userName];\n const submission = req.body.submission;\n const path = submission.path;\n //check result\n results.distanceAccrued = checkResults.calculateDistance(path);\n results.returnsToStart = checkResults.returnsToStart(path);\n results.eachPointVisited = checkResults.eachPointVisited(path);\n results.noRepeats = checkResults.noRepeats(path);\n\n console.log(userName, \" Returns back to start: \", results.returnsToStart);\n console.log(userName, \" Visits every point: \", results.eachPointVisited);\n console.log(userName, \" Doesn't double visit any points (apart from the first): \", results.noRepeats);\n console.log(userName, \" Chosen path distance is \", results.distanceAccrued);\n console.log(`${userName} Finished in ${results.timeAccrued} milliseconds\\n`);\n\n res.send(results);\n});\n\nconst checkResults = {\n returnsToStart(path){\n //true if first Point is the same as the last, otherwise false\n return path[0] == path[path.length-1];\n },\n eachPointVisited(path){\n //true if each Point is visited at least once, otheriwse false\n let check = true;\n for(key in map){\n if(path.indexOf(key) == -1){\n check = false;\n }\n }\n return check;\n },\n noRepeats(path){\n //false if any Point apart from first is visted twice, otherwise true\n let check = true;\n let count = {};\n path.map(Point => {\n //If count doesn't exist start it, otherwise increment by 1\n !(count[Point]) ? count[Point] = 1 : count[Point] += 1;\n\n if(path.indexOf(Point) == 0){\n if(count[Point] > 2){\n check = false;\n }\n }else{\n if(count[Point] > 1){\n check = false;\n }\n }\n });\n return check;\n },\n calculateDistance(path){\n let distance = 0;\n for(let i = 0; i < path.length; i++){\n let Point = path[i];\n let PointX = map[Point][0];\n let PointY = map[Point][1];\n \n //If we're not looking at the last Point (which should also be the first)\n if(i != path.length -1){\n\n let nextPointX = map[path[path.indexOf(Point) +1]][0];\n let nextPointY = map[path[path.indexOf(Point) +1]][1];\n\n let xDistance = nextPointX - PointX;\n let yDistance = nextPointY - PointY;\n\n let hDistance = Math.sqrt(Math.pow(xDistance, 2) + Math.pow(yDistance, 2));\n distance += hDistance;\n }\n };\n\n return distance;\n }\n}\n\nfunction shuffle(map){\n let newMap = {};\n let keyList = Object.keys(map);\n \n while(keyList.length > 0){\n var j = keyList[Math.floor(Math.random()*keyList.length)];\n newMap[j] = map[j];\n keyList.splice(keyList.indexOf(j),1);\n }\n return newMap;\n}\n\nconst port = process.env.PORT || 3000;\napp.listen(port, () => console.log('\\nWaiting for players...'))"
},
{
"alpha_fraction": 0.7122877240180969,
"alphanum_fraction": 0.7257742285728455,
"avg_line_length": 28.01449203491211,
"blob_id": "62cab9100cacf4d88591989c5e511ac4da0d7e55",
"content_id": "6f56b784eb82b0e007ccf14f57f4ed42b46f63fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2002,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 69,
"path": "/Challenge2/maze.server/README.md",
"repo_name": "mojomortgages/coding-challenges",
"src_encoding": "UTF-8",
"text": "# maze.server\n\n## Installation\n### Prerequisites\n\n - Install node.js https://nodejs.org/en/download/\n - Install npm https://www.npmjs.com/get-npm\n\n ### Launching\n - Run `npm install`\n - Run `node server.js`\n\n## Endpoints\n### /{width}/{height}/{player}/{command}\n\nExample `/10/10/joe/start`\n\nParameters:\n-`width` number of tiles horizontally in the maze (must be a positive integer)\n-`height` number of tiles vertically in the maze (must be a positive integer)\n-`player` string name of your player (no special characters allowed)\n-`command` must be in the following list (start/up/right/down/left)\n\nUsage:\n\nThe first command that references a new dimension of maze generates a new maze, this maze is stored until the server stops.\nThe first command for a new player must be `start`\nEach command after start increments the player moves by 1\n\nOutput:\n\n`{ error: 'Message', tile: [0,1,1,0], player: { x: 0, y: 0, finished: false }, start: { x: 0, y: 0 }, finish: { x: 10, y: 10 } }`\n\n-`error` message will be populated when an invalid command is attempted\n-`tile` the point on the maze your player is now positioned. Each number represents a direction (up/right/down/left) 0 = wall, 1 = floor\n-`player` x and y co-ordinates of the player position as well as a completed flag that returns true when you have completed the maze\n-`start` the starting position for players in the maze\n-`finish` the position the player must reach to complete the maze\n\n### /status/{width}/{height}\n\nExample `/status/10/10`\n\nParameters:\n-`width` number of tiles horizontally in the maze (must be a positive integer)\n-`height` number of tiles vertically in the maze (must be a positive integer)\n\nUsage:\n\nRetrieve the status of an entire maze\n\nOutput:\n\n`{ tiles: [][], players: {} }`\n\n-`tiles` two-dimensional array representing the maze\n-`players` all participating players on the maze\n\n### /draw-status/{width}/{height}\n\nExample `/draw-status/10/10`\n\nUsage:\n\nReturn a HTML page that renders the maze status\n\nOutput:\n\nindex.html\n"
},
{
"alpha_fraction": 0.5610425472259521,
"alphanum_fraction": 0.5843621492385864,
"avg_line_length": 23.299999237060547,
"blob_id": "4425180611bcd287d331910839110a3bfcb071ce",
"content_id": "428d2bd0d179d195f869a4e92a3869352033c1b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 30,
"path": "/Challenge2/maze.server/test.py",
"repo_name": "mojomortgages/coding-challenges",
"src_encoding": "UTF-8",
"text": "import requests\n\nurl = 'http://localhost:3002/10/10/jacob/'\n\nmove = requests.get(url + 'start')\n\n# while \"completed\" not in move.json()[\"error\"]:\n\nfor x in range(5):\n if move.json()[\"tile\"][3] == 1:\n move = requests.get(url + 'left')\n elif move.json()[\"tile\"][0] == 1:\n move = requests.get(url + 'up')\n elif move.json()[\"tile\"][1] == 1:\n move = requests.get(url + 'right')\n else:\n move = requests.get(url + 'down')\n\n'''\nmove1 = requests.get(url + 'up')\nif \"completed\" in move1.json()[\"error\"]:\n print(\"Completed\")\nelse:\n print(\"not yet\")\n'''\n\n# resp = requests.get(url + 'down')\n# data = resp.json() # Check the JSON Response Content documentation below\n\nprint(move.json()[\"tile\"])\n"
}
] | 3 |
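The maze.server README above documents the `/{width}/{height}/{player}/{command}` endpoint, the `tile` array (`[up, right, down, left]`, 1 = floor) and the `player.finished` flag, and `test.py` is a minimal client that always takes the first open direction. Below is a sketch of a client that walks the maze with a left-hand wall follower instead; the host and port are an assumption carried over from `test.py` (`localhost:3002`), and `demobot` is a made-up player name.

```python
# Left-hand wall follower against the maze.server API; base URL and player
# name are assumptions, and only fields documented in the README are used.
import requests

BASE = "http://localhost:3002/10/10/demobot/"   # assumed host/port (from test.py)
MOVES = ["up", "right", "down", "left"]         # same order as the tile array

state = requests.get(BASE + "start").json()
heading = 2                                     # arbitrary starting heading (down)

for _ in range(500):                            # hard cap so the sketch terminates
    if state["player"]["finished"]:
        print("maze completed")
        break
    # prefer turning left, then straight, then right, then back
    for turn in (-1, 0, 1, 2):
        direction = (heading + turn) % 4
        if state["tile"][direction] == 1:       # 1 = floor, 0 = wall
            heading = direction
            state = requests.get(BASE + MOVES[direction]).json()
            break
else:
    print("gave up after 500 moves")
```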
hyp1231/Info_search | https://github.com/hyp1231/Info_search | 653d31716ace859bdb35ab03e55aafceb58a4dde | 5037ab59977922b9775dad0703a4aceaf36b0aaa | 186f3f1c76518d7513d89fd7384838d305e10b64 | refs/heads/master | 2018-02-09T11:01:36.436993 | 2017-07-10T02:40:05 | 2017-07-10T02:40:05 | 96,724,626 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6711711883544922,
"avg_line_length": 16.076923370361328,
"blob_id": "7e08aa7d7bbef14b970a0e5729a5c078a4a15c36",
"content_id": "edc6f19f127f31ae26c5c0f197037b89dc31a876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 888,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 52,
"path": "/html/cgi-bin/vsm.h",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#ifndef VSM\n#define VSM\n\n#include <iostream>\n#include <fstream>\n#include <cstdio>\n#include <vector>\n#include <cstdlib>\n#include <map>\n#include <string>\n#include <sstream>\n#include <cmath>\n#include <cstring>\n#include <algorithm>\n#include <utility>\n#include <ctime>\n#include <unordered_map>\n#include <unistd.h>\nusing namespace std;\n\nstruct State {\n\tint index, cnt;\n\tState(int index = 0, int cnt = 0):index(index), cnt(cnt){}\n};\n\nstruct output {\n\tint id;\n\tdouble num;\n\toutput(int id, double num):id(id), num(num){}\n\tconst bool operator < (const output A) const {\n\t\treturn num > A.num;\n\t}\n};\n\nclass Vsm {\nprivate:\n\tint F, W;\n\tunordered_map<string, int> f_id, w_id;\n\tvector<string> f_list, dic, title_list;\n\tvector<vector<State> > w_list;\n\tvoid repair();\n\tstring change(int id);\n\tvoid print(vector<output>& ans);\n\npublic:\n\tVsm():F(0), W(0){}\n\tvoid m_init();\n\tvoid query(string q);\n\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.5481147766113281,
"alphanum_fraction": 0.5548677444458008,
"avg_line_length": 16.594058990478516,
"blob_id": "09abb7d52e4abd185c8a54525ebbcc50645c7f8b",
"content_id": "069d5cb6539acd3fe4371c1f38c0eb9d8791c9f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1777,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 101,
"path": "/data/database.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"database.h\"\n\nvoid Database::get_name() {\n\n\tstring root = path + \"/websites\";\n\n\tDIR *dir = opendir(root.c_str());\n\tdirent *ptr;\n\n\tvector<string> ve;\n\n\twhile((ptr = readdir(dir)) != NULL) {\n\t\tif( strcmp(ptr->d_name, \".\") == 0 || strcmp(ptr->d_name, \"..\") == 0 )\n\t\t\tcontinue;\n\t\telse if(ptr->d_type == 8 || ptr->d_type == 10 )\n\t\t\tve.push_back(string(ptr->d_name));\n\t}\n\t\n\tsort(ve.begin(), ve.end());\n\n\tofstream ofile;\n\tofile.open(\"name.txt\",ios::out|ios::app);\n\n\tfor (int i = 0; i < (int)ve.size(); ++i) {\n\t\tofile << ve[i] << endl;\n\t}\n\n\tofile.close();\n\n}\n\nvoid Database::extract() {\n\n\tifstream ifile;\n\tifile.open(\"name.txt\");\n\n\tstring filename, html;\n\n\twhile(getline(ifile,filename)) {\n\t\tget_html(filename, html);\n\t\tget_title(filename, html);\n\t\tget_text(filename, html);\n\t\tfilename.clear(); html.clear();\n\t}\n\n\tifile.close();\n\n}\n\nvoid Database::get_title(string filename, string& html) {\n\n\tofstream ofile;\n\tofile.open(\"./title/\" + filename);\n\n\tint l, r = 0;\n\n while(1) {\n \tl = html.find(\"<title>\", r);\n \tr = html.find(\"</title>\", l);\n \tl += 7;\n \tif(l == html.npos || r == html.npos || r <= l)break;\n \tofile << html.substr( l , r - l ) << endl;\n }\n\n ofile.close();\n\n}\n\nvoid Database::get_text(string filename, string& html) {\n\n\tofstream ofile;\n\tofile.open(\"./text/\" + filename);\n\n\tint l, r = 0;\n\n while(1) {\n \tl = html.find(\"<p\", r);\n \tr = html.find(\"</p>\", l);\n \tl += 3;\n \tif(l == html.npos || r == html.npos || r <= l)break;\n \tofile << html.substr( l , r - l ) << endl;\n }\n\n ofile.close();\n\n}\n\nvoid Database::get_html(string filename, string& html) {\n\n\tifstream ifile;\n\tstring cur = path + \"/websites/\" + filename, tmp;\n\tifile.open(cur);\n\n\twhile(getline( ifile, tmp )) {\n\t\ttmp += \"\\n\";\n\t\thtml += tmp;\n\t}\n\n\tifile.close();\n\n}\n"
},
{
"alpha_fraction": 0.5104166865348816,
"alphanum_fraction": 0.5260416865348816,
"avg_line_length": 13.769230842590332,
"blob_id": "f2af77f3c5867ccc30b8a9b876e099858694a2e4",
"content_id": "602b3fbc9b99c444c7f0b6d8f342cf88e2db32ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 13,
"path": "/html/cgi-bin/Makefile",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "CXX = g++\nCXXFLAGS = -std=c++11 -g -O3\n\nall: main clean\n\nmain: vsm.o main.o\n\t$(CXX) $(CXXFLAGS) $^ -o $@ -lpthread\n\n%.o: %.cpp *.h\n\t$(CXX) $(CXXFLAGS) -c -o $@ $< -lpthread\n\nclean:\n\trm -f *.o\n"
},
{
"alpha_fraction": 0.6095890402793884,
"alphanum_fraction": 0.6232876777648926,
"avg_line_length": 15.84615421295166,
"blob_id": "37a5ffb8eaf7c320d3eefdc87908fb3bccad8fb3",
"content_id": "bea79d9ce30e956cbfbc673e266786bb809574ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 26,
"path": "/html/cgi-bin/engi.py",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport cgi, cgitb, os, time\n\nform = cgi.FieldStorage()\n\nsite_text = form.getvalue('textlist')\n\nfl = open(\"./query.txt\", \"w\")\nos.system('chmod 777 ./query.txt')\nfl.write(site_text)\nfl.close()\n\nwhile (os.path.exists(r'./ans.txt') == False):\n\ttime.sleep(0.1)\n\nfl = open(\"./ans.txt\", \"r\")\n\nline = fl.readline()\nwhile line:\n\tprint line\n\tline = fl.readline()\n\t\nfl.close()\nos.system('rm -f ./ans.txt')\n"
},
{
"alpha_fraction": 0.5597269535064697,
"alphanum_fraction": 0.57337886095047,
"avg_line_length": 12.318181991577148,
"blob_id": "642c06216351c133e4acc715bcf8fb648a82a32f",
"content_id": "36a0d00e2b7b6f85ff420868e15d20bb08695687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 22,
"path": "/html/cgi-bin/main.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"vsm.h\"\n\nint main(void) {\n\n\tVsm solve;\n\tsolve.m_init();\n\n\tstring m_input;\n\tfstream m_file;\n\twhile(1) {\n\t\tm_file.open(\"./query.txt\");\n\t\tif(m_file) {\n\t\t\tgetline(m_file, m_input);\n\t\t\tm_file.close();\n\t\t\tremove(\"./query.txt\");\n\t\t\tsolve.query(m_input);\n\t\t}\n\t\tusleep(6e5);\n\t}\n\t\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6116504669189453,
"alphanum_fraction": 0.6213592290878296,
"avg_line_length": 9.300000190734863,
"blob_id": "98ef4ee6cd17195bee72fa0a3eeee5deaed3cd82",
"content_id": "99afa3a55105ad8b203e0e3303375b51a658df62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 10,
"path": "/search/main.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"cut.h\"\n\nint main(void) {\n\n\tCut solve;\n\tsolve.go_through_file();\n\tsolve.save();\n\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5973154306411743,
"alphanum_fraction": 0.6040268540382385,
"avg_line_length": 18.72058868408203,
"blob_id": "51286f2c4cfd3f207bc1586571dddf4525c772fb",
"content_id": "aad6def6872069bdf7d29ccb161f2f2ee6fe8c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1341,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 68,
"path": "/m_crawler/crawler.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"crawler.h\"\n\nset<string> S;\nqueue<string> que;\n\npthread_mutex_t S_mutex, que_mutex;\n\nvoid* th_download( void* args ) {\n\n\tpthread_mutex_lock( &que_mutex ); \n\tif(que.empty()) {\n\t\t\tpthread_mutex_unlock( &que_mutex );\n\t\t\tpthread_exit(0);\n\t\t}\n\t\n\tRoot_Domain now( que.front() ); que.pop(); \n\tpthread_mutex_unlock( &que_mutex );\n\t\t\n\tusleep(100);\n\tvector<string> Websites;\n\n\tcout << \"Th\" << *(int*)args << \" \";\n\tnow.download();\n\tnow.extract( Websites );\n\n\tfor( auto it = Websites.begin(); it != Websites.end(); ++it ) {\n\t\tpthread_mutex_lock( &S_mutex );\n\t\tpthread_mutex_lock( &que_mutex ); \n\n\t\tif(!S.count(*it)) {\n\t\t\tS.insert(*it);\n\t\t\tque.push(*it);\n\t\t}\n\n\t\tpthread_mutex_unlock( &que_mutex ); \n\t\tpthread_mutex_unlock( &S_mutex );\n\t}\n\n\tpthread_exit(0);\n}\n\nvoid Crawler(string st) {\n\n\tS.insert(st);\n\tque.push(st);\n\n\tint thread_id[NUM_THREADS];\n\tpthread_t tids[NUM_THREADS];\n\tpthread_mutex_init( &S_mutex, NULL );\n\tpthread_mutex_init( &que_mutex, NULL );\n\n\twhile(1) {\n\t\tif(que.empty()){\n\t\t\tsleep(10);\n\t\t\tif(que.empty())break;\n\t\t}\n\t\tfor(int i = 0;i < NUM_THREADS;++i) {\n\t\t\tthread_id[i] = i;\n\t\t\tpthread_create( &tids[i], NULL, th_download, (void*)&thread_id[i]);\n\t\t}\n\t}\n\n\tpthread_mutex_destroy( &que_mutex );\n\tpthread_mutex_destroy( &S_mutex );\n\n\tcout << \"Finished!\" << endl;\n\tcout << S.size() << \" url(s) has(have) been downloaded.\" << endl;\n}\n"
},
{
"alpha_fraction": 0.5332376956939697,
"alphanum_fraction": 0.5370635986328125,
"avg_line_length": 23.034482955932617,
"blob_id": "9cc822d2558e94fd13201df1e0e9b96e0d218ab3",
"content_id": "e49880f8896c314c33a5275421c8069d71c10c30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2091,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 87,
"path": "/m_crawler/domain.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"domain.h\"\n\nstring Domain::make_filename() {\n\tint po;\n\tstring tmp = url;\n\tfor(int t = 0, i = 0; i < tmp.size(); ++i) {\n\t\tif(tmp[i] == '/')++t;\n\t\tif(t >= 2) { po = i + 1; break; }\n\t}\n\tstring ans = tmp.substr(po);\n\tfor(int i = 0; i < tmp.size(); ++i) {\n\t\tif(ans[i] == '/')ans[i] = '-';\n\t}\n\treturn ans + \".html\";\n}\n\nvoid Domain::download() {\n\n\tcout << \"Downloading \\\"\" << url << \"\\\".\" << endl;\n\n\tstring strcom = \"wget -q -t 1 \\\"\" + url + \"\\\" -O \\\"./websites/\" + filename + \"\\\"\";\n\tsystem(strcom.c_str());\n\n}\n\nvoid Root_Domain::extract( vector<string>& Websites ) {\n\n\tifstream ifile;\n\tstring cur = \"./websites/\" + filename;\n\tifile.open(cur);\n\n\tstring html, tmp;\n\twhile( getline( ifile, tmp )){\n\t\ttmp += \"\\n\";\n\t\thtml += tmp;\n\t}\n\tifile.close();\n\n\tget_url( html, Websites );\n\n}\n\nvoid Root_Domain::get_postfix( string& html, vector<string>& Websites ) {\n\n\tstring pattern(\"href=\\\"([\\\\w-])+[\\\\w-]+([\\\\w- ./\\\\(\\\\)?%&=]*)?\\\"\"), tmp;\n pattern = \"[[:alpha:]]*\" + pattern + \"[[:alpha:]]*\";\n\n regex r(pattern);\n for ( sregex_iterator it(html.begin(), html.end(), r), end; it != end; ++it ) {\n \ttmp = it->str();\n \tif(tmp.find(\".css\") == tmp.npos && tmp.find(\".js\") == tmp.npos) {\n \t\ttmp = Post_To_All(tmp);\n \tWebsites.push_back(tmp);\n \t}\n }\n}\n\nvoid Root_Domain::get_whole( string& html, vector<string>& Websites ) {\n\n \tstring pattern(\"http(s)?://([\\\\w-]+\\\\.)+[\\\\w-]+(/[\\\\w- ./\\\\(\\\\)?%&=]*)?\"), tmp;\n pattern = \"[[:alpha:]]*\" + pattern + \"[[:alpha:]]*\";\n\n regex r(pattern);\n for (sregex_iterator it(html.begin(), html.end(), r), end;it != end;++it) {\n tmp = it->str();\n if(tmp.find(root) != tmp.npos)\n \tWebsites.push_back(tmp);\n }\n\n}\n\nvoid Root_Domain::get_url( string& html, vector<string>& Websites ) {\n\n\tget_postfix(html, Websites);\n\tget_whole(html, Websites);\n\n\tsort(Websites.begin(), Websites.end());\n\tint m = unique( Websites.begin(), Websites.end() ) - Websites.begin();\n\tWebsites.resize(m);\n\n}\n\nstring Root_Domain::Post_To_All( string profix ) {\n\tstring ans = root;\n\tans += profix.substr(6, profix.size() - 7);\n\treturn ans;\n}\n"
},
{
"alpha_fraction": 0.7208835482597351,
"alphanum_fraction": 0.7208835482597351,
"avg_line_length": 16.172412872314453,
"blob_id": "641d6b157c8cf370ed43d71cded172667bce7f71",
"content_id": "2cdfc5d73656f6fce0ab9720ae293aa3fdbc95e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 29,
"path": "/data/database.h",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#ifndef DATABASE\n#define DATABASE\n\n#include <iostream>\n#include <string>\n#include <fstream>\n#include <dirent.h>\n#include <cstring>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nclass Database {\n\nprivate:\n\tstring path;\n\tvoid get_html(string filename, string& html);\n\tvoid get_title(string filename, string& html);\n\tvoid get_text(string filename, string& html);\n\npublic:\n\tDatabase(string path):path(path){}\n\tstring show(){ return path; }\n\tvoid get_name();\n\tvoid extract();\n\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.6981339454650879,
"alphanum_fraction": 0.7003293037414551,
"avg_line_length": 19.244443893432617,
"blob_id": "2dd59e577534787ed7d0377c0b49716261ae90e7",
"content_id": "58dc2db5dad77bb83b48b1c0fb57beee97968d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 45,
"path": "/m_crawler/domain.h",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#ifndef DOMAIN_H\n#define DOMAIN_H\n\n#include <vector>\n#include <iostream>\n#include <string>\n#include <cstdlib>\n#include <fstream>\n#include <cstdio>\n#include <regex>\n#include <algorithm>\n#include <unistd.h>\nusing namespace std;\n\nclass Domain {\nprivate:\n\tstring url;\n\tstring make_filename();\n\npublic:\n\tstring filename;\n\tDomain(string url) : url(url) {\n\t\tfilename = make_filename();\n\t}\n\tvoid download();\t\t\t\t\t\t//download html\n\n};\n\nclass Root_Domain : public Domain {\nprivate:\n\tstring root;\n\tvoid get_postfix(string& html, vector<string>& Websites);\n\tvoid get_whole(string& html, vector<string>& Websites);\n\tvoid get_url(string& html, vector<string>& Websites);\n\tstring Post_To_All(string profix);\t\t//fix all the url from a profix\n\npublic:\n\tRoot_Domain(string url):Domain(url) {\n\t\troot = url.substr(0, url.find_last_of(\"/\") + 1);\n\t}\n\tvoid extract(vector<string>& Websites);\t//extract the urls of the html\n\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6018518805503845,
"avg_line_length": 9.800000190734863,
"blob_id": "6528f6a106dadb3fe17f54cae921c221f29445b6",
"content_id": "afd0ac717836efcd71056489dca4a26cee984d1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 10,
"path": "/data/main.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"database.h\"\n\nint main(void) {\n\n\tDatabase now(\".\");\n\tnow.get_name();\n\tnow.extract();\n\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.51416015625,
"alphanum_fraction": 0.53515625,
"avg_line_length": 24.92405128479004,
"blob_id": "59359856fb25267280a20044a586363537695c3a",
"content_id": "f082357430e4f5955b2746c135e82427f5726307",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4100,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 158,
"path": "/html/cgi-bin/vsm.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"vsm.h\"\n#include \"include/thulac.h\"\n\nTHULAC tool;\ndouble G[6000][28000] = { 0 };\n\nvoid Vsm::repair() {\n\n\tifstream ifile;\n\tifile.open(\"./file\");\n\n\tstring tmp, web;\n\tlong long num;\n\twhile(getline(ifile, tmp)) {\n\t\tgetline(ifile, web);\n\t\tstringstream ss(tmp);\n\t\tss >> num;\n\t\tf_list.push_back(web);\n\t\tf_id.insert(make_pair(web, (int)num));\n\t\tstring path = \"./title/\" + web, con;\n\t\tifstream ifile2;\n\t\tifile2.open(path);\n\t\tgetline(ifile2, con);\n\t\tifile2.close();\n\t\ttitle_list.push_back(con);\n\t}\n\tifile.close();\n\n\tifile.open(\"./dic\");\n\twhile(getline(ifile, tmp)) {\n\t\tgetline(ifile, web);\n\t\tstringstream ss(tmp);\n\t\tss >> num;\n\t\tdic.push_back(web);\n\t\tvector<State> ve;\n\t\tw_list.push_back(ve);\n\t\tw_id.insert(make_pair(web, (int)num));\n\t}\n\tifile.close();\n\n\tlong long a, b;\n\tifile.open(\"./inverted\");\n\twhile(getline(ifile, tmp)) {\n\t\tstringstream ss(tmp);\n\t\tss >> num;\n\t\twhile(1) {\n\t\t\tgetline(ifile, tmp);\n\t\t\tss.clear();ss.str(tmp);\n\t\t\tss >> a >> b;\n\t\t\tif(a == -1 && b == -1)break;\n\t\t\tw_list[num].push_back(State(a,b));\n\t\t}\n\t}\n\tifile.close();\n\n\tF = f_list.size();\n\tW = w_list.size();\n\n}\n\nvoid Vsm::m_init() {\n\t\n\trepair();\n\n\tfor (int i = 0; i < (int)w_list.size(); ++i) {\n\t\tdouble div = log10(F / (double)w_list[i].size());\n\t\tfor(int j = 0;j < (int)w_list[i].size(); ++j) {\n\t\t\tState& tmp = w_list[i][j];\n\t\t\tG[tmp.index][i] = (1 + log10(tmp.cnt)) * div;\n\t\t}\n\t}\n\n\ttool.init(NULL, NULL, NULL, NULL, true, '_');\n}\n\nvoid Vsm::query(string q) {\n\n\tTHULAC_result res;\n\ttool.cut(q, res);\n\t\n\tint q_cnt[28000] = { 0 };\n\tfor (int i = 0; i < (int)res.size(); ++i) {\n\t\tunordered_map<string, int>::const_iterator it = w_id.find(res[i].first);\n\t\tif(it != w_id.end()) {\n\t\t\tint t_id = it->second;\n\t\t\t++q_cnt[t_id];\n\t\t}\n\t}\n\n\tdouble q_ve[28000] = { 0 }, q_l = 0;\n\tfor(int i = 0; i < W; ++i) if(q_cnt[i]) {\n\t\tq_ve[i] = (1 + log10(q_cnt[i])) * log10(F / (double)w_list[i].size());\n\t\tq_l += q_ve[i] * q_ve[i];\n\t}\n\tq_l = sqrt(q_l);\n\n\tvector<output> ans;\n\tfor(int i = 0; i < F; ++i) {\n\t\tdouble sum = 0, l = 0;\n\t\tfor(int j = 0; j < W; ++j) { \n\t\t\tsum += q_ve[j] * G[i][j];\n\t\t\tl += G[i][j] * G[i][j];\n\t\t}\n\t\tif(sum)ans.push_back(output(i, sum / q_l / sqrt(l)));\n\t}\n\n\tsort(ans.begin(), ans.end());\n\tprint(ans);\n}\n\nstring Vsm::change(int id) {\n\tstring ans = f_list[id];\n\tfor (int i = 0; i < ans.size(); ++i) {\n\t\tif(ans[i] == '-')ans[i] = '/';\n\t}\n\treturn ans.substr(0, ans.size() - 5);\n}\n\nvoid Vsm::print(vector<output>& ans) {\n\tofstream ofile;\n\tofile.open(\"./ans.txt\");\n\n\tofile << \"Content-type:text/html\\n\\n\";\n\tofile << \"<html>\" << endl;\n\tofile << \"<head>\" << endl;\n\tofile << \"\t<meta charset=\\\"utf-8\\\">\" << endl;\n\tofile << \"\t<title>results - Info_search</title>\" << endl;\n\tofile << \"\t<style type=\\\"text/css\\\">\" << endl;\n\tofile << \"\t\tbody {\" << endl;\n\tofile << \"\t\t\tbackground-image: url(../imgs/bg.png);\" << endl;\n\tofile << \"\t\t\tbackground-size: 85%%;\" << endl;\n\tofile << \"\t\t\tbackground-repeat: no-repeat;\" << endl;\n\tofile << \"\t\t\tbackground-position: center 100px;\" << endl;\n\tofile << \"\t\t}\" << endl << \"\t</style>\" << endl;\n\tofile << \"</head>\" << endl << \"<body>\" << endl;\n\tofile << \"<img src=\\\"../imgs/pattern.png\\\" alt=\\\"info_pattern\\\" width=\\\"136\\\" height=\\\"34\\\" style=\\\"float:left;\\\">\" << 
endl;\n\tofile << \"\t<form action=\\\"http://localhost/cgi-bin/engi.py\\\" method=\\\"get\\\" >\" << endl;\n\tofile << \"\t\t<input type=\\\"text\\\" name=\\\"textlist\\\" style=\\\"height: 33px; width: 300px\\\" />\" << endl;\n\tofile << \" \t<input type=\\\"submit\\\" value=\\\"Info一下!\\\" style=\\\"height: 33px;\\\" /><br>\" << endl;\n\tofile << \"\t</form>\" << endl;\n\n\tint m_size = ans.size();\n\tofile << \"<p><font size=\\\"3\\\" color=\\\"a1a2a2\\\">\";\n\tofile << m_size << \" result(s) in total :D</font></p>\" << endl;\n\n\tfor(int i = 0; i < min(10, m_size); ++i) {\n\t\tofile << \"<p>\" << endl;\n\t\tstring url = change(ans[i].id);\n\t\tofile << \"\t<a href=\\\"http://\" << url << \"\\\" style=\\\"color: #1c4fa1; font-size: 19px\\\">\" << title_list[ans[i].id] << \"</a> \" << endl;\n\t\tofile << \"\t<a href=\\\"http://\" << url << \"\\\" style=\\\"color: #a1a2a2; font-size: 12px\\\">[\" << url << \"]</a>\" << endl;\n\t\tofile << \"</p>\" << endl;\n\t}\n\n\tofile << \"</body>\" << endl;\n\tofile << \"</html>\" << endl;\n\n\tofile.close();\n}\n"
},
{
"alpha_fraction": 0.5547839403152466,
"alphanum_fraction": 0.5613425970077515,
"avg_line_length": 19.571428298950195,
"blob_id": "6890165f857559a7c54ba64d40f5031ab3f568a7",
"content_id": "730d23dfec04b4d250d620328fd9f346fe699ab2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2592,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 126,
"path": "/search/cut.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"cut.h\"\n#include \"include/thulac.h\"\n\nTHULAC tool;\nset<string> Tag;\n\nvoid Cut::go_through_file() {\n\n\tifstream ifile;\n\tifile.open(\"name.txt\");\n\n\tm_init();\n\tstring filename;\n\n\tint flag=0;\n\n\twhile(getline(ifile, filename)) {\n\n\t\tcout << filename << endl;\n\n\t\tf_list.push_back(filename);\n\t\tf_id[filename] = F++;\n\n\t\tvector<string> word_list;\n\t\tcut_word(filename, word_list);\n\t\tcalcu_word(word_list);\n\n\t}\n\tifile.close();\n}\n\nvoid Cut::save() {\n\n\tofstream ofile;\n\tofile.open(\"./file\");\n\tfor (int i = 0; i < (int)f_list.size(); ++i) {\n\t\tofile << i << endl;\n\t\tofile << f_list[i] << endl;\n\t}\n\tofile.close();\n\n\tofile.open(\"./dic\");\n\tfor (int i = 0; i < (int)dic.size(); ++i) {\n\t\tofile << i << endl;\n\t\tofile << dic[i] << endl;\n\t}\n\tofile.close();\n\n\tofile.open(\"./inverted\");\n\tfor (int i = 0; i < (int)w_list.size(); ++i) {\n\t\tofile << i << endl;\n\t\tfor (int j = 0; j < (int)w_list[i].size(); ++j)\t{\n\t\t\tofile << w_list[i][j].index << \" \" << w_list[i][j].cnt << endl;\n\t\t}\n\t\tofile << -1 << \" \" << -1 << endl;\n\t}\n\tofile.close();\n\n\tcout << dic.size() << \" words in total :)\" << endl;\n}\n\nvoid Cut::cut_word(string filename, vector<string>& word_list) {\n\n\tstring text;\n\tfile_to_string(filename, text);\n\n\tTHULAC_result result;\n\ttool.cut(text, result);\n\n\tfor (int i = 0; i < (int)result.size(); ++i) {\n\t\tif(Tag.count(result[i].second))\n\t\t\tword_list.push_back(result[i].first);\n\t}\n\n}\n\nvoid Cut::file_to_string(string filename, string& text) {\n\n\tifstream ifile;\n\tstring tmp;\n\n\tfor(int i = 0; i < 4; ++i) {\n\t\tifile.open(\"./title/\" + filename);\n\t\twhile(getline( ifile, tmp )) {\n\t\t\ttmp += \"\\n\";\n\t\t\ttext += tmp;\n\t\t}\n\t\tifile.close();\n\t}\n\n\tifile.open(\"./text/\" + filename);\n\twhile(getline( ifile, tmp )) {\n\t\ttmp += \"\\n\";\n\t\ttext += tmp;\n\t}\n\tifile.close();\n}\n\nvoid Cut::calcu_word(vector<string>& word_list) {\n\t\n\tsort(word_list.begin(), word_list.end());\n\n\tfor (int i = 0; i < (int)word_list.size(); ++i)\t{\n\t\tstring cur = word_list[i];\n\t\tif(!w_id.count(cur)) {\n\t\t\tw_id[cur] = W++;\n\t\t\tvector<State> ve;\n\t\t\tve.push_back(State(F - 1, 1));\n\t\t\tw_list.push_back(ve);\n\t\t\tdic.push_back(cur);\n\t\t} else {\n\t\t\tint id = w_id[cur];\n\t\t\tif(i && word_list[i] == word_list[i-1]) {\n\t\t\t\tint tmp = w_list[id].size();\n\t\t\t\tw_list[id][tmp - 1].cnt++;\n\t\t\t} else w_list[id].push_back(State(F - 1, 1));\n\t\t}\n\t}\n}\n\nvoid m_init() {\n\ttool.THULAC::init(NULL, NULL, NULL, NULL, true, '_');\n\tTag.insert(\"n\"); Tag.insert(\"np\"); Tag.insert(\"ns\"); Tag.insert(\"ni\"); Tag.insert(\"nz\");\n\tTag.insert(\"t\"); Tag.insert(\"f\"); Tag.insert(\"s\"); Tag.insert(\"v\"); Tag.insert(\"a\");\n\tTag.insert(\"d\"); Tag.insert(\"h\"); Tag.insert(\"t\"); Tag.insert(\"i\"); Tag.insert(\"j\");\n}\n"
},
{
"alpha_fraction": 0.7247706651687622,
"alphanum_fraction": 0.7293577790260315,
"avg_line_length": 14.571428298950195,
"blob_id": "4bf52a5d410d2eed5f02de555c05f6a517a9a70c",
"content_id": "b5a8d1b4fe41ff6ec22cb51252d2bfb339c5c6b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 14,
"path": "/m_crawler/crawler.h",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#ifndef CRAWLER_H\n#define CRAWLER_H\n\n#include \"domain.h\"\n#include <queue>\n#include <set>\n#include <pthread.h>\nusing namespace std;\n#define NUM_THREADS 8\n\nvoid Crawler(string st);\nvoid* th_download(void* args);\n\n#endif\n"
},
{
"alpha_fraction": 0.671159029006958,
"alphanum_fraction": 0.6765498518943787,
"avg_line_length": 16.66666603088379,
"blob_id": "590a8d16db500e33cd5fb1be4aec11149dbda6fe",
"content_id": "4c101dfd5b1c9add6970c9da7130e5304ad6c9b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 42,
"path": "/search/cut.h",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#ifndef CUT_H\n#define CUT_H\n\n#include <iostream>\n#include <cstdlib>\n#include <vector>\n#include <string>\n#include <fstream>\n#include <map>\n#include <algorithm>\n#include <cstdio>\nusing namespace std;\n\nstruct State{\n\tint index, cnt;\n\tState(int index = 0, int cnt = 0):index(index), cnt(cnt){}\n};\n\nclass Cut {\n\nprivate:\n\tint F, W;\n\tmap<string, int> f_id, w_id;\n\tvector<string> f_list, dic;\n\tvector<vector<State> > w_list;\n\n\tvoid cut_word(string filename, vector<string>& word_list);\n\t\t//cut_word: cut a file into words and save them in the word_list\n\tvoid calcu_word(vector<string>& word_list);\n\tvoid file_to_string(string filename, string& text);\n\n\npublic:\n\tCut(): F(0), W(0){}\n\tvoid go_through_file();\n\tvoid save();\n\n};\n\nvoid m_init();\n\n#endif\n"
},
{
"alpha_fraction": 0.5944584608078003,
"alphanum_fraction": 0.6196473836898804,
"avg_line_length": 23.8125,
"blob_id": "4d2a929e216f3fdfad8167d38a0a90c2af20ea3a",
"content_id": "4f06faaded8a3d9269fe2c1f1ad53730ccc45893",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 16,
"path": "/m_crawler/main.cpp",
"repo_name": "hyp1231/Info_search",
"src_encoding": "UTF-8",
"text": "#include \"domain.h\"\n#include \"crawler.h\"\n\nint main(void) {\n\n\tcout << \"Hello, Mr.Hou!\" << endl; usleep(2*1e5);\n\tcout << \"I'm going to help you to download all the pages from the website that you have chosen.\" << endl; usleep(2*1e5);\n\tcout << \"Good luck! :D\" << endl << endl; usleep(5*1e5);\n\n\tcout << \"Let's start!.\" << endl;\n\n\tstring init = \"http://info.ruc.edu.cn/\";\n\tCrawler(init);\n\n\treturn 0;\n}\n"
}
] | 16 |
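`vsm.cpp` above ranks documents by cosine similarity between log-scaled TF-IDF vectors: each weight is `(1 + log10(tf)) * log10(F / df)`, precomputed for the corpus in `Vsm::m_init` and rebuilt per query in `Vsm::query`. A small NumPy sketch of the same scoring on toy term counts (THULAC segmentation and the on-disk inverted index built by the `search/` files are left out):

```python
# TF-IDF weighting and cosine ranking as done in vsm.cpp, on toy counts.
import numpy as np

# term-count matrix: rows = documents, columns = vocabulary terms
counts = np.array([[3, 0, 1],
                   [0, 2, 2],
                   [1, 1, 0]], dtype=float)
query_counts = np.array([1, 0, 2], dtype=float)

n_docs = counts.shape[0]
df = np.count_nonzero(counts, axis=0)           # document frequency per term
idf = np.log10(n_docs / df)

def weight(tf):
    # (1 + log10(tf)) * idf for tf > 0, zero otherwise, as in Vsm::m_init / Vsm::query
    return np.where(tf > 0, (1 + np.log10(np.where(tf > 0, tf, 1))) * idf, 0.0)

doc_vecs = weight(counts)
q_vec = weight(query_counts)

scores = doc_vecs @ q_vec / (np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(q_vec))
print(np.argsort(-scores))                      # document indices, best match first
```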
crocodoyle/mritools | https://github.com/crocodoyle/mritools | ef3dbac169c55ff02dad31256b2768aa527e9bf2 | 1cd7f2646e9a596782ee97a51248e826fb537eea | ba989dbd686e802d77b26b56454837dfb01d8242 | refs/heads/master | 2021-01-17T05:39:56.826078 | 2018-07-25T17:28:19 | 2018-07-25T17:28:19 | 30,318,405 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.490461528301239,
"alphanum_fraction": 0.5199999809265137,
"avg_line_length": 27.034482955932617,
"blob_id": "e229a5e4a661bc5005f0fb6eaf27f4f62b38bfea",
"content_id": "1aed508320379df872bef3d4e352681204153ead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1625,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 58,
"path": "/parse_clinical.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 11:33:35 2016\n\n@author: adoyle\n\"\"\"\n\nimport csv \nimport pickle\n\ndata_dir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\n\nmri_list = pickle.load(open(data_dir + 'mri_list.pkl', 'rb'))\ncsvwriter = csv.writer(open(data_dir + 'extraOnes.csv', 'w'))\ncsvreader = csv.reader(open(data_dir + 'MSLAQ-clinical.csv'))\n\nindex = 0\nfor row in csvreader:\n if index >= 8:\n\n saveDocument = {}\n uid = row[0][0:3] + row[0][4:]\n treatment = row[4]\n \n newT2 = row[29]\n newT1 = row[32]\n atrophy = row[36]\n \n inList = False\n for scan in mri_list:\n if scan.uid == uid:\n inList = True\n right_scan = scan\n\n if not inList: # we don't have imaging data for the results, log it\n print(uid, 'NOT FOUND')\n csvwriter.writerow([uid[0:3] + '_' + uid[4:]])\n else:\n print(uid, treatment, newT2, newT1, atrophy)\n saveDocument['treatment'] = treatment\n try:\n saveDocument['newT1'] = int(newT1)\n except ValueError:\n saveDocument['newT1'] = 0\n try:\n saveDocument['newT2'] = int(newT2)\n except ValueError:\n saveDocument['newT2'] = 0\n\n try:\n saveDocument['atrophy'] = float(atrophy)\n except:\n saveDocument['atrophy'] = 0.0\n\n print(right_scan.features_dir + 'clinical.pkl')\n pickle.dump(saveDocument, open(right_scan.features_dir + 'clinical.pkl', 'wb'))\n\n index +=1"
},
{
"alpha_fraction": 0.4903535544872284,
"alphanum_fraction": 0.507394015789032,
"avg_line_length": 33.496620178222656,
"blob_id": "56cc6b22564cd73fc19e760e1023e6e79c7e7684",
"content_id": "b7402ad76cd4fce5246f7c181d8eabaaacb40d3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20422,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 592,
"path": "/context_extraction.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import nibabel as nib\nimport numpy as np\nimport os\n\nfrom mri import mri\n\nimport pickle, csv\n\nimport subprocess\nimport time, sys\n\nfrom random import shuffle\nimport skeletons\n\nimport bitstring\nfrom multiprocessing import Pool, Process\n\ndata_dir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\nicbmRoot = data_dir + 'quarantine/common/models/icbm_avg_152_'\nlesion_atlas = data_dir + 'quarantine/common/models/icbm_avg_3714_t2les.mnc.gz'\n\n\nthreads = 8\nrecompute = True\nreconstruct = False\n\ndoLBP = True\ndoContext = True\ndoRIFT = True\ndoIntensity = True\n\nreload_list = True\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\n\nriftRadii = [3, 6]\nlbpRadii = [1]\n\nlbpBinsTheta = 6\nlbpBinsPhi = 4\n\nthinners = skeletons.thinningElements()\n\ndef write_clinical_outputs(mri_list):\n\n # csvwriter = csv.writer(open(data_dir + 'extraOnes.csv', 'w'))\n csvreader = csv.reader(open(data_dir + 'MSLAQ-clinical.csv'))\n\n index = 0\n for row in csvreader:\n if index >= 8:\n\n saveDocument = {}\n uid = row[0][0:3] + row[0][4:]\n treatment = row[4]\n\n newT2 = row[29]\n newT1 = row[32]\n atrophy = row[36]\n\n inList = False\n for scan in mri_list:\n if scan.uid == uid:\n inList = True\n right_scan = scan\n\n if not inList: # we don't have imaging data for the results, log it\n print(uid, 'NOT FOUND')\n # csvwriter.writerow([uid[0:3] + '_' + uid[4:]])\n else:\n print(uid, treatment, newT2, newT1, atrophy)\n saveDocument['treatment'] = treatment\n try:\n saveDocument['newT1'] = int(newT1)\n except ValueError:\n saveDocument['newT1'] = 0\n try:\n saveDocument['newT2'] = int(newT2)\n except ValueError:\n saveDocument['newT2'] = 0\n\n try:\n saveDocument['atrophy'] = float(atrophy)\n except:\n saveDocument['atrophy'] = 0.0\n\n print(right_scan.features_dir + 'clinical.pkl')\n pickle.dump(saveDocument, open(right_scan.features_dir + 'clinical.pkl', 'wb'))\n\n index += 1\n\n\ndef convertToNifti(mri_list):\n new_list = []\n for scan in mri_list:\n for mod in modalities:\n if not '.nii' in scan.images[mod]:\n subprocess.call(['mnc2nii', scan.images[mod], scan.images[mod][0:-7] + '.nii'])\n scan.images[mod] = scan.images[mod][0:-7] + '.nii'\n subprocess.call(['gzip', scan.images[mod]])\n scan.images[mod] += '.gz' \n \n for prior in scan.tissues:\n if not '.nii' in scan.priors[prior]:\n subprocess.call(['mnc2nii', scan.priors[prior], scan.priors[prior][0:-7]+'.nii'])\n scan.priors[prior] = scan.priors[prior][0:-7]+'.nii'\n subprocess.call(['gzip', scan.priors[prior]])\n scan.priors[prior] += '.gz'\n \n new_list.append(scan)\n \n outfile = open('/usr/local/data/adoyle/new_mri_list.pkl', 'wb')\n pickle.dump(new_list, outfile)\n outfile.close()\n \n return new_list\n\n\ndef invertLesionCoordinates(mri_list):\n new_list = []\n for scan in mri_list:\n new_lesion_list = []\n for lesion in scan.lesionList:\n new_lesion = []\n for point in lesion:\n x = point[2]\n y = point[1]\n z = point[0]\n new_lesion.append([x, y, z])\n new_lesion_list.append(new_lesion)\n scan.lesionList = new_lesion_list\n new_list.append(scan)\n \n outfile = open('/usr/local/data/adoyle/new_mri_list.pkl', 'wb')\n pickle.dump(new_list, outfile)\n outfile.close() \n\n return new_list\n\n\ndef getBoundingBox(mri_list):\n lesTypes = ['tiny', 'small', 'medium', 'large']\n\n boundingBoxes, xMax, yMax, zMax = {}, {}, {}, {}\n\n for lesType in lesTypes:\n xMax[lesType] = 0\n yMax[lesType] = 0\n zMax[lesType] = 0\n \n for scan in mri_list:\n for les in scan.lesionList:\n if len(les) > 2 and len(les) < 11:\n lesType = 'tiny'\n if 
len(les) > 10 and len(les) < 26:\n lesType = 'small'\n if len(les) > 25 and len(les) < 101:\n lesType = 'medium'\n if len(les) > 100:\n lesType = 'large'\n if len(les) < 3:\n continue\n\n lesion = np.asarray(les)\n xRange = np.amax(lesion[:,0]) - np.amin(lesion[:,0])\n yRange = np.amax(lesion[:,1]) - np.amin(lesion[:,1])\n zRange = np.amax(lesion[:,2]) - np.amin(lesion[:,2])\n\n if xRange > xMax[lesType]:\n xMax[lesType] = xRange\n if yRange > yMax[lesType]:\n yMax[lesType] = yRange\n if zRange > zMax[lesType]:\n zMax[lesType] = zRange\n \n for lesType in lesTypes:\n boundingBoxes[lesType] = [xMax[lesType], yMax[lesType], zMax[lesType]]\n \n print('boundingBoxes: ', boundingBoxes)\n return boundingBoxes\n\n\ndef separate_lesions(scan):\n lesion_image = nib.load(scan.lesions).get_data()\n lesion_locations = list(np.asarray(np.nonzero(lesion_image)).T)\n connected_lesion = np.zeros((len(lesion_locations)))\n\n lesion_list = []\n for i, (x, y, z) in enumerate(lesion_locations):\n for lesion in lesion_list:\n for point in lesion:\n if np.abs(x - point[0]) <= 1 and np.abs(y - point[1]) <= 1 and np.abs(z - point[2]) <= 1:\n lesion.append([x, y, z])\n connected_lesion[i] = True\n if connected_lesion[i]:\n break\n\n if not connected_lesion[i]:\n newLesion = [[x, y, z]]\n lesion_list.append(newLesion)\n\n return lesion_list\n\n\ndef uniformLBP(image, lesion, radius):\n lbp = bitstring.BitArray('0b00000000')\n \n size = \"\"\n if (len(lesion) > 2) and (len(lesion) < 11):\n size = 'tiny'\n elif (len(lesion) > 10) and (len(lesion) < 26):\n size = 'small'\n elif (len(lesion) > 25) and (len(lesion) < 101):\n size = 'medium'\n elif (len(lesion) > 100):\n size = 'large'\n \n r = radius\n \n if size == 'tiny' or size == 'small':\n uniformPatterns = np.zeros(9, dtype='float32')\n \n for i, [x,y,z] in enumerate(lesion):\n threshold = image[x,y,z]\n \n lbp.set(image[x-r, y, z] > threshold, 0)\n lbp.set(image[x-r, y+r, z] > threshold, 1)\n lbp.set(image[x, y+r, z] > threshold, 2)\n lbp.set(image[x+r, y+r, z] > threshold, 3)\n lbp.set(image[x+r, y, z] > threshold, 4)\n lbp.set(image[x+r, y-r, z] > threshold, 5)\n lbp.set(image[x, y-r, z] > threshold, 6)\n lbp.set(image[x-r, y-r, z] > threshold, 7)\n \n transitions = 0\n for bit in range(len(lbp)-1):\n if not lbp[bit] == lbp[bit+1]:\n transitions += 1\n \n if not lbp[0] == lbp[-1]:\n transitions += 1\n \n ones = lbp.count(1)\n \n if transitions <= 2:\n uniformPatterns[ones] += 1.0 / float(len(lesion))\n else:\n uniformPatterns[8] += 1.0 / float(len(lesion))\n \n elif size == 'medium' or size == 'large':\n uniformPatterns = np.zeros(9, dtype='float32')\n # garbage, skeleton = skeletons.hitOrMissThinning(lesion, thinners)\n\n for i, [x,y,z] in enumerate(lesion):\n threshold = image[x,y,z]\n\n lbp.set(image[x-r, y, z] > threshold, 0)\n lbp.set(image[x-r, y+r, z] > threshold, 1)\n lbp.set(image[x, y+r, z] > threshold, 2)\n lbp.set(image[x+r, y+r, z] > threshold, 3)\n lbp.set(image[x+r, y, z] > threshold, 4)\n lbp.set(image[x+r, y-r, z] > threshold, 5)\n lbp.set(image[x, y-r, z] > threshold, 6)\n lbp.set(image[x-r, y-r, z] > threshold, 7)\n\n transitions = 0\n for bit in range(len(lbp)-1):\n if not lbp[bit] == lbp[bit+1]:\n transitions += 1\n\n if not lbp[0] == lbp[-1]:\n transitions += 1\n\n ones = lbp.count(1)\n\n if transitions <= 2:\n uniformPatterns[ones] += 1.0 / float(len(lesion))\n else:\n uniformPatterns[8] += 1.0 / float(len(lesion))\n \n return uniformPatterns\n\n \ndef generateRIFTRegions2D(radii):\n pointLists = []\n \n for r in range(len(radii)):\n 
pointLists.append([])\n \n for x in range(-np.max(radii), np.max(radii)):\n for y in range(-np.max(radii), np.max(radii)):\n distance = np.sqrt(x**2 + y**2)\n \n if distance <= radii[0]:\n pointLists[0].append([x, y])\n elif distance > radii[0] and distance <= radii[1]:\n pointLists[1].append([x, y])\n # if distance > radii[1] and distance <= radii[2]:\n # pointLists[2].append([x, y])\n\n return pointLists\n \ndef getRIFTFeatures2D(scan, riftRegions, img):\n numBinsTheta = 4\n sigma = np.sqrt(2)\n \n binsTheta = np.linspace(0, 2*np.pi, num=numBinsTheta+1, endpoint=True)\n \n grad_x, grad_y, grad_z = {}, {}, {}\n mag, theta = {}, {}\n \n for mod in modalities:\n grad_x[mod], grad_y[mod], grad_z[mod] = np.gradient(img[mod])\n \n mag[mod] = np.sqrt(np.square(grad_x[mod]) + np.square(grad_y[mod]))\n theta[mod] = np.arctan2(grad_y[mod], grad_x[mod])\n \n for l, lesion in enumerate(scan.lesionList):\n size = \"\"\n if (len(lesion) > 2) and (len(lesion) < 11):\n size = 'tiny'\n elif (len(lesion) > 10) and (len(lesion) < 26):\n size = 'small'\n elif (len(lesion) > 25) and (len(lesion) < 101):\n size = 'medium'\n elif (len(lesion) > 100):\n size = 'large'\n else:\n continue\n \n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l) \n\n for mod in modalities:\n if size == 'tiny' or size == 'small':\n feature = np.zeros((len(riftRadii), numBinsTheta))\n for pIndex, point in enumerate(lesion):\n xc, yc, zc = point\n for r, region in enumerate(riftRegions):\n gradient_direction, gradient_strength = [], []\n for p, evalPoint in enumerate(region):\n x = xc + evalPoint[0]\n y = yc + evalPoint[1]\n z = zc\n\n relTheta = np.arctan2((y - yc), (x - xc))\n outwardTheta = (theta[mod][x,y,z] - relTheta + 2*np.pi)%(2*np.pi)\n gaussianWindow = 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (np.square(y-yc) + np.square(x-xc)) / (2 * sigma**2))\n\n gradient_direction.append(outwardTheta)\n gradient_strength.append(mag[mod][x,y,z]*gaussianWindow)\n\n hist, bins = np.histogram(gradient_direction, bins=binsTheta, range=(0, np.pi), weights=gradient_strength)\n # hist = np.divide(hist, sum(hist))\n if not np.isnan(np.min(hist)):\n feature[r, :] += hist / float(len(lesion))\n else:\n print('NaNs in RIFT for', scan.uid, 'at radius', str(riftRadii[r]))\n \n elif size == 'medium' or size == 'large':\n feature = np.zeros((len(riftRadii), numBinsTheta))\n # garbage, skeleton = skeletons.hitOrMissThinning(lesion, thinners)\n\n for pIndex, point in enumerate(lesion):\n xc, yc, zc = point\n for r, region in enumerate(riftRegions):\n for p, evalPoint in enumerate(region):\n gradient_direction, gradient_strength = [], []\n\n x = xc + evalPoint[0]\n y = yc + evalPoint[1]\n z = zc\n\n if [x, y, z] in lesion:\n relTheta = np.arctan2((y - yc), (x - xc))\n outwardTheta = (theta[mod][x, y, z] - relTheta + 2 * np.pi) % (2 * np.pi)\n\n gradient_direction.append(outwardTheta)\n gradient_strength.append(mag[mod][x, y, z])\n\n gaussianWindow = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(- (np.square(y - yc) + np.square(x - xc)) / (2 * sigma ** 2))\n\n hist, bins = np.histogram(gradient_direction, bins=binsTheta, range=(0, np.pi), weights=gradient_strength)\n # hist = np.divide(hist, sum(hist))\n\n if not np.isnan(np.min(hist)):\n feature[r, :] += hist / float(len(lesion))\n else:\n print('NaNs in RIFT for', scan.uid, 'at radius', str(riftRadii[r]))\n\n saveDocument[mod] = feature\n\n pickle.dump(saveDocument, open(scan.features_dir + 'rift_' + str(l) + '.pkl', \"wb\"))\n\n\ndef loadMRIList():\n complete_data_subjects, 
missing_data_subjects = 0, 0\n\n mri_list = []\n for root, dirs, filenames in os.walk(data_dir):\n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n scan = mri(f)\n \n if os.path.isfile(scan.lesions):\n if os.path.isfile(scan.images['t1p']) and os.path.isfile(scan.images['t2w']) and os.path.isfile(scan.images['pdw']) and os.path.isfile(scan.images['flr']):\n print('Parsing files for', f)\n mri_list.append(scan)\n complete_data_subjects += 1\n else:\n print('Missing MRI modality: ', f)\n missing_data_subjects += 1\n else:\n print('Missing lesion labels: ', f)\n missing_data_subjects += 1\n\n print(complete_data_subjects, '/', missing_data_subjects + complete_data_subjects, 'have all modalities and lesion labels')\n\n mri_list_lesions = []\n for i, scan in enumerate(mri_list):\n scan.lesionList = separate_lesions(scan)\n mri_list_lesions.append(scan)\n print(scan.uid, i+1, '/', len(mri_list)+1)\n\n return mri_list_lesions\n\n\ndef getICBMContext(scan, images):\n # contextMin = {\"csf\": -0.001, \"wm\": -0.001, \"gm\": -0.001, \"pv\": -0.001, \"lesion\": -0.001}\n # contextMax = {'csf': 1.001, 'wm': 1.001, 'gm': 1.001, 'pv': 1.001, 'lesion': 0.348}\n #\n # numBins = 4\n\n wm_tracts = ['Anterior_Segment', 'Arcuate', 'Cingulum', 'Cortico_Ponto_Cerebellum', 'Cortico_Spinal',\n 'Inferior_Cerebellar_Pedunculus', 'Inferior_Longitudinal_Fasciculus',\n 'Inferior_Occipito_Frontal_Fasciculus', 'Long_Segment', 'Optic_Radiations', 'Posterior_Segment',\n 'Superior_Cerebelar_Pedunculus', 'Uncinate', 'Anterior_Commissure', 'Corpus_Callosum', 'Fornix', 'Internal_Capsule']\n\n for tissue in scan.tissues:\n filename = scan.priors[tissue]\n images[tissue] = nib.load(filename).get_data()\n\n for wm_tract in wm_tracts:\n images[wm_tract] = nib.load('/data1/users/adoyle/atlases/Catani/MSLAQ/' + wm_tract + '.nii').get_data()\n\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n \n for tissue in scan.tissues + wm_tracts:\n context = []\n\n for p in lesion:\n context.append(images[tissue][p[0], p[1], p[2]])\n \n # contextHist = np.histogram(context, numBins, (contextMin[tissue], contextMax[tissue]))\n # contextHist = contextHist[0] / np.sum(contextHist[0], dtype='float')\n #\n # if np.isnan(contextHist).any():\n # contextHist = np.zeros(numBins)\n # contextHist[0] = 1\n\n saveDocument[tissue] = [np.mean(context), np.var(context)]\n\n pickle.dump(saveDocument, open(scan.features_dir + 'context_' + str(l) + '.pkl', \"wb\"))\n\n\ndef getLBPFeatures(scan, images):\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n\n if len(lesion) > 100:\n size = 'large'\n elif len(lesion) > 25:\n size = 'medium'\n elif len(lesion) > 10:\n size = 'small'\n elif len(lesion) > 2:\n size = 'tiny'\n else:\n continue\n \n if size == 'large' or size == 'medium':\n feature = np.zeros((len(lbpRadii), 9))\n elif size == 'small' or size == 'tiny':\n feature = np.zeros((len(lbpRadii), 9))\n else:\n continue\n \n for j, mod in enumerate(modalities):\n saveDocument[mod] = {}\n \n for r, radius in enumerate(lbpRadii):\n feature[r, ...] 
= uniformLBP(images[mod], lesion, radius)\n saveDocument[mod] = feature\n\n pickle.dump(saveDocument, open(scan.features_dir + 'lbp_' + str(l) + '.pkl', \"wb\"))\n\n\ndef getIntensityFeatures(scan, images):\n # intensityMin = {\"t1p\": 32.0, \"t2w\": 10.0, \"flr\": 33.0, \"pdw\": 49.0}\n # intensityMax = {'t1p': 1025.0, 't2w': 1000.0, 'flr': 1016.0, 'pdw': 1018.0}\n\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n \n histBins = 4\n \n for m in modalities:\n intensities = []\n for point in lesion:\n intensities.append(images[m][point[0], point[1], point[2]])\n \n # intensityHist = np.histogram(intensities, histBins, (intensityMin[m], intensityMax[m]))\n # intensityHist = intensityHist[0] / np.sum(intensityHist[0], dtype='float')\n #\n # if np.isnan(intensityHist).any():\n # intensityHist = np.zeros((histBins))\n # intensityHist[0] = 1\n\n saveDocument[m] = [np.mean(intensities), np.var(intensities)]\n \n pickle.dump(saveDocument, open(scan.features_dir + 'intensity_' + str(l) + '.pkl', \"wb\"))\n\n\ndef getFeaturesOfList(mri_list, riftRegions):\n for i, scan in enumerate(mri_list):\n images = {}\n for j, m in enumerate(modalities):\n images[m] = nib.load(scan.images[m]).get_data()\n \n print('Patient:', scan.uid, i+1, '/', len(mri_list)+1)\n startTime = time.time()\n\n if doContext:\n getICBMContext(scan, images)\n\n if doLBP:\n getLBPFeatures(scan, images)\n \n if doRIFT:\n getRIFTFeatures2D(scan, riftRegions, images)\n \n if doIntensity:\n getIntensityFeatures(scan, images)\n \n elapsed = time.time() - startTime\n print(elapsed, \"seconds\")\n\n\ndef chunks(l, n):\n shuffle(l)\n for i in range(0, len(l), n):\n yield l[i:i+n]\n\n\ndef main():\n startTime = time.time()\n\n print('Loading MRI file list...')\n \n if reload_list:\n mri_list = loadMRIList()\n outfile = open(data_dir + 'mri_list.pkl', 'wb')\n pickle.dump(mri_list, outfile)\n outfile.close()\n print('Cached MRI file listing')\n else:\n infile = open(data_dir + 'mri_list.pkl', 'rb')\n mri_list = pickle.load(infile)\n infile.close()\n \n print('MRI list loaded')\n\n riftRegions = generateRIFTRegions2D(riftRadii)\n print('extracting imaging ')\n getFeaturesOfList(mri_list, riftRegions)\n\n print('writing clinical outputs...')\n write_clinical_outputs(mri_list)\n print('Done')\n \n endTime = time.time()\n \n elapsed = endTime - startTime\n print(\"Total time elapsed:\", elapsed/3600, 'hours', elapsed/60, 'minutes')\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.589715301990509,
"alphanum_fraction": 0.6003875136375427,
"avg_line_length": 47.06017303466797,
"blob_id": "9eb163eec78c85ac789c6b6b8c098470f30ac111",
"content_id": "955ba209a5b7c265feaee1928b3e63507f03f55c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33545,
"license_type": "no_license",
"max_line_length": 293,
"num_lines": 698,
"path": "/predict_responders.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import os, pickle, time, csv\nfrom collections import defaultdict\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport argparse\n\nfrom sklearn.mixture import GMM\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_curve, roc_auc_score, brier_score_loss, confusion_matrix\nfrom sklearn.calibration import calibration_curve\n\nimport umap\nfrom sklearn.manifold import TSNE\nfrom sklearn.neighbors import KNeighborsClassifier, DistanceMetric\n\nimport load_data\nimport bol_classifiers\nfrom analyze_lesions import learn_bol, project_to_bol, separatePatientsByTreatment, choose_clusters\nfrom feature_extraction import write_features\n\nfrom mri import mri\n\ntreatments = ['Placebo', 'Laquinimod', 'Avonex']\ntreatment_labels = ['Placebo', 'Drug A', 'Drug B']\n\nclassifier_names = ['Random Forest', 'SVM (Linear)', 'SVM (RBF)', '1-NN ($\\\\chi^2$)', '1-NN (Mahalanobis)']\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ntissues = ['csf', 'wm', 'gm', 'pv', 'lesion']\n\nfeats = [\"Context\", \"RIFT\", \"LBP\", \"Intensity\"]\n\nscoringMetrics = ['TP', 'FP', 'TN', 'FN']\n\nmetrics = ['newT2']\n\ndatadir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\nresponder_filename = 'Bravo_responders.csv'\n\nmri_list_location = datadir + 'mri_list.pkl'\n\ndef responder_roc(all_test_patients, activity_truth, activity_posterior, untreated_posterior, n_folds, results_dir):\n\n # print('Untreated posteriors:', untreated_posterior['Placebo'])\n # print('Activity posteriors:', activity_posterior)\n # print('Activity truth:', activity_truth)\n\n # print('Untreated posteriors shape:', untreated_posterior['Placebo'].shape)\n # print('Activity posteriors:', activity_posterior['Avonex'].shape)\n # print('Activity truth:', activity_truth['Avonex'].shape)\n\n with open(results_dir + 'responders.csv', 'w') as csvfile:\n responder_writer = csv.writer(csvfile)\n responder_writer.writerow(\n ['Subject ID', 'Treatment', '# T2 Lesions', 'P(A=1|BoL, untr)', 'P(A=1|BoL, tr)', 'Responder'])\n mri_list = defaultdict(list)\n for treatment in treatments:\n for sublists in all_test_patients[treatment]:\n for mri in sublists:\n mri_list[treatment].append(mri)\n\n fig1 = plt.figure(0, dpi=500) # roc\n fig2 = plt.figure(1, dpi=500) # predictions distribution\n\n ax = fig1.add_subplot(1, 1, 1)\n ax1 = fig2.add_subplot(1, 1, 1)\n\n for treatment, treat in zip(treatments, treatment_labels):\n p_a_auc, p_d_distance, p_d_harmonic_mean, p_d_anti_harmonic_mean, p_a_brier = [], [], [], [], []\n\n if 'Placebo' not in treatment:\n a_prob = np.concatenate(tuple(untreated_posterior[treatment]), axis=0) # must use predictions for untreated\n a_prob = a_prob[:, 1] # to infer what would have happened if untreated\n\n # print('Untreated predictions (' + treatment + '):', a_prob)\n\n d_true = np.concatenate(tuple(activity_truth[treatment]), axis=0)\n d_prob = np.concatenate(tuple(activity_posterior[treatment]), axis=0)\n\n d_prob = d_prob[:, 1] # just consider the P(A=1|BoL, treatment)\n\n a_range = np.linspace(0, 1, n_folds, endpoint=False)\n d_range = np.linspace(0, 1, n_folds, endpoint=False)\n\n for n_a, p_a in enumerate(a_range):\n try:\n a_true_inferred = np.zeros(a_prob.shape)\n a_true_inferred[a_prob > p_a] = 1\n\n # print('A untreated predictions:', a_true_inferred)\n fraction_of_positives, mean_predicted_value = 
calibration_curve(a_true_inferred, a_prob, n_bins=10)\n\n score = brier_score_loss(a_true_inferred, a_prob)\n p_a_brier.append(score)\n\n # if n_a%5 == 0:\n # ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\", label=\"%s (%1.3f)\" % (str(p_a), score))\n\n except:\n p_a_brier.append(1)\n\n try:\n # tn, tp, _ = roc_curve(a_true_inferred, a_prob)\n auc_weighted = roc_auc_score(a_true_inferred, a_prob, 'weighted')\n auc_macro = roc_auc_score(a_true_inferred, a_prob, 'macro')\n auc_micro = roc_auc_score(a_true_inferred, a_prob, 'micro')\n auc_samples = roc_auc_score(a_true_inferred, a_prob, 'samples')\n\n # print('AUCs (weighted, macro, micro, samples):', auc_weighted, auc_macro, auc_micro, auc_samples)\n\n p_a_auc.append(auc_macro)\n except:\n print('AUC undefined for:', p_a)\n p_a_auc.append(0)\n\n\n ax1.hist(a_prob, range=(0, 1), bins=20, label='P(A=1|BoL, untr) for ' + treat + ' subjs', histtype=\"step\", lw=2)\n ax1.hist(d_prob, range=(0, 1), bins=20, label='P(A=1|BoL, ' + treat + ')', histtype='step', lw=2)\n\n best_p_a = a_range[np.argmin(p_a_brier)]\n a_true = np.ones(a_prob.shape)\n a_true[a_prob <= best_p_a] = 0\n\n print('P(A|BoL, untr) Brier scores: ', p_a_brier)\n print('Best theshold:', best_p_a)\n\n for p_d in d_range:\n try:\n d_predicted = np.zeros(d_prob.shape)\n d_predicted[d_prob <= p_d] = 0\n d_predicted[d_prob > p_d] = 1\n\n tn, fp, fn, tp = confusion_matrix(d_true, d_predicted).ravel()\n\n epsilon = 1e-6\n sens = tp/(tp + fn + epsilon)\n spec = tn/(tn + fp + epsilon)\n\n distance = np.sqrt( (1 - sens)**2 + (1 - spec)**2 )\n harmonic_mean = 2*sens*spec / (sens + spec)\n anti_harmonic_mean = sens * spec / (2 - sens*spec)\n\n p_d_distance.append(distance)\n p_d_harmonic_mean.append(harmonic_mean)\n p_d_anti_harmonic_mean.append(anti_harmonic_mean)\n except:\n print('sens/spec or something else undefined for', p_d)\n p_d_distance.append(1)\n p_d_harmonic_mean.append(0)\n p_d_anti_harmonic_mean.append(0)\n\n print('P(A|BoL, ' + treatment + ') sensitivity/specificity harmonic means: ', p_d_harmonic_mean)\n\n # select operating point with best AUC\n\n best_p_d = d_range[np.argmax(p_d_harmonic_mean)]\n\n # best is min distance, max (anti) harmonic mean of sens/spec\n print('Best P(A|BoL, ' + treatment + ') using distance:', d_range[np.argmin(p_d_distance)])\n print('Best P(A|BoL, ' + treatment + ') using harmonic mean:', d_range[np.argmax(p_d_harmonic_mean)])\n print('Best P(A|BoL, ' + treatment + ') using anti-harmonic mean:', d_range[np.argmax(p_d_anti_harmonic_mean)])\n\n print('Best threshold for untreated activity prediction: ', best_p_a)\n print('Best threshold for treated activity prediction: ', best_p_d)\n\n d_predicted = np.zeros(d_prob.shape)\n d_predicted[d_prob <= best_p_d] = 0\n d_predicted[d_prob > best_p_d] = 1\n\n r_true = np.zeros(a_true.shape) # Assumption that our Placebo future lesion activity classifier is perfect\n r_true[d_true == 0] = 1 # and that responders have no future lesion activity on drug\n r_true[a_true == 0] = 0\n\n r_predicted = np.zeros(d_predicted.shape) # Responders are predicted when active on Placebo\n r_predicted[a_prob > best_p_a] = 1 # and inactive on the drug\n r_predicted[d_predicted < best_p_d] = 0\n\n roc_auc = roc_auc_score(r_true, r_predicted, 'weighted')\n fpr, tpr, _ = roc_curve(r_true, r_predicted)\n\n plt.figure(0)\n lw = 2\n if 'Laquinimod' in treatment:\n ax.plot(fpr, tpr, color='darkorange', lw=lw, label=treat + ' ROC (AUC = %0.2f)' % roc_auc)\n ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n 
else:\n ax.plot(fpr, tpr, color='darkred', lw=lw, label=treat + ' ROC (AUC = %0.2f)' % roc_auc)\n\n # plt.title('Receiver operating characteristic example', fontsize=24)\n\n print(treatment + ' optimal thresholds (activity, drug_activity): ', best_p_a, best_p_d)\n\n # untreated_threshold, treated_threshold = 0.8, 0.2\n\n untreated_thresholds = np.linspace(0, 1)\n treated_thresholds = np.linspace(0, 1)\n\n responder_results = np.zeros((untreated_thresholds.shape[0], treated_thresholds.shape[0], 4))\n\n for i, untreated_threshold in enumerate(untreated_thresholds):\n for j, treated_threshold in enumerate(treated_thresholds):\n responder_list, actual_outcome_list = [], []\n\n for p_activity_untreated, p_activity_treated, activity in zip(a_prob, d_prob, d_true):\n # print('P(A=1|BoL, untr), P(A=1|BoL, tr), A', p_activity_untreated, p_activity_treated, activity)\n if p_activity_untreated > untreated_threshold and p_activity_treated <= treated_threshold:\n responder_list.append(1)\n actual_outcome_list.append(activity)\n # elif p_activity_untreated < untreated_threshold:\n # responder_list.append(0)\n # actual_outcome_list.append(activity)\n elif p_activity_untreated > untreated_threshold and p_activity_treated >= treated_threshold:\n responder_list.append(0)\n actual_outcome_list.append(activity)\n\n if len(responder_list) > 0:\n tn, fp, fn, tp = confusion_matrix(np.asarray(responder_list), np.asarray(actual_outcome_list), labels=[0, 1]).ravel()\n\n epsilon = 1e-6\n\n sens = tp/(tp + fn + epsilon)\n spec = tn/(tn + fp + epsilon)\n\n responder_results[i, j, 0] = sens\n responder_results[i, j, 1] = spec\n responder_results[i, j, 2] = 2*sens*spec / (sens + spec + epsilon) # harmonic mean!\n responder_results[i, j, 3] = len(responder_list)\n\n # print(untreated_threshold, treated_threshold, sens, spec)\n X, Y = np.meshgrid(untreated_thresholds, treated_thresholds)\n z = responder_results[:, :, 2]\n\n fig = plt.figure(2, dpi=500)\n ax_thresholds = plt.axes(projection='3d')\n surf = ax_thresholds.plot_surface(X, Y, z, vmin=np.nanmin(z), vmax=np.nanmax(z), rstride=1, cstride=1, cmap='Spectral_r', edgecolor='none')\n ax_thresholds.set_xlabel('P(A=1|BoL, untr)\\nthreshold')\n ax_thresholds.set_ylabel('P(A=0|BoL, ' + treat + ')\\nthreshold')\n ax_thresholds.set_zlabel('Sens/Spec\\n(harmonic mean)')\n\n ax_thresholds.invert_xaxis()\n\n fig.colorbar(surf, shrink=0.4, aspect=4)\n plt.savefig(results_dir + treatment + '_thresholds.png')\n\n flat_index = np.argmax(responder_results[:, :, 2])\n unflat_indices = np.unravel_index(flat_index, (responder_results.shape[0], responder_results.shape[1]))\n\n best_untreated_threshold = untreated_thresholds[unflat_indices[0]]\n best_treated_threshold = treated_thresholds[unflat_indices[1]]\n\n for i in range(len(mri_list[treatment])):\n scan = mri_list[treatment][i]\n treatment = scan.treatment\n t2_les = str(scan.newT2)\n p_a_untr = str(a_prob[i])\n p_a_tr = str(d_prob[i])\n respond = str(r_predicted[i])\n\n responder_writer.writerow([scan.uid, treatment, t2_les, p_a_untr, p_a_tr, respond])\n\n plt.figure(1)\n ax1.set_xlabel(\"Mean predicted value\", fontsize=24)\n ax1.set_ylabel(\"Count\", fontsize=24)\n ax1.legend(loc='upper left', shadow=True, fancybox=True, fontsize=20)\n plt.savefig(results_dir + 'prediction_distribution.png')\n\n plt.figure(0)\n ax.set_xlabel('False Positive Rate', fontsize=20)\n ax.set_ylabel('True Positive Rate', fontsize=20)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., 
shadow=True, fontsize=20)\n plt.savefig(results_dir + 'responder_' + 'p_a_'+ str(best_p_a) + '_p_d_' + str(best_p_d) + '_roc.png', bbox_inches='tight')\n\n return best_p_a, best_p_d\n\n\ndef cluster_stability(bol_mixtures, random_forests, lime_importances, results_dir):\n n_folds = len(bol_mixtures)\n\n n_lesion_types_all_folds = 0\n\n feature_dims = bol_mixtures[0].means_.shape[1]\n\n for k in range(n_folds):\n n_lesion_types_all_folds += len(bol_mixtures[k].weights_)\n\n all_lesion_types = np.zeros((n_lesion_types_all_folds, feature_dims + 1))\n # all_type_weights = np.zeros((n_lesion_types_all_folds))\n\n all_lesion_importances = np.zeros((n_folds, random_forests['Placebo'][0].feature_importances_.shape[0]))\n all_lime_importances = np.zeros((n_folds, random_forests['Placebo'][0].feature_importances_.shape[0]))\n\n n_components = []\n\n idx = 0\n for fold, (mixture_model, rf_placebo, rf_avonex, rf_laquinimod) in enumerate(zip(bol_mixtures, random_forests['Placebo'], random_forests['Avonex'], random_forests['Laquinimod'])):\n n_components.append(len(mixture_model.weights_))\n\n all_lesion_importances[fold, :] += rf_placebo.feature_importances_\n all_lesion_importances[fold, :] += rf_avonex.feature_importances_\n all_lesion_importances[fold, :] += rf_laquinimod.feature_importances_\n\n all_lime_importances[fold, :] += lime_importances['Placebo'][fold]\n all_lime_importances[fold, :] += lime_importances['Avonex'][fold]\n all_lime_importances[fold, :] += lime_importances['Laquinimod'][fold]\n\n for lesion_type_centre, type_weight in zip(mixture_model.means_, mixture_model.weights_):\n all_lesion_types[idx, :-1] = lesion_type_centre\n all_lesion_types[idx, -1] = type_weight\n\n idx += 1\n\n # all_type_weights *= (10/np.max(all_type_weights))\n\n n_lesion_types_first_fold = len(bol_mixtures[0].weights_)\n lesion_type_labels = np.arange(n_lesion_types_first_fold)\n\n first_fold_lesion_types = all_lesion_types[0:n_lesion_types_first_fold, :]\n\n # V = np.cov(all_lesion_types)\n # mahalanobis_distance = DistanceMetric.get_metric('mahalanobis', V=np.cov(V))\n\n # sort lesion types by greatest cluster separation and then iterate through folds?\n knn = KNeighborsClassifier(n_neighbors=1)\n knn.fit(first_fold_lesion_types, lesion_type_labels)\n\n corresponding_lesion_types = knn.predict(all_lesion_types)\n print('corresponding lesion types:', corresponding_lesion_types.shape)\n\n embedded_umap = umap.UMAP(metric='euclidean', random_state=42).fit_transform(all_lesion_types)\n embedded_tsne = TSNE(random_state=42, metric='euclidean').fit_transform(all_lesion_types)\n\n print('t-sne embedded shape:', embedded_tsne.shape)\n print('umap embedded shape:', embedded_umap.shape)\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6), dpi=500)\n\n cmap = mpl.cm.get_cmap('rainbow')\n markers = ['o', 'v', '^', '<', '>', '8', 's', 'p', 'P', '*', 'x', 'D']\n type_markers = []\n\n for n in range(n_lesion_types_first_fold):\n type_markers.append(markers[np.random.randint(len(markers))])\n\n for label in lesion_type_labels:\n for predicted_label, (x_tsne, y_tsne), (x_umap, y_umap) in zip(corresponding_lesion_types, embedded_tsne, embedded_umap):\n if label == predicted_label:\n # ax1.scatter(x_tsne, y_tsne, s=4**weight, color=cmap((label+1)/len(lesion_type_labels)))\n # ax2.scatter(x_umap, y_umap, s=4**weight, color=cmap((label+1)/len(lesion_type_labels)))\n\n ax1.scatter(x_tsne, y_tsne, color=cmap((label+1)/len(lesion_type_labels)), marker=type_markers[label])\n ax2.scatter(x_umap, y_umap, 
color=cmap((label+1)/len(lesion_type_labels)), marker=type_markers[label])\n\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_xlabel('t-SNE', fontsize=24)\n\n ax2.set_xticks([])\n ax2.set_yticks([])\n ax2.set_xlabel('UMAP', fontsize=24)\n\n plt.tight_layout()\n plt.savefig(results_dir + 'embedding_of_lesion_types.png', dpi=600)\n\n # boxplot for lesion-type importance across folds\n\n\n corresponding_lesion_type_importance, corresponding_lime_importance = [], []\n\n for n in range(n_lesion_types_first_fold):\n corresponding_lesion_type_importance.append([])\n corresponding_lime_importance.append([])\n\n for fold, (type_importances, lime_type_importances) in enumerate(zip(all_lesion_importances, all_lime_importances)):\n #types don't correspond yet\n fold_type_labels = corresponding_lesion_types[(fold)*n_lesion_types_first_fold:(fold+1)*n_lesion_types_first_fold]\n\n for type_number in fold_type_labels:\n corresponding_lesion_type_importance[type_number].append(type_importances[type_number])\n corresponding_lime_importance[type_number].append(lime_type_importances[type_number])\n\n fig, (ax, ax2) = plt.subplots(1, 2, figsize=(24, 6), dpi=600)\n\n ax.boxplot(corresponding_lesion_type_importance)\n ax.set_ylabel('Importance', fontsize=20)\n ax.set_xlabel('Lesion-type', fontsize=20)\n ax.set_title('Gini', fontsize=20)\n\n ax2.boxplot(corresponding_lime_importance)\n ax2.set_ylabel('Importance', fontsize=20)\n ax2.set_xlabel('Lesion-type', fontsize=20)\n ax2.set_title('LIME', fontsize=20)\n\n plt.tight_layout()\n plt.savefig(results_dir + 'corresponding_lesion_importance.png', bbox_inches='tight')\n\n\n fig, axes = plt.subplots(1, 3)\n\n data = [n_components]\n print('lesion-types:', data)\n axes[0].boxplot(data)\n axes[0].set_ylabel('Number of lesion-types', fontsize=20)\n\n # importance = np.zeros((n_folds, np.max(n_components)))\n # \n # for fold, mixture_models in enumerate(bol_mixtures):\n # importance_start_idx = 0\n # \n # rfs = random_forests['Placebo']\n # lesion_importance = rfs[fold].feature_importances_\n # \n # sorted_indices = np.argsort(mixture_models.weights_)\n # \n # for c, cluster_idx in enumerate(sorted_indices):\n # lesion_type_means[fold, :] = mixture_models.means_[cluster_idx, :]\n # importance[fold, c] = lesion_importance[importance_start_idx+c]\n # \n # importance_start_idx += len(sorted_indices)\n # \n # \n # dim_mean = np.mean(lesion_type_means, axis=0)\n # dim_var = np.var(lesion_type_means, axis=0)\n # \n # print('cluster centre means:', dim_mean.shape)\n # print('cluster centre variances:', dim_var.shape)\n # \n # diffs = []\n # \n # for fold, lesion_type_centre in enumerate(lesion_type_means):\n # print('lesion type centre:', lesion_type_centre.size)\n # \n # diff = np.subtract(lesion_type_centre, dim_mean)\n # diff_normalized = np.divide(diff, dim_var)\n # \n # diffs.append(diff_normalized)\n # \n # data2 = [diffs]\n # \n # axes[1].boxplot(data2)\n # axes[1].set_xlabel('Lesion size', fontsize=20)\n # axes[1].set_ylabel('Diff. 
from mean', fontsize=20)\n # \n # \n # data3 = [importance[:, 0], importance[:, 1], importance[:, 2], importance[:,3]]\n # axes[2].boxplot(data3)\n # axes[2].set_xlabel('Lesion size', fontsize=20)\n # axes[2].set_ylabel('P(A|BoL) Importance', fontsize=20)\n # \n # plt.tight_layout()\n # plt.savefig(results_dir + 'cluster_numbers_lesion_centres.png', bbox_inches='tight')\n\n\ndef plot_activity_prediction_results(activity_truth, activity_posteriors, results_dir):\n classifier_names = ['Random Forest', '1-NN (Euclidean)', 'SVM (linear)', 'SVM ($\\\\chi^2$)', 'SVM (RBF)', 'MLP']\n colours = ['darkred', 'indianred', 'lightsalmon', 'darkorange', 'goldenrod', 'tan', 'k']\n\n for treatment in treatments:\n plt.figure(figsize=(8, 8))\n lw = 2\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n\n y_true = np.concatenate(tuple(activity_truth[treatment]), axis=0)\n for p, (probabilities, colour) in enumerate(zip(activity_posteriors, colours)):\n y_prob = np.concatenate(tuple(probabilities[treatment]), axis=0)\n\n roc_auc = roc_auc_score(y_true, y_prob[:, 1], 'weighted')\n fpr, tpr, _ = roc_curve(y_true, y_prob[:, 1])\n plt.plot(fpr, tpr, color=colour, lw=lw, label=classifier_names[p] + ' ROC (area = %0.2f)' % roc_auc)\n\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate', fontsize=20)\n plt.ylabel('True Positive Rate', fontsize=20)\n # plt.title('Receiver operating characteristic example', fontsize=24)\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., shadow=True, fontsize=20)\n plt.savefig(results_dir + 'activity_prediction_' + treatment + '_roc.png', bbox_inches='tight')\n\n return\n\n\ndef predict_responders(args):\n start = time.time()\n\n try:\n experiment_number = pickle.load(open(datadir + 'experiment_number.pkl', 'rb'))\n experiment_number += 1\n except:\n print('Couldnt find the file to load experiment number')\n experiment_number = 0\n\n print('This is experiment number:', experiment_number)\n\n results_dir = datadir + '/experiment-' + str(experiment_number) + '/'\n os.makedirs(results_dir)\n\n pickle.dump(experiment_number, open(datadir + 'experiment_number.pkl', 'wb'))\n\n mri_list = pickle.load(open(mri_list_location, 'rb'))\n\n if args.choose_k:\n features = load_data.loadAllData(mri_list, args.include_catani)\n n_lesion_types = choose_clusters(features, results_dir)\n else:\n n_lesion_types = args.k\n\n if args.predict_activity:\n mri_list, without_clinical = load_data.loadClinical(mri_list)\n\n print('We have ' + str(len(mri_list)) + ' patients who finished the study and ' + str(len(without_clinical)) + ' who did not')\n outcomes = load_data.get_outcomes(mri_list)\n\n mri_list = load_data.load_responders(datadir + responder_filename, mri_list)\n\n patient_results = {}\n for scan in mri_list:\n patient_results[scan.uid] = {}\n\n kf = StratifiedKFold(args.n_folds, shuffle=True, random_state=50)\n\n bol_mixture_models = []\n random_forests = defaultdict(list)\n deep_models = defaultdict(list)\n lime_importances = defaultdict(list)\n\n all_test_patients, activity_posterior, activity_truth, untreated_posterior = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n euclidean_knn_posterior, mahalanobis_knn_posterior, chi2_svm_posterior, rbf_svm_posterior, linear_svm_posterior, naive_bayes_posterior, deep_posterior = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n\n # initialization of result structures complete\n # start 
learning BoL, predicting activity\n for foldNum, (train_index, test_index) in enumerate(kf.split(range(len(mri_list)), outcomes)):\n print(foldNum+1, '/', kf.get_n_splits())\n\n mri_train, mri_test = np.asarray(mri_list)[train_index], np.asarray(mri_list)[test_index]\n\n # incorporate patients with no clinical data\n train_patients = []\n for scan in mri_train:\n train_patients.append(scan)\n for scan in without_clinical:\n train_patients.append(scan)\n\n # print('loading feature data...')\n # startLoad = time.time()\n raw_train_data = load_data.loadAllData(train_patients, args.include_catani)\n raw_test_data = load_data.loadAllData(mri_test, args.include_catani)\n\n print('learning bag of lesions...')\n\n startBol = time.time()\n bol_train_data, mixture_model = learn_bol(train_patients, raw_train_data, n_lesion_types, len(mri_train), results_dir, foldNum)\n\n bol_mixture_models.append(mixture_model)\n\n elapsedBol = time.time() - startBol\n print(str(elapsedBol / 60), 'minutes to learn BoL.')\n\n print('transforming test data to bag of lesions representation...')\n bol_test_data = project_to_bol(mri_test, raw_test_data, mixture_model)\n\n print('train BoL shape:', bol_train_data.shape)\n print('test BoL shape:', bol_test_data.shape)\n\n trainingPatientsByTreatment, testingPatientsByTreatment, trainingData, testingData = separatePatientsByTreatment(mri_train, mri_test, bol_train_data, bol_test_data)\n\n for treatment in treatments:\n train_data, test_data = trainingData[treatment], testingData[treatment]\n train_outcomes, test_outcomes = load_data.get_outcomes(trainingPatientsByTreatment[treatment]), load_data.get_outcomes(\n testingPatientsByTreatment[treatment])\n\n all_test_patients[treatment].append(testingPatientsByTreatment[treatment])\n\n if treatment == \"Placebo\":\n # if args.feature_selection:\n # train_data, test_data, bad_types = bol_classifiers.lesion_type_selection(train_data, test_data, train_outcomes, test_outcomes, 8, results_dir)\n\n (bestFeaturePredictions, placebo_rf, probPredicted) = bol_classifiers.random_forest(train_data, test_data, train_outcomes)\n deep_probs, mlp_model, lime_importance = bol_classifiers.mlp(train_data, test_data, train_outcomes, test_outcomes, foldNum, results_dir)\n\n random_forests[treatment].append(placebo_rf)\n deep_models[treatment].append(mlp_model)\n activity_truth[treatment].append(test_outcomes)\n activity_posterior[treatment].append(probPredicted)\n lime_importances[treatment].append(lime_importance)\n\n svm_linear_posterior, svm_rbf_posterior, chi2svm_posterior = bol_classifiers.svms(train_data, test_data, train_outcomes)\n knn_euclid_posterior, knn_maha_posterior = bol_classifiers.knn(train_data, train_outcomes, test_data)\n\n chi2_svm_posterior[treatment].append(chi2svm_posterior)\n rbf_svm_posterior[treatment].append(svm_rbf_posterior)\n linear_svm_posterior[treatment].append(svm_linear_posterior)\n\n euclidean_knn_posterior[treatment].append(knn_euclid_posterior)\n mahalanobis_knn_posterior[treatment].append(knn_maha_posterior)\n\n deep_posterior[treatment].append(deep_probs)\n\n naive_bayes_posterior[treatment].append([]) # FIX IT\n\n # drugged patients\n else:\n # if args.feature_selection:\n # train_data, test_data = bol_classifiers.apply_lesion_type_selection(train_data, test_data, bad_types)\n # project onto untreated MS model (don't train)\n (bestPreTrainedFeaturePredictions, meh, pretrainedProbPredicted) = bol_classifiers.random_forest(\n train_data, test_data, train_outcomes, placebo_rf)\n\n # new model on drugged 
patients\n (bestFeaturePredictions, drug_rf, probDrugPredicted) = bol_classifiers.random_forest(train_data, test_data, train_outcomes)\n\n deep_probs, mlp_model, lime_importance = bol_classifiers.mlp(train_data, test_data, train_outcomes, test_outcomes, foldNum, results_dir)\n\n random_forests[treatment].append(drug_rf)\n deep_models[treatment].append(mlp_model)\n lime_importances[treatment].append(lime_importance)\n\n svm_linear_posterior, svm_rbf_posterior, chi2svm_posterior = bol_classifiers.svms(train_data, test_data, train_outcomes)\n knn_euclid_posterior, knn_maha_posterior = bol_classifiers.knn(train_data, train_outcomes, test_data)\n\n deep_posterior[treatment].append(np.asarray(deep_probs))\n activity_truth[treatment].append(test_outcomes)\n activity_posterior[treatment].append(np.asarray(probDrugPredicted))\n untreated_posterior[treatment].append(np.asarray(pretrainedProbPredicted))\n\n chi2_svm_posterior[treatment].append(chi2svm_posterior)\n rbf_svm_posterior[treatment].append(svm_rbf_posterior)\n linear_svm_posterior[treatment].append(svm_linear_posterior)\n euclidean_knn_posterior[treatment].append(knn_euclid_posterior)\n mahalanobis_knn_posterior[treatment].append(knn_maha_posterior)\n\n activity_posteriors = [activity_posterior, euclidean_knn_posterior, linear_svm_posterior, chi2_svm_posterior, rbf_svm_posterior, deep_posterior]\n\n print('saving prediction results (all folds test cases)...')\n pickle.dump(activity_posteriors, open(datadir + 'posteriors.pkl', 'wb'))\n pickle.dump(all_test_patients, open(datadir + 'all_test_patients.pkl', 'wb'))\n pickle.dump(untreated_posterior, open(datadir + 'untreated_posterior.pkl', 'wb'))\n pickle.dump(activity_truth, open(datadir + 'activity_truth.pkl', 'wb'))\n pickle.dump(bol_mixture_models, open(datadir + 'mixture_models.pkl', 'wb'))\n pickle.dump(random_forests, open(datadir + 'random_forests.pkl', 'wb'))\n # pickle.dump(deep_models, open(datadir + 'deep_models.pkl', 'wb'))\n print('saved!')\n else:\n activity_posteriors = pickle.load(open(datadir + 'posteriors.pkl', 'rb'))\n all_test_patients = pickle.load(open(datadir + 'all_test_patients.pkl', 'rb'))\n untreated_posterior = pickle.load(open(datadir + 'untreated_posterior.pkl', 'rb'))\n activity_truth = pickle.load(open(datadir + 'activity_truth.pkl', 'rb'))\n bol_mixture_models = pickle.load(open(datadir + 'mixture_models.pkl', 'rb'))\n random_forests = pickle.load(open(datadir + 'random_forests.pkl', 'rb'))\n # deep_models = pickle.load(open(datadir + 'deep_models.pkl', 'rb'))\n\n best_p_a, best_p_d = responder_roc(all_test_patients, activity_truth, activity_posteriors[5], untreated_posterior, args.n_folds, results_dir)\n\n plot_activity_prediction_results(activity_truth, activity_posteriors, results_dir)\n\n end = time.time()\n elapsed = end - start\n\n cluster_stability(bol_mixture_models, random_forests, lime_importances, results_dir)\n print(str(elapsed / 60), 'minutes elapsed.')\n\n return experiment_number\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='MS Drug Responder Prediction.')\n parser.add_argument('--choose-k', type=bool, default=False, metavar='N',\n help='choose the number of lesion-types (default: False)')\n parser.add_argument('--k', type=int, default=60, metavar='N',\n help='if choose-k is \\'False\\', number of lesion-types (default: 60)')\n parser.add_argument('--predict-activity', type=bool, default=False, metavar='N',\n help='predict activity. 
if false, loads pre-computed results from previous run (default: True')\n parser.add_argument('--n-folds', type=int, default=50, metavar='N',\n help='number of folds for cross-validation (default: 50)')\n parser.add_argument('--get-features', type=bool, default=False, metavar='N',\n help='extract features from the imaging data (default: False)')\n parser.add_argument('--feature-selection', type=bool, default=False, metavar='N',\n help='remove lesion types that have no information (default: False)')\n parser.add_argument('--include-catani', type=bool, default=False, metavar='N',\n help='include the Catani context priors in the features for determining lesion-types (default: False)')\n\n args = parser.parse_args()\n print('Arguments:', args)\n\n if args.get_features:\n print('Extracting features from imaging data and writing to disk')\n write_features(include_catani=False)\n\n experiment_number = predict_responders(args)\n print('This experiment was brought to you by the number:', experiment_number)"
},
{
"alpha_fraction": 0.4769322872161865,
"alphanum_fraction": 0.6311164498329163,
"avg_line_length": 34.49645233154297,
"blob_id": "7f7e477d2d9b900de1f95a59d7c87672cc6a6472",
"content_id": "d27d71b0d523dbb194e67dbf94ae6498595612b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5007,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 141,
"path": "/plot_script.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 20 19:25:29 2017\n\n@author: adoyle\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\nResponders DRUG A\n13.0 20.0 201.0 46.0\nsensitivity: 0.220338983051\nspecificity: 0.909502262443\nResponders (certain GT)\n1.0 32.0 223.0 24.0\nsensitivity: 0.04\nspecificity: 0.874509803922\nResponders (certain prediction)\n23.0 46.0 175.0 36.0\nsensitivity: 0.389830508475\nspecificity: 0.79185520362\nResponders (all certain)\n23.0 46.0 209.0 2.0\nsensitivity: 0.92\nspecificity: 0.819607843137\n\"\"\"\n\n\"\"\"\nResponders DRUG B\n10.0 13.0 206.0 30.0\nsensitivity: 0.25\nspecificity: 0.940639269406\nResponders (certain GT)\n4.0 19.0 222.0 14.0\nsensitivity: 0.222222222222\nspecificity: 0.921161825726\nResponders (certain prediction)\n17.0 38.0 181.0 23.0\nsensitivity: 0.425\nspecificity: 0.826484018265\nResponders (all certain)\n17.0 38.0 203.0 1.0\nsensitivity: 0.944444444444\nspecificity: 0.842323651452\n\"\"\"\n\n#sensA = [0.220338983051, 0.04, 0.389830508475, 0.92]\n#specA = [0.909502262443, 0.874509803922, 0.79185520362, 0.819607843137]\n#\n#sensB = [0.25, 0.222222222222, 0.425, 0.944444444444]\n#specB = [0.940639269406, 0.921161825726, 0.826484018265, 0.842323651452]\n#labels = [\"Drug A ($\\\\alpha$=$\\\\beta$=0.5)\", \"Drug A ($\\\\alpha$=$\\\\beta$=0.8)\", \"Drug B ($\\\\alpha$=$\\\\beta$=0.5)\", \"Drug B ($\\\\alpha$=$\\\\beta$=0.8)\"]\n#\n#fig = plt.figure(figsize=(4,4))\n#\n#p1 = plt.scatter(specA[0],sensA[0], marker='x', color='b', s=(100,))\n##p2 = plt.scatter(sensA[1], specA[1], marker='x', color='c', s=(60,))\n##p3 = plt.scatter(sensA[2], specA[2], marker='x', color='g', s=(60,))\n#p4 = plt.scatter(specA[3], sensA[3], marker='x', color='c', s=(100,))\n#\n#p2 = plt.scatter(specB[0],sensB[0], marker='x', color='orange', s=(100,))\n#p3 = plt.scatter(specB[3], sensB[3], marker='x', color='r', s=(100,))\n#\n#plt.ylabel(\"Sensitivity\")\n#plt.xlabel(\"Specificity\")\n#plt.title(\"Responder Prediction\")\n#plt.xlim([0,1])\n#plt.ylim([0,1])\n#plt.legend((p1, p4, p2, p3), tuple(labels), loc=3, scatterpoints=1, fancybox=True, shadow=True)\n#plt.show()\n\n\nfig = plt.figure(figsize=(3,3))\n\nlabels = [\"NN-Euclidean\", \"NN-Mahalanobis\", \"NN-$\\chi^2$\", \"SVM-Linear\",\"SVM-RBF\",\"SVM-$\\chi^2$\", \"Random Forest\", \"Naive Bayes (lesion counts)\"]\n\nsens = [0.7, .66, .97, .26, .52, .45, .7, .25, .944]\nspec = [.52, .4, .07, .82, .6, .62, .58, .72, .142]\n\np1 = plt.scatter(spec[0], sens[0], marker='x', color='b', s=(200,))\np2 = plt.scatter(spec[1], sens[1], marker='x', color='c', s=(200,))\np3 = plt.scatter(spec[2], sens[2], marker='x', color='deepskyblue', s=(200,))\np4 = plt.scatter(spec[3], sens[3], marker='+', color='gold', s=(200,))\np5 = plt.scatter(spec[4], sens[4], marker='+', color='y', s=(200,))\np6 = plt.scatter(spec[5], sens[5], marker='+', color='goldenrod', s=(200,))\np7 = plt.scatter(spec[6], sens[6], marker='*', color='r', s=(400,))\n#p9 = plt.scatter(spec[8], sens[8], marker='*', color='deeppink', s=(400,))\np8 = plt.scatter(spec[7], sens[7], marker='d', color='g', s=(200,))\n\nplt.ylabel(\"Sensitivity\")\nplt.xlabel(\"Specificity\")\nplt.title(\"Activity Prediction\")\nplt.xlim([0,1.05])\nplt.ylim([0,1.05])\nplt.legend((p1, p2, p3, p4, p5, p6, p7, p8), tuple(labels), loc='center left', bbox_to_anchor=(1, 0.5), scatterpoints=1, fancybox=True, shadow=True)\nplt.show()\n\n\n#p1 = plt.scatter(sensB[0], specB[0], marker='x', color='b')\n#p2 = plt.scatter(sensB[1], specB[1], marker='x', color='c')\n#p3 = 
plt.scatter(sensB[2], specB[2], marker='x', color='g')\n#p4 = plt.scatter(sensB[3], specB[3], marker='x', color='r')\n#\n#plt.xlabel(\"Sensitivity\")\n#plt.ylabel(\"Specificity\")\n#plt.title(\"Drug B Responder Prediction\")\n#plt.xlim([0,1])\n#plt.ylim([0,1])\n#plt.legend((p1, p2, p3, p4), tuple(labels), loc='center left', bbox_to_anchor=(1, 0.5), scatterpoints=1, fancybox=True, shadow=True)\n#plt.show()\n#\n#\n#\n\n\nfig = plt.figure(figsize=(3,3))\n\nlabels = [\"Drug A ($\\\\beta=0.5$)\", \"Drug A ($\\\\beta=0.8$)\", \"Drug B ($\\\\beta=0.5$)\", \"Drug B ($\\\\beta=0.8$)\", \"Untreated $\\\\rightarrow$ Drug A$(\\\\alpha=0.5)$\", \"Untreated $\\\\rightarrow$ Drug B$(\\\\alpha=0.5)$\"]\n\nsens = [0.7, .944, 0.532, 0.884, 0.648, 1.0, 0.675, 0.630]\nspec = [.58, .142, 0.674, 0.5, 0.593, 0.5, 0.515, 0.505]\n\n#p1 = plt.scatter(spec[0], sens[0], marker='*', color='r', s=(200,))\n#p2 = plt.scatter(spec[1], sens[1], marker='*', color='deeppink', s=(200,))\np3 = plt.scatter(spec[2], sens[2], marker='+', color='g', s=(200,))\np4 = plt.scatter(spec[3], sens[3], marker='+', color='lime', s=(200,))\np5 = plt.scatter(spec[4], sens[4], marker='x', color='b', s=(200,))\np6 = plt.scatter(spec[5], sens[5], marker='x', color='c', s=(200,))\np7 = plt.scatter(spec[6], sens[6], marker='>', color='yellowgreen', s=(200,))\np9 = plt.scatter(spec[7], sens[7], marker='>', color='lightblue', s=(200,))\n\n\nplt.ylabel(\"Sensitivity\")\nplt.xlabel(\"Specificity\")\nplt.title(\"Activity Prediction (Treatments)\")\nplt.xlim([0,1.05])\nplt.ylim([0,1.05])\nplt.legend((p3, p4, p5, p6, p7, p9), tuple(labels), loc='center left', bbox_to_anchor=(1, 0.5), scatterpoints=1, fancybox=True, shadow=True)\nplt.show()\n\n\n"
},
{
"alpha_fraction": 0.6412140727043152,
"alphanum_fraction": 0.6626198291778564,
"avg_line_length": 28.537734985351562,
"blob_id": "8857d8ff70f207a20b01d439cde7c573986f6fcf",
"content_id": "9f928dff7e77a8c22f611c4d7c8cccba794f994f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3130,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 106,
"path": "/classify_lesions2.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 05 19:59:43 2015\n\n@author: Andrew\n\"\"\"\nimport nibabel as nib\nimport numpy as np\nimport os\nimport h5py\n\nfrom mri import mri\nfrom sklearn.naive_bayes import GaussianNB\n\ndata_dir = 'C:/MRI/MS-LAQ/'\n\nmalf_classes = ['bg', 'bs', 'cgm', 'crblr_gm', 'crblr_wm', 'csf', 'dgm', 'lv', 'ov', 'wm']\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ngood_malf_classes = ['cgm', 'dgm', 'wm']\n\n\nmri_list = []\n\nfor root, dirs, filenames in os.walk(data_dir): \n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n mri_list.append(mri(f))\n\nf = h5py.File(data_dir + 'features2.hdf5', 'r')\n\n#features types: malf-context*scales + image-intensities\n#features types: 10*5 + 4\n\nimage_pixels = 60*256*256\n\npriors = 10\nfeature_scales = 2\nnum_mods = 4\n\nnum_train = 5\nnum_test = 5\n\ntraining_vector = np.zeros((num_train*image_pixels, priors*feature_scales + num_mods))\nprint \"training vector size:\", np.shape(training_vector)\n\nfor i, img in enumerate(mri_list[0:num_train]):\n print i\n features = f[img.uid]\n \n training_vector[(i)*image_pixels:(i+1)*image_pixels, 0:priors*feature_scales] = np.reshape(features, (image_pixels, feature_scales*priors))\n\n for j, mod in enumerate(modalities):\n image_data = nib.load(img.images[mod]).get_data()\n training_vector[(i)*image_pixels:(i+1)*image_pixels, priors*feature_scales + j] = np.reshape(image_data, image_pixels)\n\n\ntest_vector = np.zeros(shape=(num_test*image_pixels, feature_scales*priors + num_mods))\n\nfor i, img in enumerate(mri_list[num_train:num_train+num_test]):\n features = f[img.uid]\n test_vector[(i)*image_pixels:(i+1)*image_pixels, 0:priors*feature_scales] = np.reshape(features, (image_pixels, feature_scales*priors))\n \n for j, mod in enumerate(modalities):\n image_data = nib.load(img.images[mod]).get_data()\n test_vector[(i)*image_pixels:(i+1)*image_pixels, priors*feature_scales + j] = np.reshape(image_data, image_pixels)\n\nprint \"loading lesion labels...\"\n\ntrain_labels = np.zeros(shape=(num_train*image_pixels))\nfor i, img in enumerate(mri_list[0:num_train]):\n train_labels[(i)*image_pixels:(i+1)*image_pixels] = np.reshape(nib.load(img.lesions).get_data(), image_pixels)\n\ntest_labels = np.zeros(shape=(num_test*image_pixels))\nfor i, img in enumerate(mri_list[num_train:num_train+num_test]):\n test_labels[(i)*image_pixels:(i+1)*image_pixels] = np.reshape(nib.load(img.lesions).get_data(), image_pixels)\n\nprint \"training classifier...\"\n\ngnb = GaussianNB()\ngnb.fit(training_vector, train_labels)\npredictions = gnb.predict(test_vector)\n\nprint \"done predictions\"\n\ntp = 0\nfp = 0\nfn = 0\n\ntotal_voxels = image_pixels*num_test\n\nprint np.shape(predictions), np.shape(test_labels)\n\nfor i, p in enumerate(predictions):\n #print p, test_labels[i] \n \n if p > 0.0 and test_labels[i] > 0.0:\n tp+=1\n if p > 0.0 and test_labels[i] == 0.0:\n fp+=1\n if p == 0.0 and test_labels[i] > 0.0:\n fn+=1\n \nprint \"true positives: \", tp\nprint \"false positives: \", fp\nprint \"false negatives: \", fn\nprint \"total lesions: \", np.count_nonzero(test_labels)"
},
{
"alpha_fraction": 0.5022059679031372,
"alphanum_fraction": 0.5295383334159851,
"avg_line_length": 37.56016540527344,
"blob_id": "498b933f851b1638f3706c861a7039adeeee646f",
"content_id": "9b459f7fc2f077d75f431692beddf09c2d8236ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18586,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 482,
"path": "/feature_extraction.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import nibabel as nib\nimport numpy as np\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom mri import mri\n\nimport pickle, csv, os, time, sys, subprocess, h5py\n\nfrom random import shuffle\nimport skeletons\n\nimport bitstring\nfrom multiprocessing import Pool, Process\n\ndata_dir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\nicbmRoot = data_dir + 'quarantine/common/models/icbm_avg_152_'\nlesion_atlas = data_dir + 'quarantine/common/models/icbm_avg_3714_t2les.mnc.gz'\n\nreload_list = False\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\n\nthinners = skeletons.thinningElements()\n\nlbpRadii = [1]\n\ndef write_clinical_outputs(mri_list):\n csvreader = csv.reader(open(data_dir + '2018-01_BRAVO_IPMSA.csv'))\n lines = list(csvreader)\n\n for scan in mri_list:\n saveDocument = {}\n patient_id = scan.uid[4:]\n\n for row in lines:\n if patient_id in row[2]:\n treatment = row[5].split(' ')[0]\n newT2 = row[42]\n relapse = row[32]\n\n gad12 = row[43]\n gad24 = row[44]\n\n if not 'NULL' in gad12 and not 'NULL' in gad24:\n gad = int(gad12) + int(gad24)\n saveDocument['gad'] = str(gad)\n\n country = row[4]\n race = row[8]\n sex = row[9]\n age = row[11]\n\n saveDocument['newT2'] = newT2\n saveDocument['relapse'] = relapse\n saveDocument['treatment'] = treatment\n\n saveDocument['country'] = country\n saveDocument['race'] = race\n saveDocument['sex'] = sex\n saveDocument['age'] = age\n\n print(scan.uid, saveDocument)\n\n\n pickle.dump(saveDocument, open(scan.features_dir + 'clinical.pkl', 'wb'))\n\ndef separate_lesions(scan):\n lesion_image = nib.load(scan.lesions).get_data()\n lesion_locations = list(np.asarray(np.nonzero(lesion_image)).T)\n connected_lesion = np.zeros((len(lesion_locations)))\n\n lesion_list = []\n for i, (x, y, z) in enumerate(lesion_locations):\n for lesion in lesion_list:\n for point in lesion:\n if np.abs(x - point[0]) <= 1 and np.abs(y - point[1]) <= 1 and np.abs(z - point[2]) <= 1:\n lesion.append([x, y, z])\n connected_lesion[i] = True\n if connected_lesion[i]:\n break\n\n if not connected_lesion[i]:\n newLesion = [[x, y, z]]\n lesion_list.append(newLesion)\n\n return lesion_list\n\n\ndef uniformLBP(image, lesion, radius):\n r = radius\n uniformPatterns = np.zeros(9, dtype='float32')\n\n for i, [x, y, z] in enumerate(lesion):\n threshold = image[x, y, z]\n\n lbp = bitstring.BitArray('0b00000000')\n\n lbp.set(image[x, y - r, z] > threshold, 0)\n lbp.set(image[x, y - r, z + r] > threshold, 1)\n lbp.set(image[x, y, z + r] > threshold, 2)\n lbp.set(image[x, y + r, z + r] > threshold, 3)\n lbp.set(image[x, y + r, z] > threshold, 4)\n lbp.set(image[x, y + r, z - r] > threshold, 5)\n lbp.set(image[x, y, z - r] > threshold, 6)\n lbp.set(image[x, y - r, z - r] > threshold, 7)\n\n transitions = 0\n for bit in range(len(lbp) - 1):\n if not lbp[bit] == lbp[bit + 1]:\n transitions += 1\n\n if not lbp[0] == lbp[-1]:\n transitions += 1\n\n ones = lbp.count(1)\n\n if transitions <= 2:\n uniformPatterns[ones] += 1.0 / float(len(lesion))\n else:\n uniformPatterns[8] += 1.0 / float(len(lesion))\n\n return uniformPatterns\n\n\ndef get_rift(scan, img):\n numBinsTheta = 4\n visualize_slice = False\n visualize_lesion = False\n\n binsTheta = np.linspace(0, 2 * np.pi, num=numBinsTheta + 1, endpoint=True)\n\n grad_x, grad_y, grad_z = {}, {}, {}\n mag, theta = {}, {}\n\n for mod in modalities:\n grad_x[mod], grad_y[mod], grad_z[mod] = np.gradient(img[mod])\n\n mag[mod] = np.sqrt(np.square(grad_y[mod]) + 
np.square(grad_z[mod]))\n theta[mod] = np.arctan2(grad_z[mod], grad_y[mod])\n\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n\n for mod in modalities:\n feature = np.zeros(numBinsTheta, dtype='float32')\n\n lesion_points = np.asarray(lesion)\n # print('lesion points:', lesion_points.shape)\n # for point in lesion_points:\n # print(point)\n\n x_min, x_max = np.min(lesion_points[:, 0]), np.max(lesion_points[:, 0])\n # print('Lesion connected across', x_max - x_min, 'slices')\n\n for xc in range(x_min, x_max+1):\n in_plane = lesion_points[lesion_points[:, 0] == xc]\n\n yc = np.mean(in_plane[:, 1])\n zc = np.mean(in_plane[:, 2])\n\n # print('Lesion has', len(in_plane), 'voxels in slice', xc, 'centered at', yc, zc)\n\n if len(in_plane) > 10 and np.random.rand() > 0.99 and not visualize_lesion:\n visualize_slice = True\n visualize_lesion = True\n\n if visualize_slice:\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(15, 2))\n\n img = nib.load(scan.images['t2w']).get_data()\n lesionMaskImg = np.zeros((np.shape(img)))\n\n angle = np.zeros_like(theta['t2w'])\n magnitude = np.zeros_like(mag['t2w'])\n\n for point in lesion:\n lesionMaskImg[point[0], point[1], point[2]] = 1\n\n angle[point[0], point[1], point[2]] = theta['t2w'][point[0], point[1], point[2]]\n magnitude[point[0], point[1], point[2]] = mag['t2w'][point[0], point[1], point[2]]\n\n maskImg = np.ma.masked_where(lesionMaskImg == 0, np.ones((np.shape(lesionMaskImg))) * 5000)\n\n maskSquare = np.zeros((np.shape(img)))\n maskSquare[int(xc), int(yc) - 20:int(yc) + 20, int(zc) - 20] = 1\n maskSquare[int(xc), int(yc) - 20:int(yc) + 20, int(zc) + 20] = 1\n maskSquare[int(xc), int(yc) - 20, int(zc) - 20:int(zc) + 20] = 1\n maskSquare[int(xc), int(yc) + 20, int(zc) - 20:int(zc) + 20] = 1\n\n square = np.ma.masked_where(maskSquare == 0, np.ones(np.shape(maskSquare)) * 5000)\n\n lesionMaskPatch = maskImg[int(xc), int(yc) - 20:int(yc) + 20, int(zc) - 20:int(zc) + 20]\n\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.imshow(img[int(xc), 20:200, 20:175], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n ax1.imshow(maskImg[int(xc), 20:200, 20:175], cmap=plt.cm.autumn, interpolation='nearest', alpha=0.25, origin='lower')\n ax1.imshow(square[int(xc), 20:200, 20:175], cmap=plt.cm.autumn, interpolation='nearest', origin='lower')\n\n centre_point = (20, 20)\n\n intensity_img = ax2.imshow(img[int(xc), int(yc) - 20:int(yc) + 20, int(zc) - 20:int(zc) + 20], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n ax2.imshow(lesionMaskPatch, cmap=plt.cm.autumn, alpha=0.25, interpolation='nearest', origin='lower')\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n divider = make_axes_locatable(ax2)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(intensity_img, cax=cax1)\n\n mag_img = ax3.imshow(magnitude[int(xc), int(yc) - 20: int(yc) + 20, int(zc) - 20: int(zc) + 20], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n\n divider = make_axes_locatable(ax3)\n cax2 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(mag_img, cax=cax2)\n\n ax3.set_xticks([])\n ax3.set_yticks([])\n\n max_grad = np.argmax(magnitude[int(xc), int(yc) - 20: int(yc) + 20, int(zc) - 20: int(zc) + 20])\n\n max_grad_pos = np.unravel_index(max_grad, magnitude[int(xc), int(yc) - 20: int(yc) + 20, int(zc) - 20: int(zc) + 20].shape)\n\n max_grad_val = magnitude[int(xc), int(yc) - 20: int(yc) + 20, int(zc) - 20: int(zc) + 20][max_grad_pos]\n 
max_grad_angle = angle[int(xc), int(yc) - 20: int(yc) + 20, int(zc) - 20: int(zc) + 20][max_grad_pos]\n\n arrow_angle = max_grad_angle + np.arctan2((max_grad_pos[0] - yc), (max_grad_pos[1] - zc))\n\n o = np.sin(arrow_angle)*(max_grad_val / 100)*5\n a = np.cos(arrow_angle)*(max_grad_val / 100)*5\n\n arrow_begin = (max_grad_pos[1], max_grad_pos[0])\n arrow_end = (a, o)\n\n # print('arrow begin:', arrow_begin, 'arrow end:', arrow_end)\n\n intensity_img = ax4.imshow(img[int(xc), int(yc) - 20:int(yc) + 20, int(zc) - 20:int(zc) + 20], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n ax4.imshow(lesionMaskPatch, cmap=plt.cm.autumn, alpha=0.25, interpolation='nearest', origin='lower')\n\n divider = make_axes_locatable(ax4)\n cax3 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(intensity_img, cax=cax3)\n\n ax4.arrow(arrow_begin[0], arrow_begin[1], arrow_end[0], arrow_end[1], head_width=2, head_length=2, color='b')\n\n ax4.plot(centre_point[0], centre_point[1], 'ro', markersize=2)\n ax4.plot(arrow_begin[0], arrow_begin[1], 'bo', markersize=2)\n\n radial_line_x = [centre_point[0], arrow_begin[0]]\n radial_line_y = [centre_point[1], arrow_begin[1]]\n\n ax4.plot(radial_line_x, radial_line_y, color='r')\n\n ax4.set_xticks([])\n ax4.set_yticks([])\n\n ax1.set_xlabel('Lesion region', fontsize=14)\n ax2.set_xlabel('Lesion closeup', fontsize=14)\n ax3.set_xlabel('Gradient magnitude', fontsize=14)\n ax4.set_xlabel('Max grad. direction', fontsize=14)\n\n visualize_slice = False\n\n gradient_direction, gradient_strength = [], []\n for (x, y, z) in in_plane:\n # print('Point:', x, y, z)\n\n if not y == yc and not z == zc:\n relTheta = np.arctan2((z - zc), (y - yc))\n outwardTheta = (theta[mod][x, y, z] - relTheta + 2 * np.pi) % (2 * np.pi)\n\n # print('Relative angle:', relTheta)\n # print('Angle from radius:', outwardTheta)\n\n gradient_direction.append(outwardTheta)\n gradient_strength.append(mag[mod][x, y, z])\n\n # gaussian = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(\n # - (np.square(y - yc) + np.square(z - zc)) / (2 * sigma ** 2))\n\n hist, bins = np.histogram(gradient_direction, bins=binsTheta, range=(0, np.pi),\n weights=gradient_strength)\n\n # print('Histogram values, bins:', hist, bins)\n feature += hist / (x_max - x_min + 1)\n\n if visualize_lesion:\n ax5.bar(bins[:-1], hist / 4)\n ax5.set_xticks(list(np.linspace(0, 2*np.pi, num=4, endpoint=False)))\n ax5.set_xticklabels(['outward', 'counter\\nclockwise', 'inward', 'clockwise'], rotation='vertical')\n ax5.set_yticks([])\n ax5.set_xlabel('RIFT feature', fontsize=20)\n\n plt.savefig(data_dir + '/examples/' + 'RIFT_example_' + str(scan.uid) + '_lesion_' + str(l) + '.png', dpi=500, bbox_inches='tight')\n plt.clf()\n visualize_slice = False\n visualize_lesion = False\n\n saveDocument[mod] = feature / 1000\n\n # print('Final RIFT descriptor:', saveDocument)\n pickle.dump(saveDocument, open(scan.features_dir + 'rift_' + str(l) + '.pkl', \"wb\"))\n\n\ndef loadMRIList():\n complete_data_subjects, missing_data_subjects = 0, 0\n\n mri_list = []\n for root, dirs, filenames in os.walk(data_dir):\n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n scan = mri(f)\n\n if os.path.isfile(scan.lesions):\n if os.path.isfile(scan.images['t1p']) and os.path.isfile(scan.images['t2w']) and os.path.isfile(\n scan.images['pdw']) and os.path.isfile(scan.images['flr']):\n print('Parsing files for', f)\n mri_list.append(scan)\n complete_data_subjects += 1\n else:\n print('Missing MRI modality: ', f)\n missing_data_subjects += 1\n else:\n 
print('Missing lesion labels: ', f)\n missing_data_subjects += 1\n\n print(complete_data_subjects, '/', missing_data_subjects + complete_data_subjects,\n 'have all modalities and lesion labels')\n\n mri_list_lesions = []\n for i, scan in enumerate(mri_list):\n scan.lesionList = separate_lesions(scan)\n mri_list_lesions.append(scan)\n print(scan.uid, i + 1, '/', len(mri_list) + 1)\n\n return mri_list_lesions\n\n\ndef get_context(scan, images, include_catani):\n # contextMin = {\"csf\": -0.001, \"wm\": -0.001, \"gm\": -0.001, \"pv\": -0.001, \"lesion\": -0.001}\n # contextMax = {'csf': 1.001, 'wm': 1.001, 'gm': 1.001, 'pv': 1.001, 'lesion': 0.348}\n #\n # numBins = 4\n\n wm_tracts = ['Anterior_Segment', 'Arcuate', 'Cingulum', 'Cortico_Ponto_Cerebellum', 'Cortico_Spinal',\n 'Inferior_Cerebellar_Pedunculus', 'Inferior_Longitudinal_Fasciculus',\n 'Inferior_Occipito_Frontal_Fasciculus', 'Long_Segment', 'Optic_Radiations', 'Posterior_Segment',\n 'Superior_Cerebelar_Pedunculus', 'Uncinate', 'Anterior_Commissure', 'Corpus_Callosum', 'Fornix',\n 'Internal_Capsule']\n\n wm_networks = ['Projection', 'Cerebellar', 'Optic', 'Cingulum', 'Inferior', 'Arcuate', 'Perisylvian', 'Anterior_Commissure', 'Fornix', 'Corpus_Callosum']\n\n for tissue in scan.tissues:\n filename = scan.priors[tissue]\n images[tissue] = nib.load(filename).get_data()\n\n for wm_network in wm_networks:\n images[wm_network] = nib.load('/data1/users/adoyle/atlases/Catani/MSLAQ/' + wm_network + '.nii').get_data()\n\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n\n context_priors = scan.tissues\n if include_catani:\n context_priors += wm_networks\n\n for tissue in context_priors:\n context = []\n\n for p in lesion:\n context.append(images[tissue][p[0], p[1], p[2]])\n\n saveDocument[tissue] = [np.mean(context), np.var(context)]\n\n pickle.dump(saveDocument, open(scan.features_dir + 'context_' + str(l) + '.pkl', \"wb\"))\n\n\ndef get_lbp(scan, images):\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n\n for j, mod in enumerate(modalities):\n feature = np.zeros((len(lbpRadii), 9))\n\n for r, radius in enumerate(lbpRadii):\n feature[r, ...] 
= uniformLBP(images[mod], lesion, radius)\n # print(mod, feature)\n saveDocument[mod] = feature\n\n pickle.dump(saveDocument, open(scan.features_dir + 'lbp_' + str(l) + '.pkl', \"wb\"))\n\n\ndef get_intensity(scan, images):\n # intensityMin = {\"t1p\": 32.0, \"t2w\": 10.0, \"flr\": 33.0, \"pdw\": 49.0}\n # intensityMax = {'t1p': 1025.0, 't2w': 1000.0, 'flr': 1016.0, 'pdw': 1018.0}\n\n for l, lesion in enumerate(scan.lesionList):\n saveDocument = {}\n saveDocument['_id'] = scan.uid + '_' + str(l)\n\n for m in modalities:\n intensities = []\n for point in lesion:\n intensities.append(images[m][point[0], point[1], point[2]] / 1000)\n\n # intensityHist = np.histogram(intensities, histBins, (intensityMin[m], intensityMax[m]))\n # intensityHist = intensityHist[0] / np.sum(intensityHist[0], dtype='float')\n #\n # if np.isnan(intensityHist).any():\n # intensityHist = np.zeros((histBins))\n # intensityHist[0] = 1\n\n saveDocument[m] = [np.mean(intensities), np.var(intensities), np.min(intensities), np.max(intensities)]\n\n pickle.dump(saveDocument, open(scan.features_dir + 'intensity_' + str(l) + '.pkl', \"wb\"))\n\n\ndef getFeaturesOfList(mri_list, include_catani):\n for i, scan in enumerate(mri_list):\n images = {}\n for j, m in enumerate(modalities):\n images[m] = nib.load(scan.images[m]).get_data()\n\n print('Patient:', scan.uid, i + 1, '/', len(mri_list) + 1)\n startTime = time.time()\n\n get_context(scan, images, include_catani)\n get_lbp(scan, images)\n get_rift(scan, images)\n get_intensity(scan, images)\n\n elapsed = time.time() - startTime\n print(elapsed, \"seconds\")\n\n\ndef chunks(l, n):\n shuffle(l)\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef write_features(include_catani=True):\n startTime = time.time()\n\n print('Loading MRI file list...')\n\n if reload_list:\n mri_list = loadMRIList()\n outfile = open(data_dir + 'mri_list.pkl', 'wb')\n pickle.dump(mri_list, outfile)\n outfile.close()\n print('Cached MRI file listing')\n else:\n infile = open(data_dir + 'mri_list.pkl', 'rb')\n mri_list = pickle.load(infile)\n infile.close()\n\n print('MRI list loaded')\n\n print('extracting imaging ')\n getFeaturesOfList(mri_list, include_catani)\n\n print('writing clinical outputs...')\n write_clinical_outputs(mri_list)\n print('Done')\n\n endTime = time.time()\n\n elapsed = endTime - startTime\n print(\"Total time elapsed:\", elapsed / 60, 'minutes')\n return\n\n\nif __name__ == \"__main__\":\n write_features()\n"
},
{
"alpha_fraction": 0.6578947305679321,
"alphanum_fraction": 0.678947389125824,
"avg_line_length": 32.55882263183594,
"blob_id": "e94dd123c72e4e515b3aa5ee25beef024a110321",
"content_id": "713eca63f3d1c290e356aa7b823419c591c8c57c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1140,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 34,
"path": "/generate_atlas.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "from context_extraction import loadMRIList\n\nimport os, subprocess\nimport nibabel as nib\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom pyminc.volumes.factory import *\n\nicbm = '/usr/local/data/adoyle/trials/quarantine/common/models/mni_icbm152_t1_tal_nlin_sym_09a.mnc'\nicbm2 = '/usr/local/data/adoyle/trials/quarantine/common/models/icbm_avg_152_gm.mnc.gz'\noutput_dir = '/usr/local/data/adoyle/atlas/'\n\n#mri_list = loadMRIList()\n#for i, scan in enumerate(mri_list):\n# print i, '/', len(mri_list) \n# outfile = output_dir + scan.uid + '.mnc'\n# subprocess.call(['mincresample', '-transformation', scan.transformToICBM, '-like', icbm, '-tricubic', scan.lesions, outfile])\n#\n#\n\natlas = np.zeros((189,233,197), dtype='float')\n\n\nfor root, dirs, filenames in os.walk(output_dir):\n for f in filenames[0:10]:\n print root + f\n if f.endswith('.mnc'):\n image = nib.load(root + f).get_data() \n atlas = atlas + np.divide(image, len(filenames), dtype='float')\n\n\natlas_image = nib.Nifti1Image(atlas, np.eye(4))\nnib.save(atlas_image, '/usr/local/data/adoyle/trials/lesion_atlas.nii.gz')"
},
{
"alpha_fraction": 0.5691887140274048,
"alphanum_fraction": 0.5736478567123413,
"avg_line_length": 29.23043441772461,
"blob_id": "750da815cc191fb939e6fbf0c87afb7cd6927652",
"content_id": "966cd0d4678cf85ed2226d44434f2277a9b561d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6952,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 230,
"path": "/load_data.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nimport matplotlib.pyplot as plt\nimport pickle\n\nfrom collections import defaultdict\nimport csv\n\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ntissues = ['csf', 'wm', 'gm', 'pv', 'lesion']\nmetrics = ['newT2']\nfeats = [\"Context\", \"RIFT\", \"LBP\", \"Intensity\"]\nsizes = [\"tiny\", \"small\", \"medium\", \"large\"]\n\nscoringMetrics = ['TP', 'FP', 'TN', 'FN']\n\nwm_tracts = ['Anterior_Segment', 'Arcuate', 'Cingulum', 'Cortico_Ponto_Cerebellum', 'Cortico_Spinal',\n 'Inferior_Cerebellar_Pedunculus', 'Inferior_Longitudinal_Fasciculus',\n 'Inferior_Occipito_Frontal_Fasciculus', 'Long_Segment', 'Optic_Radiations', 'Posterior_Segment',\n 'Superior_Cerebelar_Pedunculus', 'Uncinate', 'Anterior_Commissure', 'Corpus_Callosum', 'Fornix', 'Internal_Capsule']\n\nwm_networks = ['Projection', 'Cerebellar', 'Optic', 'Cingulum', 'Inferior', 'Arcuate', 'Perisylvian',\n 'Anterior_Commissure', 'Fornix', 'Corpus_Callosum']\n\nlbpRadii = [1]\n\n\nletters = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)', '(j)', '(k)', '(l)', '(m)', '(n)', '(o)']\n\ntreatments = ['Placebo', 'Laquinimod', 'Avonex']\n\nthreads = 1\n\nplotFeats = False\nusePCA = False\n\ndef getLesionSizes(mri_list):\n numLesions = 0\n \n lesionSizes = []\n brainUids = []\n lesionCentroids = []\n \n print('Counting lesions')\n for i, scan in enumerate(mri_list):\n for j, lesion in enumerate(scan.lesionList):\n \n if len(lesion) > 2: \n numLesions += 1\n lesionSizes.append(len(lesion))\n brainUids.append(scan.uid)\n \n x, y, z = [int(np.mean(x)) for x in zip(*lesion)]\n lesionCentroids.append((x, y, z))\n \n print('Counted lesions, we have', numLesions)\n return numLesions, lesionSizes, lesionCentroids, brainUids\n\n\ndef get_outcomes(mri_list):\n outcomes = []\n\n for scan in mri_list:\n new_lesions = int(scan.newT2)\n if new_lesions > 0:\n outcomes.append(1)\n else:\n outcomes.append(0)\n\n return outcomes\n\n\ndef loadIntensity(mri_list):\n numBins = 4\n data = []\n\n for i, scan in enumerate(mri_list):\n for j in range(len(scan.lesionList)):\n lesion_feature = pickle.load(open(scan.features_dir + 'intensity_' + str(j) + '.pkl', 'rb'))\n\n feature = np.zeros((len(modalities), numBins))\n for m, mod in enumerate(modalities):\n feature[m, :] = lesion_feature[mod]\n\n data.append(np.ndarray.flatten(feature))\n \n return np.asarray(data)\n \ndef loadRIFT(mri_list):\n numBinsTheta = 4\n data = []\n \n for i, scan in enumerate(mri_list):\n for j in range(len(scan.lesionList)):\n lesion_feature = pickle.load(open(scan.features_dir + 'rift_' + str(j) + '.pkl', 'rb'))\n\n feature = np.zeros((len(modalities), numBinsTheta))\n for m, mod in enumerate(modalities):\n feature[m, ...] 
= lesion_feature[mod]\n\n data.append(np.ndarray.flatten(feature))\n \n return np.asarray(data)\n\n\ndef loadContext(mri_list, include_catani):\n numBins = 2\n \n data = []\n \n for i, scan in enumerate(mri_list):\n context_priors = scan.tissues\n if include_catani:\n context_priors += wm_networks\n\n for j, lesion in enumerate(scan.lesionList):\n lesion_feature = pickle.load(open(scan.features_dir + 'context_' + str(j) + '.pkl', 'rb'))\n\n feature = np.zeros((len(context_priors), numBins), dtype='float32')\n\n for k, tissue in enumerate(context_priors):\n feature[k, :] = lesion_feature[tissue]\n data.append(np.ndarray.flatten(feature))\n\n data = np.asarray(np.asarray(data))\n print('data:', data[0])\n return data\n\n\ndef loadLBP(mri_list):\n data = []\n \n for i, scan in enumerate(mri_list):\n for j in range(len(scan.lesionList)):\n lesion_feature = pickle.load(open(scan.features_dir + 'lbp_' + str(j) + '.pkl', 'rb'))\n\n feature = np.zeros((len(modalities), len(lbpRadii), 9))\n for m, mod in enumerate(modalities):\n feature[m, ...] = lesion_feature[mod]\n data.append(np.ndarray.flatten(feature))\n\n return np.asarray(np.asarray(data))\n\n\ndef loadAllData(mri_list, include_catani):\n\n context = loadContext(mri_list, include_catani)\n rift = loadRIFT(mri_list)\n lbp = loadLBP(mri_list)\n intensity = loadIntensity(mri_list)\n\n size_feature = []\n for scan in mri_list:\n for lesion in scan.lesionList:\n size_feature.append(len(lesion))\n\n size_feature = np.reshape(size_feature, ((len(size_feature), 1))) / np.max(size_feature)\n\n print('Feature vector sizes:')\n print('Context:', context.shape)\n print('RIFT:', rift.shape)\n print('LBP:', lbp.shape)\n print('Intensity:', intensity.shape)\n print('Size:', size_feature.shape)\n\n feature_data = np.hstack((context, rift, lbp, intensity, size_feature))\n\n return feature_data\n \n \ndef loadClinical(mri_list):\n new_mri_list, without_clinical = [], []\n for i, scan in enumerate(mri_list):\n try:\n clinicalData = pickle.load(open(scan.features_dir + 'clinical.pkl', 'rb'))\n # print(scan.uid, clinicalData)\n\n scan.newT2 = int(clinicalData['newT2'])\n scan.newGad = int(clinicalData['gad'])\n\n if int(clinicalData['newT2']) > 0:\n scan.activity = 1\n else:\n scan.activity = 0\n\n scan.age = clinicalData['age']\n scan.country = clinicalData['country']\n scan.sex = clinicalData['sex']\n scan.race = clinicalData['race']\n\n scan.relapse = clinicalData['relapse']\n scan.treatment = clinicalData['treatment']\n\n new_mri_list.append(scan)\n except:\n without_clinical.append(scan)\n\n return new_mri_list, without_clinical\n\n\ndef load_responders(responder_filename, mri_list):\n found = 0\n with open(responder_filename) as responder_file:\n responder_mri_list = []\n responder_reader = csv.reader(responder_file)\n\n lines = list(responder_reader)\n\n for scan in mri_list:\n\n responder_info_found = False\n\n for line in lines:\n if line[0] in scan.uid:\n print('Found responder info!')\n responder = int(line[2])\n scan.responder = responder\n responder_info_found = True\n found += 1\n\n if not responder_info_found:\n print('Couldnt find info for', scan.uid, 'on', scan.treatment)\n scan.responder = 0\n\n responder_mri_list.append(scan)\n\n print('Started with', len(mri_list), 'subjects, have responder info for', found)\n\n return responder_mri_list"
},
{
"alpha_fraction": 0.5398948788642883,
"alphanum_fraction": 0.5764452815055847,
"avg_line_length": 27.87586212158203,
"blob_id": "cec3baaef1b4337e618abc0c11f8f5c58601a1db",
"content_id": "1426de2394d595d37177f3caca1cb9b363ec1e54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4186,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 145,
"path": "/classify_lesions3.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 05 19:59:43 2015\n\n@author: Andrew\n\"\"\"\nimport nibabel as nib\nimport numpy as np\nimport os\nimport h5py\n\nfrom scipy.ndimage.filters import gaussian_filter\nfrom sklearn.mixture import GMM\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n\ndata_dir = 'G:/MRI/MS-LAQ/'\n\nmalf_classes = ['bg', 'bs', 'cgm', 'crblr_gm', 'crblr_wm', 'csf', 'dgm', 'lv', 'ov', 'wm']\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ngood_malf_classes = ['cgm', 'dgm', 'wm']\n\nclass mri:\n t1p = ''\n lesions = ''\n \n priors = {}\n \n folder = ''\n uid = ''\n \n images = {} \n def __init__(self, t1p_image):\n \n tokens = t1p_image.split('_')\n \n self.folder = data_dir + tokens[2] + '_' + tokens[3] + '/m0/'\n \n self.uid = tokens[2] + tokens[3] \n \n self.images['t1p'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t1p_ISPC-stx152lsq6.mnc.gz'\n self.images['t2w'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t2w_ISPC-stx152lsq6.mnc.gz'\n self.images['pdw'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_pdw_ISPC-stx152lsq6.mnc.gz'\n self.images['flr'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_flr_ISPC-stx152lsq6.mnc.gz' \n \n self.lesions = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_ct2f_ISPC-stx152lsq6.mnc.gz'\n\n for tissue in malf_classes:\n self.priors[tissue] = self.folder + 'malf/MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_prior_' + tissue + '_ISPC-stx152lsq6.mnc.gz'\n\n\nmri_list = []\n\nfor root, dirs, filenames in os.walk(data_dir): \n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n mri_list.append(mri(f))\n\n#features types: malf-context*scales + image-intensities\n#features types: 10*5 + 4\n\nimage_pixels = 60*256*256\n\nscales = [1]\npriors = len(malf_classes)\nfeature_scales = len(scales)\nnum_mods = len(modalities)\n\nnum_train = 5\nnum_test = 5\n\nfeatures = []\nlabels = []\n\n\nfor i, img in enumerate(mri_list[0:num_train+num_test]):\n print i, '/', num_train+num_test\n not_background = np.nonzero(nib.load(img.priors['bg']).get_data() < 0.3)\n feature = np.zeros((np.shape(not_background)[1], num_mods + priors*feature_scales))\n \n for j, malf in enumerate(malf_classes):\n malf_image = nib.load(img.priors[malf]).get_data()\n \n for k, s in enumerate(scales):\n filtered = gaussian_filter(malf_image, s)\n feature[:, j*feature_scales + k] = filtered[not_background]\n\n for j, mod in enumerate(modalities):\n feature[:, priors*feature_scales + j] = nib.load(img.images[mod]).get_data()[not_background]\n\n\n for f in feature:\n features.append(f)\n\n image_labels = nib.load(img.lesions).get_data()[not_background]\n \n for l in image_labels:\n labels.append(l)\n\n\ntraining_features = features[0:np.shape(features)[0]/2]\ntest_features = features[np.shape(features)[0]/2:]\ntraining_labels = labels[0:len(labels)/2]\ntest_labels = labels[len(features)/2:]\n\nprint \"done getting features & labels\"\n\n\nprint np.shape(training_features)\n\nfor g in [1,2,3,4,5]:\n \n #mix_model = GMM(n_components=g)\n #mix_model.fit(training_features)\n #predictions = mix_model.predict(test_features)\n \n\n forest = RandomForestClassifier(n_estimators=g*100) \n forest.fit(training_features, training_labels)\n forest.predict(test_features)\n \n \n print \"done predictions at level\", g\n \n tp = 0\n fp = 0\n 
fn = 0\n \n total_voxels = image_pixels*num_test\n \n # store the forest's predictions so the confusion counts below can use them\n predictions = forest.predict(test_features)\n \n for i, p in enumerate(predictions): \n if p > 0.0 and test_labels[i] > 0.0:\n tp+=1\n if p > 0.0 and test_labels[i] == 0.0:\n fp+=1\n if p == 0.0 and test_labels[i] > 0.0:\n fn+=1\n \n print \"true positives: \", tp\n print \"false positives: \", fp\n print \"false negatives: \", fn"
},
{
"alpha_fraction": 0.5594890713691711,
"alphanum_fraction": 0.5769534111022949,
"avg_line_length": 46.561248779296875,
"blob_id": "d4e641c882c2edca99aa76264e04fe8c5356ff26",
"content_id": "67195eacb0b9e0340eea51cf5174726099bc3ac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 76098,
"license_type": "no_license",
"max_line_length": 533,
"num_lines": 1600,
"path": "/analyze_lesions.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pickle as pkl\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture, BayesianGaussianMixture\n\nfrom sklearn.decomposition import PCA, FastICA\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom sklearn.metrics import silhouette_score\n\nfrom scipy.spatial.distance import euclidean\n\nimport time\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport nibabel as nib\n\nimport collections\nfrom collections import defaultdict\n\nimport random\nimport sys\n\n# these are the modules that I wrote\nimport context_extraction, load_data\nimport bol_classifiers\n\nimport subprocess\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ntissues = ['csf', 'wm', 'gm', 'pv', 'lesion']\nmetrics = ['newT2']\nfeats = [\"Context\", \"RIFT\", \"LBP\", \"Intensity\"]\nsizes = [\"tiny\", \"small\", \"medium\", \"large\"]\n\nscoringMetrics = ['TP', 'FP', 'TN', 'FN']\n\nwm_tracts = ['Anterior_Segment', 'Arcuate', 'Cingulum', 'Cortico_Ponto_Cerebellum', 'Cortico_Spinal',\n 'Inferior_Cerebellar_Pedunculus', 'Inferior_Longitudinal_Fasciculus',\n 'Inferior_Occipito_Frontal_Fasciculus', 'Long_Segment', 'Optic_Radiations', 'Posterior_Segment',\n 'Superior_Cerebelar_Pedunculus', 'Uncinate', 'Anterior_Commissure', 'Corpus_Callosum', 'Fornix', 'Internal_Capsule']\n\nlbpRadii = [1]\nriftRadii = [3, 6]\nselectK = False\nvisualizeAGroup = True\n\nletters = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)', '(j)', '(k)', '(l)', '(m)', '(n)', '(o)']\n\ntreatments = ['Placebo', 'Laquinimod', 'Avonex']\n\nthreads = 8\n\nplotFeats = False\nusePCA = False\n\ndata_dir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\n \n\n\ndef getNClosest(candidate, n, allLesionFeatures):\n\n distance = np.zeros((np.shape(allLesionFeatures)[0]))\n\n for i, lesionFeatures in enumerate(allLesionFeatures):\n distance[i] = euclidean(candidate, lesionFeatures)\n \n nClosest = distance.argsort()[:n+1]\n\n return nClosest\n\n\ndef getNClosestMahalanobis(candidate, n, allLesionFeatures):\n distances = np.zeros(np.shape(allLesionFeatures)[0])\n variance = np.var(allLesionFeatures, axis=0)\n\n for i, example in enumerate(allLesionFeatures):\n distances[i] = np.sum(np.divide((candidate - example), variance)**2)\n sys.stdout.flush()\n\n nClosest = distances.argsort()[:n]\n\n return nClosest \n\ndef choose_clusters(feature_data, results_dir):\n\n n_clusters, bics, aics, silhouettes, clust_search, time_taken = [], [], [], [], [], []\n\n cluster_range = range(2, 100)\n clust_search.append('')\n clust_search.append('')\n\n for k in cluster_range:\n print('trying ' + str(k) + ' clusters...')\n\n clust_search.append(GaussianMixture(n_components=k, covariance_type='full', max_iter=1000, warm_start=True, verbose=1))\n\n start_cluster_time = time.time()\n clust_search[k].fit(feature_data)\n end_cluster_time = time.time()\n\n if not clust_search[k].converged_:\n print('WARNING: Gaussian Mixture fitting did not converge')\n\n time_taken.append((end_cluster_time - start_cluster_time) / 60)\n n_clusters.append(k)\n\n bics.append(clust_search[k].bic(feature_data))\n aics.append(clust_search[k].aic(feature_data))\n\n labels = clust_search[k].predict(feature_data)\n\n silhouettes.append(silhouette_score(feature_data, labels, random_state=42, sample_size=feature_data.shape[0] // 4))\n\n print('it took ' + str(time_taken[-1]) + ' minutes')\n\n # n_lesion_types = n_clusters[np.argmin(bics)]\n 
n_lesion_types = n_clusters[np.argmin(bics)]\n print(n_lesion_types, 'is the optimal number of lesion-types!')\n print('total time taken for clustering:', str(np.sum(time_taken)))\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\n\n ax1.plot(n_clusters, bics, lw=2, label='Bayesian')\n ax1.plot(n_clusters, aics, lw=2, label='Akaike')\n\n ax1.set_xlabel(\"Lesion-types in model\", fontsize=24)\n ax1.set_ylabel(\"Information Criterion\", fontsize=24)\n\n ax1.plot(n_lesion_types, bics[n_lesion_types], 'ro', fillstyle='none', markersize=5) #circle selected value\n\n ax1.legend(shadow=True, fancybox=True, fontsize=20)\n\n ax2.plot(n_clusters, silhouettes, color='r')\n ax2.set_xlabel(\"Lesion-types in model\", fontsize=24)\n ax2.set_ylabel(\"Silhouette Score\", fontsize=24)\n\n plt.tight_layout()\n plt.savefig(results_dir + 'choosing_clusters.png', bbox_inches='tight')\n plt.close()\n\n return n_lesion_types\n\n\ndef learn_bol(mri_list, feature_data, n_lesion_types, numWithClinical, results_dir, fold_num):\n type_examples = []\n\n max_iterations = 1000\n\n # c = BayesianGaussianMixture(n_components=n_lesion_types, covariance_type='full', weight_concentration_prior_type='dirichlet_process', weight_concentration_prior=1/(n_lesion_types*10), max_iter=max_iterations)\n c = GaussianMixture(n_components=n_lesion_types, covariance_type='full', max_iter=max_iterations, warm_start=True, verbose=1)\n c.fit(feature_data)\n\n while not c.converged_:\n max_iterations *= 2\n # c = BayesianGaussianMixture(n_components=n_lesion_types, covariance_type='full',\n # weight_concentration_prior_type='dirichlet_process',\n # weight_concentration_prior=1 / (n_lesion_types * 10), max_iter=max_iterations)\n # print('WARNING: Learning the Bag of Lesions did not converge, trying again...')\n c = GaussianMixture(n_components=n_lesion_types, covariance_type='full', max_iter=max_iterations, warm_start=True, verbose=1)\n c.fit(feature_data)\n\n cluster_assignments = c.predict(feature_data)\n cluster_probabilities = c.predict_proba(feature_data)\n\n bol_representation = np.zeros((len(mri_list), n_lesion_types), dtype='float32')\n\n # maintain a list of indices for each cluster in each size\n for n in range(n_lesion_types):\n type_examples.append([])\n\n lesion_idx = 0\n for i, scan in enumerate(mri_list):\n for j, lesion in enumerate(scan.lesionList):\n feature_values = feature_data[lesion_idx, ...]\n lesion_type_distribution = cluster_probabilities[lesion_idx, ...]\n\n bol_representation[i, :] += lesion_type_distribution\n\n lesion_type = np.argmax(lesion_type_distribution)\n\n type_examples[lesion_type].append((scan, j, feature_values, lesion_type_distribution))\n\n lesion_idx += 1\n\n for lesion_type_idx in range(n_lesion_types):\n print('Number of lesions in type', lesion_type_idx, ':', len(type_examples[lesion_type_idx]))\n\n print('Lesion-type probabilities shape:', cluster_probabilities.shape)\n\n if fold_num%1 == 0:\n n = 6\n for k in range(n_lesion_types):\n if len(type_examples[k]) > n:\n plt.figure(1, figsize=(15, 15))\n\n random.shuffle(type_examples[k])\n\n for i, example in enumerate(type_examples[k][0:n]):\n scan, lesion_index, feature_val, cluster_probs = example[0], example[1], example[2], example[3]\n\n img = nib.load(scan.images['t2w']).get_data()\n lesionMaskImg = np.zeros((np.shape(img)))\n\n for point in scan.lesionList[lesion_index]:\n lesionMaskImg[point[0], point[1], point[2]] = 1\n\n x, y, z = [int(np.mean(xxx)) for xxx in zip(*scan.lesionList[lesion_index])]\n\n maskImg = 
np.ma.masked_where(lesionMaskImg == 0,\n np.ones((np.shape(lesionMaskImg))) * 5000)\n maskSquare = np.zeros((np.shape(img)))\n maskSquare[x, y-20:y+20, z-20] = 1\n maskSquare[x, y-20:y+20, z+20] = 1\n maskSquare[x, y-20, z-20:z+20] = 1\n maskSquare[x, y+20, z-20:z+20] = 1\n\n square = np.ma.masked_where(maskSquare == 0, np.ones(np.shape(maskSquare)) * 5000)\n\n lesionMaskPatch = maskImg[x, y-20:y+20, z-20:z+20]\n ax = plt.subplot(4, n, i + 1)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.imshow(img[x, 20:200, 20:175], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n ax.imshow(maskImg[x, 20:200, 20:175], cmap=plt.cm.autumn, interpolation='nearest', alpha=0.25, origin='lower')\n ax.imshow(square[x, 20:200, 20:175], cmap=plt.cm.autumn, interpolation='nearest', origin='lower')\n\n ax2 = plt.subplot(4, n, i + 1 + n)\n ax2.imshow(img[x, y-20:y+20, z-20:z+20], cmap=plt.cm.gray, interpolation='nearest', origin='lower')\n ax2.imshow(lesionMaskPatch, cmap=plt.cm.autumn, alpha=0.25, interpolation='nearest', origin='lower')\n # ax2.axes.get_yaxis().set_visible(False)\n ax2.set_yticks([])\n ax2.set_xticks([])\n ax2.set_xlabel(letters[i])\n\n # x = np.linspace(1, feature_data.shape[1], num=feature_data.shape[1])\n ax3 = plt.subplot(4, n, i + 1 + 2*n)\n # ax3.bar(x, feature_val, color='darkred')\n\n if len(feature_val) > 100:\n x_context = np.arange(44)\n x_rift = np.arange(44, 60)\n x_lbp = np.arange(60, 96)\n x_intensity = np.arange(96, 104)\n x_size = 104\n else:\n x_context = np.arange(44- 17*2)\n x_rift = np.arange(44- 17*2, 60- 17*2)\n x_lbp = np.arange(60- 17*2, 96- 17*2)\n x_intensity = np.arange(96- 17*2, 104- 17*2)\n x_size = 104- 17*2\n\n ticks = [x_context[-1] / 2, ((x_rift[-1] - x_rift[0]) / 2) + x_rift[0], ((x_lbp[-1] - x_lbp[0]) / 2) + x_lbp[0], ((x_intensity[-1] - x_intensity[0]) / 2) + x_intensity[0], x_size]\n tick_labels = ['C', 'RIFT', 'LBP', 'I', 'S']\n\n ax3.bar(x_context, feature_val[x_context], color='r')\n ax3.bar(x_rift, feature_val[x_rift], color='g')\n ax3.bar(x_lbp, feature_val[x_lbp], color='b')\n ax3.bar(x_intensity, feature_val[x_intensity], color='orange')\n ax3.bar(x_size, feature_val[x_size], color='m')\n\n ax3.set_xticks(ticks)\n ax3.set_xticklabels(tick_labels)\n\n # data = {}\n # data['context'] = {}\n # data['context']['ICBM Prior'] = feature_val[0:4]\n # data['context']['Lesion Prior'] = feature_val[4]\n # data['context']['Catani Prior'] = feature_val[5:44]\n #\n # data['RIFT'] = {}\n # data['RIFT']['T1w'] = feature_val[44:48]\n # data['RIFT']['T2w'] = feature_val[48:52]\n # data['RIFT']['PDw'] = feature_val[52:56]\n # data['RIFT']['FLR'] = feature_val[56:60]\n #\n # data['LBP'] = {}\n # data['LBP']['T1w'] = feature_val[60:69]\n # data['LBP']['T2w'] = feature_val[69:78]\n # data['LBP']['PDw'] = feature_val[78:87]\n # data['LBP']['FLR'] = feature_val[87:96]\n #\n # data['intensity'] = {}\n # data['intensity']['T1w'] = feature_val[96:98]\n # data['intensity']['T2w'] = feature_val[98:100]\n # data['intensity']['PDw'] = feature_val[100:102]\n # data['intensity']['FLR'] = feature_val[102:104]\n #\n # data['size'] = {}\n # data['size']['vox'] = feature_val[104]\n #\n # label_group_bar(ax3, data)\n ax3.set_ylim([0, 1])\n ax3.set_yticks([])\n\n # for tick in ax3.get_xticklabels():\n # tick.set_rotation(45)\n\n y = np.linspace(1, cluster_probabilities.shape[1], num=cluster_probabilities.shape[1])\n ax4 = plt.subplot(4, n, i + 1 + 3*n)\n ax4.bar(y, cluster_probs, color='darkorange')\n ax4.set_ylim([0, 1])\n ax4.set_yticks([])\n\n\n if i == 0:\n 
ax.set_ylabel('Lesion', fontsize=24)\n ax2.set_ylabel('Close-up', fontsize=24)\n ax3.set_ylabel('Feature values', fontsize=24)\n ax4.set_ylabel('Lesion-type prob.', fontsize=24)\n\n plt.subplots_adjust(wspace=0.01)\n plt.savefig(results_dir + 'fold_' + str(fold_num) + '_lesion_type_' + str(k) + '.png', dpi=600, bbox_inches='tight')\n plt.clf()\n\n try:\n fig, (ax) = plt.subplots(1, 1, figsize=(6, 4))\n\n bins = np.linspace(0, n_lesion_types, num=n_lesion_types+1)\n histo = np.histogram(cluster_assignments, bins=bins)\n\n # print('bins', bins)\n # print('histo', histo[0])\n ax.bar(bins[:-1], histo[0])\n\n plt.tight_layout()\n plt.savefig(results_dir + 'lesion-types-hist_fold_' + str(fold_num) + '.png', bbox_inches='tight')\n plt.close()\n except Exception as e:\n print(e)\n print('Error generating lesion-type histogram for this fold')\n\n return bol_representation[0:numWithClinical, :], c\n\n\ndef project_to_bol(mri_list, feature_data, c):\n lesion_types = c.predict_proba(feature_data)\n\n bol_representation = np.zeros((len(mri_list), lesion_types.shape[-1]))\n\n lesion_idx = 0\n for i, scan in enumerate(mri_list):\n for j, lesion in enumerate(scan.lesionList):\n bol_representation[i, :] += lesion_types[lesion_idx, :]\n lesion_idx += 1\n\n return bol_representation\n\n\ndef createRepresentationSpace(mri_list, dataVectors, lesionSizes, numWithClinical, lesionCentroids, examineClusters=False):\n subtypeShape, clusters, lesionTypes = [], [], []\n brainIndices, lesionIndices, brainsOfType, lesionsOfType = {}, {}, {}, {}\n \n for m, size in enumerate(sizes):\n brainIndices[size], lesionIndices[size], brainsOfType[size], lesionsOfType[size] = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n\n for m, size in enumerate(sizes):\n subtypeShape.append( () )\n subtypeShape[m] += (len(mri_list),)\n \n clusterAssignments = []\n clusterProbabilities = []\n clusters.append([])\n\n for d, data in enumerate(dataVectors):\n lesionFeatures = data[size]\n print(\"START OF\", sizes[m], feats[d])\n print('lesion feature shape:', np.shape(lesionFeatures))\n \n numClusters, bics, aics, scores, clustSearch = [], [], [], [], []\n clustSearch.append(\"\")\n clustSearch.append(\"\")\n \n clusterData, validationData = train_test_split(lesionFeatures, test_size=0.3, random_state=5)\n for k in range(2,4):\n print('trying ' + str(k) + ' clusters...')\n clustSearch.append(GaussianMixture(n_components = k, covariance_type = 'full'))\n clustSearch[k].fit(clusterData)\n \n numClusters.append(k)\n bics.append(clustSearch[k].bic(validationData))\n aics.append(clustSearch[k].aic(validationData))\n scores.append(np.mean(clustSearch[k].score(validationData)))\n\n \n nClusters = numClusters[np.argmin(bics)]\n# nClusters = numClusters[np.argmax(silhouette)]\n \n# fig, (ax, ax2) = plt.subplots(1,2, figsize=(12, 4))\n# \n# ax.plot(numClusters, bics, label=\"BIC\")\n# ax.plot(numClusters, aics, label=\"AIC\")\n# ax.set_xlabel(\"# of \" + feats[d] + \" sub-types of \" + sizes[m] + \" lesions\")\n# ax.set_ylabel(\"Information Criterion (lower is better)\")\n# ax.legend()\n#\n# ax2.plot(numClusters, scores, label=\"Log Prob\")\n# ax2.set_xlabel(\"# of \" + feats[d] + \" sub-types of \" + sizes[m] + \" lesions\")\n# ax2.set_ylabel(\"Average Log Probability (higher is better)\")\n# ax2.legend()\n# \n## ax3.plot(numClusters, silhouette, label=\"Avg. 
Silhouette\")\n## ax3.set_xlabel(\"# of \" + feats[d] + \" sub-types of \" + sizes[m] + \" lesions\")\n## ax3.set_ylabel(\"Average Silhouette (higher is better)\")\n## ax3.legend()\n## \n# plt.tight_layout()\n# plt.show()\n# plt.close()\n \n# else:\n# nClusters = 5\n \n print(\"Selected \" + str(nClusters) + \" clusters for \" + feats[d] + \" in \" + sizes[m] + \" lesions\")\n sys.stdout.flush()\n \n c = GaussianMixture(n_components = nClusters, covariance_type = 'full')\n c.fit(lesionFeatures)\n \n subtypeShape[m] += (nClusters, )\n \n clusterAssignments.append(c.predict(lesionFeatures))\n clusterProbabilities.append(c.predict_proba(lesionFeatures))\n clusters[m].append(c)\n\n lesionTypes.append(np.zeros(subtypeShape[m]))\n \n# randomLesionType = (np.random.randint(shape[1]), np.random.randint(shape[2]), np.random.randint(shape[3]), np.random.randint(shape[4]), m)\n\n print(\"Subtypes for \" + sizes[m] + \": \", subtypeShape[m])\n print(\"Combining lesion subtypes...\")\n\n lesionIndex = 0\n for i, scan in enumerate(mri_list):\n for j, lesion in enumerate(scan.lesionList):\n if (len(lesion) > 2 and len(lesion) < 11 and m == 0) or (len(lesion) > 10 and len(lesion) < 26 and m == 1) or (len(lesion) > 25 and len(lesion) < 101 and m == 2) or (len(lesion) > 100 and m == 3):\n for f1 in range(subtypeShape[m][1]):\n for f2 in range(subtypeShape[m][2]):\n for f3 in range(subtypeShape[m][3]):\n for f4 in range(subtypeShape[m][4]):\n lesionTypes[m][i, f1, f2, f3, f4] += clusterProbabilities[0][lesionIndex, f1]*clusterProbabilities[1][lesionIndex, f2]*clusterProbabilities[2][lesionIndex, f3]*clusterProbabilities[3][lesionIndex, f4]\n\n brainIndices[size][''.join((str(clusterAssignments[0][lesionIndex]),str(clusterAssignments[1][lesionIndex]),str(clusterAssignments[2][lesionIndex]),str(clusterAssignments[3][lesionIndex])))].append(i)\n lesionIndices[size][''.join((str(clusterAssignments[0][lesionIndex]),str(clusterAssignments[1][lesionIndex]),str(clusterAssignments[2][lesionIndex]),str(clusterAssignments[3][lesionIndex])))].append(j)\n\n lesionIndex += 1\n\n if visualizeAGroup:\n n = 6\n \n for f1 in range(subtypeShape[m][1]):\n for f2 in range(subtypeShape[m][2]):\n for f3 in range(subtypeShape[m][3]):\n for f4 in range(subtypeShape[m][4]):\n lesionToViz = ''.join((str(f1),str(f2),str(f3),str(f4)))\n plt.figure(figsize=(8.5,2.5))\n\n for i, (brainIndex, lesionIndex) in enumerate(zip(brainIndices[size][lesionToViz][0:n], lesionIndices[size][lesionToViz][0:n])):\n scan = mri_list[brainIndex]\n img = nib.load(scan.images['t2w']).get_data()\n lesionMaskImg = np.zeros((np.shape(img)))\n \n for point in scan.lesionList[lesionIndex]:\n lesionMaskImg[point[0], point[1], point[2]] = 1\n \n x, y, z = [int(np.mean(xxx)) for xxx in zip(*scan.lesionList[lesionIndex])]\n \n maskImg = np.ma.masked_where(lesionMaskImg == 0, np.ones((np.shape(lesionMaskImg)))*5000) \n maskSquare = np.zeros((np.shape(img)))\n maskSquare[x-10:x+10, y+10, z] = 1\n maskSquare[x-10:x+10, y-10, z] = 1\n maskSquare[x-10, y-10:y+10, z] = 1\n maskSquare[x+10, y-10:y+10, z] = 1\n \n square = np.ma.masked_where(maskSquare == 0, np.ones(np.shape(maskSquare))*5000)\n \n lesionMaskPatch = maskImg[x-20:x+20, y-20:y+20, z]\n ax = plt.subplot(2, n, i+1)\n ax.axis('off')\n ax.imshow(img[20:200,20:200, z].T, cmap = plt.cm.gray, interpolation = 'nearest',origin='lower')\n ax.imshow(maskImg[20:200,20:200, z].T, cmap = plt.cm.autumn, interpolation = 'nearest', alpha = 0.4, origin='lower')\n ax.imshow(square[20:200, 20:200, z].T, cmap = plt.cm.autumn, 
interpolation = 'nearest', origin='lower')\n \n ax3 = plt.subplot(2, n, i+1+n)\n ax3.imshow(img[x-20:x+20, y-20:y+20, z].T, cmap = plt.cm.gray, interpolation = 'nearest', origin='lower')\n ax3.imshow(lesionMaskPatch.T, cmap = plt.cm.autumn, alpha = 0.4, interpolation = 'nearest', origin='lower')\n ax3.axes.get_yaxis().set_visible(False)\n ax3.set_xticks([])\n ax3.set_xlabel(letters[i])\n\n plt.subplots_adjust(wspace=0.01,hspace=0.01)\n plt.savefig(data_dir + 'images/t2lesions-'+ size + '-' + ''.join((str(f1),str(f2),str(f3),str(f4))) + '.png', dpi=600)\n\n pcas, lesionFlat = [], []\n\n if usePCA:\n print(\"applying PCA...\")\n \n pcaTransformedData = []\n for m, size in enumerate(sizes):\n lesionBins = 1\n for dims in np.shape(lesionTypes[m])[1:]:\n lesionBins *= dims\n \n lesionFlat = np.reshape(lesionTypes[m], (len(mri_list), lesionBins))\n pcas.append(PCA(n_components = 0.95).fit(lesionFlat))\n pcaTransformedData.append(pcas[-1].transform(lesionFlat))\n \n data = np.hstack((pcaTransformedData[0], pcaTransformedData[1], pcaTransformedData[2], pcaTransformedData[3]))\n else:\n for m, size in enumerate(sizes):\n lesionBins = 1\n for dims in np.shape(lesionTypes[m])[1:]:\n lesionBins *= dims\n \n lesionFlat.append(np.reshape(lesionTypes[m], (len(mri_list), lesionBins)))\n \n data = np.hstack((lesionFlat[0], lesionFlat[1], lesionFlat[2], lesionFlat[3]))\n\n data = data[:, 0:numWithClinical, ...]\n \n for m, size in enumerate(sizes):\n lesionType = 0\n for f1 in range(subtypeShape[m][1]):\n for f2 in range(subtypeShape[m][2]):\n for f3 in range(subtypeShape[m][3]):\n for f4 in range(subtypeShape[m][4]):\n brainsOfType[size][lesionType] = brainIndices[size][''.join((str(f1),str(f2),str(f3),str(f4)))]\n lesionsOfType[size][lesionType] = lesionIndices[size][''.join((str(f1),str(f2),str(f3),str(f4)))]\n\n lesionType += 1\n\n return data, clusters, pcas, subtypeShape, brainsOfType, lesionsOfType\n\n\ndef testRepresentationSpace(mri_list, dataVectors, lesionSizes, clusters, pcas):\n subtypeShape = []\n lesionTypes = []\n\n for m, size in enumerate(sizes):\n clusterProbabilities = []\n \n subtypeShape.append( () )\n \n subtypeShape[m] += (len(mri_list),)\n subtypeShape[m] += tuple(c.n_components for c in clusters[m])\n \n lesionTypes.append(np.zeros(subtypeShape[m]))\n\n for d, data in enumerate(dataVectors):\n lesionFeatures = data[size]\n c = clusters[m][d]\n \n clusterProbabilities.append(c.predict_proba(lesionFeatures))\n\n lesionIndex = 0\n for i, scan in enumerate(mri_list):\n for j, lesion in enumerate(scan.lesionList):\n if (len(lesion) > 2 and len(lesion) < 11 and m == 0) or (len(lesion) > 10 and len(lesion) < 26 and m == 1) or (len(lesion) > 25 and len(lesion) < 101 and m == 2) or (len(lesion) > 100 and m == 3): \n for f1 in range(subtypeShape[m][1]):\n for f2 in range(subtypeShape[m][2]):\n for f3 in range(subtypeShape[m][3]):\n for f4 in range(subtypeShape[m][4]):\n lesionTypes[m][i, f1, f2, f3, f4] += clusterProbabilities[0][lesionIndex, f1]*clusterProbabilities[1][lesionIndex, f2]*clusterProbabilities[2][lesionIndex, f3]*clusterProbabilities[3][lesionIndex, f4]\n lesionIndex += 1\n \n lesionFlat = []\n if usePCA:\n pcaTransformedData = []\n for m, size in enumerate(sizes):\n lesionBins = 1\n for dims in np.shape(lesionTypes[m])[1:]:\n lesionBins *= dims\n \n lesionFlat = np.reshape(lesionTypes[m], (len(mri_list), lesionBins))\n pcaTransformedData.append(pcas[m].transform(lesionFlat))\n \n data = np.hstack((pcaTransformedData[0], pcaTransformedData[1], pcaTransformedData[2], 
pcaTransformedData[3]))\n else:\n for m, size in enumerate(sizes):\n lesionBins = 1\n for dims in np.shape(lesionTypes[m])[1:]:\n lesionBins *= dims\n \n lesionFlat.append(np.reshape(lesionTypes[m], (len(mri_list), lesionBins)))\n \n data = np.hstack((lesionFlat[0], lesionFlat[1], lesionFlat[2], lesionFlat[3]))\n\n return data\n\n \ndef mk_groups(data):\n try:\n newdata = data.items()\n except:\n return\n\n thisgroup = []\n groups = []\n for key, value in newdata:\n newgroups = mk_groups(value)\n if newgroups is None:\n thisgroup.append((key, value))\n else:\n thisgroup.append((key, len(newgroups[-1])))\n if groups:\n groups = [g + n for n, g in zip(newgroups, groups)]\n else:\n groups = newgroups\n return [thisgroup] + groups\n\n\ndef add_line(ax, xpos, ypos):\n line = plt.Line2D([xpos, xpos], [ypos + .1, ypos],\n transform=ax.transAxes, color='black')\n line.set_clip_on(False)\n ax.add_line(line)\n\n\ndef label_group_bar(ax, data):\n groups = mk_groups(data)\n xy = groups.pop()\n x, y = zip(*xy)\n ly = len(y)\n xticks = range(1, ly + 1)\n\n print(groups)\n print(xticks)\n print(y)\n\n ax.bar(xticks, y, align='center')\n ax.set_xticks([])\n\n ax.set_xlim(.5, ly + .5)\n ax.yaxis.grid(True)\n\n scale = 1. / ly\n for pos in range(ly + 1):\n add_line(ax, pos * scale, -.1)\n ypos = -.2\n while groups:\n group = groups.pop()\n pos = 0\n for label, rpos in group:\n lxpos = (pos + .5 * rpos) * scale\n if label==\"LES\" or label=='T' or label==\"M\" or label==\"S\" or label==\"L\":\n ax.text(lxpos, ypos, label, ha='center', transform=ax.transAxes)\n add_line(ax, pos * scale, ypos)\n pos += rpos\n add_line(ax, pos * scale, ypos)\n ypos -= .1\n\n\ndef getOutcomes(mri_list):\n outcomes = {}\n for metric in metrics:\n outcomes[metric] = []\n for scan in mri_list:\n if metric == 'newT1':\n outcomes[metric].append(scan.newT1)\n elif metric == 'newT2':\n outcomes[metric].append(scan.newT2)\n elif metric == 'newT1andT2':\n outcomes[metric].append(scan.newT1andT2)\n \n return outcomes\n\n\ndef visualizePatientGroupHistograms(trainData, trainClusterAssignments):\n fig, axs = plt.subplots(len(set(trainClusterAssignments)), 1, sharey=True, figsize = (32, 3*len(set(trainClusterAssignments))))\n for group in set(trainClusterAssignments):\n patientsInGroup = []\n \n for i, (patientHistogram, clusterNum) in enumerate(zip(trainData, trainClusterAssignments)):\n if clusterNum == group:\n patientsInGroup.append(patientHistogram)\n \n print(np.shape(np.asarray(patientsInGroup).T))\n axs[group].boxplot(np.asarray(patientsInGroup), 1, '')\n axs[group].set_title('Group ' + str(group) + ' histogram')\n axs[group].set_xticks([])\n axs[group].set_yticks([])\n axs[group].set_xlabel('Lesion Type')\n \n plt.savefig(data_dir + 'images/groupHistograms.png', dpi=200)\n plt.show()\n \n\ndef plotTreatmentHists(mri_list, outcomes):\n data = {}\n fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12,3))\n for m, metric in enumerate(metrics): \n \n data[metric] = collections.OrderedDict()\n \n for treatment in treatments:\n data[metric][treatment] = collections.OrderedDict()\n data[metric][treatment]['active'] = 0 \n data[metric][treatment]['non-active'] = 0\n \n \n for i, (scan, outcome) in enumerate(zip(mri_list, outcomes[metric])):\n for treatment in treatments:\n if scan.treatment == treatment:\n if outcome == 1:\n data[metric][treatment]['active'] += 1\n else:\n data[metric][treatment]['non-active'] += 1\n \n label_group_bar(axs[m], data[metric])\n axs[m].set_title(metric)\n\n plt.show()\n\n\ndef pruneFeatures(trainData, 
testData):\n featureCounts = {}\n for s, size in enumerate(sizes):\n # print np.shape(trainData[size])\n featureCounts[size] = np.zeros((np.shape(trainData[size])[1]))\n\n for s, size in enumerate(sizes):\n testData[size] = testData[size][:, (trainData[size] != 0).sum(axis=0) >= 10]\n trainData[size] = trainData[size][:, (trainData[size] != 0).sum(axis=0) >= 10]\n\n if plotFeats:\n fig, ax = plt.subplots(1, 4, figsize=(14, 4))\n for s, size in enumerate(sizes):\n for d in trainData[size]:\n ax[s].plot(np.ndarray.flatten(trainData[size]))\n\n ax[s].set_title(size)\n\n plt.tight_layout()\n plt.show()\n\n return trainData, testData\n\n\ndef visualizePatientGroups(mri_list, trainData, groups, subtypeShape):\n plt.close()\n \n lesionSizeFeatures = {}\n \n for index, l in enumerate(['T', 'S', 'M', 'L']):\n lesionSizeFeatures[l] = subtypeShape[index][1]*subtypeShape[index][2]*subtypeShape[index][3]*subtypeShape[index][4]\n \n for g in set(groups):\n fig = plt.figure(figsize=(15,6), dpi=500)\n n=0\n ymax = 0\n axes = []\n for i, (scan, hist, group) in enumerate(zip(mri_list, trainData, groups)):\n hist = np.add(hist, 0.01)\n if group == g:\n n+=1\n t2 = nib.load(scan.images['t2w']).get_data()\n \n lesionMaskImg = np.zeros((np.shape(t2)))\n \n for lesion in scan.lesionList:\n for point in lesion:\n lesionMaskImg[point[0], point[1], point[2]] = 1\n \n maskImg = np.ma.masked_where(lesionMaskImg == 0, np.ones((np.shape(lesionMaskImg)))*5000) \n \n ax = fig.add_subplot(2, 6, n)\n ax.imshow(t2[20:200, 20:200, 30].T, cmap=plt.cm.gray, origin='lower')\n ax.imshow(maskImg[20:200,20:200, 30].T, cmap = plt.cm.autumn, interpolation = 'nearest', alpha = 0.4, origin='lower')\n ax.axis('off')\n \n ax2 = fig.add_subplot(2, 6, n+6)\n ax2.bar(range(len(hist)), hist, width=1)\n ax2.set_xticks([0, lesionSizeFeatures['T']/2, lesionSizeFeatures['T'], lesionSizeFeatures['T']+(lesionSizeFeatures['S']/2), lesionSizeFeatures['T']+lesionSizeFeatures['S'], lesionSizeFeatures['T']+lesionSizeFeatures['S']+lesionSizeFeatures['M']/2, lesionSizeFeatures['T']+lesionSizeFeatures['S']+lesionSizeFeatures['M'], lesionSizeFeatures['T']+lesionSizeFeatures['S']+lesionSizeFeatures['M']+lesionSizeFeatures['L']/2, lesionSizeFeatures['T']+lesionSizeFeatures['S']+lesionSizeFeatures['M']+lesionSizeFeatures['L']])\n ax2.set_xticklabels(['', 'T', '', 'S', '', 'M', '', 'L', ''])\n\n if np.amax(hist) > ymax:\n ymax = np.amax(hist)\n \n if n == 6:\n for ax2 in axes:\n ax2.set_ylim([0, ymax])\n break\n \n groupNum = random.randint(0,100)\n print(groupNum)\n plt.subplots_adjust(wspace=0.01,hspace=0.01)\n plt.savefig(data_dir + '/images/patient-groups' + str(groupNum) + '-' + str(g) + '.png', dpi=500)\n# plt.show()\n plt.close()\n \ndef visualizeWhereTreatmentInfoHelps(example, mri_test, testData, mri_train, trainData):\n print('example', example)\n print('test data', testData[example, :])\n sys.stdout.flush()\n closeOnes = getNClosestMahalanobis(testData[example, ...], 20, trainData)\n print('found closest ones:', closeOnes)\n sys.stdout.flush()\n visualize = []\n for index in closeOnes:\n if mri_train[index].treatment == 'Avonex':\n visualize.append(visualize)\n print('picked the Avonex ones')\n visualize = visualize[0:6]\n sys.stdout.flush()\n fig = plt.figure(figsize=(15,4))\n axes = []\n ymax = 0\n for n, index in enumerate(visualize):\n print(index)\n sys.stdout.flush()\n print(np.shape(trainData))\n sys.stdout.flush()\n hist = trainData[index, :]\n print(hist)\n sys.stdout.flush()\n scan = mri_train[index]\n print('loading image...')\n 
sys.stdout.flush()\n t2 = nib.load(scan.images['t2w']).get_data()\n print('image loaded')\n sys.stdout.flush()\n lesionMaskImg = np.zeros((np.shape(t2)))\n \n for lesion in mri_train[index].lesionList:\n for point in lesion:\n lesionMaskImg[point[0], point[1], point[2]] = 1\n \n maskImg = np.ma.masked_where(lesionMaskImg == 0, np.ones((np.shape(lesionMaskImg)))*5000) \n print('image masked')\n sys.std.out.flush()\n ax = fig.add_subplot(2, 6, n+1)\n ax.imshow(t2[20:200, 20:200, 30].T, cmap=plt.cm.gray, origin='lower')\n ax.imshow(maskImg[20:200,20:200, 30].T, cmap = plt.cm.autumn, interpolation = 'nearest', alpha = 0.4, origin='lower')\n ax.axis('off')\n \n print('making hist')\n sys.stdout.flush()\n axes.append(fig.add_subplot(2, 6, n+7))\n axes[-1].bar(range(np.shape(hist)[0]), hist)\n axes[-1].set_xticks([])\n if np.amax(hist) > ymax:\n ymax = np.amax(hist)\n \n if n == 6:\n for ax2 in axes:\n ax2.set_ylim([0, ymax])\n break\n \n plt.subplots_adjust(wspace=0.01,hspace=0.01)\n plt.savefig(data_dir + 'images/example' + str(example) + '.png')\n# plt.show()\n plt.close()\n\n\ndef removeWorstFeatures(trainData, testData, removeThisRound):\n for remove in removeThisRound:\n trainData = np.delete(trainData, remove, 1)\n testData = np.delete(testData, remove, 1)\n\n return trainData, testData\n\n\ndef plotScores(scoring, plotTitle, results_dir):\n try:\n numBars = len(scoring)*4\n colours = ['b', 'g', 'r', 'c', 'm', 'y', 'aqua', 'k', 'gold', 'lightgreen'] \n \n fig, (ax) = plt.subplots(nrows=1, ncols=1, figsize=(4, 4))\n \n for i, (scoreObj, label) in enumerate(scoring):\n x = np.linspace(0, numBars, num=4, dtype='float')\n x = np.add(x, i) #shifts bars over\n y = [np.sum(scoreObj['TP']), np.sum(scoreObj['FP']), np.sum(scoreObj['TN']), np.sum(scoreObj['FN'])]\n\n print(label)\n print(np.sum(scoreObj['TP']), np.sum(scoreObj['FP']), np.sum(scoreObj['TN']), np.sum(scoreObj['FN']))\n \n print('sensitivity: ', np.sum(scoreObj['TP']) / (np.sum(scoreObj['TP']) + np.sum(scoreObj['FN'])))\n print('specificity: ', np.sum(scoreObj['TN']) / (np.sum(scoreObj['TN']) + np.sum(scoreObj['FP'])))\n \n labels = []\n \n print(plotTitle)\n \n plots = []\n for i, (scoreObj, label) in enumerate(scoring):\n labels.append(label)\n scatterPoint = ax.scatter(np.sum(scoreObj['TN']) / (np.sum(scoreObj['TN']) + np.sum(scoreObj['FP'])), np.sum(scoreObj['TP']) / (np.sum(scoreObj['TP']) + np.sum(scoreObj['FN'])), marker='x', s=(100,), color=colours[i])\n plots.append(scatterPoint)\n \n ax.set_ylabel('Sensitivity')\n ax.set_xlabel('Specificity')\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n\n plt.title(plotTitle)\n plt.legend(tuple(plots), tuple(labels), loc='center left', bbox_to_anchor=(1, 0.5), scatterpoints=1, fancybox=True, shadow=True)\n\n plt.savefig(results_dir + '/ss-results-' + str(random.randint(1, 1000)) + '.png', dpi=500, bbox_inches='tight')\n except:\n print(\"couldnt plot\")\n\n \ndef beforeAndAfter():\n plt.close()\n mri_list = context_extraction.loadMRIList()\n \n for i, scan in enumerate(mri_list):\n fig = plt.figure()\n \n subprocess.call(['mnc2nii', scan.newT2, scan.newT2[0:-7] + '.nii'])\n subprocess.call(['gzip', scan.newT2[0:-7] + '.nii'])\n scan.newT2 = scan.newT2[0:-7] + '.nii.gz'\n \n t2 = nib.load(scan.images['t2w'][0:-7] + '.nii.gz').get_data() \n newT2 = nib.load(scan.newT2).get_data()\n \n lesionPoints = nib.load(scan.lesions).get_data()\n lesionList = list(np.asarray(np.nonzero(lesionPoints)).T)\n \n newLesionPoints = nib.load(scan.futureLabels).get_data()\n newLesionList = 
list(np.asarray(np.nonzero(newLesionPoints)).T)\n \n lesionImg = np.zeros(np.shape(t2))\n newLesionImg = np.zeros(np.shape(newT2))\n\n for i, (x, y, z) in enumerate(lesionList):\n lesionImg[x,y,z] = 1\n \n for i, (x, y, z) in enumerate(newLesionList):\n newLesionImg[x,y,z] = 1\n \n maskImg = np.ma.masked_where(lesionImg == 0, np.ones(np.shape(lesionImg))*5000)\n newMaskImg = np.ma.masked_where(newLesionImg == 0, np.ones(np.shape(newLesionImg))*5000)\n\n ax = fig.add_subplot(1, 2, 1)\n ax.imshow(t2[:, :, 30], cmap=plt.cm.gray, origin='lower')\n ax.imshow(maskImg[:, :, 30], cmap = plt.cm.autumn, interpolation = 'nearest', alpha = 0.4, origin='lower')\n ax.axis('off')\n \n ax = fig.add_subplot(1, 2, 2)\n ax.imshow(newT2[:, :, 30], cmap=plt.cm.gray, origin='lower')\n ax.imshow(newMaskImg[:, :, 30], cmap=plt.cm.autumn, interpolation = 'nearest', alpha=0.4, origin='lower')\n ax.axis('off') \n \n plt.savefig(data_dir + 'images/before-after-' + str(i) + '.png', dpi=500)\n plt.show() \n plt.close() \n \n for i, scan in enumerate(mri_list):\n fig = plt.figure(figsize=(12,4))\n \n img = {}\n for j, mod in enumerate(modalities):\n scan.images[mod] = scan.images[mod][0:-7] + '.nii.gz'\n img[mod] = nib.load(scan.images[mod]).get_data()\n ax = fig.add_subplot(1, 4, j+1)\n ax.imshow(img[mod][:, :, 30].T, cmap=plt.cm.gray, origin='lower')\n ax.axis('off')\n ax.set_xlabel(mod)\n \n# plt.tight_layout()\n plt.savefig(data_dir + '/allmods-' + scan.uid + '.png', dpi=500)\n plt.show()\n plt.close()\n \n for i, scan in enumerate(mri_list):\n fig = plt.figure(figsize=(12,4))\n \n img = {}\n for j, mod in enumerate(modalities):\n img[mod] = nib.load(scan.rawImages[mod]).get_data()\n ax = fig.add_subplot(1, 4, j+1)\n ax.imshow(img[mod][30, 10:210, 10:180], cmap=plt.cm.gray, origin='lower')\n ax.axis('off')\n ax.set_xlabel(mod)\n \n# plt.tight_layout()\n plt.savefig(data_dir + '/images/rawmods-' + scan.uid + '.png', dpi=500)\n plt.show()\n plt.close()\n\n\ndef separatePatientsByTreatment(mri_train, mri_test, trainData, testData):\n trainingPatientsByTreatment, testingPatientsByTreatment = defaultdict(list), defaultdict(list)\n \n trainingData, testingData = {}, {}\n\n treatmentCountTrains, treatmentCountTest = {}, {}\n treatmentIndexTrains, treatmentIndexTest = {}, {}\n\n for treatment in treatments:\n treatmentCountTrains[treatment] = 0\n treatmentCountTest[treatment] = 0\n treatmentIndexTrains[treatment] = 0\n treatmentIndexTest[treatment] = 0\n\n for scan in mri_train:\n treatmentCountTrains[scan.treatment] += 1\n for scan in mri_test:\n treatmentCountTest[scan.treatment] += 1\n \n for treatment in treatments:\n trainingData[treatment] = np.zeros((treatmentCountTrains[treatment], np.shape(trainData)[-1]))\n testingData[treatment] = np.zeros((treatmentCountTest[treatment], np.shape(testData)[-1]))\n # trainLesionCounts[treatment] = np.zeros((treatmentCountTrains[treatment], np.shape(trainCounts)[1]))\n # testLesionCounts[treatment] = np.zeros((treatmentCountTest[treatment], np.shape(testCounts)[1]))\n\n for i, scan in enumerate(mri_train):\n trainingPatientsByTreatment[scan.treatment].append(scan)\n trainingData[scan.treatment][treatmentIndexTrains[scan.treatment],:] = trainData[i,:]\n # trainLesionCounts[scan.treatment][treatmentIndexTrains[scan.treatment],:] = trainCounts[i,:]\n treatmentIndexTrains[scan.treatment] += 1\n \n for i, scan in enumerate(mri_test):\n testingPatientsByTreatment[scan.treatment].append(scan)\n testingData[scan.treatment][treatmentIndexTest[scan.treatment],:] = testData[i,:]\n # 
testLesionCounts[scan.treatment][treatmentIndexTest[scan.treatment],:] = testCounts[i,:]\n treatmentIndexTest[scan.treatment] += 1\n \n for treatment in treatments:\n print('training shape:', treatment, np.shape(trainingData[treatment]))\n print('testing shape:', treatment, np.shape(testingData[treatment]))\n \n return trainingPatientsByTreatment, testingPatientsByTreatment, trainingData, testingData\n\n# we want to show here where the placebo-trained model failed to predict a patient showing activity\n# this means that the drug had an effect, because it messed up our pre-trained prediction\ndef showWhereTreatmentHelped(pretrained_predictions, predictions, train_data, test_data, train_outcomes, test_outcomes, train_mri, test_mri, results_dir):\n respondersRight, respondersWrong = 0, 0\n \n responder_prediction, responder_actual, responder_certain_actual, responder_certain_prediction = [], [], [], []\n\n all_responders_info = []\n\n for test_index, (pretrained_prediction, prediction, test_outcome) in enumerate(zip(pretrained_predictions, predictions, test_outcomes)):\n \n if pretrained_prediction[1] > 0.5 and test_outcome == 0: \n responder_actual.append(1)\n else:\n responder_actual.append(0)\n \n if pretrained_prediction[1] > 0.8 and test_outcome == 0:\n responder_certain_actual.append(1)\n else:\n responder_certain_actual.append(0)\n \n print('values (probs, drug prediction, actual): ', pretrained_prediction[1], prediction, test_outcome)\n \n if pretrained_prediction[1] > 0.5 and prediction[1] < 0.5:\n responder_prediction.append(1)\n else:\n responder_prediction.append(0)\n \n if pretrained_prediction[1] > 0.8 and prediction[1] < 0.8:\n responder_certain_prediction.append(1)\n else:\n responder_certain_prediction.append(0)\n \n if pretrained_prediction[1] > 0.8 and prediction[1] < 0.8 and test_outcome == 0:\n scan = test_mri[test_index]\n t2_test = nib.load(scan.images['t2w']).get_data()\n testLesionPoints = nib.load(scan.lesions).get_data()\n testLesionList = list(np.asarray(np.nonzero(testLesionPoints)).T)\n\n responder_info = dict()\n responder_info['uid'] = scan.uid\n responder_info['treatment'] = scan.treatment\n responder_info['t2_lesions'] = len(testLesionList)\n responder_info['P(A=1|BoL, untr)'] = pretrained_prediction[1]\n responder_info['P(A=0|BoL, tr)'] = prediction[0]\n all_responders_info.append(responder_info)\n\n testLesionImg = np.zeros(np.shape(t2_test))\n\n for (x, y, z) in testLesionList:\n testLesionImg[x,y,z] = 1\n\n maskImg = np.ma.masked_where(testLesionImg == 0, np.ones(np.shape(testLesionImg))*5000)\n n=4\n \n fig, axes = plt.subplots(2, n+1, sharey='row', figsize=(10, 4))\n axes[0,0].set_xticks([])\n axes[0,0].set_yticks([])\n axes[0,0].imshow(t2_test[30, :, :], cmap=plt.cm.gray, origin='lower')\n axes[0,0].imshow(maskImg[30, :, :], cmap = plt.cm.autumn, interpolation = 'nearest', alpha = 0.4, origin='lower')\n \n if scan.treatment == \"Avonex\":\n axes[0,0].set_xlabel('Responder (Drug A)')\n else:\n axes[0,0].set_xlabel('Responder (Drug B)')\n axes[0,0].set_xticks([])\n axes[0,0].set_yticks([])\n \n x = np.linspace(1, len(test_data[test_index]), num=len(test_data[test_index]))\n axes[1,0].bar(x, test_data[test_index])\n axes[1,0].set_xlabel('Lesion-Types')\n \n closest_index = getNClosestMahalanobis(test_data[test_index], n, train_data)\n\n print(\"Responder:\", scan.uid)\n for i, closest in enumerate(closest_index): \n train_scan = train_mri[closest]\n print(\"closest:\", train_scan.uid)\n \n t2_train = nib.load(train_scan.images['t2w']).get_data()\n \n 
trainLesionPoints = nib.load(train_scan.lesions).get_data()\n trainLesionList = list(np.asarray(np.nonzero(trainLesionPoints)).T)\n trainLesionImg = np.zeros(np.shape(t2_train))\n\n for (x, y, z) in trainLesionList:\n trainLesionImg[x,y,z] = 1\n\n newMaskImg = np.ma.masked_where(trainLesionImg == 0, np.ones(np.shape(trainLesionImg))*5000)\n axes[0,i+1].set_xticks([])\n axes[0,i+1].set_yticks([])\n \n axes[0,i+1].imshow(t2_train[30, :, :], cmap=plt.cm.gray, origin='lower')\n axes[0,i+1].imshow(newMaskImg[30, :, :], cmap=plt.cm.autumn, interpolation = 'nearest', alpha=0.4, origin='lower')\n axes[0,i+1].set_title('Close Patient') \n if scan.newT2 > 0:\n axes[0,i+1].set_xlabel('(active)')\n else:\n axes[0,i+1].set_xlabel('(non-active)')\n axes[0,i+1].set_xticks([])\n axes[0,i+1].set_yticks([])\n\n x = np.linspace(1, len(train_data[closest]), num=len(train_data[closest]))\n \n axes[1,i+1].set_xlabel('Lesion-Types')\n axes[1,i+1].bar(x, train_data[closest])\n \n plt.savefig(results_dir + 'responder-' + scan.uid + '.png', dpi=500)\n plt.close()\n \n respondersRight += 1\n \n if pretrained_prediction[1] > 0.5 and prediction[1] < 0.5 and test_outcome == 1:\n respondersWrong += 1\n \n responder_score = bol_classifiers.calculateScores(responder_prediction, responder_actual)\n responder_uncertain_score = bol_classifiers.calculateScores(responder_prediction, responder_certain_actual)\n responder_certain_score = bol_classifiers.calculateScores(responder_certain_prediction, responder_actual)\n responder_more_certain_score = bol_classifiers.calculateScores(responder_certain_prediction, responder_certain_actual)\n \n print(\"Responders(right, wrong)\", respondersRight, respondersWrong)\n\n return respondersRight, respondersWrong, responder_score, responder_uncertain_score, responder_certain_score, responder_more_certain_score, all_responders_info\n\n\ndef justTreatmentGroups():\n start = time.time()\n mri_list = pkl.load(open(data_dir + 'mri_list.pkl', 'rb'))\n mri_list, without_clinical = load_data.loadClinical(mri_list)\n \n outcomes = getOutcomes(mri_list)\n \n kf = StratifiedKFold(outcomes['newT2'], n_folds=50, shuffle=True)\n failedFolds = 0\n\n respondersRight, respondersWrong = {}, {}\n\n certainNumber, certainCorrect, certainNumberPre, certainCorrectPre = defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(dict)\n\n scores = defaultdict(dict)\n\n knnEuclideanScores = defaultdict(dict)\n knnMahalanobisScores = defaultdict(dict)\n subdivisionScores = defaultdict(dict)\n softSubdivisionScores = defaultdict(dict)\n chi2Scores = defaultdict(dict)\n chi2svmScores = defaultdict(dict)\n featureScores = defaultdict(dict)\n svmLinScores = defaultdict(dict)\n svmRadScores = defaultdict(dict)\n rvmScores = defaultdict(dict)\n preTrainedKnnEuclideanScores = defaultdict(dict)\n preTrainedFeatureScores = defaultdict(dict)\n preTrainedSvmLinScores = defaultdict(dict)\n preTrainedSvmRadScores = defaultdict(dict)\n \n countingScores = defaultdict(dict)\n \n bestScores = defaultdict(dict)\n bestKnnEuclideanScores = defaultdict(dict)\n bestKnnMahalanobisScores = defaultdict(dict)\n bestSubdivisionScores = defaultdict(dict)\n bestSoftSubdivisionScores = defaultdict(dict)\n bestChi2Scores = defaultdict(dict)\n bestChi2svmScores = defaultdict(dict)\n bestFeatureScores = defaultdict(dict)\n bestSvmLinScores = defaultdict(dict)\n bestSvmRadScores = defaultdict(dict)\n bestRvmScores = defaultdict(dict)\n bestPreTrainedKnnEuclideanScores = defaultdict(dict) \n bestPreTrainedFeatureScores = 
defaultdict(dict)\n bestPreTrainedSvmLinScores = defaultdict(dict)\n bestPreTrainedSvmRadScores = defaultdict(dict)\n \n probScores = defaultdict(dict)\n allProbScores = defaultdict(dict)\n \n responderScores = defaultdict(dict)\n responderHighProbScores = defaultdict(dict)\n countScores = defaultdict(dict)\n \n r1 = defaultdict(dict)\n r2 = defaultdict(dict)\n r3 = defaultdict(dict)\n r4 = defaultdict(dict)\n \n for treatment in treatments:\n scores[treatment] = defaultdict(list)\n knnEuclideanScores[treatment] = defaultdict(list)\n knnMahalanobisScores[treatment] = defaultdict(list)\n subdivisionScores[treatment] = defaultdict(list)\n softSubdivisionScores[treatment] = defaultdict(list)\n chi2Scores[treatment] = defaultdict(list)\n chi2svmScores[treatment] = defaultdict(list)\n featureScores[treatment] = defaultdict(list)\n svmLinScores[treatment] = defaultdict(list)\n svmRadScores[treatment] = defaultdict(list)\n rvmScores[treatment] = defaultdict(list)\n preTrainedKnnEuclideanScores[treatment] = defaultdict(list)\n preTrainedFeatureScores[treatment] = defaultdict(list)\n bestPreTrainedSvmLinScores[treatment] = defaultdict(list)\n bestPreTrainedSvmRadScores[treatment] = defaultdict(list)\n countingScores[treatment] = defaultdict(list)\n \n bestScores[treatment] = defaultdict(list)\n bestKnnEuclideanScores[treatment] = defaultdict(list)\n bestKnnMahalanobisScores[treatment] = defaultdict(list)\n bestSubdivisionScores[treatment] = defaultdict(list)\n bestSoftSubdivisionScores[treatment] = defaultdict(list)\n bestChi2Scores[treatment] = defaultdict(list)\n bestChi2svmScores[treatment] = defaultdict(list)\n bestFeatureScores[treatment] = defaultdict(list)\n bestSvmLinScores[treatment] = defaultdict(list)\n bestSvmRadScores[treatment] = defaultdict(list)\n bestRvmScores[treatment] = defaultdict(list)\n bestPreTrainedKnnEuclideanScores[treatment] = defaultdict(list)\n bestPreTrainedFeatureScores[treatment] = defaultdict(list)\n preTrainedSvmLinScores[treatment] = defaultdict(list)\n preTrainedSvmRadScores[treatment] = defaultdict(list)\n \n probScores[treatment], allProbScores[treatment] = defaultdict(list), defaultdict(list)\n \n responderScores[treatment], responderHighProbScores[treatment], countScores[treatment] = defaultdict(list), defaultdict(list), defaultdict(list)\n\n certainNumber[treatment], certainCorrect[treatment], certainNumberPre[treatment], certainCorrectPre[treatment] = 0, 0, 0, 0\n respondersRight[treatment], respondersWrong[treatment] = 0, 0\n\n r1[treatment], r2[treatment], r3[treatment], r4[treatment] = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n\n for foldNum, (train_index, test_index) in enumerate(kf.split(range(len(mri_list)))):\n print(foldNum, '/', len(kf))\n \n mri_train = np.asarray(mri_list)[train_index]\n mri_test = np.asarray(mri_list)[test_index]\n \n trainCounts = load_data.loadLesionNumbers(mri_train)\n testCounts = load_data.loadLesionNumbers(mri_test)\n \n print(\"training:\", len(mri_train))\n #incorporate patients with no clinical data\n train_patients = []\n for scan in mri_train:\n train_patients.append(scan)\n for scan in without_clinical:\n train_patients.append(scan)\n \n print('loading data...')\n startLoad = time.time()\n numLesionsTrain, lesionSizesTrain, lesionCentroids, brainUids = load_data.getLesionSizes(train_patients)\n trainDataVectors, lbpPCA = load_data.loadAllData(train_patients, numLesionsTrain)\n \n numLesionsTest, lesionSizesTest, lesionCentroids, brainUids = load_data.getLesionSizes(mri_test)\n 
dataVectorsTest, lbpPCA = load_data.loadAllData(mri_test, numLesionsTest, lbpPCA=lbpPCA)\n \n print('loading data took', (time.time() - startLoad)/60.0, 'minutes')\n print('removing infrequent features...')\n startPruneTime = time.time()\n prunedDataTrain = []\n prunedDataTest = []\n \n for dTrain, dTest in zip(trainDataVectors, dataVectorsTest):\n dTrainPruned, dTestPruned = pruneFeatures(dTrain, dTest)\n prunedDataTrain.append(dTrainPruned)\n prunedDataTest.append(dTestPruned)\n \n del trainDataVectors\n del dataVectorsTest\n print(\"it took\", (time.time() - startPruneTime)/60.0, \"minutes\")\n print('learning bag of lesions...')\n\n startBol = time.time()\n allTrainData, clusters, pcas, subtypeShape, brainIndices, lesionIndices = createRepresentationSpace(train_patients, prunedDataTrain, lesionSizesTrain, len(mri_train), lesionCentroids, examineClusters=False)\n elapsedBol = time.time() - startBol\n print(str(elapsedBol / 60), 'minutes to learn BoL.')\n \n# tfidfTrans = TfidfTransformer()\n# allTrainData = tfidfTrans.fit_transform(allTrainData).toarray()\n \n# pca = None\n# ica = FastICA()\n# ica.fit(data)\n# data = ica.transform(data)\n\n# pca = PCA(n_components=120, copy=False)\n# data = pca.fit_transform(data)\n# print 'explained variance ratio:', np.sum(pca.explained_variance_ratio_)\n\n print('transforming test data to bag of lesions representation...')\n allTestData = testRepresentationSpace(mri_test, prunedDataTest, lesionSizesTest, clusters, pcas) \n \n# allTestData = tfidfTrans.transform(allTestData).toarray()\n# allTrainData, allTestData, lesionSizeFeatures = pruneFeatures(allTrainData, allTestData)\n \n print('splitting data up by treatment group')\n trainingPatientsByTreatment, testingPatientsByTreatment, trainingData, testingData, trainCounts, testCounts = separatePatientsByTreatment(mri_train, mri_test, allTrainData, allTestData, trainCounts, testCounts)\n \n featuresToRemove, c = None, None\n\n print('grouping patients')\n for treatment in treatments:\n try:\n scoreThisFold = True\n \n trainData, testData = trainingData[treatment], testingData[treatment]\n trainDataCopy, testDataCopy = trainData, testData\n trainOutcomes, testOutcomes = getOutcomes(trainingPatientsByTreatment[treatment]), getOutcomes(testingPatientsByTreatment[treatment])\n\n remove_worst_features = True\n if remove_worst_features:\n if treatment == \"Placebo\":\n print('selecting features...')\n bestTrainData, bestTestData, featuresToRemove = bol_classifiers.randomForestFeatureSelection(trainDataCopy, testDataCopy, trainOutcomes['newT2'], testOutcomes['newT2'], 12) \n else:\n bestTrainData, bestTestData = removeWorstFeatures(trainDataCopy, testDataCopy, featuresToRemove)\n else:\n bestTrainData = trainDataCopy\n bestTestData = testDataCopy\n\n print('train, test data shape:', np.shape(bestTrainData), np.shape(bestTestData))\n \n # trainClusterData, validationData = train_test_split(bestTrainData, test_size=0.1, random_state=5)\n # ratio = len(trainOutcomes['newT2'])/ float(np.sum(trainOutcomes['newT2']))\n # smote = SMOTE(ratio=ratio, kind='regular', k=3)\n # \n # print 'oversampling data...'\n # trainData, trainOutcomes['newT2'] = smote.fit_transform(trainData, np.asarray(trainOutcomes['newT2']))\n \n # numClusters = []\n # bics = []\n # aics = []\n # for k in range(2, 12):\n # clust = GMM(n_components = k, covariance_type = 'full')\n # clust.fit(trainClusterData)\n # \n # numClusters.append(k)\n # bic = clust.bic(validationData)\n # aic = clust.aic(validationData)\n # \n # bics.append(bic)\n # 
aics.append(aic)\n ## print k, bic \n #\n # nClusters = numClusters[np.argmin(bics)]\n # \n # plt.plot(numClusters, bics)\n # plt.plot(numClusters, aics)\n # plt.xlabel(\"Number of Clusters\")\n # plt.ylabel(\"Information Criterion Value (Lower is better)\")\n # plt.show()\n ## if nClusters == 2:\n ## nClusters = 5\n # \n # \n # c = GMM(n_components = nClusters, covariance_type = 'full')\n # c.fit(bestTrainData)\n #\n # trainClusterAssignments = c.predict(bestTrainData)\n # testClusterAssignments = c.predict(bestTestData)\n # \n # trainGroupProbs = c.predict_proba(bestTrainData)\n # testGroupProbs = c.predict_proba(bestTestData)\n # \n # \n # visualizePatientGroups(trainingPatientsByTreatment[treatment], trainData, trainClusterAssignments, subtypeShape)\n \n \n # comparing classifiers\n if treatment == \"Placebo\":\n (bestFeatureScore, bestFeaturePredictions, placebo_rf), (probScore, probPredicted), (correct, total) = bol_classifiers.featureClassifier(bestTrainData, bestTestData, trainOutcomes, testOutcomes, subtypeShape, train_patients, mri_test, brainIndices, lesionIndices, len(mri_list)) \n \n (bestChi2Score, bestChi2Predictions), (bestChi2svmscore, bestChi2svmPredictions) = bol_classifiers.chi2Knn(bestTrainData, bestTestData, trainOutcomes, testOutcomes)\n (bestSvmLinScore, bestSvmLinPredictions, svm1), (bestSvmRadScore, bestSvmRadPredictions, svm2) = bol_classifiers.svmClassifier(bestTrainData, bestTestData, trainOutcomes, testOutcomes)\n (bestKnnEuclideanScoreVals, bestEuclideanPredictions), (bestKnnMahalanobisScoreVals, bestMahalanobisPredictions) = bol_classifiers.knn(bestTrainData, trainOutcomes, bestTestData, testOutcomes)\n\n (featureScore, featurePredictions, meh), (allProbScore, allprobPredicted), (allCorrect, allTotal) = bol_classifiers.featureClassifier(trainData, testData, trainOutcomes, testOutcomes, subtypeShape, train_patients, mri_test, brainIndices, lesionIndices, len(mri_list)) \n \n (countingScore, countingPredictions, placebo_nb) = bol_classifiers.countingClassifier(trainCounts[treatment], testCounts[treatment], trainOutcomes, testOutcomes)\n \n # drugged patients\n else:\n # natural course ms model\n (bestPreTrainedFeatureScore, bestPreTrainedFeaturePredictions, meh), (pretrainedProbScore, pretrainedProbPredicted), (correct, total) = bol_classifiers.featureClassifier(bestTrainData, bestTestData, trainOutcomes, testOutcomes, subtypeShape, train_patients, mri_test, brainIndices, lesionIndices, len(mri_list), placebo_rf) \n \n #new model on drugged patients\n (bestFeatureScore, bestFeaturePredictions, meh), (probScore, probDrugPredicted), (correct, total) = bol_classifiers.featureClassifier(bestTrainData, bestTestData, trainOutcomes, testOutcomes, subtypeShape, train_patients, mri_test, brainIndices, lesionIndices, len(mri_list)) \n \n # (bestPreTrainedKnnEuclideanScoreVals, bestEuclideanPredictions, meh) = bol_classifiers.knn(bestTrainData, trainOutcomes, bestTestData, testOutcomes, clf)\n # (bestPreTrainedSvmLinScore, bestPreTraindSvmLinearPredictions, meh), (bestPreTrainedSvmRadScore, bestSvmRadialPredictions, meh) = bol_classifiers.svmClassifier(bestTrainData, bestTestData, trainOutcomes, testOutcomes, svm1, svm2)\n certainNumber[treatment] += total\n certainCorrect[treatment] += correct\n \n right, wrong, r1_score, r2_score, r3_score, r4_score = showWhereTreatmentHelped(pretrainedProbPredicted, probDrugPredicted, bestTrainData, bestTestData, trainOutcomes['newT2'], testOutcomes['newT2'], trainingPatientsByTreatment[treatment], 
testingPatientsByTreatment[treatment])\n \n respondersRight[treatment] += right\n respondersWrong[treatment] += wrong\n \n print('responders right', respondersRight)\n print('responders wrong', respondersWrong)\n \n (responderScore, responderProbs), responderHighProbScore, count_score = bol_classifiers.identifyResponders(bestTrainData, bestTestData, trainOutcomes, testOutcomes, trainCounts[treatment], testCounts[treatment], placebo_rf, placebo_nb) \n \n certainNumberPre[treatment] += total\n certainCorrectPre[treatment] += correct\n # full feature set\n # try:\n # (softSubdivisionScore, softSubdivisionPredictions) = bol_classifiers.softSubdividePredictGroups(trainData, trainClusterAssignments, trainOutcomes, testData, testGroupProbs, testOutcomes, nClusters)\n # except:\n # print 'ERROR: Couldnt do this one'\n \n \n # (chi2Score, chi2Predictions), (chi2svmscore, chi2svmPredictions) = bol_classifiers.chi2Knn(trainData, testData, trainOutcomes, testOutcomes)\n # (svmLinearScore, svmLinearPredictions), (svmRadialScore, svmRadialPredictions) = bol_classifiers.svmClassifier(trainData, testData, trainOutcomes, testOutcomes)\n # (bayesScoreVals, bayesPredictions) = bol_classifiers.predictOutcomeGivenGroups(trainGroupProbs, trainOutcomes, testGroupProbs, testOutcomes, testClusterAssignments) \n # (knnEuclideanScoreVals, euclideanPredictions), (knnMahalanobisScoreVals, mahalanobisPredictions) = bol_classifiers.knn(trainData, trainOutcomes, testData, testOutcomes)\n # (knnEuclideanScoreVals, euclideanPredictions) = bol_classifiers.knn(trainData, trainOutcomes, testData, testOutcomes)\n # try:\n # (rvmScoreVals, rvmPredictions) = bol_classifiers.rvmClassifier(trainData, testData, trainOutcomes, testOutcomes)\n # except:\n # pass\n \n # plotGroupDistribution(trainOutcomes['newT2'], testOutcomes['newT2'], trainClusterAssignments, testClusterAssignments, nClusters, softSubdivisionPredictions, 'Activity Subgroups')\n \n for scoreMet in scoringMetrics + ['sensitivity', 'specificity']:\n ## scores[treatment][scoreMet].append(bayesScoreVals['newT2'][scoreMet])\n # knnEuclideanScores[treatment][scoreMet].append(knnEuclideanScoreVals['newT2'][scoreMet])\n # knnMahalanobisScores[treatment][scoreMet].append(knnMahalanobisScoreVals['newT2'][scoreMet])\n ## softSubdivisionScores[treatment][scoreMet].append(softSubdivisionScore['newT2'][scoreMet])\n ## chi2Scores[treatment][scoreMet].append(chi2Score['newT2'][scoreMet])\n ## chi2svmScores[treatment][scoreMet].append(chi2svmscore['newT2'][scoreMet])\n featureScores[treatment][scoreMet].append(featureScore['newT2'][scoreMet])\n ## svmLinScores[treatment][scoreMet].append(svmLinearScore['newT2'][scoreMet])\n ## svmRadScores[treatment][scoreMet].append(svmRadialScore['newT2'][scoreMet])\n ## rvmScores[treatment][scoreMet].append(rvmScoreVals['newT2'][scoreMet])\n \n #bad classifiers\n bestKnnEuclideanScores[treatment][scoreMet].append(bestKnnEuclideanScoreVals['newT2'][scoreMet])\n bestKnnMahalanobisScores[treatment][scoreMet].append(bestKnnMahalanobisScoreVals['newT2'][scoreMet])\n bestChi2Scores[treatment][scoreMet].append(bestChi2Score['newT2'][scoreMet])\n bestChi2svmScores[treatment][scoreMet].append(bestChi2svmscore['newT2'][scoreMet])\n bestFeatureScores[treatment][scoreMet].append(bestFeatureScore['newT2'][scoreMet])\n bestSvmLinScores[treatment][scoreMet].append(bestSvmLinScore['newT2'][scoreMet])\n bestSvmRadScores[treatment][scoreMet].append(bestSvmRadScore['newT2'][scoreMet])\n \n \n 
countingScores[treatment][scoreMet].append(countingScore['newT2'][scoreMet])\n probScores[treatment][scoreMet].append(probScore[scoreMet])\n allProbScores[treatment][scoreMet].append(probScore[scoreMet])\n \n \n if treatment != \"Placebo\":\n preTrainedFeatureScores[treatment][scoreMet].append(bestPreTrainedFeatureScore['newT2'][scoreMet])\n # preTrainedKnnEuclideanScores[treatment][scoreMet].append(bestPreTrainedKnnEuclideanScoreVals['newT2'][scoreMet])\n # preTrainedSvmLinScores[treatment][scoreMet].append(bestPreTrainedSvmLinScore['newT2'][scoreMet])\n # preTrainedSvmRadScores[treatment][scoreMet].append(bestPreTrainedSvmRadScore['newT2'][scoreMet])\n responderScores[treatment][scoreMet].append(responderScore[scoreMet])\n responderHighProbScores[treatment][scoreMet].append(responderHighProbScore[scoreMet])\n countScores[treatment][scoreMet].append(count_score[scoreMet])\n \n r1[treatment][scoreMet].append(r1_score[scoreMet])\n r2[treatment][scoreMet].append(r2_score[scoreMet])\n r3[treatment][scoreMet].append(r3_score[scoreMet])\n r4[treatment][scoreMet].append(r4_score[scoreMet])\n \n except:\n failedFolds += 1\n scoreThisFold = False\n \n if scoreThisFold:\n for treatment in treatments:\n if treatment == \"Placebo\":\n bestScoring = []\n bestScoring.append((bestKnnEuclideanScores[treatment], \"NN-Euclidean\"))\n bestScoring.append((bestKnnMahalanobisScores[treatment], \"NN-Mahalanobis\"))\n bestScoring.append((bestChi2Scores[treatment], \"NN-$\\chi^2$\"))\n \n \n bestScoring.append((bestSvmLinScores[treatment], \"SVM-Linear\"))\n bestScoring.append((bestSvmRadScores[treatment], \"SVM-RBF\"))\n bestScoring.append((bestChi2svmScores[treatment], \"SVM-$\\chi^2$\"))\n \n bestScoring.append((bestFeatureScores[treatment], \"Random Forest\"))\n bestScoring.append((countingScores[treatment], \"Naive Bayes (Lesion Counts)\"))\n \n plotScores(bestScoring, 'Activity Prediction (Untreated)')\n \n if treatment == \"Placebo\":\n bestScoring = []\n \n bestScoring.append((featureScores[treatment], \"Random Forest (all lesions)\"))\n bestScoring.append((allProbScores[treatment], \"Random Forest (all lesions, certain)\"))\n \n bestScoring.append((bestFeatureScores[treatment], \"Random Forest (best lesions)\"))\n bestScoring.append((probScores[treatment], \"Random Forest (best lesions, certain)\"))\n # plotScores(bestScoring, '')\n \n for treatment in treatments:\n if treatment == \"Avonex\":\n # plotScores([(responderScores[treatment], 'Responders'), (responderHighProbScores[treatment], 'Responders (certain)'), (countScores[treatment], 'Responders (lesion counts)')], \"Avonex Responder Prediction\")\n plotScores([(r1[treatment], 'Responders'), (r2[treatment], 'Responders (certain GT)'), (r3[treatment], 'Responders (certain prediction)'), (r4[treatment], 'Responders (all certain)')], \"Avonex Responder Prediction\")\n elif treatment == \"Laquinimod\":\n # plotScores([(responderScores[treatment], 'Responders'), (responderHighProbScores[treatment], 'Responders (certain)'), (countScores[treatment], 'Responders (lesion counts)')], \"Laquinimod Responder Prediction\")\n plotScores([(r1[treatment], 'Responders'), (r2[treatment], 'Responders (certain GT)'), (r3[treatment], 'Responders (certain prediction)'), (r4[treatment], 'Responders (all certain)')], \"Laquinimod Responder Prediction\")\n \n bestScoring = []\n \n for treatment in treatments:\n # scoring = []\n ## scoring.append((softSubdivisionScores[treatment], 'Activity Subgroups'))\n ## scoring.append((scores[treatment], 'Group Membership'))\n # 
scoring.append((knnEuclideanScores[treatment], 'Nearest Neighbour: Euclidean'))\n ## scoring.append((chi2Scores[treatment], 'Nearest Neighbour: $\\chi^2$'))\n ## scoring.append((knnMahalanobisScores[treatment], 'Nearest Neighbour: Mahalanobis'))\n ## scoring.append((chi2svmScores[treatment], 'SVM: $\\chi^2$')) \n ## scoring.append((svmLinScores[treatment], 'SVM: Linear'))\n ## scoring.append((svmRadScores[treatment], 'SVM: RBF'))\n # scoring.append((featureScores[treatment], 'Random Forest'))\n ## scoring.append((rvmScores[treatment], 'RVM: RBF'))\n # \n # plotScores(scoring, treatment + 'all features fold ' + str(foldNum))\n \n \n # bestScoring.append((bestSoftSubdivisionScores[treatment], 'Activity Subgroups'))\n # bestScoring.append((bestScores[treatment], 'Group Membership'))\n # bestScoring.append((bestKnnEuclideanScores[treatment], 'Nearest Neighbour: Euclidean'))\n # bestScoring.append((bestChi2Scores[treatment], 'Nearest Neighbour: $\\chi^2$'))\n # bestScoring.append((bestKnnMahalanobisScores[treatment], 'Nearest Neighbour: Mahalanobis'))\n # bestScoring.append((bestChi2svmScores[treatment], 'SVM: $\\chi^2$')) \n # bestScoring.append((bestSvmLinScores[treatment], 'SVM: Linear'))\n # bestScoring.append((bestSvmRadScores[treatment], 'SVM: RBF'))\n \n \n if treatment == \"Placebo\":\n bestScoring.append((bestFeatureScores[treatment], 'Untreated ($\\\\alpha=0.5$)'))\n bestScoring.append((probScores[treatment], 'Untreated ($\\\\alpha=0.8$)'))\n # bestScoring.append((countingScores[treatment], 'Naive Bayesian (Lesion Counts)'))\n \n if treatment == \"Avonex\":\n bestScoring.append((preTrainedFeatureScores[treatment], 'Untreated Predictor on Drug A'))\n bestScoring.append((bestFeatureScores[treatment], 'Drug A ($\\\\alpha=0.5$)'))\n bestScoring.append((probScores[treatment], 'Drug A ($\\\\alpha=0.8$)'))\n \n if treatment == \"Laquinimod\":\n bestScoring.append((preTrainedFeatureScores[treatment], 'Untreated Predictor on Drug B'))\n bestScoring.append((bestFeatureScores[treatment], 'Drug B ($\\\\alpha=0.5$)'))\n bestScoring.append((probScores[treatment], 'Drug B ($\\\\alpha=0.8$)'))\n \n plotScores(bestScoring, \"Activity Prediction\")\n \n\n \n print(\"FAILED FOLDS:\", failedFolds)\n\n print('certain correct pretrained', certainCorrectPre)\n print('certain total pretrained', certainNumberPre)\n\n print('certain correct', certainCorrect)\n print('certain total', certainNumber)\n \n end = time.time()\n elapsed = end - start\n print(str(elapsed / 60), 'minutes elapsed.')\n\nif __name__ == \"__main__\":\n# beforeAndAfter()\n plt.ion()\n justTreatmentGroups()\n"
},
{
"alpha_fraction": 0.6499269008636475,
"alphanum_fraction": 0.6623538136482239,
"avg_line_length": 35.774192810058594,
"blob_id": "0a06b201aa032080f803ac60c92788ee24ed1f51",
"content_id": "e27c66b7ca4f6fd59159af26c2f80cad3d49dee0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13680,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 372,
"path": "/bol_classifiers.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB\nfrom sklearn.metrics.pairwise import chi2_kernel\n\nfrom sklearn.covariance import EmpiricalCovariance\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nfrom sklearn.svm import SVC\nfrom scipy import stats\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom keras.layers import Dense, Dropout, BatchNormalization\nfrom keras.models import Model, Sequential, load_model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom sklearn.utils import class_weight\n\nfrom keras.constraints import max_norm\n\nfrom keras import backend as K\n\nimport lime\nimport lime.lime_tabular\n\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ntissues = ['csf', 'wm', 'gm', 'pv', 'lesion']\nmetrics = ['newT2']\nfeats = [\"Context\", \"RIFT\", \"LBP\", \"Intensity\"]\n\nscoringMetrics = ['TP', 'FP', 'TN', 'FN']\n\nselectK = False\nvisualizeAGroup = False\n\nletters = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)', '(j)', '(k)', '(l)', '(m)', '(n)', '(o)']\n\ntreatments = ['Placebo', 'Laquinimod', 'Avonex']\ntreatment_labels = ['Untreated', 'Drug A', 'Drug B']\n\nthreads = 1\n\nplotFeats = False\nusePCA = False\n\ndef calculateScores(predictions, actual):\n score = {}\n \n for scoreMet in scoringMetrics:\n score[scoreMet] = 0.0\n \n for predicted, actual in zip(predictions, actual):\n if predicted >= 0.5 and actual == 1:\n score['TP'] += 1.0\n elif predicted >= 0.5 and actual == 0:\n score['FP'] += 1.0\n elif predicted < 0.5 and actual == 0:\n score['TN'] += 1.0\n elif predicted < 0.5 and actual == 1:\n score['FN'] += 1.0\n \n try:\n score['sensitivity'] = score['TP'] / (score['TP'] + score['FN'])\n except:\n score['sensitivity'] = 0\n \n try:\n score['specificity'] = score['TN'] / (score['TN'] + score['FP'])\n except:\n score['specificity'] = 0\n\n return score\n \n\ndef countingClassifier(trainCounts, testCounts, trainOutcomes, testOutcomes):\n countingScore = {}\n \n for metric in metrics:\n nb = GaussianNB()\n nb.fit(trainCounts, trainOutcomes[metric])\n \n predictions = nb.predict(testCounts)\n \n countingScore[metric] = calculateScores(predictions, testOutcomes[metric])\n\n return (countingScore, predictions, nb)\n\n\ndef random_forest(trainData, testData, trainOutcomes, rf=None):\n\n if rf == None:\n print('training random forest...')\n rf = RandomForestClassifier(class_weight='balanced', n_estimators=3000, n_jobs=-1)\n rf.fit(trainData, trainOutcomes)\n\n predictions = rf.predict(testData)\n probabilities = rf.predict_proba(testData)\n \n return predictions, rf, probabilities\n\n\ndef identify_responders(trainData, testData, trainOutcomes, testOutcomes, train_patients, test_patients, drug_rf, placebo_rf):\n relapse_certainty = 0.8\n\n train_activity = placebo_rf.predict_proba(trainData)\n test_activity = placebo_rf.predict_proba(testData)\n \n responder_label_train = np.zeros((len(trainOutcomes)), dtype='bool')\n responder_label_test = np.zeros((len(testOutcomes)), dtype='bool')\n\n responder_train_weight = np.zeros((len(trainOutcomes)), dtype='float')\n\n # BoL RF responder setup\n for index, (prediction, actual) in enumerate(zip(train_activity, trainOutcomes)):\n #predicted active but actually inactive\n if prediction[1] > relapse_certainty and actual 
== 0:\n responder_label_train[index] = 1\n responder_train_weight[index] = prediction[1]\n else:\n responder_label_train[index] = 0\n responder_train_weight[index] = prediction[0]\n \n for index, (prediction, actual) in enumerate(zip(test_activity, testOutcomes)):\n if prediction[1] > relapse_certainty and actual == 0:\n responder_label_test[index] = 1\n else:\n responder_label_test[index] = 0\n \n print('training responders:', np.sum(responder_label_train))\n print('training non-responders:', (len(trainOutcomes) - np.sum(responder_label_train)))\n \n print('testing responders:', np.sum(responder_label_test))\n print('testing non-responders:', (len(testOutcomes) - np.sum(responder_label_test)))\n\n predictions = drug_rf.predict_proba(testData)\n \n responder_predictions = [] \n \n for index, (prediction, actual) in enumerate(zip(predictions, responder_label_test)):\n if prediction[1] > 0.5:\n responder_predictions.append(1)\n if prediction[0] > 0.5:\n responder_predictions.append(0)\n \n responder_score = calculateScores(responder_predictions, responder_label_test)\n \n\n high_prob_responder_predictions = []\n high_prob_responder_actual = []\n\n for index, (prediction, actual) in enumerate(zip(predictions, responder_label_test)):\n print('high prob responder predictions:', prediction, actual)\n if prediction[1] > 0.8:\n high_prob_responder_predictions.append(1)\n high_prob_responder_actual.append(actual)\n if prediction[0] > 0.8:\n high_prob_responder_predictions.append(0)\n high_prob_responder_actual.append(actual)\n \n print('high probability predictions:', len(high_prob_responder_predictions))\n high_prob_scores = calculateScores(high_prob_responder_predictions, high_prob_responder_actual)\n\n return (responder_score, responder_predictions), high_prob_scores\n\n\ndef mlp(train_data, test_data, train_outcomes, test_outcomes, fold_num, results_dir):\n model = Sequential()\n model.add(Dense(32, activation='relu', kernel_constrain=max_norm(), input_shape=(train_data.shape[1],)))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(32, kernel_constraint=max_norm(), activation='relu'))\n # model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n\n model_checkpoint = ModelCheckpoint(results_dir + \"fold_\" + str(fold_num) + \"_best_weights.hdf5\", monitor=\"categorical_accuracy\", save_best_only=True)\n\n adam = Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-5, amsgrad=False)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n # model.summary()\n\n train_labels = to_categorical(train_outcomes, num_classes=2)\n test_labels = to_categorical(test_outcomes, num_classes=2)\n\n class_weights = class_weight.compute_class_weight('balanced', np.unique(train_outcomes), train_outcomes)\n\n hist = model.fit(train_data, train_labels, batch_size=128, epochs=1200, validation_data=(test_data, test_labels), callbacks=[model_checkpoint], verbose=False, class_weight=class_weights)\n\n # print(model.metrics_names)\n\n plt.figure(figsize=(6, 6))\n plt.plot(hist['categorical_accuracy'], label='Training')\n plt.plot(hist['val_categorical_accuracy'], label='Validation')\n plt.legend(loc='lower right', textsize=20)\n plt.xlabel('Epoch #', fontsize=20)\n plt.ylabel('% accuracy', fontsize=20)\n plt.savefig(results_dir + 'fold_' + str(fold_num) + '_mlp_accuracy.png', dpi=500)\n\n\n model.load_weights(results_dir + \"fold_\" + str(fold_num) + \"_best_weights.hdf5\")\n 
model.save(results_dir + 'best_bol_model' + str(fold_num) + '.hdf5')\n\n deep_probabilities = model.predict_proba(test_data)\n\n train_labels = to_categorical(train_outcomes, num_classes=2)\n\n explainer = lime.lime_tabular.LimeTabularExplainer(train_data, training_labels=train_labels, discretize_continuous=True, discretizer='quartile', class_names=['Inactive', 'Active'])\n\n lime_type_importance = np.zeros((train_data.shape[1]))\n\n for i in range(test_data.shape[0]):\n prediction = model.predict(test_data[i, ...][np.newaxis, ...])\n if prediction[0][1] > 0.5:\n prediction = 1\n label = ['Active']\n else:\n prediction = 0\n label = ['Inactive']\n\n if test_outcomes[i] == prediction:\n exp = explainer.explain_instance(test_data[i, ...], model.predict_proba, num_features=10, labels=[prediction])\n exp.save_to_file(results_dir + 'explanation' + str(fold_num) + '-' + str(i) + '.html')\n important_types = exp.as_map()\n # print('types', important_types)\n\n fig = exp.as_pyplot_figure(label=prediction)\n plt.tight_layout()\n fig.savefig(results_dir + str(fold_num) + '_' + str(i) + '_explained.png')\n\n for lesion_type in important_types[prediction]:\n lime_type_importance[lesion_type[0]] += lesion_type[1]\n\n K.clear_session()\n\n return deep_probabilities, model, lime_type_importance\n\n\ndef svms(trainData, testData, trainOutcomes):\n linear = SVC(kernel='linear', class_weight='balanced', probability=True)\n linear.fit(trainData, trainOutcomes)\n svm_linear_posterior = linear.predict_proba(testData)\n\n rbf = SVC(class_weight='balanced', probability=True)\n rbf.fit(trainData, trainOutcomes)\n svm_rbf_posterior = rbf.predict_proba(testData)\n\n trainDistances = chi2_kernel(trainData, trainData)\n testDistances = chi2_kernel(testData, trainData)\n\n svc = SVC(kernel='precomputed', class_weight='balanced', probability=True)\n svc.fit(trainDistances, trainOutcomes)\n\n chi2svm_posterior = svc.predict_proba(testDistances)\n\n return svm_linear_posterior, svm_rbf_posterior, chi2svm_posterior\n\n\ndef knn(trainData, trainOutcomes, testData):\n\n try:\n knnEuclidean = KNeighborsClassifier(n_neighbors=1)\n knnEuclidean.fit(trainData, trainOutcomes)\n knn_euclid_posterior = knnEuclidean.predict_proba(testData)\n except np.linalg.linalg.LinAlgError as e:\n knn_euclid_posterior = np.zeros((len(trainOutcomes), 2))\n knn_euclid_posterior[:, 1] = 1\n print('Not enough samples for Euclidean covariance estimation! Predicting all active.')\n print(e)\n try:\n knnMahalanobis = KNeighborsClassifier(n_neighbors=1, algorithm='brute', metric = 'mahalanobis')\n knnMahalanobis.fit(trainData, trainOutcomes)\n knn_maha_posterior = knnMahalanobis.predict_proba(testData)\n except np.linalg.linalg.LinAlgError as e:\n print('Not enough samples for Mahalanobis covariance estimation! 
Predicting all active.')\n print(e)\n knn_maha_posterior = np.zeros((len(trainOutcomes), 2))\n knn_maha_posterior[:, 1] = 1\n\n return knn_euclid_posterior, knn_maha_posterior\n\n\ndef lesion_type_selection(trainData, testData, trainOutcomes, testOutcomes, minTypes, results_dir):\n train = trainData\n test = testData \n \n rf = RandomForestClassifier(class_weight='balanced', n_estimators=2000, n_jobs=-1, oob_score=True)\n rf.fit(trainData, trainOutcomes)\n \n allFeatureImportance = rf.feature_importances_\n featureImportance = allFeatureImportance\n \n typesLeft = len(featureImportance)\n \n trainScores, oobScores, testScores, numFeatures = [], [], [], []\n\n while typesLeft > minTypes:\n removeThisRound = []\n \n for r in range(int(np.ceil(0.1*typesLeft))):\n remove = np.argmin(featureImportance)\n removeThisRound.append(remove)\n \n featureImportance[remove] = 999\n \n featureImportance = [x for x in featureImportance if x != 999]\n \n removeThisRound = sorted(removeThisRound, reverse=True)\n \n for remove in removeThisRound:\n train = np.delete(train, remove, 1)\n test = np.delete(test, remove, 1)\n\n typesLeft -= len(removeThisRound)\n \n rf.fit(train, trainOutcomes)\n \n trainScores.append(rf.score(train, trainOutcomes))\n oobScores.append(rf.oob_score_)\n testScores.append(rf.score(test, testOutcomes))\n numFeatures.append(typesLeft)\n\n print(\"out of bag scores\", oobScores)\n\n best_number = numFeatures[np.argmax(np.asarray(oobScores))]\n\n print(best_number, 'is the optimal number of features')\n \n plt.figure()\n plt.plot(numFeatures, oobScores, label=\"Out-of-Bag Score\")\n plt.plot(numFeatures, trainScores, label=\"Training Score\")\n plt.plot(numFeatures, testScores, label=\"Test Score\")\n plt.xlabel('Number of codewords in BoL', fontsize=20)\n plt.ylabel('Mean Accuracy', fontsize=20)\n plt.legend(shadow=True, fontsize=20)\n plt.tight_layout()\n plt.savefig(results_dir + 'feature_selection.png', dpi=500)\n plt.close()\n\n train = trainData\n test = testData\n finalRemove = np.shape(testData)[1] - best_number\n \n removeThisRound = []\n for r in range(finalRemove):\n remove = np.argmin(allFeatureImportance)\n removeThisRound.append(remove)\n \n allFeatureImportance[remove] = 999\n \n # this is where I need to remove/update the visualization arrays\n allFeatureImportance = [x for x in featureImportance if x != 999]\n \n removeThisRound = sorted(removeThisRound, reverse=True)\n \n for remove in removeThisRound:\n train = np.delete(train, remove, 1)\n test = np.delete(test, remove, 1)\n \n return train, test, removeThisRound\n\n\ndef apply_lesion_type_selection(train_data, test_data, types_to_remove):\n for remove in types_to_remove:\n train_data = np.delete(train_data, remove, 1)\n test_data = np.delete(test_data, remove, 1)\n\n return train_data, test_data\n"
},
{
"alpha_fraction": 0.5680933594703674,
"alphanum_fraction": 0.6028534173965454,
"avg_line_length": 28.20833396911621,
"blob_id": "0a9bea0f04b98368be12932d29c076cb133647c1",
"content_id": "27d2da351283d8d44b09751405c577dac57390ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7710,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 264,
"path": "/skeletons.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n# import vtk\n\nimport pickle\n\nfrom scipy.ndimage.morphology import binary_erosion\nfrom scipy.ndimage.morphology import binary_hit_or_miss\n\nfrom scipy.ndimage.morphology import distance_transform_edt\nfrom scipy.ndimage.measurements import center_of_mass\n\nfrom scipy.spatial import distance, ConvexHull, Voronoi\nfrom scipy.special import sph_harm\n\n\ndef thinningElements():\n elem = np.zeros((3, 3), dtype='bool')\n elem[2,:] = 1\n elem[1,1] = 1\n \n elemConv = np.zeros((3, 3), dtype='bool')\n elemConv[0,:] = 1\n \n elem2 = np.zeros((3, 3), dtype='bool')\n elem2[0,1] = 1\n elem2[1,1] = 1\n elem2[1,2] = 1\n\n elem2Conv = np.zeros((3, 3), dtype='bool')\n elem2Conv[1,0] = 1\n elem2Conv[2,0] = 1\n elem2Conv[2,1] = 1\n \n rotatedElements = np.zeros((4,4,3,3), dtype='bool')\n\n for r in range(4):\n rotatedElements[r,0,:,:] = np.rot90(elem, r)\n rotatedElements[r,1,:,:] = np.rot90(elemConv, r)\n rotatedElements[r,2,:,:] = np.rot90(elem2, r)\n rotatedElements[r,3,:,:] = np.rot90(elem2Conv, r)\n \n return rotatedElements\n\n\ndef hitOrMissThinning(lesion, thinningElements):\n img = np.zeros((60, 256, 256), dtype='bool')\n \n for point in lesion:\n img[point[0], point[1], point[2]] = 1\n\n for z in range(img.shape[0]):\n iterations = 0\n numSkelePoints = 0\n \n while not numSkelePoints == np.sum(img[z,:,:]):\n numSkelePoints = np.sum(img[z,:,:])\n for r in range(4):\n remove = binary_hit_or_miss(img[z,:,:], thinningElements[r,0,:,:], thinningElements[r,1,:,:])\n img[z,:,:] = img[z,:,:] - remove\n \n for r in range(4):\n remove = binary_hit_or_miss(img[z,:,:], thinningElements[r,2,:,:], thinningElements[r,3,:,:])\n img[z,:,:] = img[z,:,:] - remove\n \n iterations += 1\n\n print(np.sum(img), '/', len(lesion), 'lesion voxels')\n \n skeletonPoints = np.transpose(np.nonzero(img))\n return img, skeletonPoints\n\n\ndef voroSkeleton(lesion):\n skeleton = []\n\n vor = Voronoi(lesion)\n \n for region in vor.regions:\n if region.all() >= 0:\n for pointIndex in region:\n skeleton.append(vor.vertices[pointIndex])\n \n return skeleton\n\n\ndef getLesionSkeleton(scan):\n\n thinningOperators = thinningElements()\n\n for lesion in scan.lesionList:\n if len(lesion) > 50:\n skeleImg, hitMissSkele = hitOrMissThinning(lesion, thinningOperators)\n \n img = np.zeros((256, 256, 60), dtype='float')\n \n for point in lesion:\n img[point[0], point[1], point[2]] = 1\n \n \n boundaryDistance = distance_transform_edt(img, sampling=[1, 1, 3])\n \n point = center_of_mass(img)\n \n centrePoint = (int(point[0]), int(point[1]), int(point[2]))\n distanceGrad = np.abs(np.gradient(boundaryDistance))\n \n sumGrads = distanceGrad[0] + distanceGrad[1] + distanceGrad[2]\n sumGrads = np.multiply(img, sumGrads)\n \n displaySkeleton3D(lesion, hitMissSkele)\n \n plt.subplot(1, 3, 1) \n plt.axis('off')\n plt.imshow(img[centrePoint[0], centrePoint[1]-10:centrePoint[1]+10, centrePoint[2]-10:centrePoint[2]+10], cmap = plt.cm.gray, interpolation = 'nearest') \n plt.xlabel('lesion slice')\n \n plt.subplot(1, 3, 2) \n plt.axis('off')\n plt.imshow(boundaryDistance[centrePoint[0], centrePoint[1]-10:centrePoint[1]+10, centrePoint[2]-10:centrePoint[2]+10], cmap = plt.cm.gray, interpolation = 'nearest')\n plt.xlabel('boundary distance')\n\n plt.subplot(1, 3, 3) \n plt.axis('off')\n plt.imshow(skeleImg[centrePoint[0], centrePoint[1]-10:centrePoint[1]+10, centrePoint[2]-10:centrePoint[2]+10], cmap = plt.cm.gray, interpolation = 'nearest') \n plt.xlabel('skeleton')\n \n plt.show()\n\n\ndef 
displaySkeleton3D(lesion, skeleton):\n \n points = vtk.vtkPoints()\n vertices = vtk.vtkCellArray()\n\n points2 = vtk.vtkPoints()\n vertices2 = vtk.vtkCellArray() \n\n \n Colors = vtk.vtkUnsignedCharArray()\n Colors.SetNumberOfComponents(3)\n Colors.SetName(\"Colors\")\n Colors2 = vtk.vtkUnsignedCharArray()\n Colors2.SetNumberOfComponents(3)\n Colors2.SetName(\"Colors2\")\n\n for point in lesion:\n pointId = points.InsertNextPoint(point)\n vertices.InsertNextCell(1)\n vertices.InsertCellPoint(pointId)\n Colors.InsertNextTuple3(255,255,255)\n\n for point in skeleton:\n pointId = points2.InsertNextPoint(point)\n vertices2.InsertNextCell(1)\n vertices2.InsertCellPoint(pointId)\n Colors2.InsertNextTuple3(0,255,0)\n\n poly = vtk.vtkPolyData()\n poly2 = vtk.vtkPolyData()\n\n poly.SetPoints(points)\n poly.SetVerts(vertices)\n poly.GetPointData().SetScalars(Colors)\n poly.Modified()\n poly.Update()\n\n# delaunay = vtk.vtkDelaunay2D()\n# delaunay.SetInput(poly)\n# delaunay.SetSource(poly)\n# delaunay.SetAlpha(0.5)\n# delaunay.Update()\n# \n# delMapper = vtk.vtkDataSetMapper()\n# delMapper.SetInputConnection(delaunay.GetOutputPort())\n# \n# delActor = vtk.vtkActor()\n# delActor.SetMapper(delMapper)\n# delActor.GetProperty().SetInterpolationToFlat()\n# delActor.GetProperty().SetRepresentationToWireframe()\n\n poly2.SetPoints(points2)\n poly2.SetVerts(vertices2)\n poly2.GetPointData().SetScalars(Colors2)\n poly2.Modified()\n poly2.Update()\n \n# poly3.SetPoints(points3)\n# poly3.SetVerts(vertices3)\n# poly3.GetPointData().SetScalars(Colors3)\n# poly3.Modified()\n# poly3.Update()\n \n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n \n renWin.SetSize(500, 500)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper2 = vtk.vtkPolyDataMapper()\n# mapper3 = vtk.vtkPolyDataMapper()\n mapper.SetInput(poly)\n mapper2.SetInput(poly2)\n# mapper3.SetInput(poly3)\n \n \n transform1 = vtk.vtkTransform()\n transform1.Translate(0.0, 0.1, 0.0)\n transform2 = vtk.vtkTransform()\n transform2.Translate(0.0, 0.0, 0.1) \n \n# transform = vtk.vtkTransform()\n# transform.Translate(0.2, 0.0, 0.0)\n# axesTransform = vtk.vtkTransform()\n# axesTransform.Scale(0.1, 0,0) \n\n# axes = vtk.vtkAxesActor()\n# axes.SetUserTransform(transform)\n# axes.SetUserTransform(axesTransform)\n# axes.AxisLabelsOff()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetPointSize(5)\n \n actor2 = vtk.vtkActor()\n actor2.SetMapper(mapper2)\n actor2.SetUserTransform(transform1)\n actor2.GetProperty().SetPointSize(5)\n\n# actor3 = vtk.vtkActor()\n# actor3.SetMapper(mapper3)\n# actor3.SetUserTransform(transform2)\n# actor3.GetProperty().SetPointSize(5)\n \n ren.AddActor(actor)\n ren.AddActor(actor2)\n# ren.AddActor(axes)\n# ren.AddActor(actor3)\n# ren.AddActor(delActor)\n ren.SetBackground(.2, .3, .4)\n \n renWin.Render()\n iren.Start()\n\n \ndef displaySkeletons():\n infile = open('/usr/local/data/adoyle/mri_list.pkl', 'rb')\n mri_list = pickle.load(infile)\n infile.close()\n\n for scan in mri_list[0:100]:\n getLesionSkeleton(scan)\n\n\ndef main():\n displaySkeletons()\n \n \nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6270215511322021,
"alphanum_fraction": 0.6610512137413025,
"avg_line_length": 31.615385055541992,
"blob_id": "103f7057e4fa6afb911a45d73d0f43e78705382b",
"content_id": "0f460a3ba7d662add48365941c2467be0d8ef46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2968,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 91,
"path": "/classify_lesions.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 05 19:59:43 2015\n\n@author: Andrew\n\"\"\"\nimport nibabel as nib\nimport numpy as np\nimport os\nimport h5py\n\nfrom mri import mri\nfrom sklearn.ensemble import RandomForestClassifier\n\ndata_dir = '/usr/local/data/adoyle/mri_data/MS-LAQ/'\n\nmalf_classes = ['bg', 'bs', 'cgm', 'crblr_gm', 'crblr_wm', 'csf', 'dgm', 'lv', 'ov', 'wm']\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ngood_malf_classes = ['cgm', 'dgm', 'wm']\n\n\nmri_list = []\n\nfor root, dirs, filenames in os.walk(data_dir): \n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n mri_list.append(mri(f))\n\nf = h5py.File(data_dir + 'features2.hdf5', 'r')\n\n#features types: malf-context*scales + image-intensities\n#features types: 10*5 + 4\n\nimage_pixels = 60*256*256\n\npriors = 10\nfeature_scales = 2\nnum_mods = 4\n\nnum_train = 15\nnum_test = 5\n\n\n\n\ntraining_vector = np.zeros(shape=(num_train, image_pixels*(priors*feature_scales+num_mods)))\nprint 'train_vector', np.shape(training_vector)\n\nfor i, img in enumerate(mri_list[0:num_train]):\n print i\n features = f[img.uid]\n flat_feature = np.reshape(features, image_pixels*feature_scales*priors)\n #print 'flat_features', np.shape(flat_feature)\n training_vector[i, 0:image_pixels*(feature_scales*priors)] = flat_feature\n #print 'begin ', 0:60*256*256*50\n \n for j, m in enumerate(modalities):\n malf_data = nib.load(img.images[m]).get_data()\n flat_malf_data = np.reshape(malf_data, (image_pixels))\n #print 'flat_malf', np.shape(flat_malf_data)\n #print 'train_vector_slice', np.shape(training_vector[i, 60*256*256*(50+j):60*256*256*(50+j+1)])\n \n #print 'be gin: ', 60*256*256*(50+j+1)\n #print 'end: ', 60*256*256*(50+j+2)\n \n training_vector[i, image_pixels*(feature_scales*priors+j):image_pixels*(feature_scales*priors+j+1)] = flat_malf_data\n \ntest_vector = np.zeros(shape=(5, image_pixels*(feature_scales*priors + num_mods)))\nfor i, img in enumerate(mri_list[num_train:num_train+num_test]):\n features = f[img.uid]\n test_vector[i, 0:image_pixels*(feature_scales*priors)] = np.reshape(features, image_pixels*feature_scales*priors)\n \n for j, m in enumerate(modalities):\n malf_data = nib.load(img.images[m]).get_data()\n flat_malf_data = np.reshape(malf_data, (image_pixels))\n test_vector[i, image_pixels*(feature_scales*priors+j):image_pixels*(feature_scales*priors+j+1)] = flat_malf_data\n\n\ntrain_labels = np.zeros(shape=(num_train, image_pixels))\nfor i, img in enumerate(mri_list[0:num_train]):\n train_labels[i, :] = np.reshape(nib.load(img.lesions).get_data(), image_pixels)\n\ntest_labels = np.zeros(shape=(num_test, image_pixels))\nfor i, img in enumerate(mri_list[num_train:num_train+num_test]):\n test_labels[i, :] = np.reshape(nib.load(img.lesions).get_data(), image_pixels)\n\n\n\nforest = RandomForestClassifier()\nforest.fit(training_vector, train_labels)\nforest.predict(test_vector)\n"
},
{
"alpha_fraction": 0.4937931001186371,
"alphanum_fraction": 0.5448275804519653,
"avg_line_length": 53.94936752319336,
"blob_id": "ed9bd68a42a61f5d63e4cc3ddfa3c880003a6ff7",
"content_id": "679b869460ed1bedece63aaf0d4b8ad4b7089fd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4350,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 79,
"path": "/mri.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "import nibabel as nib\nimport numpy as np\nimport collections\nimport os\n\nclass mri(object):\n \n def __init__(self, t1p_image):\n \n tokens = t1p_image.split('_')\n self.data_dir = '/data1/users/adoyle/MS-LAQ/MS-LAQ-302-STX/'\n\n self.folder = self.data_dir + tokens[1] + '/' + tokens[2] + '_' + tokens[3] + '/m0/'\n self.features_dir = self.folder[:-3] + 'results/'\n os.makedirs(self.features_dir, exist_ok=True)\n \n self.uid = tokens[2] + tokens[3]\n\n self.images = collections.OrderedDict()\n self.images['t1p'] = self.folder + 'classifier_files/' + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t1p_norm_ANAT-brain_ISPC-stx152lsq6.mnc.gz'\n self.images['t2w'] = self.folder + 'classifier_files/' + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t2w_norm_ANAT-brain_ISPC-stx152lsq6.mnc.gz'\n self.images['pdw'] = self.folder + 'classifier_files/' + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_pdw_norm_ANAT-brain_ISPC-stx152lsq6.mnc.gz'\n self.images['flr'] = self.folder + 'classifier_files/' + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_flr_norm_ANAT-brain_ISPC-stx152lsq6.mnc.gz' \n \n self.rawImages = collections.OrderedDict()\n self.rawImages['t1p'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t1p_ISPC-stx152lsq6.mnc.gz'\n self.rawImages['t2w'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t2w_ISPC-stx152lsq6.mnc.gz'\n self.rawImages['pdw'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_pdw_ISPC-stx152lsq6.mnc.gz'\n self.rawImages['flr'] = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_flr_ISPC-stx152lsq6.mnc.gz'\n \n self.lesions = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_ct2f_ISPC-stx152lsq6.mnc.gz'\n\n self.transformToICBM = self.folder[0:-3] + 'stx152lsq6/MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_patient_stx152lsq6-to-stx152lsq6_nl.xfm'\n\n self.lesionList = []\n self.tissues = ['csf', 'wm', 'gm', 'pv', 'lesion']\n self.priors = collections.OrderedDict()\n \n self.lesionPriorXfm = self.folder + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m0_t1p-to-stx152lsq6.xfm'\n\n for tissue in self.tissues:\n self.priors[tissue] = self.folder[0:-3] + 'stx152lsq6/MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_patient_avg_ANAT-' + tissue + '-cerebrum_ISPC-stx152lsq6.mnc.gz'\n\n self.newT2 = 0\n self.newGad = 0\n self.relapse = 0\n self.treatment = ''\n\n self.age = 0.\n self.country = ''\n self.race = ''\n self.sex = 'F'\n \n self.futureLabels = self.folder[0:-3] + '/m24/MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m24_ct2f_ISPC-stx152lsq6.mnc.gz'\n self.newLesions = 0\n \n # self.newT2 = self.data_dir + tokens[1] + '/' + tokens[2] + '_' + tokens[3] + '/m24/' + 'classifier_files/' + 'MS-LAQ-302-STX_' + tokens[1] + '_' + tokens[2] + '_' + tokens[3] + '_m24_t2w_norm_ANAT-brain_ISPC-stx152lsq6.mnc.gz'\n self.has_2_years_data = os.path.exists(self.folder[0:-3] + '/m24/')\n\n def calculateNewLesions(self):\n lesionImage = nib.load(self.futureLabels).get_data()\n lesionLocations = list(np.asarray(np.nonzero(lesionImage)).T)\n \n connectedLesion = np.zeros((len(lesionLocations)))\n lesionList = []\n \n for i, (x,y,z) in enumerate(lesionLocations):\n for lesion in lesionList:\n for 
point in lesion:\n if np.abs(x - point[0]) <= 1 and np.abs(y - point[1]) <= 1 and np.abs(z-point[2]) <= 1:\n lesion.append([x,y,z])\n connectedLesion[i] = True\n if connectedLesion[i]:\n break\n if not connectedLesion[i]:\n newLesion = [[x,y,z]]\n lesionList.append(newLesion)\n \n self.newLesions = len(lesionList)\n \n"
},
{
"alpha_fraction": 0.5699300765991211,
"alphanum_fraction": 0.5784770846366882,
"avg_line_length": 28.261363983154297,
"blob_id": "310b67760247659ad05d8b23a64a2b966e232ecb",
"content_id": "903423a08b036c0a97c29be0b03afe2c89654d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2574,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 88,
"path": "/lesion_distributions.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport nibabel as nib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport mri\n\n\nfrom scipy import stats\n\ndata_dir = 'C:/MRI/MS-LAQ/'\n\nmalf_classes = ['bg', 'bs', 'cgm', 'crblr_gm', 'crblr_wm', 'csf', 'dgm', 'lv', 'ov', 'wm']\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\ngood_malf_classes = ['cgm', 'dgm', 'wm']\n\nmri_list = []\n\n\nfor root, dirs, filenames in os.walk(data_dir): \n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n mri_list.append(mri(f))\n\n\nmalf_lesions = {}\nmalf_thresh = {}\nmalf_tissues = {}\nmalf_lesion_locations = {}\nmalf_lesion_locations_nonzero = {}\n\nfor mod in modalities:\n malf_lesions[mod] = {}\n for m in malf_classes:\n malf_lesions[mod][m] = []\n \nfor img in mri_list:\n print img.priors['t1p']\n lesions = nib.load(img.lesions).get_data()\n\n tissues = {}\n for mod in modalities:\n tissues[mod] = nib.load(img.priors[mod]).get_data() \n \n for m in malf_classes:\n malf_tissues[m] = nib.load(img.malf[m]).get_data()\n malf_thresh[m] = np.greater_equal(malf_tissues[m], 0.7)\n malf_lesion_locations[m] = np.multiply(lesions, malf_thresh[m])\n malf_lesion_locations_nonzero[m] = np.nonzero(malf_lesion_locations[m])\n \n for m in malf_classes:\n for mod in modalities:\n for lesion_voxel in tissues[mod][malf_lesion_locations_nonzero[m]]:\n malf_lesions[mod][m].append(lesion_voxel)\n \nplt.close('all')\nf, subplots = plt.subplots(len(modalities), len(good_malf_classes), sharex=True)\n\nfor i, mod in enumerate(modalities):\n for j, m in enumerate(good_malf_classes):\n try:\n print i,j, len(malf_lesions[mod][m])\n kde = stats.gaussian_kde(malf_lesions[mod][m])\n X_plot = np.linspace(0, 1500, 1000)\n density = kde(X_plot)\n \n subplots[i,j].plot(X_plot, density)\n subplots[0,j].set_title(m)\n subplots[i,0].set_ylabel(mod, rotation=0, size='large')\n #subplots[i,j].set_title('lesions in ' + mod + ' for ' + m + ', ' + malf_lesions[mod][m] + ' lesions')\n \n #plt.title(m + ', ' + str(np.shape(malf_lesions[m])[0]) + ' voxels')\n \n #plt.savefig(data_dir + m +'.jpg')\n \n except Exception as e:\n print 'couldnt do {0}'.format(m)\n print e.message\n\n#plt.xlabel('voxel intensity')\n#plt.ylabel('proportion of voxels')\n#plt.show()\nf.tight_layout()\nplt.savefig(data_dir + 'plots.jpg')\nplt.show()\n#plt.close()\n\nprint 'done'"
},
{
"alpha_fraction": 0.5126984119415283,
"alphanum_fraction": 0.5349206328392029,
"avg_line_length": 22.370370864868164,
"blob_id": "296e3ee7cdf3144375037ec2d9d7e8ccfaf7019d",
"content_id": "87a26df6dc6f87dbbbf8cebf9d8351b64cdefe39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 630,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 27,
"path": "/convert_files.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 03 16:18:40 2015\n\n@author: Andrew\n\"\"\"\n\nimport nibabel as nib\nimport numpy as np\nimport os\n\ndata_dir = 'C:/MRI/MS-LAQ/'\n\n\nmri_list = []\n\nfor root, dirs, filenames in os.walk(data_dir):\n for name in filenames:\n if '.mnc.gz' in name:\n try:\n input_img = nib.load(os.path.join(root, name))\n tokens = os.path.join(root, name).split('.')\n output_name = tokens[0] + '.nii.gz'\n print output_name\n nib.save(input_img, output_name)\n except Exception as e:\n print e.message"
},
{
"alpha_fraction": 0.6787840723991394,
"alphanum_fraction": 0.6878721117973328,
"avg_line_length": 48.859375,
"blob_id": "4c96f999314073863f47dfe28ec76a6c095716a7",
"content_id": "c64135c780241884197f4781bb1f34e98d3ac10b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3191,
"license_type": "no_license",
"max_line_length": 314,
"num_lines": 64,
"path": "/merge_resample_catani.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "from nibabel.processing import resample_from_to\n\nimport nibabel as nib\nimport numpy as np\n\n\ndata_dir = 'E:/brains/'\n\nlr_tracts = ['Anterior_Segment', 'Arcuate', 'Cingulum', 'Cortico_Ponto_Cerebellum', 'Cortico_Spinal', 'Inferior_Cerebellar_Pedunculus', 'Inferior_Longitudinal_Fasciculus', 'Inferior_Occipito_Frontal_Fasciculus', 'Long_Segment', 'Optic_Radiations', 'Posterior_Segment', 'Superior_Cerebellar_Pedunculus', 'Uncinate']\nother_tracts = ['Anterior_Commissure', 'Corpus_Callosum', 'Fornix', 'Internal_Capsule']\n\ncombined_tracts = ['Projection_Network', 'Cerebellar_Network', 'Inferior_Network', 'Perisylvian_Network']\n\nimg = nib.load(data_dir + 'test.nii.gz')\n# atlas = nib.load(data_dir + 'atlases/Catani/Arcuate/Arcuate_Left.nii')\n# img2 = resample_from_to(atlas, img)\n\n\nfor tract_name in lr_tracts:\n left = nib.load(data_dir + 'atlases/Catani/all/' + tract_name + '_Left.nii')\n right = nib.load(data_dir + 'atlases/Catani/all/' + tract_name + '_Right.nii')\n\n output = nib.Nifti1Image(left.get_data() + right.get_data(), left.affine)\n\n atlas = resample_from_to(output, img)\n nib.save(atlas, data_dir + 'atlases/Catani/all/resampled/' + tract_name + '.nii')\n\nfor tract_name in other_tracts:\n atlas = nib.load(data_dir + 'atlases/Catani/all/' + tract_name + '.nii')\n output = resample_from_to(atlas, img)\n nib.save(output, data_dir + 'atlases/Catani/all/resampled/' + tract_name + '.nii')\n\nfor tract_name in combined_tracts:\n affine = nib.load(data_dir + 'atlases/Catani/all/resampled/Internal_Capsule.nii').get_affine()\n\n if 'Projection' in tract_name:\n atlas1 = nib.load(data_dir + 'atlases/Catani/all/resampled/Internal_Capsule.nii').get_data()\n atlas2 = nib.load(data_dir + 'atlases/Catani/all/resampled/Cortico_Spinal.nii').get_data()\n\n output = (atlas1 + atlas2) / 2\n\n if 'Cerebellar' in tract_name:\n atlas1 = nib.load(data_dir + 'atlases/Catani/all/resampled/Cortico_Ponto_Cerebellum.nii').get_data()\n atlas2 = nib.load(data_dir + 'atlases/Catani/all/resampled/Superior_Cerebellar_Pedunculus.nii').get_data()\n atlas3 = nib.load(data_dir + 'atlases/Catani/all/resampled/Inferior_Cerebellar_Pedunculus.nii').get_data()\n\n output = (atlas1 + atlas2 + atlas3) / 3\n\n if 'Inferior' in tract_name:\n atlas1 = nib.load(data_dir + 'atlases/Catani/all/resampled/Inferior_Longitudinal_Fasciculus.nii').get_data()\n atlas2 = nib.load(data_dir + 'atlases/Catani/all/resampled/Inferior_Occipito_Frontal_Fasciculus.nii').get_data()\n atlas3 = nib.load(data_dir + 'atlases/Catani/all/resampled/Uncinate.nii').get_data()\n\n output = (atlas1 + atlas2 + atlas3) / 3\n\n if 'Perisylvian' in tract_name:\n atlas1 = nib.load(data_dir + 'atlases/Catani/all/resampled/Anterior_Segment.nii').get_data()\n atlas2 = nib.load(data_dir + 'atlases/Catani/all/resampled/Long_Segment.nii').get_data()\n atlas3 = nib.load(data_dir + 'atlases/Catani/all/resampled/Posterior_Segment.nii').get_data()\n\n output = (atlas1 + atlas2 + atlas3) / 3\n\n to_save = nib.Nifti1Image(output, affine)\n nib.save(to_save, data_dir + 'atlases/Catani/all/combined/' + tract_name + '.nii')\n"
},
{
"alpha_fraction": 0.5638474225997925,
"alphanum_fraction": 0.5953565239906311,
"avg_line_length": 17.303030014038086,
"blob_id": "6b2b4de89243f60ea2f696e22529d6c89419f272",
"content_id": "69f3a46d0cae71baeac466274ba7d18ad64b02c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 33,
"path": "/lesion_malf.py",
"repo_name": "crocodoyle/mritools",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 19 13:50:06 2015\n\n@author: Andrew\n\"\"\"\nimport nibabel as nib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom mri import mri\n\nimport h5py\n\n\ndata_dir = 'C:/MRI/MS-LAQ/'\n\nmalf_classes = ['bg', 'bs', 'cgm', 'crblr_gm', 'crblr_wm', 'csf', 'dgm', 'lv', 'ov', 'wm']\nmodalities = ['t1p', 't2w', 'pdw', 'flr']\n\n\nmri_list = []\n\n\nfor root, dirs, filenames in os.walk(data_dir): \n for f in filenames:\n if f.endswith('_m0_t1p.mnc.gz'):\n mri_list.append(mri(f))\n\nmalf_tissues = {}\n\nsize = nib.load(mri_list[0].priors['cgm'])"
}
] | 18 |
VanFeo/HTTPServer | https://github.com/VanFeo/HTTPServer | f420a14c52a8d51c9ac60985640a403e873962e3 | 321b11ca3ed9024c85e5a2e57bcec290d5bc167f | 2fc0051cfa6709e125cde1ded46cdda25494e024 | refs/heads/master | 2020-11-27T08:19:48.184198 | 2019-12-21T03:03:43 | 2019-12-21T03:03:43 | 229,368,204 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49685534834861755,
"alphanum_fraction": 0.49685534834861755,
"avg_line_length": 12.25,
"blob_id": "d45b9126665c293175496ef74d0b5e26db4c7564",
"content_id": "4b4110bda28eeb1e8ddbfe21ca304d60df2a0632",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 12,
"path": "/webframe/urls.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\n路由选择模块\n\"\"\"\nfrom views import *\n\nurls = [\n # 如果访问'/time'路径,有数据\n # 用show_time方法提供\n (\"/time\", show_time),\n (\"/hello\", hello),\n (\"/bye\", bye)\n]\n"
},
{
"alpha_fraction": 0.4840041399002075,
"alphanum_fraction": 0.48710009455680847,
"avg_line_length": 25.189189910888672,
"blob_id": "a8b77333035fbfe21f115ef3694b9292521db0bc",
"content_id": "9ab915aed903be2a8a0582020ab5e8b8186b1e51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1377,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 37,
"path": "/README.md",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "# HTTPServer\n## 功能 \n### httpserver部分\n* 获取http请求 \n* 解析http请求\n* 将请求发送给webframe\n* 从WebFrame接收反馈数据\n* 将数据组织为response格式发送给客户端\n### webframe部分\n* 从httpserver接收具体请求\n* 根据请求进行逻辑处理和数据处理\n* 将需要的数据反馈给httpserver\n### 特点 \n* 采用httpserver和应用处理分离的模式,降低了耦合度\n* 采用了用户配置文件的思路\n* webframe部分采用了模拟后端框架的处理方法\n### 技术点\n* httpserver部分需要与两端建立通信\n* webFrame部分采用多线程接收并发请求\n* 数据传递使用json格式\n项目结构: \n```\n |--httpserver --httpserver.py (主程序) \n | --config (httpserver配置) \nproject--|\n |\n |--webframe --webframe.py (主程序代码)\n | --settings (框架配置)\n | --views.py ( 应用处理程序) \n | --urls.py (存放路由)\n | --settings (框架配置)\n```\n### 交互数据格式协议\n```\nhttpserver-->webframe {'method': 'GET', 'info': '/'}\nwebframe-->httpserver {'status': '200', 'data': 'xxx'}\n```\n"
},
{
"alpha_fraction": 0.49433961510658264,
"alphanum_fraction": 0.5052410960197449,
"avg_line_length": 26.413793563842773,
"blob_id": "5396c7ceddc5a2cce9c8baa03485e966e04c1677",
"content_id": "405d4143eff195c96251a8fc8192619027fa3cf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2473,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 87,
"path": "/webframe/webframe.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nweb框架\nenv: python3.6\nauthor: Feo\n\"\"\"\nfrom threading import Thread\nfrom settings import *\nfrom socket import *\nimport json\nfrom urls import *\n\n\nclass Application:\n def __init__(self):\n self.address = (HOST, PORT)\n self.create_socket()\n self.bind()\n\n def create_socket(self):\n self.sockfd = socket()\n self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, DEBUG)\n\n def bind(self):\n self.host = HOST\n self.port = PORT\n self.sockfd.bind(self.address)\n\n def start(self):\n self.sockfd.listen(3)\n print(\"Running web frame %d...\" % self.port)\n while True:\n connfd, addr = self.sockfd.accept()\n print(\"Connect from\", addr)\n t = Thread(target=self.handle, args=(connfd,))\n t.setDaemon(True)\n t.start()\n\n def handle(self, connfd):\n request = connfd.recv(4096).decode()\n request = json.loads(request) # 请求字典\n # request->{'method':'GET','info':'xxx'}\n # print(request)\n if not request:\n connfd.close()\n return\n # 解析请求,提取请求内容\n # 根据请求内容分为两类\n if request['method'] == 'GET':\n if request['info'] == '/' or request['info'][-5:] == '.html':\n response = self.get_html(request['info'])\n else:\n response = self.get_data(request['info'])\n elif request['method'] == 'POST':\n pass\n # 将数据传给httpserver\n # reponse->{'status': '200', 'data': 'xxx'}\n response = json.dumps(response)\n connfd.send(response.encode())\n connfd.close()\n\n # 网页处理\n def get_html(self, info):\n if info == '/':\n filename = DIR + '/index.html'\n else:\n filename = DIR + info\n try:\n with open(filename) as f:\n data = f.read()\n except Exception as e:\n with open(DIR + '/404.html') as fd:\n data = fd.read()\n return {'status': '404', 'data': data}\n else:\n return {'status': '200', 'data': data}\n\n # 其他处理\n def get_data(self, info):\n for url, func in urls:\n if url == info:\n return {'status': '200', 'data': func()}\n return {'status': '404', 'data': 'Sorry...'}\n\n\nif __name__ == '__main__':\n app = Application()\n app.start() # 启动应用\n"
},
{
"alpha_fraction": 0.48603352904319763,
"alphanum_fraction": 0.5307262539863586,
"avg_line_length": 12.84615421295166,
"blob_id": "081dc4aeedf803bd78024ae7aa38db08260015a1",
"content_id": "20624220fbd33e6358b67fc4f1d02cd839280892",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 13,
"path": "/webframe/settings.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nwebframe配置文件\nauthor: Feo\n\"\"\"\n# [webframe address] 被http_server访问\nHOST = '0.0.0.0'\nPORT = 8080\n\n# 是否调试模式\nDEBUG = True\n\n# 网页存储位置\nDIR = './static'"
},
{
"alpha_fraction": 0.5296367406845093,
"alphanum_fraction": 0.5513065457344055,
"avg_line_length": 22.417909622192383,
"blob_id": "f1e8d3154dc504fe917ad596f6ace48bd9c16273",
"content_id": "963333bb5459def81459b3b4206585143ed108d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1863,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 67,
"path": "/httpserver1.0/http_server1.0.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nenv: python3.6\nauthor: Feo\nrequirement:\n 编写一个服务端http程序,在客户端发起request请求时将网页按照http响应格式发送给浏览器展示\n 网页内容作为响应体\n * 注意协调响应格式\n * 对请求做一定的解析判断\n 如果请求内容是 '/' 则发送这个网页\n 其他 则用404响应\n\"\"\"\nfrom socket import *\n\n\ndef handle(connfd):\n # http请求\n request = connfd.recv(4096).decode()\n if not request:\n return\n # 从http请求中提取请求内容\n # print(request)\n # request_line = request.split('\\n')[0]\n info = request.split(' ')[1]\n # 根据请求内容组织响应\n if info == '/':\n with open('httpserver1.0/index.html') as f:\n response_body = f.read()\n response = \"HTTP/1.1 200 OK\\r\\n\"\n response += \"Content-Type:text/html\\r\\n\"\n response += \"\\r\\n\"\n response += \"%s\" % response_body\n else:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n response += \"Content-Type:text/html;charset=UTF-8\\r\\n\"\n response += \"\\r\\n\"\n response += \"sorry,请求失败\"\n connfd.send(response.encode()) # 发送响应\n connfd.close()\n\n\ndef main(addr):\n # http使用tcp传输\n sockfd = socket()\n # 设置端口立即重用\n sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n # 绑定客户端地址\n sockfd.bind(addr)\n sockfd.listen(3)\n print(\"Listen the port...\")\n while True:\n try:\n # 等待浏览器连接\n connfd, addr = sockfd.accept()\n print(\"Connect From:\", addr)\n except KeyboardInterrupt:\n break\n else:\n handle(connfd) # 处理浏览器请求\n\n sockfd.close()\n\n\nif __name__ == '__main__':\n HOST = '0.0.0.0'\n PORT = 8080\n addr = (HOST, PORT)\n main(addr)\n"
},
{
"alpha_fraction": 0.7637362480163574,
"alphanum_fraction": 0.7747252583503723,
"avg_line_length": 17.299999237060547,
"blob_id": "babf59be7c5ac97605d6ef320ee6a47f6f3d315e",
"content_id": "affe5ddb1b54e90e39e359908604fabd9a8ece01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 10,
"path": "/httpserver2.0/requirement.md",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "# HTTPServer2.0\n## 主要功能 :\n* 接收客户端(浏览器)请求\n* 解析客户端发送的请求\n* 根据请求组织数据内容\n* 将数据内容形成http响应格式返回给浏览器\n## 升级点 :\n* 采用IO并发,可以满足多个客户端同时发起请求情况\n* 通过类接口形式进行功能封装\n* 做基本的请求解析,根据具体请求返回具体内容,同时处理客户端的非网页请求行为"
},
{
"alpha_fraction": 0.46702897548675537,
"alphanum_fraction": 0.4855072498321533,
"avg_line_length": 27.4536075592041,
"blob_id": "93f61f6710e15b244398a35d10c4443c76f9af89",
"content_id": "a75d882877df1082fd707864d22683d2bff6f40b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2914,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 97,
"path": "/httpserver2.0/http_server2.0.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nhttp_server2.0\n\"\"\"\nimport os\nfrom socket import *\nfrom select import select\n\n\nclass HTTPServer:\n def __init__(self, host='0.0.0.0', port=8080, dir=None):\n self.Host = host\n self.Port = port\n self.address = (host, port)\n self.dir = dir\n # 直接创建套接字\n self.create_socket()\n\n # 创建套接字\n def create_socket(self):\n self.sockfd = socket()\n self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n self.sockfd.bind(self.address)\n\n # 启动服务\n def serve_forever(self):\n self.sockfd.listen(3)\n print(\"Listen the port %d...\" % self.Port)\n # IO多路复用\n self.rlist = [self.sockfd]\n self.wlist = []\n while True:\n rs, ws, xs = select(self.rlist, self.wlist, [])\n for r in rs:\n if r == self.sockfd:\n # 浏览器连接\n connfd, addr = r.accept()\n print(\"Connect from\", addr)\n self.rlist.append(connfd)\n else:\n # 处理具体请求\n self.handle(r)\n\n def handle(self, c):\n request = c.recv(1024).decode()\n # print(request)\n if not request:\n self.rlist.remove(c)\n c.close()\n return\n # 解析请求,提取请求内容\n request_line = request.split('\\n')[0]\n info = request_line.split(' ')[1]\n print(c.getpeername(), ':', info) # 获取客户端地址\n # 根据请求内容分为两类\n if info == '/' or info[-5:] == '.html':\n self.get_html(c, info)\n else:\n self.get_data(c, info)\n c.close()\n self.rlist.remove(c)\n\n def get_html(self, c, info):\n if info == '/':\n filename = self.dir + '/index.html'\n else:\n filename = self.dir + info\n try:\n with open(filename) as f:\n data = f.read()\n except Exception:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n response += \"Content-Type:text/html;charset=UTF-8\\r\\n\"\n response += '\\r\\n'\n response += \"<h1>网页不存在</h1>\"\n else:\n response = \"HTTP/1.1 200 OK\\r\\n\"\n response += \"Content-Type:text/html\\r\\n\"\n response += '\\r\\n'\n response += data\n finally:\n # 将响应发送给浏览器\n c.send(response.encode())\n\n def get_data(self, c, info):\n response = \"HTTP/1.1 200 OK\\r\\n\"\n response += \"Content-Type:text/html\\r\\n\"\n response += '\\r\\n'\n response += \"<h1>Waiting for httpserver3.0</h1>\"\n c.send(response.encode())\n\n\nif __name__ == '__main__':\n HOST = '0.0.0.0'\n PORT = 8080\n DIR = 'httpserver2.0/static'\n httpd = HTTPServer(HOST, PORT, DIR)\n httpd.serve_forever() # 启动服务\n"
},
{
"alpha_fraction": 0.4993959069252014,
"alphanum_fraction": 0.5146999359130859,
"avg_line_length": 23.584157943725586,
"blob_id": "ca4f2e2073666847cede52790bf698aa9bb327bd",
"content_id": "9c99d81712ff6fe8e751df6c12499d9d5910f1a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2681,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 101,
"path": "/httpserver3.0/httpserver.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nhttp服务端主程序\nenv: python3.6\nauthor: Feo\n\"\"\"\nfrom socket import *\nfrom threading import Thread\nfrom config import *\nimport re, json\n\n\n# 与后端应用交互\ndef connect_frame(env):\n '''\n 将请求发送给WebFrame\n 从WebFrame接收反馈数据\n :param env:\n :return:\n '''\n s = socket()\n try:\n s.connect((frame_ip, frame_port))\n except Exception as e:\n print(e)\n return\n # 发送json请求\n data = json.dumps(env)\n s.send(data.encode())\n # 获取返回数据(json格式)\n data = s.recv(1024 * 1024 * 10).decode()\n if data:\n try:\n result = json.loads(data) # 返回字典\n except:\n pass\n return result\n\n\nclass HTTPServer:\n def __init__(self):\n self.address = (HOST, PORT)\n self.create_socket()\n self.bind()\n\n # 创建套接字\n def create_socket(self):\n self.sockfd = socket()\n self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, DEBUG)\n\n # 绑定地址\n def bind(self):\n # 有些属性可以在调用函数时再生产\n self.host = HOST\n self.port = PORT\n self.sockfd.bind(self.address)\n\n # 处理具体的浏览器请求\n def handle(self, connfd):\n request = connfd.recv(4096).decode()\n pattern = r'(?P<method>[A-Z]+)\\s+(?P<info>/\\S*)'\n try:\n env = re.match(pattern, request).groupdict()\n except:\n connfd.close()\n return\n else:\n # 与webframe交互(数据字典/None)\n data = connect_frame(env)\n # print(data)\n if data:\n self.response(connfd, data)\n\n # 组织响应格式\n def response(self, connfd, data):\n # data-->{'status':'200','data':'OK'}\n if data['status'] == '200':\n res = \"HTTP/1.1 200 OK\\r\\n\"\n res += \"Content-Type:text/html;charset=UTF-8\\t\\n\"\n res += \"\\r\\n\"\n res += data['data']\n elif data['status'] == '404':\n res = \"HTTP/1.1 404 Not Found\\r\\n\"\n res += \"Content-Type:text/html;charset=UTF-8\\t\\n\"\n res += \"\\r\\n\"\n res += data['data']\n connfd.send(res.encode()) # 响应给浏览器\n\n def server_forever(self):\n self.sockfd.listen(3)\n print(\"Listen the port %d...\" % self.port)\n while True:\n connfd, addr = self.sockfd.accept()\n print(\"Connect from\", addr)\n t = Thread(target=self.handle, args=(connfd,))\n t.setDaemon(True)\n t.start()\n\n\nif __name__ == '__main__':\n httpd = HTTPServer()\n httpd.server_forever() # 启动服务\n"
},
{
"alpha_fraction": 0.49302324652671814,
"alphanum_fraction": 0.5767441987991333,
"avg_line_length": 14.428571701049805,
"blob_id": "a4860f019f1b1f97bb43e9433b800a949df26bd2",
"content_id": "db8c76e43512970006f73d6f2a4ac56e8eecc4be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 14,
"path": "/httpserver3.0/config.py",
"repo_name": "VanFeo/HTTPServer",
"src_encoding": "UTF-8",
"text": "\"\"\"\nhttpserver配置文件\nauthor: Feo\n\"\"\"\n# [http server address] 被浏览器访问\nHOST = '0.0.0.0'\nPORT = 8000\n\n# 是否调试模式\nDEBUG = True\n\n# [后端应用地址] 表明配合的webframe地址\nframe_ip = '127.0.0.1'\nframe_port = 8080"
}
] | 9 |
PaulMFleming/PythonAndTwitter | https://github.com/PaulMFleming/PythonAndTwitter | eeaa560716635be81a0087632c2b0b010af6efd7 | 53aa464d558ae3629825634dd1500c3d5eaca952 | 2a99b819c09ada60d3cf8af090e9b7c347dbfa1d | refs/heads/master | 2020-12-30T15:07:41.593572 | 2017-05-20T19:45:59 | 2017-05-20T19:45:59 | 91,101,976 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7186684012413025,
"alphanum_fraction": 0.7212793827056885,
"avg_line_length": 39.31578826904297,
"blob_id": "45b481c495b8c7b519c4a17c253eaa2b9c06a1c8",
"content_id": "0f7241ba831cd0367aac2585837851da8637a96f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1532,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 38,
"path": "/tweet_stream.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "from tweepy import Stream # connects to Twitter stream (live tweets)\nfrom tweepy import OAuthHandler \nfrom tweepy.streaming import StreamListener # pulls in the stream data\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\n# list of strings to search for (good idea: search for what's trending atm)\nkeyword_list = ['WonderWoman','IrishFilm','Cannes2017','Syria'] # track list\n\n# this is an extension to the StreamListener class we imported\n# we need to extend the class in order to customize the incoming data\n# to suit our needs\nclass MyStreamListener(StreamListener):\n\n def on_data(self, data): # override the StreamListener on_data() function\n try:\n with open('tweet_mining.json', 'a') as tweet_file:\n tweet_file.write(data)\n return True\n except BaseException as e:\n print \"Failed on_data: %s\" % str(e)\n return True\n\n def on_error(self, status): # override the Stream_Listener on_error() function\n print status\n return True\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\n# create an instance of the stream and pull in\n# the data by filtering for our ketwords\ntwitter_stream = Stream(auth, MyStreamListener())\ntwitter_stream.filter(track=keyword_list) "
},
{
"alpha_fraction": 0.6923937201499939,
"alphanum_fraction": 0.708053708076477,
"avg_line_length": 27.838708877563477,
"blob_id": "6a8a74bcfea277fbd0fe64b45ea788468ba6f854",
"content_id": "00198f29c6d06c0d7dafdbfa5dd9a3bc73531ad2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 31,
"path": "/twitter_intro.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom settings import twitter_app_config\n\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\nDUB_WOE_ID = 560743\nLON_WOE_ID = 44418\n\ndub_trends = api.trends_place(DUB_WOE_ID)\nlon_trends = api.trends_place(LON_WOE_ID)\n\ndub_trends_set = set([trend['name']\n for trend in dub_trends[0]['trends']])\n\nlon_trends_set = set([trend['name']\n for trend in lon_trends[0]['trends']])\n\ncommon_trends = set.intersection(dub_trends_set, lon_trends_set)\n\n\nprint json.dumps(lon_trends, indent=1)\n"
},
{
"alpha_fraction": 0.6607800126075745,
"alphanum_fraction": 0.7122884392738342,
"avg_line_length": 31.369047164916992,
"blob_id": "18a8657d2f9e8989bfa0e79513c0c7412e0c5ff5",
"content_id": "16cf34c0cab974970702f430fd630c4fe54b5822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2718,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 84,
"path": "/tweet_stream_reader.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport pandas\nimport matplotlib.pyplot as plt\n\ntweets_data_path = 'tweet_mining.json'\n\n# loop through the JSON file line by line and add \n# tweets to the results list\nresults = [] \ntweets_file = open(tweets_data_path, 'r')\nfor tweet_line in tweets_file:\n try:\n status = json.loads(tweet_line)\n results.append(status)\n except:\n continue\n\nprint len(results) # print to test \n\n# create a dataframe\nstatuses = pandas.DataFrame()\n\n# store the text values\nstatuses['text'] = map(lambda status: status['text'], results)\n# store the language values\nstatuses['lang'] = map(lambda status: status['lang'], results)\n# sometimes there may not be a place listed in the tweet\n# so set to 'None' if not present\nstatuses['country'] = map(lambda status: status['place']['country']\n if status['place'] != None else None, results)\n\n# get each tweet language and the count of its appearance\n# (not to be confused with programming languages)\ntweets_by_lang = statuses['lang'].value_counts() \n# get each tweet country of origin and the count of it's appearance\ntweets_by_country = statuses['country'].value_counts()\n\n# create our drawing space/window(figure)\nfig = plt.figure()\n\n# prepare to plot two charts on the same figure\nax1 = fig.add_subplot(2,1,1)\nax2 = fig.add_subplot(2,1,2)\n\n# style the axes and labels of our plot\nax1.tick_params(axis='x', labelsize=15)\nax1.tick_params(axis='y', labelsize=10)\nax1.set_xlabel('Tweet Languages', fontsize=15)\nax1.set_ylabel('Number of tweets', fontsize=15)\nax1.xaxis.label.set_color('#666666')\nax1.yaxis.label.set_color('#666666')\nax1.tick_params(axis='x', colors='#666666')\nax1.tick_params(axis='y', colors='#666666')\n# style the title\nax1.set_title('Top 10 languages', fontsize=15, color='#666666')\n\n# plot the top 10 tweet languages and appearance count using a bar chart\ntweets_by_lang[:10].plot(ax=ax1, kind='bar', color='#FF7A00')\n\n# color the spines (borders)\nfor spine in ax1.spines.values():\n spine.set_edgecolor('#666666')\n\n# second subplot\nax2.tick_params(axis='x', labelsize=15)\nax2.tick_params(axis='y', labelsize=10)\nax2.set_xlabel('Countries', fontsize=15)\nax2.set_ylabel('Number of tweets', fontsize=15)\nax2.xaxis.label.set_color('#666666')\nax2.yaxis.label.set_color('#666666')\nax2.tick_params(axis='x', colors='#666666')\nax2.tick_params(axis='y', colors='#666666')\n# style the title\nax2.set_title('Top 10 Countries', fontsize=15, color='#666666')\n\n# plot the top 10 tweet languages and appearance count using a bar chart\ntweets_by_country[:10].plot(ax=ax2, kind='bar', color='#FF7A00')\n\n# color the spines (borders)\nfor spine in ax2.spines.values():\n spine.set_edgecolor('#666666')\n\n# render the graph\nplt.show()"
},
{
"alpha_fraction": 0.7050938606262207,
"alphanum_fraction": 0.7091152667999268,
"avg_line_length": 31.39130401611328,
"blob_id": "cf013db7e3e57c27aacc27bf055623297faab34f",
"content_id": "6e7d9c41ab27c9b8751f890e389d2a649455785d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1492,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 46,
"path": "/my_tweet_analysis_practice.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom prettytable import PrettyTable\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\ncount = 100\nquery = 'Cohen'\n\n# Get all stats\nresults = [status for status in tweepy.Cursor(api.search, q=query).items(count)]\n\n# Get the name stats\nscreen_names = [ status._json['user']['screen_name']\n for status in results ]\n\n# Get the Tweets text\ntweet_text = [ status._json['text'] for status in results ]\n\n# Get the location\nlocations = [ status._json['user']['location'] for status in results]\n\n# Print the following attributes:\nfor status in results:\n print \"Name:\\t\", status.user.name.encode('utf-8'), \"\\n\"\n print \"Description:\\t\", status.user.description.encode('utf-8'), \"\\n\"\n print \"Location:\\t\", status.user.location.encode('utf-8'), \"\\n\"\n print \"Time Zone:\\t\", status.user.time_zone, \"\\n\"\n\nprint \"\\n\", screen_names\n\n# Print the same as above using PrettyTable to format the results\nfor label in ('Text', tweet_text):\n table = PrettyTable(field_names=[label])\n [ table.add_row(entry) for entry in tweet_text ]\n table.align = 'l'\n print table\n\n\n"
},
{
"alpha_fraction": 0.6771397590637207,
"alphanum_fraction": 0.6825568675994873,
"avg_line_length": 34.519229888916016,
"blob_id": "0b9e3e2b39c1ed8823c0e5b4dbafbbcf0ce2e152",
"content_id": "4ddcb2522cbec8e6dcf9c081f1af8b981b0264c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 52,
"path": "/tweet_frequency_analysis.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom collections import Counter # keeps track of how many times equivalent vallues added\nfrom prettytable import PrettyTable\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\ncount = 50\nquery = 'Trump'\n\n# Get all tweets for the search query\nresults = [status for status in tweepy.Cursor(api.search, q=query).items(count)]\n\nstatus_texts = [ status._json['text'] for status in results ]\n\nscreen_names = [ status._json['user']['screen_name']\n for status in results\n for mention in status._json['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text']\n for status in results \n for hashtag in status._json['entities']['hashtags'] ]\n\nwords = [ w for t in status_texts\n for w in t.split() ]\n\n# Print the 10 most common screen names, hastags and words in \n# tweets relating to our query\n\nfor entry in [screen_names, hashtags, words]:\n counter = Counter(entry)\n print counter.most_common()[:10] # the top 10 results\n print\n\n# Do the same as above, only use Pretty Table to format the results\nfor label, data in (('Text', status_texts),\n ('Screen Name', screen_names),\n ('Word', words)):\n table = PrettyTable(field_names=[label, 'Count'])\n counter = Counter(data)\n [ table.add_row(entry) for entry in counter.most_common()[:10] ]\n table.align[label], table.align['Count'] = 'l', 'r' # align the columns\n print table"
},
{
"alpha_fraction": 0.644371509552002,
"alphanum_fraction": 0.6792024374008179,
"avg_line_length": 29.484615325927734,
"blob_id": "3f845d5baa7117e850772f480f2ea68faf52516b",
"content_id": "c669f1e3501dabf101bf70c5c494acb2fc29e24d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3962,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 130,
"path": "/my_tweet_stream_reader.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport pandas\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nfrom prettytable import PrettyTable\n\ntweets_data_path = 'my_tweet_mining.json'\n\n# loop through the JSON file line by line and add \n# tweets to the results list\nresults = [] \ntweets_file = open(tweets_data_path, 'r')\nfor tweet_line in tweets_file:\n try:\n status = json.loads(tweet_line)\n results.append(status)\n except:\n continue\n\nprint len(results) # print to test \n\n# create a dataframe\nstatuses = pandas.DataFrame()\n\n# store the text values\nstatuses['text'] = map(lambda status: status['text'], results)\n# store the language values\nstatuses['lang'] = map(lambda status: status['lang'], results)\n# sometimes there may not be a place listed in the tweet\n# so set to 'None' if not present\nstatuses['country'] = map(lambda status: status['place']['country']\n if status['place'] != None else None, results)\n\nstatuses['retweet_count'] = map(lambda status: status['retweet_count'], results)\n\n\n\nhashtags = [ hashtag['text']\n for status in results \n for hashtag in status['entities']['hashtags'] ]\n\n# get each tweet language and the count of its appearance\n# (not to be confused with programming languages)\ntweets_by_lang = statuses['lang'].value_counts() \n# get each tweet country of origin and the count of it's appearance\ntweets_by_country = statuses['country'].value_counts()\n\nretweet_count = statuses['retweet_count'].value_counts()\n# create our drawing space/window(figure)\nfig = plt.figure()\n\n# prepare to plot two charts on the same figure\nax1 = fig.add_subplot(2,1,1)\nax2 = fig.add_subplot(2,1,2)\n\n# style the axes and labels of our plot\nax1.tick_params(axis='x', labelsize=15)\nax1.tick_params(axis='y', labelsize=10)\nax1.set_xlabel('Number of retweets', fontsize=15)\nax1.set_ylabel('Number of tweets', fontsize=15)\nax1.xaxis.label.set_color('#666666')\nax1.yaxis.label.set_color('#666666')\nax1.tick_params(axis='x', colors='#666666')\nax1.tick_params(axis='y', colors='#666666')\n# style the title\nax1.set_title('Retweet Count:', fontsize=15, color='#666666')\n\n# plot the top 10 tweet languages and appearance count using a bar chart\nretweet_count.plot(ax=ax1, kind='bar', color='#FF7A00')\n\n# color the spines (borders)\nfor spine in ax1.spines.values():\n spine.set_edgecolor('#666666')\n\n# second subplot\nax2.tick_params(axis='x', labelsize=15)\nax2.tick_params(axis='y', labelsize=10)\nax2.set_xlabel('Countries', fontsize=15)\nax2.set_ylabel('Number of tweets', fontsize=15)\nax2.xaxis.label.set_color('#666666')\nax2.yaxis.label.set_color('#666666')\nax2.tick_params(axis='x', colors='#666666')\nax2.tick_params(axis='y', colors='#666666')\n# style the title\nax2.set_title('All Countries Tweeted From:', fontsize=15, color='#666666')\n\n# plot the top 10 tweet languages and appearance count using a bar chart\ntweets_by_country.plot(ax=ax2, kind='bar', color='#FF7A00')\n\n# color the spines (borders)\nfor spine in ax2.spines.values():\n spine.set_edgecolor('#666666')\n\n# render the graph\nplt.show()\n\n\n\n\n\n\n\nstatus_texts = [ status['text'] for status in results ]\n\nscreen_names = [ status['user']['screen_name']\n for status in results\n for mention in status['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text']\n for status in results \n for hashtag in status['entities']['hashtags'] ]\n\nwords = [ w for t in status_texts\n for w in t.split() ]\n\n\n\nfor entry in [screen_names, hashtags, words]:\n counter = Counter(entry)\n print 
counter.most_common()[:10] # the top 10 results\n print\n\nfor label, data in (('Word', words), \n ('Screen Name', screen_names), \n ('Hashtag', hashtags)):\n pt = PrettyTable(field_names=[label, 'Count']) \n c = Counter(data)\n [ pt.add_row(kv) for kv in c.most_common()[:10] ]\n pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment\n print pt"
},
{
"alpha_fraction": 0.7160268425941467,
"alphanum_fraction": 0.7221206426620483,
"avg_line_length": 31.19607925415039,
"blob_id": "cb9f631ded4b67cdcab31a33d02bdcfaa8bba141",
"content_id": "ef4db6dd201f0a0ff8e5a8162289db4a25cd320d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1641,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 51,
"path": "/retweet_popularity.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom prettytable import PrettyTable\nfrom collections import Counter\nfrom operator import itemgetter\nfrom settings import twitter_app_config\n\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\ncount = 150\nquery = 'Ireland'\n\n# Get all tweets for the search query\nresults = [status for status in tweepy.Cursor(api.search, q=query).items(count)]\n\n# controls how many retweets a tweet \n# needs in order to be considered popular\nmin_retweets = 10 \n\n# loops through results to determine if a tweet is popular\n# if it is it's added to the pop_tweets list\npop_tweets = [ status\n for staus in results\n if status._json['retweet_count'] > min_retweets ]\n\n# create a list of tweet tuples associating each \n# tweet's text with it's retweet count\ntweet_tups = [(tweet._json['text'].encode('utf-8'), \n tweet._json['retweet_count'])\n for tweet in pop_tweets]\n\n# sort the tuple entries in descending order\nmost_popular_tups = sorted(tweet_tups, key=itemgetter(1), reverse=True)[:5]\n\n# prettify \ntable = PrettyTable(field_names=['Text', 'Retweet Count'])\nfor key, val in most_popular_tups:\n table.add_row([key, val])\ntable.max_width['Text'] = 50\ntable.align['Text'], table.align['Retweet Count'] = 'l', 'r' # align colums\nprint table"
},
{
"alpha_fraction": 0.770312488079071,
"alphanum_fraction": 0.770312488079071,
"avg_line_length": 26.869565963745117,
"blob_id": "5a39aa5267067ff36f98f64d8fc06cb9712bafa0",
"content_id": "6e8660e1a2c90741dd626306de05e7c5d78a76bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 640,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 23,
"path": "/twitter_user.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import tweepy\nfrom tweepy import OAuthHandler\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\napi = tweepy.API(auth)\n\nuser = api.get_user('@neilhimself')\n\nprint user.screen_name\nprint user.followers_count\n\nfor friend in user.friends():\n print\n print friend.screen_name\n print friend.followers_count"
},
{
"alpha_fraction": 0.7074040770530701,
"alphanum_fraction": 0.7082961797714233,
"avg_line_length": 32,
"blob_id": "edbf8b165e810c0eb6d069d1e180338c573837e2",
"content_id": "60ed5cdfbd0da9f177578cc7b1482079552a8ed4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 34,
"path": "/my_tweet_stream.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\n# list of strings to search for\nkeyword_list = ['trumpimpeachment','globalseedbank','americangods','guardiansofthegalaxy2']\n\n# customize the class here\nclass MyStreamListener(StreamListener):\n\n def on_data(self, data): \n try:\n with open('my_tweet_mining.json', 'a') as tweet_file:\n tweet_file.write(data)\n return True\n except BaseException as e:\n print \"Failed on_data: %s\" % str(e)\n return True\n\n def on_error(self, status):\n print status\n return True\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\ntwitter_stream = Stream(auth, MyStreamListener())\ntwitter_stream.filter(track=keyword_list)"
},
{
"alpha_fraction": 0.6793200969696045,
"alphanum_fraction": 0.6838526725769043,
"avg_line_length": 33.6274528503418,
"blob_id": "cff7afa99e3ba8adf38e557c0cb8349238896dc5",
"content_id": "89bde5c2fb8378dc739ec0971b528bf202b2e83e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1765,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 51,
"path": "/tweet_access.py",
"repo_name": "PaulMFleming/PythonAndTwitter",
"src_encoding": "UTF-8",
"text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom settings import twitter_app_config\n\nCONSUMER_KEY = twitter_app_config.get('CONSUMER_KEY')\nCONSUMER_SECRET = twitter_app_config.get('CONSUMER_SECRET')\nOAUTH_TOKEN = twitter_app_config.get('OAUTH_TOKEN')\nOAUTH_TOKEN_SECRET = twitter_app_config.get('OAUTH_TOKEN_SECRET')\n\nauth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\ncount = 50\nquery = 'Enda Kenny'\n\n# Get all status\nresults = [status for status in tweepy.Cursor(api.search, q=query).items(count)]\n# print json.dumps(results[0]._json, indent=4)\n\n# Get specific info isolated ino variables\nstatus_texts = [ status._json['text'] for status in results ]\n\nscreen_names = [ status._json['user']['screen_name']\n for status in results\n for mention in status._json['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text']\n for status in results\n for hashtag in status._json['entities']['hashtags'] ]\n\nwords = [ word\n for text in status_texts\n for word in text.split() ]\n\n\n# Figures out how many unique words \n# are in a list of text\ndef get_lexical_diversity(items):\n return 1.0*len(set(items))/len(items)\n\n# Figures out the average number of words in a text\ndef get_average_words(tweets):\n total_words = sum([ len(tweet.split()) for tweet in tweets ])\n return 1.0*total_words/len(tweets)\n\nprint \"Average words: %s\" % get_average_words(status_texts)\nprint \"Word Diversity: %s\" % get_lexical_diversity(words)\nprint \"Screen Name Diversity: %s\" % get_lexical_diversity(screen_names)\nprint \"HashTag Diversity: %s\" % get_lexical_diversity(hashtags)"
}
] | 10 |
eqsongca/pytorch-enhance | https://github.com/eqsongca/pytorch-enhance | 4aba07cd2ba007c1e1a35aba7cd78a2277f78e45 | 16c0a354812fda7adb4c51f9e5025e8ca8fe317d | e406b91b2d45af627c796ffa3d83e78655990fc1 | refs/heads/master | 2023-07-19T05:59:53.520330 | 2021-09-03T11:55:19 | 2021-09-03T11:55:19 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6137565970420837,
"alphanum_fraction": 0.6338624358177185,
"avg_line_length": 26.794116973876953,
"blob_id": "87defa217078fb4168e405c62a8b7c98653546a6",
"content_id": "1fdb8d6faa3b636c7452dbc7b872f08e143927ed",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 945,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 34,
"path": "/torch_enhance/datasets/general100.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import os\nfrom dataclasses import dataclass\n\nimport torchvision.transforms as T\n\nfrom .base import GENERAL100_URL, BaseDataset\n\n\n@dataclass()\nclass General100(BaseDataset):\n\n scale_factor: int = 2\n image_size: int = 256\n color_space: str = \"RGB\"\n data_dir: str = \"\"\n lr_transforms: T.Compose = None\n hr_transforms: T.Compose = None\n\n def __post_init__(self):\n\n self.url = GENERAL100_URL\n self.extensions = [\".png\"]\n\n if self.data_dir == \"\":\n self.data_dir = os.path.join(os.getcwd(), self.base_dir)\n\n self.root_dir = os.path.join(self.data_dir, \"General100\")\n self.download_google_drive(self.data_dir, filename=\"General100.zip\")\n self.file_names = self.get_files(self.root_dir)\n\n if self.lr_transforms is None:\n self.lr_transform = self.get_lr_transforms()\n if self.hr_transforms is None:\n self.hr_transform = self.get_hr_transforms()\n"
},
{
"alpha_fraction": 0.6725274920463562,
"alphanum_fraction": 0.6840659379959106,
"avg_line_length": 29.847457885742188,
"blob_id": "aea55a43c3d478e4c7f0cae1e808d1b6f190447b",
"content_id": "6de3893e898a46155c786c9f5bc6be144b63e618",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1820,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 59,
"path": "/tests/test_models.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import itertools\n\nimport pytest\nimport torch\nimport torch.nn as nn\nfrom torch_enhance import models\n\nDTYPE = torch.float32\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nIMAGE_SIZE = 32\nSCALE_FACTOR = [2, 3, 4]\nCHANNELS = [1, 3]\nBATCH_SIZE = [1, 2]\nMODELS = [\n models.Bicubic, models.SRCNN, models.ESPCN,\n models.EDSR, models.VDSR, models.SRResNet\n]\nparams = list(itertools.product(MODELS, SCALE_FACTOR, CHANNELS, BATCH_SIZE))\n\n\[email protected](\"module, scale_factor, channels, batch_size\", params)\ndef test_model(module, scale_factor, channels, batch_size):\n\n # SRResNet only supports scale_factor 2 or 4\n if scale_factor == 3 and module in [models.SRResNet, models.EDSR]:\n return\n\n model = module(scale_factor, channels)\n model = model.to(DEVICE)\n\n lr = torch.ones(batch_size, channels, IMAGE_SIZE, IMAGE_SIZE)\n lr = lr.to(DTYPE)\n lr = lr.to(DEVICE)\n sr = model(lr)\n assert sr.shape == (batch_size, channels, IMAGE_SIZE*scale_factor, IMAGE_SIZE*scale_factor)\n assert sr.dtype == torch.float32\n\n\[email protected](\"module, scale_factor, channels, batch_size\", params)\ndef test_enhance(module, scale_factor, channels, batch_size):\n\n # SRResNet only supports scale_factor 2 or 4\n if scale_factor == 3 and module in [models.SRResNet, models.EDSR]:\n return\n\n model = module(scale_factor, channels)\n model = model.to(DEVICE)\n\n lr = torch.ones(batch_size, channels, IMAGE_SIZE, IMAGE_SIZE)\n lr = lr.to(DTYPE)\n lr = lr.to(DEVICE)\n sr = model.enhance(lr)\n\n if batch_size == 1:\n assert sr.shape == (channels, IMAGE_SIZE*scale_factor, IMAGE_SIZE*scale_factor)\n else:\n assert sr.shape == (batch_size, channels, IMAGE_SIZE*scale_factor, IMAGE_SIZE*scale_factor)\n \n assert sr.dtype == torch.torch.uint8\n"
},
{
"alpha_fraction": 0.46587032079696655,
"alphanum_fraction": 0.48025354743003845,
"avg_line_length": 25.636363983154297,
"blob_id": "0185c051039773be71508a188b54cecfbf809e7c",
"content_id": "3d1620c4b8c41ecf131618361b21a2d14978974b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4102,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 154,
"path": "/torch_enhance/models/srresnet.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nfrom .base import BaseModel\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"Base Residual Block\"\"\"\n\n def __init__(self, channels: int, kernel_size: int, activation):\n super().__init__()\n\n self.model = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n nn.BatchNorm2d(num_features=channels),\n activation(),\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n nn.BatchNorm2d(num_features=channels),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return x + self.model(x)\n\n\nclass UpsampleBlock(nn.Module):\n \"\"\"Base PixelShuffle Upsample Block\"\"\"\n\n def __init__(\n self, n_upsamples: int, channels: int, kernel_size: int, activation\n ):\n super().__init__()\n\n layers = []\n for _ in range(n_upsamples):\n layers.extend(\n [\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels * 2 ** 2,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n nn.PixelShuffle(2),\n activation(),\n ]\n )\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)\n\n\nclass SRResNet(BaseModel):\n \"\"\"Super-Resolution Residual Neural Network\n https://arxiv.org/pdf/1609.04802v5.pdf\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n num_blocks: int\n Number of stacked residual blocks\n \"\"\"\n\n def __init__(\n self, scale_factor: int, channels: int = 3, num_blocks: int = 16\n ):\n super().__init__()\n\n # Pre Residual Blocks\n self.head = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=64,\n kernel_size=9,\n stride=1,\n padding=4,\n ),\n nn.PReLU(),\n )\n\n # Residual Blocks\n self.res_blocks = [\n ResidualBlock(channels=64, kernel_size=3, activation=nn.PReLU)\n for _ in range(num_blocks)\n ]\n self.res_blocks.append(\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n )\n self.res_blocks.append(nn.BatchNorm2d(num_features=64))\n self.res_blocks = nn.Sequential(*self.res_blocks)\n\n # Upsamples\n n_upsamples = 1 if scale_factor == 2 else 2\n self.upsample = UpsampleBlock(\n n_upsamples=n_upsamples,\n channels=64,\n kernel_size=3,\n activation=nn.PReLU,\n )\n\n # Output layer\n self.tail = nn.Sequential(\n nn.Conv2d(\n in_channels=64,\n out_channels=channels,\n kernel_size=9,\n stride=1,\n padding=4,\n ),\n nn.Sigmoid(),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n x = self.head(x)\n x = x + self.res_blocks(x)\n x = self.upsample(x)\n x = self.tail(x)\n return x\n"
},
{
"alpha_fraction": 0.6260330677032471,
"alphanum_fraction": 0.6280992031097412,
"avg_line_length": 20.04347801208496,
"blob_id": "774a6b3b40963617463627a0797df8bc58525f88",
"content_id": "6afe41469c483972ec370b95eca2cda49c5a030b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 484,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 23,
"path": "/docs/index.rst",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": ":github_url: https://github.com/IsaacCorley/pytorch_enhance\n\nWelcome to PyTorch Enhance's documentation!\n===========================================\n\nPyTorch Enhance is a Deep Learning Super-Resolution library for `PyTorch <https://pytorch.org/>`_.\n\n.. toctree::\n :glob:\n :maxdepth: 2\n :caption: Package Reference\n\n source/datasets\n source/models\n source/losses\n source/metrics\n source/utils\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n"
},
{
"alpha_fraction": 0.5813191533088684,
"alphanum_fraction": 0.5907415151596069,
"avg_line_length": 24.164947509765625,
"blob_id": "fd5f49ab329dbb9a1369c27f4535c7685e6e2d24",
"content_id": "02569ad0605f83faeb793f1372eb9d69bd8bba0e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2441,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 97,
"path": "/examples/pytorch_lightning_example.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nimport pytorch_lightning as pl\n\nfrom torch_enhance.datasets import BSDS300, Set14, Set5\nfrom torch_enhance.models import SRCNN\nfrom torch_enhance import metrics\n\n\nclass Module(pl.LightningModule):\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, x):\n return self.model(x)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=1e-3)\n\n def training_step(self, batch, batch_idx):\n lr, hr = batch\n sr = self(lr)\n loss = F.mse_loss(sr, hr, reduction=\"mean\")\n \n # metrics\n mae = metrics.mae(sr, hr)\n psnr = metrics.psnr(sr, hr)\n\n # Logs\n self.log(\"train_loss\", loss)\n self.log(\"train_mae\", mae)\n self.log(\"train_psnr\", psnr)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n lr, hr = batch\n sr = self(lr)\n loss = F.mse_loss(sr, hr, reduction=\"mean\")\n \n # metrics\n mae = metrics.mae(sr, hr)\n psnr = metrics.psnr(sr, hr)\n\n # Logs\n self.log(\"val_loss\", loss)\n self.log(\"val_mae\", mae)\n self.log(\"val_psnr\", psnr)\n\n return loss\n\n def test_step(self, batch, batch_idx):\n lr, hr = batch\n sr = self(lr)\n loss = F.mse_loss(sr, hr, reduction=\"mean\")\n \n # metrics\n mae = metrics.mae(sr, hr)\n psnr = metrics.psnr(sr, hr)\n\n # Logs\n self.log(\"test_loss\", loss)\n self.log(\"test_mae\", mae)\n self.log(\"test_psnr\", psnr)\n\n return loss\n\n\nif __name__ == '__main__':\n \n scale_factor = 2\n\n # Setup dataloaders\n train_dataset = BSDS300(scale_factor=scale_factor)\n val_dataset = Set14(scale_factor=scale_factor)\n test_dataset = Set5(scale_factor=scale_factor)\n train_dataloader = DataLoader(train_dataset, batch_size=32)\n val_dataloader = DataLoader(val_dataset, batch_size=1)\n test_dataloader = DataLoader(test_dataset, batch_size=1)\n\n # Define model\n channels = 3 if train_dataset.color_space == \"RGB\" else 1\n model = SRCNN(scale_factor, channels)\n module = Module(model)\n\n trainer = pl.Trainer(max_epochs=5, gpus=1)\n trainer.fit(\n module,\n train_dataloader,\n val_dataloader\n )\n trainer.test(module, test_dataloader)\n"
},
{
"alpha_fraction": 0.5416830778121948,
"alphanum_fraction": 0.5585922002792358,
"avg_line_length": 27.255556106567383,
"blob_id": "6f1626b1ba198011b74a9aee7a5ebe3a57ab822d",
"content_id": "07011a2702bedbf295e2276ba6de7bd2d0cb98ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5086,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 180,
"path": "/torch_enhance/datasets/base.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import os\nfrom typing import List, Tuple\n\nimport torch\nimport torchvision.transforms as T\nfrom torchvision.transforms import Compose, ToTensor, Resize\nfrom torchvision.datasets.utils import (\n download_file_from_google_drive,\n extract_archive,\n)\nfrom PIL import Image\n\n\nDIV2K_TRAIN_URL = \"http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip\"\nDIV2K_VAL_URL = \"http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_HR.zip\"\nBSDS300_URL = \"http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz\"\nBSDS500_URL = \"http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz\"\nBSDS100_URL = \"1nu78kEKoSTti7ynh8pdxa7ae7TvZiNOy\"\nBSDS200_URL = \"1N9cK1OScGrACUgCms0f2rFlUOHhgkW0l\"\nSET5_URL = \"14g2glfOdkxzZ2RnQZR6jYU5CoClsxQRo\"\nSET14_URL = \"1FSJqQVISh19onL1TUqPNor0uRyp8LlNb\"\nT91_URL = \"1VSG1e5nvdV9UCUSYuaKecNFuk3OPUat4\"\nHISTORICAL_URL = \"1sc14tdRslyZsfw1-LpoOCKF72kSWKedx\"\nMANGA109_URL = \"1bEjcSRiT4V6vxjHjhr_jBPmAr3sGS_5l\"\nURBAN100_URL = \"1svYMEyfc5mkpnW6JnkF0ZS_KetgEYgLR\"\nGENERAL100_URL = \"1tD6XBLkV9Qteo2obMRcRueTRwie7Hqae\"\n\n\nclass BaseDataset(torch.utils.data.Dataset):\n \"\"\"Base Super Resolution Dataset Class\"\"\"\n\n base_dir: str = \".data\"\n color_space: str = \"RGB\"\n extensions: List[str] = [\"\"]\n lr_transform: T.Compose = None\n hr_transform: T.Compose = None\n\n def get_lr_transforms(self):\n \"\"\"Returns HR to LR image transformations\"\"\"\n return Compose(\n [\n Resize(\n size=(\n self.image_size // self.scale_factor,\n self.image_size // self.scale_factor,\n ),\n interpolation=T.InterpolationMode.BICUBIC,\n ),\n ToTensor(),\n ]\n )\n\n def get_hr_transforms(self):\n \"\"\"Returns HR image transformations\"\"\"\n return Compose(\n [\n Resize(\n (self.image_size, self.image_size),\n T.InterpolationMode.BICUBIC,\n ),\n ToTensor(),\n ]\n )\n\n def get_files(self, root_dir: str) -> List[str]:\n \"\"\"Returns a list of valid image files in a directory\n\n Parameters\n ----------\n root_dir : str\n Path to directory of images.\n\n Returns\n -------\n List[str]\n List of valid images in `root_dir` directory.\n\n \"\"\"\n return [\n os.path.join(root_dir, x)\n for x in os.listdir(root_dir)\n if self.is_valid_file(x)\n ]\n\n def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns a tuple of and lr and hr torch tensors\n\n Parameters\n ----------\n idx : int\n Index value to index the list of images\n\n Returns\n -------\n lr: torch.Tensor\n Low Resolution transformed indexed image.\n hr: torch.Tensor\n High Resolution transformed indexed image.\n\n \"\"\"\n lr = self.load_img(self.file_names[idx])\n hr = lr.copy()\n if self.lr_transform:\n lr = self.lr_transform(lr)\n if self.hr_transform:\n hr = self.hr_transform(hr)\n\n return lr, hr\n\n def __len__(self) -> int:\n \"\"\"Return number of images in dataset\n\n Returns\n -------\n int\n Number of images in dataset file_names list\n\n \"\"\"\n return len(self.file_names)\n\n def is_valid_file(self, file_path: str) -> bool:\n \"\"\"Returns boolean if the given `file_path` has a valid image extension\n\n Parameters\n ----------\n file_path : str\n Path to image file\n\n Returns\n -------\n bool\n True if `file_path` has a valid image extension otherwise False\n\n \"\"\"\n return any(file_path.endswith(ext) for ext in self.extensions)\n\n def load_img(self, file_path: str) -> Image.Image:\n \"\"\"Returns a PIL Image of the image located at `file_path`\n\n Parameters\n ----------\n file_path : str\n Path to 
image file to be loaded\n\n Returns\n -------\n PIL.Image.Image\n Loaded image as PIL Image\n\n \"\"\"\n return Image.open(file_path).convert(self.color_space)\n\n def download_google_drive(self, data_dir: str, filename: str) -> None:\n \"\"\"Download dataset\n\n Parameters\n ----------\n data_dir : str\n Path to base dataset directory\n filename : str\n Filename of google drive file being downloaded\n\n Returns\n -------\n None\n\n \"\"\"\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n if not os.path.exists(self.root_dir):\n\n download_file_from_google_drive(\n file_id=self.url, root=data_dir, filename=filename\n )\n extract_archive(\n from_path=os.path.join(data_dir, filename),\n to_path=data_dir,\n remove_finished=True,\n )\n"
},
{
"alpha_fraction": 0.45618557929992676,
"alphanum_fraction": 0.473195880651474,
"avg_line_length": 22.658536911010742,
"blob_id": "32cda584ee8ba3fd5fc669f12cc2765f4db6f310",
"content_id": "8c87b632ed3baa10cbd40147547b28480bc1ad23",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1940,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 82,
"path": "/torch_enhance/models/vdsr.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\n\nfrom .base import BaseModel\nfrom .baseline import Bicubic\n\n\nclass VDSR(BaseModel):\n \"\"\"Very Deep Super Resolution\n https://arxiv.org/pdf/1511.04587.pdf\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n num_layers: int\n Number of stacked conv layers\n \"\"\"\n\n def __init__(\n self, scale_factor: int, channels: int = 3, num_layers: int = 20\n ):\n super().__init__()\n\n self.upsample = Bicubic(scale_factor)\n\n # Initial layer\n layers = [\n nn.Conv2d(\n in_channels=channels,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n nn.ReLU(),\n ]\n\n # Residual reconstruction\n for i in range(num_layers - 2):\n layers.append(\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n )\n layers.append(nn.ReLU())\n\n # Output reconstruction layer\n layers.append(\n nn.Conv2d(\n in_channels=64,\n out_channels=channels,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n )\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n x = self.upsample(x)\n x = x + self.model(x)\n return x\n"
},
{
"alpha_fraction": 0.5630252361297607,
"alphanum_fraction": 0.5630252361297607,
"avg_line_length": 19,
"blob_id": "8146144a2446a9389c82f4ea9ca83468825b0f7d",
"content_id": "80d0b51476d85e0862c57220436168d1332cdf19",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 6,
"path": "/docs/source/metrics.rst",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "torch_enhance.metrics\n========================\n\n.. automodule:: torch_enhance.metrics\n :members:\n :undoc-members:"
},
{
"alpha_fraction": 0.6742209792137146,
"alphanum_fraction": 0.6742209792137146,
"avg_line_length": 26.153846740722656,
"blob_id": "1727687ca693d763eba6a276f9e3fdd5ba239f71",
"content_id": "d4017585e20622cc50b2ff4c09c5715513ab989a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 353,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 13,
"path": "/torch_enhance/utils.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torchvision\n\n\n__all__ = [\"plot_compare\"]\n\n\ndef plot_compare(sr, hr, baseline, filename):\n \"\"\"\n Plot Super-Resolution and High-Resolution image comparison\n \"\"\"\n sr, hr, baseline = sr.squeeze(), hr.squeeze(), baseline.squeeze()\n grid = torchvision.utils.make_grid([hr, baseline, sr])\n torchvision.utils.save_image(grid, filename)\n"
},
{
"alpha_fraction": 0.5478261113166809,
"alphanum_fraction": 0.5478261113166809,
"avg_line_length": 18.33333396911621,
"blob_id": "740f2ce3bb2ccab59016b29c65d860c54d89c27d",
"content_id": "a29aba7b10de450b1dbf3bdc0db37151915232bb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 6,
"path": "/docs/source/utils.rst",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "torch_enhance.utils\n========================\n\n.. automodule:: torch_enhance.utils\n :members:\n :undoc-members:"
},
{
"alpha_fraction": 0.7276119589805603,
"alphanum_fraction": 0.7276119589805603,
"avg_line_length": 25.799999237060547,
"blob_id": "7ed5493f9f6a8dddf0962c607031186f008a8357",
"content_id": "9144f72fc82559b96174507a22c2a4840eb823ec",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 10,
"path": "/torch_enhance/models/__init__.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "from .base import BaseModel\nfrom .baseline import Bicubic\nfrom .srcnn import SRCNN\nfrom .edsr import EDSR\nfrom .vdsr import VDSR\nfrom .espcn import ESPCN\nfrom .srresnet import SRResNet\n\n\n__all__ = [\"BaseModel\", \"Bicubic\", \"SRCNN\", \"VDSR\", \"EDSR\", \"ESPCN\", \"SRResNet\"]\n"
},
{
"alpha_fraction": 0.5494335889816284,
"alphanum_fraction": 0.5592172741889954,
"avg_line_length": 26.742856979370117,
"blob_id": "7bf97debc1cde2d5d2307384a8c181145c632938",
"content_id": "7e570cc5acdda3d44f84cbced0d22dc81ffa9ad0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1942,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 70,
"path": "/torch_enhance/datasets/bsds500.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import os\nimport shutil\nfrom dataclasses import dataclass\n\nimport torchvision.transforms as T\nfrom torchvision.datasets.utils import download_and_extract_archive\n\nfrom .base import BSDS500_URL, BaseDataset\n\n\n@dataclass()\nclass BSDS500(BaseDataset):\n\n scale_factor: int = 2\n image_size: int = 256\n color_space: str = \"RGB\"\n set_type: str = \"train\"\n data_dir: str = \"\"\n lr_transforms: T.Compose = None\n hr_transforms: T.Compose = None\n\n def __post_init__(self):\n self.url = BSDS500_URL\n self.extensions = [\".jpg\"]\n\n if self.data_dir == \"\":\n self.data_dir = os.path.join(os.getcwd(), self.base_dir)\n\n self.root_dir = os.path.join(self.data_dir, \"BSDS500\")\n self.download(self.data_dir)\n self.set_dir = os.path.join(self.root_dir, self.set_type)\n self.file_names = self.get_files(self.set_dir)\n\n if self.lr_transforms is None:\n self.lr_transform = self.get_lr_transforms()\n if self.hr_transforms is None:\n self.hr_transform = self.get_hr_transforms()\n\n def download(self, data_dir: str) -> None:\n \"\"\"Download dataset\n\n Parameters\n ----------\n data_dir : str\n Path to base dataset directory\n\n Returns\n -------\n None\n\n \"\"\"\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n if not os.path.exists(self.root_dir):\n os.makedirs(self.root_dir)\n\n download_and_extract_archive(\n self.url, data_dir, remove_finished=True\n )\n\n # Tidy up\n for d in [\"train\", \"val\", \"test\"]:\n shutil.move(\n src=os.path.join(data_dir, \"BSR/BSDS500/data/images\", d),\n dst=self.root_dir,\n )\n os.remove(os.path.join(self.root_dir, d, \"Thumbs.db\"))\n\n shutil.rmtree(os.path.join(data_dir, \"BSR\"))\n"
},
{
"alpha_fraction": 0.44002240896224976,
"alphanum_fraction": 0.4624439477920532,
"avg_line_length": 23.77777862548828,
"blob_id": "53747dbba12ea585379d8e9004b11356783e027b",
"content_id": "9a0059b1c8078ac8582b0d6992e5b6fbd924478f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1784,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 72,
"path": "/torch_enhance/models/espcn.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nfrom .base import BaseModel\n\n\nclass ESPCN(BaseModel):\n \"\"\"Efficient Sub-Pixel Convolutional Neural Network\n https://arxiv.org/pdf/1609.05158v2.pdf\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n\n \"\"\"\n\n def __init__(self, scale_factor: int, channels: int = 3):\n super().__init__()\n\n self.model = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=64,\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=channels * scale_factor ** 2,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n nn.PixelShuffle(scale_factor),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n return self.model(x)\n"
},
{
"alpha_fraction": 0.556543231010437,
"alphanum_fraction": 0.5624691247940063,
"avg_line_length": 23.39759063720703,
"blob_id": "a8c25932b3c137ce412262a2e6bfa176c54e0fae",
"content_id": "f0b5ce9b0f5aaa830f5e02052b63ec2afca017fc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2025,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 83,
"path": "/torch_enhance/models/base.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import os\nimport shutil\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.datasets.utils import download_and_extract_archive\n\n\nMODELS_PATH = \".models\"\n\n\nclass BaseModel(nn.Module):\n \"\"\"Base Super-Resolution module\"\"\"\n\n def load_pretrained(self, weights_url: str, weights_path: str) -> None:\n \"\"\"Download pretrained weights and load as state dict\n\n Parameters\n ----------\n weights_url : str\n Base URL to pretrained weights.\n weights_path : str\n Path to save pretrained weights.\n\n Returns\n -------\n None\n\n \"\"\"\n base_file = os.path.basename(weights_path)\n\n if not os.path.exists(os.path.join(MODELS_PATH, base_file)):\n self.download(weights_url, weights_path)\n\n self.load_state_dict(torch.load(os.path.join(MODELS_PATH, base_file)))\n\n @staticmethod\n def download(url: str, weights_path: str) -> None:\n \"\"\"Download pretrained weights\n\n Parameters\n ----------\n weights_path : str\n Path to save pretrained weights.\n\n Returns\n -------\n None\n\n \"\"\"\n base_file = os.path.basename(weights_path)\n\n if not os.path.exists(MODELS_PATH):\n os.mkdir(MODELS_PATH)\n\n download_and_extract_archive(url, MODELS_PATH, remove_finished=True)\n shutil.copyfile(weights_path, os.path.join(MODELS_PATH, base_file))\n shutil.rmtree(os.path.dirname(weights_path))\n\n @torch.no_grad()\n def enhance(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve x and cast as image\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n if x.ndim == 3:\n x = x.unsqueeze(0)\n\n x = self.forward(x)\n x *= 255.0\n x = x.clamp(0, 255)\n x = x.to(torch.uint8)\n x = x.squeeze(0)\n return x\n"
},
{
"alpha_fraction": 0.4677320122718811,
"alphanum_fraction": 0.48678550124168396,
"avg_line_length": 23.28358268737793,
"blob_id": "2d252fc7141299884279bca5787cd01923b1b36f",
"content_id": "53dd53e44f3a1429a23253c680ec099738e629b4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1627,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 67,
"path": "/torch_enhance/models/srcnn.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nfrom .base import BaseModel\nfrom .baseline import Bicubic\n\n\nclass SRCNN(BaseModel):\n \"\"\"Super-Resolution Convolutional Neural Network\n https://arxiv.org/pdf/1501.00092v3.pdf\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n \"\"\"\n\n def __init__(self, scale_factor: int, channels: int = 3):\n super().__init__()\n\n self.upsample = Bicubic(scale_factor)\n\n self.model = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=64,\n kernel_size=9,\n stride=1,\n padding=4,\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=32,\n kernel_size=1,\n stride=1,\n padding=0,\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=channels,\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n x = self.upsample(x)\n x = self.model(x)\n return x\n"
},
{
"alpha_fraction": 0.5397022366523743,
"alphanum_fraction": 0.5614144206047058,
"avg_line_length": 25.42622947692871,
"blob_id": "0121db2f06ed5062357d206cc3eb4e308340a3fa",
"content_id": "e96e802ea535348ba52ed4de1c4823d1eb5d1e97",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1612,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 61,
"path": "/torch_enhance/losses/vgg.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as T\n\n\nclass VGG(nn.Module):\n \"\"\"VGG/Perceptual Loss\n\n Parameters\n ----------\n conv_index : str\n Convolutional layer in VGG model to use as perceptual output\n\n \"\"\"\n\n def __init__(self, conv_index: str = \"22\"):\n\n super().__init__()\n vgg_features = torchvision.models.vgg16(pretrained=True).features\n modules = [m for m in vgg_features]\n\n if conv_index == \"22\":\n vgg = nn.Sequential(*modules[:8])\n elif conv_index == \"54\":\n vgg = nn.Sequential(*modules[:35])\n\n vgg.requires_grad = False\n vgg.eval()\n\n self.vgg = vgg\n self.vgg_mean = torch.tensor([0.485, 0.456, 0.406])[None, :, None, None]\n self.vgg_std = torch.tensor([0.229, 0.224, 0.225])[None, :, None, None]\n\n def forward(self, sr: torch.Tensor, hr: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute VGG/Perceptual loss between Super-Resolved and High-Resolution\n\n Parameters\n ----------\n sr : torch.Tensor\n Super-Resolved model output tensor\n hr : torch.Tensor\n High-Resolution image tensor\n\n Returns\n -------\n loss : torch.Tensor\n Perceptual VGG loss between sr and hr\n\n \"\"\"\n sr = (sr - self.vgg_mean) / self.vgg_std\n hr = (hr - self.vgg_mean) / self.vgg_std\n vgg_sr = self.vgg(sr)\n\n with torch.no_grad():\n vgg_hr = self.vgg(hr)\n\n loss = F.mse_loss(vgg_sr, vgg_hr)\n\n return loss\n"
},
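A sketch of how the VGG perceptual loss defined above might be called. Illustrative only: the import path follows the losses/__init__.py record later in this listing, the conv_index value comes from the constructor above, and the assumption that inputs are RGB tensors roughly in the [0, 1] range is inferred from the ImageNet mean/std normalization. Instantiating the loss downloads pretrained VGG16 weights.

# Illustrative sketch, not part of the dataset record.
import torch
from torch_enhance.losses import VGG

criterion = VGG(conv_index="22")                    # features up to relu2_2
sr = torch.rand(1, 3, 96, 96, requires_grad=True)   # super-resolved prediction (assumed in [0, 1])
hr = torch.rand(1, 3, 96, 96)                       # ground-truth high-resolution image

loss = criterion(sr, hr)   # MSE between VGG feature maps of sr and hr
loss.backward()            # the hr branch runs under no_grad, so gradients flow only through sr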
{
"alpha_fraction": 0.47087377309799194,
"alphanum_fraction": 0.48671436309814453,
"avg_line_length": 25.093334197998047,
"blob_id": "8461f76c3dd57428b8caefb2c487145e74be3e47",
"content_id": "f2db5741c616c4d8eec0d9c33f52cc51c68dc521",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3914,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 150,
"path": "/torch_enhance/models/edsr.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nfrom .base import BaseModel\n\n\nclass UpsampleBlock(nn.Module):\n \"\"\"Base PixelShuffle Upsample Block\"\"\"\n\n def __init__(self, n_upsamples: int, channels: int, kernel_size: int):\n super().__init__()\n\n layers = []\n for _ in range(n_upsamples):\n layers.extend([\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels * 2 ** 2,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n nn.PixelShuffle(2),\n ])\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"Base Residual Block\"\"\"\n\n def __init__(\n self, channels: int, kernel_size: int, res_scale: float, activation\n ):\n super().__init__()\n\n self.res_scale = res_scale\n\n self.model = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n activation(),\n nn.Conv2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=kernel_size,\n stride=1,\n padding=kernel_size // 2,\n ),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n shortcut = x\n x = self.model(x) * self.res_scale\n x = x + shortcut\n return x\n\n\nclass EDSR(BaseModel):\n \"\"\"Enhanced Deep Residual Networks for Single Image Super-Resolution\n https://arxiv.org/pdf/1707.02921v1.pdf\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n num_blocks: int\n Number of stacked residual blocks\n \"\"\"\n\n def __init__(\n self, scale_factor: int, channels: int = 3, num_blocks: int = 32\n ):\n super().__init__()\n\n # Pre Residual Blocks\n self.head = nn.Sequential(\n nn.Conv2d(\n in_channels=channels,\n out_channels=256,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n )\n\n # Residual Blocks\n self.res_blocks = [\n ResidualBlock(\n channels=256, kernel_size=3, res_scale=0.1, activation=nn.ReLU\n )\n for _ in range(num_blocks)\n ]\n self.res_blocks.append(\n nn.Conv2d(\n in_channels=256,\n out_channels=256,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n )\n self.res_blocks = nn.Sequential(*self.res_blocks)\n\n # Upsamples\n n_upsamples = 1 if scale_factor == 2 else 2\n self.upsample = UpsampleBlock(\n n_upsamples=n_upsamples, channels=256, kernel_size=3\n )\n\n # Output layer\n self.tail = nn.Sequential(\n nn.Conv2d(\n in_channels=256,\n out_channels=channels,\n kernel_size=3,\n stride=1,\n padding=1,\n ),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n x = self.head(x)\n x = x + self.res_blocks(x)\n x = self.upsample(x)\n x = self.tail(x)\n return x\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.7061923742294312,
"avg_line_length": 18.487178802490234,
"blob_id": "06bef25e68b5297ca627584ee4bfcb4e1c91c6fc",
"content_id": "81c746ad62d5b4b858355a30ac300a5ac7634cfb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 759,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 39,
"path": "/tests/test_datasets.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom torch_enhance import datasets\n\n\nSCALE_FACTOR = 2\n\ndef test_BSDS300():\n data = datasets.BSDS300(SCALE_FACTOR)\n\ndef test_BSDS500():\n data = datasets.BSDS500(SCALE_FACTOR)\n\ndef test_BSDS200():\n data = datasets.BSDS200(SCALE_FACTOR)\n\ndef test_BSDS100():\n data = datasets.BSDS100(SCALE_FACTOR)\n\ndef test_Set5():\n data = datasets.Set5(SCALE_FACTOR)\n\ndef test_Set14():\n data = datasets.Set14(SCALE_FACTOR)\n\ndef test_T91():\n data = datasets.T91(SCALE_FACTOR)\n\ndef test_Historical():\n data = datasets.Historical(SCALE_FACTOR)\n\ndef test_General100():\n data = datasets.General100(SCALE_FACTOR)\n\ndef test_Urban100():\n data = datasets.Urban100(SCALE_FACTOR)\n\ndef test_Manga109():\n data = datasets.Manga109(SCALE_FACTOR)"
},
{
"alpha_fraction": 0.5699373483657837,
"alphanum_fraction": 0.5709812045097351,
"avg_line_length": 22.950000762939453,
"blob_id": "11165d191f57fe3b7c1a795c6d9fa3171bd89109",
"content_id": "a514ac2980cd580cf2e077762794772e5d3defe7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 958,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 40,
"path": "/torch_enhance/models/baseline.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nfrom .base import BaseModel\n\n\nclass Bicubic(BaseModel):\n \"\"\"Bicubic Interpolation Upsampling module\n\n Parameters\n ----------\n scale_factor : int\n Super-Resolution scale factor. Determines Low-Resolution downsampling.\n channels: int\n Number of input and output channels\n \"\"\"\n\n def __init__(self, scale_factor: int, channels: int = 3):\n super().__init__()\n self.model = nn.Sequential(\n nn.Upsample(\n scale_factor=scale_factor, mode=\"bicubic\", align_corners=False\n )\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Super-resolve Low-Resolution input tensor\n\n Parameters\n ----------\n x : torch.Tensor\n Input Low-Resolution image as tensor\n\n Returns\n -------\n torch.Tensor\n Super-Resolved image as tensor\n\n \"\"\"\n return self.model(x)\n"
},
{
"alpha_fraction": 0.550000011920929,
"alphanum_fraction": 0.550000011920929,
"avg_line_length": 12.333333015441895,
"blob_id": "24fe9248cb35f6ea19cf180389129441303fef94",
"content_id": "444bff960ad53a7cede4d7edf85bff20cbb7ba56",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 40,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 3,
"path": "/torch_enhance/losses/__init__.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "from .vgg import VGG\n\n__all__ = [\"VGG\"]\n"
},
{
"alpha_fraction": 0.6266857385635376,
"alphanum_fraction": 0.6757254004478455,
"avg_line_length": 38.152000427246094,
"blob_id": "750f04862e54a3091e606e17a7f9a98066b4d930",
"content_id": "d37c0c1254b9c1ea1cece49c8aa3f480814c1f5e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4894,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 125,
"path": "/README.md",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "![](assets/pytorch-enhance-logo-cropped.png)\n\n# pytorch-enhance: Image Super-Resolution in PyTorch\n[![PyPI version](https://badge.fury.io/py/torch-enhance.svg)](https://badge.fury.io/py/torch-enhance)\n![PyPI - Downloads](https://img.shields.io/pypi/dm/torch-enhance?style=plastic)\n![GitHub](https://img.shields.io/github/license/IsaacCorley/pytorch-enhance?style=plastic)\n![Travis (.com)](https://img.shields.io/travis/com/IsaacCorley/pytorch-enhance?style=plastic)\n[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3739368.svg)](https://doi.org/10.5281/zenodo.3739368)\n\nLibrary for Minimal Modern Image Super-Resolution in PyTorch\n\n\n--------------------------------------------------------------------------------\nPyTorch Enhance provides a consolidated package of popular Image Super-Resolution models, datasets, and metrics to allow for quick and painless benchmarking or for quickly adding pretrained models to your application.\n\n## Documentation\n\n[https://pytorch-enhance.readthedocs.io](https://pytorch-enhance.readthedocs.io)\n\n## Installation\n\n### pip\n```\npip install torch-enhance\n```\n\n### latest\n```\ngit clone https://github.com/IsaacCorley/pytorch-enhance.git\ncd pytorch-enhance\npython setup.py install\n```\n\n## Models\nThe following models are currently implemented:\n\n* **SRCNN** from Dong et. al [Image Super-Resolution Using Deep Convolutional Networks](https://arxiv.org/pdf/1501.00092v3.pdf)\n* **VDSR** from Lee et al. [Accurate Image Super-Resolution Using Very Deep Convolutional Networks](https://arxiv.org/pdf/1511.04587.pdf)\n* **ESPCN** from Shi et. al [Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network](https://arxiv.org/pdf/1609.05158v2.pdf)\n* **SRResNet** from Ledig et. al [Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network](https://arxiv.org/pdf/1609.04802v5.pdf)\n* **EDSR** from Lim et. al [Enhanced Deep Residual Networks for Single Image Super-Resolution](https://arxiv.org/pdf/1707.02921v1.pdf)\n\n```python\nimport torch\nimport torch_enhance\n\n# increase resolution by factor of 2 (e.g. 128x128 -> 256x256)\nmodel = torch_enhance.models.SRResNet(scale_factor=2, channels=3)\n\nlr = torch.randn(1, 3, 128, 128)\nsr = model(x) # [1, 3, 256, 256]\n```\n\n## State-of-the-Art\nNot sure which models are currently the best? 
Check out the [PapersWithCode Image Super-Resolution Leaderboards](https://paperswithcode.com/task/image-super-resolution)\n\n\n## Datasets\nThe following benchmark datasets are available for usage:\n\n* **[BSDS100](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[BSDS200](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[BSDS300](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/)**\n* **[BSDS500](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html)**\n* **[Set5](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[Set14](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[T91](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[Historical](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[Urban100](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[Manga109](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[General100](https://drive.google.com/drive/folders/1pRmhEmmY-tPF7uH8DuVthfHoApZWJ1QU)**\n* **[DIV2K](https://data.vision.ee.ethz.ch/cvl/DIV2K/)**\n\n\n## Dataset Samples\n\n**BSDS300** | **BSDS500** | **T91**\n:-------------------------:|:-------------------------:|:-------------------------:\n![](assets/BSDS300.gif) | ![](assets/BSDS500.gif) | ![](assets/T91.gif) \n\n**Set5** | **Set14** | **Historical**\n:-------------------------:|:-------------------------:|:-------------------------:\n![](assets/Set5.gif) | ![](assets/Set14.gif) | ![](assets/Historical.gif) \n\n## Losses\n\n* **Perceptual Loss (VGG16)**\n\n## Metrics\n\n* **Mean Squared Error (MSE)**\n* **Mean Absolute Error (MAE)**\n* **Peak-Signal-Noise-Ratio (PSNR)**\n\n## Examples\n\n```\n$ cd examples\n```\n\n* **[Get up and benchmarking quickly with PyTorch Lightning](examples/pytorch_lightning_example.py)**\n* **[Coming from Keras? Try our example using the Poutyne library](examples/poutyne_example.py)**\n\n## Running Tests\n\n```\n$ pytest -ra\n```\n\n## Cite\n\nPlease cite this repository if you used this code in your own work:\n\n```\n@software{isaac_corley_2020_3739368,\n author = {Isaac Corley},\n title = {PyTorch Enhance},\n month = apr,\n year = 2020,\n publisher = {Zenodo},\n version = {0.1.2},\n doi = {10.5281/zenodo.3739368},\n url = {https://doi.org/10.5281/zenodo.3739368}\n}\n```\n"
},
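The quick-start snippet embedded in the README record above defines lr but then calls model(x); a corrected, runnable version is sketched here (illustrative only, not part of the dataset record), keeping the SRResNet signature exactly as the README shows it.

# Illustrative sketch, not part of the dataset record.
import torch
import torch_enhance

# increase resolution by factor of 2 (e.g. 128x128 -> 256x256)
model = torch_enhance.models.SRResNet(scale_factor=2, channels=3)

lr = torch.randn(1, 3, 128, 128)   # low-resolution input batch
sr = model(lr)                     # [1, 3, 256, 256]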
{
"alpha_fraction": 0.5942028760910034,
"alphanum_fraction": 0.5985507369041443,
"avg_line_length": 20.230770111083984,
"blob_id": "be60412d9c9f4190475f647e2b78d836a14e636f",
"content_id": "0507ce643c9d4d9d06a762a4b246ce290ad04004",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1380,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 65,
"path": "/torch_enhance/metrics.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn.functional as F\n\n\n__all__ = [\"mse\", \"mae\", \"psnr\"]\n\n\[email protected]_grad()\ndef mse(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:\n \"\"\"Mean squared error (MSE) metric\n\n Parameters\n ----------\n y_pred : torch.Tensor\n Super-Resolved image tensor\n y_true : torch.Tensor\n High Resolution image tensor\n\n Returns\n -------\n torch.Tensor\n Mean squared error between y_true and y_pred\n\n \"\"\"\n return F.mse_loss(y_pred, y_true)\n\n\[email protected]_grad()\ndef mae(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:\n \"\"\"Mean absolute error (MAE) metric\n\n Parameters\n ----------\n y_pred : torch.Tensor\n Super-Resolved image tensor\n y_true : torch.Tensor\n High Resolution image tensor\n\n Returns\n -------\n torch.Tensor\n Mean absolute error between y_true and y_pred\n\n \"\"\"\n return F.l1_loss(y_pred, y_true)\n\n\[email protected]_grad()\ndef psnr(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:\n \"\"\"Peak-signal-noise ratio (PSNR) metric\n\n Parameters\n ----------\n y_pred : torch.Tensor\n Super-Resolved image tensor\n y_true : torch.Tensor\n High Resolution image tensor\n\n Returns\n -------\n torch.Tensor\n Peak-signal-noise-ratio between y_true and y_pred\n\n \"\"\"\n return 10 * (1 / mse(y_pred, y_true)).log10()\n"
},
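The metrics record above exposes mse, mae, and psnr as no-grad helpers. A small sketch of calling them follows (illustrative only, not part of the dataset record); the import style mirrors the poutyne example later in this listing, and a [0, 1] value range is assumed because the psnr formula uses a peak value of 1.

# Illustrative sketch, not part of the dataset record.
import torch
from torch_enhance import metrics

y_true = torch.rand(1, 3, 64, 64)                                 # reference image in [0, 1]
y_pred = (y_true + 0.05 * torch.randn_like(y_true)).clamp(0, 1)   # noisy prediction

print(metrics.mse(y_pred, y_true))    # mean squared error
print(metrics.mae(y_pred, y_true))    # mean absolute error
print(metrics.psnr(y_pred, y_true))   # 10 * log10(1 / mse), in dB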
{
"alpha_fraction": 0.6632124185562134,
"alphanum_fraction": 0.6787564754486084,
"avg_line_length": 16.545454025268555,
"blob_id": "24b6170663690fac5cfb7ca5c55d7ecf7a07ef57",
"content_id": "1b647c5595da9fd31db95240c333f1e04b91430a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 193,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 11,
"path": "/torch_enhance/__init__.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch_enhance.models\nimport torch_enhance.datasets\nimport torch_enhance.metrics\nimport torch_enhance.losses\n\n__version__ = \"0.1.8\"\n\n__all__ = [\n \"torch_enhance\",\n \"__version__\",\n]\n"
},
{
"alpha_fraction": 0.5506003499031067,
"alphanum_fraction": 0.6895368695259094,
"avg_line_length": 19.10344886779785,
"blob_id": "31ffd70595433b56fcf86886b5baf0bdff9e88a6",
"content_id": "4ab9388bc645ba8037813c90a72b9a8b6fd8ce43",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 29,
"path": "/torch_enhance/datasets/__init__.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "from .base import BaseDataset\nfrom .bsds300 import BSDS300\nfrom .bsds500 import BSDS500\nfrom .bsds200 import BSDS200\nfrom .bsds100 import BSDS100\nfrom .set5 import Set5\nfrom .set14 import Set14\nfrom .t91 import T91\nfrom .historical import Historical\nfrom .urban100 import Urban100\nfrom .manga109 import Manga109\nfrom .general100 import General100\nfrom .div2k import DIV2K\n\n__all__ = [\n \"BaseDataset\",\n \"BSDS300\",\n \"BSDS500\",\n \"BSDS200\",\n \"BSDS100\",\n \"Set5\",\n \"Set14\",\n \"T91\",\n \"Historical\",\n \"Urban100\",\n \"Manga109\",\n \"General100\",\n \"DIV2K\",\n]\n"
},
{
"alpha_fraction": 0.7298850417137146,
"alphanum_fraction": 0.7543103694915771,
"avg_line_length": 23,
"blob_id": "fd8a3fc952d35d167eeeeddaad18fc2954d58074",
"content_id": "894672fe20ce3eb4a63e9b1972b7e6633bad8156",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 29,
"path": "/examples/poutyne_example.py",
"repo_name": "eqsongca/pytorch-enhance",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch.utils.data import DataLoader\n\nfrom poutyne.framework import Model\n\nfrom torch_enhance.datasets import BSDS300, Set14, Set5\nfrom torch_enhance.models import SRCNN\nfrom torch_enhance import metrics\n\n\nscale_factor = 2\ntrain_dataset = BSDS300(scale_factor=scale_factor)\nval_dataset = Set14(scale_factor=scale_factor)\ntrain_dataloader = DataLoader(train_dataset, batch_size=8)\nval_dataloader = DataLoader(val_dataset, batch_size=2)\n\nchannels = 3 if train_dataset.color_space == \"RGB\" else 1\npytorch_network = SRCNN(scale_factor, channels)\n\nmodel = Model(\n pytorch_network,\n \"sgd\",\n \"mse\"\n)\nmodel.fit_generator(\n train_dataloader,\n val_dataloader,\n epochs=1\n)\n"
}
] | 25 |
dream-chaser/shape_pre_tool | https://github.com/dream-chaser/shape_pre_tool | d3ef3e394c01a52c29b77f99bb6aba2f8b7410ae | abd6b320a1283d1a9fb457d074eba62ca43d0533 | 4b95b44e1f029d65af18ce7b470cfb5ff76ad0be | refs/heads/master | 2021-01-01T17:45:48.025400 | 2017-07-24T05:14:03 | 2017-07-24T05:14:03 | 98,150,953 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5893909335136414,
"alphanum_fraction": 0.6385068893432617,
"avg_line_length": 24.5,
"blob_id": "2a3718b741c8497ec9912ee903c997d6ce6f8332",
"content_id": "c50f92ee07f923aa21cfa940a506b7e795c6c1a3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 509,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 20,
"path": "/read_sparse_npz.py",
"repo_name": "dream-chaser/shape_pre_tool",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom scipy.sparse import csr_matrix\n\nfpath = 'I:\\\\BU4D_2_mesh\\\\coords\\\\F001_1_066.npz'\nt = np.load(fpath)\ncol = t['col'].astype(np.int32)\nptr = t['ptr'].astype(np.int32)\nx_local = t['x_local'].astype(np.float32)\ny_local = t['y_local'].astype(np.float32)\n\ndim = len(ptr)-1\nx_mat = csr_matrix((x_local, col, ptr),shape=(dim,dim))\ny_mat = csr_matrix((y_local, col, ptr),shape=(dim,dim))\n\nfor i in range(0,dim,100):\n\tres = 0\n\tfor j in range(dim):\n\t\tif x_mat[i,j] != 0:\n\t\t\tres += 1\n\tprint(i,res)"
}
] | 1 |
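The read_sparse_npz.py record above counts non-zero entries by probing every column of a csr_matrix row, which is quadratic overall. CSR storage already records per-row counts in its indptr array, so the same numbers can be read directly. The sketch below is illustrative only and uses small made-up arrays rather than the F001_1_066.npz file referenced in the record.

# Illustrative sketch, not part of the dataset record.
import numpy as np
from scipy.sparse import csr_matrix

dim = 5
data = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # non-zero values
col = np.array([0, 2, 4], dtype=np.int32)            # their column indices
ptr = np.array([0, 1, 1, 2, 3, 3], dtype=np.int32)   # row pointer (dim + 1 entries)
x_mat = csr_matrix((data, col, ptr), shape=(dim, dim))

for i in range(dim):
    nnz_in_row = x_mat.indptr[i + 1] - x_mat.indptr[i]   # O(1) per row
    print(i, nnz_in_row)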
Ram1L5/ra | https://github.com/Ram1L5/ra | dff96401687f985a553b5472a650565502211b6c | 5a47b37036cafb744fda2936b5e342c2791b1f35 | c9d9f85ac063ff30eb7db2aa2182d5005dc0290d | refs/heads/master | 2022-12-11T01:51:46.857933 | 2018-03-23T22:28:29 | 2018-03-23T22:28:29 | 71,710,742 | 0 | 0 | MIT | 2016-10-23T15:12:29 | 2018-03-23T22:29:29 | 2021-06-10T20:00:28 | HTML | [
{
"alpha_fraction": 0.6855894923210144,
"alphanum_fraction": 0.688209593296051,
"avg_line_length": 44.599998474121094,
"blob_id": "f84a7b2c8691ad4cc2fda0e680c89490fa3d4bbf",
"content_id": "b4346d02ed1c5bddccd1a27666a7bc733aac48c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1145,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 25,
"path": "/search_satellite_images/views.py",
"repo_name": "Ram1L5/ra",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views import generic\nfrom django.utils import timezone\nimport satsearch, json\n\n# Create your views here.\ndef index(request):\n return render(request, 'search_satellite_images/index.html')\n\ndef get_download_info(request):\n params = {'path': request.POST.get('path'), 'row': request.POST.get('row'),\n 'satellite_name': request.POST.get('satellite_name'), 'intersects': request.POST.get('intersects'),\n 'date_from': request.POST.get('date_from'), 'date_to': request.POST.get('date_to'),\n 'cloud_to': request.POST.get('cloud_to')}\n params = {k: v for k, v in params.items() if v}\n s = satsearch.Search(**params)\n scenes = satsearch.Scenes(s.scenes())\n geojson = json.dumps(scenes.geojson())\n geojson_fixed = json.loads(geojson.replace('null', '\"no information\"'))\n return render(request, 'search_info.html', {'info': geojson_fixed, 'params':params})\n\ndef get_help(request):\n return render(request, 'search_satellite_images/help.html')\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7983193397521973,
"alphanum_fraction": 0.7983193397521973,
"avg_line_length": 22.799999237060547,
"blob_id": "30ba4f240ef4f70d36eb691a2dd8f1372b147e91",
"content_id": "b4d31bbecfaf1fb63f0ef0158d1956603b62f93a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 5,
"path": "/search_satellite_images/apps.py",
"repo_name": "Ram1L5/ra",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass SearchSatelliteImagesConfig(AppConfig):\n name = 'search_satellite_images'\n"
},
{
"alpha_fraction": 0.42446044087409973,
"alphanum_fraction": 0.6906474828720093,
"avg_line_length": 14.44444465637207,
"blob_id": "026c5b8995f6a860f239e604919607195d641353",
"content_id": "8d32e47a6d48fbafee51beba3d42217346da2691",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 139,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "Ram1L5/ra",
"src_encoding": "UTF-8",
"text": "certifi==2018.1.18\nchardet==3.0.4\nDjango==2.0.3\nidna==2.6\npsycopg2==2.7.3.2\npytz==2017.3\nrequests==2.18.4\nsatsearch==0.1.0b7\nurllib3==1.22\n"
},
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.6600000262260437,
"avg_line_length": 24.100000381469727,
"blob_id": "eff59c2f5bb7d23f4cc518f70f6f98b9b740b66e",
"content_id": "7cf9c80a8bbc5ec877b2e230599be6d9d0b26135",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 10,
"path": "/search_satellite_images/urls.py",
"repo_name": "Ram1L5/ra",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'search_satellite_images'\nurlpatterns = [\n path('', views.index, name='index'),\n path('info/', views.get_download_info, name='info'),\n path('help/', views.get_help, name='help')\n]"
}
] | 4 |
YefriTavarez/NomiApp | https://github.com/YefriTavarez/NomiApp | 6d25713c1d5782ca1d36d5bef1253dff43f64798 | a532ae7a3871ee91ec6f17b4b46ba67db7a056b5 | b4695567ec615c0d30ce285fd698ef76f22a88a6 | refs/heads/master | 2021-01-12T05:54:14.476616 | 2017-03-16T20:02:52 | 2017-03-16T20:02:52 | 77,231,189 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5779816508293152,
"alphanum_fraction": 0.6146789193153381,
"avg_line_length": 17,
"blob_id": "c4f1642d56e679753566953ca4519a954ff93687",
"content_id": "855da1bd324f1311de84235d242936816e1b39ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 6,
"path": "/nomiapp/__init__.py",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport time, frappe\n\n__version__ = '0.1.1'\n\n"
},
{
"alpha_fraction": 0.7049891352653503,
"alphanum_fraction": 0.7180043458938599,
"avg_line_length": 24.66666603088379,
"blob_id": "173e12b51209ef5e7b04626eaaf1ef2b167e81d6",
"content_id": "2559b6db5c16f63ad1c09858bf06164ae6d22f1b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 461,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 18,
"path": "/nomiapp/nomiapp/doctype/empleados_en_obra/empleados_en_obra.py",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Soldeva, SRL and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass EmpleadosenObra(Document):\n\tdef get_list(self):\n\t\tdoclist = frappe.db.sql(\"\"\"SELECT name \n\t\t\tFROM `tabEmpleados en Obra`\n\t\t\tWHERE docstatus <> 2 ORDER BY name ASC\"\"\", as_dict=True)\n\n\t\tif not doclist:\n\t\t\treturn []\n\n\t\treturn doclist"
},
{
"alpha_fraction": 0.6832946538925171,
"alphanum_fraction": 0.6954309940338135,
"avg_line_length": 28.8799991607666,
"blob_id": "fe5a87feefdb2af561ccd5c23a3613952910443d",
"content_id": "21a8575efba8d0b7a438d01dbb8a25505791d7e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11206,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 375,
"path": "/nomiapp/api.py",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "\nfrom __future__ import unicode_literals\nimport frappe, time, json\nfrom frappe import _\nfrom frappe.utils import get_last_day, get_first_day, nowdate, cstr\n\nfrom frappe.utils import add_days, cint, flt, getdate, rounded, date_diff, money_in_words\nfrom math import ceil\n\nfrom frappe.utils.csvutils import UnicodeWriter\n\[email protected]()\ndef process_missing_components(doc, method):\n\t#month_start = get_first_day(nowdate())\n\t#month_end = get_last_day(nowdate())\n\n\tearnings = doc.get(\"earnings\")\n\n\tif not has_component(earnings, \"Kilometros\") and not has_component(earnings, \"Horas\"):\n\t\tset_missing_earning_component(doc)\n\n\tset_missing_deduction_component(doc)\n\ndef set_missing_earning_component(salary_slip):\n\tsalary_slip.designation = frappe.get_value(\"Employee\", salary_slip.employee, \"designation\")\n\t\n\tif not salary_slip.designation:\n\t\tfrappe.throw(_(\"Employee {0} has not any designation set!\").format(salary_slip.employee))\n\n\tif salary_slip.designation == \"Chofer\" or salary_slip.designation == \"Operador\":\n\t\tsalary_slip.earnings = []\n\t\n\tamount = 0\n\n\tif salary_slip.designation == \"Chofer\":\n\n\t\tkilometers_list = get_kilometros_de_chofer(salary_slip.employee, salary_slip.start_date, salary_slip.end_date)\n\n\t\tif kilometers_list:\n\t\t\tfor row in kilometers_list:\n\t\t\t\tif row.is_amount:\n\t\t\t\t\tamount += float(row.amount)\n\t\t\t\telse:\n\t\t\t\t\tamount += ((float(row.kilometers)-5)*float(row.adict_kilometer_rate))+float(row.kilometer_rate)\n\n\t\t\tadd_component_to_earnings(salary_slip, \"Kilometros\", amount)\n\t\t\t\n\n\tif salary_slip.designation == \"Operador\":\n\t\thours_list = get_horas_de_operador(salary_slip.employee, salary_slip.start_date, salary_slip.end_date)\n\n\t\tif hours_list:\n\t\t\tfor row in hours_list:\n\t\t\t\tif row.is_amount:\n\t\t\t\t\tamount += float(row.amount)\n\t\t\t\telse:\n\t\t\t\t\tamount += float(row.hours) * float(row.rate)\n\n\t\t\tadd_component_to_earnings(salary_slip, \"Horas\", amount)\n\t\t\tif amount < 12000:\n\t\t\t\tadd_component_to_earnings(salary_slip, \"Sueldo Base\", 12000 - amount)\n\t\t\t\t\n\t\telse:\n\t\t\tadd_component_to_earnings(salary_slip, \"Sueldo Base\", 12000)\n\n\t#salary_slip.calculate_net_pay()\n\tsalary_slip.validate()\n\ndef set_missing_deduction_component(salary_slip):\n\tcomponent = get_component(salary_slip.get(\"deductions\"), \"ISR\")\n\n\tif not component:\n\t\tcomponent = frappe.get_doc({\n\t\t\t\"doctype\" : \"Salary Detail\",\n\t\t\t\"salary_component\": \"ISR\",\n\t\t\t\"default_amount\": 0,\n\t\t\t\"amount_based_on_formula\": 0,\n\t\t\t\"denpends_on_lwp\": 0\n\t\t})\n\n\tcomponent.amount = calculate_isr_amount(salary_slip)\n\t\n\tif component.amount and not has_component(salary_slip.get(\"deductions\"),\"ISR\"):\n\t\tsalary_slip.append(\"deductions\", component)\n\t\tsalary_slip.validate()\n\n\ndef get_choferes_result(obra, emp_ob=None):\n\tif emp_ob:\n\t\treturn frappe.db.sql(\"\"\"SELECT \n\t\t\tchofer.employee,\n\t\t\tchofer.employee_name,\n\t\t\tchofer.kilometer_rate,\n\t\t\tchofer.adict_kilometer_rate,\n\t\t\tchofer.odometer_start,\n\t\t\tchofer.odometer_end,\n\t\t\tchofer.kilometers,\n\t\t\tchofer.is_amount,\n\t\t\tchofer.amount \n\t\tFROM `tabTabla de Choferes` AS chofer \n\t\tJOIN `tabEmpleados en Obra` project on chofer.parent = project.name \n\t\tWHERE project.name = %(project)s\"\"\",{\"project\": emp_ob }, as_dict=True)\n\n\treturn frappe.db.sql(\"\"\"SELECT chofer.employee, chofer.employee_name\n\tFROM `tabEmpleado en 
Proyecto` AS chofer \n\tJOIN tabProject project on chofer.parent = project.name \n\tWHERE chofer.parentfield = 'choferes' \n\tAND project.name = %(project)s\"\"\",{\"project\": obra }, as_dict=True)\n\ndef get_operadores_result(obra, emp_ob=None):\n\tif emp_ob:\n\t\treturn frappe.db.sql(\"\"\"SELECT \n\t\t\toperador.employee,\n\t\t\toperador.employee_name,\n\t\t\toperador.rate,\n\t\t\toperador.horometer_start,\n\t\t\toperador.horometer_end,\n\t\t\toperador.hours,\n\t\t\toperador.inactive_hours,\n\t\t\toperador.is_amount,\n\t\t\toperador.amount\n\t\tFROM `tabTabla de Operadores` AS operador \n\t\tJOIN `tabEmpleados en Obra` project ON operador.parent = project.name \n\t\tWHERE project.name = %(project)s\"\"\",{\"project\": emp_ob }, as_dict=True)\n\n\treturn frappe.db.sql(\"\"\"SELECT operador.employee, operador.employee_name\n\tFROM `tabEmpleado en Proyecto` AS operador \n\tJOIN tabProject project ON operador.parent = project.name \n\tWHERE operador.parentfield = 'operadores' \n\tAND project.name = %(project)s\"\"\",{\"project\": obra }, as_dict=True)\n\[email protected]()\ndef getChoferes(obra):\n\treturn frappe.db.sql(\"SELECT * FROM `tabEmpleado en Proyecto` \\\n\t\tWHERE parent = '{0}' \\\n\t\tAND parentfield = 'choferes'\"\n\t.format(obra), as_dict=True)\n\[email protected]()\ndef getOperadores(obra):\n\treturn frappe.db.sql(\"SELECT * FROM `tabEmpleado en Proyecto` \\\n\t\tWHERE parent = '{0}' \\\n\t\tAND parentfield = 'operadores'\"\n\t.format(obra), as_dict=True)\n\ndef get_kilometros_de_chofer(chofer, from_date, to_date):\n\treturn frappe.db.sql(\"SELECT kilometers, adict_kilometer_rate, kilometer_rate, amount, is_amount \\\n\t\tFROM `tabTabla de Choferes` AS c \\\n\t\tJOIN `tabEmpleados en Obra` AS o \\\n\t\tON c.parent = o.name \\\n\t\tWHERE c.employee = '{0}' \\\n\t\tAND o.fecha >= '{1}' \\\n\t\tAND o.fecha <= '{2}' \\\n\t\tAND c.docstatus = 1\"\n\t.format(chofer, from_date, to_date), as_dict=True)\n\ndef get_horas_de_operador(operador, from_date, to_date):\n\treturn frappe.db.sql(\"SELECT hours, rate, amount, is_amount \\\n\t\tFROM `tabTabla de Operadores` AS c \\\n\t\tJOIN `tabEmpleados en Obra` AS o \\\n\t\tON c.parent = o.name \\\n\t\tWHERE c.employee = '{0}' \\\n\t\tAND o.fecha >= '{1}' \\\n\t\tAND o.fecha <= '{2}' \\\n\t\tAND c.docstatus = 1\"\n\t.format(operador, from_date, to_date), as_dict=True)\n\ndef add_component_to_earnings(salary_slip, salary_component, amount):\n\tsalary_slip.append(\"earnings\",{\n\t\t\"default_amount\": 0,\n\t\t\"amount_based_on_formula\": 0,\n\t\t\"denpends_on_lwp\": 0,\n\t\t\"salary_component\": salary_component,\n\t\t\"amount\": amount\n\t})\n\ndef has_component(array, component):\n\tfound = False\n\n\tfor current in array:\n\t\tif component == current.salary_component:\n\t\t\tfound = True\n\n\treturn found\n\ndef get_component(array, component):\n\tcomponent_found = None\n\t\n\tfor current in array:\n\t\tif component == current.salary_component:\n\t\t\tcomponent_found = current\n\t\n\treturn component_found\n\ndef del_component(array, component):\n\tdeleted = False\n\t\n\tfor current in array:\n\t\tif component == current.salary_component:\n\t\t\tcurrent.delete()\n\t\t\tdeleted = True\n\t\n\treturn deleted\n\ndef calculate_isr_amount(salary_slip):\n\tif not salary_slip.gross_pay:\n\t\tsalary_slip.gross_pay = 0\n\n\tdef after_afp(net_pay):\n\t\tif has_component(salary_slip.get(\"deductions\"), \"AFP\"):\n\t\t\tcomponent = get_component(salary_slip.get(\"deductions\"), \"AFP\")\n\t\t\treturn float(net_pay - component.amount)\n\n\t\treturn 
0\n\n\tdef after_sfs(net_pay):\n\t\tif has_component(salary_slip.get(\"deductions\"), \"SFS\"):\n\t\t\tcomponent = get_component(salary_slip.get(\"deductions\"), \"SFS\")\n\t\t\treturn float(net_pay - component.amount)\n\t\n\t\treturn 0\n\n\tdef get_acumulated():\n\t\tif salary_slip.anual_gross_pay > float(anual_isr_from_25):\n\t\t\treturn (((float(anual_isr_to_15) - float(anual_isr_from_15)) * 0.15)) + (((float(anual_isr_to_20) - float(anual_isr_from_20)) * 0.20))\n\n\t\telif salary_slip.anual_gross_pay > float(anual_isr_from_20):\n\t\t\treturn ((float(anual_isr_to_15) - float(anual_isr_from_15)) * 0.15)\n\n\tfrappe.errprint(\"gross_pay: {0}\".format(salary_slip.gross_pay))\n\tfrappe.errprint(\"anual gross_pay: {0}\".format(salary_slip.gross_pay * 12))\n\tnet_pay = salary_slip.gross_pay\n\t#net_pay = after_afp(salary_slip.gross_pay) # to deduct only after the afp\n\t#net_pay = after_sfs(net_pay) # to deduct only after the sfs\n\n\tsalary_slip.anual_gross_pay = float(net_pay * 12)\n\n\tanual_isr_from_25 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"from25\")\n\t#anual_isr_to_25 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"to25\")\n\tanual_isr_from_20 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"from20\")\n\tanual_isr_to_20 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"to20\")\n\tanual_isr_from_15 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"from15\")\n\tanual_isr_to_15 = frappe.get_value(doctype=\"Configuracion ISR\", fieldname=\"to15\")\n\n\tif salary_slip.anual_gross_pay > float(anual_isr_from_25):\n\t\treturn ((float(salary_slip.anual_gross_pay) - float(anual_isr_from_25)) * 0.25 + ceil(get_acumulated())) /12\n\n\telif salary_slip.anual_gross_pay > float(anual_isr_from_20):\n\t\treturn ((float(salary_slip.anual_gross_pay) - float(anual_isr_from_20)) * 0.2 + ceil(get_acumulated()))/12\n\n\telif salary_slip.anual_gross_pay > float(anual_isr_from_15):\n\t\treturn ((float(salary_slip.anual_gross_pay) - float(anual_isr_from_15)) * 0.15)/12\n\n\telse:\n\t\treturn 0\n\t\t\[email protected]()\ndef descargar_choferes(obra, with_data=False, emp_ob=None):\n\tw = UnicodeWriter()\n\tw.writerow([\n\t\t\"Empleado\",\n\t\t\"Nombre del Empleado\",\n\t\t\"Precio de Kilometros (5KM)\",\n\t\t\"Precio de Kilometros Adicionales\",\n\t\t\"Odometro Inicial\",\n\t\t\"Odometro Final\",\n\t\t\"Kilometros\",\n\t\t\"Es en Monto\",\n\t\t\"Monto\"\n\t])\n\n\tw.writerow([\n\t\t\"employee\",\n\t\t\"employee_name\",\n\t\t\"kilometer_rate\",\n\t\t\"adict_kilometer_rate\",\n\t\t\"odometer_start\",\n\t\t\"odometer_end\",\n\t\t\"kilometers\",\n\t\t\"is_amount\",\n\t\t\"amount\" \n\t])\n\n\tif with_data:\n\t\tif not emp_ob:\n\t\t\tfrappe.throw(\"Se necesita un Documento tipo <i>Empleados en Obra</i> para continuar!\")\n\n\t\tfor chofer in get_choferes_result(obra, emp_ob):\n\t\t\tw.writerow([ \n\t\t\t\tchofer.employee,\n\t\t\t\tchofer.employee_name,\n\t\t\t\tchofer.kilometer_rate,\n\t\t\t\tchofer.adict_kilometer_rate,\n\t\t\t\tchofer.odometer_start,\n\t\t\t\tchofer.odometer_end,\n\t\t\t\tchofer.kilometers,\n\t\t\t\tchofer.is_amount,\n\t\t\t\tchofer.amount \n\t\t\t])\n\telse:\n\t\tfor chofer in get_choferes_result(obra):\n\t\t\tw.writerow([ chofer.employee, chofer.employee_name, 105, 15, \"\", \"\", \"\", 0, 0\t])\n\n\tfrappe.response['result'] = cstr(w.getvalue())\n\tfrappe.response['type'] = 'csv'\n\tfrappe.response['doctype'] = \"template_choferes_\" + str(int(time.time()))\n\[email protected]()\ndef descargar_operadores(obra, 
with_data=False, emp_ob=None):\n\n\tw = UnicodeWriter()\n\tw.writerow([\n\t\t\"Empleado\",\n\t\t\"Nombre del Empleado\",\n\t\t\"Precio por Hora\",\n\t\t\"Horometro Inicial\",\n\t\t\"Horometro Final\",\n\t\t\"Cantidad de Horas\",\n\t\t\"Horas Inactivas\",\n\t\t\"Es en Monto\",\n\t\t\"Monto\"\n\t])\n\n\tw.writerow([\n\t\t\"employee\",\n\t\t\"employee_name\",\n\t\t\"rate\",\n\t\t\"horometer_start\",\n\t\t\"horometer_end\",\n\t\t\"hours\",\n\t\t\"inactive_hours\",\n\t\t\"is_amount\",\n\t\t\"amount\"\n\t])\n\n\tif with_data:\n\t\tif not emp_ob:\n\t\t\tfrappe.throw(\"Se necesita un Documento tipo <i>Empleados en Obra</i> para continuar!\")\n\n\t\tfor operador in get_operadores_result(obra, emp_ob):\n\t\t\tw.writerow([ \n\t\t\t\toperador.employee,\n\t\t\t\toperador.employee_name,\n\t\t\t\toperador.rate,\n\t\t\t\toperador.horometer_start,\n\t\t\t\toperador.horometer_end,\n\t\t\t\toperador.hours,\n\t\t\t\toperador.inactive_hours,\n\t\t\t\toperador.is_amount,\n\t\t\t\toperador.amount\n\t\t\t])\n\telse:\n\t\tfor operador in get_operadores_result(obra):\n\t\t\tw.writerow([ operador.employee, operador.employee_name, 110, \"\", \"\", \"\", 0, 0, 0 ])\n\n\tfrappe.response['result'] = cstr(w.getvalue())\n\tfrappe.response['type'] = 'csv'\n\tfrappe.response['doctype'] = \"template_operadores_\" + str(int(time.time()))\n\[email protected]()\ndef delete_doc_list(doctype=None):\n\tif not doctype: \n\t\treturn \"fail\"\n\n\tfor current in frappe.get_list(doctype):\n\t\tdoc = frappe.get_doc(doctype, current.name)\n\n\t\tif doc.docstatus == 1:\n\t\t\tfrappe.errprint(\"Cancelling doc: {0}\".format(doc.name))\n\t\t\tdoc.cancel()\n\n\t\tfrappe.errprint(\"Deleting doc: {0}\".format(doc.name))\n\t\tdoc.delete()\n\n\tfrappe.errprint(\"Committing to the Database\")\n\tfrappe.db.commit()\n"
},
{
"alpha_fraction": 0.6938461661338806,
"alphanum_fraction": 0.7061538696289062,
"avg_line_length": 25.040000915527344,
"blob_id": "6d4a232dca6f7a2bf1b76298321a6c35ff7132a8",
"content_id": "f1bc287fa4269a85fe5cdf284f15691d29943f53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 25,
"path": "/nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.py",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Soldeva, SRL and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass ConfiguracionISR(Document):\n\tpass\n\[email protected]()\ndef getRangosISR():\n\treturn frappe.db.sql(\"SELECT field, value \\\n\t\tFROM `tabSingles` \\\n\t\tWHERE doctype='Configuracion ISR' \\\n\t\tAND (field like 'from%' OR field like 'to%') \\\n\t\tORDER BY field\", as_dict=1)\n\n\n\tcomment = \"\"\"return frappe.db.sql(\"SELECT value \\\n\t\tFROM `tabSingles` \\\n\t\tWHERE doctype='Configuracion ISR'\\\n\t\tAND field='{0}'\"\n\t.format(field),as_dict=1)\"\"\""
},
{
"alpha_fraction": 0.6833333373069763,
"alphanum_fraction": 0.7055555582046509,
"avg_line_length": 21.5,
"blob_id": "a105ed4af99ad1d275e15d4cb3ddde75e7ae6e8c",
"content_id": "b469ccfa0b87e82f1a1c6c3d8d5ab8848f3fff81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 180,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 8,
"path": "/nomiapp/nomiapp/doctype/configuracion_isr/configuracion_isr.js",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016, Soldeva, SRL and contributors\n// For license information, please see license.txt\n\nfrappe.ui.form.on('Configuracion ISR', {\n\trefresh: function(frm) {\n\n\t}\n});\n"
},
{
"alpha_fraction": 0.5711820721626282,
"alphanum_fraction": 0.5785159468650818,
"avg_line_length": 32.60869598388672,
"blob_id": "4e31ea1df217212e4ab5cf66547b7ef56d9a52aa",
"content_id": "2a92ffeb68a2799e9b960865a250e3dcda7b5d5c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2318,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 69,
"path": "/nomiapp/nomiapp/doctype/subir_empleados/subir_empleados.js",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016, Soldeva, SRL and contributors\n// For license information, please see license.txt\nfrappe.ui.form.on('Subir Empleados', {\n\tonload: function(frm){\n\t\tfrm.doc.project = \"\";\n\t\tfrm.doc.record = \"\";\n \tfrm.set_df_property(\"project\",\"reqd\", frm.doc.update_record? 0 : 1);\n\t},\n\tvalidate: function(frm){\n\t\tvalidated = false;\n\t},\n refresh: function(frm) {\n frm.doc.date = frappe.datetime.get_today();\n cur_frm.toolbar.print_icon.addClass(\"hide\");\n\t\tcur_frm.disable_save();\n\t\tsetTimeout(function(){\n\t\t\t$(\"button[data-fieldname=validate_and_send]\")\n\t\t\t\t.attr(\"class\", \"btn btn-primary btn-sm\");\n\t\t},100);\n },\n update_record: function(frm){\n \tfrm.set_df_property(\"project\",\"reqd\", frm.doc.update_record? 0 : 1);\n \tfrm.set_df_property(\"record\",\"reqd\", frm.doc.update_record? 1 : 0);\n },\n validate_and_send: function(frm) {\n \tvar me = this;\n \tif((!frm.doc.load_drivers && !frm.doc.load_operators) ||\n \t\t(!frm.doc.update_record && !frm.doc.project) ||\n \t\t(frm.doc.update_record && !frm.doc.record)) return;\n\n var callback = function(response) {\n \tif(\"inserted\" == response.message) frappe.msgprint(\"Registros agregados correctamente!\");\n \tif(\"updated\" == response.message) frappe.msgprint(\"Registros actualizados correctamente!\");\n\n\t delete_attachments();\n\n\t frm.doc.load_drivers = \"\";\n \t frm.doc.load_operators = \"\";\n \trefresh_many([\"load_drivers\", \"load_operators\"]);\n\n //setTimeout(frappe.hide_msgprint, 2000);\n \tfrm.save();\n }\n\n $c(\"runserverobj\", args = {\"method\": \"upload_files\", \"docs\": cur_frm.doc }, callback);\n\n\t function delete_attachments() {\n\t \tvar attachments = cur_frm.attachments.get_attachments();\n\t \tattachments.forEach(function(attachment){ \n\t \t\tdelete_attachment(attachment.name);\n\t \t});\n\t }\n\n\t function delete_attachment(docname) {\n\t frappe.call({\n\t method: 'frappe.client.delete',\n\t args: {\n\t doctype: \"File\",\n\t name: docname\n\t },\n\t callback: function(response) {\n\t //frappe.utils.play_sound(\"submit\");\n\t frappe.model.clear_doc(\"File\", docname);\n\t cur_frm.reload_doc();\n\t }\n\t });\n\t }\n }\n});"
},
{
"alpha_fraction": 0.6397454738616943,
"alphanum_fraction": 0.6475771069526672,
"avg_line_length": 25.545454025268555,
"blob_id": "a9f99eeace3ebca53c572482ee233252cac2e6da",
"content_id": "b4917dc05beab6994673ac6879fd1b330868efb7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2043,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 77,
"path": "/nomiapp/nomiapp/doctype/subir_empleados/subir_empleados.py",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Soldeva, SRL and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nimport csv, os\n\nclass SubirEmpleados(Document):\n\tdef upload_files(self):\n\t\tempbo_doc = frappe.get_doc({\n\t\t \"doctype\": \"Empleados en Obra\",\n\t\t \"obra\": self.project,\n\t\t \"fecha\": self.date\t\t \t\n\t\t})\n\n\t\tif self.update_record:\n\t\t\tif not self.record:\n\t\t\t\tfrappe.throw(\"Debe de seleccionar un Registro valido!\")\n\n\t\t\tempbo_doc = frappe.get_doc(\"Empleados en Obra\", self.record)\n\n\t\t\tif self.load_drivers:\n\t\t\t\tself.clear_table(empbo_doc, \"Drivers\")\n\n\t\t\tif self.load_operators:\n\t\t\t\tself.clear_table(empbo_doc, \"Operators\")\n\n\n\t\tif self.load_drivers:\n\t\t\tself.read_file_and_add(self.load_drivers,\"choferes\",empbo_doc)\n\n\t\tif self.load_operators:\n\t\t\tself.read_file_and_add(self.load_operators,\"operadores\",empbo_doc)\n\n\t\tif self.load_drivers or self.load_operators:\n\n\t\t\tif self.update_record:\n\t\t\t\tempbo_doc.save()\n\t\t\t\t\n\t\t\tif not self.update_record:\n\t\t\t\tempbo_doc.insert()\n\t\t\treturn \"success\"\n\t\t\n\tdef read_file_and_add(self, path, key, doc):\n\t\tif not path.endswith(\".csv\"):\n\t\t\tfrappe.throw(\"Extension no soportada. El sistema espera solo archivos CSV!\")\n\n\t\tif \"/private/\" in path:\n\t\t\tfull_path = \"{0}{1}\".format(frappe.conf.full_path2site, path)\n\t\telse:\n\t\t\tfull_path = \"{0}{1}{2}\".format(frappe.conf.full_path2site, \"/public\", path)\n\n\n\t\twith open(full_path,'rb') as csvfile:\n\t\t counter = 0\n\t\t spamreader = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))\n\n\t\t for row in spamreader:\n\t\t if(counter == 1):\n\t\t headers = row\n\n\t\t if(counter > 1):\n\t\t dictionary = dict(zip(headers,row))\n\t\t doc.append(key,dictionary)\n\n\t\t counter = counter + 1\n\n\tdef clear_table(self, record, table_field):\n\t\tif table_field == \"Drivers\":\n\t\t\tfor rec in record.choferes:\n\t\t\t\trec.delete()\n\n\t\tif table_field == \"Operators\":\n\t\t\tfor rec in record.operadores:\n\t\t\t\trec.delete()"
},
{
"alpha_fraction": 0.663551390171051,
"alphanum_fraction": 0.672897219657898,
"avg_line_length": 23.457143783569336,
"blob_id": "46cfc50ae681755304c95ae8bad70692457a8403",
"content_id": "3672c7e4f87aa1292f1c965de2a66fca67b5a6c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 856,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 35,
"path": "/nomiapp/nomiapp/doctype/bajar_empleados/bajar_empleados.js",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016, Soldeva, SRL and contributors\n// For license information, please see license.txt\n\nfrappe.ui.form.on('Bajar Empleados', {\n\trefresh: function(frm) {\n\t\tcur_frm.toolbar.print_icon.addClass(\"hide\");\n\t\tcur_frm.disable_save();\n\n\t\tsetTimeout(function(){\n\t\t\t$(\"button[data-fieldname=download_drivers]\")\n\t\t\t\t.attr(\"class\", \"btn btn-primary btn-sm\");\n\n\t\t\t$(\"button[data-fieldname=download_operators]\")\n\t\t\t\t.attr(\"class\", \"btn btn-primary btn-sm\");\n\n\t\t},400);\n\n\t}, download_drivers: function(){\n\t\tdescargar(\"choferes\");\n\t}, download_operators: function(){\n\t\tdescargar(\"operadores\");\n\t}\n});\n\nfunction descargar(tabla){\n\tif(!cur_frm.doc.project){\n\t\tfrappe.msgprint(\"Debe de seleccionar una obra!\");\n\t\treturn 1;\n\t}\n\n\tvar dowload_url = \n\t\t\"/api/method/nomiapp.api.descargar_\" + tabla +\n\t\t\"?obra=\" + cur_frm.doc.project;\n\twindow.open(dowload_url);\n}\n"
},
{
"alpha_fraction": 0.7228915691375732,
"alphanum_fraction": 0.7228915691375732,
"avg_line_length": 11,
"blob_id": "bd4efae107aee511764213ee5563753784534ed2",
"content_id": "a777737d73a82e93fd0b4cfcf6cdce60d464c201",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 83,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 7,
"path": "/README.md",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "## NomiApp\n\nUna aplicacion para la ayuda de la Recursos Humanos.\n\n#### License\n\nMIT"
},
{
"alpha_fraction": 0.6561504602432251,
"alphanum_fraction": 0.6626725196838379,
"avg_line_length": 29.669767379760742,
"blob_id": "6de0f78b78be1e4c8d0039482d66c8925f0d8db4",
"content_id": "e5a33ca4330d82e72cf32dcdef2c0a764afe3067",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6593,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 215,
"path": "/nomiapp/nomiapp/doctype/empleados_en_obra/empleados_en_obra.js",
"repo_name": "YefriTavarez/NomiApp",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016, Soldeva, SRL and contributors\n// For license information, please see license.txt\n\nfrappe.ui.form.on('Empleados en Obra', {\n\tadd_navigation_buttons: function(frm){\n\t\tvar callback = function(response){\n\t\t\tif(frm.doc.__islocal || !response.message) return ;\n\n\t\t\tvar list = response.message;\n\t\t\tvar index = 0, prev_index = 0, next_index = 0;\n\t\t\tvar cur_route, prev_route, next_route;\n\n\t\t\tfor( ; index < list.length; index ++){\n\t\t\t\tprev_index = index - 1 < 0 ? 0 : index - 1;\n\t\t\t\tnext_index = index + 1 >= list.length ? list.length - 1 : index + 1;\n\n\t\t\t\tif(frm.doc.name == list[index].name){\n\t\t\t\t\t//console.log(\"found\");\n\t\t\t\t\tprev_route = list[prev_index].name;\n\t\t\t\t\tnext_route = list[next_index].name;\n\t\t\t\t\tcur_route = list[index].name;\n\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar route_next = function(res){ set_emp_route(next_route); };\n\t\t\tvar route_prev = function(res){ set_emp_route(prev_route); };\n\t\t\tvar set_emp_route = function(docname){ frappe.set_route(\"Form/Empleados en Obra\",docname); };\n\n\t\t\tif(prev_route != cur_route) frm.add_custom_button(\"<< Prev\", route_prev);\n\n\t\t\tif(next_route != cur_route)\tfrm.add_custom_button(\"Next >>\", route_next);\n\t\t};\n\t\t\n\t\t$c(\"runserverobj\", args={\"method\": \"get_list\", \"docs\": cur_frm.doc}, callback=callback);\n\t},\n\trefresh: function(frm) {\n\t\tvar me = this;\n\t\tif(!frm.doc.__islocal)\n\t\t\tfrm.add_custom_button(\"Choferes\", descargar_choferes,\"Descargar CSV\") &\n\t\t\tfrm.add_custom_button(\"Operadores\", descargar_operadores,\"Descargar CSV\");\n\n\t\tcur_frm.trigger(\"add_navigation_buttons\");\n\n\t\tfunction descargar_choferes(){\n\t\t\tdescargar(\"choferes\");\n\t\t}\n\n\t\tfunction descargar_operadores(){\n\t\t\tdescargar(\"operadores\");\n\t\t}\n\n\t\tfunction descargar(tabla){\n\t\t\tif(!frm.doc.obra){\n\t\t\t\tfrappe.msgprint(\"Debe de seleccionar una obra!\");\n\t\t\t\treturn 1;\n\t\t\t}\n\n\t\t\tvar dowload_url = \n\t\t\t\t\"/api/method/nomiapp.api.descargar_\" + tabla +\n\t\t\t\t\"?with_data=True&emp_ob=\" + frm.doc.name + \"&obra=\" + frm.doc.obra;\n\t\t\twindow.open(dowload_url);\n\t\t\t\n\t\t}\n\t},\n\tobra: function(frm){\n\t\tif(!frm.doc.obra) return ;\n\n\t\tfrm.clear_table(\"choferes\");\n\n\t\tfrappe.call({\n\t\t\tmethod: \"nomiapp.api.getChoferes\",\n\t\t\targs: { obra: me.frm.doc.obra },\n\t\t\tcallback: function(data) {\n\t\t\t\tagregarChoferes(data.message);\n\t\t\t}\n\t\t});\n\n\t\tfunction agregarChoferes(choferes){\n\t\t\tif(!choferes){ \n\t\t\t\tfrm.clear_table(\"choferes\");\n\t\t\t\trefresh_field(\"choferes\");\n\t\t\t\treturn ; \n\t\t\t}\n\n\t\t\tchoferes.forEach(function(chofer){\n\t\t\t\tagregarChofer(chofer);\n\t\t\t});\n\t\t}\n\n\t\tfunction agregarChofer(chofer){\n\t\t\tfrm.add_child(\"choferes\",{\n\t\t employee : chofer.employee,\n\t\t employee_name : chofer.employee_name,\n\t\t odometer_start: 0.0,\n\t\t \todometer_end: 0.0,\n\t\t \tkilometers: 0.0\n\t\t });\n\n\t\t\trefresh_field(\"choferes\");\n\t\t}\n\n\t\tfrm.clear_table(\"operadores\");\n\t \n\t\tfrappe.call({\n\t\t\tmethod: \"nomiapp.api.getOperadores\",\n\t\t\targs: { obra: me.frm.doc.obra },\n\t\t\tcallback: function(data) {\n\t\t\t\tagregarOperadores(data.message);\n\t\t\t}\n\t\t});\n\n\t\tfunction agregarOperadores(operadores){\n\t\t\tif(!operadores){ \n\t\t\t\tfrm.clear_table(\"operadores\");\n\t\t\t\trefresh_field(\"operadores\");\n\t\t\t\treturn ; 
\n\t\t\t}\n\n\t\t\toperadores.forEach(function(operador){\n\t\t\t\tagregarOperador(operador);\n\t\t\t});\n\t\t}\n\n\t\tfunction agregarOperador(operador){\n\t\t\tcur_frm.add_child(\"operadores\",{\n\t\t employee : operador.employee,\n\t\t employee_name : operador.employee_name,\n\t\t horometer_start: 0.0,\n\t\t \thorometer_end: 0.0,\n\t\t \thours: 0.0\n\t\t });\n\n\t\t\trefresh_field(\"operadores\");\n\t\t}\n\t}\n});\n\nfrappe.ui.form.on(\"Tabla de Choferes\",{\n \todometer_start:\tfunction(frm, child_doctype, child_name) { \n\t\tvar odometer_start = frappe.model.get_value(child_doctype, child_name,'odometer_start'); \n\t\tvar odometer_end = frappe.model.get_value(child_doctype, child_name,'odometer_end'); \n\n\t\tvar kilometers = flt(odometer_end) - flt(odometer_start);\n\t\tfrappe.model.set_value(child_doctype, child_name,'kilometers', kilometers);\n\t},\n\n\todometer_end: function(frm, child_doctype, child_name) { \n\t\tvar odometer_start = frappe.model.get_value(child_doctype, child_name,'odometer_start'); \n\t\tvar odometer_end = frappe.model.get_value(child_doctype, child_name,'odometer_end'); \n\n\t\tvar kilometers = flt(odometer_end) - flt(odometer_start);\n\t\tfrappe.model.set_value(child_doctype, child_name,'kilometers', kilometers);\n\t},\n\n\tis_amount: function(frm, child_doctype, child_name) { \n\t\tvar is_amount = frappe.model.get_value(child_doctype, child_name,'is_amount');\n\t\tif(is_amount){\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'odometer_start', 0); \n\t\t\tfrappe.model.set_value(child_doctype, child_name,'odometer_end', 0); \n\t\t\tfrappe.model.set_value(child_doctype, child_name,'kilometers', 0); \n\t\t} else {\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'amount', 0); \n\t\t}\n\t}\n});\n\nfrappe.ui.form.on(\"Tabla de Operadores\", {\n\thorometer_start : function(frm, child_doctype, child_name) { \n\t\tvar horometer_start = frappe.model.get_value(child_doctype, child_name,'horometer_start'); \n\t\tvar horometer_end = frappe.model.get_value(child_doctype, child_name,'horometer_end'); \n\n\t\tvar hours = flt(horometer_end) - flt(horometer_start);\n\n\t\tif(hours < 6){\n\t\t\tvar inactive_hours = 6 - hours;\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'inactive_hours', inactive_hours);\n\t\t\t//set the minimun amount of hours allowed in case it is less than 6\n\t\t\thours = 6;\n\t\t}\n\n\t\tfrappe.model.set_value(child_doctype, child_name,'hours', hours);\n\t},\n\t\n\thorometer_end: \tfunction(frm, child_doctype, child_name) { \n\t\tvar horometer_start = frappe.model.get_value(child_doctype, child_name,'horometer_start'); \n\t\tvar horometer_end = frappe.model.get_value(child_doctype, child_name,'horometer_end');\n\n\t\tif(horometer_end < horometer_start){\n\t\t\tfrappe.msgprint(\"Horometro Final no puede ser menor que el Horometro Inicial.\t\");\n\t\t}\n\n\t\tvar hours = flt(horometer_end) - flt(horometer_start);\n\n\t\tif(hours < 6){\n\t\t\tvar inactive_hours = 6 - hours;\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'inactive_hours', inactive_hours);\n\t\t\t//set the minimun amount of hours allowed in case it is less than 6\n\t\t\thours = 6;\n\t\t}\n\n\t\tfrappe.model.set_value(child_doctype, child_name,'hours', hours);\n\t},\n\tis_amount: function(frm, child_doctype, child_name) { \n\t\tvar is_amount = frappe.model.get_value(child_doctype, child_name,'is_amount');\n\t\tif(is_amount){\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'horometer_start', 0); \n\t\t\tfrappe.model.set_value(child_doctype, 
child_name,'horometer_end', 0); \n\t\t\tfrappe.model.set_value(child_doctype, child_name,'hours', 0); \n\t\t} else {\n\t\t\tfrappe.model.set_value(child_doctype, child_name,'amount', 0); \n\t\t}\n\t}\n});"
}
] | 10 |
shauryagupta06/FR_full | https://github.com/shauryagupta06/FR_full | 703ef5646df8432e7c17fd649337463dec5b4c98 | 56ae097a87ba441713f85fa74f3f4dc2db0e0751 | 04d217a34f0b7b15cd0ba58c9be0f9bc1c0b48de | refs/heads/master | 2020-06-02T14:12:47.984499 | 2019-06-10T17:36:16 | 2019-06-10T17:36:16 | 191,183,395 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.39952343702316284,
"alphanum_fraction": 0.4066719710826874,
"avg_line_length": 31.756755828857422,
"blob_id": "622b979dc3957f519c20c6cb197531fe1990a653",
"content_id": "fbe2abf08a162c5187ad7e6a6000bec83cc753d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1259,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 37,
"path": "/detectimage.py",
"repo_name": "shauryagupta06/FR_full",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport datetime\r\nimport csv\r\n\r\nfrom numba import cuda,vectorize\r\n\r\nimport cv2 # openCV\r\nimport numpy as np # for numpy arrays\r\nimport sqlite3\r\nimport os # for creating folders\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n \r\n# calling the sqlite3 database\r\n\r\nId = input(\"lecture no. :\")\r\nprint(Id)\r\nfolderName = \"lecture\" + str(Id) # creating the person or user folder\r\nfolderPath = \"testing/\"+folderName\r\nif not os.path.exists(folderPath):\r\n os.makedirs(folderPath)\r\n\r\n\r\nsampleNum = 0\r\nsampleNum1 = 0\r\nwhile(True):\r\n ret, img = cap.read() # reading the camera input\r\n\r\n \r\n\r\ncap.release() # turning the webcam off\r\ncv2.destroyAllWindows() # Closing all the opened windows\r\n\r\n\r\n#X = datetime.datetime.now().strftime (\"%Y\" + \"/\" + \"%m\" + \"/\" + \"%d\")\r\n \r\n"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.4285714328289032,
"avg_line_length": 11,
"blob_id": "bdf906b120fb5bd7321196a010b3341673097c1b",
"content_id": "7e33f024649df928ea5978ba4185314bec7e3f78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 1,
"path": "/README.md",
"repo_name": "shauryagupta06/FR_full",
"src_encoding": "UTF-8",
"text": "\"# FR_full\" \r\n"
},
{
"alpha_fraction": 0.40628984570503235,
"alphanum_fraction": 0.4266893267631531,
"avg_line_length": 43.25,
"blob_id": "6b36c57b9b9b1e2146fef3cf76ff97e20235525c",
"content_id": "8a9351ee5a280dfe621cc5fe6c6d566482e7dd0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2353,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 52,
"path": "/addstudentwithoutface.py",
"repo_name": "shauryagupta06/FR_full",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 20 02:54:18 2019\r\n\r\n@author: piyush\r\n\"\"\"\r\n\r\nimport cv2 # openCV\r\nimport numpy as np # for numpy arrays\r\nimport sqlite3\r\n#import dlib\r\nimport os \r\nimport csv # for creating folders\r\n\r\ncap = cv2.VideoCapture(0)\r\n#detector = dlib.get_frontal_face_detector()\r\n\r\n # closing the connection\r\n\r\nname = input(\"Enter student's name : \")\r\nroll = input(\"Enter student's Roll Number : \")\r\nId = input(\"id : \")\r\n\r\nwith open('mycsv.csv','a' , newline = '' ) as f:\r\n thewriter = csv.writer(f)\r\n thewriter.writerow([Id,roll,name])\r\n\r\n#insertOrUpdate(Id, name, roll) # calling the sqlite3 database\r\n\r\n\r\nfolderName = \"user\" + str(Id) # creating the person or user folder\r\nfolderPath = \"train2_img/\"+folderName\r\nif not os.path.exists(folderPath):\r\n os.makedirs(folderPath)\r\n\r\nsampleNum = 0\r\nwhile(True):\r\n ret, img = cap.read() # reading the camera input\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Converting to GrayScale\r\n# dets = detector(img, 1)\r\n sampleNum += 1\r\n cv2.imwrite(folderPath + \"/User.\" + Id + \".\" + str(sampleNum) + \".jpg\",\r\n img) # Saving the faces\r\n # cv2.rectangle(img, (d.left(), d.top()) ,(d.right(), d.bottom()),(0,255,0) ,2) # Forming the rectangle\r\n cv2.waitKey(200) # waiting time of 200 milisecond\r\n cv2.imshow('frame', img) # showing the video input from camera on window\r\n cv2.waitKey(1)\r\n if(sampleNum >= 50): # will take 20 faces\r\n break\r\n\r\ncap.release() # turning the webcam off\r\ncv2.destroyAllWindows() # Closing all the opened windows\r\n"
}
] | 3 |
nipeshroy9194/cs5600 | https://github.com/nipeshroy9194/cs5600 | 2b1cd1404a3f11bbdfacf450b1120a2713d3e06f | fdd206e1bca5bfe0e68b9c5d43888fd8ebb24d8c | e65e487f8942d6f727db80919c0781fded628ab3 | refs/heads/master | 2021-03-27T20:45:12.110311 | 2017-12-21T01:44:19 | 2017-12-21T01:44:19 | 104,553,824 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6335697174072266,
"alphanum_fraction": 0.6453900933265686,
"avg_line_length": 20.100000381469727,
"blob_id": "0b4531b22de5ff846a9a1053e2cddb8f34ddf653",
"content_id": "5502ffb3e022523acdfee566a86b32f4297a150a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 20,
"path": "/hw2/source/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC = gcc\nCFLAGS = -g -Wall -Werror -fno-stack-protector\nCXXFLAGS = -o\nSRCWORKER = worker.c\nSRCMASTER = master.c\nTARGET = ../worker\nTARGET1 = ../master\nSTATICSME = ../sme/libsme.a\n\nall: $(TARGET) $(TARGET1)\n\n$(TARGET): $(SRCWORKER)\n\t$(CC) $(CFLAGS) $(SRCWORKER) $(CXXFLAGS) $(TARGET)\n\n$(TARGET1): $(SRCMASTER)\n\t$(CC) $(CFLAGS) $(SRCMASTER) $(CXXFLAGS) $(TARGET1) $(STATICSME)\n\n.PHONY: clean\nclean:\n\trm $(TARGET) $(TARGET1)\n\n"
},
{
"alpha_fraction": 0.5294579863548279,
"alphanum_fraction": 0.5435978174209595,
"avg_line_length": 16.438356399536133,
"blob_id": "8e2f2d4b7c86013bd08c343a34cfdab30e9ed8c1",
"content_id": "67e7aed0e13640b4397819445be1d893bb668f06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1273,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 73,
"path": "/hw2/source/worker.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include <stdbool.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\nbool validate_and_get_args(int argc, char **argv, int *x, int *n) {\n int ch;\n\n if (argc == 1) {\n printf(\"Supply arguments with -x and -n\\n\");\n exit(1);\n }\n\n while ((ch = getopt(argc, argv, \"x:n:\")) != -1) {\n switch (ch) {\n case 'x':\n *x = atoi(optarg);\n break;\n case 'n':\n *n = atoi(optarg);\n break;\n default:\n if (n < 0)\n printf(\"Invalid arguments\\n\");\n return false;\n }\n }\n\n return true;\n}\n\nint factorial(int n) {\n if (n == 0)\n return 1;\n if (n == 1)\n return 1;\n return n * factorial(n - 1);\n}\n\nlong int power(int x, int n) {\n long int answer = x;\n\n if (n == 1)\n goto out;\n\n while (n > 1) {\n answer = answer * x;\n n--;\n }\n\nout:\n return answer;\n}\n\nint main(int argc, char **argv) {\n int x = -1, n = -1;\n long int numerator = 0, denominator = 0;\n double answer = 0;\n bool ret = false;\n\n ret = validate_and_get_args(argc, argv, &x, &n);\n if (ret == false) {\n printf(\"Usage ./worker.o -x number -n number\\n\");\n return 1;\n }\n\n numerator = power(x, n);\n denominator = factorial(n);\n\n answer = (double)numerator / (double)denominator;\n write(1, &answer, sizeof(answer));\n}\n"
},
{
"alpha_fraction": 0.610497236251831,
"alphanum_fraction": 0.6270717978477478,
"avg_line_length": 21.375,
"blob_id": "429d32cec6e1253e209c14c26a4d07e34476b6ec",
"content_id": "15cb61eae4fcafc8a6d1237a1efeb2a8cb5141b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 16,
"path": "/hw6/automate.sh",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# A simple client doing \n\nrouter_ip=$1\nrouter_port=$2\ntext_file=$3\n\nwhile IFS= read -r line; do\n\tip=$(echo $line | cut -d \" \" -f 1)\n\tport=$(echo $line | cut -d \" \" -f 2)\n\t# Run server with the ip and port specified\n\tpython node.py $ip $port &\t\ndone < \"$3\"\n \n# ./router $router_port $text_file\ngo run router.go $router_ip $router_port $text_file\n\n\n\n\n"
},
{
"alpha_fraction": 0.5423038005828857,
"alphanum_fraction": 0.5667685866355896,
"avg_line_length": 20.326086044311523,
"blob_id": "aa2d940656a89c62134114bf3e2d657e87f13ba0",
"content_id": "c3a74c3eb45e9234f3f69eea0cabf1a92601d896",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 46,
"path": "/hw2/test/echo.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#define _GNU_SOURCE /* See feature_test_macros(7) */\n#include \"../sme/sme_epoll.h\"\n#include \"../sme/sme_select.h\"\n#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\nvoid fd_handle(sme_mech_t *m, sme_fd_t *fde, void *data) {\n char buf[100];\n read(fde->fd, buf, 5);\n printf(\"Received data : %s\\n\", buf);\n}\n\nvoid pid_handle(sme_mech_t *mech, sme_proc_t *proce, void *data) {\n printf(\"child exited..\\n\");\n}\n\nint main() {\n int pid;\n int p_fd[2];\n int ret;\n sme_mech_t *m = NULL;\n m = epoll_mech_init();\n /* create pipe */\n pipe2(p_fd, O_NONBLOCK);\n\n pid = fork();\n if (pid < 0) {\n printf(\"fork failed\");\n } else if (pid == 0) {\n close(p_fd[0]);\n dup2(p_fd[1], 1);\n ret = write(p_fd[1], \"hello\", 5);\n if (ret < 0)\n write(p_fd[1], \"write failed\", 10);\n _exit(EXIT_SUCCESS);\n } else {\n close(p_fd[1]);\n epoll_mech_add_fd(m, p_fd[0], 0, fd_handle, 0);\n }\n\n epoll_mech_loop_wait(m);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6680850982666016,
"alphanum_fraction": 0.672340452671051,
"avg_line_length": 17.076923370361328,
"blob_id": "4fee82a614ea8219e19ec8333b9c81a811650117",
"content_id": "ee99f5f30b55473029a3cc612dc1d728cc13a194",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 13,
"path": "/test/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC=gcc\nCFLAGS=\n\nall: malloc_bfs malloc_dfs\n\nmalloc_dfs:\n\t$(CC) $(CFLAGS) -lm my_malloc_dfs.c -o malloc_dfs.o\n\nmalloc_bfs:\n\t$(CC) $(CFLAGS) -lm my_malloc_bfs.c -o malloc_bfs.o\n\nclean:\n\trm -f malloc_bfs.o malloc_dfs.o driver 2>/dev/null\n"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6321839094161987,
"avg_line_length": 8.666666984558105,
"blob_id": "a9fa1bea03e483be970bccb6db3fba653260aab9",
"content_id": "bf781eaddc42428ad740ebee4b499a8fee0d0ee3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 9,
"path": "/hw5/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC=gcc\n\nall:hw5\n\ncalculate_ram :\n\t$(CC) -O3 -g hw5.c -o hw5\n\nclean:\n\trm hw5 output.txt\n"
},
{
"alpha_fraction": 0.6233951449394226,
"alphanum_fraction": 0.6233951449394226,
"avg_line_length": 15.302325248718262,
"blob_id": "361abd704d07510b23b1d089abec12ab381fe0ca",
"content_id": "e8649f12252b1a7769876cdd4b1f4c7a5cfa790d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 701,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 43,
"path": "/hw2/sme/sme_def.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __SME_DEF_H__\n#define __SME_DEF_H__\n\n#include <stdbool.h>\n/*\n * fd event type\n */\nenum fd_event { SME_READ, SME_WRITE, SME_EXCEPTION };\n\nstruct sme_fd {\n struct sme_fd *next, *prev;\n struct sme_mech *mech;\n int fd;\n enum fd_event ev;\n sme_fd_cb cb;\n void *cb_data;\n};\n\nstruct sme_proc {\n struct sme_proc *next, *prev;\n struct sme_mech *mech;\n int pid;\n int flags;\n sme_proc_cb cb;\n void *cb_data;\n};\n\nstruct sme_mech {\n /* list of fd events */\n struct sme_fd *fd_events;\n\n /* list of proc events */\n struct sme_proc *proc_events;\n\n /* private data */\n void *priv_data;\n};\n\nbool sme_sequential_init(void);\nbool sme_select_init();\nbool sme_poll_init();\n\n#endif //__SME_DEF_H__\n"
},
{
"alpha_fraction": 0.602523684501648,
"alphanum_fraction": 0.602523684501648,
"avg_line_length": 21.64285659790039,
"blob_id": "277f9fefdff2f41ab31157ef8a6ac06e554d464d",
"content_id": "1afd6f49f37d0ad12aa8ae2fb13c955522ff0b6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/hw2/sme/sme_epoll.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __SME_EPOLL_H__\n#define __SME_EPOLL_H__\n#include \"list.h\"\n#include \"sme.h\"\n#include \"sme_def.h\"\n\nsme_mech_t *epoll_mech_init();\n\nsme_fd_t *epoll_mech_add_fd(sme_mech_t *mech, int fd, fd_event_t ev,\n sme_fd_cb cb, void *cb_data);\n\nint epoll_mech_loop_wait(sme_mech_t *mech);\n\n#endif\n"
},
{
"alpha_fraction": 0.6120996475219727,
"alphanum_fraction": 0.6298932433128357,
"avg_line_length": 19,
"blob_id": "7a151f9324d2a912245e0d0f677bffd2747febc6",
"content_id": "eba2b688066f0f9405aa212587e0ddba40cec584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 14,
"path": "/hw2/test/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC = gcc\nCFLAGS = -g -Wall -Werror -fno-stack-protector\nCXXFLAGS = -o\nSMEECHO = ./echo.c\nTARGET = ../sme/libsme.a\nTARGET2 = ./echo\nall: $(TARGET2)\n\n$(TARGET2): $(SMEECHO) $(TARGET)\n\t$(CC) $(CFLAGS) $(SMEECHO) $(CXXFLAGS) $(TARGET2) $(TARGET)\n\n.PHONY: clean\nclean:\n\trm $(TARGET2)\n\n"
},
{
"alpha_fraction": 0.7308270931243896,
"alphanum_fraction": 0.7338345646858215,
"avg_line_length": 46.5,
"blob_id": "2ebc6748bdd5291daebd9a18a1a165f7f5c64ea3",
"content_id": "e23e50d7964991b1123116ae570423225f0a0bc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 665,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 14,
"path": "/hw6/client.sh",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# A simple client doing curl on distributed key-value store\n\nrouter_port=$2\nrouter_ip=$1\njson_put=put.json\njson_post=post.json\n\ncurl -iH 'Content-Type: application/json' -X PUT -d @$json_put $router_ip:$router_port/set\ncurl -iH 'Content-Type: application/json' -X POST -d @$json_post $router_ip:$router_port/query\ncurl -iH 'Content-Type: application/json' -X POST -d @$json_post $router_ip:$router_port/fetch\ncurl -iH \"Accept: application/json\" -X GET $router_ip:$router_port/fetch\ncurl -iH \"Accept: application/json\" -X GET $router_ip:$router_port/query\ncurl -iH 'Content-Type: application/json' -X DELETE -d @$json_post $router_ip:$router_port/delete\n"
},
{
"alpha_fraction": 0.5203858613967896,
"alphanum_fraction": 0.5282126069068909,
"avg_line_length": 33.772151947021484,
"blob_id": "f30ad7f3c5aaee7ea4cd73929106f8f2df86719b",
"content_id": "7c0cd4ab46bde4d4b496c82480a94cdfe70031ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5494,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 158,
"path": "/hw6/node.py",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nVery simple HTTP server in python.\nUsage::\n ./dummy-web-server.py [<ip_address>] [<port>]\nSend a GET request::\n curl http://localhost\nSend a HEAD request::\n curl -I http://localhost\nSend a POST request::\n curl -d \"foo=bar&bin=baz\" http://localhost\n\"\"\"\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport SocketServer\nfrom urlparse import parse_qs\nimport json\nimport string\n\nkey_value = {}\nKEY_NOT_FOUND = \"KEY NOT FOUND\"\n\n'''\nsudo lsof -i -P -n | grep LISTEN\n'''\nclass S(BaseHTTPRequestHandler):\n\n def _set_headers(self, header_code):\n self.send_response(header_code)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n '''\n curl localhost http://localhost:5000/?key=tret,abc\n '''\n def do_GET(self):\n print \"----- GET IN SERVER !! ------\"\n response_data = []\n if not (self.path == '/fetch' or self.path == '/query'):\n self._set_headers(403)\n self.wfile.write(response = {\"keyvalue\": response_data})\n return\n self._set_headers(200)\n for key in key_value.keys():\n if self.path == '/fetch':\n row = {\"key\": key, \"value\": key_value[key]}\n elif self.path == '/query':\n row = {\"key\": key, \"value\": \"True\"}\n response_data.append(row)\n response = {\"keyvalue\": response_data}\n resp_json = json.dumps(response)\n self.wfile.write(resp_json) \n\n '''\n curl --request PUT 'localhost:5000' --data '[{\"mnop\":\"345\"}, {\"tret\":\"555\"}]'\n '''\n def do_PUT(self):\n print \"----- PUT IN SERVER !! ------\"\n response_data = []\n if not self.path == '/set':\n self._set_headers(403)\n self.wfile.write(response = {\"keyvalue\": response_data})\n return\n self._set_headers(200)\n print self.headers\n length = int(self.headers['Content-Length'])\n content = self.rfile.read(length)\n content_lst = json.loads(content)\n print content_lst\n for k_v in content_lst['keyvalue']:\n key = k_v['key']\n val = k_v['value']\n print key, '------',val\n if key in key_value:\n row = {\"key\": key, \"value\": \"UPDATED\"}\n else:\n row = {\"key\": key, \"value\": \"INSERTED\"}\n key_value[key] = val \n response_data.append(row)\n response = {\"keyvalue\": response_data}\n resp_json = json.dumps(response)\n print \"RESPONSE :::: \", resp_json\n self.wfile.write(resp_json) \n\n\n def do_HEAD(self):\n self._set_headers()\n \n def do_POST(self):\n print \"----- POST IN SERVER !! 
------\"\n response_data = []\n if not (self.path == '/fetch' or self.path == '/query'):\n self._set_headers(403)\n self.wfile.write(response = {\"keyvalue\": response_data})\n return\n self._set_headers(200)\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n content = self.rfile.read(content_length) # <--- Gets the data itself\n content_lst = json.loads(content)\n print content_lst\n for k_v in content_lst['keyvalue']:\n key = k_v['key']\n if key in key_value:\n if self.path == '/fetch':\n row = {\"key\": key, \"value\": key_value[key]}\n elif self.path == '/query':\n row = {\"key\": key, \"value\": \"True\"}\n else:\n if self.path == '/fetch':\n row = {\"key\": key, \"value\": KEY_NOT_FOUND}\n elif self.path == '/query':\n row = {\"key\": key, \"value\": \"False\"}\n response_data.append(row)\n response = {\"keyvalue\": response_data}\n resp_json = json.dumps(response)\n print \"Response :: \", resp_json\n self.wfile.write(resp_json) \n\n def do_DELETE(self):\n print \"------ DELETE IN SERVER -----\"\n response_data = []\n print self.path\n if not (self.path == '/delete'):\n self._set_headers(403)\n self.wfile.write(response = {\"keyvalue\": response_data})\n return\n self._set_headers(200)\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n content = self.rfile.read(content_length) # <--- Gets the data itself\n content_lst = json.loads(content)\n print content_lst\n for k_v in content_lst['keyvalue']:\n key = k_v['key']\n if key in key_value:\n row = {\"key\" : key, \"value\" : \"DELETED\"}\n else:\n row = {\"key\" : key, \"value\" : \"KEY NOT FOUND\"}\n key_value.pop(key, None)\n response_data.append(row)\n response = {\"keyvalue\": response_data}\n resp_json = json.dumps(response)\n print \"Response :: \", resp_json\n self.wfile.write(resp_json) \n \n\ndef run(server_class=HTTPServer, handler_class=S, host='', port=80):\n server_address = (host, port)\n httpd = server_class(server_address, handler_class)\n # print 'Starting Server at ' + string(host) + 'on PORT ' + string(port) + '...'\n print 'Server :: ', host , port\n httpd.serve_forever()\n\nif __name__ == \"__main__\":\n from sys import argv\n\n if len(argv) == 3:\n run(host=argv[1].encode('string-escape'), port=int(argv[2]))\n else:\n run()\n"
},
{
"alpha_fraction": 0.582608699798584,
"alphanum_fraction": 0.582608699798584,
"avg_line_length": 26.058822631835938,
"blob_id": "1feb297d90451a9faa7671994b5ef1f40f5d84a8",
"content_id": "9d9de5e798e15b62f38ceb9a4f0c5d333317d23a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 17,
"path": "/hw2/sme/sme_select.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __SME_SELECT_H__\n#define __SME_SELECT_H__\n#include \"list.h\"\n#include \"sme.h\"\n#include \"sme_def.h\"\n\nsme_mech_t *select_mech_init();\n\nsme_fd_t *select_mech_add_fd(sme_mech_t *mech, int fd, fd_event_t ev,\n sme_fd_cb cb, void *cb_data);\n\nsme_proc_t *select_mech_add_proc(sme_mech_t *mech, int pid, int flags,\n sme_proc_cb cb, void *cb_data);\n\nint select_mech_loop_wait(sme_mech_t *mech);\n\n#endif\n"
},
{
"alpha_fraction": 0.58859783411026,
"alphanum_fraction": 0.5932203531265259,
"avg_line_length": 20.633333206176758,
"blob_id": "472fe11d0c534ea8ab14a5ad6fdbebea55b44fd9",
"content_id": "2e27e7c56c07f2bde59433bd13d46c634c81fc46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 30,
"path": "/hw3/calloc.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"mymalloc.h\"\n#include \"buddy.h\"\n#include \"utility.h\"\n#include <string.h>\n#include <errno.h>\n\n//void *calloc(size_t nmemb, size_t size)\nvoid *mycalloc(size_t nmemb, size_t size)\n{\n void *mem = NULL;\n\tsize_t total_size = nmemb * size;\n\n if (0 >= size) {\n goto out;\n }\n //debug(\"Size requested %zu\", nmemb * size);\n\n /* TODO round-off size of the next higher power of 2*/\n /* check if size can be satisfied */\n mem = mymalloc(total_size);\n //mem = malloc(total_size);\n\tif (NULL != mem) {\n //debug(\"mem %zu allocated\", nmemb * size);\n\t\tmemset(mem, 0, total_size);\n\t\tgoto out;\n }\n\nout:\n return mem;\n}\n"
},
{
"alpha_fraction": 0.6449191570281982,
"alphanum_fraction": 0.6530023217201233,
"avg_line_length": 26.0625,
"blob_id": "b4c9f53aed31463174776dcc5c4e03f5dfe0e23b",
"content_id": "46430da0cac2a60e7df5410c8fdf26a91713167f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1732,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 64,
"path": "/hw1/include/memory_map.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : memory_map.h\n * Description : Header including everything related to memory maps\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include <stdio.h>\n#include <ucontext.h>\n\n#define SUCCESS 0\n#define FAILURE 1\n\ntypedef struct memory_region {\n /* Start address & End address of the memory region */\n void *start_addr;\n void *end_addr;\n /* Permissions for the particular memory region */\n int is_readable;\n int is_writeable;\n int is_executable;\n int is_private;\n /**\n * It represent the filename from where the memory is populated\n * say for 'cat' the memory is populated from the 'usr/bin/cat' file\n */\n void *filename;\n /* The total data size of the memory region */\n int total_data_size;\n} mem_reg;\n\n/**\n * @brief : Populate Memory Region structure\n *\n * @param : mr - Memory Map structure to be populated\n * @param : memory_map - Memory Map Buffer from /proc/pid/maps\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nvoid populate_mem_reg(mem_reg *mr, char *memory_map);\n\n/**\n * @brief : Write memory map into the file\n *\n * @param : mr - Memory map structure to be written\n * @param : ckpt_fd - File descriptor of the file which contains checkpoint\n * \t\t\t\t\t data\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nint write_mem_ref(mem_reg *mr, int ckpt_fd);\n\n/**\n * @brief : Process the memory map and checkpoint the data\n *\n * @param : mr - Memory map structure\n * @param : memory_map - Memory map read from \"/proc/self/maps\"\n * @param : pipe_fd - Unnamed Pipe File Descriptor\n * @param : ckpt_fd - Checkpoint file, File Descriptor\n *\n * @return : SUCCESS(0)/FAILURE(0)\n */\nint process_memory_map(mem_reg *mr, char *memory_map, int *pipe_fd,\n int ckpt_fd);\n"
},
{
"alpha_fraction": 0.6855862736701965,
"alphanum_fraction": 0.6878267526626587,
"avg_line_length": 21.694915771484375,
"blob_id": "b56ce7cbe29337eff7121494b45e2a24edd0aef6",
"content_id": "2c72179354eb1d45c0be3694f189d16288b67be4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1339,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 59,
"path": "/hw3/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC=gcc\nDEBUG=no\nCFLAGS=-fPIC -Wall\nCFLAGS_ON_DEBUG=-DDEBUG -fPIC -Wall -Werror\n\nall: lib driver test\n\nifeq ($(DEBUG),no)\ntest:\n\t$(CC) t-test1.c -lpthread -o test\n\ndriver:\n\t$(CC) $(CFLAGS) -L./ driver.c -lmalloc -o driver\n\nlib: malloc buddy free calloc realloc\n\t$(CC) -shared -o libmalloc.so buddy.o malloc.o calloc.o realloc.o free.o\n\nmalloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS) -c malloc.c\n\ncalloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS) -c calloc.c\n\nrealloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS) -c realloc.c\n\nfree: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS) -c free.c\n\nbuddy: buddy.h utility.h\n\t$(CC) $(CFLAGS) -c buddy.c\nelse\ntest:\n\t$(CC) t-test1.c -lpthread -o test\n\ndriver:\n\t$(CC) $(CFLAGS_ON_DEBUG) -L./ driver.c -lmalloc -o driver\n\nlib: malloc buddy free calloc realloc\n\t$(CC) -shared -o libmalloc.so buddy.o malloc.o calloc.o realloc.o free.o\n\nmalloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS_ON_DEBUG) -c malloc.c\n\ncalloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS_ON_DEBUG) -c calloc.c\n\nrealloc: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS_ON_DEBUG) -c realloc.c\n\nfree: mymalloc.h buddy utility.h\n\t$(CC) $(CFLAGS_ON_DEBUG) -c free.c\n\nbuddy: buddy.c buddy.h utility.h\n\t$(CC) $(CFLAGS_ON_DEBUG) -c buddy.c\nendif\n\nclean:\n\trm -f test libmalloc.so buddy.o malloc.o realloc.o free.o calloc.o driver 2>/dev/null\n"
},
{
"alpha_fraction": 0.5890411138534546,
"alphanum_fraction": 0.5890411138534546,
"avg_line_length": 17.25,
"blob_id": "2d99459be494d1f258bb3e3c10b456f86ac8ccc3",
"content_id": "40243a37b2a4c50ab21cc68609a00c19a36db7ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 12,
"path": "/hw3/utility.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __MY_UTILITY_H\n#define __MY_UTILITY_H\n#include <stdio.h>\n\n//Debug macro\n#ifdef DEBUG\n #define debug(fmt, ...) printf(fmt\" f(): %s\\n\", ##__VA_ARGS__, __func__)\n#else\n #define debug(fmt, ...)\n#endif\n\n#endif\n"
},
{
"alpha_fraction": 0.5794901847839355,
"alphanum_fraction": 0.5914247632026672,
"avg_line_length": 23.77007293701172,
"blob_id": "decd6dcdb6c44c190fa3adab4d3fb035375dfe93",
"content_id": "7bd54e4860e5a0a096b4a61c343a59e601d2037b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6787,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 274,
"path": "/hw1/source/myrestart.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : myrestart.c\n * Description : Program to restore a process using a checkpoint image\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"../include/common_func.h\"\n#include \"../include/helper_func.h\"\n#include <errno.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <ucontext.h>\n#include <unistd.h>\n\nchar ckpt_filename[1000];\n\n/**\n * @brief : Process memory map for the restart program\n *\n * @param : mr \t\t - Memory map structure\n * @param : memory_map - String containing memory map from /proc/self/maps\n * @param : pipe_fd - Unnamed pipe fd\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nint proc_memory_map(mem_reg *mr, char *memory_map, int *pipe_fd) {\n char ch = '\\0';\n int itr = 0, ret = -1;\n\n /**\n * Close the write end of the unnamed pipe on the parent process as there is\n * nothing to write now\n */\n ret = close(pipe_fd[1]);\n if (ret == -1) {\n printf(\"Unable to close write end of pipe on parent : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n /**\n * Read the memory map array from the command output and parse the output\n * into the memory mapped structure\n */\n itr = 0;\n while (read(pipe_fd[0], &ch, 1) != 0) {\n if (ch == '\\n') {\n memory_map[itr] = '\\0';\n populate_mem_reg(mr, memory_map);\n if (ret == FAILURE) {\n printf(\"Unable to write memory-map to the file : %d\\n\", errno);\n goto error_out;\n }\n if (strncmp(mr->filename, \"[stack]\", 7) == 0) {\n break;\n }\n itr = 0;\n memset(memory_map, 0, sizeof(*memory_map));\n } else {\n memory_map[itr++] = ch;\n }\n }\n\n /* Close the read end of the unnamed pipe */\n ret = close(pipe_fd[0]);\n if (ret == -1) {\n printf(\"Unable to close read end of the pipe : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n\n/**\n * @brief : Read ckpt file and restore memory maps\n *\n * @param : proc_ctx - Process context of checkpointed file\n * @param : ckpt_filename - Checkpoint filename\n * @param : mr - Memory map structure\n *\n */\nint read_ckpt_file_and_restore(ucontext_t *proc_ctx, char *ckpt_filename,\n mem_reg *mr) {\n int ckpt_fd = 0, flags = 0, prot = 0, ret = -1;\n void *addr = NULL;\n unsigned long int start_addr = 0;\n void *buf = NULL;\n\n /* Open the checkpoint FD */\n ckpt_fd = open(CHECKPT_PATH, O_RDWR, S_IRWXU);\n if (ckpt_fd == -1) {\n printf(\"Unable to open the checkpoint file :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n /* Read the process context */\n ret = read(ckpt_fd, proc_ctx, sizeof(*proc_ctx));\n if (ret < 0) {\n printf(\"Unable to read from the checkpoint file :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n while (1) {\n /* Read the memory map metadata structure */\n ret = read(ckpt_fd, mr, sizeof(*mr));\n if (ret == -1) {\n printf(\"Unable to read from the checkpoint file :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n if (mr->start_addr == NULL)\n break;\n\n if (mr->is_readable == 1)\n prot |= PROT_READ;\n /**\n * Add write permission to every memory map as we need to write that\n * memory area with the data during restore\n */\n if (mr->is_writeable == 1 || mr->is_writeable == 0)\n prot |= PROT_WRITE;\n if (mr->is_executable == 1)\n prot |= PROT_EXEC;\n if (mr->is_private == 1)\n flags |= MAP_PRIVATE;\n\n flags |= (MAP_ANONYMOUS | MAP_FIXED);\n\n /* Memory Map the processes map at its original position */\n 
addr = mmap(mr->start_addr, mr->total_data_size, prot, flags, -1, 0);\n if (addr == MAP_FAILED) {\n printf(\"Unable to mmap: %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n buf = calloc(1, mr->total_data_size);\n if (buf == NULL) {\n printf(\"Unable to allocate memory\\n\");\n ret = FAILURE;\n goto error_out;\n }\n\n /**\n * Read the total data related to the particular memory map being\n * processed\n */\n ret = read(ckpt_fd, buf, mr->total_data_size);\n if (ret == -1) {\n printf(\"Unable to read from the checkpoint file :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n memcpy(mr->start_addr, buf, mr->total_data_size);\n\n free(buf);\n buf = NULL;\n }\n\n ret = close(ckpt_fd);\n if (ret == -1) {\n printf(\"Unable to close checkpoint file :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n\n/**\n * @brief : Restore memory maps\n */\nvoid restore_memory_maps() {\n int pipe_fd[2] = {0}, ret = -1;\n char memory_map[512] = {'\\0'};\n int start_addr = 0, end_addr = 0;\n int stack_len = 0;\n void *buf = NULL;\n mem_reg *mr = NULL;\n ucontext_t proc_ctx;\n\n /* Allocate memory for the memory register */\n mr = (mem_reg *)calloc(1, sizeof(*mr));\n if (mr == NULL) {\n printf(\"Unable to allocate memory\\n\");\n\treturn;\n }\n\n /* Read the restart processes memory map */\n ret = create_process_for_cmd_exec(pipe_fd);\n if (ret == FAILURE) {\n printf(\"Unable to execute command to read process memory map\\n\");\n\treturn;\n }\n\n /**\n * Process the memory map of the restart process to get current stack\n * address\n */\n ret = proc_memory_map(mr, memory_map, pipe_fd);\n if (ret == FAILURE) {\n printf(\"Unable to process memory maps\\n\");\n\treturn;\n }\n\n stack_len = mr->total_data_size;\n\n /**\n * Munmap fails with EINVAL if there is an memory leak hence ignoring\n * munmap return value in this case\n */\n ret = munmap(mr->start_addr, stack_len);\n if (ret == -1) {\n printf(\"Unable to munmap : %d\\n\", errno);\n\treturn;\n }\n\n /* Restore the checkpointed processes memory map */\n ret = read_ckpt_file_and_restore(&proc_ctx, CHECKPT_PATH, mr);\n if (ret == FAILURE) {\n printf(\"Unable to read and restore the ckpt process\\n\");\n\treturn;\n }\n\n free(mr);\n mr = NULL;\n\n setcontext(&proc_ctx);\n}\n\nint main(int argc, char **argv) {\n unsigned long int start_addr = 0x5300000;\n void *stack_new_addr = (void *)start_addr, *stack_addr = NULL;\n\n if (argc == 1) {\n printf(\"Pass a checkpoint file path\\n\");\n exit(FAILURE);\n }\n\n strncpy(ckpt_filename, argv[1], strlen(argv[1]));\n\n /* Allocate memory for the new stack */\n stack_addr = mmap(stack_new_addr, 8192, (PROT_READ | PROT_WRITE),\n (MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED), -1, 0);\n if (stack_addr == MAP_FAILED) {\n printf(\"Unable to mmap : %d\\n\", errno);\n return FAILURE;\n }\n\n /* Move the stack pointer to new address */\n start_addr = start_addr + 8192;\n asm volatile(\"mov %0,%%rsp\" : : \"g\"(start_addr) : \"memory\");\n\n /* Restore the memory maps from the checkpoint */\n restore_memory_maps();\n}\n"
},
{
"alpha_fraction": 0.6630926132202148,
"alphanum_fraction": 0.6651949882507324,
"avg_line_length": 22.316177368164062,
"blob_id": "8a0ab6c293c0e47989f93b2b2f4807ba0cb44328",
"content_id": "39b27ed755e7a3d5713ad8956947804506bd6dce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Go",
"length_bytes": 9513,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 408,
"path": "/hw6/router.go",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/* Installation ----- https://tecadmin.net/install-go-on-ubuntu/# */\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"bytes\"\n\t\"unicode/utf8\"\n\t\"bufio\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype keyValueRequestDataArray struct {\n\tKeyValuePair []keyValueRequestDataFormat `json:\"keyvalue\"`\n}\n\ntype keyValueRequestDataFormat struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value,omitempty\"`\n}\n\nvar server = make(map[int]string)\nvar ports = make(map[int]string)\nvar server_cnt = 0\n\n/* TODO: improve hashing -> Consistent hashing\n * hashing implemented using simple modulo function\n */\nfunc hashing (KeyValuePair []keyValueRequestDataFormat)([][]keyValueRequestDataFormat) {\n\tserver_data_cnt := make([]int, server_cnt)\n\tserver_data := make([][]keyValueRequestDataFormat, server_cnt)\n\n\tfmt.Println(\"SERVER_CNT ========= \", server_cnt)\n\n\tx := 0\n\tvar row []keyValueRequestDataFormat\n\tfor _,v := range KeyValuePair {\n\t\ttemp := v.Key\n\t\ti := 0;\n\t\t/* Find ascii value of string for hashing */\n\t\tfor len(temp) > 0 {\n\t\t\tr, size := utf8.DecodeRuneInString(temp)\n\t\t\ti = i + int(r)\n\t\t\ttemp = temp[size:]\n\t\t}\n\t\thash := i % server_cnt\n\t\trow = server_data[hash]\n\t\trow = append(row, keyValueRequestDataFormat{v.Key, v.Value})\n\t\tserver_data[hash] = row\n\t\tserver_data_cnt[hash]++;\n\t\tx++;\n\t}\n\tfor index,val := range server_data {\n\t\tfmt.Println(index, val);\n\t}\n\treturn server_data\n}\n\nfunc formatDataForRequest(indx int, data []keyValueRequestDataFormat) ([]byte) {\n\tvar slice []keyValueRequestDataFormat\n\tvar i int\n\tvar v keyValueRequestDataFormat\n\tvar post_data keyValueRequestDataArray\n\tvar post_data_JSON []byte\n\tvar err_data error\n\tvar KeyValuePair_cnt int\n\n\tfmt.Println(\"SERVER :::: \"+server[indx])\n\tKeyValuePair_cnt = len(data)\n\tfmt.Println(\"Total number of keyVals = \", KeyValuePair_cnt)\n\tif KeyValuePair_cnt == 0 {\n\t\treturn post_data_JSON\n\t}\n\tslice = make([]keyValueRequestDataFormat, KeyValuePair_cnt)\n\tfor i,v = range data {\n\t\tfmt.Println(\"\\n\", v.Key, v.Value)\n\t\tslice[i] = v\n\t}\n\n\tpost_data = keyValueRequestDataArray{slice}\n\tpost_data_JSON, err_data = json.Marshal(post_data)\n\tif err_data != nil {\n\t\tfmt.Println(\"Error in JSON formatting !! 
\")\n\t}\n\treturn post_data_JSON\n}\n\nfunc makeRequest(indx int, post_data_JSON []byte, request_type string ) (string) {\n\tvar err_data error\n\tvar req *http.Request\n\tvar client *http.Client\n\tvar resp *http.Response\n\n\turl := server[indx]\n\n\tif request_type == \"PUT\" || request_type == \"POST\" || request_type == \"DELETE\" {\n\t\treq, err_data = http.NewRequest(request_type,\n\t\t\t\t\t\t\t\t\t\turl,\n\t\t\t\t\t\t\t\t\t\tbytes.NewBuffer(post_data_JSON))\n\t\tclient = &http.Client{}\n\t\tresp, err_data = client.Do(req)\n\t\tif err_data != nil {\n\t\t\tpanic(err_data)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn string(body)\n\t} else {\n\t\tfmt.Println(\"GET REQ STARTING !!\\n\")\n\t\tresp, err_data = http.Get(url)\n\n\t\tif err_data != nil {\n\t\t\tpanic(err_data)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn string(body)\n\t}\n}\n\n/* GetHandler handles the index route */\nfunc GetHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"GET HANDLER STARTING !!!!\\n\")\n\tvar slice []keyValueRequestDataFormat\n\n\tfor indx, _ := range server {\n\t\tpost_data_JSON := []byte(\"\")\n\t\tbody := makeRequest(indx, post_data_JSON, \"GET\")\n\n\t\tvar msg keyValueRequestDataArray\n\t\terr := json.Unmarshal([]byte(body), &msg)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor _,v := range msg.KeyValuePair {\n\t\t\tslice = append(slice, v)\n\t\t}\n\t}\n\trestore_server_details()\n\n\tresponse_data := keyValueRequestDataArray{slice}\n\t//fmt.Println(response_data)\n\tfmt.Fprint(w, response_data)\n\tfmt.Fprint(w, \"\\nGet Done !!\\n\")\n}\n\nfunc PostHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tdecodeJson := json.NewDecoder(r.Body)\n\t\tvar msg keyValueRequestDataArray\n\t\terr := decodeJson.Decode(&msg)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error here 1\\n\");\n\t\t\tpanic(err)\n\t\t}\n\n\t\t/* find the server using hash */\n\t\tserver_data := hashing(msg.KeyValuePair)\n\n\t\tvar slice []keyValueRequestDataFormat\n\t\tfor index, data := range server_data {\n\t\t\tif len(data) == 0 {\n\t\t\t\tfmt.Println(\"url %s has no data\", server[index])\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// formatting data for request\n\t\t\tpost_data_JSON := formatDataForRequest(index, data)\n\t\t\tfmt.Println(post_data_JSON)\n\n\t\t\t// forward to server\n\t\t\tbody := makeRequest(index, post_data_JSON, \"POST\")\n\t\t\t//fmt.Println(body)\n\n\t\t\tvar msg keyValueRequestDataArray\n\t\t\terr := json.Unmarshal([]byte(body), &msg)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error here 2\\n\");\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfor _,v := range msg.KeyValuePair {\n\t\t\t\tslice = append(slice, v)\n\t\t\t}\n\n\t\t}\n\t\tresponse_data := keyValueRequestDataArray{slice}\n\t\t//fmt.Println(response_data)\n\t\tfmt.Fprint(w, response_data)\n\t} else {\n\t\tfmt.Println(\"Protocol NOT supported !!\")\n\t}\n\trestore_server_details()\n}\n\nfunc PutHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"PUT\" {\n\t\tdecodeJson := json.NewDecoder(r.Body)\n\n\t\tvar msg keyValueRequestDataArray\n\t\terr := decodeJson.Decode(&msg)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(reflect.TypeOf(msg.KeyValuePair))\n\t\tfmt.Println(msg)\n\n\t\t/* find the server using hash */\n\t\tserver_data := hashing(msg.KeyValuePair)\n\n\t\tvar slice []keyValueRequestDataFormat\n\t\tfor indx, data := range server_data {\n\t\t\tfmt.Println(\"SERVER NUMBER 
=========== \", indx)\n\t\t\tif len(data) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// formatting data for request\n\t\t\tpost_data_JSON := formatDataForRequest(indx, data)\n\t\t\tfmt.Println(post_data_JSON)\n\n\t\t\t// forward to server\n\t\t\tbody := makeRequest(indx, post_data_JSON, \"PUT\")\n\t\t\tvar msg keyValueRequestDataArray\n\t\t\terr := json.Unmarshal([]byte(body), &msg)\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfor _,v := range msg.KeyValuePair {\n\t\t\t\tslice = append(slice, v)\n\t\t\t}\n\n\n\t\t}\n\n\t\tresponse_data := keyValueRequestDataArray{slice}\n\t\tfmt.Println(\"ROUTER RESPONSE ::::: \", response_data)\n\t\tfmt.Fprint(w, response_data)\n\t\t\n\t}\n}\n\n\nfunc DeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"DELETE\" {\n\t\tdecodeJson := json.NewDecoder(r.Body)\n\n\t\tvar msg keyValueRequestDataArray\n\t\terr := decodeJson.Decode(&msg)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(reflect.TypeOf(msg.KeyValuePair))\n\t\tfmt.Println(msg)\n\n\t\t/* find the server using hash */\n\t\tserver_data := hashing(msg.KeyValuePair)\n\n\t\tvar slice []keyValueRequestDataFormat\n\t\tfor indx, data := range server_data {\n\t\t\tif len(data) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// formatting data for request\n\t\t\tpost_data_JSON := formatDataForRequest(indx, data)\n\t\t\tfmt.Println(post_data_JSON)\n\n\t\t\t// forward to server\n\t\t\tbody := makeRequest(indx, post_data_JSON, \"DELETE\")\n\t\t\tvar msg keyValueRequestDataArray\n\t\t\terr := json.Unmarshal([]byte(body), &msg)\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfor _,v := range msg.KeyValuePair {\n\t\t\t\tslice = append(slice, v)\n\t\t\t}\n\n\n\t\t}\n\n\t\tresponse_data := keyValueRequestDataArray{slice}\n\t\tfmt.Println(\"ROUTER RESPONSE ::::: \", response_data)\n\t\tfmt.Fprint(w, response_data)\n\t\t\n\t}\n}\n\nfunc fetch(w http.ResponseWriter, r *http.Request) {\n\trestore_server_details()\n\tmake_server_addresses(\"/fetch\")\n\n\tif r.Method == \"POST\" {\n\t\tPostHandler(w, r)\n\t} else if r.Method == \"GET\" {\n\t\tGetHandler(w, r)\n\t} else {\n\t\tfmt.Print(\"Method not Supported on this end point!!\")\n\t\tfmt.Fprint(w, \"Method not Supported on this end point!!\")\n\t}\n}\n\nfunc query(w http.ResponseWriter, r *http.Request) {\n\trestore_server_details()\n\tmake_server_addresses(\"/query\")\n\n\tif r.Method == \"POST\" {\n\t\tPostHandler(w, r)\n\t} else if r.Method == \"GET\" {\n\t\tGetHandler(w, r)\n\t} else {\n\t\tfmt.Print(\"Method not Supported on this end point!!\")\n\t\tfmt.Fprint(w, \"Method not Supported on this end point!!\")\n\t}\n}\n\nfunc set(w http.ResponseWriter, r *http.Request) {\n\trestore_server_details()\n\tmake_server_addresses(\"/set\")\n\n\tif r.Method == \"PUT\" {\n\t\tPutHandler(w, r)\n\t} else {\n\t\tfmt.Print(\"Method not Supported on this end point!!\")\n\t\tfmt.Fprint(w, \"Method not Supported on this end point!!\")\n\t}\n}\n\nfunc deleteKey(w http.ResponseWriter, r *http.Request) {\n\trestore_server_details()\n\tmake_server_addresses(\"/delete\")\n\n\tif r.Method == \"DELETE\" {\n\t\tDeleteHandler(w, r)\n\t} else {\n\t\tfmt.Print(\"Method not Supported on this end point!!\")\n\t\tfmt.Fprint(w, \"Method not Supported on this end point!!\")\n\t}\n}\n\nfunc make_server_addresses(endpoint string){\n\tfor i, s := range server {\n\t\tserver[i] = s+\":\"+ports[i]+endpoint\n\t\tfmt.Println(server[i])\n\t}\n}\n\nfunc restore_server_details() {\n\tfmt.Println(\"Restore Server Details\")\n\tfile, err := 
os.Open(os.Args[3])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\ti := 0\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tserver_url := \"http://\"\n\t\tresult := strings.Split(scanner.Text(), \" \")\n\t\tserver_url += result[0]\n\t\tserver_port := result[1]\n\t\tserver[i] = server_url\n\t\tports[i] = server_port\n\t\tfmt.Println(server_url, server_port)\n\t\ti++\n\t}\n\tserver_cnt = i\n\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc init() {\n\tlog.SetFlags(log.Lmicroseconds | log.Lshortfile)\n\tflag.Parse()\n\trestore_server_details()\n\tserver_cnt = len(server)\n}\n\nfunc main() {\n\t/* flagPort is the open port the application listens on */\n\tfmt.Println(os.Args[1])\n\tfmt.Println(os.Args[2])\n\tvar (flagPort = flag.String(\"port\", os.Args[2], \"Port to listen on\"))\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/query\", query)\n\tmux.HandleFunc(\"/fetch\", fetch)\n\tmux.HandleFunc(\"/set\", set)\n\tmux.HandleFunc(\"/delete\", deleteKey)\n\n\tlog.Printf(\"listening on port %s\", *flagPort)\n\tlog.Fatal(http.ListenAndServe(os.Args[1]+\":\"+*flagPort, mux))\n}\n"
},
{
"alpha_fraction": 0.6343519687652588,
"alphanum_fraction": 0.6343519687652588,
"avg_line_length": 23.33333396911621,
"blob_id": "f9b13c46791beaf64b7668513593e24ae636c4e1",
"content_id": "775f3a95a3b07001d6eba44ee593aac9967d45cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 949,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 39,
"path": "/hw2/sme/sme.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __SME_H_\n#define __SME_H_\n\n#include <stdbool.h>\n#include <stdint.h>\n\ntypedef struct sme_mech sme_mech_t;\ntypedef struct sme_ops sme_ops_t;\ntypedef struct sme_fd sme_fd_t;\ntypedef struct sme_proc sme_proc_t;\ntypedef enum fd_event fd_event_t;\n\n/**\n * callbacks for event\n */\n\n/*\n * fd event callback\n */\ntypedef void (*sme_fd_cb)(sme_mech_t *mech, sme_fd_t *fde, void *data);\n/*\n * process event callback\n */\ntypedef void (*sme_proc_cb)(sme_mech_t *mech, sme_proc_t *pe, void *data);\n\n/*\n * Add a fd event\n */\nsme_fd_t *sme_comm_add_fd(sme_mech_t *mech, int fd, fd_event_t ev, sme_fd_cb cb,\n void *cb_data);\n\nsme_proc_t *sme_comm_add_proc(sme_mech_t *mech, int pid, int flags,\n sme_proc_cb cb, void *cb_data);\n\nint sme_comm_loop_once(sme_mech_t *mech);\nint sme_comm_loop_wait(sme_mech_t *mech);\nbool sme_comm_loop_proc(sme_mech_t *mech);\nbool sme_loop_has_events(sme_mech_t *mech);\n#endif\n"
},
{
"alpha_fraction": 0.5885673761367798,
"alphanum_fraction": 0.5965067148208618,
"avg_line_length": 20.38867950439453,
"blob_id": "6b430d4dd21dca408c16004ae34d64ee98a2528d",
"content_id": "9d289c6d7063891c78033ab82565973a8d6b4e06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5668,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 265,
"path": "/hw3/buddy.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"buddy.h\"\n#include <unistd.h> // for sysconf()\n#include <string.h>\n#include \"utility.h\"\n#include <assert.h>\n\n/* 1MB initial memory in bytes */\n#define INITIAL_MEMORY (1048576)\n#define NO_OF_NODES\t (2*20)\n\n/**\n * Need Dynamic memory allocation for metadata as well and this also needs\n * be managed\n */\nbuddy_node_t *root = NULL;\nbuddy_node_t pool[NO_OF_NODES];\nuint8_t pool_free[NO_OF_NODES];\n\nvoid _init_buddy_node_pool()\n{\n\tint i = 0;\n\n\tfor (i = 0; i < NO_OF_NODES; i++) {\n\t\tmemset(&pool[i], 0, sizeof(pool[i]));\n\t\tpool[i].free = 1;\n\t\tpool_free[i] = 1;\n\t}\n}\n\n/* Return a node structure from pool of buddy_nodes */\nbuddy_node_t *_get_node()\n{\n\tint i = 0;\n\n\tfor (i = 0; i < NO_OF_NODES; i++) {\n\t\tif (1 == pool_free[i]) {\n\t\t\tpool_free[i] = 0;\n\t\t\treturn &pool[i];\n\t\t}\n\t}\n\treturn NULL;\n}\n\nvoid _give_up_node(buddy_node_t *node)\n{\n\tint i = 0;\n\n\tfor (i = 0; i < NO_OF_NODES; i++) {\n\t\tif (node == &pool[i]) {\n\t\t\tpool_free[i] = 1;\n\t\t}\n\t}\n}\n\nvoid print_pool()\n{\n\tint i = 0;\n\n\tfor (i = 0; i < NO_OF_NODES; i++) {\n\t\tdebug(\"Pool Node %d: Free %d\", i, pool_free[i]);\n\t}\n}\n\nvoid print_tree(buddy_node_t *root)\n{\n\tif (root != NULL) {\n\t\tdebug(\"Tree Node Size: %zu, Free %d, Address %p\",\n\t\t\t\troot->size, root->free, root->start);\n\t\tprint_tree(root->left);\n\t\tprint_tree(root->right);\n\t}\n}\n\nvoid mark_parent_as_allocated(buddy_node_t *root, buddy_node_t *node)\n{\n\t/* Inefficient */\n\tif (root != NULL && root != node) {\n\t\tif (root->left == node || root->right == node) {\n\t\t\tif (root->left->free == 0 && root->right->free == 0) {\n\t\t\t\troot->free = 0;\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\tmark_parent_as_allocated(root->left, node);\n\t\tmark_parent_as_allocated(root->right, node);\n\t}\n}\n\nvoid *_setup_metadata(buddy_node_t *node, size_t size)\n{\n\tif (node) {\n\t\tif (node->size == size) {\n\t\t\tnode->free = 0;\n\t\t\tmark_parent_as_allocated(root, node);\n\t\t\treturn node->start;\n\t\t}\n\t\t/* split */\n\t\tnode->left = _get_node();\n\t\tassert(node->left != NULL);\n\t\tnode->left->free = 1;\n\t\tnode->left->size = node->size / 2;\n\t\tnode->left->start = node->start;\n\n\t\tnode->right = _get_node();\n\t\tassert(node->right != NULL);\n\t\tnode->right->free = 1;\n\t\tnode->right->size = node->size / 2;\n\t\tnode->right->start = (uint8_t *)node->start + node->left->size;\n\n\t\treturn _setup_metadata(node->left, size);\n\t}\n\treturn NULL;\n}\n\nbuddy_node_t *_check_for_available_block(buddy_node_t *root, size_t size)\n{\n\t/* check if a block is of size >= to request */\n\tbuddy_node_t *mem = NULL;\n\tif (root != NULL && root->size >= size) {\n\t\tdebug(\"NODE - size %zu, free %d\", root->size, root->free);\n\t\t/* if it has children, see about the children */\n\t\tif (root->free && root->left == NULL) {\n\t\t\tif (size == root->size) {\n\t\t\t\tmem = root;\n\t\t\t\tgoto out;\n\t\t\t}\n\t\t}\n\t\tmem = _check_for_available_block(root->left, size);\n\t\tif (NULL != mem)\n\t\t\tgoto out;\n\t\tmem = _check_for_available_block(root->right, size);\n\t\tif (NULL != mem)\n\t\t\tgoto out;\n\t\tmem = root;\n\t}\n\nout:\n\treturn mem;\n}\n\n/* Obtain memory from kernel */\nvoid *_request_from_kernel(size_t bytes)\n{\n\tsize_t page_size = 0;\n\tuint32_t pg_count = 0;\n\tvoid *mem = NULL;\n\n\tpage_size = sysconf(_SC_PAGESIZE);\n\tdebug(\"page size is %zu\", page_size);\n\t/* all calculations are in bytes here */\n\tpg_count = bytes / page_size;\n\tif (pg_count < 1) 
\n\t\tpg_count = 1;\n\n\tdebug(\"Requesting %u page(s)\", pg_count);\n\tmem = sbrk(pg_count);\n\tif (NULL == mem) {\n\t\tdebug(\"Failed to increase size of heap\");\n\t\tgoto out;\n\t}\n\tdebug(\"New end of heap %p\", mem);\n\nout:\n\treturn mem;\n}\n\nvoid *_alloc_memory(size_t size)\n{\n\tbuddy_node_t *temp = NULL;\n\tvoid *mem = NULL;\n\n\tif (NULL == root) {\n\t\t/**\n\t\t * if this is first call\n\t\t * _request_from_kernel()\n\t\t */\n\t\t_init_buddy_node_pool();\n\t\troot = _get_node();\n\t\tdebug(\"%p %u\", root, root->free);\n\t\tassert(root != NULL);\n\t\troot->start = _request_from_kernel(INITIAL_MEMORY);\n\t\troot->size = INITIAL_MEMORY;\n\t\tassert(root->start != NULL);\n\t}\n\n\t/* check satisfaction, request for more if can't satisfy */\n\ttemp = _check_for_available_block(root, size);\n\tif (NULL == temp) {\n\t\tdebug(\"TODO request for more memory\");\n\t\t/* TODO request for more memory */\n\t\tgoto null_return;\n\t}\n\tdebug(\"Available node: size %zu, free %d\", temp->size, temp->free);\n\t/* set_up_metadata */\n\tmem = _setup_metadata(temp, size);\n\tprint_tree(root);\n\n\treturn mem;\n\nnull_return:\n\treturn NULL;\n}\n\nvoid _mark_parent_as_free(buddy_node_t *root, buddy_node_t *node)\n{\n\t/* Inefficient */\n\tif (root != NULL) {\n\t\tdebug(\"root: %p\", root);\n\t\tdebug(\"l: %p | r: %p | node: %p\", root->left, root->right, node);\n\t\tif (root->left == node || root->right == node) {\n\t\t\troot->free = 1;\n\t\t\tif (root->left->free == 1 && root->right->free == 1) {\n\t\t\t\t/* coalescing */\n\t\t\t\tdebug(\"coalescing...\");\n\t\t\t\t_give_up_node(root->left);\n\t\t\t\troot->left = NULL;\n\t\t\t\t_give_up_node(root->right);\n\t\t\t\troot->right = NULL;\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\t\t_mark_parent_as_free(root->left, node);\n\t\t_mark_parent_as_free(root->right, node);\n\t}\n}\n\nbuddy_node_t *_mark_node_as_free(buddy_node_t *root, void *mem)\n{\n\tbuddy_node_t *temp = NULL;\n\tif (NULL != root) {\n\t\ttemp = _mark_node_as_free(root->left, mem);\n\t\tif (NULL != temp)\n\t\t\treturn temp;\n\t\tdebug(\"Current node has start address %p\", root->start);\n\t\tif (root->start == mem) {\n\t\t\troot->free = 1;\n\t\t\treturn root;\n\t\t}\n\t\ttemp = _mark_node_as_free(root->right, mem);\n\t\tif (NULL != temp)\n\t\t\treturn temp;\n\t}\n\treturn NULL;\n}\n\nvoid _reclaim_memory(void *mem)\n{\n\tbuddy_node_t * temp = NULL;\n\n\t/* find the block and mark it free */\n\ttemp = _mark_node_as_free(root, mem);\n\tif (NULL == temp) {\n\t\tdebug(\"GOT NULL from free\");\n\t\tgoto out;\n\t}\n\t/**\n\t * mark the parent free if it is not\n\t * try coalescing blocks\n\t */\n\t_mark_parent_as_free(root, temp);\nout:\n\tdebug(\"after reclaiming memory\");\n\tprint_tree(root);\n\treturn;\n}\n"
},
{
"alpha_fraction": 0.5794392228126526,
"alphanum_fraction": 0.5831775665283203,
"avg_line_length": 18.10714340209961,
"blob_id": "5de37ca8cefe92b8eba5ec19d346068de1869916",
"content_id": "f53526372bc4443a41b9765b6a8d14c031139f57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 28,
"path": "/hw3/realloc.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"mymalloc.h\"\n#include \"buddy.h\"\n#include \"utility.h\"\n#include <string.h>\n#include <errno.h>\n\n//void *realloc(void *ptr, size_t size)\nvoid *myrealloc(void *ptr, size_t size)\n{\n void *mem = NULL;\n\n if (0 >= size) {\n goto out;\n }\n\n /* TODO round-off size of the next higher power of 2*/\n /* check if size can be satisfied */\n mem = mymalloc(size);\n if (NULL != mem) {\n debug(\"mem %zu allocated\", size);\n\t\tmemcpy(mem, ptr, sizeof(*ptr));\n\t\tmyfree(ptr);\n\t\tgoto out;\n }\n\nout:\n return mem;\n}\n"
},
{
"alpha_fraction": 0.5502063035964966,
"alphanum_fraction": 0.5593764185905457,
"avg_line_length": 19.009174346923828,
"blob_id": "960f97d06edef35dfdd04ac53f5394eadab4d220",
"content_id": "416e8fa60479472bef7b86888d2b10ba957121cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2181,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 109,
"path": "/hw2/sme/sme_epoll.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"sme.h\"\n#include \"list.h\"\n#include \"sme_def.h\"\n#include <errno.h>\n#include <stddef.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/epoll.h>\n\ntypedef struct e_poll_data e_poll_data_t;\n\nstruct e_poll_data {\n sme_mech_t *m;\n /* epoll create fd */\n int epoll_fd;\n};\n\nsme_mech_t *epoll_mech_init() {\n sme_mech_t *mech;\n e_poll_data_t *d;\n int epoll_fd = 0;\n\n mech = calloc(1, sizeof(*mech));\n if (!mech)\n return NULL;\n\n d = calloc(1, sizeof(*d));\n if (!d)\n return NULL;\n mech->priv_data = d;\n\n epoll_fd = epoll_create1(0);\n if (epoll_fd == -1) {\n printf(\"Error while epoll_create: %d\\n\", errno);\n exit(1);\n }\n\n d->m = mech;\n d->epoll_fd = epoll_fd;\n\n return mech;\n}\n\nsme_fd_t *epoll_mech_add_fd(sme_mech_t *mech, int fd, fd_event_t ev,\n sme_fd_cb cb, void *cb_data) {\n sme_fd_t *fde;\n e_poll_data_t *ed = mech->priv_data;\n int ret = -1;\n struct epoll_event event;\n\n event.data.fd = fd;\n event.events = EPOLLIN;\n ret = epoll_ctl(ed->epoll_fd, EPOLL_CTL_ADD, fd, &event);\n if (ret == -1) {\n printf(\"Epoll ctl call failed :%d\\n\", errno);\n exit(1);\n }\n\n fde = sme_comm_add_fd(mech, fd, ev, cb, cb_data);\n return fde;\n}\n\nbool epoll_mech_loop_once(sme_mech_t *mech) {\n int eret;\n int tv = 5000; // 5 seconds\n sme_fd_t *fde;\n struct epoll_event events;\n\n e_poll_data_t *ed = mech->priv_data;\n\n eret = epoll_wait(ed->epoll_fd, &events, 1, tv);\n\n if (eret == -1) {\n printf(\"epoll error :%d\\n\", errno);\n return false;\n }\n\n if (eret > 0) {\n for (fde = ed->m->fd_events; fde; fde = fde->next) {\n if (events.data.fd == fde->fd) {\n\n LIST_REMOVE(ed->m->fd_events, fde);\n epoll_ctl(ed->epoll_fd, EPOLL_CTL_DEL, fde->fd, NULL);\n /*\n * Trigger the callback\n */\n fde->cb(ed->m, fde, fde->cb_data);\n break;\n }\n }\n }\n return true;\n}\n\nint epoll_mech_loop_wait(sme_mech_t *mech) {\n bool ret;\n /**\n * Run as much as we have events\n */\n while (sme_loop_has_events(mech)) {\n ret = epoll_mech_loop_once(mech);\n if (ret != true) {\n printf(\"loop once failed\\n\");\n return 1;\n }\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5477317571640015,
"alphanum_fraction": 0.5640078783035278,
"avg_line_length": 22.039894104003906,
"blob_id": "253436ef173774589da58beb29a229f3bc6ea4c2",
"content_id": "9476c6a09f842b45f55837b4fc974ba3cad88829",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 8663,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 376,
"path": "/hw2/source/master.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#define _GNU_SOURCE\n#include \"../sme/sme_epoll.h\"\n#include \"../sme/sme_select.h\"\n#include <errno.h>\n#include <fcntl.h>\n#include <getopt.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\n#define SUCCESS 0\n#define FAILURE 1\n\ndouble g_summation = 0;\nchar g_worker_path[256] = {'\\0'};\nint g_num_workers = 0;\nint g_active_workers = 0;\nint g_current_n = 1;\nint g_in_x = 0;\nint g_in_n = 0;\n\ntypedef struct wait_mech {\n char mechanism_name[11];\n int mechanism_id;\n} wait_mech;\n\nvoid pid_handle(sme_mech_t *mech, sme_proc_t *proce, void *data);\nvoid fd_handle(sme_mech_t *mech, sme_fd_t *fde, void *data);\nvoid fd_handle_epoll(sme_mech_t *mech, sme_fd_t *fde, void *data);\n\nwait_mech valid_wait_mech[4] = {\n {\"sequential\", 0}, {\"select\", 1}, {\"poll\", 2}, {\"epoll\", 3}};\n\nvoid str_reverse(char *str) {\n int var = 0, i = 0, j = 0;\n j = strlen(str) - 1;\n\n while (i < j) {\n var = str[i];\n str[i] = str[j];\n str[j] = var;\n i++;\n j--;\n }\n}\n\nvoid integer_to_string(int number, char *str) {\n int rem = -1, i = 0;\n\n while (number != 0) {\n rem = number % 10;\n number = number / 10;\n str[i] = rem + 48;\n i++;\n }\n str_reverse(str);\n}\n\nbool validate_and_get_args(int argc, char **argv, char *worker_path,\n char *wait_mechanism, int *num_workers, int *x,\n int *n) {\n int ch;\n\n if (argc == 1) {\n printf(\"Supply arguments with -x and -n\\n\");\n exit(1);\n }\n\n while (1) {\n int option_index = 0;\n static struct option long_options[] = {\n {\"worker_path\", required_argument, 0, 'a'},\n {\"num_workers\", required_argument, 0, 'b'},\n {\"wait_mechanism\", required_argument, 0, 'c'},\n {\"x\", required_argument, 0, 'x'},\n {\"n\", required_argument, 0, 'n'},\n {0, 0, 0, 0}};\n\n ch = getopt_long(argc, argv, \"a:b:c:x:n:\", long_options, &option_index);\n if (ch == -1)\n break;\n\n switch (ch) {\n case 'a':\n strcpy(worker_path, optarg);\n strcpy(g_worker_path, worker_path);\n break;\n case 'b':\n *num_workers = atoi(optarg);\n g_num_workers = *num_workers;\n break;\n case 'c':\n strcpy(wait_mechanism, optarg);\n break;\n case 'x':\n *x = atoi(optarg);\n g_in_x = *x;\n break;\n case 'n':\n *n = atoi(optarg);\n g_in_n = *n;\n break;\n default:\n printf(\"Invalid arguments\\n\");\n return false;\n }\n }\n\n if ((*n < 0) || (*num_workers <= 0) || (strcmp(worker_path, \"\") == 0) ||\n (strcmp(wait_mechanism, \"\") == 0)) {\n printf(\"Invalid arguments\\n\");\n printf(\"Usage ./master --worker_path ./worker \"\n \"--num_workers 5 --wait_mechanism MECHANISM \"\n \"-x 2 -n 12\\n\");\n return false;\n }\n\n return true;\n}\n\nint process_command(const char *worker_path, int x, int n, int pipe_fd[2]) {\n int ret = -1;\n char *arg[6] = {0};\n char x_str[10] = {'\\0'}, n_str[10] = {'\\0'};\n\n /* Close the read end of the unnamed pipe */\n ret = close(pipe_fd[0]);\n if (ret == -1) {\n printf(\"Unable to close read end of pipe : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n /**\n * Duplicate the file descriptors of the standard output as to\n * get the output in the unnamed pipe\n */\n ret = dup2(pipe_fd[1], 1); /* duplicate the std output */\n if (ret == -1) {\n printf(\"Unable to duplicate stdout FD : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n integer_to_string(x, x_str);\n integer_to_string(n, n_str);\n arg[0] = \".\";\n arg[1] = \"-x\";\n arg[2] = x_str;\n arg[3] = \"-n\";\n arg[4] = n_str;\n arg[5] = NULL;\n\n ret = 
execv(worker_path, arg);\n if (ret == -1) {\n printf(\"Unable to create new worker thread : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n exit(0);\n\nerror_out:\n return ret;\n}\n\ntypedef struct seq_cb_data {\n char *worker_path;\n int fd;\n int x;\n int n;\n} seq_cb_data_t;\n\nint exec_worker_spawn(sme_mech_t *m, const char *worker_path, int x, int n,\n int flag) {\n int pipe_fd[2];\n int pid = 0, ret = -1;\n\n /* create pipe */\n ret = pipe2(pipe_fd, O_NONBLOCK);\n if (ret == -1) {\n printf(\"Unable to create an unnamed pipe :%d\\n\", errno);\n goto error_out;\n }\n\n pid = fork();\n if (pid < 0) {\n printf(\"Unable to fork a child process :%d\\n\", errno);\n goto error_out;\n } else if (pid == 0) {\n /* In child */\n ret = process_command(worker_path, x, n, pipe_fd);\n if (ret == FAILURE) {\n printf(\"Unable to process command\\n\");\n goto error_out;\n }\n } else {\n /* In parent */\n g_active_workers++;\n g_current_n++;\n\n close(pipe_fd[1]);\n\n /* Sequential mechanism selected */\n if (flag == 0) {\n int *fd = calloc(1, sizeof(int));\n *fd = pipe_fd[0];\n select_mech_add_proc(m, pid, 0, pid_handle, fd);\n } else if (flag == 1) {\n /* Select mechanism selected */\n select_mech_add_fd(m, pipe_fd[0], 0, fd_handle, 0);\n } else if (flag == 2) {\n /* Epoll mechanism selected */\n epoll_mech_add_fd(m, pipe_fd[0], 0, fd_handle_epoll, 0);\n }\n }\n\nerror_out:\n return 0;\n}\n\nvoid pid_handle(sme_mech_t *mech, sme_proc_t *proce, void *data) {\n double worker_data;\n int *fd = (int *)data;\n\n read(*fd, &worker_data, sizeof(worker_data));\n\n printf(\"Received data : %0.9f\\n\", worker_data);\n g_summation += worker_data;\n\n g_active_workers--;\n\n if ((g_active_workers < g_num_workers) && (g_current_n <= g_in_n)) {\n\n exec_worker_spawn(mech, g_worker_path, g_in_x, g_current_n, 0);\n }\n}\n\nvoid fd_handle(sme_mech_t *m, sme_fd_t *fde, void *data) {\n double worker_data = 0;\n\n read(fde->fd, &worker_data, sizeof(worker_data));\n printf(\"Received data : %0.9f\\n\", worker_data);\n g_summation += worker_data;\n\n g_active_workers--;\n\n if ((g_active_workers < g_num_workers) && (g_current_n <= g_in_n)) {\n\n exec_worker_spawn(m, g_worker_path, g_in_x, g_current_n, 1);\n }\n}\n\nvoid fd_handle_epoll(sme_mech_t *m, sme_fd_t *fde, void *data) {\n double worker_data = 0;\n\n read(fde->fd, &worker_data, sizeof(worker_data));\n printf(\"Received data : %0.9f\\n\", worker_data);\n g_summation += worker_data;\n\n g_active_workers--;\n\n if ((g_active_workers < g_num_workers) && (g_current_n <= g_in_n)) {\n\n exec_worker_spawn(m, g_worker_path, g_in_x, g_current_n, 2);\n }\n}\n\ndouble epoll_mech(char *worker_path, int num_workers, int x, int n) {\n int k = 1;\n sme_mech_t *m = NULL;\n m = epoll_mech_init();\n\n /* Launch initial num of workers */\n do {\n /* Implemented epoll mechanism by monitoring fd's */\n exec_worker_spawn(m, g_worker_path, x, k, 2);\n k++;\n } while ((k <= n) && (k <= num_workers));\n\n epoll_mech_loop_wait(m);\n\n g_summation += 1;\n\n return 0;\n}\n\ndouble select_mech(char *worker_path, int num_workers, int x, int n) {\n int k = 1;\n sme_mech_t *m = NULL;\n m = select_mech_init();\n\n /* Launch initial num of workers */\n do {\n /* Implemented select mechanism by monitoring fd's */\n exec_worker_spawn(m, g_worker_path, x, k, 1);\n k++;\n } while ((k <= n) && (k <= num_workers));\n\n select_mech_loop_wait(m);\n\n g_summation += 1;\n\n return 0;\n}\n\ndouble sequential_mech(char *worker_path, int num_workers, int x, int n) {\n int k = 1;\n sme_mech_t *m = 
NULL;\n m = select_mech_init();\n\n /* Launch initial num of workers */\n do {\n /**\n * Implemented sequential using select mechanism itself by\n * monitoring pid instead of fd's\n */\n exec_worker_spawn(m, g_worker_path, x, k, 0);\n k++;\n } while ((k <= n) && (k <= num_workers));\n\n select_mech_loop_wait(m);\n\n g_summation += 1;\n\n return 0;\n}\n\nint main(int argc, char **argv) {\n bool ret = false;\n int x = 0, n = 0, mech_id = 0, no = 0, num_workers = 0;\n char worker_path[256] = {'\\0'};\n char wait_mechanism[12] = {'\\0'};\n\n ret = validate_and_get_args(argc, argv, worker_path, wait_mechanism,\n &num_workers, &x, &n);\n if (ret == false) {\n printf(\"Argument parsing failed\\n\");\n goto error_out;\n }\n\n while (no != 4) {\n if (strcmp(wait_mechanism, valid_wait_mech[no].mechanism_name) == 0) {\n mech_id = valid_wait_mech[no].mechanism_id;\n printf(\"Mechanism selected : %s\\n\", valid_wait_mech[no].mechanism_name);\n break;\n }\n no++;\n }\n\n switch (mech_id) {\n case 0:\n sequential_mech(worker_path, num_workers, x, n);\n break;\n case 1:\n select_mech(worker_path, num_workers, x, n);\n break;\n case 3:\n epoll_mech(worker_path, num_workers, x, n);\n break;\n\n default:\n printf(\"Unsupported mechanism\\n\");\n }\n\n printf(\"Summation is : %0.9f\\n\", g_summation);\n\n return SUCCESS;\n\nerror_out:\n return FAILURE;\n}\n"
},
{
"alpha_fraction": 0.5635388493537903,
"alphanum_fraction": 0.5662198662757874,
"avg_line_length": 17.284313201904297,
"blob_id": "40609eec810f002104b2b5fcdc8f4c5673b716ec",
"content_id": "2b0c7dea736ab1bebfbdb44f2bfd7fd288316c39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1865,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 102,
"path": "/hw2/sme/sme_sys.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"sme.h\"\n#include \"list.h\"\n#include \"sme_def.h\"\n#include <stddef.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n\nsme_fd_t *sme_comm_add_fd(sme_mech_t *mech, int fd, fd_event_t ev, sme_fd_cb cb,\n void *cb_data) {\n sme_fd_t *fde;\n\n if (fd < 0)\n return NULL;\n\n fde = calloc(1, sizeof(*fde));\n if (fde == NULL) {\n return NULL;\n }\n\n fde->mech = mech;\n fde->fd = fd;\n fde->ev = ev;\n fde->cb = cb;\n fde->cb_data = cb_data;\n\n /* Add to fds to the events list */\n\n LIST_ADD_END(mech->fd_events, fde);\n\n return fde;\n}\n\nsme_proc_t *sme_comm_add_proc(sme_mech_t *mech, int pid, int flags,\n sme_proc_cb cb, void *cb_data) {\n sme_proc_t *proce;\n\n if (pid < 0)\n return NULL;\n\n proce = calloc(1, sizeof(*proce));\n if (proce == NULL) {\n return NULL;\n }\n\n proce->mech = mech;\n proce->pid = pid;\n proce->flags = flags;\n proce->cb = cb;\n proce->cb_data = cb_data;\n\n /* Add to pid to the events list */\n\n LIST_ADD_END(mech->proc_events, proce);\n\n return proce;\n}\n\nbool sme_comm_loop_proc(sme_mech_t *mech) {\n int ws;\n int ret;\n sme_proc_t *proce;\n sme_proc_cb proc_cb;\n void *cb_data;\n\n proce = mech->proc_events;\n if (!proce)\n return false;\n\n for (; proce; proce = proce->next) {\n ret = waitpid(proce->pid, &ws, proce->flags);\n if (ret == -1) {\n printf(\"wait failed\");\n return false;\n } else if (WIFEXITED(ws)) {\n\n break;\n }\n }\n\n if (!proce)\n /* No exit */\n return true;\n\n proc_cb = proce->cb;\n cb_data = proce->cb_data;\n\n LIST_REMOVE(mech->proc_events, proce);\n\n /**\n * Trigger proc event callback\n */\n proc_cb(mech, proce, cb_data);\n\n return true;\n}\n\nbool sme_loop_has_events(sme_mech_t *mech) {\n return ((mech->fd_events != NULL) || (mech->proc_events != NULL));\n}\n"
},
{
"alpha_fraction": 0.5663687586784363,
"alphanum_fraction": 0.5703409314155579,
"avg_line_length": 19.97916603088379,
"blob_id": "81e1ad61a0504a51c7a161bff129a0fb2c6ed4f4",
"content_id": "38d11c12dd449543be3586c4a0a34486e66d7703",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6042,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 288,
"path": "/test/my_malloc_bfs.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <math.h>\n\n#define MAX_BLOCK_SIZE_POW 32\n#define BLOCK_AVAILABLE 0\n#define BLOCK_ALLOCATED 1\n#define BLOCK_SPLIT 2\n\n/* Tree node struct */\ntypedef struct node\n{\n struct node *parent;\n struct node *left;\n struct node *right;\n void *physical_addr;\n unsigned int block_size_pow;\n unsigned int state;\n}node;\n\nnode *root = NULL;\n\n/* Queue Functions and struct */\ntypedef struct q_node\n{\n struct q_node *next;\n struct node *data;\n}q_node;\n\ntypedef struct queue\n{\n q_node *head;\n q_node *tail;\n}queue;\n\nvoid queue_append(queue *Q, node *n)\n{\n q_node *q = NULL;\n\tif (n != NULL) {\n\t\tprintf(\"Append to queue\\n\");\n\t\tq = (q_node *) malloc(sizeof(q_node));\n\t\tq -> next = NULL;\n\t\tq -> data = n;\n\t\tif (Q->head == NULL) {\n\t\t Q->head = q;\n\t\t Q->tail = q;\n\t\t}\n\t\telse {\n\t\t Q->tail -> next = q;\n\t\t Q->tail = q;\n\t\t}\n\t}\n}\n\nvoid queue_pop(queue *Q)\n{\n q_node *q = Q->head -> next;\n free(Q->head);\n Q->head = q;\n if (NULL == Q->head) {\n Q->tail = NULL;\n }\n}\n\nvoid clear_queue(queue *Q)\n{\n\tq_node *q = Q->head;\n\twhile (q) {\n\t\tQ->head = q->next;\n\t\tfree(q);\n\t\tq = Q->head;\n\t}\n\tQ->head = NULL;\n\tQ->tail = NULL;\n}\n\n/* Tree Functions */\nnode *initialize_node(node *parent, unsigned int block_size_pow)\n{\n node *n = NULL;\n n = (node *) malloc(sizeof(node));\n n->parent = parent;\n n->left = NULL;\n n->right = NULL;\n n->physical_addr = NULL;\n n->block_size_pow = block_size_pow;\n n->state = BLOCK_AVAILABLE;\n return n;\n}\n\nnode *split(node *parent, unsigned int req_block_size_pow)\n{\n\twhile ((req_block_size_pow < parent->block_size_pow)\n\t\t\t&& (parent->block_size_pow > 3)) {\n\t\tparent->left = initialize_node(parent, parent->block_size_pow - 1);\n\t\tparent->right = initialize_node(parent, parent->block_size_pow - 1);\n\t\tparent->state = BLOCK_SPLIT;\n\t\tparent = parent->left;\n\t}\n\treturn parent;\n}\n\nvoid coalesce(node *parent)\n{\n if ((BLOCK_AVAILABLE == parent -> left -> state) \n\t \t\t&& (BLOCK_AVAILABLE == parent -> right -> state)) {\n free(parent -> left);\n free(parent -> right);\n parent -> state = BLOCK_AVAILABLE;\n }\n else {\n printf(\"Child is not free!\\n\");\n printf(\"Left child: %d\\n\", parent -> left -> state);\n printf(\"Right child: %d\\n\", parent -> right -> state);\n }\n}\n\nnode *find_block(unsigned int req_block_size_pow)\n{\n node *n = NULL;\n node *allocated_node = NULL;\n queue Q;\n Q.head = NULL;\n Q.tail = NULL;\n\n queue_append(&Q, root);\n\t\n while (Q.head != NULL) {\n\t\tprintf(\"Find a new block for allocation\\n\");\n\t\tn = Q.head -> data;\n\t\tif ((req_block_size_pow == n-> block_size_pow)\n\t\t\t\t&& (BLOCK_AVAILABLE == n-> state)) {\n\t\t\tprintf(\"new Node allocated\");\n\t\t\tn -> state = BLOCK_ALLOCATED;\n\t\t\tallocated_node = n;\n\t\t\tbreak;\n\t\t}\n\t\telse if ((req_block_size_pow == n-> block_size_pow)\n\t\t\t\t&& (BLOCK_AVAILABLE != n-> state)) {\n\t\t\tprintf(\"Leaf node unavailable\\n\");\n queue_pop(&Q);\n }\n else if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t\t&& (BLOCK_SPLIT == n-> state)) {\n\t\t\tprintf(\"Node has children\\n\");\n queue_append(&Q, n -> left);\n queue_append(&Q, n -> right);\n queue_pop(&Q);\n }\n else if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t\t&& (BLOCK_ALLOCATED == n-> state)) {\n\t\t\tprintf(\"Node is already allocated\\n\");\n queue_pop(&Q);\n }\n else if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t\t&& BLOCK_AVAILABLE == n-> 
state) {\n\t\t\tprintf(\"Split larger nodes\\n\");\n\t\t\tallocated_node = split(n, req_block_size_pow);\n\t\t\tbreak;\n }\n }\n\tclear_queue(&Q);\n return allocated_node;\n}\n\nvoid *my_malloc(size_t size)\n{\n unsigned int req_block_size_pow = 3;\n size_t block_size = 8;\n node *allocated_node = NULL;\n void *addr = NULL;\n while (size > block_size) {\n block_size *= 2;\n req_block_size_pow++;\n }\n \n allocated_node = find_block(req_block_size_pow);\n if (NULL == allocated_node) {\n printf(\"Allocation failed!\\n\");\n }\n else {\n \taddr = malloc(size);\n \tallocated_node -> physical_addr = addr;\n printf(\"Allocated memory!\\n\");\n }\n return addr;\n}\n\nvoid free_block(void *addr)\n{\n\tnode *n = NULL;\n\tnode *p = NULL;\n\tqueue Q;\n\tQ.head = NULL;\n\tQ.tail = NULL;\n\n\tqueue_append(&Q, root);\n\n\twhile(Q.head != NULL) {\n\t\tn = Q.head -> data;\n\t\tif(addr == n-> physical_addr) {\n\t\t\tn -> physical_addr = NULL;\n\t\t\tn -> state = BLOCK_AVAILABLE;\n\t\t\tp = p -> parent;\n\t\t\twhile(p) {\n\t\t\t\tif((BLOCK_AVAILABLE == p -> left -> state)\n\t\t\t\t\t\t&& (BLOCK_AVAILABLE == p -> right -> state)) {\n\t\t\t\t\tcoalesce(p);\n\t\t\t\t\tp = p -> parent;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tqueue_append(&Q, n -> left);\n\t\t\tqueue_append(&Q, n -> right);\n\t\t\tqueue_pop(&Q);\n\t\t}\n\t}\n\tclear_queue(&Q);\n}\n\nvoid my_free(void *addr)\n{\n\tfree_block(addr);\n\tfree(addr);\n}\n\n/* Print nodes at a given level */\nvoid printGivenLevel(struct node* root, int level)\n{\n\tif (root == NULL)\n\t\treturn;\n\tif (level == 1)\n\t\tprintf(\"%u\\n\", root->block_size_pow);\n\telse if (level > 1)\n\t{\n\t\tprintGivenLevel(root->left, level-1);\n\t\tprintGivenLevel(root->right, level-1);\n\t}\n}\n\n/* Compute the \"height\" of a tree -- the number of\n * nodes along the longest path from the root node\n * down to the farthest leaf node.*/\nint height(struct node* node)\n{\n\tif (node==NULL)\n\t\treturn 0;\n\telse\n\t{\n\t\t/* compute the height of each subtree */\n\t\tint lheight = height(node->left);\n\t\tint rheight = height(node->right);\n\n\t\t/* use the larger one */\n\t\tif (lheight > rheight) {\n\t\t\treturn(lheight+1);\n\t\t}\n\t\telse {\n\t\t\treturn(rheight+1);\n\t\t}\n\t}\n}\n\n/* Function to print level order traversal a tree*/\nvoid printLevelOrder(struct node* root)\n{\n\tint h = height(root);\n\tint i = 0;\n\tfor (i = 1; i <= h; i++)\n\t\tprintGivenLevel(root, i);\n}\n\nint main()\n{\n root = initialize_node(NULL, MAX_BLOCK_SIZE_POW);\n\tif (root == NULL)\t\n\t\tprintf(\"Unable to initialise root\\n\");\n my_malloc(3);\n my_malloc(3);\n my_malloc(3);\n\tprintLevelOrder(root);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6450742483139038,
"alphanum_fraction": 0.6450742483139038,
"avg_line_length": 25.428571701049805,
"blob_id": "870c13564d7cd514db2a968f31b252737de3ebe7",
"content_id": "c35a907b2fa7c5ff88d25b83496488ee484a7199",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 28,
"path": "/hw2/sme/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC = gcc\nCFLAGS = -c -g -Wall -Werror -fno-stack-protector\nCXXFLAGS = -o\nSMESELECT = ./sme_select.c\nSMEEPOLL = ./sme_epoll.c\nSMESYS = ./sme_sys.c\nOBJSEL = sme_select.o\nOBJEPOLL = sme_epoll.o\nOBJSYS = sme_sys.o\nTARGETSME = libsme.a\n\nall: $(TARGETSME)\n\n$(OBJSYS): $(SMESYS)\n\t$(CC) $(CFLAGS) $(SMESYS) $(CXXFLAGS) $(OBJSYS)\n\n$(OBJSEL): $(SMESELECT) $(SMESYS) sme.h sme_def.h sme_select.h list.h\n\t$(CC) $(CFLAGS) $(SMESELECT) $(CXXFLAGS) $(OBJSEL)\n\n$(OBJEPOLL): $(SMEEPOLL) $(SMESYS) sme.h sme_def.h sme_epoll.h list.h\n\t$(CC) $(CFLAGS) $(SMEEPOLL) $(CXXFLAGS) $(OBJEPOLL)\n\n$(TARGETSME): $(OBJSEL) $(OBJSYS) $(OBJEPOLL)\n\tar -cvq $(TARGETSME) $(OBJSEL) $(OBJSYS) $(OBJEPOLL)\n\n.PHONY: clean\nclean:\n\trm $(TARGETSME) $(OBJSEL) $(OBJSYS) $(OBJEPOLL)\n\n"
},
{
"alpha_fraction": 0.5519713163375854,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 15.470588684082031,
"blob_id": "e200918a17bc80105fb129e2edbed4084d42f6ff",
"content_id": "218fdfa7da6a18b6cfa904a9e4cc2743c315d44c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 17,
"path": "/hw1/source/hello.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : hello.c\n * Description : Sample program to be checkpointed\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include <stdio.h>\n#include <unistd.h>\n\nint main(int argc, char **argv) {\n while (1) {\n printf(\".\");\n fflush(stdout);\n sleep(1);\n }\n}"
},
{
"alpha_fraction": 0.6024844646453857,
"alphanum_fraction": 0.6024844646453857,
"avg_line_length": 13.636363983154297,
"blob_id": "44b31a03bb28a926b2161747389bff6e3a75e012",
"content_id": "e9c95cd6eebeb2eebab5dd3b045a09cecfc178dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 11,
"path": "/hw2/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": ".PHONY: all\nall:\n\t$(MAKE) -C sme\n\t$(MAKE) -C test\n\t$(MAKE) -C source\n\n.PHONY: clean\nclean:\n\t$(MAKE) -C sme clean\n\t$(MAKE) -C test clean\n\t$(MAKE) -C source clean\n"
},
{
"alpha_fraction": 0.5033783912658691,
"alphanum_fraction": 0.5337837934494019,
"avg_line_length": 16.41176414489746,
"blob_id": "bba6510036750f58226bf2802843be54f2e8b03c",
"content_id": "a02f5b200de566c298b3e2cb862cc353f33ca8d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 34,
"path": "/hw1/source/helper_func.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : helper_func.c\n * Description : Contains helper functions\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"../include/helper_func.h\"\n#include \"stdlib.h\"\n#include \"string.h\"\n\nvoid str_reverse(char *buffer) {\n int var = 0, i = 0, j = strlen(buffer) - 1;\n\n while (i < j) {\n var = buffer[i];\n buffer[i] = buffer[j];\n buffer[j] = var;\n i++;\n j--;\n }\n}\n\nvoid int_to_str(int pid, char *buffer) {\n int rem = -1, i = 0;\n\n while (pid != 0) {\n rem = pid % 10;\n pid = pid / 10;\n buffer[i] = rem + 48;\n i++;\n }\n str_reverse(buffer);\n}\n"
},
{
"alpha_fraction": 0.6433990597724915,
"alphanum_fraction": 0.658573567867279,
"avg_line_length": 21.724138259887695,
"blob_id": "6fa4a9278432cb4dcf20072c80a0e3fd753bee86",
"content_id": "324a9c9b7051b838a046d94245a88a3223be3b8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 29,
"path": "/hw1/include/common_func.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : common_func.h\n * Description : Common functions used by both checkpoint and restart process\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"memory_map.h\"\n\n#define CHECKPT_PATH \"./myckpt\"\n\n/**\n * @brief : Process the command \"cat /proc/self/maps\" and redirect the output\n * \t\t\tto an unnamed pipe\n *\n * @param : pipe_fd - Unnamed Pipe File Descriptor\n *\n * @return : SUCCESS(0)/FAILURE(0)\n */\nint process_command(int *pipe_fd);\n\n/**\n * @brief : Create a process for cmd execution\n *\n * @param : pipe_fd\t- Unnamed pipe File descriptor\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nint create_process_for_cmd_exec(int *pipe_fd);\n"
},
{
"alpha_fraction": 0.4649919867515564,
"alphanum_fraction": 0.5072153806686401,
"avg_line_length": 23.946666717529297,
"blob_id": "32e886f47043a9f36c19cf7dece7d74add7492f4",
"content_id": "a5ad697b7219b94d0f6184a411a3ad2baf23f9b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1871,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 75,
"path": "/hw5/hw5.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include <stdio.h> \n#include <stdlib.h>\n#include <time.h>\n\n#define KB 1024\n#define MB 1024 * KB\n\nint sizes[] = {1 * KB, 4 * KB, 8 * KB, 16 * KB,\n\t\t\t 32 * KB, 64 * KB, 128 * KB, 256 * KB,\n \t\t 512 * KB, 1 * MB, 1.5 * MB, 2 * MB, \n\t\t\t 2.5 * MB, 3 * MB, 3.5 * MB, 4 * MB,\n\t\t\t 8 * MB, 16 * MB};\nint N = (sizeof(sizes)/sizeof(int));\nint rerun = 5;\n\n/* Summate the plots */\nvoid calculate_cache_size(double plots[rerun][N], FILE *fp)\n{\n\tint k = 0, i = 0;\n\tint C1 = 0, C2 = 0;\n\tdouble summation[N];\n\t\n\tfprintf(fp, \"\\n\");\n\tfor (i = 0; i < N; i++)\n\t{\n\t\tfor (k = 0; k < rerun-1; k++)\n\t\t{\n\t\t\tsummation[i] += plots[k][i];\n\t\t}\n\t\tprintf(\"Plot : %dKB %f\\n\", (sizes[i]/KB), summation[i]);\n\t\tfprintf(fp, \"Plot : %dKB %f\\n\", (sizes[i]/KB), summation[i]);\n\t}\n}\n\nint main() {\n\tFILE *fp = NULL;\n unsigned int strides = 256 * MB;\n static int arr[16 * MB];\n int lengthMod = 0;\n unsigned int i = 0;\n int s = 0, k = 0;\n double time_taken = 0;\n clock_t start, end;\n int N = sizeof(sizes)/sizeof(int);\n double plots[rerun][N];\n \n fp = fopen(\"output.txt\", \"w\");\n\tif (fp == NULL) {\n\t\tprintf(\"Unable to open file output.txt\\n\");\n\t\treturn 1;\n\t}\n\n /* Regime 1 Saavedhra and Smith : Find Cache Size */\n\t/* Considering the cache size is not greater than 8MB */\n\tfor (k = 0; k < rerun-1; k++) {\n\t\tfprintf(fp, \"Plot : %d\\n\", k+1);\n \tfor (s = 0; s < N; s++) {\n\t \tlengthMod = sizes[s] - 1;\n\t \tstart = clock();\n\t \tfor (i = 0; i < strides; i++) {\n\t \tarr[(i * 16) & lengthMod] *= 10;\n \tarr[(i * 16) & lengthMod] /= 10;\n\t \t}\n\t\t\tend = clock();\n\t \ttime_taken = (double)(end - start)/CLOCKS_PER_SEC;\n \tplots[k][s] = time_taken;\n \tprintf(\"%d, %.8f \\n\", (sizes[s] / KB), plots[k][s]);\n\t\t\tfprintf(fp, \"%d, %.8f \\n\", (sizes[s] / KB), plots[k][s]);\n\t\t\tfflush(fp);\n\t }\n\t}\n\tcalculate_cache_size(plots, fp);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4821731746196747,
"alphanum_fraction": 0.5161290168762207,
"avg_line_length": 17.123077392578125,
"blob_id": "436194c56aafe1bb845361fa0b10220af8d1bb47",
"content_id": "e3ac8787d904e29e92dcb5f0f1461cc561f43288",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1178,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 65,
"path": "/hw3/driver.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"mymalloc.h\"\n#include \"utility.h\"\n#include <stdio.h>\n#include <errno.h>\n\nint main(int argc, char **argv)\n{\n\tint *p = mymalloc(4);\n if (NULL == p) {\n debug(stdout,\"No memory allocated\");\n /* check if errrno is ENOMEM */\n\t\treturn ENOMEM;\n }\n *p = 12;\n\tdebug(stdout,\"Sum %d\\n\", *p + 1);\n\n int *q = mymalloc(2048);\n if (NULL == q) {\n debug(stdout,\"no mem for q\");\n\t\treturn ENOMEM;\n\t}\n\t*q = 10;\n\tdebug(stdout,\"Sum %d\\n\", *q + 1);\n\n\tmyfree(p);\n\tp = NULL;\n\n int *f = mycalloc(1, 4);\n\tif (NULL == f) {\n debug(stdout,\"No memory allocated\");\n /* check if errrno is ENOMEM */\n\t\treturn ENOMEM;\n }\n\tdebug(stdout,\"Sum %d\\n\", *f+1);\n\n\tint *e = mycalloc(1, 4);\n\tif (NULL == e) {\n debug(stdout,\"No memory allocated\");\n /* check if errrno is ENOMEM */\n\t\treturn ENOMEM;\n }\n\n\te[0] = 12;\n\tdebug(stdout,\"e[0]:%d\\n\", e[0]);\n\n\te = myrealloc(e, 16);\t\n\te[0] += 1;\n\te[1] = 2;\n\te[2] = 3;\n\te[3] = 4;\n\t\n\tdebug(stdout,\"e[0]:%d\\n\", e[0]);\n\tdebug(stdout,\"e[1]:%d\\n\", e[1]);\n\tdebug(stdout,\"e[2]:%d\\n\", e[2]);\n\tdebug(stdout,\"e[3]:%d\\n\", e[3]);\n\n\tmyfree(q);\n\tq = NULL;\n\tmyfree(f);\n\tf = NULL;\n\tmyfree(e);\n\te = NULL;\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6425896286964417,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 25.700000762939453,
"blob_id": "6cf43903c2a6991dce31f5110cb38fdd567504da",
"content_id": "cc34f9d30ba0c16ed2406c57b9af1caae8432ffa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1869,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 70,
"path": "/hw1/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "# Filename : Makefile\n# Description : To build the checkpoint and restore process libraries \n# and executables\n#\n# Author : Nipesh Roy <[email protected]>\n# Date : Sept 11 2017\n\n# Compiler flags\nCC = gcc\nCFLAGS = -c -g -fPIC -Wall -Werror -fno-stack-protector\nC2FLAGS = -g -static -Wl,-Ttext-segment=5000000 -Wl,-Tdata=5100000 -Wl,-Tbss=5200000 -fno-stack-protector\nCXXFLAGS = -o\nLDFLAGS = -shared\n\n#Source files\nSRCSAMP = ./source/hello.c\nSRCCKPT = ./source/checkpoint.c\nSRCREST = ./source/myrestart.c\nSRCEXEC = ./source/exec_cmd.c\nSRCMEM = ./source/memory_map.c\nSRCHELPER = ./source/helper_func.c\n\n#Headers\nHEADER1 = ./include/memory_map.h\nHEADER2 = ./include/common_func.h\nHEADER3 = ./include/helper_func.h\n\n#Object files\nOBJSAMP = hello.o\nOBJCKPT = ckpt.o\nOBJREST = myrestart.o\nOBJHELPER = helper_func.o\nOBJMEM = memory_map.o\nOBJEXEC = exec_cmd.o\n\n#Shared library\nTARGET = libckpt.so\n\n#Output checkpoint file\nOUTFILE = myckpt\n\n#Rules\nall : $(TARGET) $(OBJSAMP) $(OBJREST)\n\n$(OBJSAMP): $(SRCSAMP)\n\t$(CC) $(SRCSAMP) $(CXXFLAGS) $(OBJSAMP)\n\n$(TARGET): $(OBJCKPT) $(OBJEXEC) $(OBJMEM) $(OBJHELPER)\n\t$(CC) $(LDFLAGS) $(CXXFLAGS) $(TARGET) $(OBJCKPT) $(OBJEXEC) $(OBJMEM) $(OBJHELPER)\n\n$(OBJREST) : $(SRCREST) $(OBJEXEC) $(OBJMEM) $(OBJHELPER)\t\n\t$(CC) $(C2FLAGS) $(SRCREST) $(CXXFLAGS) $(OBJREST) $(OBJEXEC) $(OBJMEM) $(OBJHELPER)\n\n$(OBJCKPT): $(SRCCKPT) $(HEADER1) $(HEADER2) $(HEADER3)\n\t$(CC) $(CFLAGS) $(SRCCKPT) $(CXXFLAGS) $(OBJCKPT) \n\n$(OBJEXEC): $(SRCEXEC) $(HEADER2) $(OBJMEM) $(OBJHELPER)\n\t$(CC) $(CFLAGS) $(SRCEXEC) $(CXXFLAGS) $(OBJEXEC)\n\n$(OBJMEM): $(SRCMEM) $(HEADER1) $(OBJHELPER)\n\t$(CC) $(CFLAGS) $(SRCMEM) $(CXXFLAGS) $(OBJMEM)\n\n$(OBJHELPER): $(SRCHELPER) $(HEADER3)\n\t$(CC) $(CFLAGS) $(SRCHELPER) $(CXXFLAGS) $(OBJHELPER)\n\n#Clean routine\n.PHONY: clean\nclean:\n\trm $(TARGET) $(OBJCKPT) $(OBJREST)\\\n\t\t$(OBJEXEC) $(OBJHELPER) $(OBJMEM) $(OBJSAMP) $(OUTFILE) "
},
{
"alpha_fraction": 0.5436105728149414,
"alphanum_fraction": 0.5517241358757019,
"avg_line_length": 16,
"blob_id": "3a6c91dad1640972083dc57f8418fb0fc9332cfb",
"content_id": "d01ea31d163bca63ac71403094640d169c690729",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 29,
"path": "/hw3/malloc.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"mymalloc.h\"\n#include \"buddy.h\"\n#include \"utility.h\"\n#include <errno.h>\n\nvoid *mymalloc(size_t size)\n{\n void *mem = NULL;\n\n if (0 >= size) {\n goto out;\n }\n\n\tif (size < 8)\n\t\tsize = 8;\n\n debug(\"Size requested %zu\", size);\n\n /* TODO round-off size of the next higher power of 2*/\n /* check if size can be satisfied */\n mem = _alloc_memory(size);\n if (NULL != mem) {\n debug(\"mem %zu allocated\", size);\n goto out;\n }\n\nout:\n return mem;\n}\n"
},
{
"alpha_fraction": 0.7090619802474976,
"alphanum_fraction": 0.7106518149375916,
"avg_line_length": 23.19230842590332,
"blob_id": "9720dd656be464b8372473eb753894e7a7b6ea46",
"content_id": "76ab8ad574be37c235da44c22cfe118f17c079cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 629,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 26,
"path": "/hw3/mymalloc.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __MY_MALLOC_H\n#define __MY_MALLOC_H\n#include <stddef.h> //for size_t\n\n/**\n * Implement the below functionality and add hooks for the same\n * but make sure printf and others are not executed as they use malloc()\n * internally\n */\n#if 0\nvoid *malloc(size_t size);\nvoid free(void *ptr);\nvoid *calloc(size_t nmemb, size_t size);\nvoid *realloc(void *ptr, size_t size);\nstruct mallinfo mallinfo();\nvoid malloc_stats();\n#else\nvoid *mymalloc(size_t size);\nvoid myfree(void *ptr);\nvoid *mycalloc(size_t nmemb, size_t size);\nvoid *myrealloc(void *ptr, size_t size);\nstruct mallinfo mallinfo();\nvoid malloc_stats();\n#endif\n\n#endif\n"
},
{
"alpha_fraction": 0.6832740306854248,
"alphanum_fraction": 0.6868327260017395,
"avg_line_length": 16.5625,
"blob_id": "2a3b084b6d0c5843f2428e6dc044e4442bf89d29",
"content_id": "615d76036c6b98075ff08ca60aa4a91a6eb6bddb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 16,
"path": "/hw3/buddy.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#ifndef __MY_BUDDY_H\n#define __MY_BUDDY_H\n#include <stddef.h>\n#include <stdint.h>\n\ntypedef struct BUDDY_NODE{\n\tvoid *start;\n size_t size;\n\tstruct BUDDY_NODE *left, *right;\n\tuint8_t free;\n}buddy_node_t;\n\nvoid *_alloc_memory(size_t size);\nvoid _reclaim_memory(void *mem);\n\n#endif\n"
},
{
"alpha_fraction": 0.6167872548103333,
"alphanum_fraction": 0.6257597804069519,
"avg_line_length": 21.880794525146484,
"blob_id": "5b76f281647feb4cc6a5fbb958ac88a6f5b8ff9b",
"content_id": "af86d76e734177b0ea5dfc08f3651528ce3e0cda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3455,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 151,
"path": "/hw1/source/checkpoint.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : checkpoint.c\n * Description : Program to checkpoint a process\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"../include/common_func.h\"\n#include \"../include/helper_func.h\"\n#include <errno.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\nint flag = 0;\n\n/**\n * @brief : Write the process context into the checkpoint file\n *\n * @param : process_ctx - The structure storing the entire process context\n * @param : ckpt_fd \t- checkpoint file FD\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nint write_ctx_to_ckpt_header(ucontext_t *process_ctx, int ckpt_fd) {\n int ret = -1;\n\n /* Write the process context for restarting */\n ret = write(ckpt_fd, process_ctx, sizeof(*process_ctx));\n if (ret == -1) {\n printf(\"Write failed : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n\n/**\n * @brief : Function which initiates the checkpoint process\n *\n * @return : SUCCESS(0)/FAILURE(1)\n */\nvoid start_checkpoint() {\n int pipe_fd[2] = {0}, ret = -1;\n char memory_map[512] = {'\\0'};\n mem_reg *mr = NULL;\n int ckpt_fd = 0;\n ucontext_t process_ctx;\n\n /* Initialise Memory path structure */\n mr = (mem_reg *)calloc(1, sizeof(*mr));\n if (mr == NULL) {\n printf(\"Unable to allocate memory\\n\");\n ret = FAILURE;\n goto error_out;\n }\n\n /* Open the file where the checkpoint will be created */\n ckpt_fd = open(CHECKPT_PATH, (O_RDWR | O_CREAT | O_TRUNC), (S_IRWXU));\n if (ckpt_fd == -1) {\n printf(\"Unable to open checkpoint file : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n /* Read the cat /proc/pid/maps */\n ret = create_process_for_cmd_exec(pipe_fd);\n if (ret == FAILURE) {\n printf(\"Unable to execute command to read process memory map\\n\");\n goto error_out;\n }\n\n /* Get the process context */\n ret = getcontext(&process_ctx);\n if (ret == -1) {\n printf(\"Unable to get process context : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n if (flag == 1) {\n return;\n }\n\n flag = 1;\n\n /* Write the context to the checkpoint file */\n ret = write_ctx_to_ckpt_header(&process_ctx, ckpt_fd);\n if (ret == FAILURE) {\n printf(\"Unable to get write context to checkpoint file\\n\");\n goto error_out;\n }\n\n /* Process memory map and write it to the checkpoint file */\n ret = process_memory_map(mr, memory_map, pipe_fd, ckpt_fd);\n if (ret == FAILURE) {\n printf(\"Unable to process memory maps\\n\");\n goto error_out;\n }\n\n ret = close(ckpt_fd);\n if (ret == FAILURE) {\n printf(\"Unable to close the checkpoint file FD : %d\\n\", errno);\n goto error_out;\n }\n\n free(mr);\n mr = NULL;\n\n printf(\"Process Checkpointed successfully\\n\");\n ret = SUCCESS;\n exit(ret);\n\nerror_out:\n exit(ret);\n}\n\n/**\n * @brief : Signal Handler\n *\n * @param : signum - Signal number that is caught\n */\nvoid handle_signal(int signum) {\n /* Start the checkpointing signal when you receive SIGUSR2 */\n start_checkpoint();\n}\n\n/**\n * @brief : Constructor to initialise signal handler\n */\n__attribute__((constructor)) void sig_handler_init() {\n int ret = -1;\n struct sigaction catch_sig = {0};\n\n catch_sig.sa_handler = &handle_signal;\n catch_sig.sa_flags = SA_SIGINFO;\n\n ret = sigaction(SIGUSR2, &catch_sig, NULL);\n if (ret == -1) {\n printf(\"Unable to initialise signal handler : %d\", errno);\n exit(FAILURE);\n }\n}\n"
},
{
"alpha_fraction": 0.5794457793235779,
"alphanum_fraction": 0.5945282578468323,
"avg_line_length": 23.791303634643555,
"blob_id": "c2a083bbb4e01aefd0537fa23f8dc3513ad55efb",
"content_id": "3dcaf9396e30bb04787e5f9c95e7652d71b3b94d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2851,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 115,
"path": "/hw1/source/exec_cmd.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : exec_cmd.c\n * Description : Program to execute a command using fork(),execvp(),waitpid()\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"../include/helper_func.h\"\n#include \"../include/memory_map.h\"\n#include <errno.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/wait.h>\n#include <unistd.h>\n\nint process_command(int *pipe_fd, int pid) {\n int ret = -1;\n char *arg[3] = {0}, *cmd = \"cat\", str_pid[100] = {'\\0'};\n char str[512] = {'\\0'};\n\n int_to_str(pid, str_pid);\n\n /**\n * \"/proc/$pid/maps\" contains the memory regions being currently used by\n * the process as well as the start and end address of all the memory\n * mappings currently in use by the process which will be then used for\n * checkpointing by acquiring the memory address and size and then storing\n * the data from the memory map into the checkpoint file.\n */\n strcpy(str, \"/proc/\");\n strcat(str, str_pid);\n strcat(str, \"/maps\");\n printf(\"%s\", arg[1]);\n arg[0] = \"cat\";\n arg[1] = str;\n arg[2] = NULL;\n\n /* Close the read end of the unnamed pipe */\n ret = close(pipe_fd[0]);\n if (ret == -1) {\n printf(\"Unable to close read end of pipe : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n /**\n * Duplicate the file descriptors of the standard output as to\n * get the output in the unnamed pipe\n */\n ret = dup2(pipe_fd[1], 1); /* duplicate the std output */\n if (ret == -1) {\n printf(\"Unable to duplicate stdout FD : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n ret = dup2(pipe_fd[1], 2); /* duplicate the std error output */\n if (ret == -1) {\n printf(\"Unable to duplicate stderr FD : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n /* Close the write end of the unnamed pipe */\n ret = close(pipe_fd[1]);\n if (ret == -1) {\n printf(\"Unable to close write end of pipe : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n /* Execute the command in child process */\n ret = execvp(cmd, arg);\n if (ret == -1) {\n printf(\"Unable to process /proc/self/maps : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n\nint create_process_for_cmd_exec(int *pipe_fd) {\n int pid = 0, ret = -1, status = 0, ppid = 0;\n\n ret = pipe(pipe_fd);\n if (ret == -1) {\n printf(\"Unable to create an unnamed pipe :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n pid = 0;\n pid = fork();\n if (pid < 0) {\n printf(\"Unable to fork a child process :%d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n } else if (pid == 0) {\n ppid = getppid();\n ret = process_command(pipe_fd, ppid);\n if (ret == FAILURE) {\n printf(\"Unable to process command\\n\");\n goto error_out;\n }\n } else {\n waitpid(pid, &status, 0);\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n"
},
{
"alpha_fraction": 0.5355297327041626,
"alphanum_fraction": 0.567829430103302,
"avg_line_length": 20.5,
"blob_id": "a117e70f7a3a9833dd39a3a626a66a1ac0ad3709",
"content_id": "46d040a25ada51d164d4cf94fcd084040b09e3fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1548,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 72,
"path": "/hw4/calculate_ram.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/**\n * HW4 : Program to calculate RAM using memory thrashing\n * Author : Nipesh Roy <[email protected]>\n */\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <time.h>\n\n#define ARRAY_SIZE \t1000\n#define MB \t\t\t\t1024 * 1024\n#define CHUNK_ALLOCATED 40 * MB\n\nint main()\n{\n\tvoid *p[ARRAY_SIZE];\n\tint i = 0, j = 0, k = 0;\n\tlong int ram_size = 0;\n\tclock_t start;\n\tdouble timeTaken = 0, prev_time = 0;\n\tlong int GB = 1024 * 1024 * 1024;\n\tFILE *fp = NULL;\n\n\tram_size = (CHUNK_ALLOCATED / (MB));\n\tprintf(\"\\n CHUNK ALLOCATED : %ld\\n\", ram_size);\n\t\n\tfp = fopen(\"./output\", \"w\");\n\tif (fp == NULL) {\n\t\tprintf(\"\\nUnable to create output file\\n\");\t\n\t\tgoto error_out;\n\t}\n\n\tram_size = 0;\n\n\tfor (i = 0 ; i < ARRAY_SIZE; i++)\n\t{\n\t\tstart = clock();\n\t\tp[i] = malloc(CHUNK_ALLOCATED);\n\t\tif (p[i] == NULL)\n\t\t{\n\t\t\tprintf(\"\\nNo more memory available\");\n\t\t\tgoto error_out;\n\t\t}\n\t\tfor (j = 0; j < i; j++)\n\t\t{\n\t\t\tmemset(p[j], 0, CHUNK_ALLOCATED);\n\t\t}\n\t\tram_size += (CHUNK_ALLOCATED / (MB));\n\t\tram_size += (CHUNK_ALLOCATED / (MB)); \n\t\tif (i > 9) { /* Considering RAM will be atleast 400 MB */\n\t\t\tprev_time = timeTaken;\n\t\t}\n\t\ttimeTaken = (double)(clock() - start)/CLOCKS_PER_SEC;\n\t\tprintf(\"Prev time : %lf, TimeTaken : %lf\\n\", prev_time, timeTaken);\n\t\tif ((prev_time != 0) && ((2 * prev_time) < timeTaken)) {\n\t\t\tfor (k = 0; k < i ; k++)\n\t\t\t{\n\t\t\t\tfree(p[k]);\n\t\t\t\tp[k] = NULL;\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t\tfprintf(fp, \"%ldMB, %.8f\\n\", ram_size, timeTaken);\n\t\tfflush(fp);\n\t}\n\nerror_out:\n\tfprintf(fp, \"\\nRam Size is : %ldGB\\n\", (ram_size / 1024));\n\tfflush(fp);\n\tfclose(fp);\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5357551574707031,
"alphanum_fraction": 0.5646452903747559,
"avg_line_length": 23.619718551635742,
"blob_id": "5153974279fe599b3648d4d69a1a52f7c65e1d10",
"content_id": "c963e3df0dbf1d5c16f00c444faef00c36faf014",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3496,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 142,
"path": "/hw1/source/memory_map.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : memory_map.c\n * Description : Process and write memory maps\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include \"../include/memory_map.h\"\n#include \"../include/helper_func.h\"\n#include <errno.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\nvoid populate_mem_reg(mem_reg *mr, char *memory_map) {\n char str1[256] = {'\\0'}, str2[256] = {'\\0'};\n char str3[256] = {'\\0'}, str4[256] = {'\\0'};\n char str5[256] = {'\\0'}, str6[256] = {'\\0'};\n char *token = NULL;\n char *end_ptr = NULL;\n unsigned long int start_addr = 0, end_addr = 0;\n char *buf = calloc(1, 512);\n char *buf1 = calloc(1, 512);\n\n sscanf(memory_map, \"%s %s %s %s %s %s\", str1, str2, str3, str4, str5, str6);\n\n /* process str1 to get the start and end offset */\n token = strtok(str1, \"-\");\n memcpy(buf, token, strlen(token));\n start_addr = strtol(buf, &end_ptr, 16);\n\n token = strtok(NULL, \"-\");\n memcpy(buf1, token, strlen(token));\n end_addr = strtol(buf1, &end_ptr, 16);\n\n mr->start_addr = (void *)start_addr;\n mr->end_addr = (void *)end_addr;\n mr->total_data_size = end_addr - start_addr;\n\n mr->is_readable = 0;\n mr->is_writeable = 0;\n mr->is_executable = 0;\n mr->is_private = 0;\n\n /* process str2 to get the permissions */\n if (str2[0] == 'r')\n mr->is_readable = 1;\n if (str2[1] == 'w')\n mr->is_writeable = 1;\n if (str2[2] == 'x')\n mr->is_executable = 1;\n if (str2[3] == 'p')\n mr->is_private = 1;\n\n mr->filename = calloc(1, 512);\n memcpy(mr->filename, str6, sizeof(str6));\n}\n\nint write_mem_ref(mem_reg *mr, int ckpt_fd) {\n int ret = -1;\n\n /* To skip private guard regions */\n if (mr->is_readable || mr->is_writeable || mr->is_executable) {\n ret = write(ckpt_fd, mr, sizeof(*mr));\n if (ret == -1) {\n printf(\"Write failed : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n ret = write(ckpt_fd, mr->start_addr, mr->total_data_size);\n if (ret == -1) {\n printf(\"Data Write failed : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n\nint process_memory_map(mem_reg *mr, char *memory_map, int *pipe_fd,\n int ckpt_fd) {\n char ch = '\\0';\n int itr = 0, ret = -1;\n\n /**\n * Close the write end of the unnamed pipe on the parent process as there is\n * nothing to write now\n */\n ret = close(pipe_fd[1]);\n if (ret == -1) {\n printf(\"Unable to close write end of pipe on parent : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n /**\n * Read the memory map array from the command output and parse the output\n * into the memory mapped structure\n */\n itr = 0;\n while (read(pipe_fd[0], &ch, 1) != 0) {\n if (ch == '\\n') {\n memory_map[itr] = '\\0';\n populate_mem_reg(mr, memory_map);\n if (strstr(mr->filename, \"[vsyscall]\") != NULL) {\n continue;\n }\n ret = write_mem_ref(mr, ckpt_fd);\n if (ret == FAILURE) {\n printf(\"Unable to write memory-map to the file : %d\\n\", errno);\n goto error_out;\n }\n itr = 0;\n memset(memory_map, 0, sizeof(*memory_map));\n } else {\n memory_map[itr++] = ch;\n }\n }\n\n /* Close the read end of the unnamed pipe */\n ret = close(pipe_fd[0]);\n if (ret == -1) {\n printf(\"Unable to close read end of the pipe : %d\\n\", errno);\n ret = FAILURE;\n goto error_out;\n }\n\n ret = SUCCESS;\n return ret;\n\nerror_out:\n return ret;\n}\n"
},
{
"alpha_fraction": 0.6018211841583252,
"alphanum_fraction": 0.6096026301383972,
"avg_line_length": 18.80327796936035,
"blob_id": "11cc2d5c055cd6584b9252a94ae697d2587a8408",
"content_id": "6b9775b388eece8aa4268894ffd14a35a33a47e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6040,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 305,
"path": "/test/my_malloc_dfs.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n\n#define MAX_BLOCK_SIZE_POW 32\n#define BLOCK_AVAILABLE 0\n#define BLOCK_ALLOCATED 1\n#define BLOCK_SPLIT 2\n\n/* Tree node struct */\ntypedef struct node\n{\n\tstruct node *parent;\n\tstruct node *left;\n\tstruct node *right;\n\tvoid *physical_addr;\n\tunsigned int block_size_pow;\n\tunsigned int state;\n}node;\n\nnode *root = NULL;\n\n/* Stack Funtions and structs */\n\ntypedef struct s_node\n{\n\tstruct s_node *next;\n\tstruct node *data;\n}s_node;\n\ntypedef struct stack\n{\n\ts_node *top;\n}stack;\n\nvoid stack_push(stack *S, node *n)\n{\n\ts_node *s;\n\tif(n != NULL) {\n\t\ts = (s_node *) malloc(sizeof(s_node));\n\t\ts->data = n;\n\t\ts->next = S->top;\n\t\tS->top = s;\t\t\n\t}\n\telse {\n\t\tprintf(\"Cannot push empty node!\\n\");\n\t}\n}\n\nvoid stack_pop(stack *S)\n{\n\tif(S->top) {\n\t\ts_node *s = S->top->next;\n\t\tfree(S->top);\n\t\tS->top = s;\n\t}\n\telse {\n\t\tprintf(\"Stack is empty!\");\n\t}\n}\n\nvoid clear_stack(stack *S)\n{\n\twhile(S->top) {\n\t\tstack_pop(S);\n\t}\n}\n\n/* Tree Functions */\nnode *initialize_node(node *parent, unsigned int block_size_pow)\n{\n\tnode *n = NULL;\n\tn = (node *) malloc(sizeof(node));\n\tn->parent = parent;\n\tn->left = NULL;\n\tn->right = NULL;\n\tn->physical_addr = NULL;\n\tn->block_size_pow = block_size_pow;\n\tn->state = BLOCK_AVAILABLE;\n\treturn n;\n}\n\nnode *split(node *parent, unsigned int req_block_size_pow)\n{\n\twhile ((req_block_size_pow < parent->block_size_pow)\n\t\t\t&& (parent->block_size_pow > 3)) {\n\t\tparent->left = initialize_node(parent, parent->block_size_pow - 1);\n\t\tparent->right = initialize_node(parent, parent->block_size_pow - 1);\n\t\tparent->state = BLOCK_SPLIT;\n\t\tparent = parent->left;\n\t}\n\treturn parent;\n}\n\nvoid coalesce(node *parent)\n{\n\tif ((BLOCK_AVAILABLE == parent->left->state) \n\t\t\t&& (BLOCK_AVAILABLE == parent->right->state)) {\n\t\tfree(parent->left);\n\t\tfree(parent->right);\n\t\tparent->state = BLOCK_AVAILABLE;\n\t}\n\telse {\n\t\tprintf(\"Child is not free!\\n\");\n\t\tprintf(\"Left child: %d\\n\", parent->left->state);\n\t\tprintf(\"Right child: %d\\n\", parent->right->state);\n\t}\n}\n\nnode *check(node *n, unsigned int req_block_size_pow) \n{\n\tnode *allocated_node = NULL;\n\tprintf(\"Check function called!\\n\");\n\tif ((req_block_size_pow == n-> block_size_pow)\n\t\t\t&& (BLOCK_AVAILABLE == n-> state)) {\n\t\tprintf(\"new Node allocated\");\n\t\tn->state = BLOCK_ALLOCATED;\n\t\tallocated_node = n;\n\t}\n\telse if ((req_block_size_pow == n-> block_size_pow)\n\t\t\t&& (BLOCK_AVAILABLE != n-> state)) {\n\t\tprintf(\"Leaf node unavailable\\n\");\n\t}\n\telse if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t&& (BLOCK_SPLIT == n-> state)) {\n\t\tprintf(\"Node has children\\n\");\n\t}\n\telse if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t&& (BLOCK_ALLOCATED == n-> state)) {\n\t\tprintf(\"Node is already allocated\\n\");\n\t}\n\telse if ((req_block_size_pow < n-> block_size_pow)\n\t\t\t&& BLOCK_AVAILABLE == n-> state) {\n\t\tprintf(\"Split larger nodes\\n\");\n\t\tallocated_node = split(n, req_block_size_pow);\n\t\tallocated_node->state = BLOCK_ALLOCATED;\n\t}\n\treturn allocated_node;\n}\n\nnode *find_block(unsigned int req_block_size_pow)\n{\n\tnode *n = NULL;\n\tnode *allocated_node = NULL;\n\tstack S;\n\tS.top = NULL;\n\n\tn = root;\n\n\twhile(1) {\n\t\tif(NULL != n) {\n\t\t\tstack_push(&S, n); \n\t\t\tn = n->left; \n\t\t}\n\t\telse {\n\t\t\tif (NULL != S.top) {\n\t\t\t\tn = 
S.top->data;\n\t\t\t\tstack_pop(&S);\n\t\t\t\tallocated_node = check(n, req_block_size_pow);\n\t\t\t\tif(NULL != allocated_node) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tn = n->right;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbreak;\n\t\t\t} \n\t\t}\n\t}\n\tclear_stack(&S);\n\treturn allocated_node;\n}\n\nvoid *my_malloc(size_t size)\n{\n\tunsigned int req_block_size_pow = 3;\n\tsize_t block_size = 8;\n\tnode *allocated_node = NULL;\n\tvoid *addr = NULL;\n\twhile (size > block_size) {\n\t\tblock_size *= 2;\n\t\treq_block_size_pow++;\n\t}\n\n\tallocated_node = find_block(req_block_size_pow);\n\tif (NULL == allocated_node) {\n\t\tprintf(\"Allocation failed!\\n\");\n\t}\n\telse {\n\t\t/* commented so that we dont run out of ram can change dis later for\n\t\t * lower allocations\n\t\t */\n\t\t//addr = malloc(size);\n\t\taddr = malloc(1);\n\t\tallocated_node->physical_addr = addr;\n\t\tprintf(\"Allocated memory!\\n\");\n\t}\n\treturn addr;\n}\n\n/* For searching the tree not using BFS nor DFS */\n\nvoid free_block(void *addr)\n{\n\tnode *n = NULL;\n\tnode *p = NULL;\n\tstack S;\n\tS.top = NULL;\n\n\tstack_push(&S, root);\n\n\twhile(S.top != NULL) {\n\t\tn = S.top->data;\n\t\tif(addr == n-> physical_addr) {\n\t\t\tn->physical_addr = NULL;\n\t\t\tn->state = BLOCK_AVAILABLE;\n\t\t\tp = p->parent;\n\t\t\twhile(p) {\n\t\t\t\tif((BLOCK_AVAILABLE == p->left->state)\n\t\t\t\t\t\t&& (BLOCK_AVAILABLE == p->right->state)) {\n\t\t\t\t\tcoalesce(p);\n\t\t\t\t\tp = p->parent;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tif(n->right != NULL)\n\t\t\t\tstack_push(&S, n->right);\n\t\t\tif(n->left != NULL)\n\t\t\t\tstack_push(&S, n->left);\n\t\t\tstack_pop(&S);\n\t\t}\n\t}\n\tclear_stack(&S);\n}\n\nvoid my_free(void *addr)\n{\n\tfree_block(addr);\n\tfree(addr);\n}\n\n/* Print nodes at a given level */\nvoid printGivenLevel(struct node* root, int level)\n{\n\tif (root == NULL)\n\t\treturn;\n\tif (level == 1)\n\t\tprintf(\"%u %p\\n\", root->block_size_pow, root->physical_addr);\n\telse if (level > 1)\n\t{\n\t\tprintGivenLevel(root->left, level-1);\n\t\tprintGivenLevel(root->right, level-1);\n\t}\n}\n\n/* Compute the \"height\" of a tree -- the number of\n * nodes along the longest path from the root node\n * down to the farthest leaf node.*/\nint height(struct node* node)\n{\n\tif (node==NULL)\n\t\treturn 0;\n\telse\n\t{\n\t\t/* compute the height of each subtree */\n\t\tint lheight = height(node->left);\n\t\tint rheight = height(node->right);\n\n\t\t/* use the larger one */\n\t\tif (lheight > rheight) {\n\t\t\treturn(lheight+1);\n\t\t}\n\t\telse {\n\t\t\treturn(rheight+1);\n\t\t}\n\t}\n}\n\n/* Function to print level order traversal a tree*/\nvoid printLevelOrder(struct node* root)\n{\n\tint h = height(root);\n\tint i = 0;\n\tfor (i = 1; i <= h; i++)\n\t\tprintGivenLevel(root, i);\n}\n\nint main()\n{\n\troot = initialize_node(NULL, MAX_BLOCK_SIZE_POW);\n\tif (root == NULL)\t\n\t\tprintf(\"Unable to initialise root\\n\");\n\tfor(int i = 0; i < 2; i++) {\n\t\tprintf(\"\\nALLOCATION %d\\n\", i+1);\n\t\tmy_malloc(1073741824);\n\t\tprintLevelOrder(root);\t\n\t}\n\tprintf(\"\\n\\nALLOCATION 5\");\n\tmy_malloc(2073741824);\n\tprintLevelOrder(root);\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6499032974243164,
"alphanum_fraction": 0.6615086793899536,
"avg_line_length": 21.521739959716797,
"blob_id": "dcaf5861308b437f4ca52a3a33cf724e478ead31",
"content_id": "5abc2c6808706eb5eab4e3111be0f6ca42213c76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 517,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 23,
"path": "/hw1/include/helper_func.h",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "/*\n * Filename : helper_func.h\n * Description : Helper functions used by both checkpoint and restart process\n *\n * Author : Nipesh Roy <[email protected]>\n * Date : Sept 11 2017\n */\n#include <stdio.h>\n\n/**\n * @brief : Helper function to reverse a string\n *\n * @param : buffer - The string to be reversed\n */\nvoid str_reverse(char *buffer);\n\n/**\n * @brief : Integer to string converter\n *\n * @param : pid - The pid to be converted\n * @param : buffer - The string obtained after conversion\n */\nvoid int_to_str(int pid, char *buffer);"
},
{
"alpha_fraction": 0.7226890921592712,
"alphanum_fraction": 0.7226890921592712,
"avg_line_length": 12.222222328186035,
"blob_id": "42a4783cc8e88ed3e1d0aba57191fff68aa4a355",
"content_id": "026d84b59a1f75718117a6e757fd0dbdc6cf4f53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 9,
"path": "/hw4/Makefile",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "CC=gcc\n\nall:calculate_ram\n\ncalculate_ram :\n\t$(CC) -g calculate_ram.c -o calculate_ram\n\nclean:\n\trm calculate_ram output\n"
},
{
"alpha_fraction": 0.5226870775222778,
"alphanum_fraction": 0.5282852053642273,
"avg_line_length": 18.73255729675293,
"blob_id": "d96cd611730db136f9b58ba9387a6015e32e09b1",
"content_id": "5f72fc44d52072ba67a3a69b9cefc02fa214f0f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3394,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 172,
"path": "/hw2/sme/sme_select.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"sme.h\"\n#include \"list.h\"\n#include \"sme_def.h\"\n#include <errno.h>\n#include <stddef.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\ntypedef struct select_data select_data_t;\n#define INVALID_MAXFD -1\n\nstruct select_data {\n sme_mech_t *m;\n /* max fd */\n int max_fd;\n};\n\nvoid calc_maxfd(select_data_t *sa) {\n\n sme_fd_t *fde;\n sa->max_fd = 0;\n\n for (fde = sa->m->fd_events; fde; fde = fde->next) {\n if (sa->max_fd < fde->fd)\n sa->max_fd = fde->fd;\n }\n}\n\nsme_mech_t *select_mech_init() {\n sme_mech_t *mech;\n select_data_t *d;\n\n mech = calloc(1, sizeof(*mech));\n if (!mech)\n return NULL;\n\n d = calloc(1, sizeof(*d));\n if (!d)\n return NULL;\n mech->priv_data = d;\n\n d->m = mech;\n d->max_fd = INVALID_MAXFD;\n\n return mech;\n}\n\nsme_fd_t *select_mech_add_fd(sme_mech_t *mech, int fd, fd_event_t ev,\n sme_fd_cb cb, void *cb_data) {\n sme_fd_t *fde;\n select_data_t *sd = mech->priv_data;\n\n fde = sme_comm_add_fd(mech, fd, ev, cb, cb_data);\n if (!fde)\n return NULL;\n\n if ((sd->max_fd != INVALID_MAXFD) && (fde->fd > sd->max_fd)) {\n sd->max_fd = fde->fd;\n }\n\n return fde;\n}\n\nsme_proc_t *select_mech_add_proc(sme_mech_t *mech, int pid, int flags,\n sme_proc_cb cb, void *cb_data) {\n sme_proc_t *proce;\n\n proce = sme_comm_add_proc(mech, pid, flags, cb, cb_data);\n if (!proce)\n return NULL;\n\n return proce;\n}\n\nbool select_mech_loop_select(sme_mech_t *mech) {\n int sret;\n int isset;\n sme_fd_t *fde;\n fd_set r_fds, w_fds;\n\n FD_ZERO(&r_fds);\n FD_ZERO(&w_fds);\n\n select_data_t *sd = mech->priv_data;\n if (sd->max_fd == INVALID_MAXFD) {\n calc_maxfd(sd);\n }\n for (fde = sd->m->fd_events; fde; fde = fde->next) {\n if (fde->ev == SME_READ) {\n FD_SET(fde->fd, &r_fds);\n }\n\n if (fde->ev == SME_WRITE) {\n FD_SET(fde->fd, &w_fds);\n }\n }\n\n struct timeval tv;\n /* Wait up to five seconds. */\n tv.tv_sec = 2;\n tv.tv_usec = 0;\n\n do {\n sret = select(sd->max_fd + 1, &r_fds, &w_fds, NULL, &tv);\n } while (0 > sret && EINTR == errno);\n\n if (sret == -1) {\n perror(\"select()\");\n return false;\n }\n\n if (sret > 0) {\n /* FD_ISSET(0, &rfds) will be true. */\n isset = 0;\n for (fde = sd->m->fd_events; fde; fde = fde->next) {\n if ((FD_ISSET(fde->fd, &r_fds)) && (fde->ev == SME_READ)) {\n isset = 1;\n }\n\n if ((FD_ISSET(fde->fd, &w_fds)) && (fde->ev == SME_WRITE)) {\n isset = 1;\n ;\n }\n\n if (isset) {\n /*\n * Remove fde from fd_events.\n * Decrement num of alive fd events\n */\n LIST_REMOVE(sd->m->fd_events, fde);\n sd->max_fd = INVALID_MAXFD;\n /*\n * Trigger the callback\n */\n fde->cb(sd->m, fde, fde->cb_data);\n break;\n }\n }\n return true;\n }\n return true;\n}\n\nint select_mech_loop_once(sme_mech_t *mech) {\n\n if ((mech->proc_events) && (sme_comm_loop_proc(mech)))\n return 0;\n\n if ((mech->fd_events) && (select_mech_loop_select(mech)))\n return 0;\n\n /* No events */\n return -1;\n}\n\nint select_mech_loop_wait(sme_mech_t *mech) {\n int ret;\n /**\n * Run as much as we have events\n */\n while (sme_loop_has_events(mech)) {\n ret = select_mech_loop_once(mech);\n if (ret != 0) {\n printf(\"loop once failed\\n\");\n return ret;\n }\n }\n\n printf(\"No more events. Exiting...\\n\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6028037667274475,
"alphanum_fraction": 0.6028037667274475,
"avg_line_length": 16.83333396911621,
"blob_id": "b74f90f8e3f339fcc090c55588107a2f7e55c75f",
"content_id": "78bec811e122eef972bbf653893c587528e2767d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 12,
"path": "/hw3/free.c",
"repo_name": "nipeshroy9194/cs5600",
"src_encoding": "UTF-8",
"text": "#include \"mymalloc.h\"\n#include \"buddy.h\"\n#include \"utility.h\"\n\n//void free(void *ptr)\nvoid myfree(void *ptr)\n{\n if (NULL == ptr)\n return;\n debug(\"Memory for free %p\", ptr);\n _reclaim_memory(ptr);\n}\n"
}
] | 45 |
Alex-Hall-Data/statistical-test-examples | https://github.com/Alex-Hall-Data/statistical-test-examples | 6b7ad675483d68cfd63140d039a3fe2e8898d60c | 344d6a67d535289854c9c2c0d8225f7e7e17078c | f801cb382964abc5e4083f3e9186dc879f724cda | refs/heads/master | 2021-09-02T09:39:53.826888 | 2018-01-01T14:46:06 | 2018-01-01T14:46:06 | 115,923,120 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6844660043716431,
"alphanum_fraction": 0.7718446850776672,
"avg_line_length": 50.75,
"blob_id": "17e7d54978a4aaa5aaca710b4ccd2834340431ec",
"content_id": "7485d8f3151bdf980a0c7ef39a2b4b872d234564",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 4,
"path": "/t test power example.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "pp=power.t.test(delta=21-20,power=0.70,sd=6.5,type=\"one.sample\")\npp\n\n#263 = sample size required to have 70% confidence of correctly rejecting null hypothesis that population mean is 20 if actual mean is 21"
},
{
"alpha_fraction": 0.5818858742713928,
"alphanum_fraction": 0.6191067099571228,
"avg_line_length": 22.705883026123047,
"blob_id": "bba62ad086af71498a8184de51605565b4f549e2",
"content_id": "bdfa5772d3c8aa8ddb61499527b4dcf3d4f324d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 34,
"path": "/pearson rank.py",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "from recommendations import critics\nfrom math import sqrt\n\ndef sim_pearson(prefs,p1,p2):\n #get list of mutually rated items\n si={}\n for item in prefs[p1]:\n if item in prefs[p2]:si[item]=1\n\n #find number of elements\n n=len(si)\n\n #if there are no ratings in common return 0\n if n==0: return 0\n\n #add up all preferances\n sum1=sum([prefs[p1][it] for it in si])\n sum2=sum([prefs[p2][it] for it in si])\n\n #sum the squares\n sum1sq=sum([pow(prefs[p1][it],2) for it in si])\n sum2sq=sum([pow(prefs[p2][it],2) for it in si])\n\n #sum the products\n psum=sum([prefs[p1][it]*prefs[p2][it] for it in si])\n\n #calculate pearson score\n num=psum-(sum1*sum2/n)\n den=((sum1sq-pow(sum1,2)/n)*(sum2sq-pow(sum2,2)/n))\n if den==0: return 0\n\n r=num/den\n\n return r\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.7634782791137695,
"avg_line_length": 26.428571701049805,
"blob_id": "aafebc1d7d1af5e12f762b6d6d5bdaf1b719892b",
"content_id": "a4513a8c709cfc484b1c58dadffcaefee92e384c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 575,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 21,
"path": "/power curve.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "#sample to compute power of any true mean\nmypower=function(true.mean){\n pp=power.t.test(n=38,delta=true.mean-20,sd=6.5,type=\"one.sample\")\n pp$power\n}\n \n\n#vector of range of true means to be evaluated\nmeans=seq(from=20,to=25,by=0.5)\n\n#apply mypower function to vector of means\npowers=sapply(means,mypower)\n\n#produce table of means and respective powers\ncbind(means,powers)\n\n#plot power curve. Gives chance of correctly rejecting null hypothesis for range of true means for given sample size\nplot(means,powers)\nlines(spline(means,powers))\nabline(h=1,lty=2)\ngrid(col=\"black\")"
},
{
"alpha_fraction": 0.765625,
"alphanum_fraction": 0.796875,
"avg_line_length": 23.69230842590332,
"blob_id": "8c54f01c6c14714a39b031efa722ef15bf88875f",
"content_id": "14b824d70faa789d400b9898272998ac553010a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 13,
"path": "/qq plots.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "random.normal=rnorm(100,mean=10,sd=3)\nhist(random.normal)\n\n#qqplot for normally distributed random data. Good fit expected\nqqnorm(random.normal)\nqqline(random.normal)\n\nrandom.exp=rexp(100,rate=1)\nhist(random.exp)\n\n#qq plot for exponentially dstributed random data. poor fit expected\nqqnorm(random.exp)\nqqline(random.exp)"
},
{
"alpha_fraction": 0.6285714507102966,
"alphanum_fraction": 0.7428571581840515,
"avg_line_length": 43,
"blob_id": "1932ffb51ebc27fc5b70e33ca49ecaf3f9cccc78",
"content_id": "c952c586bc78dcad2023e179f9e6b6e0b85e1926",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 4,
"path": "/binomial test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "pp=binom.test(389,1023)\npp\n\n#ie, if 389 from a sample of 1023 individuals vot for 'A', we have 95% confidence that between 35-41% of the general population would vote for 'A'."
},
{
"alpha_fraction": 0.7547683715820312,
"alphanum_fraction": 0.7738419771194458,
"avg_line_length": 29.66666603088379,
"blob_id": "df3e803281bb85afad43925d4ba9e70683c48d62",
"content_id": "0e86c3fde1a699668573b23ccfba84f7742161d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 12,
"path": "/paired t test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "#paired t test for paired dataset (hind and foreleg length o a set of 10 deer)\n\ndeer=read.table(\"deer.txt\",header=T)\n\ndeer.pairedt=t.test(deer$Foreleg,deer$Hindleg,paired=T)\n\ndeer.pairedt\n\n#small P value so reject null hypothesis. So, leg lengths differ with 95% confidence interval of 1-5.5cm\n\nplot(deer$Foreleg,deer$Hindleg)\nlines(lowess(deer$Foreleg,deer$Hindleg))"
},
{
"alpha_fraction": 0.707446813583374,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 28,
"blob_id": "31f65d7d9243e1a7174203e827ffcd7d6b7a5b49",
"content_id": "e161cb140249d166c01199ef94ee131e05e216f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 376,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 13,
"path": "/power test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "cars<-read.table(\"cars.txt\",header=T)\n\nattach(cars)\n\n#t test\ntt=t.test(MPG,mu=20)\ntt\n\n#power of t test\npp=power.t.test(n=38,delta=25-20,sd=6.5,type=\"one.sample\")\npp\n\n#power gives the probability of being correctly able to reject the null hypothesis. In this case, this gives us the chance of being able to correctly reject the null of the mean MPG being 20 if it is really 25."
},
{
"alpha_fraction": 0.47560974955558777,
"alphanum_fraction": 0.6585366129875183,
"avg_line_length": 11.615385055541992,
"blob_id": "baf079b5467794b9c925abfebd2d9515af9f8d5a",
"content_id": "79d1a6145b132139e4cc95b05255c50efafbd656",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 13,
"path": "/pairwise prop test example.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "\n#group 1 successes\nx1 <- 6085\n#group 1 counts\nn1 <- 13353\n\n\n\n#group 2 successes\nx2 <- 6566\n#group 2 counts\nn2 <- 14213\n\npairwise.prop.test(x=c(x1,x2) , n=c(n1,n2))"
},
{
"alpha_fraction": 0.7061728239059448,
"alphanum_fraction": 0.7358024716377258,
"avg_line_length": 15.916666984558105,
"blob_id": "7ea09b1bac047d6ff4d563cbaf97418ca41f4009",
"content_id": "691b4438c0528037219ef7aa23f1c1761275f149",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 24,
"path": "/cars t test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "cars<-read.table(\"cars.txt\",header=T)\n\nattach(cars)\n\n#t test\ntt=t.test(MPG)\ntt\n\n##99% interval t test\ntt99=t.test(MPG,conf.level=0.99)\ntt99\n\n#filter by American cars only\nis.american=(Country==\"U.S.\")\n\n#2 sample t test. tests whether american cars have higher MPG than others\ntwot=t.test(MPG~is.american)\ntwot\n\n#get categories for two sample t test\nnames(twot)\n\n#get 95% confidence interval.\ntwot$conf.int"
},
{
"alpha_fraction": 0.7325408458709717,
"alphanum_fraction": 0.7592867612838745,
"avg_line_length": 31.095237731933594,
"blob_id": "17e7e22b926d2595a5294e3a96bc16252092dfb1",
"content_id": "92ebacdee1fb855b9620594d7ce063e21ac8fa91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 673,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 21,
"path": "/cars two sample t test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "#two sample t test to find whether American cars have lower MPG but only for 4 cylinder engines. pg 58\ncars<-read.table(\"cars.txt\",header=T)\nattach(cars)\n\n#filter by American cars only\nis.american=(Country==\"U.S.\")\n\ntwot4=t.test(MPG[Cylinders==4]~is.american[Cylinders==4])\n#2 sample t test on MPG for US cars with 4 cylinders. Null Hypothesis=no difference in MPG to other nationalities\ntwot4\n\n#P value exceeds alpha (0.05) so null hypothesis is true\n\n\n#t test to determine a measured MPG of 20 is representative from the above:\n\n#null hypothesis is that population mean is equal to 20.\n\ntt=t.test(MPG,mu=20)\ntt\n#p value is tiny so reject null hypothesis - MPG has changed"
},
{
"alpha_fraction": 0.6946386694908142,
"alphanum_fraction": 0.7552447319030762,
"avg_line_length": 46.55555725097656,
"blob_id": "0a8e63dc332859b683afe5a8589f5f66dc70a2fb",
"content_id": "dd8df57ab9c8c3e3da457aa18f95d21ef22a5ebb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 9,
"path": "/biased coin binomial test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "#binomial test for coin. 70 flips performed with 45 heads. p=p(heads)\n#null hypothesis is p=0.5\n#alternative hypothesis p!=0.5\n\npp=binom.test(45,70,p=0.5,conf.level=0.9)\npp\n\n#alternative hypothesis true - coin is biased. p value is 0.022 so the evidence isn't overwhelmingly strong\n#90% confidence interval is between 54% and 74% (ie percentage bias). This is a big range so a bigger sample size is needed to ascetain the bias.\n\n"
},
{
"alpha_fraction": 0.688524603843689,
"alphanum_fraction": 0.688524603843689,
"avg_line_length": 25.14285659790039,
"blob_id": "8584d1edf29645f344ff6cee480165b96589d671",
"content_id": "bd90681abf4b040d877acaf42c95cbb935ec8412",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 7,
"path": "/pairwise wilcox test.R",
"repo_name": "Alex-Hall-Data/statistical-test-examples",
"src_encoding": "UTF-8",
"text": "\npairwise_wilcox_test <- function(df, attribute,grouping_name){\n\n df <- as.data.frame(df)\n df$group <- df[, grouping_name]\n \n print(pairwise.wilcox.test(df$attribute, df$group))\n}"
}
] | 12 |
OscarEscamilla/API_REST_web- | https://github.com/OscarEscamilla/API_REST_web- | 7bbb3c3feeb5b628f712174daebec1dfbadabc39 | 5e2c5b4ac5c8c5348577d2c16ce0afecdbe47414 | 49b6b66d3546d23f9b8a93b648358627b78fc390 | refs/heads/master | 2020-04-30T05:58:31.808052 | 2019-03-22T22:04:21 | 2019-03-22T22:04:21 | 176,639,572 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5791062712669373,
"alphanum_fraction": 0.5797101259231567,
"avg_line_length": 29.66666603088379,
"blob_id": "e762ed766cb3a2d34f323853fa6b82c8ac54c447",
"content_id": "ab9e89bf40de42ffcf77ebab803c4f588d1a55ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1656,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 54,
"path": "/application/models/model_contactos.py",
"repo_name": "OscarEscamilla/API_REST_web-",
"src_encoding": "UTF-8",
"text": "import web\nimport config\n\ndb = config.db\n\n\ndef get_all_contactos():\n try:\n return db.select('contactos')\n except Exception as e:\n print \"Model get all Error {}\".format(e.args)\n print \"Model get all Message {}\".format(e.message)\n return None\n\ndef get_contacto(id_contacto):\n try:\n return db.select('contactos', where='id_contacto=$id_contacto', vars=locals())[0]\n except Exception as e:\n print \"Model get Error {}\".format(e.args)\n print \"Model get aMessage {}\".format(e.message)\n return None\n\ndef delete_contacto(id_contacto):\n try:\n return db.delete('contactos', where='id_contacto=$id_contacto', vars=locals())\n except Exception as e:\n print \"Model delete Error {}\".format(e.args)\n print \"Model delete Message {}\".format(e.message)\n return None\n\ndef insert_contacto(nombre, telefono, email):\n try:\n db.insert('contactos',\n nombre=nombre,\n telefono=telefono,\n email=email,\n )\n except Exception as e:\n print \"Model insert Error {}\".format(e.args)\n print \"Model insert Message {}\".format(e.message)\n return None\n\ndef edit_contacto(id_contacto, nombre, telefono, email):\n try:\n db.update('contactos',\n nombre=nombre,\n telefono=telefono,\n email=email,\n where='id_contacto=$id_contacto',\n vars=locals())\n except Exception as e:\n print \"Model update Error {}\".format(e.args)\n print \"Model update Message {}\".format(e.message)\n return None\n"
},
{
"alpha_fraction": 0.53156977891922,
"alphanum_fraction": 0.5507861375808716,
"avg_line_length": 40.73958206176758,
"blob_id": "2b11c2445cf3531a733fa198babe35abea52e4d8",
"content_id": "3484bae19eba5c117019cd2ad94e51359b21b480",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4007,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 96,
"path": "/application/api/contactos/api_contactos.py",
"repo_name": "OscarEscamilla/API_REST_web-",
"src_encoding": "UTF-8",
"text": "import web\nimport config\nimport json\n\n\nclass Api_contactos:\n def get(self, id_contacto):\n try:\n # http://api_products?user_hash=12345&action=get\n if id_contacto == None:\n result = config.model.get_all_contactos()\n contactos_json = []\n for row in result:\n tmp = str(dict(row))\n contactos_json.append(tmp)\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n else:\n # http://api_products?user_hash=12345&action=get&id_product=1\n result = config.model.get_contacto(int(id_contacto))\n contactos_json = []\n contactos_json.append(str(dict(result)))\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n except Exception as e:\n print \"GET Error {}\".format(e.args)\n contactos_json = '[]'\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n\n# https://0.0.0.0:8080/api_products?user_hash=12345&action=put&id_product=1&product=nuevo&description=nueva&stock=10&purchase_price=1&price_sale=3&product_image=0\n def put(self, nombre, telefono, email):\n try:\n config.model.insert_contacto( nombre, telefono, email)\n contactos_json = '[{200}]'\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n except Exception as e:\n print \"PUT Error {}\".format(e.args)\n return None\n# http://api_products?user_hash=12345&action=get&id_product=1\n def delete(self, id_contacto):\n try:\n config.model.delete_contacto(id_contacto)\n contactos_json = '[{200}]'\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n except Exception as e:\n print \"DELETE Error {}\".format(e.args)\n return None\n# https://0.0.0.0:8080/api_products?user_hash=12345&action=update&id_product=1&product=nuevo&description=nueva&stock=10&purchase_price=1&price_sale=3&product_image=default.jpg\n def update(self, id_contacto, nombre, telefono, email):\n try:\n config.model.edit_contacto(id_contacto, nombre, telefono, email)\n contactos_json = '[{200}]'\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n except Exception as e:\n print \"GET Error {}\".format(e.args)\n contactos_json = '[]'\n web.header('Content-Type', 'application/json')\n return json.dumps(contactos_json)\n\n def GET(self):\n user_data = web.input(\n user_hash=None,\n action=None,\n id_contacto=None,\n nombre=None,\n telefono=None,\n email=None,\n )\n try:\n user_hash = user_data.user_hash # user validation\n action = user_data.action # action GET, PUT, DELETE, UPDATE\n id_contacto = user_data.id_contacto\n nombre = user_data.nombre\n telefono = user_data.telefono\n email = user_data.email\n\n if user_hash == '12345': # user_hash\n if action == None:\n raise web.seeother('/404')\n elif action == 'get':\n return self.get(id_contacto)\n elif action == 'put':\n return self.put(nombre, telefono, email)\n elif action == 'delete':\n return self.delete(id_contacto)\n elif action == 'update':\n return self.update(id_contacto, nombre, telefono, email)\n else:\n raise web.seeother('/404')\n except Exception as e:\n print \"WEBSERVICE Error {}\".format(e.args)\n raise web.seeother('/404')\n"
},
{
"alpha_fraction": 0.6106666922569275,
"alphanum_fraction": 0.6106666922569275,
"avg_line_length": 22.4375,
"blob_id": "bfb3c6404e6551fdd501b4a4b98b56e6f7d34d33",
"content_id": "63a0cb12543118490412b0ad8870c8966998d1a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 16,
"path": "/application/controllers/contactos/delete.py",
"repo_name": "OscarEscamilla/API_REST_web-",
"src_encoding": "UTF-8",
"text": "import config\n\n\nclass Delete:\n \n def __init__(self):\n pass\n\n def GET(self, id_contacto):\n result = config.model.get_contacto(int(id_contacto))\n return config.render.delete(result)\n\n def POST(self, id_contacto):\n form = config.web.input()\n config.model.delete_contacto(form['id_contacto'])\n raise config.web.seeother('/')\n"
},
{
"alpha_fraction": 0.790960431098938,
"alphanum_fraction": 0.790960431098938,
"avg_line_length": 33.599998474121094,
"blob_id": "6e269414fdec0be3eb711723bb455561692a1f10",
"content_id": "579aa9a4b32bf59ed7799f3bb61d1040fc4632c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 5,
"path": "/application/controllers/contactos/config.py",
"repo_name": "OscarEscamilla/API_REST_web-",
"src_encoding": "UTF-8",
"text": "import web\nimport application.models.model_contactos\n\nrender = web.template.render('application/views/contactos/', base='master')\nmodel = application.models.model_contactos\n "
},
{
"alpha_fraction": 0.6066945791244507,
"alphanum_fraction": 0.6066945791244507,
"avg_line_length": 20.727272033691406,
"blob_id": "7d5661f1bdc30130cc5a08ab61f1be2b6d099a46",
"content_id": "9ef75597a3b6d6c3442db779f1ab44f9fdee56b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 11,
"path": "/application/controllers/contactos/view.py",
"repo_name": "OscarEscamilla/API_REST_web-",
"src_encoding": "UTF-8",
"text": "import config\n\nclass View:\n \n def __init__(self):\n pass\n\n def GET(self, id_contacto):\n id_contacto = int(id_contacto)\n result = config.model.get_contacto(id_contacto)\n return config.render.view(result)\n"
}
] | 5 |
gabeo13/python-challenge | https://github.com/gabeo13/python-challenge | 4dd6d73644cdcfd632815138f035fe00bd422e03 | bb91303e5180ab60b002831d8efc33c35ec6b9f1 | 1f3e2f6e6e1265adb9e648a8c65a86f8549a305a | refs/heads/master | 2023-02-14T19:01:13.068507 | 2020-12-28T21:27:24 | 2020-12-28T21:27:24 | 322,052,144 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6246408224105835,
"alphanum_fraction": 0.6285919547080994,
"avg_line_length": 32.154762268066406,
"blob_id": "6f37e07f39ca4847d8ce44b6f09d061828747b9d",
"content_id": "958c630fab7bc5d95a87e1fcab81f01df034dd22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2784,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 84,
"path": "/PyPoll/main.py",
"repo_name": "gabeo13/python-challenge",
"src_encoding": "UTF-8",
"text": "'''----------PyPoll----------\n* This Python script is designed to analyze an election dataset with VoterID, County, and Candidate columns\n* Input file is in CSV (comma seperated value format)\n* Program tasks include:\n * Calcuate total number of votes cast\n * Calculate a complete list of candidates that received votes\n * Calculate percentage of the total vote each candiate won\n * Calculate the total number of votes each candidate won\n * Find the winner of the election based on popular vote\n* Print output to terminal and text file in \"Analysis\" folder\n'''\n\n#Import Dependancies\nimport os\nimport csv\nimport sys\n\n#Create Variable for CSV File Path\ncsvpath = os.path.join('resources', 'election_data.csv')\n\n#Read CSV File into program\nwith open(csvpath) as csvfile:\n\n #Assign csvfile to iterable variable\n csvreader = csv.reader(csvfile, delimiter = ',')\n\n #Read the header row first to move cursor to first data row\n csv_header = next(csvreader)\n\n #Build container for election dictionary \n election = {}\n\n #Define candidate counting function\n def CountCandidates(candidates):\n \n for candidate in candidates:\n if candidate in election:\n election[candidate] += 1\n else:\n election[candidate] = 1\n return election\n\n #Build list container for candidates\n candidates = []\n\n #Loop through csv and extract candidates into list\n for row in csvreader:\n candidates.append(row[2])\n\n #Calculate Total # of Votes in election\n totVote = len(candidates)\n print('\\n---------------\\nElection Results\\n---------------\\n')\n print(f'Total Votes: {totVote}')\n\n #Call function to tally candidate totals (frequency function)\n CountCandidates(candidates)\n\n #Loop though election dictionary to calculate vote spread\n for key, value in election.items(): \n pctTot =round((value / totVote) *100, 2)\n print(f'{key}: {pctTot}% ({value})')\n\n #Calculate & Print Winner of Election \n winner = max(election, key=election.get) \n print(f'---------------\\nWinner: {winner}\\n---------------')\n\n #Write results to txt file using sys dependancy \n txtpath = os.path.join('analysis', 'analysis.txt')\n\n with open(txtpath, 'w') as txtfile:\n\n print('\\n----------------', file=txtfile)\n print('Election Results', file=txtfile)\n print('----------------\\n', file=txtfile)\n print(f'Total Votes: {totVote}', file=txtfile)\n for key, value in election.items():\n pctTot =round((value / totVote) *100, 2) \n print(f'{key}: {pctTot}% ({value})', file=txtfile)\n\n print(f'---------------\\nWinner: {winner}\\n---------------', file=txtfile)\n\n txtfile.close()\n\ncsvfile.close()"
},
{
"alpha_fraction": 0.7226935029029846,
"alphanum_fraction": 0.7253446578979492,
"avg_line_length": 35.28845977783203,
"blob_id": "73c8ebbefc24debb6f1cb29cb36080698e2c7b29",
"content_id": "4448aaeacea9805b1231fdfff00099e015d9b74d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1886,
"license_type": "no_license",
"max_line_length": 322,
"num_lines": 52,
"path": "/README.md",
"repo_name": "gabeo13/python-challenge",
"src_encoding": "UTF-8",
"text": "# Python Challenge - Py Me Up, Charlie\nThis two part project consists of analyzing two disparate datasets with basic python scripts. Tools used for this project consist of VS Code for text editing, GitBash for running code, and GitHub for version control. Dataset snippets for each project may be found in their respective \"resources\" directory for the project.\n---\n---\n## Part I - PyBank\n![](Images/pyBankheader.jpg)\n---\n---\nUsing a csv file with columns for Date & Profit/Loss as laid out below, analyze and record the following:\n---\n_CSV Table Format_\n| Date | Profit/Losses |\n|----------|----------|\n|Mon-YYYY|$ XXXXXX|\n---\n_Requested Analytical Output_\n* Calculate total number of months included in dataset\n* Calculate total \"Profit/Loss\" over period\n* Calculate m/m change and output average m/m change for \"Profit/Loss\"\n* Identify Date & Amount of greatest increase in \"Profit/Loss\"\n* Identify Date & Amount of greatest decrease in \"Profit/Loss\"\n* Print all output to terminal and text file in seperate \"analysis\" folder\n---\n_Results_\n\n![](Images/pyBankanalysis.jpg)\n---\n---\n## Part II - PyPoll\n![](Images/pyPollheader.jpg)\n---\n---\nUsing an election dataset containing \"Voter ID\", \"Candidate\", and \"County\", analyze and record the following:\n---\n_CSV Table Format_\n| Voter ID | County |Candidate|\n|----------|----------|----------|\n| 12345 | Clark | Drumpf | \n---\n_Requested Analytical Output_\n* Calculate total number of votes\n* Generate a list of candidates that received votes\n* Calculate percentage of the total vote that each candidate won\n* Calculte the total number of votes that each candidate won\n* Identify winner of the election based on popular vote\n* Print all output to terminal and text file in seperate \"analysis\" folder\n---\n_Results_\n\n![](Images/pyPollanalysis.jpg)\n\n>This readme is powered by [Bing Cherry Energy](https://bingbeverage.com/product/bing-cherry/)"
},
{
"alpha_fraction": 0.6769230961799622,
"alphanum_fraction": 0.6795417070388794,
"avg_line_length": 35.807228088378906,
"blob_id": "de200198f6604cf92c9ff39608618ed3077ae7eb",
"content_id": "372f5e3b36417dff0b8519509f9625e37c00e22d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3055,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 83,
"path": "/PyBank/main.py",
"repo_name": "gabeo13/python-challenge",
"src_encoding": "UTF-8",
"text": "'''----------PyBank----------\n* This Python script is designed to analyze a finacial dataset comprised of Date and Profit/Loss columns\n* Input file is in CSV (comma seperated value format)\n* Program tasks include:\n * Calcuate total number of months included in dataset\n * Calculate total net amount of \"Profit/Loss\" over entire period\n * Calculate M/M change in \"Profit/Loss\" and compute average M/M change\n * Find greatest increase in proft (Date & Amount)\n * Find greatest decrease in profit (Date & Amount)\n* Print output to terminal and text file in \"Analysis\" folder\n'''\n\n#Import OS Module to enable cross platform functionality across operating systems\nimport os \n\n#Import Module to read csv files\nimport csv\n\n#Import Module to write output to txt files\nimport sys\n\n#Assign csv file path to a variable\ncsvpath = os.path.join('resources', 'budget_data.csv')\n\n#Read csv file in to program\nwith open(csvpath) as csvfile:\n\n #Assign contents of csv file to variable and specify delimiter\n csvreader = csv.reader(csvfile, delimiter=',')\n\n #Read csv data into a list\n rows = list(csvreader)\n\n ## Create containers for each columnar derived list\n months = []\n profitLoss = []\n\n ##Calculate total number of months and total profit/loss\n for row in rows[1:]:\n months.append(row[0])\n profitLoss.append(int(row[1]))\n totalProfitloss = sum(profitLoss)\n totalMonths = len(months) \n \n ##Calculate the changes in Profit/Losses over the entire period, then find the average\n change = []\n i = 0\n\n for i in range(len(profitLoss)-1):\n change.append(profitLoss[i+1]-profitLoss[i]) \n sumChange = sum(change)\n numChange = len(change)\n avgChange = sumChange/numChange\n\n ##Find greatest increase in profit (date and amount) over the entire period\n max_prof = max(profitLoss)\n max_prof_date = months[profitLoss.index(max_prof)]\n\n ##Find greatest decrease in profit (data and amount) over the entire period\n min_prof = min(profitLoss)\n min_prof_date = months[profitLoss.index(min_prof)]\n \n ##Print results to terminal\n print(f'\\n\\nFinancial Analysis\\n-------------------')\n print(f'Total Months: {totalMonths}\\nTotal: ${totalProfitloss}')\n print(f'Average Change: ${round(avgChange,2)}')\n print(f'Greatest Increase in Profits: {max_prof_date} (${max_prof})')\n print(f'Greatest Decrease in Profits: {min_prof_date} (${min_prof})')\n\n ##Output results to txt file using 'sys' dependancy\n txtpath = os.path.join('analysis', 'analysis.txt')\n\n with open(txtpath, 'w') as txtfile:\n\n print(f'\\n\\nFinancial Analysis\\n-------------------', file=txtfile)\n print(f'Total Months: {totalMonths}\\nTotal: ${totalProfitloss}', file=txtfile)\n print(f'Average Change: ${round(avgChange,2)}', file=txtfile)\n print(f'Greatest Increase in Profits: {max_prof_date} (${max_prof})', file=txtfile)\n print(f'Greatest Decrease in Profits: {min_prof_date} (${min_prof})', file=txtfile)\n\n txtfile.close()\n\ncsvfile.close()\n"
}
] | 3 |
dishpzga/ESunnyAPI | https://github.com/dishpzga/ESunnyAPI | 6324e3bb38a423c3fb48dab628a4abf6aadbacac | 8d888248f7ba9fd20a4a7f60549131c765a7be5d | ec270df36d11669721c89023984cf3fe9fda2c5e | refs/heads/master | 2021-04-23T07:01:57.968925 | 2020-02-12T02:25:58 | 2020-02-12T02:25:58 | 249,907,962 | 1 | 0 | null | 2020-03-25T06:54:55 | 2020-03-18T01:24:30 | 2020-02-12T02:25:59 | null | [
{
"alpha_fraction": 0.5733333230018616,
"alphanum_fraction": 0.5733333230018616,
"avg_line_length": 17.75,
"blob_id": "a44db29905a52bbb037122246e770ebf93329de9",
"content_id": "fa81a7836650af71fac5bf617751c2a12b20b20d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 4,
"path": "/Quote.py",
"repo_name": "dishpzga/ESunnyAPI",
"src_encoding": "UTF-8",
"text": "from Code.Run import get_quote\n\nif __name__ == '__main__':\n get_quote()\n"
},
{
"alpha_fraction": 0.4986208975315094,
"alphanum_fraction": 0.5163959264755249,
"avg_line_length": 22.81751823425293,
"blob_id": "194c05cf190a663ce5b8cbc97299969d7e91a803",
"content_id": "4f333b26129690a1535027a5fb282a3892b8e3dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3527,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 137,
"path": "/Code/Run.py",
"repo_name": "dishpzga/ESunnyAPI",
"src_encoding": "UTF-8",
"text": "import socket\nfrom time import sleep\n\nfrom path import Path\n\nimport redis\nfrom Include.Sarge import ES\nfrom Include.Log import log\nfrom Include.OlConfig import Config\n\nCONFIG = Config().config['root']\n\n\nclass Publish:\n def __init__(self):\n self.way = CONFIG['publish']\n self.tool = None\n\n def __config_tool(self):\n self.way = CONFIG['publish']\n if self.way == 1: # redis\n tool = self.__get_redis()\n\n def send(msg):\n chanel = ''.join(msg[:3])\n tool.publish(chanel, ','.join(msg))\n\n elif self.way == 2: # socket\n tool = self.__get_socket()\n\n def send(msg):\n tool.send(bytes(','.join(msg), encoding='gbk'))\n\n elif self.way == 3: # file\n def send(msg):\n name = f\"Bin//{''.join(msg[:3])}.txt\"\n Path(name).write_text(','.join(msg))\n\n else:\n send = print\n\n self.tool = send\n\n @staticmethod\n def __get_redis():\n redis_conf = CONFIG['redis'].dict_props\n redis_conf['socket_timeout'] = 3\n pool = redis.ConnectionPool(**redis_conf)\n r = redis.Redis(connection_pool=pool)\n log('start Redis->' + ','.join(redis_conf))\n return r\n\n @staticmethod\n def __get_socket():\n socket_conf = CONFIG['socket'].dict_props\n srv = socket.socket() # 创建一个socket\n srv.bind((socket_conf['ip'], socket_conf['port']))\n srv.listen(5)\n\n log(f\"socket等待链接\")\n\n connect_socket, _ = srv.accept()\n\n log(f\"socket链接成功\")\n return connect_socket\n\n def get_tool(self):\n self.__config_tool()\n return self.tool\n\n\n# 判断是否登录成功\ndef asset_success(es, exe_id='1'):\n for _ in range(500):\n es.reading_out()\n if es.success:\n print('for_num', _)\n log(f'{exe_id}登录成功')\n return True\n elif es.error_:\n log(f'{exe_id}登录失败->{es.error_}')\n return False\n else:\n log(f'{exe_id}登录未知')\n es.kill(f'{exe_id}Kill')\n return False\n\n\n# 循环登录获取可以使用的ES2\ndef log_es2(r, es2_list):\n # 指定登录账号\n es2 = ES('Bin/Data/APP2/9762.exe', r, app_id='2')\n es2.config_re_login(30) # 重连时间,s\n es2.config_account(ip=CONFIG['ip'],\n port=CONFIG['port'],\n username=CONFIG['username'],\n password=CONFIG['password'],\n auth_code=CONFIG['auth_code'])\n\n if asset_success(es2, '2'): # 登录成功\n log('2开始订阅')\n for i in es2_list:\n es2.config_subscribes(i) # 订阅\n log('2订阅完毕,等待交易所')\n return es2\n\n return None\n\n\n# 190810优化逻辑\ndef get_quote():\n # 分发\n r = Publish().get_tool()\n\n # 所有需要订阅的品种,190702增加去重功能\n con = CONFIG['contracts']\n\n # 断线重连,断线重连的时候也要判断是否在有效期内\n _error = 'No log in'\n while 1:\n es2 = log_es2(r, con)\n if es2 is not None: # 如果es2成功\n _error = loop(es2)\n else: # 如果es2不成功\n _error = 'No log in' # 全军覆没\n log(f'restart->{_error}')\n sleep(3)\n\n\ndef loop(es2):\n while es2.should_loop:\n es2.reading_out()\n if es2.error_:\n es2.kill()\n return es2.error_\n else:\n return 'NoCode'\n"
},
{
"alpha_fraction": 0.4242105185985565,
"alphanum_fraction": 0.47999998927116394,
"avg_line_length": 22.19512176513672,
"blob_id": "3c581eed6efdd3cad314e3de51ecc984f43deb20",
"content_id": "964108aebbec318e58b12029208608285b8fbac2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1310,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 41,
"path": "/README.md",
"repo_name": "dishpzga/ESunnyAPI",
"src_encoding": "UTF-8",
"text": "ESunnyAPI是一款基于C++/Python的易盛信息官方封装开源库,用于易盛信息的行情获取以及交易。\n如果你在开发易盛信息程序的时候遇到了奇奇怪怪的问题,请使用Quote.exe,其中打包了详尽的错误处理,可以快速定位问题。\n\n\n祝贺 ESunnyAPI 入选 GITEE 最有价值开源项目 GVP\n-----------------------------------------------\n\n\n30s内上手EsunnyAPI?\n--------------------\n- **配置账号信息**\n - ip = '61.163.243.173'\n - port = '7171'\n - username = 'ES' # 用户名\n - password = '123456' # 密码\n - auth_code = '' # 授权码,默认为空\n--------------------\n- **配置订阅品种**\n - contracts = ['COMEX F GC 1912', 'COMEX F GC 1910']\n--------------------\n- **配置发布方式**\n - publish = 0 # 0是打印;1是redis;2是socket;3是file\n--------------------\n- **配置发布信息**\n- **如果publish==1,需要配置redis**\n - redis = {'host': 'localhost',\n 'port': 6379,\n 'db': 0,\n 'password': None\n }\n- **如果publish==2,需要配置socket**\n - socket = {'ip': '127.0.0.1',\n 'port': 8080}\n\n--------------------\n # 运行\n - 慵懒版:直接运行Quote.exe\n - 勤劳版:运行Quote.py\n \n有任何问题请联系QQ:976308589;\n--------------------"
},
{
"alpha_fraction": 0.4668094217777252,
"alphanum_fraction": 0.5588865280151367,
"avg_line_length": 18.45833396911621,
"blob_id": "a2a091c6cd4427ce163b140dbddb041275b69aba",
"content_id": "80873427de914d6e7703a4f7b3ab9a681b0d21ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 24,
"path": "/Bin/Config.py",
"repo_name": "dishpzga/ESunnyAPI",
"src_encoding": "UTF-8",
"text": "# 配置账号信息\nip = '61.163.243.173'\nport = '7171'\nusername = 'ES' # 用户名\npassword = '123456' # 密码\nauth_code = '' # 授权码,默认为空\n\n# 配置订阅品种\ncontracts = ['COMEX F GC 1912', 'COMEX F GC 1910']\n\n# 配置发布方式\n\npublish = 0 # 0是打印;1是redis;2是socket;3是file\n\n# 配置发布信息\n# 如果publish==1,需要配置redis\nredis = {'host': 'localhost',\n 'port': 6379,\n 'db': 0,\n 'password': None\n }\n# 如果publish==2,需要配置socket\nsocket = {'ip': '127.0.0.1',\n 'port': 8080}\n"
}
] | 4 |
YuuichiHosomi/nicocomment | https://github.com/YuuichiHosomi/nicocomment | afbbeba5cb79c1b284430558773811fb3caebe06 | 365bf8ede171c8ee2ad4e91181258413dfb84f8d | dc73aefa6beea48e47441c7e0738e6df5f3dcdb6 | refs/heads/master | 2021-05-30T12:11:26.121477 | 2013-11-29T03:36:39 | 2013-12-02T06:29:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6032906770706177,
"alphanum_fraction": 0.6106032729148865,
"avg_line_length": 17.233333587646484,
"blob_id": "22f1c3681febca1f11e3a65a5a513e3ff291be79",
"content_id": "b72d48ba4021b0c8bc6c61861cce5c599e7adb80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1094,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 60,
"path": "/nicocomment.sh",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nset -e\n\nbasedir=$(cd $(dirname $0);pwd)\npyenv=${basedir}/venv/bin/activate\nprogram=${basedir}/nicocomment.py\nlogfile=${basedir}/log/nicocomment.log\nkill_python=\"python ${program}\"\nmonitor_threshold=$((30))\ncustomenv=${basedir}/nicocomment.env\n\nstart() {\n nohup ${program} >> ${logfile} 2>&1 &\n}\n\nstop() {\n pkill -f \"${kill_python}\" || true\n echo \"killed.\" >> ${logfile}\n}\n\nmonitor() {\n echo $(date) monitor start\n\n last_modified=$(date -r ${logfile} +%s)\n # last_modified=0\n current=$(date +%s)\n # echo $last_modified\n # echo $current\n\n if [ $((${last_modified} + ${monitor_threshold})) -lt ${current} ]\n then\n echo $(date) \"it seems that the file ${logfile} is not updated in ${monitor_threshold} seconds, so try to restart.\"\n stop\n start\n fi\n\n echo $(date) monitor end\n}\n\nsource ${pyenv}\n\nif [ -e ${customenv} ]; then\n source ${customenv}\nfi\n\ncase \"$1\" in\n start)\n stop\n start ;;\n stop)\n stop ;;\n restart)\n stop\n start ;;\n monitor)\n monitor ;;\n *)\n echo $\"usage: $prog {start|stop|restart|monitor}\" && exit 1\nesac\n"
},
{
"alpha_fraction": 0.5019259452819824,
"alphanum_fraction": 0.5165926218032837,
"avg_line_length": 35.684783935546875,
"blob_id": "ab480f01be872e307f60641dd95992a6d7512548",
"content_id": "5ae5321954cbfa57a4d9caa72dbc434118200340",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6790,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 184,
"path": "/nicoalert.py",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport ConfigParser\nimport logging\nimport logging.config\nimport datetime\nimport urllib\nimport urllib2\nimport socket\nimport threading\nfrom threading import Thread\nfrom threading import Timer\nfrom lxml import etree\n\nimport nicoerror\nimport nicolive\n\nNICOCOMMENT_CONFIG = os.path.dirname(os.path.abspath(__file__)) + '/nicocomment.config'\n\nANTENNA_URL = 'https://secure.nicovideo.jp/secure/login?site=nicolive_antenna'\nGET_ALERT_STATUS_URL = 'http://live.nicovideo.jp/api/getalertstatus'\n\n\nclass NicoAlert(object):\n# object lifecycle\n def __init__(self):\n self.logger = logging.getLogger()\n self.received_live_count = 0\n\n (self.mail, self.password) = self.get_config()\n self.logger.debug(\"mail: %s password: %s\" % (self.mail, self.password))\n self.logger.debug(\"nicoalert initialized.\")\n\n def __del__(self):\n pass\n\n# utility\n def get_config(self):\n config = ConfigParser.ConfigParser()\n config.read(NICOCOMMENT_CONFIG)\n mail = config.get(\"nicoalert\", \"mail\")\n password = config.get(\"nicoalert\", \"password\")\n\n return mail, password\n\n# nico\n def get_ticket(self):\n query = {'mail': self.mail, 'password': self.password}\n res = urllib2.urlopen(ANTENNA_URL, urllib.urlencode(query))\n\n # res_data = xml.fromstring(res.read())\n res_data = etree.fromstring(res.read())\n # self.logger.debug(etree.tostring(res_data))\n # sample response\n #{'nicovideo_user_response': {'status': {'value': 'ok'},\n # 'ticket': {'value': 'xxx'},\n # 'value': '\\n\\t'}}\n\n ticket = res_data.xpath(\"//ticket\")[0].text\n self.logger.debug(\"ticket: %s\" % ticket)\n\n return ticket\n\n def get_alert_status(self, ticket):\n query = {'ticket': ticket}\n res = urllib2.urlopen(GET_ALERT_STATUS_URL, urllib.urlencode(query))\n\n res_data = etree.fromstring(res.read())\n # self.logger.debug(etree.tostring(res_data))\n status = res_data.xpath(\"//getalertstatus\")[0].attrib[\"status\"]\n # sample response\n # {'getalertstatus':\n # {'communities': {'community_id': {'value': 'co9320'}},\n # 'ms': {'addr': {'value': 'twr02.live.nicovideo.jp'},\n # 'port': {'value': '2532'},\n # 'thread': {'value': '1000000015'}},\n # 'status': {'value': 'ok'},\n # 'time': {'value': '1324980560'},\n # 'user_age': {'value': '19'},\n # 'user_hash': {'value': 'xxxxxxxxxxxxxxxxxxxxxxxxxxx'},\n # 'user_id': {'value': 'xxxxxxxx'},\n # 'user_name': {'value': 'miettal'},\n # 'user_prefecture': {'value': '12'},\n # 'user_sex': {'value': '1'}}}\n # if res_data.getalertstatus.status != 'ok' :\n if status != 'ok':\n raise nicoerror.NicoAuthorizationError\n\n communities = []\n for community_id in res_data.xpath(\"//community_id\"):\n communities.append(community_id.text)\n # self.logger.debug(communities)\n\n host = None\n port = None\n thread = None\n\n host = res_data.xpath(\"//getalertstatus/ms/addr\")[0].text\n port = int(res_data.xpath(\"//getalertstatus/ms/port\")[0].text)\n thread = res_data.xpath(\"//getalertstatus/ms/thread\")[0].text\n self.logger.debug(\"host: %s port: %s thread: %s\" % (host, port, thread))\n\n return communities, host, port, thread\n\n# main\n def listen_alert(self, host, port, thread, handler):\n # main loop\n # self.schedule_stream_stat_timer()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(60)\n sock.connect((host, port))\n sock.sendall(('<thread thread=\"%s\" version=\"20061206\" res_form=\"-1\"/>'\n + chr(0)) % thread)\n\n # schedule log timer\n self.log_statistics()\n\n msg = \"\"\n while 
True:\n rcvmsg = sock.recv(1024)\n for ch in rcvmsg:\n if ch == chr(0):\n res_data = etree.fromstring(msg)\n\n try:\n # 'thread'\n thread = res_data.xpath(\"//thread\")\n if thread:\n self.logger.debug(\"started receiving live information.\")\n\n # 'chat'\n chats = res_data.xpath(\"//chat\")\n if chats:\n for chat in chats:\n # self.logger.debug(etree.tostring(chat[0]))\n live_info = chat.text\n # self.logger.debug(live_info)\n\n # value = \"102351738,官邸前抗議の首都圏反原発連合と 脱原発を…\"\n # value = \"102373563,co1299695,7169359\"\n lives = live_info.split(',')\n\n if len(lives) == 3:\n # the stream is NOT the official one\n live_id, community_id, user_id = lives\n self.logger.debug(\"received alert, live_id: %s \"\n \"community_id: %s user_id: %s\" %\n (live_id, community_id, user_id))\n\n handler(live_id, community_id, user_id)\n self.received_live_count += 1\n except KeyError:\n self.logger.debug(\"received unknown information.\")\n msg = \"\"\n else:\n msg += ch\n self.logger.debug(\"!!! encountered unexpected alert recv() end... !!!\")\n\n def handle_live(self, live_id, community_id, user_id):\n # self.logger.debug(\"*** live started: %s\" % live_id)\n live = nicolive.NicoLive(self.mail, self.password, community_id, live_id)\n p = Thread(target=live.start, args=())\n p.start()\n\n def start(self):\n ticket = self.get_ticket()\n communities, host, port, thread = self.get_alert_status(ticket)\n self.listen_alert(host, port, thread, self.handle_live)\n\n def log_statistics(self):\n self.logger.debug(\n \"*** received lives: %s active live threads: %s sum total comments: %s\" %\n (self.received_live_count,\n threading.active_count(), nicolive.NicoLive.sum_total_comment_count))\n\n t = Timer(10, self.log_statistics)\n t.start()\n\n\nif __name__ == \"__main__\":\n nicoalert = NicoAlert()\n nicoalert.go()\n"
},
{
"alpha_fraction": 0.5267489552497864,
"alphanum_fraction": 0.5408220291137695,
"avg_line_length": 41.32903289794922,
"blob_id": "40d39fc8b8c1d6072359c0fb5c7b06373de0c0f5",
"content_id": "75ca623be819dc0be57e8d6bf9714329dc4d04a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19757,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 465,
"path": "/nicolive.py",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport ConfigParser\nimport logging\nimport logging.config\nimport urllib2\nimport socket\nfrom threading import Thread\nfrom threading import Timer\nfrom lxml import etree\nimport time\nimport re\nimport cookielib\nimport tweepy\n\nfrom nicoerror import UnexpectedStatusError\n\nCOOKIE_CONTAINER_INITILIZATION_SLEEP_TIME = 3\nSOCKET_TIMEOUT = 60 * 30\n\nCOOKIE_CONTAINER_NOT_INITIALIZED = 0\nCOOKIE_CONTAINER_INITIALIZING = 1\nCOOKIE_CONTAINER_INITIALIZED = 2\n\nNICOCOMMENT_CONFIG = os.path.dirname(os.path.abspath(__file__)) + '/nicocomment.config'\n\nLOGIN_URL = \"https://secure.nicovideo.jp/secure/login?site=niconico\"\nGET_STREAM_INFO_URL = \"http://live.nicovideo.jp/api/getstreaminfo/lv\"\nGET_PLAYER_STATUS_URL = \"http://watch.live.nicovideo.jp/api/getplayerstatus?v=lv\"\n# DEBUG_LOG_COMMENT = True\nDEBUG_LOG_COMMENT = False\n\nLIVE_URL = \"http://live.nicovideo.jp/watch/lv\"\n\nCOMMENT_SERVER_HOST_NUMBER_FIRST = 101\nCOMMENT_SERVER_HOST_NUMBER_LAST = 104\nCOMMENT_SERVER_PORT_FIRST = 2805\nCOMMENT_SERVER_PORT_LAST = 2814\n\n\nclass NicoLive(object):\n# class variables\n logger = None\n cookie_container_status = COOKIE_CONTAINER_NOT_INITIALIZED\n cookie_container = None\n sum_total_comment_count = 0\n\n# object life cycle\n def __init__(self, mail, password, community_id, live_id):\n self.logger = logging.getLogger()\n self.mail = mail\n self.password = password\n self.community_id = community_id\n self.live_id = live_id\n self.comment_count = 0\n self.last_comment = \"\"\n\n (self.force_debug_tweet, self.monitoring_user_ids) = self.get_config()\n # self.logger.debug(\"monitoring_user_ids: %s\" % self.monitoring_user_ids)\n\n self.header_text = {}\n self.consumer_key = {}\n self.consumer_secret = {}\n self.access_key = {}\n self.access_secret = {}\n for user_id in self.monitoring_user_ids:\n (self.header_text[user_id],\n self.consumer_key[user_id], self.consumer_secret[user_id],\n self.access_key[user_id], self.access_secret[user_id]) = (\n self.get_twitter_credentials(user_id))\n \"\"\"\n self.logger.debug(\"user_id: \" + user_id)\n self.logger.debug(\"header_text: \" + self.header_text[user_id])\n self.logger.debug(\n \"consumer_key: %s consumer_secret: ***\" % self.consumer_key[user_id])\n self.logger.debug(\n \"access_key: %s access_secret: ***\" % self.access_key[user_id])\n \"\"\"\n\n # self.logger.debug(\"*** __init__ nicolive, live: %s\" % self.live_id)\n\n def __del__(self):\n # self.logger.debug(\"*** __del__ nicolive, live: %s\" % self.live_id)\n pass\n\n# config\n def get_config(self):\n config = ConfigParser.ConfigParser()\n config.read(NICOCOMMENT_CONFIG)\n\n if config.get(\"nicolive\", \"force_debug_tweet\").lower() == \"true\":\n force_debug_tweet = True\n else:\n force_debug_tweet = False\n\n try:\n monitoring_user_ids = config.get(\"nicolive\", \"monitoring_user_ids\").split(',')\n except ConfigParser.NoOptionError, unused_error:\n monitoring_user_ids = None\n\n return force_debug_tweet, monitoring_user_ids\n\n def get_twitter_credentials(self, user_id):\n config = ConfigParser.ConfigParser()\n config.read(NICOCOMMENT_CONFIG)\n section = user_id\n\n header_text = config.get(section, \"header_text\")\n consumer_key = config.get(section, \"consumer_key\")\n consumer_secret = config.get(section, \"consumer_secret\")\n access_key = config.get(section, \"access_key\")\n access_secret = config.get(section, \"access_secret\")\n\n return header_text, consumer_key, consumer_secret, access_key, 
access_secret\n\n# twitter\n def update_twitter_status(self, user_id, comment):\n try:\n self.last_status_update_user_id\n self.last_status_update_comment\n except AttributeError:\n self.last_status_update_user_id = None\n self.last_status_update_comment = None\n\n auth = tweepy.OAuthHandler(self.consumer_key[user_id], self.consumer_secret[user_id])\n auth.set_access_token(self.access_key[user_id], self.access_secret[user_id])\n status = \"[%s]\\n%s\\n%s%s\".encode('UTF-8') % (\n self.header_text[user_id], comment.encode('UTF-8'), LIVE_URL, self.live_id)\n\n if (user_id == self.last_status_update_user_id and\n comment == self.last_status_update_comment):\n # duplicated tweet. skip\n pass\n else:\n try:\n tweepy.API(auth).update_status(status)\n except tweepy.error.TweepError, error:\n self.logger.debug(\"error in post, user_id: %s comment: %s error_response: %s\" %\n (user_id, comment, error))\n\n self.last_status_update_user_id = user_id\n self.last_status_update_comment = comment\n\n# main\n @classmethod\n def get_cookie_container(cls, mail, password):\n if cls.cookie_container is None:\n cls.cookie_container_status = COOKIE_CONTAINER_INITIALIZING\n\n cookiejar = cookielib.CookieJar()\n opener = urllib2.build_opener(\n urllib2.HTTPCookieProcessor(cookiejar))\n # self.logger.debug(\"finished setting up cookie library.\")\n\n opener.open(LOGIN_URL, \"mail=%s&password=%s\" % (mail, password))\n # self.logger.debug(\"finished login.\")\n\n cls.cookie_container = opener\n cls.cookie_container_status = COOKIE_CONTAINER_INITIALIZED\n print \"cookie container opened\"\n\n return cls.cookie_container\n\n def get_stream_info(self, live_id):\n res = urllib2.urlopen(GET_STREAM_INFO_URL + live_id)\n xml = res.read()\n element = etree.fromstring(xml)\n # self.logger.debug(etree.tostring(element))\n\n status = element.xpath(\"//getstreaminfo\")[0].attrib[\"status\"]\n # status = \"fail\"\n if status == \"ok\":\n community_name = element.xpath(\"//getstreaminfo/communityinfo/name\")[0].text\n live_name = element.xpath(\"//getstreaminfo/streaminfo/title\")[0].text\n # set \"n/a\", when no value provided; like <title/>\n if community_name is None:\n community_name = \"n/a\"\n if live_name is None:\n live_name = \"n/a\"\n else:\n raise UnexpectedStatusError(status)\n\n return community_name, live_name\n\n def get_player_status(self, cookie_container, live_id):\n res = cookie_container.open(GET_PLAYER_STATUS_URL + live_id)\n\n element = etree.fromstring(res.read())\n # self.logger.debug(etree.tostring(element))\n status = element.xpath(\"//getplayerstatus\")[0].attrib[\"status\"]\n if status != 'ok':\n code = element.xpath(\"//getplayerstatus/error/code\")[0].text\n raise UnexpectedStatusError(status, code)\n\n room_label = element.xpath(\"//getplayerstatus/user/room_label\")[0].text\n\n host = element.xpath(\"//getplayerstatus/ms/addr\")[0].text\n port = int(element.xpath(\"//getplayerstatus/ms/port\")[0].text)\n thread = int(element.xpath(\"//getplayerstatus/ms/thread\")[0].text)\n\n self.logger.debug(\"*** getplayerstatus, live_id: %s room_label: %s \"\n \"host: %s port: %s thread: %s\" %\n (live_id, room_label, host, port, thread))\n return room_label, host, port, thread\n\n def split_host(self, host):\n matched_host = re.match('(msg)(\\d+)(\\..+)', host)\n if not matched_host:\n return (None, None, None)\n\n host_prefix = matched_host.group(1)\n host_number = int(matched_host.group(2))\n host_surfix = matched_host.group(3)\n\n return (host_prefix, host_number, host_surfix)\n\n def 
get_arena_comment_server(self, stand_type, arena_host, arena_port, arena_thread):\n host = arena_host\n port = arena_port\n thread = arena_thread\n\n decrement_count = 0\n if stand_type == \"A\":\n decrement_count = 1\n elif stand_type == \"B\":\n decrement_count = 2\n elif stand_type == \"C\":\n decrement_count = 3\n\n (host_prefix, host_number, host_surfix) = self.split_host(host)\n if host_prefix is None or host_number is None or host_surfix is None:\n return (host, port, thread)\n\n for i in xrange(decrement_count):\n if port == COMMENT_SERVER_PORT_FIRST:\n port = COMMENT_SERVER_PORT_LAST\n if host_number == COMMENT_SERVER_HOST_NUMBER_FIRST:\n host_number = COMMENT_SERVER_HOST_NUMBER_LAST\n else:\n host_number -= 1\n else:\n port -= 1\n thread -= 1\n\n return (host_prefix + str(host_number) + host_surfix, port, thread)\n\n def get_comment_servers(self, room_label, host, port, thread):\n \"\"\"\n self.logger.debug(\n \"provided comment server, room_label: %s host: %s port: %s thread: %s\" %\n (room_label, host, port, thread))\n \"\"\"\n comment_servers = []\n\n matched_room = re.match('co\\d+', room_label)\n if matched_room:\n # arena\n # self.logger.debug(\"no need to adjust the room\")\n pass\n else:\n matched_room = re.match(u'立ち見(\\w)列', room_label)\n if matched_room:\n # stand A, B, C. host, port, thread should be adjusted\n stand_type = matched_room.group(1)\n (host, port, thread) = self.get_arena_comment_server(\n stand_type, host, port, thread)\n # self.logger.debug(\"adjusted arena server, host: %s port: %s thread: %s\" %\n # (host, port, thread))\n else:\n # channel live? not supported for now\n self.logger.debug(\"live is not user live, so skip\")\n return comment_servers\n\n (host_prefix, host_number, host_surfix) = self.split_host(host)\n if host_prefix is None or host_number is None or host_surfix is None:\n return comment_servers\n\n for i in xrange(4):\n comment_servers.append((host_prefix + str(host_number) + host_surfix, port, thread))\n if port == COMMENT_SERVER_PORT_LAST:\n port = COMMENT_SERVER_PORT_FIRST\n if host_number == COMMENT_SERVER_HOST_NUMBER_LAST:\n host_number = COMMENT_SERVER_HOST_NUMBER_FIRST\n else:\n host_number += 1\n else:\n port += 1\n thread += 1\n\n return comment_servers\n\n def connect_to_server(self, host, port, thread):\n # main loop\n # self.schedule_stream_stat_timer()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(SOCKET_TIMEOUT)\n sock.connect((host, port))\n sock.sendall(('<thread thread=\"%s\" version=\"20061206\" res_form=\"-1\"/>'\n + chr(0)) % thread)\n\n self.logger.debug(\"*** opened live thread, lv: %s server: %s,%s,%s\" %\n (self.live_id, host, port, thread))\n message = \"\"\n while True:\n try:\n recved = sock.recv(1024)\n except socket.timeout, e:\n self.logger.debug(\"detected timeout at socket recv().\")\n break\n should_close_connection = False\n\n for character in recved:\n if character == chr(0):\n # self.logger.debug(\"live_id: %s server: %s,%s,%s xml: %s\" %\n # (self.live_id, host, port, thread, message))\n # wrap message using dummy \"elements\" tag to avoid parse error\n message = \"<elements>\" + message + \"</elements>\"\n\n try:\n element = etree.fromstring(message)\n except etree.XMLSyntaxError, e:\n self.logger.debug(\"nicolive xml parse error: %s\" % e)\n self.logger.debug(\"xml: %s\" % message)\n\n try:\n thread_element = element.xpath(\"//elements/thread\")\n if 0 < len(thread_element):\n # self.logger.debug(\"live_id: %s server: %s,%s,%s xml: %s\" %\n # (self.live_id, host, 
port, thread, message))\n result_code = thread_element[0].attrib.get('resultcode')\n if result_code == \"1\":\n # no comments will be provided from this thread\n should_close_connection = True\n break\n else:\n chats = element.xpath(\"//elements/chat\")\n if 1 < len(chats):\n # self.logger.debug(\"xml: %s\" % message)\n pass\n for chat in chats:\n # self.logger.debug(etree.tostring(chat))\n user_id = chat.attrib.get('user_id')\n premium = chat.attrib.get('premium')\n if premium is None:\n premium = \"0\"\n comment = chat.text\n \"\"\"\n self.logger.debug(\n \"live_id: %s server: %s,%s,%s user_id: %s comment: %s\" %\n (self.live_id, host, port, thread, user_id, comment))\n \"\"\"\n if comment == self.last_comment:\n continue\n self.last_comment = comment\n self.comment_count += 1\n\n NicoLive.sum_total_comment_count += 1\n\n for monitoring_user_id in self.monitoring_user_ids:\n if self.force_debug_tweet:\n user_id = monitoring_user_id\n if user_id == monitoring_user_id:\n self.update_twitter_status(user_id, comment)\n if self.force_debug_tweet:\n should_close_connection = True\n break\n\n if premium in ['2', '3'] and comment == \"/disconnect\":\n # see the references below for details of the conbination of\n # premium attribute value and disconnect command:\n # - http://www.yukun.info/blog/2008/08/python-if-for-in.html \n # - https://twitter.com/Hemus_/status/6766945512\n self.logger.debug(\n \"detected command: %s w/ premium: %s\" %\n (comment, premium))\n # self.logger.debug(\"disconnect, xml: %s\" % message)\n should_close_connection = True\n break\n except KeyError:\n self.logger.debug(\"received unrecognized data.\")\n message = \"\"\n else:\n message += character\n if recved == '' or should_close_connection:\n # self.logger.debug(\"break\")\n break\n # self.logger.debug(\"%s, (socket closed.)\" % self.live_id)\n self.logger.debug(\"*** closed live thread, lv: %s server: %s,%s,%s comments: %s\" %\n (self.live_id, host, port, thread, self.comment_count))\n\n# public method\n def start(self):\n try:\n (community_name, live_name) = self.get_stream_info(self.live_id)\n self.logger.debug(\n \"*** stream info, lv: %s community name: %s live name: %s\" %\n (self.live_id, community_name, live_name))\n except Exception, e:\n self.logger.debug(\"could not get stream info: %s\" % e)\n\n if NicoLive.cookie_container_status == COOKIE_CONTAINER_INITIALIZING:\n time.sleep(COOKIE_CONTAINER_INITILIZATION_SLEEP_TIME)\n cookie_container = self.get_cookie_container(self.mail, self.password)\n\n (room_label, host, port, thread) = (None, None, None, None)\n try:\n (room_label, host, port, thread) = self.get_player_status(\n cookie_container, self.live_id)\n except UnexpectedStatusError, e:\n if e.code in [\"notfound\", \"require_community_member\"]:\n self.logger.debug(\"caught 'expected' error, so quit: %s\" % e)\n # exit\n else:\n self.logger.debug(\"caught 'unexpected' error, so try to clear session: %s\" % e)\n # TODO: improve logic\n # possible case of session expiration, so try again\n NicoLive.cookie_container = None\n try:\n cookie_container = self.get_cookie_container(self.mail, self.password)\n (room_label, host, port, thread) = self.get_player_status(\n cookie_container, self.live_id)\n except UnexpectedStatusError, e:\n self.logger.debug(\"again: could not get player status: %s\" % e)\n\n if (room_label is not None and\n host is not None and port is not None and thread is not None):\n comment_servers = self.get_comment_servers(room_label, host, port, thread)\n # self.logger.debug(\"comment 
servers: %s\" % comment_servers)\n\n for (host, port, thread) in comment_servers:\n nicolive = NicoLive(self.mail, self.password, self.community_id, self.live_id)\n t = Thread(target=nicolive.connect_to_server, args=(host, port, thread))\n t.start()\n\n\nif __name__ == \"__main__\":\n logging.config.fileConfig(NICOCOMMENT_CONFIG)\n\n # \"\"\"\n nicolive = NicoLive(sys.argv[1], sys.argv[2], 0, sys.argv[3])\n nicolive.start()\n # \"\"\"\n\n \"\"\"\n nicolive = NicoLive(\"mail\", \"pass\", 0, 123)\n nicolive.update_twitter_status(\"784552\", u\"日本語\")\n nicolive.update_twitter_status(\"784552\", u\"日本語\")\n nicolive.update_twitter_status(\"784552\", u\"abc\")\n nicolive.update_twitter_status(\"784552\", u\"日本語\")\n \"\"\"\n\n \"\"\"\n nicolive = NicoLive(\"mail\", \"pass\", 0, 123)\n nicolive.get_comment_servers(u\"co12345\", \"msg103.live.nicovideo.jp\", 2808, 1314071859)\n nicolive.get_comment_servers(u\"立ち見A列\", \"msg103.live.nicovideo.jp\", 2808, 1314071859)\n nicolive.get_comment_servers(u\"立ち見A列\", \"msg103.live.nicovideo.jp\", 2805, 1314071859)\n nicolive.get_comment_servers(u\"立ち見A列\", \"msg101.live.nicovideo.jp\", 2805, 1314071859)\n nicolive.get_comment_servers(u\"立ち見B列\", \"msg101.live.nicovideo.jp\", 2805, 1314071859)\n nicolive.get_comment_servers(u\"立ち見C列\", \"msg101.live.nicovideo.jp\", 2805, 1314071859)\n nicolive.get_comment_servers(u\"立ち見Z列\", \"msg101.live.nicovideo.jp\", 2805, 1314071859)\n nicolive.get_comment_servers(u\"ch12345\", \"msg101.live.nicovideo.jp\", 2805, 1314071859)\n \"\"\"\n"
},
{
"alpha_fraction": 0.44565218687057495,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 14.333333015441895,
"blob_id": "cd24beea6f19a5414f77bed547c777d15158b809",
"content_id": "4a2031d63b3c7fc1e76956157c9a4643f83271c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "argparse==1.2.1\ndistribute==0.6.34\nlxml==3.2.4\npep8==1.4.6\npychecker==0.8.19\nwsgiref==0.1.2\n"
},
{
"alpha_fraction": 0.6502242088317871,
"alphanum_fraction": 0.6517189741134644,
"avg_line_length": 21.299999237060547,
"blob_id": "a93f17af9caa2089786f461af45c9e61f11842e3",
"content_id": "e4fcacfe2cec1adedbd8c3e117066b35c56f9a73",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 30,
"path": "/nicocomment.py",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nimport logging.config\nimport ConfigParser\nimport nicoalert\n\nNICOCOMMENT_CONFIG = os.path.dirname(os.path.abspath(__file__)) + '/nicocomment.config'\n\n\nclass NicoComment(object):\n# object life cycle\n def __init__(self):\n logging.config.fileConfig(NICOCOMMENT_CONFIG)\n self.logger = logging.getLogger(\"root\")\n self.logger.debug(\"nicocomment initialized.\")\n\n def __del__(self):\n pass\n\n# main\n def open_alert(self):\n alert = nicoalert.NicoAlert()\n alert.start()\n\nif __name__ == \"__main__\":\n nicocomment = NicoComment()\n nicocomment.open_alert()\n"
},
{
"alpha_fraction": 0.5773480534553528,
"alphanum_fraction": 0.580110490322113,
"avg_line_length": 21.625,
"blob_id": "5c6b52dc9044554ef01460b3123712994522fc91",
"content_id": "27ca146f359f4add6e33ae1b18e60aae50b68afa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 16,
"path": "/nicoerror.py",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass NicoAuthorizationError(Exception):\n pass\n\n\nclass UnexpectedStatusError(Exception):\n def __init__(self, status, code=\"\"):\n self.status = status\n self.code = code\n\n def __str__(self):\n return ('unexpected status \"%s\", code \"%s\" found.' %\n (self.status, self.code))\n"
},
{
"alpha_fraction": 0.709932267665863,
"alphanum_fraction": 0.7313769459724426,
"avg_line_length": 20.349397659301758,
"blob_id": "130e4f700d78cc316e23c78c7b225ee03a398275",
"content_id": "629bc888c0eee98d1ae11c1b100b414706665101",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1772,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 83,
"path": "/README.md",
"repo_name": "YuuichiHosomi/nicocomment",
"src_encoding": "UTF-8",
"text": "nicocomment\n==\nmonitor specified user's niconama comments, and tweet them.\n\nsample\n-------------\n![sample](./sample/screenshot.png)\n- http://www.nicovideo.jp/watch/sm22365097\n\nrequirements\n--\n- python 2.7.x\n - versions except 2.7.x is not tested\n\nsetup\n--\nfirst, setup runtime environment.\n````\n$ git submodule update --init\n$ virtualenv --distribute venv\n$ source ./venv/bin/activate\n$ pip install -r requirements.txt\n````\n\nthen configure application specific settings. see the sample configuration contents for details.\n````\n$ cp ./nicocomment.config.sample ./nicocomment.config\n$ vi ./nicocomment.config\n````\n\nconfigure environment\n--\nnicocomment requires lots of os resources, please tune the system as followings.\n\nfirst, check the current resource limit configuration.\n````\n$ ulimit -a\n````\n\nthen configure the max open files and max open processes settings.\n````\n$ sudo vi /etc/security/limits.conf\n\n# for opening tons of sockets to comment servers.\nhonishi soft nofile 32768\nhonishi hard nofile 32768\n\n# for forking thread in the live comment listening.\n# thread is treated as process internally in the kernel that uses NPTL(Native POSIX Thread Library)\nhonishi soft nproc 32768\nhonishi hard nproc 32768\n````\n\nrestart and check the settings above are successfully configured.\n````\n$ sudo reboot\n$ ulimit -a\n````\n\nstart & stop\n--\nstart.\n````\n./nicocomment.sh start\n````\nstop.\n````\n./nicocomment.sh stop\n````\n\nmonitoring\n--\nsee `nicocomment.sh` inside for the details of monitoring.\n\n\t# monitoring nicocomment\n\t* * * * * /path/to/nicocomment/nicocomment.sh monitor >> /path/to/nicocomment/log/monitor.log 2>&1\n\nlicense\n--\ncopyright © 2013- honishi, hiroyuki onishi.\n\ndistributed under the [MIT license][mit].\n[mit]: http://www.opensource.org/licenses/mit-license.php\n"
}
] | 7 |
ideascup/bzp | https://github.com/ideascup/bzp | 3ca31fe1d06bd75e8aff8490237e0abb43237f00 | 7b9cc6c5862307b71c1cf35967241888f316b549 | 95100989de49c9fc89b74e911fd47c5c46ba47ad | refs/heads/master | 2020-04-01T02:04:58.505007 | 2018-10-12T14:49:20 | 2018-10-12T14:49:20 | 152,764,847 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5911329984664917,
"alphanum_fraction": 0.6157635450363159,
"avg_line_length": 17.454545974731445,
"blob_id": "b37101735a7266e017ae4e35a6766c77fcbb0b2b",
"content_id": "15820461348e7ae2164562d92724cfd1f8b87c90",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 203,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 11,
"path": "/bzp/__init__.py",
"repo_name": "ideascup/bzp",
"src_encoding": "UTF-8",
"text": "import bz2\nimport pickle\n\n\ndef dump(content, dst):\n with bz2.BZ2File(dst, 'w') as f:\n pickle.dump(content, f)\n\ndef load(dst):\n with bz2.BZ2File(dst, 'r') as f:\n return pickle.load(f)\n"
},
{
"alpha_fraction": 0.6194915175437927,
"alphanum_fraction": 0.6211864352226257,
"avg_line_length": 25.244443893432617,
"blob_id": "eda489b500f8f3499d9fe77d01f3950a9b174c77",
"content_id": "56ea1aa87cc561faa58bc0088685c0b08832ab9d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 45,
"path": "/setup.py",
"repo_name": "ideascup/bzp",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys\nimport os\nimport os.path as op\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\npackages = find_packages()\n\nabout = {}\nwith open(op.join(here, 'bzp/__version__.py'), 'r') as f:\n exec(f.read(), about)\n\n# 'setup.py publish' shortcut.\nif sys.argv[-1] == 'publish':\n repo = '-r pypi'\n if sys.argv[-2] == 'test':\n repo = '-r pypitest'\n \n os.system('python setup.py sdist bdist_wheel')\n os.system('twine upload {} dist/bzp-{}*'.format(repo, about['__version__']))\n sys.exit()\n\nwith open('README.md', 'r') as f:\n long_description = f.read()\n \n\nsetup(\n name=about['__title__'],\n version=about['__version__'],\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type='text/markdown',\n license=about['__license__'],\n author=about['__author__'],\n author_email=about['__author_email__'],\n url=about['__url__'],\n zip_safe=False,\n package_data={'': ['LICENSE', 'README.md']},\n include_package_data=True,\n packages=find_packages()\n)"
},
{
"alpha_fraction": 0.5357142686843872,
"alphanum_fraction": 0.5892857313156128,
"avg_line_length": 24.454545974731445,
"blob_id": "9fb5e7bfc643f95d6ad86e7815948d5607e4b898",
"content_id": "87b3d5517f40b8c37eae63423e7a84ab08749170",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 11,
"path": "/bzp/__version__.py",
"repo_name": "ideascup/bzp",
"src_encoding": "UTF-8",
"text": "# ICE TEMPLE\n\n__title__ = 'bzp'\n__description__ = 'Manager for bz2 pickle files'\n__url__ = 'https://github.com/ideascup/bzp/'\n__version__ = '1.0.0'\n__build__ = 0x010000\n__author__ = 'Dmitriy Pleshevskiy'\n__author_email__ = '[email protected]'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 Dmitriy Pleshevskiy'\n"
}
] | 3 |
ebu/sentry-auth-gitlab | https://github.com/ebu/sentry-auth-gitlab | a4d954c9177be07ac52e5ac4ecee45bb70dba264 | b109689ba04bb16989b7b5403a1cfb534fa295c8 | 2562b7ece16f5de1237685ee6ad20bad73cc1973 | refs/heads/master | 2021-01-01T16:31:43.693822 | 2017-07-24T09:17:14 | 2017-07-24T09:17:14 | 97,850,272 | 0 | 0 | null | 2017-07-20T15:18:12 | 2017-07-20T15:18:14 | 2017-07-24T09:17:14 | Python | [
{
"alpha_fraction": 0.5400248765945435,
"alphanum_fraction": 0.5400248765945435,
"avg_line_length": 32.02739715576172,
"blob_id": "45f44f844896745f5ef6d9650f4461a36b5a2446",
"content_id": "e44205e6e74df4af38d208912348269741c7dc7f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2411,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 73,
"path": "/sentry_auth_gitlab/views.py",
"repo_name": "ebu/sentry-auth-gitlab",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\n\nfrom sentry.auth.view import AuthView\n\nfrom .client import GitLabClient\n\nfrom sentry.models import AuthIdentity, Team, OrganizationMember, OrganizationMemberTeam\n\n\nclass FetchUser(AuthView):\n\n def handle(self, request, helper):\n\n access_token = helper.fetch_state('data')['access_token']\n user = GitLabClient().get_user(access_token)\n helper.bind_state('user', user)\n\n return helper.next_step()\n\n\nclass SetTeams(AuthView):\n \"\"\"View used to set sentry teams based on gitlab groups\"\"\"\n\n def handle(self, request, helper):\n\n access_token = helper.fetch_state('data')['access_token']\n user = helper.fetch_state('user')\n real_user = None\n\n # We fetch the User object for the current user. We need to use the\n # AuthIdentity to find it since there is no direct user access\n try:\n auth_identity = AuthIdentity.objects.select_related('user').get(\n auth_provider=helper.auth_provider,\n ident=user['id'],\n )\n except AuthIdentity.DoesNotExist:\n pass\n else:\n real_user = auth_identity.user\n\n if real_user:\n # We fetch the list of groups the\n groups = GitLabClient().get_groups(access_token)\n\n for group in groups:\n # We try to find the sentry team with the same name\n team = Team.objects.filter(name=group['name']).first()\n\n if team:\n member = None\n\n # We try to find the user membership for the team's\n # organisation\n try:\n member = OrganizationMember.objects.get(\n user=real_user,\n organization=team.organization,\n )\n except OrganizationMember.DoesNotExist:\n pass\n\n if member:\n # We ensure the user has access to the team (via the\n # membership for the organisation)\n OrganizationMemberTeam.objects.get_or_create(\n team=team,\n organizationmember=member,\n )\n else:\n print(\"Didn't found user\")\n\n return helper.next_step()\n"
}
] | 1 |
devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks | https://github.com/devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks | f7a1d7cd90f2b270cbb63a31315f31a7a33066de | d5442a0f177f08fe66df4c4787acf0dc9ae0ded2 | e86c7367c2b577feaa469468001b0f4d8041a8da | refs/heads/master | 2021-05-22T18:25:53.725900 | 2019-05-18T18:09:38 | 2019-05-18T18:09:38 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.566310703754425,
"alphanum_fraction": 0.6021357178688049,
"avg_line_length": 33.97590255737305,
"blob_id": "02d9483704a6f7ca0b3aec9ba34c3834e270601b",
"content_id": "768b844e018cd56ceef5f28745ee13639953a947",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2903,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 83,
"path": "/tf_2D_Conductionmatrix_prediction_pythonCG.py",
"repo_name": "devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy.io as sio\nimport tensorflow as tf\nfrom scipy.sparse.linalg import spsolve\n\n\ndef conjgrad_tf(A_tf, b, x, n):\n result = {}\n #r = b - A.dot(x)\n r = b - tf.sparse_tensor_dense_matmul(A_tf, x, adjoint_a=False, adjoint_b=False, name=None)\n p = r\n #rsold = np.dot(r.T, r)\n rsold = tf.matmul(tf.transpose(r), r)\n for i in range(n):\n #Ap = A.dot(p)\n Ap = tf.sparse_tensor_dense_matmul(A_tf, p, adjoint_a=False, adjoint_b=False, name=None)\n #alpha = rsold / np.dot(p.T, Ap)\n alpha = rsold / tf.matmul(tf.transpose(p), Ap)\n x = x + alpha * p\n r = r - alpha * Ap\n #rsnew = np.dot(r.T, r)\n rsnew = tf.matmul(tf.transpose(r), r)\n #print('Itr:', i)\n p = r + (rsnew / rsold) * p\n rsold = rsnew\n result['final'] = x\n return result\n\ndef convert_sparse_matrix_to_sparse_tensor(X):\n coo = X.tocoo()\n indices = np.mat([coo.row, coo.col]).transpose()\n return tf.SparseTensor(indices, coo.data, coo.shape)\n\nif __name__ == '__main__':\n tol = 1e-5 # Tolerance: Decrease for grater accuracy\n conductivity = tf.Variable(1., tf.float32)\n n = 36\n A = tf.sparse_placeholder(tf.float32, shape=(110, 110))\n b = tf.placeholder(tf.float32, shape=(110, 1))\n x = tf.placeholder(tf.float32, shape=(110, 1))\n CGpy_result = conjgrad_tf(A, b, x, n)\n\n # optimizer\n CGpy_result['loss'] = loss = tf.reduce_mean(tf.abs(CGpy_result['final'] - x))\n lr = 1\n learning_rate = tf.Variable(lr) # learning rate for optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate) #\n grads = optimizer.compute_gradients(loss)\n train_op = optimizer.apply_gradients(grads)\n\n ## training starts ###\n FLAGS = tf.app.flags.FLAGS\n tfconfig = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=True,\n )\n #tfconfig.gpu_options.allow_growth = True\n sess = tf.Session(config=tfconfig)\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # 10 x 10 Element Data\n data1 = sio.loadmat('./data/10x10/K_forceboundary_elements10x10.mat')\n data2 = sio.loadmat('./data/10x10/f_forceboundary_elements10x10.mat')\n data3 = sio.loadmat('./data/10x10/x0_elements10x10.mat')\n A10 = data1['K_forceboundary_elements10x10']\n b10 = data2['f_forceboundary_elements10x10']\n x10 = spsolve(A10, b10)\n x_gt = x10.reshape(1, 10, 11, 1)\n b10 = b10.reshape(1, 10, 11, 1)\n test_loss_hist = []\n train_loss_hist = []\n k_value_hist = []\n for itr in range(500):\n for i in range(1):\n x_input = x_gt\n b_input = b10\n feed_dict_train = {b: b_input, x: x_input}\n _, loss_value, k_value = sess.run([train_op, loss, conductivity], feed_dict_train)\n\n print(\"iter:{} train_cost: {} k_value: {}\".format(itr, np.mean(loss_value), k_value))\n\n print('done')\n"
},
{
"alpha_fraction": 0.5250130295753479,
"alphanum_fraction": 0.5896300077438354,
"avg_line_length": 38.56700897216797,
"blob_id": "139651ee1bb9c3ff2f2e0ff7489852aa7b63b33d",
"content_id": "fa904e9e2c65c40c760854f4b0cd58fce20d56e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3838,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 97,
"path": "/tf_2D_ConductionMatirx_prediction_1000.py",
"repo_name": "devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy.io as sio\nimport tensorflow as tf\nfrom scipy.sparse.linalg import spsolve\nimport os\nos.environ['CUDA_DEVICE_ORDER'] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" # set to -1 to enable CPU, set to 0 to enable GPU\n\ndef conjgrad_tf(A_weights, b, x, n):\n result = {}\n #r = b - A.dot(x) # python method\n padded_x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], \"SYMMETRIC\")\n # reshape\n A_dotx_conv = tf.nn.conv2d(input=padded_x, filter=A_weights, strides=[1, 1, 1, 1], padding='VALID')\n\n A_dotx_conv = tf.reshape(A_dotx_conv, (1001000,1))\n r = b - A_dotx_conv\n p = r\n #rsold = np.dot(r.T, r) # python method\n rsold = tf.matmul(tf.transpose(r), r)\n for i in range(n):\n #Ap = A.dot(p) # python method\n padded_p = tf.pad(tf.reshape(p, (1, 1000, 1001, 1)), [[0, 0], [1, 1], [1, 1], [0, 0]], \"SYMMETRIC\")\n Ap_c = tf.nn.conv2d(input=padded_p, filter=A_weights, strides=[1, 1, 1, 1], padding='VALID')\n Ap = tf.reshape(Ap_c, (1001000,1))\n # Ap = Ap_c[0, 0, :, :]\n #alpha = rsold / np.dot(p.T, Ap) # python method\n alpha = rsold / tf.matmul(tf.transpose(p), Ap)\n x = tf.reshape(x, (1001000, 1))\n x = x + alpha * p\n r = r - alpha * Ap\n #rsnew = np.dot(r.T, r) # python method\n rsnew = tf.matmul(tf.transpose(r), r)\n p = r + (rsnew / rsold) * p\n rsold = rsnew\n #print('Itr:', i)\n result['final'] = x\n return result\ndef convert_sparse_matrix_to_sparse_tensor(X):\n coo = X.tocoo()\n indices = np.mat([coo.row, coo.col]).transpose()\n return tf.SparseTensor(indices, coo.data, coo.shape)\n\nif __name__ == '__main__':\n tol = 1e-5 # Tolerance: Decrease for grater accuracy\n conductivity = tf.Variable(1., tf.float32)\n # Filter\n filter = np.asarray([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n A_weights = np.reshape(filter, (3, 3, 1, 1))* conductivity\n n = 2818\n b = tf.placeholder(tf.float32, shape=(1001000, 1), name=\"b\")\n x_input_pl = tf.placeholder(tf.float32, shape=(1001000, 1), name=\"x\")\n x = tf.reshape(x_input_pl, (1, 1000, 1001, 1))\n CGpy_result = conjgrad_tf(A_weights, b, x, n)\n x = tf.reshape(x, (1001000, 1))\n # optimizer\n CGpy_result['loss'] = loss = tf.reduce_mean(tf.abs(CGpy_result['final'] - x))\n lr = 1\n learning_rate = tf.Variable(lr) # learning rate for optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate) #\n grads = optimizer.compute_gradients(loss)\n train_op = optimizer.apply_gradients(grads)\n\n ## training starts ###\n FLAGS = tf.app.flags.FLAGS\n tfconfig = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=True,\n )\n tfconfig.gpu_options.allow_growth = True\n sess = tf.Session(config=tfconfig)\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # 1000 x 1000 Element Data\n data1 = sio.loadmat('./data/1000x1000/K_forceboundary_elements1000x1000.mat')\n data2 = sio.loadmat('./data/1000x1000/f_forceboundary_elements1000x1000.mat')\n data3 = sio.loadmat('./data/1000x1000/x0_elements1000x1000.mat')\n A1000 = data1['K_forceboundary_elements1000x1000']\n b1000 = data2['f_forceboundary_elements1000x1000']\n x = spsolve(A1000, b1000)\n x = x.reshape(1001000, 1)\n b1000 = np.float32(b1000)\n x = np.float32(x)\n test_loss_hist = []\n train_loss_hist = []\n k_value_hist = []\n for itr in range(500):\n for i in range(1):\n x_input = x\n b_input = b1000\n feed_dict_train = {b: b_input, x_input_pl: x_input}\n _, loss_value, k_value = sess.run([train_op, loss, conductivity], feed_dict_train)\n\n print(\"iter:{} train_cost: {} k_value: {}\".format(itr, np.mean(loss_value), k_value))\n\n 
print('done')\n"
},
{
"alpha_fraction": 0.8636363744735718,
"alphanum_fraction": 0.8636363744735718,
"avg_line_length": 87,
"blob_id": "5bfb61e3a6655c82a2514519b7e67d29c5a265a3",
"content_id": "aadd58d2dc9a2529074fa02287d39f75e952f7e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 176,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 2,
"path": "/README.md",
"repo_name": "devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks",
"src_encoding": "UTF-8",
"text": "# Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks\nReal-time Learning of Material Constitutive Models Using Convolutional Neural Networks\n"
},
{
"alpha_fraction": 0.5376520156860352,
"alphanum_fraction": 0.6400842070579529,
"avg_line_length": 38.74285888671875,
"blob_id": "ff28f540d1c37975a810e077e8e764df52be421e",
"content_id": "45956b7e00be43bcfc2dc4687f8f71f9fb773a56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4276,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 105,
"path": "/2D_conduction_CG_tf.py",
"repo_name": "devillucas46/Real-time-Learning-of-Material-Constitutive-Models-Using-Convolutional-Neural-Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport scipy.io as sio\r\nfrom timeit import default_timer as timer\r\nimport tensorflow as tf\r\nimport os\r\nos.environ['CUDA_DEVICE_ORDER'] = \"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" # set to -1 to enable CPU, set to 0 to enable GPU\r\n\r\ndef convert_sparse_matrix_to_sparse_tensor(X):\r\n coo = X.tocoo()\r\n indices = np.mat([coo.row, coo.col]).transpose()\r\n return tf.SparseTensor(indices, coo.data, coo.shape)\r\n\r\ndef conjgrad_tf(A_tf, b, x, n):\r\n #r = b - A.dot(x)\r\n r = b - tf.sparse_tensor_dense_matmul(A_tf, x, adjoint_a=False, adjoint_b=False, name=None)\r\n p = r\r\n #rsold = np.dot(r.T, r)\r\n rsold = tf.matmul(tf.transpose(r), r)\r\n for i in range(n):\r\n #Ap = A.dot(p)\r\n Ap = tf.sparse_tensor_dense_matmul(A_tf, p, adjoint_a=False, adjoint_b=False, name=None)\r\n #alpha = rsold / np.dot(p.T, Ap)\r\n alpha = rsold / tf.matmul(tf.transpose(p), Ap)\r\n x = x + alpha * p\r\n r = r - alpha * Ap\r\n #rsnew = np.dot(r.T, r)\r\n rsnew = tf.matmul(tf.transpose(r), r)\r\n #print('Itr:', i)\r\n p = r + (rsnew / rsold) * p\r\n rsold = rsnew\r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\r\n # 10 x 10 Element Data\r\n data1 = sio.loadmat('./data/10x10/K_forceboundary_elements10x10.mat')\r\n data2 = sio.loadmat('./data/10x10/f_forceboundary_elements10x10.mat')\r\n data3 = sio.loadmat('./data/10x10/x0_elements10x10.mat')\r\n A10 = data1['K_forceboundary_elements10x10']\r\n b10 = data2['f_forceboundary_elements10x10']\r\n x10 = data3['x0_elements10x10']\r\n A_tensor = convert_sparse_matrix_to_sparse_tensor(A10)\r\n A_tf10 = tf.cast(A_tensor, tf.float32)\r\n b_tf10 = tf.convert_to_tensor(b10, dtype=tf.float32)\r\n x0_tf10 = tf.convert_to_tensor(x10, dtype=tf.float32)\r\n\r\n\r\n # 100 x 100 Element Data\r\n data4 = sio.loadmat('./data/100x100/K_forceboundary_elements100x100.mat')\r\n data5 = sio.loadmat('./data/100x100/f_forceboundary_elements100x100.mat')\r\n data6 = sio.loadmat('./data/100x100/x0_elements100x100.mat')\r\n A100 = data4['K_forceboundary_elements100x100']\r\n b100 = data5['f_forceboundary_elements100x100']\r\n x100 = data6['x0_elements100x100']\r\n A_tensor = convert_sparse_matrix_to_sparse_tensor(A100)\r\n A_tf100 = tf.cast(A_tensor, tf.float32)\r\n b_tf100 = tf.convert_to_tensor(b100, dtype=tf.float32)\r\n x0_tf100 = tf.convert_to_tensor(x100, dtype=tf.float32)\r\n\r\n # 1000 x 1000 Element Data\r\n data7 = sio.loadmat('./data/1000x1000/K_forceboundary_elements1000x1000.mat')\r\n data8 = sio.loadmat('./data/1000x1000/f_forceboundary_elements1000x1000.mat')\r\n data9 = sio.loadmat('./data/1000x1000/x0_elements1000x1000.mat')\r\n A1000 = data7['K_forceboundary_elements1000x1000']\r\n b1000 = data8['f_forceboundary_elements1000x1000']\r\n x1000 = data9['x0_elements1000x1000']\r\n A_tensor = convert_sparse_matrix_to_sparse_tensor(A1000)\r\n A_tf1000 = tf.cast(A_tensor, tf.float32)\r\n b_tf1000 = tf.convert_to_tensor(b1000, dtype=tf.float32)\r\n x0_tf1000 = tf.convert_to_tensor(x1000, dtype=tf.float32)\r\n\r\n\r\n FLAGS = tf.app.flags.FLAGS\r\n tfconfig = tf.ConfigProto(\r\n allow_soft_placement=True,\r\n log_device_placement=True,\r\n )\r\n tfconfig.gpu_options.allow_growth = True\r\n sess = tf.Session(config=tfconfig)\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n\r\n # 10 x 10 Elements\r\n n10 = 36 # Based on # of python iterations\r\n start_tf10 = timer()\r\n x_result_tf10 = conjgrad_tf(A_tf10, b_tf10, x0_tf10, n10)\r\n end_tf10 = timer()\r\n print('Tensorflow solved for 10 element 
case in ', end_tf10 - start_tf10, ' Seconds.')\r\n\r\n # 100 x 100 Elements\r\n n100 = 313 # Based on # of python iterations\r\n start_tf100 = timer()\r\n x_result_tf100 = conjgrad_tf(A_tf100, b_tf100, x0_tf100, n100)\r\n end_tf100 = timer()\r\n print('Tensorflow solved for 100 element case in ', end_tf100 - start_tf100, ' Seconds.')\r\n\r\n # 1000 x 1000 Elements\r\n n1000 = 2818 # Based on # of python iterations\r\n start_tf1000 = timer()\r\n x_result_tf1000 = conjgrad_tf(A_tf1000, b_tf1000, x0_tf1000, n1000)\r\n end_tf1000 = timer()\r\n print('Tensorflow solved for 1000 element case in ', end_tf1000 - start_tf1000, ' Seconds.')"
}
] | 4 |
rafa2802/banco-credimaster | https://github.com/rafa2802/banco-credimaster | 9a4f23701d70f8f33ed2258e7c7610e5f6616c8f | 8bb9d18947cbfc639379251dec4ce17f84b8e440 | a6e4a7f945a4a86af114b044e43aae5d9e1676ec | refs/heads/master | 2020-03-23T16:42:19.110885 | 2018-07-25T09:09:31 | 2018-07-25T09:09:31 | 141,823,865 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6927083134651184,
"alphanum_fraction": 0.7369791865348816,
"avg_line_length": 17.285715103149414,
"blob_id": "7956373ef3dd990bb277a6b3da9ee3e9a12022cd",
"content_id": "34fa9657c91d6a6d8dc70d47d8b0f0df16411332",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 21,
"path": "/bd/bd.sql",
"repo_name": "rafa2802/banco-credimaster",
"src_encoding": "UTF-8",
"text": "CREATE DATABASE BANCO_CREDIMASTER;\n\nCREATE TABLE USUARIO(\n\tnome varchar(255),\n\ttelefone varchar(255),\n\temail varchar(255),\n\tcpf varchar(11),\n\tsexo varchar(1),\n\tidade integer,\n\tprimary key (cpf)\n);\n\nCREATE TABLE CONTA(\n\tnumero integer,\n\tagencia integer,\n\tsenha varchar(255),\n\ttitular varchar(11),\n\tsaldo integer,\n\tprimary key(numero),\n\tforeign key(titular) references USUARIO (cpf)\n);\n"
},
{
"alpha_fraction": 0.6616897583007812,
"alphanum_fraction": 0.6702476143836975,
"avg_line_length": 35.606666564941406,
"blob_id": "5426eae49c7c0ae03264363ac41662359c599ead",
"content_id": "f1e8615dbb44024a3a939ccb304e5a89cbf668fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5495,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 150,
"path": "/app/views.py",
"repo_name": "rafa2802/banco-credimaster",
"src_encoding": "UTF-8",
"text": "import psycopg2, psycopg2.extras\n\nfrom flask import g, session, request, redirect, url_for, render_template\n\nfrom app import app\n\nfrom random import randint\n\[email protected]_request\ndef before_request():\n g.db = psycopg2.connect(\"dbname=banco_credimaster user=postgres password=rafa123 host=127.0.0.1\")\n\n# Disconnect database \[email protected]_request\ndef teardown_request(exception):\n g.db.close()\n\[email protected]('/', methods = ['POST', 'GET'])\ndef index():\n\tif request.method == 'POST':\n\t\tn_conta = request.form['conta']\n\t\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\t\tcur.execute(\"SELECT * FROM conta WHERE numero = {}\".format(n_conta))\n\t\tconta = cur.fetchall()\n\t\ttitular = conta[0][2]\n\t\tcur.execute(\"SELECT * FROM usuario WHERE cpf = '{}'\".format(titular))\n\t\ttitular = cur.fetchall()\n\t\tsession['cpf'] = n_conta\n\t\tif conta[0][4] == request.form['senha']:\n\t\t\tsession['cpf'] = conta[0][2]\n\t\t\treturn redirect(url_for('cliente'))\n\t\treturn render_template('index.html', erro = 'Senha incorreta')\n\t\t\n\t\t\n\treturn render_template('index.html')\n\[email protected]('/abertura-de-conta', methods = ['GET', 'POST'])\ndef abertura_de_conta():\n\tif request.method == 'GET':\n\t\treturn render_template('abrir-conta.html')\n\telse:\n\t\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\t\tcur.execute(\"SELECT * FROM usuario\")\n\t\tusuarios = cur.fetchall()\n\t\taux = 0\n\t\tusuario = request.form['cpf']\n\t\tfor user in usuarios:\n\t\t\tif user['cpf'] == usuario:\n\t\t\t\taux = 1\n\t\t\telse:\n\t\t\t\tpass\n\t\tif aux == 1:\n\t\t\treturn render_template('abrir-conta.html', error='CPF já cadastrado!')\n\t\telse:\n\t\t\tnome = request.form['nome'] \n\t\t\tcpf = request.form['cpf']\n\t\t\ttelefone = request.form['telefone']\n\t\t\temail = request.form['email']\n\t\t\tsexo = request.form['sexo']\n\t\t\tidade = request.form['idade']\n\t\t\tnumero = randint(1000, 50000)\n\t\t\ttitular = request.form['cpf']\n\t\t\tsaldo = 0\n\t\t\tsenha = request.form['senha']\n\t\t\tcur.execute(\"INSERT INTO usuario (nome, cpf, telefone, email, sexo, idade) VALUES ('{}', '{}', '{}', '{}', '{}', {})\".format(nome, cpf, telefone, email, sexo, idade))\n\t\t\tcur.execute(\"INSERT INTO conta (numero, agencia, titular, saldo, senha) VALUES ({}, {}, '{}', {}, '{}')\".format(numero, '0001', titular, saldo, senha))\n\t\t\tg.db.commit()\n\t\t\tcur.close()\n\t\t\tsession['cpf'] = request.form['cpf']\n\t\t\treturn redirect(url_for('cliente'))\n\ndef login(titular, conta):\n\treturn render_template('senha.html', titular = titular, conta = conta)\n\[email protected]('/cliente')\ndef cliente():\n\tif 'cpf' in session:\n\t\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\t\tcur.execute(\"SELECT * FROM usuario WHERE cpf = '{}'\".format(session['cpf']))\n\t\ttitular = cur.fetchall()\n\t\tcur.execute(\"SELECT * FROM conta WHERE titular = '{}'\".format(session['cpf']))\n\t\tconta = cur.fetchall()\n\t\treturn render_template('cliente.html', titular = titular, conta = conta)\n\treturn redirect(url_for('index'))\n\[email protected]('/sair')\ndef sair():\n\tsession.pop('cpf')\n\treturn redirect(url_for('index'))\n\[email protected]('/deposito', methods = ['GET', 'POST'])\ndef deposito():\n\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\tcur.execute(\"SELECT * FROM conta WHERE titular = '{}'\".format(session['cpf']))\n\tconta = cur.fetchall()\n\tcur.execute(\"SELECT * FROM usuario WHERE cpf = 
'{}'\".format(session['cpf']))\n\ttitular = cur.fetchall()\n\tif request.method == 'POST':\n\t\tsaldo = conta[0][3]\n\t\tsaldo += float(request.form['valor'])\n\t\tcur.execute(\"UPDATE conta SET saldo = {}\".format(saldo))\n\t\tg.db.commit()\n\t\treturn redirect(url_for('cliente'))\n\treturn render_template('deposito.html', titular = titular)\n\[email protected]('/saque', methods = ['GET', 'POST'])\ndef saque():\n\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\tcur.execute(\"SELECT * FROM conta WHERE titular = '{}'\".format(session['cpf']))\n\tconta = cur.fetchall()\n\tcur.execute(\"SELECT * FROM usuario WHERE cpf = '{}'\".format(session['cpf']))\n\ttitular = cur.fetchall()\n\tif request.method == 'POST':\n\t\tsaldo = conta[0][3]\n\t\tvalor = float(request.form['valor'])\n\t\tif saldo >= valor:\n\t\t\tprint (saldo, valor)\n\t\t\tsaldo -= float(request.form['valor'])\n\t\t\tcur.execute(\"UPDATE conta SET saldo = {}\".format(saldo))\n\t\t\tg.db.commit()\n\t\t\treturn redirect(url_for('cliente'))\n\t\treturn render_template('deposito.html', titular = titular, erro = 'Saldo superior ao disponível!')\t\n\treturn render_template('deposito.html', titular = titular)\n\[email protected]('/transferencia', methods = ['GET', 'POST'])\ndef transferencia():\n\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\tcur.execute(\"SELECT * FROM conta WHERE titular = '{}'\".format(session['cpf']))\n\tconta = cur.fetchall()\n\tcur.execute(\"SELECT * FROM usuario WHERE cpf = '{}'\".format(session['cpf']))\n\ttitular = cur.fetchall()\n\tif request.method == 'POST':\n\t\tdestino = request.form['conta']\n\t\tvalor = request.form['valor']\n\t\tsaldo = conta[0][3]\n\t\tif saldo >= float(valor):\n\t\t\tsaldo -= float(valor)\n\t\t\tcur.execute(\"UPDATE conta SET saldo = {}\".format(saldo))\n\t\t\tg.db.commit()\n\t\t\tcur.close()\n\t\t\tcur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\t\t\tcur.execute(\"SELECT * FROM conta WHERE numero = {}\".format(destino))\n\t\t\tconta_destino = cur.fetchall()\n\t\t\tsaldo_destino = conta_destino[0][3]\n\t\t\tsaldo_destino += float(valor)\n\t\t\tcur.execute(\"UPDATE conta SET saldo = {}\".format(saldo_destino))\n\t\t\tg.db.commit()\n\t\t\treturn redirect(url_for('cliente'))\n\t\treturn render_template('transferencia.html', titular = titular, erro = 'Saldo superior ao disponível!')\t\n\treturn render_template('transferencia.html', titular = titular)\n\n"
}
] | 2 |
Ravibanda410/KNN_Assignment_Zoo | https://github.com/Ravibanda410/KNN_Assignment_Zoo | d1aa9f23ff599ad28c78ae456c5ff23dcbee4728 | 2261cdb136d3bf818089535b376ed434ae1e3672 | af055d76442ad799d1d5f167d6aa5ef7aeaabd0d | refs/heads/main | 2023-02-11T20:29:51.797085 | 2020-12-28T08:06:01 | 2020-12-28T08:06:01 | 324,950,247 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6268656849861145,
"alphanum_fraction": 0.6462008357048035,
"avg_line_length": 21.69354820251465,
"blob_id": "d25f8ec20ab2940de20cb9b456bcf22796f82ba3",
"content_id": "0bc950975007c49cf9ace331cdf47246d29ee954",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2948,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 124,
"path": "/KNN_Assignment2_Zoo.R",
"repo_name": "Ravibanda410/KNN_Assignment_Zoo",
"src_encoding": "UTF-8",
"text": "\r\n\r\n\r\nZoo_data <- read.csv(\"C:/RAVI/Data science/Assignments/Module 18 KNN/KNN Assignment2 dataset/Zoo.csv/Zoo.csv\")\r\nView(Zoo_data)\r\nattach(Zoo_data)\r\n\r\n# drop the animal.name feature\r\nZoo_data1 <- Zoo_data[ ,2:18]\r\nView(Zoo_data1)\r\nstr(Zoo_data1)\r\n\r\ntable(Zoo_data1$type)\r\n\r\nsummary(Zoo_data)\r\n\r\nsummary(Zoo_data[c(\"feathers\",\"toothed\",\"domestic\",\"breathes\",\"tail\")])\r\n\r\nhead(Zoo_data)\r\nstr(Zoo_data)\r\n\r\n#Data Visualization\r\ninstall.packages('ggplot2') #for Data Visualization\r\nlibrary(ggplot2)\r\n\r\nplot(Zoo_data1)\r\n\r\ninstall.packages('corrplot') #Correlation Plot\r\nlibrary(corrplot)\r\ncorrplot(cor(Zoo_data1))\r\n\r\n# create normalization function\r\nnormalize <- function(x) {\r\n return ((x - min(x)) / (max(x) - min(x)))\r\n}\r\n\r\nZoo_data1_n <- as.data.frame(lapply(Zoo_data1[1:16], normalize))\r\nZoo_data1_n\r\n\r\nsummary(Zoo_data1_n$aquatic)\r\n\r\n# create training and test datasets\r\n\r\n#random sampling\r\nn <- nrow(Zoo_data1_n)\r\nn1 <- n*0.8\r\nn1\r\nn2 <- n-n1\r\nn2\r\n\r\ntrain_index <- sample(1:n,n1)\r\n\r\nzoo_train <- Zoo_data1[train_index, ]\r\nzoo_test <- Zoo_data1[-train_index, ]\r\n\r\n #Creating seperate dataframe for 'Type' feature which is our target.\r\n \r\n zoo_train_labels <- Zoo_data1[train_index,17]\r\n zoo_test_labels <- Zoo_data1[-train_index,17]\r\n\r\n#---- Training a model on the data ----\r\n\r\n#Find the number of observation\r\nNROW(zoo_train_labels)\r\nsqrt(80) # k=9\r\n\r\n# load the \"class\" library\r\ninstall.packages(\"class\") ##KNN \r\nlibrary(class)\r\n\r\nzoo_test_pred <- knn(train = zoo_train, test = zoo_test,\r\n cl = zoo_train_labels, k=1)\r\nzoo_test_pred\r\n\r\n#Error in prediction\r\nerror <- mean(zoo_test_pred!=zoo_test_labels)\r\nerror\r\n\r\ninstall.packages('caret')\r\nlibrary(caret)\r\n\r\n\r\n##--------Evaluating model performance ----\r\n\r\n\r\n#Calculate the proportion of correct classification for k = 1\r\n# Check prediction against actual value in tabular form for k=1\r\ntable(zoo_test_pred ,zoo_test_labels)\r\nconfusionMatrix(table(zoo_test_pred, zoo_test_labels)) \r\n \r\n\r\n \r\nzoo_test_pred <- NULL\r\nerror_rate <- NULL\r\n\r\nfor (i in 1:15) {\r\n zoo_test_pred <- knn(train = zoo_train, test = zoo_test,cl = zoo_train_labels,k=i)\r\n error_rate[i] <- mean(zoo_test_pred!=zoo_test_labels)\r\n}\r\n\r\nknn_error <- as.data.frame(cbind(k=1:15,error_type =error_rate))\r\n\r\n#K Value by Visualization\r\ninstall.packages('ggplot2') #for Data Visualization\r\nlibrary(ggplot2)\r\nggplot(knn_error,aes(k,error_type))+ \r\n geom_point()+ \r\n geom_line() + \r\n scale_x_continuous(breaks=1:15)+ \r\n theme_bw() +\r\n xlab(\"Value of K\") +\r\n ylab('Error')\r\n\r\n\r\nzoo_test_pred <- knn(train = zoo_train, test = zoo_test,\r\n cl = zoo_train_labels, k=2)\r\nzoo_test_predOO\r\n\r\n#Error in prediction\r\nerror <- mean(zoo_test_pred!=zoo_test_labels)\r\nerror\r\nconfusionMatrix(table(zoo_test_pred,zoo_test_labels))\r\n\r\n#########################################\r\nlibrary(gmodels)\r\nCrossTable(x = zoo_test_labels, y = zoo_test_pred,\r\n prop.chisq=FALSE)\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6455172300338745,
"alphanum_fraction": 0.6668965220451355,
"avg_line_length": 26.53061294555664,
"blob_id": "35032a8bba92245a80107e4575789a70f8871695",
"content_id": "df14fd8a4bded3ac30e922660d5950bd10c7289a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1450,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 49,
"path": "/KNN_Assignment2_Zoo.PY",
"repo_name": "Ravibanda410/KNN_Assignment_Zoo",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 24 06:18:36 2020\r\n\r\n@author: RAVI\r\n\"\"\"\r\n\r\n# Importing Libraries \r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nZoo_data = pd.read_csv(\"C:/RAVI/Data science/Assignments/Module 18 KNN/KNN Assignment2 dataset/Zoo.csv/Zoo.csv\")\r\nZoo_data.head()\r\nZoo_data1=Zoo_data.iloc[ :, 1:]\r\n\r\n#normalisation function\r\ndef norm_func(i):\r\n x=(i-i.min())/(i.max()-i.min())\r\n return(x)\r\n\r\n#Normalized data frame(considering numerical part of data if have)\r\nZoo_data1_n = norm_func(Zoo_data1.iloc[ :, :16]) \r\nZoo_data1_n.describe()\r\n\r\nX=np.array(Zoo_data1_n.iloc[ :, : ]) #predictors\r\nY=np.array(Zoo_data['type']) #Target\r\n\r\n\r\n# Training and Test data using \r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, Y_train, Y_test= train_test_split(X,Y,test_size = 0.2) # 0.2 => 20 percent of entire data\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nKNN = KNeighborsClassifier(n_neighbors = 2)\r\nKNN.fit(X_train, Y_train)\r\n\r\npred = KNN.predict(X_test)\r\npred\r\n\r\n#Evaluate the model\r\nfrom sklearn.metrics import accuracy_score\r\npd.crosstab(Y_test,pred,rownames=['Actual'],colnames=['Predictions'] )\r\ntest_acc=print(accuracy_score(Y_test,pred))\r\n\r\n#error on train data\r\npred_train=KNN.predict(X_train)\r\npd.crosstab(Y_train,pred_train,rownames=['Actual'],colnames=['Predictions'] )\r\ntrain_acc=print(accuracy_score(Y_train,pred_train))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 2 |
nyghtowl/Python_Scrapy_Tutorial | https://github.com/nyghtowl/Python_Scrapy_Tutorial | 222abc19895ca2825e5a9cc64fd1e940a7b48301 | 9d53e77c7038e264d047046857485065317f2125 | 047e0cbb0ccdeafe020d25f2487b80f0118820ea | refs/heads/master | 2021-01-19T03:18:30.587833 | 2014-07-21T01:50:07 | 2014-07-21T01:50:07 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7578125,
"alphanum_fraction": 0.7578125,
"avg_line_length": 20.5,
"blob_id": "4835479a44366f3ec02490d3f0103ea827c674ee",
"content_id": "3fa10738c19c40eab74d451b0551a97a9eea33e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 6,
"path": "/my_scraper/scraper_app/README.md",
"repo_name": "nyghtowl/Python_Scrapy_Tutorial",
"src_encoding": "UTF-8",
"text": "Scrapy Tutorial / New Coder\n----\n\nRan through tutorial material on New Coder. \n\nCurrently there is an error in loading the data."
},
{
"alpha_fraction": 0.654321014881134,
"alphanum_fraction": 0.654321014881134,
"avg_line_length": 21.18181800842285,
"blob_id": "c5b00385ff167b62cba78815cf986d3b150edefc",
"content_id": "212ed9fdb27d801a2fa6d261e0371e373286613f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 11,
"path": "/my_scraper/scraper_app/items.py",
"repo_name": "nyghtowl/Python_Scrapy_Tutorial",
"src_encoding": "UTF-8",
"text": "from scrapy.item import Item, Field\n\nclass LivingSocialDeal(Item):\n \"\"\"\n Livingsocial containery (dictionary-like object) for scraped data \n \"\"\"\n\n title = Field()\n description = Field()\n location = Field()\n price = Field()"
},
{
"alpha_fraction": 0.7402597665786743,
"alphanum_fraction": 0.7402597665786743,
"avg_line_length": 18,
"blob_id": "4af219ac792b49da4a9194cd2f6e8d94ffc2cdbc",
"content_id": "bf845613cc8124abdffd49297b058159bbc74ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 4,
"path": "/README.md",
"repo_name": "nyghtowl/Python_Scrapy_Tutorial",
"src_encoding": "UTF-8",
"text": "Scrapy Tutorial / New Coder\n----\n\nCompleted tutorial material on New Coder. \n"
}
] | 3 |
FightMyself/PLS | https://github.com/FightMyself/PLS | d616c9c591bc3d784b985be8243e49c5ac39d913 | 022c24d48c34e320eb728dfbb4ec8d10145db419 | 9f5f99ff21a7a4495df931d7ea65eb2da01f3994 | refs/heads/master | 2022-05-23T12:57:11.293612 | 2020-03-16T02:13:45 | 2020-03-16T02:13:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.45169615745544434,
"alphanum_fraction": 0.49410030245780945,
"avg_line_length": 29.307262420654297,
"blob_id": "1fc62fe47377969154962571990007542307a4f8",
"content_id": "2538b73c9ac4a56e3af3b8f524e410f881805e98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5902,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 179,
"path": "/PLS.py",
"repo_name": "FightMyself/PLS",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nfrom numpy import *\nfrom sklearn import preprocessing\n#import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nimport pylab\n#import numpy as np\nimport itertools\nimport pandas as pd\nimport random\n\n#数据读取-单因变量与多因变量\ndef loadDataSet01(filename):\n fr = open(filename)\n arrayLines = fr.readlines()\n row = len(arrayLines)\n x = mat(zeros((row, 12)))\n y = mat(zeros((row, 1)))\n index = 0\n for line in arrayLines:\n curLine = line.strip().split('\\t')\n x[index,:] = curLine[0: 12]\n y[index,:] = curLine[-1]\n index +=1\n return x, y\n\n#数据标准化\ndef stardantDataSet(x0, y0):\n e0 = preprocessing.scale(x0)\n f0 = preprocessing.scale(y0)\n return e0, f0\n\n#求均值-标准差\ndef data_Mean_Std(x0, y0):\n mean_x = mean(x0, 0)\n mean_y = mean(y0, 0)\n std_x = std(x0, axis=0, ddof=1)\n std_y = std(y0, axis=0, ddof=1)\n return mean_x, mean_y, std_x, std_y\n\n#PLS核心函数\ndef PLS(x0, y0):\n e0, f0 = stardantDataSet(x0,y0)\n e0 = mat(e0); f0 = mat(f0); m = shape(x0)[1]; ny=shape(y0)[1]\n w = mat(zeros((m, m))).T; w_star = mat(zeros((m, m))).T\n chg = mat(eye((m)))\n my = shape(x0)[0];ss = mat(zeros((m,1))).T\n t = mat(zeros((my,m))); alpha= mat(zeros((m,m)))\n press_i = mat(zeros((1,my)))\n press = mat(zeros((1, m)))\n Q_h2 = mat(zeros((1, m)))\n beta = mat(zeros((1,m))).T\n for i in range(1,m+1):\n #计算w,w*和t的得分向量\n matrix = e0.T * f0 * (f0.T * e0)\n val, vec = linalg.eig(matrix)#求特征向量和特征值\n sort_val = argsort(val)\n index_vec = sort_val[:-2:-1]\n w[:,i-1] = vec[:,index_vec]#求最大特征值对应的特征向量\n w_star[:,i-1] = chg * w[:,i-1]\n t[:,i-1] = e0 * w[:,i-1]\n #temp_t[:,i-1] = t[:,i-1]\n alpha[:,i-1] = (e0.T * t[:,i-1]) / (t[:,i-1].T * t[:,i-1])\n chg = chg * mat(eye((m)) - w[:,i-1] * alpha[:,i-1].T)\n e = e0 - t[:,i-1] * alpha[:,i-1].T\n e0 = e\n #计算ss(i)的值\n #beta = linalg.inv(t[:,1:i-1], ones((my, 1))) * f0\n #temp_t = hstack((t[:,i-1], ones((my,1))))\n #beta = f0\\linalg.inv(temp_t)\n #beta = nnls(temp_t, f0)\n beta[i-1,:] = (t[:,i-1].T * f0) /(t[:,i-1].T * t[:,i-1])\n cancha = f0 - t * beta\n ss[:,i-1] = sum(sum(power(cancha, 2),0),1)#注:对不对???\n for j in range(1,my+1):\n if i==1:\n t1 = t[:, i - 1]\n else:\n t1 = t[:,0:i]\n f1=f0\n she_t = t1[j-1,:]; she_f = f1[j-1,:]\n t1=list(t1); f1 = list(f1)\n del t1[j-1]; del f1[j-1] #删除第j-1个观察值\n #t11 = np.matrix(t1)\n #f11 = np.matrix(f1)\n t1 = array(t1); f1 = array(f1)\n if i==1:\n t1 = mat(t1).T; f1 = mat(f1).T\n else:\n t1 = mat(t1); f1 = mat(f1).T\n\n beta1 = linalg.inv(t1.T * t1) * (t1.T * f1)\n #beta1 = (t1.T * f1) /(t1.T * t1)#error???\n cancha = she_f - she_t*beta1\n press_i[:,j-1] = sum(power(cancha,2))\n press[:,i-1]=sum(press_i)\n if i>1:\n Q_h2[:,i-1] =1-press[:,i-1]/ss[:,i-2]\n else:\n Q_h2[:,0]=1\n if Q_h2[:,i-1]<0.0975:\n h = i\n break\n return h, w_star, t, beta\n\n##计算反标准化之后的系数\ndef Calxishu(xishu, mean_x, mean_y, std_x, std_y):\n n = shape(mean_x)[1]; n1 = shape(mean_y)[1]\n xish = mat(zeros((n, n1)))\n ch0 = mat(zeros((1, n1)))\n for i in range(n1):\n ch0[:, i] = mean_y[:, i] - std_y[:, i] * mean_x / std_x * xishu[:, i]\n xish[:, i] = std_y[0, i] * xishu[:, i] / std_x.T\n return ch0, xish\n\n'''\n为了获得较可靠的结果,需测试数据(测验)和训练数据(学习)\n--可按照6:4的比例划分数据集,即随机分成训练集与测试集\n'''\ndef splitDataSet(x, y):\n m =shape(x)[0]\n train_sum = int(round(m * 0.6))\n test_sum = m - train_sum\n #利用range()获得样本序列\n randomData = range(0,m)\n randomData = list(randomData)\n #根据样本序列进行分割- random.sample(A,rep)\n train_List = random.sample(randomData, train_sum)\n #获取训练集数据-train\n train_x = x[train_List,: ]\n train_y = y[train_List,: ]\n #获取测试集数据-test\n 
test_list = []\n for i in randomData:\n if i in train_List:\n continue\n test_list.append(i)\n test_x = x[test_list,:]\n test_y = y[test_list,:]\n return train_x, train_y, test_x, test_y\n\n#主函数\nif __name__ == '__main__':\n x0, y0 = loadDataSet01('data/000001.txt')#单因变量与多因变量\n # 随机划分数据集- (十折交叉验证)\n # train_x, train_y, test_x, test_y = splitDataSet(x0, y0)\n #标准化\n e0, f0 = stardantDataSet(x0, y0)\n mean_x, mean_y, std_x, std_y = data_Mean_Std(x0, y0)\n r = corrcoef(x0)\n m = shape(x0)[1]\n n = shape(y0)[1] # 自变量和因变量个数\n row = shape(x0)[0]\n #PLS函数\n h, w_star, t, beta = PLS(x0, y0)\n xishu = w_star * beta\n #反标准化\n ch0, xish = Calxishu(xishu, mean_x, mean_y, std_x, std_y)\n\n # 求可决系数和均方根误差\n y_predict = x0 * xish + tile(ch0[0, :], (row, 1))\n y_mean = tile(mean_y, (row, 1))\n SSE = sum(sum(power((y0 - y_predict), 2), 0))\n SST = sum(sum(power((y0 - y_mean), 2), 0))\n SSR = sum(sum(power((y_predict - y_mean), 2), 0))\n RR = SSR / SST\n RMSE = sqrt(SSE / row)\n print (\"=============================\")\n print (u\"h:\", h)\n print (u\"R2:\", RR)\n print (u\"RMSE:\", RMSE)\n # print u\"残差平方和:\", SSE\n print (u\"回归系数:\")\n #print (ch0)\n # xish = list(xish)\n #print (xish)\n print (\"=============================\")\n tofile = pd.DataFrame(y_predict)\n tofile.to_csv('data/y_predict.csv')"
}
] | 1 |
KinakoIguchi/ai-dojo | https://github.com/KinakoIguchi/ai-dojo | 883a1805d3c1b3aa75e0cfca26f027e8f37c3351 | ec561ab8f7292da46253cbce37a86bbb99c180ca | 9be9b81957bb0a7ad57b2549e0393f6f4d359554 | refs/heads/master | 2023-06-04T21:12:11.086901 | 2021-06-25T05:13:54 | 2021-06-25T05:13:54 | 299,792,008 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7277955412864685,
"alphanum_fraction": 0.7493610382080078,
"avg_line_length": 37.881988525390625,
"blob_id": "7c5c0b4d595dc3d447b58a14faa798d2b613350d",
"content_id": "dd50d5ca1bebdcfc62b6fc3481b8a962dc54a5b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11412,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 161,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/応用演習.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# Amazon rekognition 応用演習\n\n## アプリケーションの実装\n* Amazon Rekognition は API 利用も可能です。具体的な利用方法の例は以下を参照ください。\n * [例①:イメージ内の顔の検出](https://docs.aws.amazon.com/ja_jp/rekognition/latest/dg/faces-detect-images.html) \n * [例②:イメージ間の顔の比較](https://docs.aws.amazon.com/ja_jp/rekognition/latest/dg/faces-comparefaces.html)\n* 本演習では、API を利用して、画像分析を行うアプリケーションを実装してみましょう。\n* なお、Rekognition API の使用については、以下を参照してください。\n * [Rekognition API Reference](https://docs.aws.amazon.com/ja_jp/rekognition/latest/dg/API_Reference.html)\n* 演習で使用するサービス\n * [Lambda](https://aws.amazon.com/jp/lambda/?nc2=h_ql_prod_cp_lbd)\n * [S3](https://aws.amazon.com/jp/s3/?nc2=h_ql_prod_st_s3)\n * [IAM](https://aws.amazon.com/jp/iam/)\n * [CloudWatch](https://aws.amazon.com/jp/cloudwatch/?nc2=type_a)\n * [Rekognition](https://aws.amazon.com/jp/rekognition/?blog-cards.sort-by=item.additionalFields.createdDate&blog-cards.sort-order=desc)\n * [SNS](https://aws.amazon.com/jp/sns/?nc2=type_a&whats-new-cards.sort-by=item.additionalFields.postDateTime&whats-new-cards.sort-order=desc)\n* 事前準備\n * 事前準備として、以下の課題を実施してください。(SNSとlambdaに関するチュートリアルになります。)\n * [応用演習準備課題](https://github.com/dcs-aidojo/contents/blob/master/course_cloud/AWS_Rekognition/%E5%BF%9C%E7%94%A8%E6%BC%94%E7%BF%92%E6%BA%96%E5%82%99%E8%AA%B2%E9%A1%8C.md)\n\n### 画像分析アプリケーションを実装しよう\n\n* 画像をS3にpushしたことをトリガーにlambdaでRekognition APIを呼び出し、顔の位置情報をSNS(AWSのサービス)でメール通知するアプリケーションを作成しましょう \n 成果物:作成したアプリケーションのyaml、メールの受信画面のスクショ \n ※複数のAWSリソースを使用します。他の受講者と混ざらないようにリソース作成時には必ず自分の名前をつけてください。\n* 処理イメージ\n![処理イメージ](../../images/aws/Rekognition応用課題_イメージ.png)\n\n#### 手順1:通知用のトピックを準備しよう\n\n* SNSを使用し、アプリケーションの実行結果を通知するための機能を実装しましょう。\n\n#### 手順2:画像格納用のバケットを準備しよう\n\n* 2-1. S3を使用し、アプリケーションに使用するバケットを準備しましょう。\n\n* 2-2. 画像格納用のディレクトリをバケット直下に作成しましょう。\n\n#### 手順3:Lambdaを使用し、画像分析を実施するアプリケーションを作成しよう\n\n* 3-1. lambda関数を作成しましょう。\n * IAMロールについては、「AWS ポリシーテンプレートから新しいロールを作成」を選択し、下記画像と同じポリシーをアタッチしてください。\n ![IAM](../../images/aws/IAM作成.png)\n* 3-2. S3のイベント登録を実施しましょう。\n * 無限ループが発生する恐れがあるため、入力プレフィックス(2-2で作成したディレクトリ)、サフィックス(拡張子)を設定してください。\n ![S3のイベントの設定](../../images/aws/S3event.png)\n* 3-3. 送信先のSNSを登録しましょう。 \n 手順1で作成したSNSのトピックを送信先として登録しよう。\n* 3-3. ソースコードを書き、アプリケーションを実装しましょう。 \n[RekognitionのAPIリファレンス(Python)](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rekognition.html) \nコーディングは[実装例](https://github.com/dcs-aidojo/template/blob/master/course_cloud/AWS_Rekognition/Sample_Function.py)を参考にしてください。\n * 下記のABCのアプリケーションの中から1つ選択し、実装しましょう。 \n 時間に余裕がある場合は他のアプリケーションも同様に実装し、成果物を提出してください。 \n\n A. 投入された画像(人物がうつっている写真)に対し、人物の顔を検出し、推定年齢と性別をリストとして出力し、結果をメールで送信する。\n * 入力画像(例)\n ![人の写っている画像例](../../images/aws/SAYA072155940_TP_V.jpg) \n ※画像では1人しか写っておりませんが、なるべく複数人(人間が2人以上)写っている画像を準備してください。\n * メールレスポンス\n ![メールレスポンス画像](../../images/aws/mailsampleA.png)\n\n B.投入された画像(英字で書かれたメモ)に対し、書かれている文字を検出し、文字の内容をリストとして出力し、結果をメールで送信する。\n * 入力画像(例)\n ![英語の写っている画像例](../../images/aws/eiji.png)\n * メールレスポンス\n ![メールレスポンス画像](../../images/aws/rekogmail.png)\n\n C.投入された画像に対し、事前に指定した顔画像と比較し、同じ人が写っている場合類似度をメールで送信する。\n * 指定画像(例)\n ![総理](../../images/aws/img_abe.jpg)\n * 入力画像(例_1人のみ認識)\n ![安倍内閣](../../images/aws/abenaikaku.jpg)\n * メールレスポンス(1人のみ)\n ![返信安倍](../../images/aws/mail_abe_solo.png)\n * 入力画像(例_複数認識)\n ![内閣](../../images/aws/abertinity.jpg)\n * メールレスポンス(複数)\n ![返信安倍](../../images/aws/mail_abe_all.png)\n\n* 3-4. 
ソースコードをテストしましょう。\n * 実際にS3のバケット内の格納用のディレクトリに画像をアップロードし、作成したアプリケーションが正常に稼働していることを確認しましょう。\n * 実行後CloudWatch Logsにてログが出力されるので、確認し、適切に動作していない場合は修正をしてください。\n * ![ログストリーム画像](../../images/aws/loggazo.png)\n\n#### 手順4:実行結果を確認しよう\n\n* 4-1. 動作確認を実施しましょう。 \n 確認事項(全体)\n * S3の指定のディレクトリに画像ファイル格納した後、SNSよりGSuiteのメールアドレス宛にレスポンスの内容が届いてくること。\n\n 確認事項(S3関連)\n * 指定のディレクトリ、拡張子にてアプリケーションの実行が確認できていること。\n * 上記以外のディレクトリ、拡張子でアプリケーションが動作しないこと。\n\n 確認事項(Lambda)\n * 要件Aを選んだ場合、画像に移っている人数分の顔情報が出力されていること。 \n (但し、一部しか移っていない場合等、Rekognitionによって「顔ではない」と判別がされた場合は出力に含まれていなくて大丈夫です。)\n * 要件Bを選んだ場合、画像に含まれる文字列について網羅されるように出力されていること。 \n (今回はRekognitionのレスポンス形式について理解できていれば良いので、メール出力をきれいにする必要はないです。)\n\n * 要件Cを選んだ場合、画像に含まれている顔面毎に元画像の人物との類似度が出力されていること。\n\n* 4-2. 成果物の提出について\n * 下記成果物を提出してください。 \n 複数のアプリケーションを実装した場合はアプリケーション毎にフォルダを分けて、それぞれ格納してください。\n * Lambdaコンソール>アクション>関数のエクスポートより、「AWS SAMファイルのダウンロード」ボタンを押下して得られるyamlファイル\n ![yamlDL](../../images/aws/yaml保存.png)\n ![成果物画像_サービスリスト](../../images/aws/yamlsample.png)\n * 実際にアプリケーションを動作させた時にGSuiteアドレスに送られたメールのスクリーンショット\n ![成果物画像_メールレスポンス](../../images/aws/受信サンプル.png)\n\n### 時間が余ったら・・・\n* 5-1. 上記要件A、Bと物体検出を同時に実行するアプリケーションを作成してください。\n * 入力画像を対象とし、顔検出・物体検出・文字検出を同時に行い、検出できたもののみ、SNS経由で検出結果を送信する。\n* 5-2. Rekognition API が提供するほかのAPIを利用して、新たなアプリケーションを作成してください。\n * [Rekognition開発者ガイド](https://docs.aws.amazon.com/ja_jp/rekognition/latest/dg/segments.html)\n * [boto3ドキュメント](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rekognition.html)\n\n### 【参照】Rekognition以外のAWSサービスについて\n\n* Rekognition以外のサービスで困った際には下記を参照してください。\n\n* [IAM](https://d1.awsstatic.com/webinars/jp/pdf/services/20190129_AWS-BlackBelt_IAM_Part1.pdf)\n * AWS では、各サービスの権限制御をIAMで実施します。 \n ※Adminnistrator権限は使用およびロールに付与しないでください。\n\n* [S3](https://d1.awsstatic.com/webinars/jp/pdf/services/20190220_AWS-BlackBelt_S3_Glacier.pdf)\n * 詳細設定はデフォルトのままで大丈夫です(下記画像参照)\n ![S3conf](../../images/aws/bucketconf.png)\n* [Lambda](https://d1.awsstatic.com/webinars/jp/pdf/services/20150701_AWS-BlackBelt-runcodeinthecloud.pdf)\n * ※lambdaファンクションを一から作成するのではなく、「設計図から作成」から、rekognition-pythonを使用して実装するようにしてください。\n ![lambdagazou](../../images/aws/lambdagazou.png)\n\n* [SNS](https://d1.awsstatic.com/webinars/jp/pdf/services/20190604_AWS-Blackbelt_AmazonSNS.pdf)\n * トピックとサブスクリプションを設定する必要があります。 \n 下記画像を参考に、トピックとサブスクリプションを作成してください。\n * トピック\n ![topicconf](../../images/aws/SNSTOPIC.png)\n\n * サブスクリプション\n ![snsconf](../../images/aws/snsconf.png)\n * 赤枠内は自分のGSuiteのメールアドレスを使用すること。 \n その他の設定はデフォルトのままで大丈夫です。 \n 作成後、メールが届くので必ずconfirmを実施してください。\n\n### 【参照】思ったように動作しない場合は…\n\n* CloudWatch logsよりエラーログを確認してください\n\n### 【参考】課題を複数実装する場合は…\n\n* 1つのlambda関数の中で複数の.pyファイルを作成し、動作確認等をする場合はハンドラーを切り替えて実行するようにしてください。\n\n * 参考画像\n ![lambda複数](../../images/aws/例.png)\n ![lambda複数_2](../../images/aws/例2.png)\n \n### その他参考になるサイト\n* 「boto3を使ってAmazon SNSへpublishしてみた」(クラスメソッド提供サイト)→main.pyの部分が参考になる\n\n https://dev.classmethod.jp/articles/publish_to_sns_using_boto3/\n"
},
{
"alpha_fraction": 0.8194444179534912,
"alphanum_fraction": 0.834967315196991,
"avg_line_length": 18.74193572998047,
"blob_id": "09ed2e24e2d79df035ef7c626bb055037401ac5b",
"content_id": "afa296ee7b123a90d1caa9fafc0e4716ab9b0b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2626,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 62,
"path": "/AI開発道場_クラウド編/course_cloud/01.sagemaker/sagemaker.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# AWS sagemakerの概要\nAWS SageMaker は機械学習に特化したマネージド型のクラウドコンピューティングサービスである。 \n機械学習に特化したという点が大きな特徴で、機械学習モデルの構築からデプロイまで\nAWSが提供するシステム上で行うことができる。 \n\n# Sagemakerの特徴\n「インスタンス作成」、「モデル構築」、「トレーニング」、「デプロイ」までのフローを実施することが可能\n\n# Sagemakerを使うメリット\n* 機械学習プロセスの高速化\n* 豊富なフレームワークとアルゴリズム\n* 訓練ずみのモデルへ簡単にアクセス\n\n# Sagemakerで使えるアルゴリズム\nsagemakerは主要なアルゴリズムに対応している。\nその中でもSageMakerで実装されている機械学習アルゴリズム、すなわち組み込みアルゴリズムで学習モデルを作成・使用するのが簡単である。\n線形学習・k-Means・XGboostなどが組み込みアルゴリズムに該当する。\n\n組み込みアルゴリズムについては以下も参照のこと\n\nhttps://docs.aws.amazon.com/ja_jp/sagemaker/latest/dg/algos.html\n\n# XGboostとは\n\n* XGboostは弱学習器(決定木)を構築する際に前に構築された弱学習器の結果を使用し弱学習器を構築する手法。\n\n* 最初の弱学習器で上手く推定できなかった部分を推定するために重みを付けて次の弱学習器で学習を行う。\n\n* ランダムフォレストが並列的に弱学習器を用いたのに対して、XGboostは直列的に弱学習器を用いたものである。\n\n* パラメータは多数あるが、その中でも重要なのがobjectiveである。\n\n【objective】\n\n最小化させるべき損失関数を指定する。\n\n引数 \n\n* reg:linear(線形回帰)\n* reg:logistic(ロジスティック回帰)\n* binary:logistic(2項分類で確率を返す)\n* multi:softmax(多項分類でクラスの値を返す)\n \n※multi:softmaxを指定した場合、num_class(クラス数)の指定が必要となる。\n\n\n* パラメータの詳細は以下のサイトも参照\n\n https://xgboost.readthedocs.io/en/latest/parameter.html\n\n* XGboostをイメージで理解したい方は以下サイトも参照\n\n https://qiita.com/2357gi/items/913af8b813b069617aad\n\n# sagemakerを使うために必要なawsの情報\n* ロール\n* S3のprefix\n* リージョン\n\nそれぞれの意味は環境構築のファイルを参照のこと。\n\n※セルを動かす実習については別ファイルを参照のこと\n"
},
{
"alpha_fraction": 0.7792022824287415,
"alphanum_fraction": 0.7863247990608215,
"avg_line_length": 34,
"blob_id": "081967ae1097a85bbb57610380495549a2cb14ae",
"content_id": "e9e26dab42bb7fc8ceaf5cf70aa77a15b0cf87a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1066,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 20,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/Rekognition概要.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# Rekognition概要\n\n## [Amazon Rekognition](https://docs.aws.amazon.com/ja_jp/rekognition/latest/dg/rekognition-dg.pdf)\n\n* Amazonが提供する、画像/動画分析に使用可能なリソース\n\n* [Amazon Rekognition Image](https://aws.amazon.com/jp/rekognition/image-features/?nc=sn&loc=3&dn=2) \n物体検出、シーン検出、単語抽出等が可能。\n\n* [Amazon Rekognition Video](https://aws.amazon.com/jp/rekognition/video-features/?nc=sn&loc=3&dn=1) \nS3内に保存されている動画から動画分析をすることが可能。 \nKinsesis Video Streamsを使用することで、リアルタイムで動画分析をすることが可能。\n\n* [Amazon Rekognition Custom Labels](https://aws.amazon.com/jp/rekognition/custom-labels-features/) \n画像に対してカスタムラベル設定することで、自動で分析モデルを作成し、使用することが可能。\n\n### チュートリアル\n\n* Amazon Rekognitionのデモを触ってみよう \n コンソールにログインし、Amazon Rekognitionのデモを触ってみよう\n "
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 49,
"blob_id": "8bbb4b02f49b0b684784e58b97209eefe74a2486",
"content_id": "e22a9f117a190b86d33b05d35dd20a4967c1af79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/AI開発道場_クラウド編/course_cloud/04.Vision_API/README.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "Google Vision API/ Google Intelligence API に関する資料\n"
},
{
"alpha_fraction": 0.375,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11,
"blob_id": "9e6f5c89cbb5c7e0331e46ca94a7bfa80e4a0318",
"content_id": "03eae0fa5772f061af51bd192f1f75e3e6a5a5cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 2,
"path": "/README.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# ai-dojo\r\nadd 2021/6/23"
},
{
"alpha_fraction": 0.7124249339103699,
"alphanum_fraction": 0.7256289720535278,
"avg_line_length": 43.59681701660156,
"blob_id": "dd915f87a2e4334e6a8cc2ce173273f2f11bb7d9",
"content_id": "ac28c2962f82adcc6fa3856cd75de62b682aa86a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26485,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 377,
"path": "/AI開発道場_クラウド編/course_cloud/05.Speech-to-Text/README.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "## Speech-to-Text実習・演習\n\n### 目標\n- Speech-to-Textが提供する機能について理解し、利用、実装できること。\n\n### 提出課題\n- [APIを試す](#APIを試す)を実施し、音声文字変換API呼び出しのHTTP通信ログを提出\n- [クイックスタート](#クイックスタート)を実施し、実行ログを提出\n- [入門ガイド](#入門ガイド)を実施し、Pythonプログラムと実行ログを提出\n- PythonプログラムはGitHubのプライベートリポジトリ上に登録して提出(git pushやGitHubリポジトリページから直接コミット等)\n- 最終日までに実施が終わらなかった場合、実施したところまでを提出\n\n\n### 参照URL\n- [公式ドキュメント](https://cloud.google.com/speech-to-text/docs?hl=ja)\n- [Python用GitHubリポジトリ](https://github.com/googleapis/python-speech)\n - [Python用サンプルコード(v1.3.2)](https://github.com/googleapis/python-speech/tree/v1.3.2/samples/v1)\n \n### サービス概要\n- [料金](https://cloud.google.com/speech-to-text/pricing?hl=ja)\n - 毎月60分までは無料(1つの請求単位)\n - 各リクエストの音声の長さは15秒単位で切り上げとなるため、短い音声のリクエストが多く発生する使い方場合、注意が必要\n - データロギングを有効にすると割安になるが、送信された音声データをGoogleが機械学習モデル改善のため使用することになるため注意が必要\n- [サポートされている音声エンコード形式](https://cloud.google.com/speech-to-text/docs/encoding?hl=ja)\n - MP3(ベータ),FLAC,LINEAR16(wav)など\n- [音声認識精度向上ベストプラクティス](https://cloud.google.com/speech-to-text/docs/best-practices?hl=ja)\n - サンプリングレードは16 kHz 以上でキャプチャ。電話通話の場合は8 kHz\n - ユーザの近くにマイクを置く(ノイズキャンセルといった変換処理は不要)\n - 名前や用語など認識しづらい語句のヒントをリクエスト呼び出し時に加える\n- [割当上限](https://cloud.google.com/speech-to-text/quotas?hl=ja)\n - 音声の長さ上限\n - 同期リクエスト:約1分\n - 非同期リクエスト:約480分\n - ストリーミングリクエスト:約5分\n \n### 環境準備\n- 前提\n - GCPプロジェクトでSpeech-to-Textが有効化されている(今回はすでに設定済み)\n - ChromebookにLinuxのターミナルが導入されていること(環境構築で実施済みの想定)\n - 以降のコマンド系の作業はLinuxのターミナル上で実施する\n - Cloud SDKが導入・初期設定済み(環境構築で実施済みの想定)\n - [Text](https://chrome.google.com/webstore/detail/text/mmfbcljfglbokpmkimbfghdkjmjhdgbg?hl=ja)が導入済み(環境構築で実施済みの想定)\n- サービスアカウントファイル(JSONファイル)ダウンロード\n - [Google共用ドライブ](https://drive.google.com/drive/folders/1JM13tzI1WMg-hPL1p0srIywlljp0IIBH)からダウンロード(aidojopj-speech2text.json)\n - Pythonのプログラムから実行する場合に使用。\n - gcloudコマンド経由の実行の場合は不要\n - Cloud SDK初期設定(gcloud init)時の情報からアクセスに必要な情報を自動で取得するため\n- ChromebookのLinuxコンテン環境で音声サポートを有効化\n - Chromebook上のコンソール画面上で音声の録音、再生処理をするために必要\n - ChromeOSのバージョンが79以上であること(2020/1リリース)\n - Chromeブラウザ上でCtrl-Alt-TでChromeシェル画面を開き、下記を実行\n - 以下はLinux環境の再起動が入る場合や一定時間たったあとなどで無効になるため音声録音、再生がうまくいかない場合は都度実施すること\n - stopコマンドを実行すると既存で開いていたLinuxアプリ(ターミナルやVSCodeなど)が停止されるため注意\n - [参考URL](https://chromium.googlesource.com/chromiumos/docs/+/master/containers_and_vms.md#Is-audio-capture-e_g_microphone_supported)\n ```\n crosh> vmc stop termina\n crosh> vmc start termina --enable-audio-capture\n ```\n- Sox導入\n - 音声ファイルの録音、変換、再生処理で利用\n - rec(音声録音),play(音声再生),sox(音声変換)コマンドが利用可能となる\n - Linuxターミナル画面より以下を実行しインストール\n ```\n sudo apt-get install sox\n sudo apt-get install libsox-fmt-mp3\n ```\n- Soxの使い方\n - [コマンドリファレンス](http://sox.sourceforge.net/sox.html)\n - [soxコマンドで音声ファイルを編集する10の例](https://blog.asial.co.jp/885)\n - 音声録音\n - 入力の音声ファイルのエンコードはファイル拡張子によって自動判定されて出力される\n ```\n #録音停止はCtrl+C\n rec test.mp3\n ```\n - 音声再生\n ```\n play test.mp3\n ```\n - 音声情報出力\n ```\n soxi test.mp3\n ```\n - 音声編集\n ```\n #MP3形式をサンプリングレート16kHz,16bit精度,チャンネル数1(モノラル)のwav形式に変換\n sox test.mp3 --rate 16k --bits 16 --channels 1 test.wav\n soxi test.wav\n ```\n- 検証用音声ファイルダウンロード\n - 検証で使いたい音声ファイル(mp3)をダウンロードする\n - LinuxからChromebook上のファイルを参照する場合は、ファイルアプリのマイファイルを右クリック(ダブルタップ)でLinuxとの共有を選択すると`/mnt/chromeos/MyFiles/Downloads`からアクセス可能\n - 音声再生はファイルアプリから音声ファイルをダブルクリックやスペースキー(プレビュー)で確認\n - [サンプルナレーション音声](http://pro-video.jp/voice/announce/)\n - [青空朗読](https://aozoraroudoku.jp/index.html)\n - [NHKラジオニュース](https://www.nhk.or.jp/radionews/):再生中にsoxで音声録音\n - [ラジオNIKKEIポッドキャスト AIのすゝめ](http://www.radionikkei.jp/podcasting/aino/archive.html):長い音声ファイルの文字変換を試す際の音源。聴くのリンクからMP3をダウンロード。\n - 
[文化放送ポットキャスト](http://www.joqr.co.jp/podcast/index.php):複数人の会話の話者分離機能を試す際の音源。MP3でダウンロードできるものを選択。\n- 音声ファイル格納用GCSバケット作成\n ```\n # xxxには名前(ローマ字)を入れる\n gsutil mb gs://speech_to_text_xxx\n ```\n\n### [APIを試す](https://cloud.google.com/speech-to-text?hl=ja)\n- 課題1\n - MP3の音声ファイルをアップロードして音声文字変換API呼び出し時のHTTP通信ログ(リクエストBodyとレスポンスBody)をテキストファイルで提出(api-1-req.log,api-1-res.log)\n - 音声コンテンツ部分は除外する\n - 上記通信を含むHTTP通信ログをHAR形式で課題に提出(api-1.har)\n- 課題2\n - マイクを使った音声文字変換API呼び出し時のHTTP通信ログ(WebSocketの送受信Message)をテキストファイルで提出(api-2-send.log,api-2-recv.log)\n - 音声途中と判断している受信メッセージ([isFinal](https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#streamingrecognitionresult)がTrueとなっているもののみ)は除外\n - 音声のバイナリコンテンツ部分は除外する\n - 上記通信を含むHTTP通信ログをHAR形式で課題に提出(api-2.har)\n\n- 実施概要\n - 「Speech-to-Text を有効に活用する」の下の部分にある入力フォームを使ってAPIを試す。\n - Input TypeにMicrophoneとあるのは、ブラウザのマイク機能からリアルタイムに音声をテキスト化する。File Uploadは、音声ファイルをアップロードして認識した文字列が画面に出力される。\n - Langageには音声の言語を選択します。日本語の音声ファイルの場合、下の方の「日本語(日本)」を選択\n - Speaker diarizationをOnにすると音声に含まれる複数の話者を識別して結果を返します。Onにした場合、話者の人数もあわせて指定。\n - Punctuationを有効にすると句読点の挿入が行われる\n - Show JSONをクリックするとSpeech-to-TextのAPIを呼び出すURLとリクエストするJSONデータを確認できる(このデモでは実際にAPI呼び出しの通信先とは異なる点に注意)\n - Microphoneでのリアルタイム文字認識の場合はストリームでの音声認識APIの呼び出しはgRPCとなっているが、このブラウザベースのデモの仕組みではWebSocketのエンドポイントをプロキシを経由してデータ送受信を行っている。\n - API設計時のRESTやgRPCについては[こちら](https://cloud.google.com/blog/ja/products/api-management/understanding-grpc-openapi-and-rest-and-when-to-use-them)を参照。\n - ファイルアップロードによる方式では、音声ファイルは1分以内のものを指定する。それ以上はこのAPIではエラーとなる\n - マイクを使った方式では、実行直後、ロボットでないことを確認するチェックボックスが表示されるのでチェックする\n\n\n - ChromeのDeveloper Toolsを使ったHTTP通信ログの確認・取得方法\n - URLバーがある右端の3点ボタン(︙)より「その他ツール」>「ディベロッパーツール」を選択\n - Networkのタブを選択し、ブラウザとの通信のURLのName部分のリストが表示される\n - このウィンドウを表示している状態で、上記の音声テキスト変換処理の実行を行う\n - ファイルアップロードによる音声文字変換のURL(Name部分)\n - `proxy?url=https%3A%2F%2Fspeech.googleapis.com...`\n - マイクによる音声文字変換URL(Name)\n - `ws`\n - 効率的に探すには、フィルタ(Networkタブの直下のツールバーの左から3番目)を選択してURLの部分文字列(上記の場合は「proxy」や「ws」)を入力する\n - リクエストBodyはHeaderタブの一番下「Request Payload」で「view source」を選択\n - audioのcontent部分は音声データをBASE64エンコードした文字列のため長い文字列となり、スプレッドシートに貼り付けると1行の最大文字列を超える場合があるため、content部分の文字列は除外して貼り付ける\n - Payload選択して全体が青くなったあと、スクロールで\"content\":までをコピーする\n - レスポンスBodyはResponseタブを選択\n - WebSocketの送信メッセージは、Messagesタブ(wss://xxxのURLの場合に表示される)を選択し、FilterでALLからSendに変更する。Binary Message以外のものを選択して、下部のウィンドウに表示されたJSONメッセージを選択(トリプルクリックでメッセージ全体を選択できる)してコピーする\n - WebSocketの受信メッセージは、FilterをReceiveにして、isFinalがtrueとなっているものを1つ選択してコピー。[isFinal](https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#streamingrecognitionresult)がFalseのものは、音声途中のものとして判断しているもの。\n - 通信ログをファイルとして保存する場合は、ログのURLを1つ選択して右クリック>「Save all as HAR with content」でhar形式(HTTPアーカイブ)で保存する\n - もしくはDeveloper Toolsのツールバーから[HARエクスポート](https://developers.google.com/web/updates/2019/05/devtools#HAR)\n\n\n### [クイックスタート](https://cloud.google.com/speech-to-text/docs/quickstart)\n- 課題1\n - [gcloudツール](https://cloud.google.com/speech-to-text/docs/quickstart-gcloud)を使って以下の音声文字変換を行い、実行ログを提出(qs-1.log)\n - GCS上にある短い音声ファイルを使った音声文字変換\n - ローカル上にある短いwav形式の音声ファイルを音声文字変換\n - HTTP通信ログを有効にして音声文字変換\n - ベータ版コマンドを使ってMP3音声ファイルを直接、音声文字変換\n- 課題2\n - [curlコマンド](https://cloud.google.com/speech-to-text/docs/quickstart-protocol)を使って以下の音声文字変換を行い、実行ログを提出(qs-2.log)\n - GCS上にある短い音声ファイルを使った音声文字変換\n - GCS上にある長い音声ファイルを使った音声文字変換\n - 句読点の自動挿入による音声文字変換\n - 話者分離による音声文字変換\n\n\n- 実施概要\n - 音声文字変換の3つのやり方があり、gcloudとcurlを使ったやり方について実施\n - Pythonを使った実装方法は次の「入門ガイド」のところで実施する\n - リンク先は適宜参照し実施。一部リンク先にないものを実施する箇所もある。\n - 
実行ログはscriptコマンドを使いターミナルに出力した内容をファイルに出力する。[参考URL](https://dev.classmethod.jp/articles/scriptcommand/)\n ```\n script ログファイル名\n # 終了時はexitで抜ける\n ```\n - 課題1:gcloudを使ったやり方\n - 準備\n - gcloudが導入・初期設定済み\n - 1分以内の短い音声ファイル(short.mp3)をrecコマンドやダウンロードサイトからダウンロードして準備\n - 音声文字変換リクエスト\n - GCS上の音声ファイルに対する音声文字変換\n - 音声ファイルの場所はGCS上にあるファイルパスかローカル上のファイルパスを指定\n - 音声ファイル形式によって、指定必須のオプションが変わる\n - 音声変換したい[言語コード](https://cloud.google.com/speech-to-text/docs/languages?hl=ja)は指定必須\n - [コマンドリファレンス](https://cloud.google.com/sdk/gcloud/reference/ml/speech/recognize?hl=ja)\n - [コマンドリファレンス(beta版)](https://cloud.google.com/sdk/gcloud/reference/beta/ml/speech/recognize?hl=ja)\n ```\n #GCS上にある音声ファイルを指定して実行\n gcloud ml speech recognize gs://cloud-samples-tests/speech/brooklyn.flac --language-code=en-US\n\n #ローカル上のMP3ファイルをsoxを使いチャンネル数1でwav形式変換して実行\n sox xxx.mp3 --rate 16k --bits 16 --channels 1 xxx.wav\n soxi xxx.wav\n #日本語で音声文字変換リクエストを呼び出す\n gcloud ml speech recognize xxx.wav --language-code=ja-JP\n\n #GCS上にコピーしてHTTP通信ログを有効にして実行(--log-httpオプションを付ける)\n gsutil cp xxx.wav gs://speech_to_text_xxx/\n gsutil ls gs://speech_to_text_xxx/\n gcloud ml speech recognize gs://speech_to_text_xxx/xxx.wav --language-code=ja-JP --log-http\n\n #MP3形式に対応したbeta版コマンドで実行\n #gcloud betaコマンド初めて実行する場合\n gcloud components install beta\n #MP3ファイル情報を確認\n soxi xxx.mp3\n #GCSにコピー\n gsutil cp xxx.mp3 gs://speech-to-text_xxxx/\n gsutil ls gs://speech_to_text_xxxx/\n #sample-rateはsoxiで確認したサンプリングレートにあわせて指定して実行\n gcloud beta ml speech recognize gs://speech_to_text_xxx/xxx.mp3 --encoding=mp3 --sample-rate=xxxx --language-code=ja-JP\n ```\n\n - 課題2:curlコマンドを使って音声文字変換を行う\n - [GCS上にある短い音声ファイルを使った音声文字変換](https://cloud.google.com/speech-to-text/docs/quickstart-protocol#make_an_audio_transcription_request)\n - 使用する音声ファイルを確認する場合、GCSからダウンロードして再生\n ```\n gsutil ls gs://cloud-samples-tests/speech/\n gsutil cp gs://cloud-samples-tests/speech/brooklyn.flac .\n soxi brooklyn.flac\n play brooklyn.flac\n ```\n - [ヒアドキュメント](https://qiita.com/take4s5i/items/e207cee4fb04385a9952)でJSONリクエストファイル作成\n ```\n cat <<EOS >sync-request.json\n {\n \"config\": {\n \"encoding\":\"FLAC\",\n \"sampleRateHertz\": 16000,\n \"languageCode\": \"en-US\",\n \"enableWordTimeOffsets\": false\n },\n \"audio\": {\n \"uri\":\"gs://cloud-samples-tests/speech/brooklyn.flac\"\n }\n }\n EOS\n cat sync-request.json\n ```\n - 呼び出し\n ```\n #APIに呼び出しに必要なアクセストークンが取得可能か確認。うまくいかない場合、gcloud initで初期設定\n gcloud auth application-default print-access-token\n #音声文字変換処理呼び出し\n curl -s -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer \"$(gcloud auth application-default print-access-token) \\\n https://speech.googleapis.com/v1/speech:recognize \\\n -d @sync-request.json\n ```\n - 日本語音声で試す\n ```\n #日本語音声MP3ファイルの情報を確認\n soxi xxx.mp3\n #flac形式に変換\n sox xxx.mp3 --rate 16000 --channels 1 xxx.flac\n #GCSにコピー\n gsutil cp xxx.flac gs://speech_to_text_xxxx/\n #sync-request-jp.jsonとして作成\n cat <<EOS >sync-request-jp.json\n {\n \"config\": {\n \"encoding\":\"FLAC\",\n \"sampleRateHertz\": 16000,\n \"languageCode\": \"ja-JP\",\n \"enableWordTimeOffsets\": false\n },\n \"audio\": {\n \"uri\":\"gs://speech_to_text_xxxx/xxx.flac\"\n }\n }\n EOS\n cat sync-request-jp.json\n #音声文字変換処理呼び出し\n curl -s -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer \"$(gcloud auth application-default print-access-token) \\\n https://speech.googleapis.com/v1/speech:recognize \\\n -d @sync-request-jp.json\n ```\n\n - 
[GCS上にある長い音声ファイルを使った音声文字変換](https://cloud.google.com/speech-to-text/docs/async-recognize#speech_transcribe_async_gcs-protocol)\n - 長い音声ファイル場合、オペレーション名(nameの値)が返却後、その値を使って結果を呼び出す\n ```\n curl -X POST \\\n -H \"Authorization: Bearer \"$(gcloud auth application-default print-access-token) \\\n -H \"Content-Type: application/json; charset=utf-8\" \\\n --data \"{\n 'config': {\n 'language_code': 'en-US'\n },\n 'audio':{\n 'uri':'gs://cloud-samples-tests/speech/vr.flac'\n }\n }\" \"https://speech.googleapis.com/v1/speech:longrunningrecognize\"\n curl -H \"Authorization: Bearer \"$(gcloud auth application-default print-access-token) \\\n -H \"Content-Type: application/json; charset=utf-8\" \\\n \"https://speech.googleapis.com/v1/operations/xxxx\"\n\n # 上記に加えて日本語の1分以上のMP3音声ファイルで試す。コマンドは各自これまでの内容を踏まえ考えて実施\n # MP3ファイルの音声情報出力\n # flac形式(チャンネル数1,サンプリングレート16000)に変換\n # flac形式の音声情報確認\n # GCSにアップロード\n # リクエストJSONをカスタマイズして実行\n ```\n - [句読点の自動挿入による音声文字変換](https://cloud.google.com/speech-to-text/docs/automatic-punctuation#speech_transcribe_auto_punctuation-protocol)\n - 上記のリンクの内容のコマンド実行に加え日本語による句読点の自動挿入による音声文字変換を実施する。\n - 日本語音声内容により句読点がうまく入らない場合もある。\n - [話者分離による音声文字変換](https://cloud.google.com/speech-to-text/docs/multiple-voices#speech_transcribe_diarization_beta-protocol)\n - 上記リンクのcurlコマンドによるコマンド実行後、出力結果ファイルはcatで表示して実行ログに出力す。\n - 上記コマンド実行に加え日本語の会話音声ファイルを使って話者分離による音声文字変換も実施する。\n \n### [入門ガイド](https://cloud.google.com/speech-to-text/docs/how-to)\n- 課題1\n - [短い音声ファイルの音声文字変換](https://cloud.google.com/speech-to-text/docs/sync-recognize)を行い、Pythonプログラムと実行ログを提出(howto-1.log)\n - ローカル ファイルでの同期音声認識の実行\n - [speech_transcribe_sync.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1/speech_transcribe_sync.py)\n - リモート ファイルでの同期音声認識の実行\n - [speech_transcribe_sync_gcs.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1/speech_transcribe_sync_gcs.py)\n- 課題2\n - [長い音声ファイルの音声文字変換](https://cloud.google.com/speech-to-text/docs/async-recognize)を行い、Pythonプログラムと実行ログを提出(howto-2.log)\n - Google Cloud Storage ファイルを使用した長い音声ファイルの文字変換\n - [speech_transcribe_async_gcs.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1/speech_transcribe_async_gcs.py)\n - ローカル ファイルを使用した長い音声ファイルの変換\n - [speech_transcribe_async.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1/speech_transcribe_async.py)\n- 課題3\n - [ストリーミング入力の音声文字変換](https://cloud.google.com/speech-to-text/docs/streaming-recognize)を行い、Pythonプログラムと実行ログを提出(howto-3.log)\n - ローカル ファイルでのストリーミング音声認識の実行\n - [transcribe_streaming.py](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/cloud-client/transcribe_streaming.py)\n - 音声ストリームでのストリーミング音声認識の実行\n - [transcribe_streaming_mic.py](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/microphone/transcribe_streaming_mic.py)\n- 課題4\n - [音声適応による認識リクエストの送信](https://cloud.google.com/speech-to-text/docs/context-strength)を行い、Pythonプログラムと実行ログを提出(howto-4.log)\n - [transcribe_context_classes.py](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/cloud-client/transcribe_context_classes.py)\n\n- 課題5\n - [異なる話者の分離](https://cloud.google.com/speech-to-text/docs/multiple-voices)を行い、Pythonプログラムと実行ログを提出(howto-5.log)\n - [speech_transcribe_diarization_beta.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1p1beta1/speech_transcribe_diarization_beta.py)\n- 課題6\n - [認識メタデータの追加](https://cloud.google.com/speech-to-text/docs/recognition-metadata)を行い、Pythonプログラムと実行ログを提出(howto-6.log)\n - 
[speech_transcribe_recognition_metadata_beta.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1p1beta1/speech_transcribe_recognition_metadata_beta.py)\n- 課題7\n - [句読点挿入の自動化](https://cloud.google.com/speech-to-text/docs/automatic-punctuation)を行い、Pythonプログラムと実行ログを提出(howto-7.log)\n - [transcribe_auto_punctuation.py](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/speech/cloud-client/transcribe_auto_punctuation.py)\n- 課題8\n - [単語のタイムスタンプの取得](https://cloud.google.com/speech-to-text/docs/async-time-offsets)を行い、Pythonプログラムと実行ログを提出(howto-8.log)\n - [speech_transcribe_async_word_time_offsets_gcs.py](https://github.com/googleapis/python-speech/blob/v1.3.2/samples/v1/speech_transcribe_async_word_time_offsets_gcs.py)\n\n\n- 実施概要\n - 提供される機能のPythonでの実装方法について学ぶ\n - リンクの内容に従ってPythonプログラムを作成、修正し、実行\n - GitHubのリンクが一部404となっているものは、上記Pythonのソース(v1.3.2)のリンクからアクセスする\n - 日本語音声に対応するためにlanguage_codeはja-JPに差し替える\n - プログラムの実行方法は、ソースコードに記載のものを参照する\n - 音声ファイルが入力に必要なものは、soxを使った変換やgcsへのコピーを適宜行う。\n - 事前準備\n - サービスアカウントファイル(aidojopj-speech2text.json)\n - [認証設定](https://cloud.google.com/docs/authentication/getting-started#cloud-console)\n - Google Cloudが提供するAPIにアクセスするために必要な認証設定を行う\n - サービスアカウントは事前に準備したファイルをコピーして利用する\n ```\n cp /mnt/chromeos/GoogleDrive/SharedDrives/AI開発道場/クラウド編/Speech-to-Text/aidojopj-speech2text.json ~/\n #export GOOGLE_APPLICATION_CREDENTIALS={JSONファイルのパス}\n export GOOGLE_APPLICATION_CREDENTIALS=~/aidojopj-speech2text.json\n ls -l $GOOGLE_APPLICATION_CREDENTIALS\n ```\n - ライブラリインストール\n ```\n pip install --upgrade google-cloud-speech\n - ストリーム呼び出し時に必要なライブラリインストール\n ```\n sudo apt install portaudio19-dev\n pip install pyaudio\n ```\n - Pythonプログラム作成・修正\n - GitHubからソースをコピーして作成。リポジトリをまるごとgit cloneしてもよい\n - 編集はターミナルからviか、VSCodeやTEXTなど好きなものを使う\n - 実行\n ```\n python xxx.py [プログラムによってはオプション指定]\n ```\n"
},
{
"alpha_fraction": 0.7365484237670898,
"alphanum_fraction": 0.7453168630599976,
"avg_line_length": 34.842857360839844,
"blob_id": "a19193d0a01781b008e0bf295b8001d1304870e5",
"content_id": "662bf037b4d4d9b89cfdb91318f2281adbd082e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4899,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 70,
"path": "/AI開発道場_クラウド編/course_cloud/02.AutoML_Tables/AutoML_Tables_演習.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# 前提\n\n* 本演習は、Chromebookの仮想Linux上で実施すること。\n* 演習の成果物(python、シェルスクリプト、API-KEY)は、各自のGitHubプライベートリポジトリの course_cloud/AutoML_Tables にすべてアップロードすること。\n * GitHub へのアップロードは、Linuxコンソールからコミット、プッシュ もしくは GitHub のウェブ画面から GUIでアップロード のどちらでもかまいません。GitHub のウェブ画面からアップロードする場合は、対象のディレクトリに移動した後、[Add file] > [Upload Files] > (アップロードするファイルを選択) > [Commit Changes] を実行します。(下図参照)\n\n <img src=\"https://github.com/dcs-aidojo/public-data/blob/master/course_cloud/automl_tables/cat_git_upload.png?raw=true\" width=\"750\">\n\n# 演習\n\n実習2のデータを利用して、Pythonおよびシェルスクリプトを実装し、データセット作成〜バッチ予測までの一覧の流れを実行する。\n\n<全体イメージ>\n\n <img src=\"https://github.com/dcs-aidojo/public-data/blob/master/course_cloud/automl_tables/cat_exercise_overview.png?raw=true\" width=\"750\">\n\n## 1-1. データセット作成 の実装\n\n実習2を参考に、AutoML Tablesのデータセットを作成するPythonとシェルスクリプトを実装してください。\n\n* シェルスクリプトが先に実行され、シェルスクリプトからPythonが呼ばれる形にしてください\n* サービスアカウントとAPIキーは、実習2で作成したものを再使用してください。\n* バケットは有無を確認し、無ければ作成する実装としてください。バケット名とデータセット名は実習2で設定したものと同名としてください。\n* ファイル名およびディレクトリ構成は以下のとおりとしてください。\n```\nAutoML_Tables\n ├── api-key\n │ └── aidojoPJ-XXXXXXXXXX.json:各自が作成したAPIキーファイル\n └── src\n ├── create_dataset.py:データセット作成(Python)\n └── create_dataset.sh:データセット作成(シェル)\n```\n\n## 1-2. データインポート〜バッチ予測 の実装\n\n実習2の「census」のデータを再度使って、データインポート、データセットの更新、モデル作成、バッチ予測 の各処理について、それぞれPyhonとシェルスクリプトを実装してください。なお、実装する上で以下の点にご注意ください。\n\n* データインポート\n * インポート元のデータは実習2で使用したものを再使用してください。\n* データセット更新\n * 全ての特徴量について、Null非許容とし、かつ データ型を個別に定義してください。個別定義するデータ型は[こちら](https://drive.google.com/drive/folders/16paR6WBlreDVhYbSOe7xS_wiEuWNNeUn)の「dataset_code.json」のとおりとしてください。\n \n 【注】実装する際は、`json.load` メソッドを利用して「dataset_code.json」を Pythonの辞書形式として読み取り、全てのデータ型を個別に設定してください。\n* モデル作成\n * モデル名は実習2で設定したものと同名としてください。\n* バッチ予測\n * バッチ予測対象のデータは実習2で使用したものを再使用してください。(自身で作成したバケットにコピーしてから予測する形としてください)\n* 全体\n * 変数は別ファイルとして作成し、ファイルを読み込んで変数を取得する形としてください。(変数定義のPythonファイルを作成し、importする)\n * ファイル名およびディレクトリ構成は以下のとおりとしてください。\n```\nAutoML_Tables\n ├── api-key\n │ └── aidojoPJ-XXXXXXXXXX.json:各自が作成したAPI-KEY\n ├── data\n │ └── dataset_code.json\n └── src\n ├── batch_prediction.py:バッチ予測(Python)\n ├── batch_prediction.sh:バッチ予測(シェル)\n ├── constants.py:変数用(Python)\n ├── constants.sh:変数用(シェル)\n ├── create_dataset.py:データセット作成(Python)\n ├── create_dataset.sh:データセット作成(シェル)\n ├── create_model.py:モデル作成(Python)\n ├── create_model.sh:モデル作成(シェル)\n ├── import_data.py:データインポート(Python)\n ├── import_data.sh:データインポート(シェル)\n ├── update_dataset.py:データセット更新(Python)\n └── update_dataset.sh:データセット更新(シェル)\n```\n"
},
{
"alpha_fraction": 0.7808219194412231,
"alphanum_fraction": 0.7853881120681763,
"avg_line_length": 28.200000762939453,
"blob_id": "87d0d44e8f284139c84e984b60ee7064f7b38b58",
"content_id": "21e9edd2ba2ae47f009f80c19e477a8b9edf7807",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 15,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/基本演習.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# Amazon rekognition_基本演習\n\n## チュートリアル1_画像分析\n\n* 下記リンク先を参考に、画像分析のチュートリアルを実施しましょう \n[顔を検出、分析、比較する](https://aws.amazon.com/jp/getting-started/hands-on/detect-analyze-compare-faces-rekognition/)\n\n## チュートリアル2_動画分析\n\n* 下記リンク先を参考に、動画分析のチュートリアルを実施しましょう \n[動画分析とリッチメタデータ抽出](https://aws.amazon.com/jp/getting-started/hands-on/analyze-extract-metadata-video-rekognition/)\n\n## 上記チュートリアルが完了したら…\n\n* [応用問題](./応用演習.md)に挑戦し、Rekognitionを使用したサービス開発を実施してください。\n"
},
{
"alpha_fraction": 0.8108882308006287,
"alphanum_fraction": 0.8385864496231079,
"avg_line_length": 35.10344696044922,
"blob_id": "5af0f6d9ec44dfacdd07651abbf4a67d4f649fdd",
"content_id": "ee2c25e729166ef8a792584b23d381a7fff77343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2131,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 29,
"path": "/AI開発道場_クラウド編/course_cloud/02.BigQuery_ML/BigQuery_ML_概要.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# BigQuery\n\nGCPで提供されるクラウドデータウェアハウス。2012年に公開された。数TB(テラバイト)あるいはPB(ペタバイト)に及ぶデータセットに対し、SQLライクのクエリを実行し、数秒あるいは数十秒程度で結果を返すことができる。他社の類似サービスとして Amazon Redshift、Azure SQL Data Warehouse がある。\n\n公式:https://cloud.google.com/bigquery/?hl=ja\n\n# BigQuery ML\n\nBigQueryのデータに対して標準SQLクエリを利用して、機械学習モデルの作成と実行が可能。2018年のCloud Nextで発表された。2020年6月時点でベータ版のみの提供となっている。DWHからデータをエクスポートして別形式に変換や加工等をせず、SQLのみでそのままモデル作成の一連の流れが実行できる為、開発工数の削減が期待できる。以下のモデルが利用可能:\n\n- 線形回帰(予測)\n- 2 項ロジスティック回帰(分類)\n- 多項ロジスティック回帰(分類)\n- K 平均法クラスタリング(データ セグメンテーション)\n- TensorFlow モデルのインポート\n\n公式:https://cloud.google.com/bigquery-ml/docs/bigqueryml-intro?hl=ja\n\n\n# AutoML Tables と BigQuery MLの使い分け\n\nBigQuery MLはシンプルなモデルによる学習が前提であり、開発速度と効率性を重視している。よって、テストや反復を迅速に行うことを重視し、シンプルなモデルタイプを使用する場合は、BigQuery ML が向いている。一方、高品質で複雑なモデルを得る必要があり、かつ ある程度の時間を待つことができる場合は AutoML Tables が向いている。\n\nなお、[2020年6月16日のリリース](https://cloud.google.com/bigquery-ml/docs/release-notes#June_16_2020)により、BigQuery ML から AutoML Tables によるアンサンブル学習を呼び出せるようになった。\n\n\n# 参考\n\n- [誰でも簡単に超高速なクエリができるBigQueryとは?](https://www.buildinsider.net/web/bigquery/01)\n"
},
{
"alpha_fraction": 0.7532544136047363,
"alphanum_fraction": 0.8017751574516296,
"avg_line_length": 50.212120056152344,
"blob_id": "f64179725d244cd73778fdd24f5567a4456e0b6c",
"content_id": "6073278d00e84878063c7675e88694724fe571f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2556,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 33,
"path": "/AI開発道場_クラウド編/course_cloud/02.AutoML_Tables/AutoML_Tables_概要.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# AutoML\n\nAutoML(Automated Machine Learning)とは、機械学習モデルの設計・構築を自動化するための手法全般、またはその概念を指す。機械学習の専門的な知識がなくても素早く、そして簡単に機械学習モデルを構築できることを目標としている。\n\n# Google Cloud AutoML\n\nGoogle Cloud AutoMLは、2018年1月に公開されたGoogleが提供するクラウド型のAutoMLサービス。GCPコンソールのGUI操作で、データに基づいたモデルのトレーニング、評価、改善、デプロイを実行し、独自の機械学習モデルを作成できる。現在ベータ版も含めて以下5種類のサービスが提供されている。\n- [AutoML Vision](https://cloud.google.com/vision/automl/docs?hl=ja)\n- [AutoML Video Intelligence](https://cloud.google.com/video-intelligence/automl/docs?hl=ja)\n- [AutoML Natural Language](https://cloud.google.com/natural-language/automl/docs?hl=ja)\n- [AutoML Translation](https://cloud.google.com/translate/automl/docs?hl=ja)\n- [AutoML Tables](https://cloud.google.com/automl-tables/docs?hl=ja)\n\n<AutoML サービス一覧と概要>\n\n<img src=\"https://cdn-xtech.nikkei.com/atcl/learning/lecture/19/00091/00001/hyo2.jpg?__scale=w:800,h:545,q:100&_sh=0560d80fe0\" width=\"500\">\n\n公式:https://cloud.google.com/automl?hl=ja\n\n# Google AutoML Tables\n\n2019年4月に公開されたCloud AutoMLサービスの1つで、構造化データ(表形式のデータやデータベース)の解析・予測を自動で行う。2020年6月時点でベータ版のみの提供となっている。AutoML Tablesは教師付きの学習サービスで二項分類、多項分類、回帰分析をサポートしており、データの種類によって最適なモデルを複数選び、アンサンブル学習(複数のモデルの組み合わせ)を行い、モデルをビルドできる。\n\n<AutoML Tables 概要図>\n\n<img src=\"https://github.com/dcs-aidojo/public-data/blob/master/course_cloud/automl_tables/cat_automltables_overview.png?raw=true\" width=\"700\">\n\n# 参考\n\n- [[Cloud OnAir] Next ’19 サンフランシスコ最新情報 GCP 特集 2019年4月11日 放送](https://www.slideshare.net/GoogleCloudPlatformJP/cloud-onair-next-19-gcp-2019411)\n- [Cloud AutoML](https://cloud.google.com/automl?hl=ja)\n- [AutoML Tables の特徴と機能](https://cloud.google.com/automl-tables/docs/features?hl=ja)\n- [GCPのAI・機械学習サービス](https://xtech.nikkei.com/atcl/learning/lecture/19/00091/00001/)\n"
},
{
"alpha_fraction": 0.8252475261688232,
"alphanum_fraction": 0.8356435894966125,
"avg_line_length": 23.621952056884766,
"blob_id": "e8c17352dc5fccd13f82c675ef95ba6a91965389",
"content_id": "26cdae6bd6a51e7b2feba671fec1760aed631e81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4378,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 82,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/応用演習準備課題.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "この演習では、Amazon SNSとlambdaについて扱う。\n\n# Amazon SNS\n\n## 説明\nAmazon SNS(Amazon Simple Notification Service) は、サブスクライブしているエンドポイントまたはクライアントへの\nメッセージの配信または送信を、調整および管理するウェブサービスである。\n\nエンドポイントを設定し、トピックをパブリッシュすることで、例えばメッセージを自分のメールアドレスに送ることができる。\n\n[Amazon SNSのリンク](https://ap-northeast-1.console.aws.amazon.com/sns/v3/home?region=ap-northeast-1#/homepage)\n\n参考:\n[公式ドキュメント](https://docs.aws.amazon.com/ja_jp/sns/index.html \"公式ドキュメント\")\n\n\n\n【用語】\n\n* Amazon Resource Name(ARN)\n\nAWSにおけるリソースのID。このARNを参照し、配信対象や配信先の呼び出しを行う。\nSNSにおいては、後述のエンドポイント、トピック、サブスクリプションにこのARNが割り当てられる。\n\n例)arn:aws:sns:us-east-1:123456789012:push_topicname\n\n* エンドポイント\n\n配信対象端末を識別するためのデータ。デバイストークンより作成する、端末を識別するためのARN。\nデバイストークンを登録するとこのエンドポイントが作成され、配信を行う際はこのエンドポイントを指定し、配信する。\n簡単に言うとメールアドレスのようなものである。\n\n* トピック\n\n配信対象をグルーピングし、配信対象に一斉に通知を配信するための機能。\nこのトピックを作成し、エンドポイントを登録したあと、トピックに対し送信したいメッセージを発行すると、トピックに登録されているエンドポイントへ一斉に通知を配信することができる。\n\n・サブスクリプション\n\nサブスクリプションとは、作成したトピックと配信対象のエンドポイントを紐づけるデータ。\nサブスクリプションによってトピックにエンドポイントが紐づけられていることで、トピックに対しメッセージを発行すると、\nそのトピックに紐づく配信対象のエンドポイントへとメッセージが配信されるようになっている。\n\n・パブリッシュ\n\n通知を配信すること。\nSNSにおいて通知の配信は、エンドポイントやトピックに対しメッセージを発行するという形をとっている。\nメッセージの発行はアプリケーション画面、トピック画面から可能。\n \n## 実習\n\nAWS SNSのチュートリアルに取り組もう。\n\n[SNSチュートリアル](https://docs.aws.amazon.com/ja_jp/sns/latest/dg/sns-tutorial-create-topic.html \"SNSチュートリアル\")\n\n簡単に説明すると、AWS SNSを使い、自分のメールアドレスにメッセージを送る内容となっている。\nチュートリアルの手順に沿って行う。なお、オプションや省略可能というところは飛ばして良い。\n\n(注意点)メールアドレスをエンドポイントに設定した際にメールが飛んでくるので、それをconfirmする必要がある。\n\n# AWS lambda\n\n## 説明\nAWS Lambdaとはクラウド上にプログラムを定義しておき、インターネットを通じて実行できるサービスである。\nサーバやミドルウェアの管理はAWSがしてくれるので、プログラムだけを考えればいい。\n\n実行させる関数を書いておき、好きなタイミング(例えば、S3に画像がアップロードされたときなど)で実行することができる。\n上述のAWS SNSと組み合わせて使うこともできる。\n\nトリガーによって、lambda_handler関数が実行される。\n\n[AWS lambdaのリンク](https://ap-northeast-1.console.aws.amazon.com/lambda/home?region=ap-northeast-1#/discover)\n\n参考:\n[公式ドキュメント](https://docs.aws.amazon.com/ja_jp/lambda/?id=docs_gateway \"公式ドキュメント\")\n\n## 実習\n以下の記事の内容を見て、記事内の「はじめに」から「ここまでのまとめ」まで動かす。\n\n[実習の記事](https://dev.classmethod.jp/articles/lambda-my-first-step/#toc-9)\n\n※S3fullaccessroleは最初から権限付与されています。\n\n"
},
{
"alpha_fraction": 0.7183098793029785,
"alphanum_fraction": 0.7183098793029785,
"avg_line_length": 22.5,
"blob_id": "3a7374d068269f2ee6f3d658be25c659a6e43f7a",
"content_id": "8dcf08b1ac4b0eabc9524a675e050547a57ff2db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 12,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/README.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# クラウド編 Amazon Rekognition\n\n## Menu\n\n* [Rekognition概要](./Rekognition概要.md)\n * 機能の簡単な説明と概要ページのリンク。\n* [基本演習](./基本演習.md)\n * 画像、動画分析のデモに触れて、入力項目と出力項目について学ぶ\n* [応用演習](./応用演習.md)\n * 画像分析のサービス作成ハンズオンを通じて、「Rekognitionを使用したサービス構築」を学ぶ\n * その他サービスの実装部分は手順に記載してあります。 \n よく読んで研修を進めてください。\n "
},
{
"alpha_fraction": 0.7543027997016907,
"alphanum_fraction": 0.7617859840393066,
"avg_line_length": 16.132478713989258,
"blob_id": "798027bb268584156c6b04f7c6445db66c2fcb58",
"content_id": "aa8227957c24fa4375d2f7cbb527f867614c99bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14906,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 468,
"path": "/AI開発道場_クラウド編/course_cloud/00.cloud_environment/setup.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# クラウド編環境構築\n\n* クラウド編ではGoogle Cloud PlatformとAmazon Web Serviceというクラウドサービスを利用する。 \n\n# Google Cloud Platform(GCP)とは\n* Google Cloud Platform(GCP) とは、Google がクラウド上で提供するサービス群の総称である。\n\n* 公式ドキュメントは以下\n\n https://cloud.google.com/docs?hl=ja\n \n# Amazon Web Service(AWS)とは\n* Amazon Web Services(AWS)はAmazonが提供している100以上のクラウドコンピューティングサービスの総称である。\n\n* 公式ドキュメントは以下\n\n https://docs.aws.amazon.com/index.html\n \n# GCPを使うにあたって\n* GCPを使うには、いくつかの準備が必要となる。\n\n## Gsuiteとは\n* G Suiteとは、 Google社が提供している各サービスのアプリケーションをビジネス向けにまとめて利用できる有料サービスである。\n* 管理者機能があり、G SuiteサービスのすべてをGoogle管理コンソールで一元的に管理できる。この管理機能を使って、ユーザーの追加や削除、支払いの管理、モバイルデバイスの設定などを行うことができる。\n* シングルサインオンや2段階認証の仕組みでセキュリティ面で安心である。\n\nGCPを使う際にはG Suiteのアカウントを使用する。\n\n## Google Compute Engine(GCE)\n* AWS の EC2 に相当するサービスとなっており、時間あたりの課金で仮想マシンをレンタルすることができるサービス\n* 高いネットワーク性能が期待でき、セキュリティ的に安全である。\n* 以下も参照のこと\n\n https://cloud.google.com/compute?hl=ja\n\n## GCEでインスタンスを立ち上げる\n* GCPコンソールでログインして、Compute Engine>Create Instanceを選択\n* Nameにインスタンスの名前を指定。\n* Zoneはasia-northeast1-a、\n* Machine typeは試しにやるならmicro\n* boot diskはUbuntu 16.04 LTS\n* firewallはAllow HTTP traffic\n上記を設定したらcreateするとインスタンスの設定が完了する。\n\n### View gcloud command\n\n”Connect” ボタンについている下向きの三角形をクリックすると、インスタンスに接続する方法の選択肢が表示される。この中から、 ”View gcloud command” をクリック。gcloud を使用してインスタンスに接続するためのコマンドが表示される。\n\n### Google Cloud SDKのインストール\n\n* apt-getを使用するインストール\n 以下を参照のこと\n \n https://github.com/dcs-aidojo/contents/blob/master/getting_started/usage/gcp.md#google-cloud-sdkgcloud%E5%B0%8E%E5%85%A5\n\n* インストーラの使用でインストールする場合\n\nターミナルで以下コマンドを打つ\n```\ncurl https://sdk.cloud.google.com | bash\n```\n```\nexec -l $SHELL\n```\n```\ngcloud init\n```\n* gcloud initがうまくいかない場合は.bash_profileにパスを書く、以下を実行\n```\ncd ~\nls -a\nvi .bash_profile\n```\n以下を追記\n```\nsource <sdkを入れたディレクトリ>/google-cloud-sdk/completion.bash.inc\nsource <sdkを入れたディレクトリ>/google-cloud-sdk/path.bash.inc\n```\n### インスタンスの接続\n* 設定が開始される。Google アカウントへのログインを求められるので、ログインを行う。\n* ブラウザが開くので、アカウントへのログインを行い、 OAuth の認証画面で許可を行う。\n* ターミナル画面に戻ると、プロジェクトの選択を求められるので、プロジェクトを選択する。GCP研修で利用するプロジェクトは「aidojoPJ」。\n* View gcloud commandのところで表示されたコマンドを打つ。\n\n鍵がないときwarningが出るが、そのまま作成すれば良い。\n\n【注意】\n2回目以降ssh接続する際は、cloud SDKの承認が必要である。\n以下のコマンドを実行する。\n```\ngcloud auth login\n```\nURLが表示されるので、アクセスしてgoogleアカウントでログインして許可。\nコードが表示されるので、ターミナルに貼り付ける。\nこれで承認され、gcloudコマンド(後述)が使えるようになる。\n\n\n### ポートの接続\n* GCPのコンソールページのナビゲーションメニューから、VPCネットワーク→ファイアウォール ルールに移動して、\nファイアウォール ルールを作成をクリック。\n\n 名前→任意の名前\n\n ターゲットタグ→任意のタグ\n\n ソース IP の範囲→0.0.0.0/0\n\n 指定したプロトコルとポート→tcpにチェックしてポート番号を適当に\n\n 他は初期設定のままでOK。\n\n* GCPのコンソールページのナビゲーションメニューから、Compute Engine→VMインスタンスに移動。\n\n* 使用するインスタンス内の画面上部にある編集をクリックして、ネットワークタグにファイアウォールルールで作成したターゲットタグを入力してenterキー。\n\n* 画面下部の保存で設定の保存。\n\n```\nsudo systemctl disable firewalld\nRemoved symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.\nRemoved symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.\n```\n\n* 続いて、 /etc/selinux/config を編集\n\n```\n$ sudo vi /etc/selinux/config\n```\n\n下記のように変更。\n```\nSELINUX=disabled\n```\n```\n$ sudo vi /etc/ssh/sshd_config\n```\n* sshdの設定を以下のように編集\n\n```\n#Port 22\nPort XXXXXX # 新しいポート番号\n```\n```\n$ sudo systemctl restart sshd\n```\n\n* コンソールからSSHでブラウザウィンドウで開くをクリック。ポート22で接続できないことを確認\n\n* ブラウザ ウィンドウでカスタムポートを開くをクリック。ポート番号を入力して接続できればポート番号の変更は成功。\n\n# gcloudコマンド\n\n* gcloud コマンドは、Google Cloud Platform (GCP) のサービスについて表示・管理・更新を行うコマンドラインツールで、Google が作成している Cloud SDK の中の一部である。\n* gcloud で GCP の全サービスを操作できるわけではなく、BigQuery は bq コマンド、Google Cloud Storage (GCS) は gsutil コマンド、GKE (≒Kubernetes) は kubectl コマンドをそれぞれ使用する\n\n## 
gcloud コマンドの基本的な使い方\n\n* gcloud の後には「グループ」を指定する。\n\n* 例:\n * gcloud compute … Compute Engine に関する表示・設定\n * gcloud sql … Cloud SQL に関する表示・設定\n\n## よく使われるコマンド\n\n【全体】\n\n* 自分のプロジェクト一覧を表示\n\n```\ngcloud projects list\n```\n\n* cloud sdkのプロパティを見る\n\n```\ngcloud config list\n```\n\n* プロジェクト切り替え\n\n```\ngcloud config set project <your-project-id>\n```\n\n【GCE】\n\n* インスタンス一覧を見る\n\n```\ngcloud compute instances list\n```\n\n* インスタンスの作成\n\n```\ngcloud compute instances create <your-instance-name> --project <your-project-name> --image-family centos-7\n```\n\n* 使用可能なimage一覧を見る\n\n```\ngcloud compute images list\n```\n\n* インスタンスの起動・停止\n\n```\ngcloud compute instances start <your-instance-name>\ngcloud compute instances stop <your-instance-name>\n```\n\n* インスタンスにssh\n\n```\ngcloud compute ssh <your-instance-name>\n```\n\n* 通常のsshコマンドで接続できるように設定\n\n```\ngcloud compute config-ssh\n```\n\n~/.ssh/configに稼働中のインスタンスへの接続設定が作られる。\n\n```\nssh <your-instance-name>.<zone>.<your-project-id>\n```\n\nssh [tab]で\n\n* インスタンスへローカルのファイルをコピーする\n\n```\ngcloud compute copy-files <local-path> <your-instance-name>:<remote-path> --zone <zone>\n```\n\n* インスタンスにあるファイルをローカルにコピーする\n\n```\ngcloud compute copy-files <your-instance-name>:<remote-path> <local-path> --zone <zone>\n```\n\n## Google Cloud Storage\n\n* Google Cloud Storage(以下、GCS)は、オブジェクトストレージである。\n* 管理がシンプルで、高耐久性、高可用性のストレージを提供している。\n* リンクは以下\n \n https://cloud.google.com/storage/docs?hl=ja\n\n### gsutilコマンド\n\n* gsutilコマンドとは、GCS(Google Cloud Storage)をコマンドラインから操作できるPythonアプリケーションのこと\n\n#### よく使われるコマンド\n\n* gsutilのバージョンを確認する場合\n```\ngsutil version\n```\n* バケットの一覧を出力する場合\n```\ngsutil ls\n```\n* バケット内のオブジェクト一覧を出力する場合\n```\ngsutil ls <bucket-name>\n```\n* オブジェクトの内容を出力する場合\n```\ngsutil cat <object-name>\n```\n\n## big queryとは\nGCPで提供されるクラウドデータウェアハウス。2012年に公開された。数TB(テラバイト)あるいはPB(ペタバイト)に及ぶデータセットに対し、SQLライクのクエリを実行し、数秒あるいは数十秒程度で結果を返すことができる。他社の類似サービスとして Amazon Redshift、Azure SQL Data Warehouse がある。\n\n公式は以下\n https://cloud.google.com/bigquery/?hl=ja\n\n## bqコマンド\n* データセット・テーブル・ビューの作成\n```\nbq mk \n```\n* テーブル作成\n```\nbq load\n```\n* データ読み込み\n```\nbq ls\n```\n* データセットの権限やテーブルのスキーマ情報等を表示\n```\nbq show\n```\n* コマンド一覧やオプションなどを確認する\n```\nbq help\n```\n* データセット・テーブルの削除\n```\nbq rm\n```\n* クエリ実行\n```\nbq query\n```\n* データセットやテーブル情報を更新する \n```\nbq update\n```\n* テーブル内容の確認\n```\nbq head \n```\n* テーブルのエクスポート\n```\nbq extract\n```\n* テーブルのコピー\n```\nbq cp\n```\n\n# サービスアカウント\n個々のユーザではなく、アプリケーションに属するアカウント。使いたい数だけ発行し、アプリに組み込んで使う。\nサービスアカウントは0以上のサービスアカウントのキーペアを持ち、Googleの認証に使用する。\n\n## サービスアカウントの付与\n* GCPのダッシュボードからサービスアカウントを選択。\n\n* 「サービスアカウントを作成」からロールを割り当てて作る。\n\n* サービスアカウント作成後は該当のサービスアカウントの操作>鍵を作成を選択し、JSONを選択すると鍵作成後、認証ファイルがダウンロードされる。 \n\n## AWSの基本的情報\n\n### Region\nAWSサービスのバックボーンは、「リージョン」と呼ばれる地理的に離れた領域のデータセンター群がそれぞれ接続されることで構成されている。\n各リージョンではAWSのサービスがそれぞれ独立して提供されている。\n東日本では基本的に東京リージョンを使うが、たまに東京リージョンで使えないサービスがあり、その際は別のリージョンを使う。\nAWSコンソールで初めてログインした際には、リージョンを東京に変えるのが最初にやることの1つである。\n\n### IAM\nAWS上のサービスを操作するユーザーとアクセス権限を管理するのがIAM(AWS Identity and Access Management)である。\nユーザーがアクセスするための認証情報やAWSリソースを制御するための権限を集中管理することができる。\n\n### S3\nストレージサービスで、バケットと呼ばれる入れ物にデータを保存する。容量は無制限。\n\n### EC2\n「仮想サーバー」を立てるサービス。LinuxやWindowsなどさまざまなOSの仮想サーバーをすぐに実行できる環境を用意することができる。\n立てたサーバーのことはインスタンスと呼ぶ。\n\n## AWSでEC2インスタンスを作成\n\n* シングルサインオンでAWSにログイン\n\n* 上の「サービス」からEC2を選択\n\n* 左のメニューから「インスタンス」を選択して、上の「インスタンスを作成」をクリック。\n\n* AMIは(こだわりが無ければ)一番上のAmazon Linux 2 AMI (HVM), SSD Volume Type>(64 ビット x86)を選択\n\n* インスタンスタイプはt2.microを選択\n\n* ストレージはデフォルト\n\n* タグはキーがName、値は適当なもの\n\n* セキュリティグループ→タイプ:SSH、プロトコル:TCP、ポート:初期値は22、ソース:マイAP\n\n* 
キーが作成されるので保管\n\n* 確認と作成をクリック>起動をクリック\n\n* インスタンスの状態とステータスチェックの項目が緑色になったら接続可能\n\n### AWSインスタンスの起動\n\n* 接続をクリック\n\n* 保存したキーをlinuxで共有管理するようにする。\n\n* 以下コマンドを実行\n```\nsudo chmod 400 key_name\n```\n```\nssh -i key_name ec2-user@public-dns-name\n```\n※key_name、ec2-user、public-dns-nameは各自置き換えること\n\nこれでログインできる。\n\n【ログイン後にやること】\n\n* インストール済みのパッケージをアップデートする\n```\nsudo yum update\n```\n\n* 時間を日本時間にする\n\n```\nsudo mv /etc/localtime /etc/localtime.org\nsudo ln -s /usr/share/zoneinfo/Asia/Tokyo /etc/localtime\nsudo vi /etc/sysconfig/clock\n```\n\n* viで「ZONE=\"UTC\" UTC=true」を「ZONE=\"Asia/Tokyo\" UTC=true」に修正。再起動する。\n\n```\nsudo reboot\n```\n\n### AWS CLI\n\nAWS CLIは、AWSのサービスをコマンドラインから操作し、管理するためのツールである。\nこのツールはプラットフォームや開発言語の制限がなく、Linux、Mac、Windowsなど様々なOSで利用できる。\n\n【使う準備】(アクセスキーIDとキーが分かっている人はcurlコマンドまで飛ばして良い)\n\n* サービスからIAMを選択\n\n* 左のタブからユーザを選択\n\n* 右のタブから認証情報を選択\n\n* アクセスキーを作成して情報をメモするか情報の入ったcsvをダウンロードする\n\n【インストール】\n```\ncurl \"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip\" -o \"awscliv2.zip\"\nunzip awscliv2.zip\nsudo ./aws/install\n```\n\n* 確認\n\n```\naws --version\n```\n\n* AWS CLIのコンフィグを設定\n\n```\naws configure\n```\n\n* 下記のようなものが出るので埋める。\n\n AWS Access Key ID : アクセスキーの作成時にメモしたもの\n\n AWS Secret Access Key : アクセスキーの作成時にメモしたもの\n\n Default region name :ap-northeast-1 \n\n Default output format :json \n\n* 動作確認\n\n```\naws ec2 describe-vpcs\n```\n"
},
{
"alpha_fraction": 0.764325737953186,
"alphanum_fraction": 0.8009478449821472,
"avg_line_length": 37.68333435058594,
"blob_id": "4617fca0b75fa98fa5a24f8a62242cd8971f2a37",
"content_id": "2f48288c9d6ce9be91d82bbec78024404992973b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3845,
"license_type": "no_license",
"max_line_length": 219,
"num_lines": 60,
"path": "/AI開発道場_クラウド編/course_cloud/04.Vision_API/概要.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "# GCPのAIサービス\n\nGCPでは開発者のニーズや専門性に合わせて、AIや機械学習技術を「機械学習API」「Cloud AutoML」「AI Platform」の3つの大枠に分けて提供している。\n\n<GCP AIサービス全体像>\n\n<img src=\"https://cdn-xtech.nikkei.com/atcl/learning/lecture/19/00091/00001/ph1.jpg?__scale=w:800,h:397,q:100&_sh=0100f80c0a\" width=\"500\">\n\n# Google 機械学習API\n\nGoogleが提供するクラウド型の機械学習 APIサービス(上図の一番左に該当)。Googleにて学習した複数のモデルが GCP上に提供されており、API経由で利用できる。利用者側からするとデータを用意してAPIをコールすれば良いので、機械学習の専門知識は不要。一方で、利用者により個別の学習(モデル構築)ができないため、柔軟性に欠ける。提供されている APIサービスは以下のとおり:\n\n- [Cloud Vision API](https://cloud.google.com/vision/docs?hl=ja)\n- [Cloud Video Intelligence API](https://cloud.google.com/video-intelligence/automl/docs?hl=ja)\n- [Speech-to-Text API](https://cloud.google.com/natural-language/automl/docs?hl=ja)\n- [Text-to-Speech API](https://cloud.google.com/translate/automl/docs?hl=ja)\n- [Cloud Natural Language API](https://cloud.google.com/automl-tables/docs?hl=ja)\n- [Cloud Translation API](https://cloud.google.com/automl-tables/docs?hl=ja)\n\n<機械学習API サービス一覧と概要>\n\n<img src=\"https://cdn-xtech.nikkei.com/atcl/learning/lecture/19/00091/00001/hyo1.jpg?__scale=w:739,h:646,q:100&_sh=05705b0a00\" width=\"500\">\n\n\n# Cloud Vision API\n\nGoogleの機械学習 APIサービスの1つで画像の検出と解析を行う。2017年に発表。REST APIを介して利用可能。具体的には、画像にラベルを割り当てることで、事前定義済みの数百万のカテゴリに分類したり、オブジェクトや顔の検出、印刷テキストや手書き入力の読み取りが可能。競合製品として、Amazon Rekognition, Microsoft Computer Vision API がある。\n\n主な機能は以下のとおり\n\n* 顔検出\n* ランドマーク検出\n* ロゴ検出\n* ラベル検出\n* テキスト検出\n* 不適切なコンテンツの検出\n\n# Cloud Video Intelligence API\n\nGoogleの機械学習 APIサービスの1つで動画の検出と解析を行う。2017年にパブリックベータ版発表。REST APIを介して利用可能。動画に写っているモノのラベル検出機能や、コンテンツの遷移を検出するショット変更の検出機能、動画で検出された物体のトラッキングなどが可能。\n\n主な機能は以下のとおり\n\n* ラベル検出\n* テキスト検出\n* 不適切なコンテンツの検出\n* オブジェクトトラッキング\n* ショット(場面)切替の検出\n\n# Cloud AutoML Vision\n\nGoogle AutoML サービスの1つで画像の検出と解析を行う。2018年に発表。Cloud Vision API と同じく GCPによる画像解析サービスで REST APIを介して利用可能。独自の画像を学習させることにより、独自の画像認識モデル構築できる点が最大の特徴。Cloud Vision API と異なり GUIも提供されているが、Cloud Vision API では可能だったテキスト検出や不適切コンテンツ検出は非対応。\n\n# 参考\n\n- [Vision AI](https://cloud.google.com/vision?hl=ja)\n- [Video AI](https://cloud.google.com/video-intelligence?hl=ja)\n- [Cloud Vision API 機能リスト](https://cloud.google.com/vision/docs/features-list?hl=ja)\n- [Cloud Video Intelligence API Features](https://cloud.google.com/video-intelligence/docs/features?hl=ja)\n- [業務でつかえるGoogleのAI/機械学習サービスまとめ[2018年版]](https://www.principle-c.com/column/engineer/ai-machinelearning/)\n"
},
{
"alpha_fraction": 0.5992828607559204,
"alphanum_fraction": 0.6190049052238464,
"avg_line_length": 16.56692886352539,
"blob_id": "da156ea7039f05a1faf3811f464a54ac50af1722",
"content_id": "c5742cc3acd84a2b6767fc9effd8c93022f6811e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3721,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 127,
"path": "/AI開発道場_クラウド編/course_cloud/01.sagemaker/aws_personalize.md",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "## aws personalizeとは\n\n* Amazon Personalize は、アプリケーションを使用するユーザー向けに個別化したレコメンデーションを簡単に追加できる、開発者向けの機械学習サービスである。\n\n## 魅力\n\n* ターゲッティングされたマーケティングプロモーションの強化に活用でき、カスタマーエンゲージメントの向上に貢献する。\n\n* データの処理や統計、機械学習アルゴリズムを一から学ぶのではなく実践的な運用を行うために、モデルを最適化して運用することから始めることが可能で、タイムロスがない。\n\n## チュートリアルの実施\n\n* 基本的には下記のリンク内容に沿って行う。\n\n https://docs.aws.amazon.com/ja_jp/personalize/latest/dg/what-is-personalize.html\n\n(開始方法はコンソールがやりやすい)\n\n## 補足\n\n分かりづらいところなどを以下で補足する\n\n【Amazon S3 バケットポリシー】\n\nバケットポリシーは以下のように置き換える\n\n```\n{\n\n \"Version\": \"2012-10-17\",\n \n \"Id\": \"PersonalizeS3BucketAccessPolicy\",\n \n \"Statement\": [\n \n {\n \n \"Sid\": \"PersonalizeS3BucketAccessPolicy\",\n \n \"Effect\": \"Allow\",\n \n \"Principal\": {\n \n \"Service\": \"personalize.amazonaws.com\"\n \n },\n \n \"Action\": [\n \n \"s3:GetObject\",\n \n \"s3:ListBucket\"\n \n ],\n \n \"Resource\": [\n \n \"arn:aws:s3:::bucket-name\",\n \n \"arn:aws:s3:::bucket-name/*\"\n \n ]\n \n }\n \n ]\n \n}\n```\n\n\n※bucket-nameは自分の作ったものに置き換える\n\n※https://docs.aws.amazon.com/ja_jp/AmazonS3/latest/user-guide/add-bucket-policy.html\nも参照\n\n【アクセス許可の設定】\n\nIAMロールの作成\n\n5.[このロールを使用するサービスを選択] で、[Amazon Personalize] を選択します。→ Personalizeを選択\n\n12.〜20.は不要\n\n\n【開始方法】\n\nトレーニングデータを作成する\n\n2.ratings.csv ファイルを開きます。\n\na.[評価] 列を削除します。→ 評価列は\"rating\"\n\n※ratings.csvはおよそ10万件のデータが入っている。PCの動作が重くなることが予想される。10万件だと時間がかかるので1万件や5000件などにデータを絞って良い。\n\n\n\n【ステップ 2: ソリューションを作成する】\n\n3.[Recipe selection (レシピの選択)] で、[Automatic (AutoML) (自動 (AutoML))] を選択します。デフォルトのレシピリストのままにします。\n\n→ そもそもRecipe selectionがない場合がある。その場合、3.の手順は無視して良い。\n\n※ソリューションの作成に1時間以上はかかる。同様に後続のキャンペーンの作成も時間がかかる。\n\n\n\n【ステップ 4: 推奨事項を取得する】\n\n* 2020年からパーソナライズされたレコメンデーションごとに生成された推奨スコアも提供されている。スコアの具体的な内容は以下も参照。\n\n https://aws.amazon.com/jp/blogs/news/introducing-recommendation-scores-in-amazon-personalize/\n\n\n* aws cliでjson形式で取得したい場合、以下で取得できる\n\n```\naws personalize-runtime get-recommendations --<Campaign ARN> --user-id 84\n```\n\n※Campaign ARNと84は置き換える\n\n\n\n【リソースの削除】\n\n終わったら、必ずリソースを削除すること。\n"
},
{
"alpha_fraction": 0.6310241222381592,
"alphanum_fraction": 0.6411897540092468,
"avg_line_length": 36.2957763671875,
"blob_id": "432c19cd9ffeb5b4c8984c018cc5d23ee4a93c4b",
"content_id": "0786df272063aae6887bef963484b5a9c2da0b0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2860,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 71,
"path": "/AI開発道場_クラウド編/course_cloud/03.AWS_Rekognition/Sample_Function.py",
"repo_name": "KinakoIguchi/ai-dojo",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport boto3\nfrom decimal import Decimal\nimport json\nimport urllib\n\nprint('Loading function')\nrekognition = boto3.client('rekognition')\nsns = boto3.client('sns')\n\n# --------------- Helper Functions to call Rekognition APIs ------------------\n\ndef detect_faces(bucket, key):\n response = rekognition.detect_faces(Image={\"S3Object\": {\"Bucket\": bucket, \"Name\": key}})\n return response\n\n\ndef detect_labels(bucket, key):\n response = rekognition.detect_labels(Image={\"S3Object\": {\"Bucket\": bucket, \"Name\": key}})\n\n # Sample code to write response to DynamoDB table 'MyTable' with 'PK' as Primary Key.\n # Note: role used for executing this Lambda function should have write access to the table.\n #table = boto3.resource('dynamodb').Table('MyTable')\n #labels = [{'Confidence': Decimal(str(label_prediction['Confidence'])), 'Name': label_prediction['Name']} for label_prediction in response['Labels']]\n #table.put_item(Item={'PK': key, 'Labels': labels})\n return response\n\n\ndef index_faces(bucket, key):\n # Note: Collection has to be created upfront. Use CreateCollection API to create a collecion.\n #rekognition.create_collection(CollectionId='BLUEPRINT_COLLECTION')\n response = rekognition.index_faces(Image={\"S3Object\": {\"Bucket\": bucket, \"Name\": key}}, CollectionId=\"BLUEPRINT_COLLECTION\")\n return response\n\n\n# --------------- Main handler ------------------\n\n\ndef lambda_handler(event, context):\n '''Demonstrates S3 trigger that uses\n Rekognition APIs to detect faces, labels and index faces in S3 Object.\n '''\n #print(\"Received event: \" + json.dumps(event, indent=2))\n\n # Get the object from the event\n \n bucket = event['Records'][0]['s3']['bucket']['name']\n print(event)\n key = event['Records'][0]['s3']['object']['key']\n try:\n # ToDo:imprements\n facedata = detect_faces(bucket, key) # 要件によって使用するAPIが違うので、注意。\n print(facedata)\n ## ToDo:responseの中に顔の情報が載っているので、解析して出力できるような形にすること。\n msg =\n \n #SNSでトピックとして送信してあげる。 要件AB共通。\n TOPIC_ARN = 'arn:aws:sns:ap-northeast-1:096177927656:takao_itoi_RekognitionTopic' \n subject = '課題A' # 件名。自分の挑戦した課題がわかるようにすること。\n response = sns.publish(\n TopicArn = TOPIC_ARN,\n Message = msg,\n Subject = subject\n )\n \n return response\n except Exception as e:\n print(e)\n print(\"Error processing object {} from bucket {}. \".format(key, bucket) +\n \"Make sure your object and bucket exist and your bucket is in the same region as this function.\")\n raise e\n "
}
] | 16 |
vanreus37/pizzahawaiAI | https://github.com/vanreus37/pizzahawaiAI | 4479d5fe57dd410c1c6d0a9195b7e024c1cb41c7 | ff4e723c01afea56c5e30c5f6b73ebbd58c6aaa5 | 15cdf67881ef8edaa755d26a3502d53118b7ada8 | refs/heads/master | 2020-12-10T15:19:49.327711 | 2020-01-29T17:14:34 | 2020-01-29T17:14:34 | 233,630,856 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4774676561355591,
"alphanum_fraction": 0.4860449433326721,
"avg_line_length": 30.94618797302246,
"blob_id": "77c1f0075a663c5120c39a98b3bcaeec42ddbc1d",
"content_id": "bbaf8206a7f20aeb8ba790dab5dc2285e53decae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7347,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 223,
"path": "/mybot.py",
"repo_name": "vanreus37/pizzahawaiAI",
"src_encoding": "UTF-8",
"text": "# Import the API objects\r\nfrom api import State, util, Deck\r\nimport random, array\r\nimport numpy as np\r\n\r\nclass Node:\r\n def __init__(self):\r\n # DEBUG ONLY\r\n self.card = None\r\n\r\n self.state = None\r\n self.v = 0\r\n self.n = 0\r\n self.parent = None\r\n self.children = []\r\n\r\n\r\nbestRoot = None\r\n\r\n\r\nclass Bot:\r\n root = None\r\n player = None\r\n N = 100\r\n DL = 100\r\n debug = False\r\n\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def get_move(self, state):\r\n self.player = state.whose_turn()\r\n\r\n if state.get_phase() == 1:\r\n try:\r\n state = state.make_assumption()\r\n except:\r\n pass\r\n\r\n if bestRoot is None:\r\n self.root = Node()\r\n self.root.state = state\r\n\r\n if self.debug:\r\n if self.root.state.get_opponents_played_card() is not None:\r\n self.root.card = util.get_card_name(self.root.state.get_opponents_played_card())\r\n print(self.root.card[0], self.root.card[1])\r\n else:\r\n print(\"Started With An Empty Deck\")\r\n print(\"Trump suit is: \", self.root.state.get_trump_suit())\r\n print(\"Start...\\n\")\r\n\r\n self.expansion(self.root)\r\n else:\r\n if self.root.state.get_opponents_played_card() is not None:\r\n self.root = bestRoot\r\n else:\r\n for nodes in bestRoot.children:\r\n if state == nodes.state:\r\n self.root = nodes.state\r\n break\r\n\r\n return self.root.state.moves()[self.MCTS(state)]\r\n\r\n def MCTS(self, state):\r\n for iteration in range(0, self.DL):\r\n # For iteration 0 we go slct->sim->bck instead of slct->exp->sim->bck\r\n if iteration == 0: \r\n selectedNode = self.selection(self.root)\r\n simulationResult = self.rollout(selectedNode)\r\n self.backPropogation(simulationResult,selectedNode)\r\n else:\r\n currentNode = self.root # Slct\r\n while len(currentNode.children) != 0 and currentNode.state.finished() is False:\r\n currentNode = self.selection(currentNode) # Slct\r\n\r\n if currentNode.n is 0: # If this done is never visited before\r\n self.expansion(currentNode) # Exp\r\n simulationResult = self.rollout(currentNode) # Sim\r\n self.backPropogation(simulationResult,currentNode)\r\n \r\n\r\n bestScore = 0.0 \r\n bestIndex = i = 0\r\n for child in self.root.children:\r\n if not child.n or not child.v:\r\n score = 0.0\r\n else:\r\n score = child.v / child.n\r\n if score > bestScore:\r\n bestScore = score\r\n bestIndex = i\r\n i += 1\r\n\r\n bestRoot = self.root.children[bestIndex]\r\n if self.debug:\r\n print(\"Chosed Index= \", bestIndex)\r\n # Return a random choice\r\n return bestIndex\r\n\r\n def selection(self, currentNode=Node(), C=0.5):\r\n t = self.root.n\r\n\r\n index = bestIndex = bestSi = 0\r\n\r\n for child in currentNode.children:\r\n if child.n is 0:\r\n bestIndex = index\r\n break\r\n\r\n Xi = child.v / child.n\r\n Ni = child.n\r\n Si = Xi + C * np.sqrt(np.log(t) / Ni)\r\n\r\n if Si > bestSi:\r\n bestSi = Si\r\n bestIndex = index\r\n\r\n index += 1\r\n return currentNode.children[bestIndex]\r\n\r\n def expansion(self, currentNode=Node()):\r\n state = currentNode.state\r\n if state.finished() is True:\r\n return False\r\n for move in state.moves():\r\n # Play the card and set the state acording to the result\r\n st = state.clone()\r\n\r\n childNode = Node()\r\n\r\n if st.finished() is True:\r\n return False\r\n\r\n st = st.next(move)\r\n childNode.state = st\r\n childNode.parent = currentNode\r\n currentNode.children.append(childNode)\r\n\r\n # DEBUG ONLY\r\n if move[0] is not None:\r\n childNode.card = util.get_card_name(move[0])\r\n\r\n return True\r\n\r\n def rollout(self, 
currentNode=Node()):\r\n score = 0\r\n for ooo in range(0, self.N):\r\n st = currentNode.state.clone()\r\n i = 0\r\n # Do some random moves\r\n maxPhase = 0\r\n phaseEnterence = Node()\r\n while not st.finished():\r\n st = st.next(random.choice(st.moves()))\r\n phase = st.get_phase()\r\n if phase > maxPhase: \r\n maxPhase = phase\r\n phaseEnterence = st\r\n i += 1\r\n score += self.heuristics(st,currentNode.state.whose_turn(),maxPhase,phaseEnterence)\r\n return score\r\n\r\n\r\n def backPropogation(self, result, currentNode):\r\n while True:\r\n currentNode.n = currentNode.n + self.N\r\n currentNode.v = currentNode.v + result\r\n\r\n if currentNode.parent is None:\r\n break\r\n currentNode = currentNode.parent\r\n return\r\n\r\n def heuristics(self, state, player, phase, phaseEnterence):\r\n def playerHeuristic():\r\n Bonus = 0\r\n if state.winner()[1] == 3:\r\n Bonus += 3\r\n elif state.winner()[1] == 2:\r\n Bonus += 1\r\n elif state.winner()[1] == 1:\r\n Bonus += 0\r\n\r\n if phase == 2: # If game enters to the phase 2 at some point more trump cards means more points\r\n for card in phaseEnterence.moves():\r\n if card[0] != None and util.get_suit(card[0]) == state.get_trump_suit():\r\n Bonus += 3\r\n\r\n for card in state.moves(): # And this is for ending the game with almost zero trumps in either case\r\n if card[0] != None and util.get_suit(card[0]) != state.get_trump_suit():\r\n Bonus += 3\r\n\r\n return 1 + Bonus if state.winner()[0] == self.player else 0\r\n \r\n def opponentHeuristic(): # NOT SURE ABOUT THIS I STILL NEED TO READ ABOUT HOW TO REACT FOR THE OPPONENTS TURNS\r\n return 3 if state.winner()[0] != self.player else 0\r\n \r\n return playerHeuristic() #if player == self.player else opponentHeuristic() # ADD THIS WHEN YOU ARE SURE\r\n \r\n def printTree(self,rt = Node()):\r\n i = 0\r\n for child in rt.children:\r\n print(\" \",i, end=\" \")\r\n i = i + 1\r\n print()\r\n for child in rt.children:\r\n print(\"\",child.card, end=\" \")\r\n i = i + 1\r\n print()\r\n for child in rt.children:\r\n print(\" o N= \", child.n, end=\" \")\r\n print()\r\n for child in rt.children: \r\n print(\" o V= \", child.v, end=\" \")\r\n print()\r\n for child in rt.children: \r\n if child.n is 0:\r\n print(\" o x̄= \", \"inf\", end=\" \")\r\n else:\r\n print(\" o x̄= \", round(child.v / child.n,2), end=\" \")\r\n print()"
},
{
"alpha_fraction": 0.46030446887016296,
"alphanum_fraction": 0.4670158922672272,
"avg_line_length": 29.48969078063965,
"blob_id": "af0f744b34f0e20a1b5c9627c05e9e2e8eaaa25b",
"content_id": "5e84d36908c4087f12a41ff93f6a912e50f06b0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6111,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 194,
"path": "/mybot2xold.py",
"repo_name": "vanreus37/pizzahawaiAI",
"src_encoding": "UTF-8",
"text": "# Import the API objects\r\nfrom api import State, util\r\nimport random, array\r\nimport numpy as np\r\nclass Node:\r\n def __init__(self):\r\n #DEBUG ONLY\r\n self.card = None\r\n \r\n self.state = None\r\n self.v = 0\r\n self.n = 0\r\n self.parent = None\r\n self.children = []\r\n\r\n\r\nbestRoot = None\r\n\r\nclass Bot:\r\n root = None\r\n player = None\r\n N = 100\r\n DL = 100\r\n def __init__(self):\r\n pass\r\n\r\n def get_move(self, state):\r\n self.player = state.whose_turn()\r\n\r\n if state.get_phase() == 1:\r\n try:\r\n state = state.make_assumption()\r\n except:\r\n pass\r\n\r\n if bestRoot is None:\r\n self.root = Node()\r\n self.root.state = state\r\n #DEBUG ONLY\r\n if self.root.state.get_opponents_played_card() is not None:\r\n self.root.card = util.get_card_name(self.root.state.get_opponents_played_card())\r\n print(self.root.card[0],self.root.card[1])\r\n else:\r\n print(\"Started With An Empty Deck\")\r\n print(\"Trump suit is: \", self.root.state.get_trump_suit())\r\n print(\"Start...\\n\")\r\n\r\n \r\n self.expansion(self.root)\r\n else:\r\n if self.root.state.get_opponents_played_card() is not None:\r\n self.root = bestRoot\r\n else:\r\n for nodes in bestRoot.children:\r\n if state == nodes.state:\r\n self.root = nodes.state\r\n break\r\n \r\n \r\n\r\n return self.root.state.moves()[self.MCTS(state)]\r\n\r\n def MCTS(self,state):\r\n for iteration in range(0, self.DL):\r\n if iteration == 0:\r\n simulationResult = self.rollout(self.root.children[self.selection(self.root)])\r\n self.backPropogation(simulationResult, self.root.children[self.selection(self.root)])\r\n else:\r\n search = True\r\n crNode = self.root\r\n while search:\r\n crNode = crNode.children[self.selection(crNode)]\r\n \r\n if len(crNode.children) > 0:\r\n search = True\r\n else:\r\n self.expansion(crNode)\r\n selection = self.selection(crNode)\r\n simulationResult = self.rollout(crNode.children[selection])\r\n self.backPropogation(simulationResult,crNode.children[self.selection(crNode)])\r\n search = False\r\n \r\n #self.printTree(self.root) \r\n #print()\r\n '''\r\n self.printTree(self.root.children[0])\r\n print()\r\n self.printTree(self.root.children[0].children[1])\r\n print()\r\n '''\r\n bestScore = bestIndex = i = 0\r\n for child in self.root.children:\r\n if child.n or child.v is 0:\r\n score = 0\r\n else:\r\n score = child.v / child.n \r\n if score > bestScore:\r\n bestScore = score\r\n bestIndex = i\r\n i += 1\r\n\r\n # Return a random choice\r\n bestRoot = self.root.children[bestIndex]\r\n print(\"Chosed Index= \", bestIndex)\r\n return bestIndex\r\n\r\n def selection(self, currentNode = Node(), C = 0.5):\r\n t = self.root.n\r\n \r\n index = bestIndex = bestSi = 0\r\n\r\n for child in currentNode.children:\r\n if child.n is 0:\r\n bestIndex = index\r\n break\r\n\r\n Xi = child.v / child.n\r\n Ni = child.n\r\n Si = Xi + C * np.sqrt(np.log(t)/Ni)\r\n\r\n if Si > bestSi:\r\n bestSi = Si\r\n bestIndex = index\r\n\r\n index += 1\r\n return bestIndex\r\n\r\n def expansion(self,currentNode = Node()):\r\n state = currentNode.state\r\n for move in state.moves():\r\n #Play the card and set the state acording to the result\r\n st = state.clone()\r\n\r\n childNode = Node()\r\n \r\n #if st.finished() is not True: NOT SURE IF THIS IS A SOLUTION BUT PROBABLY IS\r\n st = st.next(move)\r\n childNode.state = st\r\n childNode.parent = currentNode\r\n \r\n #DEBUG ONLY\r\n if move[0] is not None:\r\n childNode.card = util.get_card_name(move[0])\r\n \r\n currentNode.children.append(childNode)\r\n 
return\r\n\r\n def rollout(self,currentNode = Node()):\r\n score = 0\r\n for _ in range(0,self.N):\r\n st = currentNode.state.clone()\r\n i = 0\r\n # Do some random moves\r\n while not st.finished():\r\n st = st.next(random.choice(st.moves()))\r\n i += 1\r\n score += self.heuristics(st)\r\n return score\r\n\r\n def backPropogation(self,result,currentNode):\r\n while True:\r\n currentNode.n = currentNode.n + self.N\r\n currentNode.v = currentNode.v + result\r\n\r\n if currentNode.parent is None:\r\n break\r\n currentNode = currentNode.parent\r\n return\r\n\r\n def heuristics(self,state):\r\n return 1 + state.winner()[1]if state.winner()[0] == self.player else 0\r\n\r\n def printTree(self,rt = Node()):\r\n i = 0\r\n for child in rt.children:\r\n print(\" \",i, end=\" \")\r\n i = i + 1\r\n print()\r\n for child in rt.children:\r\n print(\"\",child.card, end=\" \")\r\n i = i + 1\r\n print()\r\n for child in rt.children:\r\n print(\" o N= \", child.n, end=\" \")\r\n print()\r\n for child in rt.children: \r\n print(\" o V= \", child.v, end=\" \")\r\n print()\r\n for child in rt.children: \r\n if child.n is 0:\r\n print(\" o x̄= \", \"inf\", end=\" \")\r\n else:\r\n print(\" o x̄= \", round(child.v / child.n,2), end=\" \")\r\n print()\r\n"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 55,
"blob_id": "ff178b05ea49a0091309c8a2534e1f027197d6e8",
"content_id": "9db1fe9f3bc2c60e2a9d37aa0c8f3b55dd03b40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 2,
"path": "/README.md",
"repo_name": "vanreus37/pizzahawaiAI",
"src_encoding": "UTF-8",
"text": "# pizzahawaiAI\nWe are developing an AI to create the best pizza Hawaii with the perfect pineapple, tomato ratio\n"
},
{
"alpha_fraction": 0.48142221570014954,
"alphanum_fraction": 0.4933333396911621,
"avg_line_length": 29.73770523071289,
"blob_id": "07cd499b5e3d69e83a49f99c600250ce6553b1c1",
"content_id": "619fade3bc978c0150c2c2f46458fdb26c1ab87f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5627,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 183,
"path": "/mybot1.py",
"repo_name": "vanreus37/pizzahawaiAI",
"src_encoding": "UTF-8",
"text": "# Import the API objects\nfrom api import State, util, Deck\nfrom datetime import datetime \nimport random, array\nimport numpy as np\n\nclass Node:\n def __init__(self):\n # DEBUG ONLY\n self.card = None\n\n self.state = None\n self.v = 0.0 \n self.n = 0\n self.parent = None\n self.children = []\n\n\nbestRoot = None\n\n\nclass Bot:\n root = None\n player = None\n N = 1000\n debug = False\n\n\n def __init__(self):\n pass\n\n def get_move(self, state):\n self.player = state.whose_turn()\n\n try: state = state.make_assumption()\n except: pass\n \n if bestRoot is None:\n self.root = Node()\n self.root.state = state\n else:\n self.root = bestRoot\n \n return self.root.state.moves()[self.MCTS(state)]\n \n def MCTS(self, state):\n currentNode = self.root\n t1 = datetime.now()\n\n while (datetime.now()-t1).seconds <= 2:\n if len(currentNode.children) > 0:\n currentNode = self.selection(currentNode)\n else:\n if self.expansion(currentNode) is True:\n for child in currentNode.children:\n simResult = self.rollout(child)\n self.backPropogation(simResult,child)\n \n #self.printTree(self.root)\n \n bestScore = 0.0 \n bestIndex = i = 0\n for child in self.root.children:\n if not child.n or not child.v:\n score = 0.0\n else:\n score = child.v / child.n\n if score > bestScore:\n bestScore = score\n bestIndex = i\n i += 1\n\n bestRoot = self.root.children[bestIndex]\n return bestIndex\n\n def selection(self, currentNode=Node(), C=1.0):\n def playerNode():\n t = self.root.n\n index = bestIndex = 0 \n bestSi = 0.0\n\n for child in currentNode.children:\n if child.n is 0:\n bestIndex = index\n break\n\n Xi = child.v / child.n\n Ni = child.n\n Si = Xi + C * np.sqrt(np.log(t) / Ni)\n\n if Si > bestSi:\n bestSi = Si\n bestIndex = index\n\n index += 1\n return currentNode.children[bestIndex]\n \n return playerNode() #if currentNode.state.whose_turn() is self.player else opponentNode()\n\n def expansion(self, currentNode=Node()):\n state = currentNode.state\n if state.finished() is True:\n return False\n for move in state.moves():\n # Play the card and set the state acording to the result\n st = state.clone()\n childNode = Node()\n\n st = st.next(move)\n childNode.state = st\n childNode.parent = currentNode\n currentNode.children.append(childNode)\n return True\n\n def rollout(self, currentNode=Node()):\n score = 0\n for _ in range(0, self.N):\n st = currentNode.state.clone()\n i = 0\n # Do some random moves\n maxPhase = 0\n phaseEnterence = Node()\n while not st.finished():\n st = st.next(random.choice(st.moves()))\n phase = st.get_phase()\n if phase > maxPhase: \n maxPhase = phase\n phaseEnterence = st\n i += 1\n score += self.heuristics(st,currentNode.state.whose_turn(),maxPhase,phaseEnterence)\n return score/float(self.N)\n\n def backPropogation(self, result, currentNode):\n while True:\n currentNode.n = currentNode.n + self.N\n currentNode.v = currentNode.v + result\n\n if currentNode.parent is None:\n break\n currentNode = currentNode.parent\n return\n\n def heuristics(self, state, player, phase, phaseEnterence):\n Bonus = 0.0\n if util.difference_points(state,self.player) >= 40:\n if state.winner()[1] == 3: Bonus += 3\n elif state.winner()[1] == 2: Bonus += 1\n elif state.winner()[1] == 1: Bonus += 0\n else:\n if state.winner()[1] == 3: Bonus += 0\n elif state.winner()[1] == 2: Bonus += 1\n elif state.winner()[1] == 1: Bonus += 3\n \n if phase == 2: # If game enters to the phase 2 at some point more trump cards means more points\n for card in phaseEnterence.moves():\n if card[0] != None and 
util.get_suit(card[0]) == state.get_trump_suit():\n Bonus += 3\n\n for card in state.moves(): # And this is for ending the game with almost zero trumps in either case\n if card[0] != None and util.get_suit(card[0]) != state.get_trump_suit():\n Bonus += 3\n\n return 1 + Bonus if state.winner()[0] == self.player else -2\n \n def printTree(self,rt = Node()):\n i = 0\n \n for child in rt.children:\n print(\" \",i, end=\" \")\n i = i + 1\n print()\n for child in rt.children:\n print(\" o N= \", child.n, end=\" \")\n print()\n for child in rt.children: \n print(\" o V= \", round(child.v,3), end=\" \")\n print()\n for child in rt.children: \n if child.n is 0:\n print(\" o x̄= \", \"inf\", end=\" \")\n else:\n print(\" o x̄= \", round(child.v / child.n,3), end=\" \")\n print()\n"
},
{
"alpha_fraction": 0.48983895778656006,
"alphanum_fraction": 0.4973159432411194,
"avg_line_length": 29.467836380004883,
"blob_id": "02615e8aedd75386b23370620312d9b8ded8f538",
"content_id": "9fe33900b1145a2ea23fd09c93cb82e2940b26a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5216,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 171,
"path": "/mybot_old.py",
"repo_name": "vanreus37/pizzahawaiAI",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRandomBot -- A simple strategy: enumerates all legal moves, and picks one\nuniformly at random.\n\"\"\"\n\n# Import the API objects\nfrom api import State, util\nimport random, array\nimport numpy as np\nclass Node:\n def __init__(self):\n self.state = None\n self.q = 0\n self.n = 0\n self.parent = None\n self.children = []\n\nDEBUG = False\nprevious_root = None\nclass Bot:\n root = None\n currentNode = None\n player = None\n def __init__(self):\n if previous_root is not None:\n self.root = previous_root\n\n def get_move(self, state):\n self.player = state.whose_turn()\n \n if state.get_opponents_played_card() is None:\n moves = state.moves()\n return random.choice(moves)\n\n if state.get_phase() == 1:\n try:\n state = state.make_assumption()\n except:\n pass\n #Expand the root state when the game first started\n if self.root is None:\n self.root = Node()\n self.root.state = state\n self.createChilds(state,self.root)\n self.currentNode = self.root\n \n return self.monteCarloTS(state) \n\n def monteCarloTS(self,state = Node()):\n #printTree(self.currentNode)py\n for _ in range(0,100):\n search = True\n while search:\n self.currentNode = self.selection()\n #self.printTree(self.currentNode)\n if self.currentNode.n is 0:\n #print(\"Simulate\")\n self.backpropagation(self.rollout(self.currentNode.state,self.player))\n search = False\n else:\n if len(self.currentNode.children) is 0: \n #print(\"Expand\")\n self.expand(self.currentNode.state,self.currentNode)\n if DEBUG:\n self.printTree(self.currentNode)\n search = False\n else:\n #print(\"Select again\")\n search = True\n if DEBUG:\n print(\"\\n\\nFINAL RESULT: \")\n self.printTree(self.root)\n '''\n bestScore = 0\n bestIndex = 0\n i = 0\n for child in self.root.children:\n if child.n or child.q is 0:\n score = 0\n else:\n score = child.q / child.n \n if score > bestScore:\n bestScore = score\n bestIndex = i\n i = i + 1\n '''\n previous_root = self.best_child(self.root,2)\n return self.root.state.moves()[self.bestChoice(self.root,2)]\n\n def createChilds(self,state,parent):\n for move in state.moves():\n st = state.clone()\n st.next(move)\n childNode = Node()\n childNode.state = st\n childNode.parent = parent\n parent.children.append(childNode)\n return\n\n\n def selection(self):\n return self.best_child(self.currentNode,2)\n\n def expand(self,state,node):\n self.createChilds(state,node)\n pass\n\n def rollout(self,state,player):\n st = state.clone()\n i = 0\n # Do some random moves\n while not st.finished():\n if st.get_points(1) >= 66 or st.get_points(2) >= 66:\n break\n i = i + 1\n st = st.next(random.choice(st.moves()))\n return self.evaluation(st)\n \n def best_child(self,init_node, c_param=2):\n i = 0\n for child in init_node.children:\n if child.n == 0:\n #print(\"Index of q=0 is \", i)\n return child\n i = i + 1\n \n choices_weights = [\n c.q + c_param * np.sqrt((np.log(self.root.n) / c.n))\n for c in init_node.children\n ]\n return init_node.children[np.argmax(choices_weights)]\n\n def bestChoice(self,init_node, c_param=2):\n i = 0\n for child in init_node.children:\n if child.n == 0:\n #print(\"Index of q=0 is \", i)\n return child\n i = i + 1\n \n choices_weights = [\n c.q + c_param * np.sqrt((np.log(self.root.n) / c.n))\n for c in init_node.children\n ]\n return np.argmax(choices_weights)\n \n\n def backpropagation(self,result):\n while True:\n self.currentNode.n = self.currentNode.n + 1\n self.currentNode.q = self.currentNode.q + result\n #print(\"Result: \",self.currentNode.q)\n if self.currentNode.parent is 
None:\n break\n self.currentNode = self.currentNode.parent\n\n def evaluation(self, state):\n\t return util.ratio_points(state, self.player) \n\n def printTree(self,root = Node()):\n i = 0\n for child in root.children:\n print(\" \",i, end=\" \")\n i = i + 1\n print()\n for child in root.children:\n print(\" o N= \", child.n, end=\" \")\n print()\n for child in root.children: \n print(\" o Q= \", child.q, end=\" \")\n print()\n\n \n"
}
] | 5 |
xiaxiaowei/SPA | https://github.com/xiaxiaowei/SPA | ea1655c62231320d54bfefc404785219e085eed3 | a8724066569b1f8c6c117224a7f5bafc8e4898c7 | 8f37ca391d3659e2521ef0a118959119145c19b1 | refs/heads/master | 2021-01-10T01:19:33.690107 | 2015-12-14T12:00:45 | 2015-12-14T12:00:45 | 47,012,599 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5375494360923767,
"alphanum_fraction": 0.5415019989013672,
"avg_line_length": 27.11111068725586,
"blob_id": "d3ada6b8eba7d499f3a24584fa428d408a391027",
"content_id": "785c0a2e0a23ac091fcfd7d75587dc2062d3e7c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1012,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 36,
"path": "/static/scripts/app.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "requirejs.config({\n //By default load any module IDs from scripts/lib\n baseUrl: 'scripts/lib',\n paths: {\n models: '../models',\n collections: '../collections',\n views: '../views',\n routers: '../routers',\n components: '../components'\n }, \n shim: {\n 'backbone': {\n deps: ['underscore', 'jquery'],\n exports: 'Backbone'\n },\n 'underscore': {\n exports: '_'\n }\n }\n});\n\nvar app = app || {};\n// Just for test begin\napp.selectedCountry = '';\napp.selectedOperator = '';\napp.countries = ['Austris', 'Belgium', 'Finland', 'Germany', 'Ireland', 'Cyprus', 'Denmark', 'France', 'Greece', 'China', 'Japan', 'US'];\napp.operators = ['TMobile', 'Orange', 'ChinaMobile', 'Unicom','X1','X2','X3','X4'];\n\n// Just for test end.\n\nrequire(['routers/router', 'components/dataService'], function (router, dataService) { \n $(document).ready(function (){\n dataService.getInitData();\n router.start();\n });\n});\n"
},
{
"alpha_fraction": 0.4259313642978668,
"alphanum_fraction": 0.43355822563171387,
"avg_line_length": 38.41618347167969,
"blob_id": "3fc6898d63648938664d556a1ce2e04777acddd2",
"content_id": "ecbad834579b6c8ddb36d11762d7ef0c36c0ecda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6960,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 173,
"path": "/static/scripts/views/mobileSelectCountryView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\"], function ($, _, Backbone, Router) {\n var mobileSelectCountryView = Backbone.View.extend({\n COLUMN_SIZE: 4,\n ROW_SIZE: 2,\n LOC_UP: 0,\n LOC_RIGHT: 1,\n LOC_DOWN: 2,\n LOC_LEFT: 3,\n tagName: 'div',\n template:_.template($('#mobile-select-country-template').html()),\n initialize: function() {\n this.countries = app.countries;\n this.selected = 0;\n \n // 显示\n this.rangeStart = 0;\n this.rangeEnd = this.COLUMN_SIZE - 1;\n },\n events: {\n \"click #next\": \"next\"\n },\n render: function() {\n // \n this.buildDisplayData();\n \n this.$el.empty();\n var jsonObject = {\"countries\":this.displayCountries};\n this.$el.html(this.template(jsonObject)); //模板使用JSON对象\n this.$el.find(\"div[country='\"+this.selectedCountry()+\"']\").css('background','gray'); \n // 隐藏区域外的国家\n for (var i=0; i<this.hideCountries.length; i++) {\n var country = this.hideCountries[i];\n this.$el.find(\"div[country='\"+country+\"']\").hide();\n }\n // 左右箭头指示\n if (this.rangeStart <= 0){\n this.$el.find(\".country-left\").css('background','url()');\n }\n if (this.rangeEnd >= this.totalColumns - 1){\n this.$el.find(\".country-right\").css('background','url()');\n }\n // 监听键盘事件\n self = this;\n $(document).one(\"keypress\", function(event){ // 绑定一次\n if (event.keyCode==40) { //Down\n self.changeSelected(self.LOC_DOWN);\n self.render();\n }\n else if (event.keyCode==39) {\n self.changeSelected(self.LOC_RIGHT);\n self.render();\n }\n else if (event.keyCode==38) { // Up\n self.changeSelected(self.LOC_UP);\n self.render();\n }\n else if (event.keyCode==37) {\n self.changeSelected(self.LOC_LEFT);\n self.render();\n }\n // 下40 上38\n else if (event.keyCode==13) {\n var country = self.countries[self.selected];\n app.selectedCountry = country;\n Backbone.history.history.back(); //返回\n }\n return false;\n });\n return this;\n },\n next: function(event) {\n var country = event.currentTarget.getAttribute('country');\n app.selectedCountry = country;\n Backbone.history.history.back(); //返回\n },\n selectedCountry: function() {\n var country = this.countries[this.selected];\n return country;\n },\n buildDisplayData: function() {\n // 计算行、列数\n var totalColumns = Math.ceil(this.countries.length / this.ROW_SIZE);\n if (totalColumns < this.COLUMN_SIZE) {\n totalColumns = this.COLUMN_SIZE;\n }\n var totalRows = Math.ceil(this.countries.length / totalColumns);\n \n // 展示的国家规则行、列\n var displayCountries = []; // 数组嵌套数组;\n var hideCountries = [];\n var index = 0;\n for (var row=0; row<totalRows; row++) {\n var currentRowColumns = [];\n for (var column=0; column<totalColumns; column++) {\n if (index < this.countries.length) {\n var country = this.countries[index];\n currentRowColumns.push(country);\n index ++;\n // 不在展示区域则隐藏\n if (column < this.rangeStart || column > this.rangeEnd) {\n hideCountries.push(country);\n }\n }\n else {\n break;\n }\n }\n displayCountries.push(currentRowColumns);\n }\n \n // 数据 \n this.totalColumns = totalColumns;\n this.totalRows = totalRows;\n this.displayCountries = displayCountries;\n this.hideCountries = hideCountries;\n },\n changeSelected: function(location) {\n switch(location) {\n case this.LOC_UP: {\n var currentRow = Math.ceil((this.selected + 1) / this.totalColumns) - 1;\n if (currentRow > 0) {\n this.selected = this.selected - this.totalColumns;\n }\n break;\n }\n case this.LOC_RIGHT: {\n var currentColumn = this.selected % this.totalColumns;\n if (this.selected < this.countries.length - 1 && currentColumn < this.totalColumns - 1) {\n if 
(currentColumn == this.rangeEnd) {\n this.changeRange(1);\n }\n this.selected = this.selected + 1;\n }\n break;\n }\n case this.LOC_DOWN: {\n var currentRow = Math.ceil((this.selected + 1) / this.totalColumns) - 1;\n if (currentRow < this.totalRows - 1) {\n if (this.selected + this.totalColumns <= this.countries.length - 1) {\n this.selected = this.selected + this.totalColumns;\n }\n }\n break;\n }\n case this.LOC_LEFT: {\n var currentColumn = this.selected % this.totalColumns;\n if (this.selected > 0 && currentColumn > 0) {\n if (currentColumn == this.rangeStart) {\n this.changeRange(-1);\n }\n this.selected = this.selected - 1;\n }\n break;\n }\n }\n },\n changeRange: function(offset) {\n if (offset > 0){\n if (this.rangeEnd < this.totalColumns - 1) {\n this.rangeStart = this.rangeStart + 1;\n this.rangeEnd = this.rangeEnd + 1;\n }\n }\n else if (offset < 0){\n if (this.rangeStart > 0) {\n this.rangeStart = this.rangeStart - 1;\n this.rangeEnd = this.rangeEnd - 1;\n }\n }\n }\n });\n return mobileSelectCountryView;\n});"
},
{
"alpha_fraction": 0.4779771566390991,
"alphanum_fraction": 0.4779771566390991,
"avg_line_length": 26.909090042114258,
"blob_id": "192600863885679851f607eb05564481515436c0",
"content_id": "61ec0c0e0d84ad21aeb9939cb707cd684b08c0af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/static/scripts/views/alertView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\"], function ($, _, Backbone, Router) {\n var alertView = Backbone.View.extend({\n tagName: 'div',\n template:_.template($('#alert-template').html()),\n /* 参数定义\n options: {\n message: \"\",\n icon: \"\",\n buttons:[]\n },\n */ \n initialize: function(options) {\n\t\t\tthis.options = options;\n },\n render: function() {\n this.$el.empty();\n this.$el.html(this.template(this.options));\n return this;\n }\n });\n return alertView;\n});"
},
{
"alpha_fraction": 0.46424242854118347,
"alphanum_fraction": 0.46464645862579346,
"avg_line_length": 37.092308044433594,
"blob_id": "cfe0586debd3e3bcb80fc3ab1222a0f25a5abded",
"content_id": "ea9ebeaa7f820e49f690d0d3ea4e2cdae19e4ce1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2513,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 65,
"path": "/static/scripts/components/appController.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone'], function ($, _, Backbone) {\n var AppController = {\n currentView: null,\n home: function() {\n var self = this;\n require(['views/homeView'], function(HomeView) {\n var view = new HomeView();\n self.renderView.call(self, view);\n });\n },\n countries: function () {\n var self = this;\n require(['views/mobileSelectCountryView'], function(MobileSelectCountryView) {\n var view = new MobileSelectCountryView();\n self.renderView.call(self, view);\n });\n },\n mobile: function ()\n {\n var self = this;\n require(['views/mobileView'], function(MobileView) {\n var view = new MobileView();\n self.renderView.call(self, view);\n });\n },\n channel: function(channelId) {\n console.log(channelId);\n var self = this;\n if (channelId == 'Mobile Carrier Billing') {\n if (app.selectedCountry.length <= 0) // 如果没有国家信息,选择国家\n {\n require(['routers/router'], function(Router) {\n Router.navigate('countries', {trigger: true});\n });\n }\n else\n {\n require(['views/mobileSelectOperatorView'], function(MobileSelectOperatorView) {\n var view = new MobileSelectOperatorView();\n self.renderView.call(self, view);\n });\n }\n }\n else if (channelId == 'Credit Cards') {\n require(['views/creditView'], function(CreditView) {\n var view = new CreditView();\n self.renderView.call(self, view);\n });\n }\n else if (channelId == 'Broadband Billing') {\n require(['views/broadbandView'], function(BroadbandView) {\n var view = new BroadbandView();\n self.renderView.call(self, view);\n });\n }\n },\n renderView: function(view) {\n this.currentView && this.currentView.remove();\n $(document).unbind(\"keypress\"); // 取消键盘监听\n $('#main').html(view.render().el);\n this.currentView = view;\n }\n }\n return AppController;\n});"
},
{
"alpha_fraction": 0.5214446783065796,
"alphanum_fraction": 0.5214446783065796,
"avg_line_length": 28.600000381469727,
"blob_id": "c754b7287b0adb8df405cd89f8814c00af22723c",
"content_id": "d7b44fb356278b1e359f285345b32f30e75d544d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 443,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 15,
"path": "/static/scripts/views/creditView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\"], function ($, _, Backbone, Router) {\n var creditView = Backbone.View.extend({\n \ttagName: 'div',\n template:_.template($('#credit-template').html()),\n initialize: function() {\n\t\t\t\n },\n render: function() {\n this.$el.empty();\n this.$el.html(this.template());\n return this;\n }\n });\n return creditView;\n});"
},
{
"alpha_fraction": 0.5114504098892212,
"alphanum_fraction": 0.5139949321746826,
"avg_line_length": 25.266666412353516,
"blob_id": "2f6b6e3939ba1a44c5dafac902e30e0e39785b47",
"content_id": "1c625240b0466d8d583ac1091d8a5240fcf51959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 393,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 15,
"path": "/static/scripts/models/channelsInfo.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['backbone'], function (Backbone) {\n var channelsInfo = Backbone.Model.extend({\n defaults: {\n channels: [],\n selectedChannel: null,\n description: ''\n },\n validate: function(attrs, options) {\n if (attrs.channels.length == 0) {\n return \"You must add a channel!\";\n\t\t\t}\n\t\t}\n\t});\n return channelsInfo;\n});"
},
{
"alpha_fraction": 0.5078909397125244,
"alphanum_fraction": 0.5078909397125244,
"avg_line_length": 32.238094329833984,
"blob_id": "66a7501bae649ba17ef36a47ca8d988210381a66",
"content_id": "28cd2499f8085db655c6c2121b6a7553e62c76ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 21,
"path": "/static/scripts/views/broadbandView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\", \"components/utils\"], function ($, _, Backbone, Router, Utils) {\n var broadbandView = Backbone.View.extend({\n tagName: 'div',\n template:_.template($('#broadband-template').html()),\n initialize: function() {\n \n },\n events: {\n \"click #pay\": \"pay\"\n },\n pay: function (params) {\n Utils.showAlert(\"Payment Successfully!\", \"images/pay_success.png\", [\"OK\",\"Cancel\"]);\n },\n render: function() {\n this.$el.empty();\n this.$el.html(this.template());\n return this;\n }\n });\n return broadbandView;\n});"
},
{
"alpha_fraction": 0.48296836018562317,
"alphanum_fraction": 0.48296836018562317,
"avg_line_length": 36.40909194946289,
"blob_id": "1a1ca61bbb21d1622024ed3c130791625cb39b3b",
"content_id": "c0ab133ef1c66575152e0d90d8259f03ee11b3aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 892,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/static/scripts/routers/router.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', 'components/appController'], function ($, _, Backbone,\nAppController) {\n var router = Backbone.Router.extend({\n routes: {\n '': 'home', // 选择支付渠道页面\n 'channel/:channelId':'channel', // 各具体渠道首页\n 'countries' : 'countries', // 进入手机支付->国家选择页面\n 'mobile' : 'mobile' // 手机支付扫码页面 \n },\n initialize: function() {\n var routeName;\n for (var route in this.routes) {\n routeName = this.routes[route];\n this.route(route, routeName, $.proxy(AppController[routeName], AppController));\n } \n },\n start: function () {\n Backbone.history.start();\n } \n });\n return new router();\n});"
},
{
"alpha_fraction": 0.5326278805732727,
"alphanum_fraction": 0.5326278805732727,
"avg_line_length": 36.83333206176758,
"blob_id": "6f67b61df2d7481d400ee699255dc1bda9118b38",
"content_id": "0175f291e284350907d41d952d101935e421f481",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 30,
"path": "/static/scripts/views/homeView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\", \"models/channelsInfo\"],\n function ($, _, Backbone, Router, ChannelsInfo) {\n var homeView = Backbone.View.extend({\n tagName: 'div',\n template:_.template($('#home-template').html()),\n initialize: function() {\n this.channelsInfo = app.channelsInfo;\n },\n events: {\n \"click #next\": \"next\"\n },\n render: function() {\n this.$el.empty();\n var info = this.channelsInfo.toJSON(); // toJSON生成json对象,不是字符串\n this.$el.html(this.template(info));\n \n // this.$el.append(\"HomeView\"+this.channelsInfo.get('selectedChannel'));\n // this.$el.append(this.addCreateAgentButton());\n // this.collection.each(function(item) {\n // this.addOne(item);\n // }, this);\n return this;\n },\n next: function(event) {\n var channelId = event.currentTarget.getAttribute('channelId');\n Router.navigate('channel/'+channelId, {trigger: true});\n }\n });\n return homeView;\n});"
},
{
"alpha_fraction": 0.6446991562843323,
"alphanum_fraction": 0.6446991562843323,
"avg_line_length": 34,
"blob_id": "12760c34c38dfaf86d8fca52cdac590e0b861685",
"content_id": "18ae8952f1ac9497613472e383b1e2cefbca312b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 10,
"path": "/static/scripts/components/utils.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', 'views/alertView'], function ($, _, Backbone, AlertView) {\n\tvar utils = {\n\t\tshowAlert: function (message, icon, buttons) {\n\t\t\tvar options = {\"message\":message, \"icon\":icon, \"buttons\":buttons};\n\t\t\tvar alertView = new AlertView(options);\n\t\t\t$('#main').html(alertView.render().el);\n\t\t}\n\t};\n\treturn utils;\n});"
},
{
"alpha_fraction": 0.6639785170555115,
"alphanum_fraction": 0.6639785170555115,
"avg_line_length": 27.69230842590332,
"blob_id": "7203a93096b60fa44e58102de2be17103d1c751f",
"content_id": "53fd2ef8582c8181565a92f9b76712efdfadcdb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/static/scripts/components/dataService.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'models/channelsInfo'], function ($, ChannelsInfo) {\n\t\n\tvar DataService = {\n\t\tgetInitData: function () {\n\t\t\tvar channelsInfo = new ChannelsInfo({\n\t\t\t\tchannels: ['Mobile Carrier Billing','Broadband Billing','Credit Cards'],\n \tselectedChannel: 'Mobile Carrier Billing',\n\t\t\t});\n\t\t\tapp.channelsInfo = channelsInfo;\n\t\t}\n\t};\n\treturn DataService;\n});"
},
{
"alpha_fraction": 0.5054704546928406,
"alphanum_fraction": 0.5054704546928406,
"avg_line_length": 29.53333282470703,
"blob_id": "9681c6774b2c5b49b4d785dd7037020d821e5710",
"content_id": "f76d646bef37b21a2f0814072aa44869a4e0f5ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 15,
"path": "/static/scripts/views/mobileView.js",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "define(['jquery', 'underscore', 'backbone', \"routers/router\"], function ($, _, Backbone, Router) {\n var mobileView = Backbone.View.extend({\n tagName: 'div',\n template:_.template($('#mobile-template').html()),\n initialize: function() {\n \n },\n render: function() {\n this.$el.empty();\n this.$el.html(this.template());\n return this;\n }\n });\n return mobileView;\n});"
},
{
"alpha_fraction": 0.5126811861991882,
"alphanum_fraction": 0.5181159377098083,
"avg_line_length": 19.44444465637207,
"blob_id": "2a9a510c6d23cdb589a0931fae1a81d96f39160e",
"content_id": "f24302205ddecce695be5a544fdacabb364ce09b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 27,
"path": "/server.py",
"repo_name": "xiaxiaowei/SPA",
"src_encoding": "UTF-8",
"text": "import web;\nimport json;\n\nurls = (\"/\", \"home\",\n \"/hello\", \"hello\",\n \"/ACS/vas/getPayChannelList\", \"getPayChannelList\",\n );\napp = web.application(urls, globals());\n\nclass home:\n def GET(self):\n raise web.redirect('/static/');\n\nclass hello:\n def GET(self):\n return 'Hello, world!';\n\nclass getPayChannelList:\n def GET(self):\n channels = ['1','2'];\n resp = {'result':0,\n 'channels':channels,\n };\n return json.dumps(resp);\n\nif __name__ == \"__main__\":\n app.run();\n"
}
] | 13 |
aggarwal-manisha/Python | https://github.com/aggarwal-manisha/Python | d0338db557d9d4986dcfac8cd743ca6789d1c9b6 | 2cb25a1e3dc5d909e27fda2f3a4f9a2440c616f7 | 7faa1da9cab2cc5f761975b9e114782d393b7931 | refs/heads/master | 2022-07-17T09:29:56.595511 | 2020-05-21T20:28:22 | 2020-05-21T20:28:22 | 265,014,536 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.33986929059028625,
"alphanum_fraction": 0.35947713255882263,
"avg_line_length": 20.714284896850586,
"blob_id": "879ef6deed8810640484d993d49c870e280cd7dd",
"content_id": "cea3f62fde0a4118fca6922a1716ab0579ccb58f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 7,
"path": "/prime_numbers.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "def prime(n):\n for i in range(3, n):\n for j in range(2, i):\n if i%j == 0:\n break\n else:\n print(i)\n "
},
{
"alpha_fraction": 0.6224256157875061,
"alphanum_fraction": 0.6681922078132629,
"avg_line_length": 18.04347801208496,
"blob_id": "42dcbbb66c0013156e8afc0df6f0b8d7e00350eb",
"content_id": "adff7cad4019716cb51429ce872d8e217af3e4f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 23,
"path": "/tuple.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "\"\"\"\n--> tuples are immutable(they cannot be changed)\n--> iteration is faster in tuple than list\n\"\"\"\n\ntup = (10, 20, 30, 10)\n\nprint(tup[1]) #-->result 20\n\ntup[1] = 33 # throws error as tuples r immutable\n\n#counts tell the number of occurance of that paricular element\ntup.count(10) #result --> 2\n\n# returns the index of that particular element\ntup.index(20) #result --> 1\n\n\nn = input()\nl = map(int, n.split())\nt = tuple(l)\n\nprint(hash(t))"
},
{
"alpha_fraction": 0.6457356810569763,
"alphanum_fraction": 0.6551077961921692,
"avg_line_length": 15.904762268066406,
"blob_id": "eb205766bb8011a8c693292860ba38b8a126f307",
"content_id": "87cd6d330e6bbbd0c39a68ed3d8cf38234ca57d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1067,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 63,
"path": "/array.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''\ni --> integer\nl --> long\nf --> float\nd --> double\nb --> char\n'''\n\nfrom array import *\n\nval = array('i', [])\n\nn = int(input(\"Enter the length of the array\"))\n\nfor i in range(n):\n val = int(input(\"Enter the next value\"))\n val.append(val)\n\nprint(val)\n\nval_to_search = int(input(\"Enter the value to search\"))\n\nprint(val.index(val_to_search))\n\n#print the array\nfor obj in val:\n print(obj, end='')\nprint('\\r')\n\n#to find the memory location and size of an array\nprint(val.buffer_info())\n\n#insert value in the end of an array\nval.append(4)\nfor obj in val:\n print(obj, end='')\nprint('\\r')\n\n# insert a value 78 at index 3\nval.insert(2, 5)\nfor obj in val:\n print(obj, end='')\nprint('\\r')\n\n# pop removes the value at the mentioned index\nval.pop(2)\nfor obj in val:\n print(obj, end='')\nprint('\\r')\n\n#remove ,removes the 1st occurance of that element\nval.remove(3)\nfor obj in val:\n print(obj, end='')\nprint('\\r')\n\n\n#index function returns the index of the first occurance element\nval.index(3)\n\n#reverse function reverse the array\nval.reverse()\nprint(val)\n\n\n"
},
{
"alpha_fraction": 0.5839720964431763,
"alphanum_fraction": 0.5965156555175781,
"avg_line_length": 15.505746841430664,
"blob_id": "03773c0e579a7333f6d563a2527723a0df1dcbdd",
"content_id": "b40535b84fd42034098b507a225cc608e8f1b1bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1435,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 87,
"path": "/operators.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "import math\n\na = input()\nb = input()\n\n\nclass operators():\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def addition(self):\n result = self.x + self.y\n return result\n\n def subtraction(self):\n result = self.x - self.y\n return result\n\n def multiplication(self):\n result = self.x * self.y\n return result\n\n def division(self):\n #here result will be float\n result = self.x / self.y\n return result\n\n def floor_division(self):\n #also called ad integer division and resturn int value\n result = self.x // self.y\n return result\n\n def remainder(self):\n result = self.x % self.y\n return result\n\n def exponent(self):\n result = self.x ** self.y\n return result\n\n\nprint(math.floor(2.9)) #result --> 2\n\nprint(math.ceil(2.1)) #result -->3\n\nprint(math.pow(3,2))\n#Relational operators\n\nprint(a > b) #greater then\n\nprint(a < b) #less then\n\nprint(a == b) # equal to\n\nprint(a >= b) # greater than equal to\n\nprint(a <= b) #less than equal to\n\nprint(a != b) # not equal to\n\n\n#logical operators\n\nprint(a>8 and b<8) #true when both the conditions r true\n\nprint(a>8 or b<8) #true when any of the conditions is true\n\na = True\n\nprint(not a) #revere the result\n\n\n#binary operators\n\n#:--> decimal to binary\n\nprint(bin(25))\n\n#:--> decimal to octal\n\nprint(oct(25))\n\n#:--> decimal to hexadecimal\n\nprint(hex(25))"
},
{
"alpha_fraction": 0.4964539110660553,
"alphanum_fraction": 0.5035461187362671,
"avg_line_length": 13.100000381469727,
"blob_id": "6606637053655b6d62b6d479782ded9c534cb9c7",
"content_id": "d1c3578941dd1ca18182cbaa143f2d0b33aa5450",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 10,
"path": "/second_largest.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "\nli = map(int, input().split(','))\nlis = list(li)\n\nlis.sort()\nlis.reverse()\n\nfor i in lis:\n if i != lis[0]:\n print(i)\n break"
},
{
"alpha_fraction": 0.3175675570964813,
"alphanum_fraction": 0.3445945978164673,
"avg_line_length": 14.526315689086914,
"blob_id": "e4af34be878c5ce4c99dff8bbc9eafa8d46e3f36",
"content_id": "3314423d1f35593b89a5b861eda8de9d4bb2e417",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 19,
"path": "/fibonacci.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "\n\ndef fibonacci(n):\n a = 0\n b = 1\n if n == 1:\n lis = [a]\n elif n == 2:\n lis = [a, b]\n else:\n lis = [0, 1]\n for i in range(n):\n c = a+b\n a = b\n b = c\n lis.append(c)\n\n print(lis)\n return lis\n\nfibonacci(10)"
},
{
"alpha_fraction": 0.6596026420593262,
"alphanum_fraction": 0.6675496697425842,
"avg_line_length": 19.2702693939209,
"blob_id": "a77e00cad5d6794ba5556de82b92a1779f4dba02",
"content_id": "9cbfa471c91f21f304612130464935aeed6551f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 37,
"path": "/dictionary.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''\n--> these are set of key value pairs\n--> key should be unique and immutable\n\n'''\n\n\ndata = {\"manisha\": \"engineer\", \"uma\": \"lawyer\", \"preeti\": \"CA\"}\n\nprint(data[\"manisha\"]) # result-->\"enginer\n\n#it will return error if key is not present in the dictionary\n\ndata.get('xyz') #result --> None\n\n#it will return None if key is not present in the dict\n\ndata.get('xyz', 'Not found')\n\n# it will return the msg Not found if key is not present in the dict\n\n\n#creating a dictionary using two tuples\n\ntup1 = ('manisha', 'preeti', 'uma')\n\ntup2 = ('python', 'java', 'javascript')\n\ndata = dict(zip(tup1, tup2)) #both tup1 and tup2 should be of same size\n\n#adding more values to the dict\n\ndata['monika'] = 'c++'\n\n#deleting the value from dict\n\ndel data['manisha']\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.66576087474823,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 25.214284896850586,
"blob_id": "939536afcecd96eb42a78d8fd0614af6efa4347e",
"content_id": "8f6b76b90a65867f9595a605f41aaf9ba11d9e6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 14,
"path": "/sets.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''\n---> collection of unique set of elements\n---> it is not stored in a particular sequence\n---> set uses concept of hash and using hash we improve thr performance , we want to fetch the element as fast as possible hence no sequencing\n---> indexing is not supported bcoz of no sequence\n'''\n\nsets = {1, 2, 3, 4}\n\nsets.update({1, 5, 6})\n\nsets.add(10)\n\nsets.remove(10)\n\n"
},
{
"alpha_fraction": 0.33176469802856445,
"alphanum_fraction": 0.3764705955982208,
"avg_line_length": 14.666666984558105,
"blob_id": "8bf942009cacad758851721c37ab0b60f30d07af",
"content_id": "76c4d80757c4dd3bbf6d5f338ec368b5f8499a9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 27,
"path": "/program1.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''\nPrint all the values from 1 to 100 which are not divisible by 3 or 5.\n\noutput should be in following format\n\n# # # #\n# # # #\n# # # #\n'''\n\ndef pattern():\n\n x = 1\n y = 1\n while x <= 100:\n if x % 3 != 0 or x % 5 != 0:\n pass\n else:\n if y % 5 == 0:\n print('\\n')\n else:\n print(x, ' ', end='')\n y += 1\n\n x += 1\n\npattern()\n\n\n"
},
{
"alpha_fraction": 0.5731707215309143,
"alphanum_fraction": 0.5731707215309143,
"avg_line_length": 7.199999809265137,
"blob_id": "b6590a4ac259a87df1b9bfb9aec76de851a62cfe",
"content_id": "b49a8b94d4aa1903080a7838869fc29554099ab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 10,
"path": "/numpy.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "import numpy\n\n\n\n# array()\n# linspace()\n# logspace()\n# arange()\n# zeros()\n# ones()\n"
},
{
"alpha_fraction": 0.6026393175125122,
"alphanum_fraction": 0.662756621837616,
"avg_line_length": 25.25,
"blob_id": "7493caa1e0b8707bbf42bb11ec6f8875d5c65ab7",
"content_id": "2a6fdb159f2e4a5366ae68a4d927cb92bcce3b63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1364,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 52,
"path": "/lists.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''docs:-\n ---> lists are mutable\n ---> collection of same or different elements\n''' \n\n\nnums = [10, 20, 30, 40, 50]\n\nprint(nums[0])\n\nprint(nums[2:]) #result will be 2 element till end \n\nprint(nums[-1]) #print last elemnt \n\nnames = ['manisha', 'uma', 'preeti']\n\nvalues = [1, 10.5, \"manisha\"] # we can have list having different types\n\nnums.append(78) # result --> [10, 20, 30, 40, 50, 78]\n# --> append will insert in the end of the list\n\nnums.insert(2, 100) # result --> [10, 20, 100, 30, 40, 50, 78]\n#--> insert will insert at the given index value\n\nnums.remove(10) # result --> [20, 100, 30, 40, 50, 78]\n# -->remove will remove the first occurance of that elemnt from the left\n# --> remove will throw an error if that element is not present in the list\n\nnums.pop(1) #result -->[20, 30, 40, 50, 78]\n# --> pop removes elemnt at that particular index and gives error if greater index is given\n# --> if no index value is given then it will remove the last element\n\ndel nums[4]\n\ndel nums[1:]\n#--> in del list indices must be integers or slices\n\nnums.extend([1,2,3])\n# --> it will extend the nums list with metioned values\n\nprint(min(nums))\n# --> minimum value in the list\n\nprint(max(nums))\n# --> maximum value in the list\n\nprint(sum(nums))\n# --> do the sum of all the values of the list\n\n\nprint(nums.sort())\n# --> will sort the list (by default in ascending order)"
},
{
"alpha_fraction": 0.6103895902633667,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 16.22222137451172,
"blob_id": "becb1ddba1dc1d4e80c239f0fb7a5b1629b30e83",
"content_id": "fc196eda6e7af5ac2e47e13cfcaf046a28184230",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 9,
"path": "/factorial.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "#recursion is calling a function by itself\n\ndef factorial(num):\n if num == 0:\n return 1\n num *= factorial(num-1)\n return num\n\nfactorial(5)"
},
{
"alpha_fraction": 0.7128027677536011,
"alphanum_fraction": 0.7231833934783936,
"avg_line_length": 31,
"blob_id": "56b6b45d8c2d9bb41a26c8843c49f27aacc5ed58",
"content_id": "ac557eeaccc1f1a665739dacefb30e4bd92f452d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 9,
"path": "/readme.md",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "#create a virtualenv with python version 3.6\nvirtualenv -p python3 env\n\n#docs\n\n--> If you want help on any topic then go in python command prompt type 'help()'\n--> Then write 'topics'\n--> then mention which ever thing you want to know about \n--> to come to to command prompt write 'quit'\n\n"
},
{
"alpha_fraction": 0.5748820900917053,
"alphanum_fraction": 0.5772405862808228,
"avg_line_length": 25.515625,
"blob_id": "ede77554bfbe9c16ad10ec62fc205e20e00e888e",
"content_id": "dd09869493d4b734fe392d1c7641316c1285f38c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1696,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 64,
"path": "/bitwise.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "'''\npython bitwise operators works only on integers\nThe integers are first converted into binary and then operations are performed on bit by bit,\n hence the name bitwise operators. Then the result is returned in decimal format.\n\n'''\n\n\n\nclass Bitwise():\n def __init__(self, value):\n self.value = value\n\n def __and__(self, obj):\n print(\"And operator overloaded\")\n if isinstance(obj, Bitwise):\n return self.value & obj.value\n else:\n raise ValueError(\"Must be a object of class\")\n\n def __or__(self, obj):\n print(\"Or operator overloaded\")\n if isinstance(obj, Bitwise):\n return self.value | obj.value\n else:\n raise ValueError(\"Must be a object of class\")\n\n def __xor__(self, obj):\n print(\"Xor operator overloaded\")\n if isinstance(obj, Bitwise):\n return self.value ^ obj.value\n else:\n raise ValueError(\"Must be a object of class\")\n\n def __lshift__(self, obj):\n print(\"lshift operator overloaded\")\n if isinstance(obj, Bitwise):\n return self.value << obj.value\n else:\n raise ValueError(\"Must be a object of class\")\n\n def __rshift__(self, obj):\n print(\"rshift operator overloaded\")\n if isinstance(obj, Bitwise):\n return self.value & obj.value\n else:\n raise ValueError(\"Must be a object of class\")\n\n def __invert__(self):\n print(\"Invert operator overloaded\")\n return ~self.value\n\n\n\n\nif __name__ == \"__main__\":\n a = Bitwise(10)\n b = Bitwise(12)\n print(a & b)\n print(a | b)\n print(a ^ b)\n print(a << b)\n print(a >> b)\n print(~a)"
},
{
"alpha_fraction": 0.43501806259155273,
"alphanum_fraction": 0.46570396423339844,
"avg_line_length": 14.38888931274414,
"blob_id": "f3ace8d424b3356b4f3549fd6d44994b3a681950",
"content_id": "5733963904ef2b5d3ad6cb092fb3430bed1adc57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 36,
"path": "/global_local_variable.py",
"repo_name": "aggarwal-manisha/Python",
"src_encoding": "UTF-8",
"text": "\na = 10\nb = 9\nprint(id(a))\n\ndef variable():\n a = 15 #local var\n print(id(a))\n globals()['a']=19\n # x = globals()['a'] #global var\n y = globals()['b']\n print(id(globals()['a']))\n\nvariable()\nprint(id(a))\nprint(a)\n\n\n\n\n#count the odd and even numbers in a list\n\ndef lis(lis):\n even = 0\n odd = 0\n for obj in lis:\n if obj % 2 ==0:\n even+=1\n else:\n odd +=1\n\n return even, odd\n\ndata = [1,2,3,5]\neven, odd = lis(data)\n\nprint(\"Even: {}, Odd: {}\".format(even, odd))"
}
] | 15 |
bufferapp/cricket | https://github.com/bufferapp/cricket | 93edfbdeaab8362c7d250595e483bcbee04b1776 | 397cc0851ad66f9e763dded280709704d028ee61 | d57ee57ecdaea3f0a2bf92255f44f32730724007 | refs/heads/main | 2023-08-24T05:32:02.788266 | 2021-10-16T07:13:06 | 2021-10-16T07:13:06 | 310,290,823 | 2 | 1 | MIT | 2020-11-05T12:24:28 | 2021-07-08T08:16:26 | 2021-10-01T11:52:54 | Python | [
{
"alpha_fraction": 0.68113774061203,
"alphanum_fraction": 0.7080838084220886,
"avg_line_length": 29.363636016845703,
"blob_id": "04dbeb91757d7eefce08b1a060ed03afb54eeaf8",
"content_id": "7f07acc6b6b2b7df2ed026793487ec7955b9cb66",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 668,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 22,
"path": "/Dockerfile",
"repo_name": "bufferapp/cricket",
"src_encoding": "UTF-8",
"text": "FROM tiangolo/uvicorn-gunicorn-fastapi:python3.9\n\nCOPY requirements.txt /tmp/\n\nRUN pip install --no-cache-dir -r /tmp/requirements.txt\n\nARG USERNAME=vscode\nARG USER_UID=1000\nARG USER_GID=$USER_UID\n\n# Install development user\nRUN groupadd --gid $USER_GID $USERNAME \\\n && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \\\n && apt-get update \\\n && apt-get install -y sudo \\\n && echo $USERNAME ALL=\\(root\\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \\\n && chmod 0440 /etc/sudoers.d/$USERNAME\n\n# Add pretraied model\nADD \"https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_original-c1212f89.ckpt\" \"/app/model.ckpt\"\n\nCOPY main.py /app/\n"
},
{
"alpha_fraction": 0.4838709533214569,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 15,
"blob_id": "0c8189671dba87984262f7ab7f2f96ccd93708ba",
"content_id": "61ac8e03a92a35cf2eb21501f754e712d80d73b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 31,
"license_type": "permissive",
"max_line_length": 15,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "bufferapp/cricket",
"src_encoding": "UTF-8",
"text": "fastapi==0.68.1\ndetoxify==0.3.0"
},
{
"alpha_fraction": 0.7402912378311157,
"alphanum_fraction": 0.7475728392601013,
"avg_line_length": 44.77777862548828,
"blob_id": "051c6965b8ea20eb2c12c4ceb71b0cdfe6a9d955",
"content_id": "21a85b3e12af8b3bf9cea1f02c76f59dab8b6d7c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 412,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 9,
"path": "/README.md",
"repo_name": "bufferapp/cricket",
"src_encoding": "UTF-8",
"text": "# Cricket :cricket:\n\nYour personal *Jiminy Cricket* when posting online. This API will help you check the perceived impact a comment or post might have on a conversation.\n\n## Quickstart\n\n1. You'll need to request access to Perspective API. Place the secret API key in an `env` file under the `PERSPECTIVE_API_KEY` variable name.\n2. Run `make dev`.\n3. Play with it in [http://localhost/docs](http://localhost/docs).\n"
},
{
"alpha_fraction": 0.6908563375473022,
"alphanum_fraction": 0.7213352918624878,
"avg_line_length": 21.225807189941406,
"blob_id": "bae87de3b5d02730eebc2f4adf43c8b336ded66e",
"content_id": "09d5d54442c54a7cd81cbc4c744948e3e5d045d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 689,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 31,
"path": "/Makefile",
"repo_name": "bufferapp/cricket",
"src_encoding": "UTF-8",
"text": ".DEFAULT_GOAL := run\n\nIMAGE_NAME := gcr.io/buffer-data/cricket:latest\n\n.PHONY: build\nbuild:\n\tdocker build -t $(IMAGE_NAME) .\n\n.PHONY: get-model\nget-model:\n\tcurl -LJ https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_original-c1212f89.ckpt -o model.ckpt\n\n.PHONY: dev\ndev: build\n\tdocker run -it -v $(PWD):/app -p 8000:8000 --rm $(IMAGE_NAME) /bin/bash\n\n.PHONY: run\nrun:\n\tuvicorn main:app --reload\n\n.PHONY: docker-run\ndocker-run: build\n\tdocker run -it -p 80:80 --rm $(IMAGE_NAME)\n\n.PHONY: docker-push\ndocker-push: build\n\tdocker push $(IMAGE_NAME)\n\n.PHONY: deploy\ndeploy: docker-push\n\tgcloud beta run services replace service.yaml --platform managed --region us-central1\n"
},
{
"alpha_fraction": 0.674082338809967,
"alphanum_fraction": 0.674082338809967,
"avg_line_length": 23.97222137451172,
"blob_id": "6d39a442d6fe1f0befe237a68f953e346242cc79",
"content_id": "d80ba2ccb32c73fb60499fe6a6f9e6026d0f0a89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 899,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 36,
"path": "/main.py",
"repo_name": "bufferapp/cricket",
"src_encoding": "UTF-8",
"text": "from detoxify import Detoxify\nfrom fastapi import FastAPI\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel\n\n\nclass TextRequest(BaseModel):\n text: str\n\n\n# Start API\napp = FastAPI(\n title=\"Cricket\", description=\"Your personal Jiminy Cricket when posting online.\"\n)\n\nmodel = Detoxify(\"original\", checkpoint=\"model.ckpt\")\n\n\[email protected](\"/check/\")\nasync def check(r: TextRequest):\n response = model.predict(r.text)\n\n response_json = {\n \"toxicity\": float(response[\"toxicity\"]),\n \"severe_toxicity\": float(response[\"severe_toxicity\"]),\n \"obscene\": float(response[\"obscene\"]),\n \"threat\": float(response[\"threat\"]),\n \"insult\": float(response[\"insult\"]),\n \"identity_hate\": float(response[\"identity_hate\"]),\n }\n return JSONResponse(content=response_json)\n\n\[email protected](\"/\")\nasync def home():\n return {\"message\": \"Hello World!\"}\n"
}
] | 5 |
shu-YangLiu/PRcourse_Project | https://github.com/shu-YangLiu/PRcourse_Project | 6f4c8dfe57ad67a3a00b7c8761a9112a51fa827c | 269f9ef53562b4b7288664b705678c7cbc141b41 | e6541f106faa8e849968d1b8402c816dddf1f872 | refs/heads/master | 2020-05-29T15:35:02.677505 | 2019-05-29T13:01:56 | 2019-05-29T13:01:56 | 189,226,360 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5808281302452087,
"alphanum_fraction": 0.6040839552879333,
"avg_line_length": 24.941177368164062,
"blob_id": "aa94772374a4e6b61c3456b6bd042cb0394cb2be",
"content_id": "0c5b2f25b0614dc2f65d050d7d84662e02ecf089",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1809,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 68,
"path": "/RandomForest/RMtest.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport os\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nimport csv\n\n# os.getcwd()\n# X = pd.read_csv('X_real400.csv')\n# X = np.array(X)\n# Y = pd.read_csv('Y_real400.csv')\n# Y = np.array(Y)\n# Y = np.ravel(Y)\n\ndef getDataFromCSV():\n f = open(\"trainData_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainData = list(csvreader)\n\n f = open(\"trainLabels_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainLabels= list(csvreader)\n\n return trainData,trainLabels[0]\n\n#测试特诊维数与准确率的关系\n# max=0\n# count1 = 0\n# count2 = 0\n# for i in range(10,40):\n# for j in range(5,30):\n# print(i,\" \",j,end=\":\")\n# rf = RandomForestClassifier(n_estimators=i, max_depth=j)\n# rf.fit(X_train,y_train)\n# if max<rf.score(X_test,y_test):\n# max = rf.score(X_test,y_test)\n# count1 = i\n# count2 = j\n# print(rf.score(X_test,y_test))\n# print('max:',end='')\n# print(max, end='count=')\n# print(count1,count2)\n\n#寻找最佳决策树个数\ndef resolve1():\n X=[]\n Y=[]\n for i in range(5,200):\n child=[]\n print(i,\" \",end=\":\")\n X.append(i)\n rf = RandomForestClassifier(n_estimators=i)\n rf.fit(X_train,y_train)\n Y.append(rf.score(X_test,y_test)*100)\n print(rf.score(X_test,y_test))\n print(X,Y)\n\n plt.title('RandomForest')\n plt.ylabel('Accuracy Rate%')\n plt.xlabel('Number of Decision Trees')\n plt.plot(X,Y)\n plt.show()\n\ntrainData,trainLabels = getDataFromCSV()\nX_train, X_test, y_train, y_test = train_test_split(trainData, trainLabels, test_size = 0.25, random_state = 0)\nresolve()"
},
{
"alpha_fraction": 0.5065359473228455,
"alphanum_fraction": 0.6789215803146362,
"avg_line_length": 20.875,
"blob_id": "e0f0f289e755fef8e4f865303ca8356771727a59",
"content_id": "b8b47569dcb24c20f3f6a2f4124876a0f76a3518",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 56,
"path": "/SIFT_BOW_SVM/svm.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import csv\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\n\n\ndef getDataFromCSV(k):\n f = open(\"1trainData_\"+str(k)+\".csv\", 'r')\n csvreader = csv.reader(f)\n trainData = list(csvreader)\n\n f = open(\"1trainLabels_\"+str(k)+\".csv\", 'r')\n csvreader = csv.reader(f)\n trainLabels = list(csvreader)\n\n return trainData, trainLabels[0]\n\n\nfor i in [500]:\n Data, Labels = getDataFromCSV(i)\n\n Data = np.asarray(Data)\n Labels = np.asarray(Labels)\n\n X_train, X_test, y_train, y_test = train_test_split(\n Data, Labels, test_size=0.33, random_state=42)\n\n classif = OneVsRestClassifier(SVC(C=2, kernel='rbf', gamma=\"scale\"))\n\n print(\"fit...\")\n classif.fit(X_train, y_train)\n\n pre_results = classif.predict(X_test)\n\n right = 0\n for i in range(len(y_test)):\n if pre_results[i] == y_test[i]:\n right += 1\n\n print(right/len(y_test)*100)\n\n'''\n40.85831863609641\n50.91122868900647\n58.671369782480895\n60.08230452674898\n61.140505584950034\n62.08112874779541\n64.55026455026454\n65.02057613168725\n65.78483245149911\n66.01998824221046\n66.78424456202234\n68.72427983539094\n'''"
},
{
"alpha_fraction": 0.5655527114868164,
"alphanum_fraction": 0.5790488719940186,
"avg_line_length": 25.827587127685547,
"blob_id": "48ee4a85d0d262429440bbf8a90884977999cfba",
"content_id": "632b28060a4002506e0eea3d3a2c946da8937ee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1606,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 58,
"path": "/SIFT_BOW_SVM/test.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom pyramid import pyramid, silding_window\nimport os\nfrom non_max_suppression import nms\nfrom detector import BOW\nimport numpy as np\nimport cv2\nimport csv\n\n\ndef prepare_GestData(path):\n print(\"载入数据集GestData...\")\n files = []\n labels = []\n length = []\n for i in range(1, 11):\n fileslist = []\n labelslist = []\n for file in os.listdir(path+str(i)+\"/\"):\n fileslist.append(path+str(i)+\"/\"+file)\n labelslist.append(i)\n files.append(fileslist)\n labels.append(labelslist)\n length.append(len(fileslist))\n\n print(\"载入数据集完成:共10类。每类样本数为:\", length, \"sum:\", sum(length))\n\n return files, labels, length\n\n\nif __name__ == '__main__':\n\n bow = BOW()\n files = []\n labels = []\n\n train_Percent = 1000\n\n rootpath = 'GestData/'\n\n filesAll, labelsAll, length = prepare_GestData(rootpath)\n\n for kk in [500]:\n print(\"\\n构建bow, k=\", kk, \"...\")\n trainData, trainLabels = bow.fit(filesAll, labelsAll, kk, 200)\n\n bow.save(\"1dict_\"+str(kk)+\".pkl\")\n print(\"length of trainData:\", len(trainData), len(trainData[0]))\n print(\"length of trainLabels:\", len(trainLabels))\n\n with open('1trainData_' + str(kk) + '.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n for row in trainData:\n writer.writerow(row)\n\n with open('1trainLabels_' + str(kk) + '.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(trainLabels)\n"
},
{
"alpha_fraction": 0.5815273523330688,
"alphanum_fraction": 0.6006191968917847,
"avg_line_length": 33.625,
"blob_id": "1c01cb69751d5f11774221af8de1d8c91abadcf4",
"content_id": "effe120b6d75cfb6f9f64810158f7b1a3e4fe489",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1986,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 56,
"path": "/SVM/test3.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport os\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nimport csv\nfrom sklearn.multiclass import OneVsRestClassifier\n\nos.getcwd()\n\n'''\n调用存储好的数据集\n调整svm参数\n'''\n\ndef getDataFromCSV():\n f = open(\"trainData_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainData = list(csvreader)\n\n f = open(\"trainLabels_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainLabels= list(csvreader)\n\n return trainData,trainLabels[0]\n\n\nX,Y=getDataFromCSV()\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, shuffle=True)\n\n\n# gamma = [5, 10, 100, 150,200]\n# for x in gamma:\n# for y in range(1,6):\n# # 高斯核\n# clf_rbf = svm.SVC(C=x, kernel='rbf', gamma=y, probability=True, decision_function_shape='ovr',\n# class_weight='balanced')\n# clf_rbf.fit(X_train, y_train)\n# print(\"clf_rbf(c=\", x, \"gamma=\", y, \"): \", clf_rbf.score(X_test, y_test))\n# # 线性核\n# clf_liner = svm.SVC(C=x, kernel='linear', gamma=y, decision_function_shape='ovr')\n# clf_liner.fit(X_train, y_train)\n# print(\"clf_liner(c=\", x, \"gamma=\", y, \"): \", clf_liner.score(X_test, y_test))\n# # 多项式核\n# clf_poly = svm.SVC(C=x, kernel='poly', degree=y, gamma=100, coef0=0, decision_function_shape='ovr')\n# clf_poly.fit(X_train, y_train)\n# print(\"clf_poly(c=\", x,\"degree=\",y, \"): \", clf_poly.score(X_test, y_test))\n# # sigmoid核\n# clf_sigmoid = svm.SVC(C=x, kernel='sigmoid', gamma=y, probability=True, coef0=0)\n# clf_sigmoid.fit(X_train, y_train)\n# print(\"clf_sigmoid(c=\", x, \"gamma=\", y, \"): \", clf_sigmoid.score(X_test, y_test))\nfor y in range(1,10):\n clf_poly = OneVsRestClassifier(svm.SVC(C=5, kernel='poly', degree=y,gamma=50))\n\n clf_poly.fit(X_train, y_train)\n print(\"clf_poly(degree=\",y, \"): \", clf_poly.score(X_test, y_test))"
},
{
"alpha_fraction": 0.5996055006980896,
"alphanum_fraction": 0.6235559582710266,
"avg_line_length": 30.972972869873047,
"blob_id": "e01a617a3b26a3cc24eb84b9abcf2995ee79a362",
"content_id": "ccc2b7cf862d3944c299f4c520ef82ee20617033",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3597,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 111,
"path": "/SVM/test.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport os\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nimport csv\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\n\n'''\n调用存储好的数据集\n调整svm参数\n'''\n\nos.getcwd()\n# t = np.array([[1, 2, 3], [4, 5, 6]])\n# dt=pd.DataFrame(t)\n# dt.to_csv('result.csv',index=False,header=True)\n# print(t)\n#\n# X = pd.read_csv('X_real_30.csv')\n# X = np.array(X)\n# Y = pd.read_csv('Y_real_30.csv')\n# Y = np.array(Y)\n# Y = np.ravel(Y)\n# X_train=X\n# y_train=Y\n# # X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)\n# XX = pd.read_csv('X_test_30.csv')\n# XX = np.array(XX)\n# YY = pd.read_csv('Y_test_30.csv')\n# YY = np.array(YY)\n# YY = np.ravel(YY)\n# X_test=XX\n# y_test=YY random_state=0,\n\ndef getDataFromCSV():\n f = open(\"trainData_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainData = list(csvreader)\n\n f = open(\"trainLabels_500.csv\", 'r')\n csvreader = csv.reader(f)\n trainLabels= list(csvreader)\n\n return trainData,trainLabels[0]\n\n\nX,Y=getDataFromCSV()\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=40)\n# dX = pd.DataFrame(X_train)\n# dY = pd.DataFrame(y_train)\n# dX.to_csv('X_real_train_300.csv', index=False, header=True)\n# dY.to_csv('Y_real_train_300.csv', index=False, header=True)\n#\n# dXX = pd.DataFrame(X_test)\n# dYY = pd.DataFrame(y_test)\n# dXX.to_csv('X_real_test_300.csv', index=False, header=True)\n# dYY.to_csv('Y_real_test_300.csv', index=False, header=True)\n#\n#\n# X_train = pd.read_csv('X_real_train_300.csv')\n# X_train = np.array(X_train)\n# y_train = pd.read_csv('Y_real_train_300.csv')\n# y_train = np.array(y_train)\n# y_train = np.ravel(y_train)\n#\n# X_test = pd.read_csv('X_real_test_300.csv')\n# X_test = np.array(X_test)\n# y_test = pd.read_csv('Y_real_test_300.csv')\n# y_test = np.array(y_test)\n# y_test = np.ravel(y_test)\n\n# clf_rbf = OneVsRestClassifier(svm.SVC(C=5, kernel='rbf', gamma=35))\n# clf_rbf=svm.SVC(C=5,kernel='rbf',gamma=35,probability=True,decision_function_shape='ovr')\n# clf_rbf.fit(X_train, y_train)\n# print(\"clf_rbf(c=\", 2, \"gamma=scale): \", clf_rbf.score(X_test, y_test))\n# result=clf_rbf.predict(X_test)\n# acc=0\n# print(result)\n# print(y_test)\n# for i in range(len(result)):\n# if(result[i]==y_test[i]):\n# acc+=1\n# proba=clf_rbf.predict_proba(X_test)\n# print(acc/len(result))\n# print(\"clf_rbf(c=\", 2, \"gamma=scale): \", clf_rbf.score(X_test, y_test))\n# print(proba)\n\n# draw_roc(clf_rbf,y_test,proba)\n\ngamma = [5, 10, 15, 20,100]\nfor x in range(10,15):\n for y in range(4,6):\n # # 高斯核\n # clf_rbf = OneVsRestClassifier(svm.SVC(C=x, kernel='rbf', gamma=y))\n # clf_rbf.fit(X_train, y_train)\n # print(\"clf_rbf(c=\", x, \"gamma=\", y, \"): \", clf_rbf.score(X_test, y_test))\n # # 线性核\n # clf_liner = OneVsRestClassifier(svm.SVC(C=x, kernel='linear', gamma=y))\n # clf_liner.fit(X_train, y_train)\n # print(\"clf_liner(c=\", x, \"gamma=\", y, \"): \", clf_liner.score(X_test, y_test))\n # # 多项式核\n # clf_poly = OneVsRestClassifier(svm.SVC(C=x, kernel='poly', degree=6, gamma=100))\n # clf_poly.fit(X_train, y_train)\n # print(\"clf_poly(c=\", x, \"): \", clf_poly.score(X_test, y_test))\n # sigmoid核\n clf_sigmoid = OneVsRestClassifier(svm.SVC(C=x, kernel='sigmoid', gamma=y))\n clf_sigmoid.fit(X_train, y_train)\n print(\"clf_sigmoid(c=\", x, \"gamma=\", y, \"): \", clf_sigmoid.score(X_test, y_test))\n"
},
{
"alpha_fraction": 0.6159582138061523,
"alphanum_fraction": 0.699478030204773,
"avg_line_length": 26.9375,
"blob_id": "55aea1630838344785133279fda37ab3565f6caf",
"content_id": "0d77fb69ad9ebfb8e918377b9234c254dce74b21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1525,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 48,
"path": "/SVM/bfmatcher.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n'''\n展示sift提取的感兴趣点,并把两张图片进行匹配\n'''\n\nimgname1='img2/02/zyh_2_4.jpg'\nimgname2='img2/01/gjq_1_4.jpg'\n\nsift = cv2.xfeatures2d.SIFT_create()\n\nimg1 = cv2.imread(imgname1)\ngray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) # 灰度处理图像\n\n# gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\nkp1, des1 = sift.detectAndCompute(img1, None) # des是描述子\n\nimg2 = cv2.imread(imgname2)\ngray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # 灰度处理图像\nkp2, des2 = sift.detectAndCompute(img2, None) # des是描述子\n\n# hmerge = np.hstack((gray1, gray2)) # 水平拼接\n# cv2.imshow(\"gray\", hmerge) # 拼接显示为gray\n# cv2.waitKey(0)\n\nimg3 = cv2.drawKeypoints(img1, kp1, img1, color=(255, 0, 255)) # 画出特征点,并显示为红色圆圈\nimg4 = cv2.drawKeypoints(img2, kp2, img2, color=(255, 0, 255)) # 画出特征点,并显示为红色圆圈\n# hmerge = np.hstack((img3, img4)) # 水平拼接\n# cv2.imshow(\"point\", hmerge) # 拼接显示为gray\ncv2.imshow(\"123\",img3)\ncv2.imshow(\"123\",img4)\n# cv2.waitKey(0)\n# BFMatcher解决匹配\nbf = cv2.BFMatcher()\nmatches = bf.knnMatch(des1, des2, k=2)\n# 调整ratio\ngood = []\nfor m, n in matches:\n if m.distance < 0.75 * n.distance:\n good.append([m])\n\n# img5 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,flags=2)\nimg5 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)\ncv2.imshow(\"BFmatch\", img5)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.7661290168762207,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 10.363636016845703,
"blob_id": "856c05f05eaafcc61479dd1d951e853789deeae9",
"content_id": "f94f1cfebdddc2add656e04c0d2b592b3295d350",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 11,
"path": "/ResNet/readme.md",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "gesture_res.py 网络定义\n\ntrain.py 训练模型\n\ntest.py 测试模型\n\ntrain_acc.txt 训练集上准确率\n\ntest_acc.txt 测试上准确率\n\npython3 *.py 运行程序,需配置pytorch环境"
},
{
"alpha_fraction": 0.5738193988800049,
"alphanum_fraction": 0.586246907711029,
"avg_line_length": 38.69736862182617,
"blob_id": "0917c0deb038aa93cd9a90186145a00e1ea33eb4",
"content_id": "40ba5b33432d1bb1c65e999443c10ca886a7c728",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6719,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 152,
"path": "/SVM/test2.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np # We'll be storing our data as numpy arrays\nimport os # For handling directories\nfrom PIL import Image # For handling the images\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg # Plotting\nfrom sklearn.ensemble import RandomForestClassifier\nimport cv2\nfrom sklearn.externals import joblib\nfrom sklearn import svm\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport pickle\nfrom sklearn.externals import joblib\n\n\n'''\n调用存储好的BoW模型和数据集\n调整svm参数\n进行测试\n'''\n\nclass BOW(object):\n def __init__(self, ):\n # 创建一个SIFT对象 用于关键点、描述符提取\n self.sift = cv2.xfeatures2d.SIFT_create()\n\n def getData(self, kmean=10):\n # 创建BOW训练器,指定k-means参数k 把处理好的特征数据全部合并,利用聚类把特征词分为若干类,此若干类的数目由自己设定,每一类相当于一个视觉词汇\n bow_kmeans_trainer = cv2.BOWKMeansTrainer(kmean)\n\n for i in range(0, 9): # Loop over the ten top-level folders\n for j in os.listdir('../0' + str(i) + '/'):\n len = 0\n if not j.startswith('.'): # Again avoid hidden folders\n for k in os.listdir('../0' + str(i) + '/' + j + '/'):\n len += 1\n path = '../0' + str(i) + '/' + j + '/' + k\n print(path)\n bow_kmeans_trainer.add(self.sift_descriptor_extractor(path))\n if len == 100:\n break\n\n # 进行k-means聚类,返回词汇字典 也就是聚类中心\n self.voc = bow_kmeans_trainer.cluster()\n\n # print( voc.shape)\n\n # FLANN匹配 参数algorithm用来指定匹配所使用的算法,可以选择的有LinearIndex、KTreeIndex、KMeansIndex、CompositeIndex和AutotuneIndex,这里选择的是KTreeIndex(使用kd树实现最近邻搜索)\n flann_params = dict(algorithm=1, tree=5)\n flann = cv2.FlannBasedMatcher(flann_params, {})\n # 初始化bow提取器(设置词汇字典),用于提取每一张图像的BOW特征描述\n self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.sift, flann)\n self.bow_img_descriptor_extractor.setVocabulary(self.voc)\n x_data = []\n y_data = []\n datacount = 0\n # 根据bow_img_descriptor_extractor提取特征向量\n for i in range(0, 9): # Loop over the ten top-level folders\n for j in os.listdir('../0' + str(i) + '/'):\n if not j.startswith('.'): # Again avoid hidden folders\n count = 0 # To tally images of a given gesture\n for k in os.listdir('../0' + str(i) + '/' + j + '/'):\n path = '../0' + str(i) + '/' + j + '/' + k\n descriptor = self.bow_descriptor_extractor(path, kmean)\n x_data.append(descriptor)\n count += 1\n y_values = np.full((count, 1), lookup[j])\n y_data.append(y_values)\n datacount += count\n x_data = np.array(x_data, dtype='float32')\n y_data = np.array(y_data).reshape(datacount)\n print(x_data.shape)\n print(y_data.shape)\n return x_data, y_data\n\n def sift_descriptor_extractor(self, img_path):\n '''\n 特征提取:提取数据集中每幅图像的特征点,然后提取特征描述符,形成特征数据(如:SIFT或者SURF方法);\n '''\n im = cv2.imread(img_path, cv2.COLOR_BGR2GRAY)\n return self.sift.compute(im, self.sift.detect(im))[1]\n\n def bow_descriptor_extractor(self, img_path, kmean):\n '''\n 提取图像的BOW特征描述(即利用视觉词袋量化图像特征)\n '''\n im = cv2.imread(img_path, cv2.COLOR_BGR2GRAY)\n return self.bow_img_descriptor_extractor.compute(im, self.sift.detect(im)).reshape(kmean)\n\n def save(self):\n pickle.dump(self.voc, open('db_yuzhi', 'wb'))\n\n def load(self):\n with open('db_yuzhi', 'rb') as f:\n voc = pickle.load(f)\n # FLANN匹配 参数algorithm用来指定匹配所使用的算法,可以选择的有LinearIndex、KTreeIndex、KMeansIndex、CompositeIndex和AutotuneIndex,这里选择的是KTreeIndex(使用kd树实现最近邻搜索)\n flann_params = dict(algorithm=1, tree=5)\n flann = cv2.FlannBasedMatcher(flann_params, {})\n # 初始化bow提取器(设置词汇字典),用于提取每一张图像的BOW特征描述\n self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.sift, flann)\n self.bow_img_descriptor_extractor.setVocabulary(voc)\n # self.bow_img_descriptor_extractor = 
cv2.BOWImgDescriptorExtractor(self.sift, flann)\n # self.bow_img_descriptor_extractor.setVocabulary(self.voc)\n\n\ndef getLookUp():\n lookup = dict()\n reverselookup = dict()\n count = 0\n for j in os.listdir('../00/'):\n if not j.startswith('.'): # If running this code locally, this is to\n # ensure you aren't reading in hidden folders\n lookup[j] = count\n reverselookup[count] = j\n count = count + 1\n return lookup, reverselookup\n\n\nif __name__ == '__main__':\n lookup, reverselookup = getLookUp()\n print(lookup, reverselookup)\n bow = BOW()\n # bow = pickle.load(open('db', 'rb'))\n bow.load()\n X = pd.read_csv('X_real.csv')\n X = np.array(X)\n Y = pd.read_csv('Y_real.csv')\n Y = np.array(Y)\n Y = np.ravel(Y)\n # clf_rbf = joblib.load('svm_yuzhi.m')\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)\n clf_rbf = svm.SVC(C=5, kernel='rbf', gamma=25, probability=True, decision_function_shape='ovr',\n class_weight='balanced')\n # clf_liner = svm.SVC(C=1, kernel='linear', gamma=200, decision_function_shape='ovr')\n # clf_rbf=joblib.load('svm_real.m')\n clf_rbf.fit(X_train, y_train)\n # print(\"clf_rbf: \", clf_rbf.score(X_test, y_test))\n # for i in range(1,11):\n # aaaa = bow.bow_descriptor_extractor(str(i)+\".png\", 20)\n # joblib.dump(clf_rbf, 'svm_real.m')\n print(clf_rbf.score(X_test,y_test))\n # 照一张图片进行测试判断\n\n aaaa = bow.bow_descriptor_extractor('../img2/06/gjq_6_16.jpg', 20)\n print(aaaa)\n # print(aaaa)\n\n # print(bow)\n # joblib.dump(clf_rbf, 'svm_yuzhi.m')\n # pickle.dump(svm,open('svm','wb'))\n # print(aaaa.reshape(1, -1))\n print(clf_rbf.predict(aaaa.reshape(1, -1)))\n\n"
},
{
"alpha_fraction": 0.5497159361839294,
"alphanum_fraction": 0.6055194735527039,
"avg_line_length": 33.22916793823242,
"blob_id": "d3d09013fe94b997ab5db8730240ae6c240eb619",
"content_id": "7637c6e3eee43095e1b531c8bb2253af548be3a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5062,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 144,
"path": "/SVM/yuzhichuli.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "# import cv2\n# import numpy as np\n# from matplotlib import pyplot as plt\n# img = cv2.imread('1.png')\n# GrayImage=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# # 中值滤波\n# GrayImage= cv2.medianBlur(GrayImage,5)\n# ret,th1 = cv2.threshold(GrayImage,127,255,cv2.THRESH_BINARY)\n# #3 为Block size, 5为param1值\n# th2 = cv2.adaptiveThreshold(GrayImage,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n# cv2.THRESH_BINARY,3,5)\n# th3 = cv2.adaptiveThreshold(GrayImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n# cv2.THRESH_BINARY,3,5)\n# titles = ['Gray Image', 'Global Thresholding (v = 127)',\n# 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n# images = [GrayImage, th1, th2, th3]\n# for i in range(4):\n# plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n# plt.title(titles[i])\n# plt.xticks([]),plt.yticks([])\n# plt.show()\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\n\n\n'''\n阈值分割预处理(验证效果不佳,已弃用)\n'''\n\n# img = cv2.imread('1.png')\n# GrayImage=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# # 中值滤波\n# GrayImage= cv2.medianBlur(GrayImage,5)\n# ret,th1 = cv2.threshold(GrayImage,127,255,cv2.THRESH_BINARY)\n# #3 为Block size, 5为param1值\n# th2 = cv2.adaptiveThreshold(GrayImage,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n# cv2.THRESH_BINARY,3,5)\n# th3 = cv2.adaptiveThreshold(GrayImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n# cv2.THRESH_BINARY,3,5)\n# titles = ['Gray Image', 'Global Thresholding (v = 127)',\n# 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n# images = [GrayImage, th1, th2, th3]\n# for i in range(0,4):\n# plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n# plt.title(titles[i])\n# plt.xticks([]),plt.yticks([])\n# plt.show()\n# 自适应阈值对比固定阈值\n# for i in range(1,11):\n# img = cv2.imread(str(i)+'.png', 0)\n\n# # 固定阈值\n# ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n# # 自适应阈值\n# th2 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 4)\n# th3 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n\n # titles = ['Original', 'Global(v = 127)', 'Adaptive Mean', 'Adaptive Gaussian']\n# images = [img, th1, th2, th3]\n#\n# for i in range(4):\n# plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')\n# plt.title(titles[i], fontsize=8)\n# plt.xticks([]), plt.yticks([])\n# plt.show()\n\n # cv2.imwrite(str(i)+'.png',th3)\n\n\n# os.makedirs(\"../new_img2\")\nfor j in os.listdir('../img2'):\n # print('../new/0' + str(i) + '/'+j)\n # os.makedirs('../new/0' + str(i) + '/'+j)\n if not j.startswith('.'): # Again avoid hidden folders\n for k in os.listdir('../img2/' + j + '/'):\n # print(k)\n path_old = '../img2/' + j + '/' + k\n img = cv2.imread(path_old, 0)\n th = cv2.adaptiveThreshold(\n img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n path_new = '../new_img2/' + j + '/' + k\n\n cv2.imwrite(path_new,th)\n cv2.imshow(path_new,th)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n print(path_new)\n\n\n# for i in range(0, 10): # Loop over the ten top-level folders\n# for j in os.listdir('../o/'):\n# # len = 0\n# if not j.startswith('.'): # Again avoid hidden folders\n# for k in os.listdir('../o/' + j + '/'):\n# path = '../o/' + j + '/' + k\n# print(path)\n # bow_kmeans_trainer.add(self.sift_descriptor_extractor(path))\n\n\n# img = cv2.imread('14.png',0)\n# GrayImage=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n# # 自适应阈值\n# th2 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 
11, 4)\n# th3 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n#\n# titles = ['Original', 'Global(v = 127)', 'Adaptive Mean', 'Adaptive Gaussian']\n# images = [img, th1, th2, th3]\n#\n# for i in range(4):\n# plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')\n# plt.title(titles[i], fontsize=8)\n# plt.xticks([]), plt.yticks([])\n# plt.show()\n#\n# # th3 = cv2.adaptiveThreshold(GrayImage,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n# # cv2.THRESH_BINARY,3,5)\n# cv2.imwrite('svm/14.png',th3)\n\n# # 固定阈值\n# ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n# # 自适应阈值\n# th2 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 4)\n# th3 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n#\n# titles = ['Original', 'Global(v = 127)', 'Adaptive Mean', 'Adaptive Gaussian']\n# images = [img, th1, th2, th3]\n#\n# for i in range(4):\n# plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')\n# plt.title(titles[i], fontsize=8)\n# plt.xticks([]), plt.yticks([])\n# plt.show()\n#\n# cv2.imwrite('15.png',th3)"
},
{
"alpha_fraction": 0.5032333135604858,
"alphanum_fraction": 0.5345714092254639,
"avg_line_length": 29.004974365234375,
"blob_id": "fe11ec3a69643425ad3b869fbec87d60887a7fcb",
"content_id": "95f2e77c9315b6354ffd9f182939373ce57b9d98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6479,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 201,
"path": "/slipWindows.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom mytest import *\nfrom gesture_res import *\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import datasets\nfrom torch.optim import lr_scheduler\nfrom torch import nn\nimport torch\nfrom torch.autograd import Variable\nimport sys\nfrom gesture_res import *\nfrom torchvision import transforms\nfrom PIL import Image, ImageDraw\n\ndef py_nms(dets, thresh, mode=\"Union\"):\n # 非极大值抑制 NMS\n if len(dets) == 0:\n return []\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n if mode == \"Union\":\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == \"Minimum\":\n ovr = inter / np.minimum(areas[i], areas[order[1:]])\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n return dets[keep]\n\ndef slide_window(img, window_size, stride):\n # 构建的金字塔图片滑动窗口。\n window_list = []\n w = img.shape[1]\n h = img.shape[0]\n if w <= window_size + stride or h <= window_size + stride:\n return None\n if len(img.shape) != 3:\n return None\n for i in range(int((w - window_size) / stride)):\n for j in range(int((h - window_size) / stride)):\n box = [j * stride, i * stride, j * stride + window_size, i * stride + window_size]\n window_list.append(box)\n return img, np.asarray(window_list)\n\ndef pyramid(image, f, window_size):\n # 构建金字塔模型\n w = image.shape[1]\n h = image.shape[0]\n img_ls = []\n while (w > window_size and h > window_size):\n img_ls.append(image)\n w = int(w * f)\n h = int(h * f)\n image = cv2.resize(image, (w, h))\n return img_ls\n\ndef min_gesture(img, F, window_size, stride):\n # 取最小手势\n h, w, d = img.shape\n w_re = int(float(w) * window_size / F)\n h_re = int(float(h) * window_size / F)\n if w_re <= window_size + stride or h_re <= window_size + stride:\n print(None)\n img = cv2.resize(img, (w_re, h_re))\n return img\n\nif __name__ == \"__main__\":\n # 残差网络模型初始化\n net = resnet101(False)\n if torch.cuda.is_available():\n net=net.cuda() \n net.eval()\n\n # 导入残差网络模型\n net.load_state_dict(torch.load(\"/home/zyh/文档/res/test/res101_shou/epoch_148acc93.63557105492589_91.pth\"))\n\n # 图片路径\n image = cv2.imread('ly_img/07/45.0.jpg')\n h, w, d = image.shape # 长, 宽, 通道数\n\n # 调参的参数\n # 窗口大小\n IMAGE_SIZE = 20\n # 步长\n stride = 10 \n # 最小手势大小\n F = 100 \n # 图片放缩比例\n ff = 0.7\n # 阈值概率\n p_1 = 0.97\n p_2 = 0.9\n\n overlapThresh_1 = 0.7\n overlapThresh_2 = 0.3\n\n # 需要检测的最小\n image_ = min_gesture(image, F, IMAGE_SIZE, stride)\n\n # 金字塔\n pyd = pyramid(np.array(image_), ff, IMAGE_SIZE)\n\n # 第一层滑动窗口\n window_after_1 = []\n for i, img in enumerate(pyd):\n # 滑动窗口\n print(\"layer:\",i)\n slide_return = slide_window(img, IMAGE_SIZE, stride)\n if slide_return is None:\n break\n img_1 = slide_return[0]\n window_net_1 = slide_return[1]\n w_1 = img_1.shape[1]\n h_1 = img_1.shape[0]\n\n patch_net_1 = []\n for box in window_net_1:\n patch = img_1[box[0]:box[2], box[1]:box[3], :]\n patch_net_1.append(patch)\n patch_net_1 = np.array(patch_net_1)\n print(patch_net_1.shape)\n \n window_net = window_net_1\n # 预测手势\n windows = []\n labels = []\n for i, pred in 
enumerate(patch_net_1):\n # 概率大于阈值的判定为滑动窗口。\n index, possibility = predict_image2(pred, net)\n # print(possibility[index - 1])\n if possibility[index] > p_1:\n # 保存窗口位置和概率。\n windows.append([window_net[i][0], window_net[i][1], window_net[i][2], window_net[i][3], possibility[index]])\n # 保存窗口标签\n labels.append(index)\n\n # 按照概率值 由大到小排序\n windows = np.asarray(windows)\n windows = py_nms(windows, overlapThresh_1, 'Union')\n window_net = windows\n for box in window_net:\n lt_x = int(float(box[0]) * w / w_1)\n lt_y = int(float(box[1]) * h / h_1)\n rb_x = int(float(box[2]) * w / w_1)\n rb_y = int(float(box[3]) * h / h_1)\n p_box = box[4]\n window_after_1.append([lt_x, lt_y, rb_x, rb_y, p_box])\n # 按照概率值 由大到小排序\n window_net = window_after_1\n\n # 第二层滑动窗口\n windows_2 = []\n if window_net == []:\n print(\"Finish\")\n if window_net != []:\n patch_net_2 = []\n img_2 = image\n for box in window_net:\n patch = img_2[box[0]:box[2], box[1]:box[3], :]\n patch = cv2.resize(patch, (2, 2))\n patch_net_2.append(patch)\n # 预测手势\n pred_net_2 = []\n for i, pred in enumerate(patch_net_2):\n # 概率大于阈值的判定为滑动窗口。\n index, possibility = predict_image2(pred, net)\n if possibility[index] > p_2:\n # 保存窗口位置和概率。\n windows_2.append([window_net[i][0], window_net[i][1], window_net[i][2], window_net[i][3], possibility[index]])\n # 保存窗口标签\n labels.append(index)\n # 按照概率值 由大到小排序\n windows_2 = np.asarray(windows_2)\n window_net = py_nms(windows_2, overlapThresh_2, 'Union')\n\n print(\"window_net:\",window_net)\n image = Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))\n # 圈出框\n for box in window_net:\n ImageDraw.Draw(image).rectangle((box[1], box[0], box[3], box[2]), outline = \"red\")\n # 保存图片\n image.save(\"result.jpg\",quality=95)\n"
},
{
"alpha_fraction": 0.5766007304191589,
"alphanum_fraction": 0.6325846314430237,
"avg_line_length": 26.127273559570312,
"blob_id": "55089e979fd0993fed2770c0cad0857f9fc73ec7",
"content_id": "a927c1cc3c994b6d1b2048a791ab8798e6995a1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3097,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 110,
"path": "/ResNet/test.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np # We'll be storing our data as numpy arrays\nimport os # For handling directories\nfrom PIL import Image # For handling the images\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import datasets\nfrom torch.optim import lr_scheduler\nfrom torch import nn\nimport torch\nfrom torch.autograd import Variable\nimport sys\nfrom gesture_res import *\nfrom torchvision import transforms\nfrom PIL import Image\n\n\n# net=resnet50(False)\n# if torch.cuda.is_available():\n# net=net.cuda() \n# net.eval()\n\n\n# net.load_state_dict(torch.load(\"/home/zyh/文档/res/test/model/epoch_276acc92.87581699346406.pth\"))\n#epoch_264acc92.81045751633987.pth 70%\n#epoch_352acc93.05010893246187.pth 70%\n#epoch_400acc94.16122004357298.pth 70%\n#epoch_348acc94.46623093681917.pth 70%\n\ndef softmax(L):\n expL=np.exp(L)\n sumExpL=sum(expL)\n result=[]\n for i in expL:\n result.append(i*1.0/sumExpL)\n return result\n\ndef predict_image(image_path,net):\n # print(\"Prediction in progress\")\n image = Image.open(image_path)\n\n # Define transformations for the image, should (note that imagenet models are trained with image size 224)\n transformation = transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n\n ])\n\n # 预处理图像 \n image_tensor = transformation(image).float().cuda()\n\n # 额外添加一个批次维度,因为PyTorch将所有的图像当做批次\n image_tensor = image_tensor.unsqueeze_(0)\n # print(image_tensor.shape)\n\n if torch.cuda.is_available():\n image_tensor.cuda()\n\n # 将输入变为变量\n input = Variable(image_tensor)\n\n # 预测图像的类\n output = net(input).data.cpu().numpy()\n print(softmax(output.reshape(10)))\n index = output.argmax()\n\n # print(output)\n return index\n\ndef testmodel(model):\n net=resnet101(False)\n if torch.cuda.is_available():\n net=net.cuda() \n net.eval()\n\n # net.load_state_dict(torch.load(\"/home/zyh/文档/res/test/model/\"+model))\n net.load_state_dict(torch.load(model))\n\n err=0\n all=0\n for i in range(1,11):\n path=\"/home/zyh/文档/res/lytest/lyImg/0\"+str(i)+\"/\"\n if i==10:\n path=\"/home/zyh/文档/res/lytest/lyImg/\"+str(i)+\"/\"\n for j in os.listdir(path):\n # print(i)\n all+=1\n aaa=int(predict_image(path+j,net))+1\n print(aaa)\n if aaa!=i:\n err+=1\n # print(j,aaa)\n print(model,err/all)\n\n# for model in os.listdir(\"/home/zyh/文档/res/test/model/\"):\n# testmodel(model)\ntestmodel(\"/home/zyh/文档/res/test/res101_shou/epoch_148acc93.63557105492589_91.pth\")\n\n# for i in range(1,11):\n# path=\"/home/zyh/文档/res/o/0\"+str(i)+\"/\"\n# if i==10:\n# path=\"/home/zyh/文档/res/o/\"+str(i)+\"/\"\n# for j in os.listdir(path):\n# # print(i)\n# all+=1\n# aaa=int(predict_image(path+j))+1\n# if aaa!=i:\n# err+=1\n# print(aaa)\n# print(err,all)"
},
{
"alpha_fraction": 0.5247302055358887,
"alphanum_fraction": 0.5332733988761902,
"avg_line_length": 33.069766998291016,
"blob_id": "29fa814697325a26778c208cacc6d139851464ad",
"content_id": "11b6cf39d44a34bd5bd75300e6ed4312c6215231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5328,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 129,
"path": "/SIFT_BOW_SVM/detector.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport pickle\nimport cv2\nimport numpy as np\n'''\n词袋模型BOW+SVM 目标检测\n\n'''\n\nclass BOW(object):\n\n def __init__(self,):\n # 创建一个SIFT对象 用于关键点提取\n self.feature_detector = cv2.xfeatures2d.SIFT_create()\n # 创建一个SIFT对象 用于关键点描述符提取\n self.descriptor_extractor = cv2.xfeatures2d.SIFT_create()\n\n def fit(self, files, labels, k, length=None):\n '''\n files:训练集图片路径\n labes:对应的每个样本的标签\n k:k-means参数k\n length:指定用于训练词汇字典的样本长度 length<=samples\n '''\n # 类别数\n classes = len(files)\n\n # 各样本数量\n samples = []\n for i in range(10):\n # samples.append(2)\n samples.append(len(files[i]))\n \n if length is None:\n length = samples \n # elif length > samples:\n # length = samples\n \n # FLANN匹配 参数algorithm用来指定匹配所使用的算法,可以选择的有LinearIndex、KTreeIndex、KMeansIndex、CompositeIndex和AutotuneIndex,这里选择的是KTreeIndex(使用kd树实现最近邻搜索)\n flann_params = dict(algorithm=1,tree=5)\n flann = cv2.FlannBasedMatcher(flann_params,{})\n \n # 创建BOW训练器,指定k-means参数k 把处理好的特征数据全部合并,利用聚类把特征词分为若干类,此若干类的数目由自己设定,每一类相当于一个视觉词汇\n bow_kmeans_trainer = cv2.BOWKMeansTrainer(k)\n\n print('building BOWKMeansTrainer...')\n # 合并特征数据 每个类从数据集中读取length张图片,通过聚类创建视觉词汇 \n for j in range(classes): \n for i in range(length): \n # 有一些图像会抛异常,主要是因为该图片没有sift描述符\n # print(\"building BOWKMeansTrainer: \",j+1,i+1,\"/\",length)\n sys.stdout.write(\"building BOWKMeansTrainer: \"+str(j+1)+\":\"+str((i+1)/length*100)+\"%\")\n sys.stdout.write('\\r')\n sys.stdout.flush() \n descriptor = self.sift_descriptor_extractor(files[j][i]) \n if not descriptor is None:\n bow_kmeans_trainer.add(descriptor) \n # print('error:',files[j][i])\n \n # 进行k-means聚类,返回词汇字典 也就是聚类中心\n print(\"进行k-means聚类...\")\n self.voc = bow_kmeans_trainer.cluster()\n \n # 输出词汇字典 <class 'numpy.ndarray'> (40, 128)\n print(\"输出词汇字典:\",type(self.voc),self.voc.shape)\n \n # 初始化bow提取器(设置词汇字典),用于提取每一张图像的BOW特征描述\n self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.descriptor_extractor,flann) \n self.bow_img_descriptor_extractor.setVocabulary(self.voc) \n \n # print('adding features to svm trainer...')\n \n # 创建两个数组,分别对应训练数据和标签,并用BOWImgDescriptorExtractor产生的描述符填充\n # 按照下面的方法生成相应的正负样本图片的标签\n traindata,trainlabels = [],[]\n for j in range(classes):\n for i in range(samples[j]):\n sys.stdout.write(\"adding features to svm trainer: \"+str(j+1)+\": \"+str((i+1)/samples[j]*100)+\"%\")\n sys.stdout.write('\\r')\n sys.stdout.flush() \n\n descriptor = self.bow_descriptor_extractor(files[j][i])\n if not descriptor is None:\n traindata.extend(descriptor)\n trainlabels.append(labels[j][i]) \n\n return traindata,trainlabels\n \n def sift_descriptor_extractor(self,img_path):\n '''\n 特征提取:提取数据集中每幅图像的特征点,然后提取特征描述符,形成特征数据(如:SIFT或者SURF方法);\n \n args:\n img_path:图像全路径\n ''' \n im = cv2.imread(img_path,0)\n keypoints = self.feature_detector.detect(im)\n # print(type(keypoints))\n if keypoints:\n return self.descriptor_extractor.compute(im,keypoints)[1]\n else:\n return None\n \n\n def bow_descriptor_extractor(self,img_path):\n '''\n 提取图像的BOW特征描述(即利用视觉词袋量化图像特征)\n \n args:\n img_path:图像全路径\n ''' \n im = cv2.imread(img_path,0)\n keypoints = self.feature_detector.detect(im)\n if keypoints:\n return self.bow_img_descriptor_extractor.compute(im,keypoints)\n else:\n return None\n\n def save(self,path):\n '''\n 保存模型到指定路径\n '''\n print('saving model....')\n\n f1 = os.path.join(os.path.dirname(path),path)\n with open(f1,'wb') as f:\n pickle.dump(self.voc,f)\n\n \n \n \n \n\n \n"
},
{
"alpha_fraction": 0.6818181872367859,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 20,
"blob_id": "5eaa357fa393a91b9c7867649ad0476c4346a87e",
"content_id": "06f0ffca174955288ed3af85bfcac6c2eda05a5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "\"# PRcourse_Project\" \n"
},
{
"alpha_fraction": 0.510690450668335,
"alphanum_fraction": 0.5309576988220215,
"avg_line_length": 39.11818313598633,
"blob_id": "3abe0cdeb0d43f39eb5586e51cd4fc1737fb325a",
"content_id": "4d630b9ddd804a6a2f6dc24dac93b3365a905fe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4698,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 110,
"path": "/ResNet/train.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "import numpy as np # We'll be storing our data as numpy arrays\nimport os # For handling directories\nfrom PIL import Image # For handling the images\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import datasets,transforms\nfrom torch.optim import lr_scheduler\nfrom torch import nn\nimport torch\nfrom torch.autograd import Variable\nimport sys\nfrom gesture_res import *\nimport os # For handling directories\n\nEPOCH = 500 #遍历数据集次数\npre_epoch = 0 # 定义已经遍历数据集的次数\nBATCH_SIZE = 50 #批处理尺寸(batch_size)\nLR = 0.001 #学习率\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]),\n 'test': transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ]),\n}\n\ndata_dir = '/home/zyh/文档/res/train/img2'\nimage_datasets = datasets.ImageFolder(data_dir,data_transforms[\"train\"])\n\ntest_dir = \"/home/zyh/文档/res/lytest/lyImg/\"\ntest_datasets = datasets.ImageFolder(test_dir,data_transforms[\"test\"])\n\n\ndataloders = torch.utils.data.DataLoader(image_datasets,\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=20)\n\ntestloders = torch.utils.data.DataLoader(test_datasets,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=20)\n \n\nprint(image_datasets.class_to_idx)\nnet=resnet101(False)\nif torch.cuda.is_available():\n net=net.cuda() \n\n# net.load_state_dict(torch.load(\"/home/zyh/文档/res/test/model/epoch_44acc86.135.pth\"))\ncriterion = nn.CrossEntropyLoss() #损失函数为交叉熵,多用于多分类问题\n# optimizer = torch.optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4) #优化方式为mini-batch momentum-SGD,并采用L2正则化(权重衰减)\n\noptimizer=torch.optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.99))\n# scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)\n\nwith open(\"acc.txt\",\"w\") as f:\n with open(\"test.txt\",\"w\") as f2:\n for epoch in range(EPOCH):\n print('\\nEpoch: %d' % (epoch + 1))\n sum_loss = 0.0\n correct = 0.0\n total = 0.0\n # scheduler.step()\n for i, data in enumerate(dataloders):\n length = len(dataloders)\n inputs, labels=data\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n sum_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += predicted.eq(labels.data).cpu().sum()\n accuracy=100. * int(correct) / total\n print('train:[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '\n % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), accuracy))\n f.write('[ epoch: %d , iter: %d ] Loss: %.03f | Acc: %.3f '\n % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), accuracy))\n f.write('\\n')\n f.flush()\n with torch.no_grad(): \n correct = 0 \n total = 0 \n for data in testloders: \n net.eval() \n images, labels = data \n images, labels = images.to(device), labels.to(device) \n outputs = net(images) # 取得分最高的那个类 (outputs.data的索引号) \n _, predicted = torch.max(outputs.data, 1) \n total += labels.size(0) \n correct += (predicted == labels).sum() \n acc = 100. 
* correct / total # 将每次测试结果实时写入acc.txt文件中 \n print('测试分类准确率为:%.3f%%' % (100 * correct/total)) \n f2.write(\"EPOCH= %d ,Accuracy= %.3f \" % (epoch + 1, acc)) \n f2.write('\\n') \n f2.flush()\n\n\n \n \n \n \n \n \n\n"
},
{
"alpha_fraction": 0.42816436290740967,
"alphanum_fraction": 0.5589668154716492,
"avg_line_length": 43.404083251953125,
"blob_id": "bdc5c9d5cdebe2da779db5457aadc5abdde1ea4b",
"content_id": "d554d3ab3931d17959ea7e8ba613ede687cb4c2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11705,
"license_type": "no_license",
"max_line_length": 599,
"num_lines": 245,
"path": "/SVM/kmeans.py",
"repo_name": "shu-YangLiu/PRcourse_Project",
"src_encoding": "UTF-8",
"text": "from sklearn.cluster import KMeans\nimport numpy as np # We'll be storing our data as numpy arrays\nimport os # For handling directories\nfrom PIL import Image # For handling the images\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg # Plotting\nfrom sklearn.ensemble import RandomForestClassifier\nimport cv2\nfrom sklearn.externals import joblib\nfrom sklearn import svm\nimport pandas as pd\nimport pickle\nimport random\nimport matplotlib.pyplot as plt\n\n\n'''\n使用SEE“手肘模型”分析K-means中K的取值\n'''\n\n\nclass BOW(object):\n def __init__(self, ):\n # 创建一个SIFT对象 用于关键点、描述符提取\n self.sift = cv2.xfeatures2d.SIFT_create()\n\n def sift_descriptor_extractor(self, img_path):\n '''\n 特征提取:提取数据集中每幅图像的特征点,然后提取特征描述符,形成特征数据(如:SIFT或者SURF方法);\n '''\n im = cv2.imread(img_path, cv2.COLOR_BGR2GRAY)\n print(img_path)\n return self.sift.compute(im, self.sift.detect(im))[1]\n\n def Kmeans_test(self):\n inertia = []\n label_pred = []\n meanall = []\n centroids = []\n for j in os.listdir('../img2/'):\n len = 0\n if not j.startswith('.'): # Again avoid hidden folders\n list = os.listdir('../img2/' + j + '/')\n random.shuffle(list)\n for k in list:\n path = '../img2/' + j + '/' + k\n len += 1\n print(path)\n # print(meanall)\n # print(self.sift_descriptor_extractor(path))\n meanall.extend(self.sift_descriptor_extractor(path))\n if len == 30:\n break\n meanall = np.array(meanall)\n print(type(meanall),meanall)\n list=[20,50,100,150,200,300,400,500,600,1000,1500,2000]\n for k in list:\n print(k)\n estimator = KMeans(n_clusters=k) # 构造聚类器\n estimator.fit(meanall) # 聚类\n label_pred.append(estimator.labels_) # 获取聚类标签\n centroids.append(estimator.cluster_centers_) # 获取聚类中心\n inertia.append(estimator.inertia_) # 获取聚类准则的总和\n print(estimator.inertia_)\n\n print(meanall)\n print(inertia)\n plt.plot(list,inertia)\n plt.show()\n\n#\nbow = BOW()\nbow.Kmeans_test()\n# inertia3_6=[2807440981.3381715, 2796506538.2439876, 2780945733.6617894, 2767701851.3814955, 2756937911.376001, 2744607911.439963, 2732137720.1895113, 2719606419.855219, 2711038134.6826506, 2702043412.1143656, 2693115787.155238, 2681774048.351797, 2674119151.492858, 2662191030.5036306, 2651660307.3031583, 2644244066.4507556, 2639959319.1411686, 2626946534.9398866, 2622493752.9224496, 2609989567.435099, 2606021879.907249, 2597840595.3112087, 2590655619.825697, 2584255486.1984296, 2578685428.081598, 2571439774.33185, 2563402479.9795794, 2562275181.183121, 2555074101.112423, 2548234910.6256666]\n# inertia1_2=[4788507000.0, 4318260475.117521, 3961525545.53348, 3797382455.6868644, 3660433407.0924478, 3554802038.714291, 3473910572.145875, 3400778969.5176764, 3334299786.793602, 3269563603.38221, 3206041672.4388824, 3152522364.9192305, 3110867415.5096865, 3071513666.350345, 3038016247.603059, 3002667862.96981, 2975213009.078604, 2949459153.803087, 2926187809.815773]\n# x=range(10,300,10)\n#\n# inertia=[3181334615.7909837, 2835016827.2197914, 2662780609.9666114, 2546392285.332779, 2468323737.532231, 2410843673.5367236, 2357523903.8650603, 2315970989.7643023, 2279071939.4661336, 2246701981.8490033, 2217988072.271117, 2191088909.0531445, 2169469012.1779327, 2147864943.5073023, 2128678996.6706283, 2109305382.4843593, 2090782868.0389545, 2075457014.3925345, 2060492311.933862, 2046981132.4789784, 2031514730.182295, 2020471172.3441532, 2007457383.683257, 1995266104.0765564, 1982893791.0792518, 1973144994.5107062, 1962793472.0599818, 1951410733.8594804, 1943011185.2518618]\n# plt.plot(x,inertia)\n# plt.show()\n\n\n# class BOW(object):\n# def __init__(self, 
):\n# # 创建一个SIFT对象 用于关键点、描述符提取\n# self.sift = cv2.xfeatures2d.SIFT_create()\n#\n# def getData(self, kmean=10):\n# # 创建BOW训练器,指定k-means参数k 把处理好的特征数据全部合并,利用聚类把特征词分为若干类,此若干类的数目由自己设定,每一类相当于一个视觉词汇\n# bow_kmeans_trainer = cv2.BOWKMeansTrainer(kmean)\n#\n# # for i in range(0, 10): # Loop over the ten top-level folders\n# # for j in os.listdir('../o/' + str(i) + '/'):\n# # len = 0\n# # if not j.startswith('.'): # Again avoid hidden folders\n# # for k in os.listdir('../no/0' + str(i) + '/' + j + '/'):\n# # len += 1\n# # path = '../o/0' + str(i) + '/' + j + '/' + k\n# # print(path)\n# # bow_kmeans_trainer.add(self.sift_descriptor_extractor(path))\n# # if len == 10:\n# # break\n# for j in os.listdir('../img2/'):\n# len = 0\n# if not j.startswith('.'): # Again avoid hidden folders\n# list = os.listdir('../img2/' + j + '/')\n# random.shuffle(list)\n# for k in list:\n# path = '../img2/' + j + '/' + k\n# len += 1\n# print(path)\n# bow_kmeans_trainer.add(self.sift_descriptor_extractor(path))\n# if len == 100:\n# break\n#\n# # 进行k-means聚类,返回词汇字典 也就是聚类中心\n# self.voc = bow_kmeans_trainer.cluster()\n#\n# # print( voc.shape)\n#\n# # FLANN匹配 参数algorithm用来指定匹配所使用的算法,可以选择的有LinearIndex、KTreeIndex、KMeansIndex、CompositeIndex和AutotuneIndex,这里选择的是KTreeIndex(使用kd树实现最近邻搜索)\n# flann_params = dict(algorithm=1, tree=5)\n# flann = cv2.FlannBasedMatcher(flann_params, {})\n# # 初始化bow提取器(设置词汇字典),用于提取每一张图像的BOW特征描述\n# self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.sift, flann)\n# self.bow_img_descriptor_extractor.setVocabulary(self.voc)\n# x_data = []\n# y_data = []\n# datacount = 0\n# # 根据bow_img_descriptor_extractor提取特征向量\n# # for i in range(0, 10): # Loop over the ten top-level folders\n# # for j in os.listdir('../new/0' + str(i) + '/'):\n# # if not j.startswith('.'): # Again avoid hidden folders\n# # count = 0 # To tally images of a given gesture\n# # for k in os.listdir('../new/0' + str(i) + '/' + j + '/'):\n# # path = '../new/0' + str(i) + '/' + j + '/' + k\n# # descriptor = self.bow_descriptor_extractor(path, kmean)\n# # x_data.append(descriptor)\n# # count += 1\n# # y_values = np.full((count, 1), lookup[j])\n# # y_data.append(y_values)\n# # datacount += count\n# for j in os.listdir('../img2/'):\n# # len = 0\n# if not j.startswith('.'): # Again avoid hidden folders\n# count = 0\n# for k in os.listdir('../img2/' + j + '/'):\n# path = '../img2/' + j + '/' + k\n# print(path)\n# descriptor = self.bow_descriptor_extractor(path, kmean)\n# x_data.append(descriptor)\n# count += 1\n# y_values = np.full((count, 1), lookup[j])\n# y_data.extend(y_values)\n# datacount += count\n# x_data = np.array(x_data, dtype='float32')\n# y_data = np.array(y_data).reshape(datacount)\n# print(x_data.shape)\n# print(y_data.shape)\n# return x_data, y_data\n#\n# def sift_descriptor_extractor(self, img_path):\n# '''\n# 特征提取:提取数据集中每幅图像的特征点,然后提取特征描述符,形成特征数据(如:SIFT或者SURF方法);\n# '''\n# im = cv2.imread(img_path, cv2.COLOR_BGR2GRAY)\n# print(img_path)\n# return self.sift.compute(im, self.sift.detect(im))[1]\n#\n# def bow_descriptor_extractor(self, img_path, kmean):\n# '''\n# 提取图像的BOW特征描述(即利用视觉词袋量化图像特征)\n# '''\n# im = cv2.imread(img_path, cv2.COLOR_BGR2GRAY)\n# return self.bow_img_descriptor_extractor.compute(im, self.sift.detect(im)).reshape(kmean)\n#\n# def save(self):\n# pickle.dump(self.voc, open('db_real_30', 'wb'))\n#\n# def load(self):\n# with open('db_real_30', 'rb') as f:\n# voc = pickle.load(f)\n# # FLANN匹配 
参数algorithm用来指定匹配所使用的算法,可以选择的有LinearIndex、KTreeIndex、KMeansIndex、CompositeIndex和AutotuneIndex,这里选择的是KTreeIndex(使用kd树实现最近邻搜索)\n# flann_params = dict(algorithm=1, tree=5)\n# flann = cv2.FlannBasedMatcher(flann_params, {})\n# # 初始化bow提取器(设置词汇字典),用于提取每一张图像的BOW特征描述\n# self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.sift, flann)\n# self.bow_img_descriptor_extractor.setVocabulary(voc)\n# # self.bow_img_descriptor_extractor = cv2.BOWImgDescriptorExtractor(self.sift, flann)\n# # self.bow_img_descriptor_extractor.setVocabulary(self.voc)\n#\n# def getTest(self):\n# inertia = []\n# label_pred = []\n# meanall = []\n# centroids = []\n# label = -1\n# y = []\n# for j in os.listdir('../lyImg/'):\n# len = 0\n# label += 1\n# if not j.startswith('.'): # Again avoid hidden folders\n# list = os.listdir('../lyImg/' + j + '/')\n# random.shuffle(list)\n# for k in list:\n# path = '../lyImg/' + j + '/' + k\n# len += 1\n# print(path)\n# # print(meanall)\n# # print(self.sift_descriptor_extractor(path))\n# meanall.append(np.array(self.bow_descriptor_extractor(path,30)))\n# y.extend(np.array([label]))\n# if len == 30:\n# break\n# meanall = np.array(meanall)\n# y = np.array(y)\n# print(meanall.shape, meanall)\n# print(y.shape, y)\n# return meanall, y\n#\n#\n# def getLookUp():\n# lookup = dict()\n# reverselookup = dict()\n# count = 0\n# for j in os.listdir('../img2/'):\n# if not j.startswith('.'): # If running this code locally, this is to\n# # ensure you aren't reading in hidden folders\n# lookup[j] = count\n# reverselookup[count] = j\n# count = count + 1\n# return lookup, reverselookup\n#\n#\n# lookup, reverselookup = getLookUp()\n# print(lookup, reverselookup)\n# bow = BOW()\n# bow.load()\n# print(bow)\n# X_test, y_test = bow.getTest()\n# os.getcwd()\n# dX = pd.DataFrame(X_test)\n# dY = pd.DataFrame(y_test)\n# dX.to_csv('X_test_30.csv', index=False, header=True)\n# dY.to_csv('Y_test_30.csv', index=False, header=True)\n"
}
] | 15 |
crisdeodates/DJI-Tello_HalloPy | https://github.com/crisdeodates/DJI-Tello_HalloPy | 46bf71777b72b9e389bf648e3d87d38801901e77 | 6b2a9e4000522ad51696c4472ee9362e8af5acd6 | 101422ced4fb05d6602ffef552fdf446ed39e6e7 | refs/heads/master | 2023-08-27T16:05:08.909645 | 2021-11-09T16:32:07 | 2021-11-09T16:32:07 | 362,633,207 | 0 | 0 | MIT | 2021-04-28T23:28:23 | 2021-04-28T23:29:06 | 2021-11-09T17:48:30 | null | [
{
"alpha_fraction": 0.5681611895561218,
"alphanum_fraction": 0.5838873386383057,
"avg_line_length": 39.07863998413086,
"blob_id": "d261966681a47b374e856a4736e88e72679693ca",
"content_id": "23fe2a8978ef28c1edd334212bd8b7cdcfd02474",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 34147,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 852,
"path": "/hallopy/controller.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "\"\"\"Multi class incapsulation implementation. \"\"\"\nimport time\n\nimport av\nimport cv2\nimport logging\nimport numpy as np\nfrom hallopy.icontroller import Icontroller\nfrom hallopy import utils\n\n# Create loggers.\nflags_logger = logging.getLogger('flags_handler')\nframe_logger = logging.getLogger('frame_handler')\nface_processor_logger = logging.getLogger('face_processor_handler')\nback_ground_remover_logger = logging.getLogger('back_ground_remover_handler')\ndetector_logger = logging.getLogger('detector_handler')\nextractor_logger = logging.getLogger('extractor_handler')\ncontroller_logger = logging.getLogger('controller_handler')\nch = logging.StreamHandler()\n# create formatter and add it to the handlers.\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\n# add the handlers to loggers.\nflags_logger.addHandler(ch)\nframe_logger.addHandler(ch)\nface_processor_logger.addHandler(ch)\nback_ground_remover_logger.addHandler(ch)\ndetector_logger.addHandler(ch)\nextractor_logger.addHandler(ch)\ncontroller_logger.addHandler(ch)\n\n\nclass FlagsHandler:\n \"\"\"Simple class for setting flags. \"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger('flags_handler')\n self._key_board_input = None\n self.lifted = False\n self.takeoff_requested = False\n self.landing_requested = False\n self.quit_flag = False\n self.background_capture_required = True\n self.in_home_center = False\n self.is_bg_captured = False\n self.calibrated = False\n self.hand_control = False\n self.make_threshold_thinner = False\n self.make_threshold_thicker = False\n\n @property\n def keyboard_input(self):\n return self._key_board_input\n\n @keyboard_input.setter\n def keyboard_input(self, input_from_key_board):\n \"\"\"State machine. \"\"\"\n if input_from_key_board == 27 and self.lifted is False:\n # press ESC to exit\n self.logger.info('!!!quiting!!!')\n self.quit_flag = True\n\n elif input_from_key_board == 27:\n self.logger.info('!!!cant quit without landing!!!')\n\n elif input_from_key_board == ord('b'):\n # press 'b' to capture the background\n self.calibrated = False\n self.background_capture_required = True\n self.is_bg_captured = True\n self.logger.info('!!!Background Captured!!!')\n\n elif input_from_key_board == ord('t') and self.calibrated is True:\n \"\"\"Take off\"\"\"\n self.logger.info('!!!Take of!!!')\n self.lifted = True\n self.takeoff_requested = True\n\n elif input_from_key_board == ord('l'):\n \"\"\"Land\"\"\"\n self.lifted = False\n self.landing_requested = True\n self.logger.info('!!!Landing!!!')\n\n elif input_from_key_board == ord('c'):\n \"\"\"Control\"\"\"\n if self.hand_control is True:\n self.hand_control = False\n self.logger.info(\"control switched to keyboard\")\n elif self.lifted is True:\n self.logger.info(\"control switched to detected hand\")\n self.hand_control = True\n else:\n self.logger.info(\n \"Drone not in the air, can't change control to hand\")\n\n elif input_from_key_board == ord('z'):\n \"\"\" calibrating Threshold from keyboard \"\"\"\n self.make_threshold_thinner = True\n self.logger.info(\"made threshold thinner\")\n\n elif input_from_key_board == ord('x'):\n \"\"\" calibrating Threshold from keyboard \"\"\"\n self.logger.info(\"made threshold thicker\")\n self.make_threshold_thicker = True\n\n\nclass FrameHandler:\n \"\"\"FrameHandler handel input frame from controller,\n\n and perform some preprocessing.\n \"\"\"\n _input_frame = ... 
# type: np.ndarray\n\n def __init__(self):\n \"\"\"Init preprocessing params. \"\"\"\n self.logger = logging.getLogger('frame_handler')\n self.logger.setLevel(logging.INFO)\n self._cap_region_x_begin = 0.6\n self._cap_region_y_end = 0.6\n self._input_frame = None\n\n @property\n def input_frame(self):\n # Returns the input frame, with drawn ROI on it.\n return self._input_frame\n\n @input_frame.setter\n def input_frame(self, input_frame_from_camera):\n \"\"\"Setter with preprocessing. \"\"\"\n\n try:\n # make sure input is np.ndarray\n assert type(input_frame_from_camera).__module__ == np.__name__\n except AssertionError as error:\n self.logger.exception(error)\n return\n\n self._input_frame = cv2.bilateralFilter(\n input_frame_from_camera, 5, 50, 100) # smoothing filter\n self._input_frame = cv2.flip(input_frame_from_camera, 1)\n self._draw_roi()\n\n def _draw_roi(self):\n \"\"\"Function for drawing the ROI on input frame. \"\"\"\n cv2.rectangle(self._input_frame, (int(self._cap_region_x_begin * self._input_frame.shape[1]) - 20, 0),\n (self._input_frame.shape[1], int(\n self._cap_region_y_end * self._input_frame.shape[0]) + 20),\n (255, 0, 0), 2)\n\n\nclass FaceProcessor:\n \"\"\"FaceProcessor detect & cover faces in preprocessed input_frame. \"\"\"\n _preprocessed_input_frame = ... # type: np.ndarray\n\n def __init__(self):\n self.logger = logging.getLogger('face_processor_handler')\n self.logger.setLevel(logging.INFO)\n self._face_detector = cv2.CascadeClassifier(\n utils.get_full_path('hallopy/config/haarcascade_frontalface_default.xml'))\n self._face_padding_x = 20\n self._face_padding_y = 60\n self._preprocessed_input_frame = None\n\n @property\n def face_covered_frame(self):\n \"\"\"Return a face covered frame. \"\"\"\n return self._preprocessed_input_frame\n\n @face_covered_frame.setter\n def face_covered_frame(self, input_frame_with_faces):\n \"\"\"Function to draw black recs over detected faces.\n\n This function remove eny 'noise' and help detector detecting palm.\n :param input_frame_with_faces (np.ndarray): a frame with faces, that needed to be covered.\n \"\"\"\n\n try:\n # make sure input is np.ndarray\n assert type(input_frame_with_faces).__module__ == np.__name__\n except AssertionError as error:\n self.logger.exception(error)\n return\n\n # Preparation\n self._preprocessed_input_frame = input_frame_with_faces.copy()\n gray = cv2.cvtColor(self._preprocessed_input_frame, cv2.COLOR_BGR2GRAY)\n faces = self._face_detector.detectMultiScale(gray, 1.3, 5)\n\n # Black rectangle over faces to remove skin noises.\n for (x, y, w, h) in faces:\n self._preprocessed_input_frame[y - self._face_padding_y:y + h + self._face_padding_y,\n x - self._face_padding_x:x + w + self._face_padding_x, :] = 0\n\n\nclass BackGroundRemover:\n \"\"\"BackGroundRemover removes background from inputted\n\n (preprocessed and face covered) frame.\n \"\"\"\n _input_frame_with_hand = ... # type: np.ndarray\n\n def __init__(self, flags_handler):\n self.logger = logging.getLogger('back_ground_remover_handler')\n self._cap_region_x_begin = 0.6\n self._cap_region_y_end = 0.6\n self._bg_Sub_Threshold = 50\n self._learning_Rate = 0\n self._bg_model = None\n self._input_frame_with_hand = None\n self.flag_handler = flags_handler\n\n @property\n def detected_frame(self):\n \"\"\"Getter for getting the interest frame, with background removed. 
\"\"\"\n return self._input_frame_with_hand\n\n @detected_frame.setter\n def detected_frame(self, preprocessed_faced_covered_input_frame):\n \"\"\"Function for removing background from input frame. \"\"\"\n if self.flag_handler.background_capture_required is True:\n self._bg_model = cv2.createBackgroundSubtractorMOG2(\n 0, self._bg_Sub_Threshold)\n self.flag_handler.background_capture_required = False\n if self._bg_model is not None:\n fgmask = self._bg_model.apply(\n preprocessed_faced_covered_input_frame, learningRate=self._learning_Rate)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(preprocessed_faced_covered_input_frame, preprocessed_faced_covered_input_frame,\n mask=fgmask)\n self._input_frame_with_hand = res[\n 0:int(\n self._cap_region_y_end * preprocessed_faced_covered_input_frame.shape[0]),\n int(self._cap_region_x_begin * preprocessed_faced_covered_input_frame.shape[\n 1]):\n preprocessed_faced_covered_input_frame.shape[\n 1]] # clip the ROI\n\n\nclass Detector:\n \"\"\"Detector class detect hands contour and center of frame.\n\n Initiated object will receive a preprocessed frame, with detected & covered faces.\n \"\"\"\n _input_frame_with_background_removed = ... # type: np.ndarray\n\n def __init__(self, flags_handler):\n self.logger = logging.getLogger('detector_handler')\n self.flags_handler = flags_handler\n self._threshold = 50\n self._blur_Value = 41\n self.horiz_axe_offset = 60\n\n self._input_frame_with_background_removed = None\n self._detected_out_put = None\n self.raw_input_frame = None\n\n # max_area_contour: the contour of the detected hand.\n self.max_area_contour = None\n # Detected_out_put_center: the center point of the ROI\n self.detected_out_put_center = (0, 0)\n\n @property\n def input_frame_for_feature_extraction(self):\n return self._detected_out_put\n\n @input_frame_for_feature_extraction.setter\n def input_frame_for_feature_extraction(self, input_frame_with_background_removed):\n \"\"\"Function for finding hand contour. 
\"\"\"\n # Preparation\n # Update threshold\n self.raw_input_frame = input_frame_with_background_removed\n if self.flags_handler.make_threshold_thinner is True and self._threshold >= 0:\n self.flags_handler.make_threshold_thinner = False\n self._threshold = self._threshold - 1\n elif self.flags_handler.make_threshold_thicker is True and self._threshold <= 100:\n self.flags_handler.make_threshold_thicker = False\n self._threshold = self._threshold + 1\n\n temp_detected_gray = cv2.cvtColor(\n input_frame_with_background_removed, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(\n temp_detected_gray, (self._blur_Value, self._blur_Value), 0)\n thresh = cv2.threshold(blur, self._threshold,\n 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=2)\n\n # Get the contours.\n contours, hierarchy = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n try:\n # Find the biggest area.\n self.max_area_contour = max(contours, key=cv2.contourArea)\n if self.max_area_contour is None:\n self.max_area_contour = [[(0, 0)]]\n self.detected_out_put_center = self._draw_axes(\n input_frame_with_background_removed)\n except (AttributeError, ValueError) as error:\n self.logger.debug(\n \"something went wrong when Detector object received input_frame!: {}\".format(error))\n\n def _draw_axes(self, detected):\n \"\"\"Function for drawing axes on detected_out_put.\n\n :return detected_out_put_center (point): the center coord' of detected_out_put.\n \"\"\"\n\n # Preparation\n temp_output = detected.copy()\n # np.array are opposite than cv2 row/cols indexing.\n detected_out_put_center = (\n int(temp_output.shape[1] / 2), int(temp_output.shape[0] / 2) + self.horiz_axe_offset)\n horiz_axe_start = (\n 0, int(temp_output.shape[0] / 2) + self.horiz_axe_offset)\n horiz_axe_end = (\n temp_output.shape[1], int(temp_output.shape[0] / 2) + self.horiz_axe_offset)\n\n vertic_y_start = (int(temp_output.shape[1] / 2), 0)\n vertic_y_end = (int(temp_output.shape[1] / 2), temp_output.shape[0])\n\n # draw movement axes.\n cv2.line(temp_output, horiz_axe_start,\n horiz_axe_end, (0, 0, 255), thickness=3)\n cv2.line(temp_output, vertic_y_start,\n vertic_y_end, (0, 0, 255), thickness=3)\n\n self._draw_contours(temp_output)\n self._detected_out_put = temp_output\n return detected_out_put_center\n\n def _draw_contours(self, input_frame_with_background_removed):\n \"\"\"Function for drawing contours of detected hand.\n\n contour color will accordingly to flags.hand_control flag.\n \"\"\"\n hand_color = None\n if self.flags_handler.hand_control is True:\n hand_color = (0, 255, 0)\n else:\n hand_color = (0, 0, 255)\n assert hand_color is not None, self.logger.error(\n \"No flags_handler.hand_control initiated\")\n cv2.drawContours(input_frame_with_background_removed, [\n self.max_area_contour], 0, hand_color, thickness=2)\n\n\nclass Extractor:\n \"\"\"Extractor receives detected object,\n\n saves its 'center_of_frame' and 'max_contour'.\n and perform the following calculations:\n 1. calculate palm center of mass --> palms center coordination.\n 2. calculate distance between palms_center to frame_center.\n 3. find contour extreme points coordination.\n 4. calculate palms rotation.\n 5. calculate top_ext_contour-palm_center max distance.\n \"\"\"\n detector = ... 
# type: Detector\n\n def __init__(self, flags_handler):\n self.logger = logging.getLogger('extractor_handler')\n self.flags_handler = flags_handler\n\n # detector hold: palms contour, frame_center, frame with drawn axes.\n self.detector = None\n # tracker tracks extractor palm point after calibration, using optical_flow\n self.tracker = None\n\n self._detected_hand = None\n self.calib_radius = 10\n\n self.calibration_time = 2\n self.time_captured = None\n\n self.palm_angle_in_degrees = 0\n self.palm_center_point = (0, 0)\n self.max_distance_from_ext_top_point_to_palm_center = 0\n\n self.forward_backward_movement_delta = 30\n self.zero_point = (0, 0)\n self.forward_point = (0, 0)\n self.backward_point = (0, 0)\n\n self.ext_left = (0, 0)\n self.ext_right = (0, 0)\n self.ext_top = (0, 0)\n self.ext_bot = (0, 0)\n\n @property\n def extract(self):\n return self._detected_hand\n\n @extract.setter\n def extract(self, detector):\n assert isinstance(detector, Detector), self.logger.error(\n \"input is not Detector object!\")\n self.detector = detector\n self._detected_hand = detector._detected_out_put\n # Calculate palm center of mass --> palms center coordination.\n self.palm_center_point = self._hand_center_of_mass(\n detector.max_area_contour)\n\n if self.flags_handler.calibrated is False:\n self.logger.info(\"calibrating...\")\n # Determine the most extreme points along the contour.\n if detector.max_area_contour is not None:\n c = detector.max_area_contour\n self.ext_left = tuple(c[c[:, :, 0].argmin()][0])\n self.ext_right = tuple(c[c[:, :, 0].argmax()][0])\n self.ext_top = tuple(c[c[:, :, 1].argmin()][0])\n self.ext_bot = tuple(c[c[:, :, 1].argmax()][0])\n\n # Get max distance.\n if self.ext_top[1] == 0:\n self.max_distance_from_ext_top_point_to_palm_center = 0\n else:\n temp_distance = self.palm_center_point[1] - self.ext_top[1]\n if temp_distance > self.max_distance_from_ext_top_point_to_palm_center:\n self.max_distance_from_ext_top_point_to_palm_center = temp_distance\n\n if self.tracker is not None:\n self.tracker = None\n\n elif self.flags_handler.calibrated is True:\n self.logger.info(\"calibrated!\")\n\n if self.tracker is None:\n # Initiate tracker.\n points_to_track = [self.ext_top,\n self.palm_center_point] # [self.ext_left, self.ext_right, self.ext_bot, self.ext_top, self.palm_center_point]\n self.tracker = Tracker(\n self.flags_handler, points_to_track, self.detector.raw_input_frame)\n\n else:\n # Use tracker to track.\n points_to_track = self.tracker.points_to_track\n self.tracker.track(\n points_to_track, self.detector.raw_input_frame)\n points_to_draw = self.tracker.points_to_track\n try:\n # Get only the contours middle-finger coordination.\n self.ext_top = tuple(\n points_to_draw[points_to_draw[:, :, 1].argmin()][0])\n except ValueError:\n self.logger.debug(\"points_to_draw is empty\")\n # Calculate palms angle.\n self._calculate_palm_angle()\n # Calculate distance between palms_center to frame_center.\n self._calculate_palm_distance_from_center()\n\n def get_drawn_extreme_contour_points(self):\n \"\"\"Draw extreme contours points on a copy\n\n draw the outline of the object, then draw each of the\n extreme points, where the left-most is red, right-most\n is green, top-most is blue, and bottom-most is teal\n\n :returns image: image with drawn extreme contours point.\n \"\"\"\n if self._detected_hand is not None:\n image = self._detected_hand.copy()\n\n if self.flags_handler.calibrated is True:\n cv2.circle(image, self.detector.detected_out_put_center,\n self.calib_radius, (0, 
255, 0), thickness=2)\n elif self.flags_handler.in_home_center is True:\n cv2.circle(image, self.detector.detected_out_put_center,\n self.calib_radius, (0, 255, 0), thickness=-1)\n else:\n cv2.circle(image, self.detector.detected_out_put_center,\n self.calib_radius, (0, 0, 255), thickness=2)\n\n self._draw_forward_backward_line(image)\n self._draw_palm_rotation(image)\n\n cv2.circle(image, (int(self.ext_top[0]), int(\n self.ext_top[1])), 8, (255, 0, 0), -1)\n cv2.circle(image, self.palm_center_point,\n 8, (255, 255, 255), thickness=-1)\n\n return image\n\n def _draw_forward_backward_line(self, image):\n \"\"\"Draw forward/backward line. \"\"\"\n temp_delta = int(\n self.max_distance_from_ext_top_point_to_palm_center - self.max_distance_from_ext_top_point_to_palm_center / 5)\n self.zero_point = (\n self.ext_top[0], self.palm_center_point[1] - temp_delta)\n self.forward_backward_movement_delta = int(\n self.max_distance_from_ext_top_point_to_palm_center / 3)\n self.forward_point = (\n self.zero_point[0], self.zero_point[1] + self.forward_backward_movement_delta)\n self.backward_point = (\n self.zero_point[0], self.zero_point[1] - self.forward_backward_movement_delta)\n cv2.line(image, (int(self.forward_point[0]),int(self.forward_point[1])),\n (int(self.zero_point[0]),int(self.zero_point[1])), (0, 255, 0), thickness=5)\n cv2.line(image, (int(self.zero_point[0]),int(self.zero_point[1])), (int(self.backward_point[0]),int(self.backward_point[1])),\n (0, 0, 255), thickness=5)\n\n def _draw_palm_rotation(self, image):\n \"\"\"To draw the ellipse, we need to pass several arguments.\n\n One argument is the center location (x,y).\n Next argument is axes lengths (major axis length, minor axis length).\n angle is the angle of rotation of ellipse in anti-clockwise direction.\n startAngle and endAngle denotes the starting and ending of ellipse arc measured in clockwise direction from major axis.\n i.e. giving values 0 and 360 gives the full ellipse. For more details, check the documentation of cv2.ellipse().\n \"\"\"\n center_location = self.palm_center_point\n axis_length = int(self.max_distance_from_ext_top_point_to_palm_center)\n starting_angle = 270\n end_angle = 270 + (90 - self.palm_angle_in_degrees)\n cv2.ellipse(image, center_location, (axis_length, axis_length),\n 0, starting_angle, end_angle, (255, 0, 255), 3)\n\n def _hand_center_of_mass(self, hand_contour):\n \"\"\"Find contours center of mass. \"\"\"\n M = cv2.moments(hand_contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n return cX, cY\n\n def _calculate_palm_angle(self):\n \"\"\"Function for calculating palms angle. 
\"\"\"\n\n angelPointHelper = [self.palm_center_point[0] + 10,\n self.palm_center_point[\n 1]] # helper to calculate angle between middle finger and center of palm\n\n try:\n angle = self.simple_angle_calculator(\n self.ext_top, angelPointHelper, self.palm_center_point)\n self.palm_angle_in_degrees = np.rad2deg(angle)\n except ZeroDivisionError:\n pass\n\n def simple_angle_calculator(self, start, end, far):\n \"\"\"Simple angle calculator.\n\n :returns angle: the angle in radians.\n \"\"\"\n\n a = np.math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)\n b = np.math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)\n c = np.math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)\n angle = np.math.acos((b ** 2 + c ** 2 - a ** 2) /\n (2 * b * c)) # cosine theorem\n return angle\n\n def _calculate_palm_distance_from_center(self):\n \"\"\"Simple radius calculator. \"\"\"\n frameCenter = self.detector.detected_out_put_center\n cX, cY = self.palm_center_point\n\n radius = np.math.sqrt((cX - frameCenter[0]) ** 2 + (\n cY - frameCenter[1]) ** 2)\n\n if radius <= self.calib_radius:\n # Palm is centered with self._detected_frame.\n if self.flags_handler.in_home_center is False:\n # First time entering into calib_circle, start timer.\n self.time_captured = time.time()\n self.flags_handler.in_home_center = True\n\n elif time.time() >= self.time_captured + self.calibration_time:\n # If inside calib_circle more than self.calibration_time, then set calibrated to True.\n self.flags_handler.calibrated = True\n else:\n self.flags_handler.in_home_center = False\n\n\nclass Tracker:\n \"\"\"Tracker receives Extractor object, and track extracted points. \"\"\"\n\n def __init__(self, flags_handler, points_to_track, input_image):\n self.logger = logging.getLogger('tracker_handler')\n self.flags_handler = flags_handler\n self.points_to_track = points_to_track\n\n self._input_image = input_image\n self._old_gray = None\n self._p0 = None\n\n self.lk_params = dict(winSize=(15, 15),\n maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n self.track(self.points_to_track, self._input_image)\n\n def track(self, points_to_track, input_image):\n if self._old_gray is None:\n self._old_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n\n points_reshaped = [list(elem) for elem in points_to_track]\n self.logger.debug(\"points_to_track: {}\".format(points_reshaped))\n self._p0 = np.array(\n points_reshaped, dtype=np.float32).reshape(-1, 1, 2)\n\n # Capture current frame.\n frame_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n self._calculate_optical_flow(self._old_gray, frame_gray, self._p0)\n\n # Update tracking points.\n self.points_to_track = self._p0\n\n def _calculate_optical_flow(self, old_gray, frame_gray, p0):\n \"\"\"This function tracks the edge of the Middle finger.\n\n points for tracking:\n expected_ext_left\n expected_ext_right\n expected_ext_top\n expected_ext_bot\n palm_center_point\n\n :param old_gray: old frame, gray scale\n :param frame_gray: current frame\n :return: p0- updated tracking point,\n\n \"\"\"\n # Calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(\n old_gray, frame_gray, p0, None, **self.lk_params)\n if p1 is None:\n good_new = p0[st == 1]\n else:\n good_new = p1[st == 1]\n\n # Now update the previous frame and previous points.\n self._old_gray = frame_gray.copy()\n self._p0 = good_new.reshape(-1, 1, 2)\n\n\nclass Controller(Icontroller):\n \"\"\"Controller class holds all elements relevant for hand features 
extracting.\n\n :param icontroller.Icontroller: implemented interface\n \"\"\"\n\n def __init__(self, drone=None):\n \"\"\"Init a controller object. \"\"\"\n self.logger = logging.getLogger('controller_handler')\n self.move_up = 0\n self.move_left = 0\n self.move_right = 0\n self.move_down = 0\n self.move_forward = 0\n self.move_backward = 0\n self.rotate_left = 0\n self.rotate_right = 0\n\n # Initiate inner objects.\n self.flags_handler = FlagsHandler()\n self.frame_handler = FrameHandler()\n self.face_processor = FaceProcessor()\n self.back_ground_remover = BackGroundRemover(self.flags_handler)\n self.detector = Detector(self.flags_handler)\n self.extractor = Extractor(self.flags_handler)\n\n # Get initiated drone object\n self.drone = drone\n\n def start(self):\n \"\"\"Function for starting image pipe processing. \"\"\"\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n\n # cv2.namedWindow('Drone video stream')\n # Init video stream buffer.\n # container = av.open(self.drone.get_video_stream())\n # skip first 300 frames\n frame_skip = 300\n\n while self.flags_handler.quit_flag is False:\n # image = None\n # for frame in container.decode(video=0):\n # if 0 < frame_skip:\n # frame_skip = frame_skip - 1\n # continue\n # start_time = time.time()\n # image = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)\n #\n # frame_skip = int((time.time() - start_time) / frame.time_base)\n\n ret, frame = camera.read()\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n self.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n self.face_processor.face_covered_frame = self.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n self.back_ground_remover.detected_frame = self.face_processor.face_covered_frame\n # 4. Detect a hand.\n self.detector.input_frame_for_feature_extraction = self.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n self.extractor.extract = self.detector\n\n inner_image = self.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = self.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n self.get_drone_commands()\n\n self.flags_handler.keyboard_input = cv2.waitKey(1)\n\n if self.drone is not None:\n self.drone.quit()\n camera.release()\n cv2.destroyWindow('Controller')\n\n def get_up_param(self):\n \"\"\"Return up parameter (int between 0..100). \"\"\"\n temp_move_up = self.detector.detected_out_put_center[1] - \\\n self.extractor.palm_center_point[1]\n self.move_up = temp_move_up\n if self.move_up <= 0:\n return 0\n return self.move_up if self.move_up <= 100 else 100\n\n def get_down_param(self):\n \"\"\"Return down parameter (int between 0..100). \"\"\"\n temp_move_down = self.extractor.palm_center_point[1] - \\\n self.detector.detected_out_put_center[1]\n self.move_down = temp_move_down\n if self.move_down < 0:\n return 0\n return self.move_down if self.move_down <= 100 else 100\n\n def get_left_param(self):\n \"\"\"Return left parameter (int between 0..100). 
\"\"\"\n temp_move_left = self.detector.detected_out_put_center[0] - \\\n self.extractor.palm_center_point[0]\n self.move_left = temp_move_left\n if self.move_left < 0:\n return 0\n return self.move_left if self.move_left <= 100 else 100\n\n def get_right_param(self):\n \"\"\"Return right parameter (int between 0..100). \"\"\"\n temp_move_right = self.extractor.palm_center_point[0] - \\\n self.detector.detected_out_put_center[0]\n self.move_right = temp_move_right\n if self.move_right < 0:\n return 0\n return self.move_right if self.move_right <= 100 else 100\n\n def get_rotate_left_param(self):\n \"\"\"Return rotate left parameter (int between 0..100). \"\"\"\n temp_rotate_left = self.extractor.palm_angle_in_degrees - 90\n self.rotate_left = temp_rotate_left\n if self.rotate_left < 0:\n return 0\n return self.rotate_left if self.rotate_left <= 100 else 100\n\n def get_rotate_right_param(self):\n \"\"\"Return rotate right parameter (int between 0..100). \"\"\"\n temp_rotate_right = 90 - self.extractor.palm_angle_in_degrees\n self.rotate_right = temp_rotate_right\n if self.rotate_right < 0:\n return 0\n return self.rotate_right if self.rotate_right <= 100 else 100\n\n def get_forward_param(self):\n \"\"\"Return move forward parameter (int between 0..100). \"\"\"\n temp_forward_param = self.extractor.ext_top[1] - \\\n self.extractor.zero_point[1]\n self.move_forward = temp_forward_param\n if self.move_forward < 0:\n return 0\n return self.move_forward if self.move_forward <= 100 else 100\n\n def get_backward_param(self):\n \"\"\"Return move backward parameter (int between 0..100). \"\"\"\n temp_backward_param = self.extractor.zero_point[1] - \\\n self.extractor.ext_top[1]\n self.move_backward = temp_backward_param\n if self.move_backward < 0:\n return 0\n return self.move_backward if self.move_backward <= 100 else 100\n\n def get_drone_commands(self):\n try:\n # Send commands to drone\n if self.flags_handler.hand_control is False:\n # Make drone hover.\n self.drone.left(0)\n self.drone.right(0)\n self.drone.up(0)\n self.drone.down(0)\n self.drone.counter_clockwise(0)\n self.drone.clockwise(0)\n self.drone.forward(0)\n self.drone.backward(0)\n elif self.flags_handler.hand_control is True:\n # Send drone commands.\n if self.flags_handler.in_home_center is False:\n # Send right_X and right_Y movements only when out of safety circle.\n left = self.get_left_param()\n if left != 0:\n self.drone.left(left)\n right = self.get_right_param()\n if right != 0:\n self.drone.right(right)\n up = self.get_up_param()\n if up != 0:\n self.drone.up(up)\n down = self.get_down_param()\n if down != 0:\n self.drone.down(down)\n\n counter_clockwise = self.get_rotate_left_param()\n if counter_clockwise != 0:\n self.drone.counter_clockwise(counter_clockwise)\n clockwise = self.get_rotate_right_param()\n if clockwise != 0:\n self.drone.clockwise(clockwise)\n\n forward = self.get_forward_param()\n if forward != 0:\n self.drone.forward(forward)\n backward = self.get_backward_param()\n if backward != 0:\n self.drone.backward(backward)\n\n if self.flags_handler.takeoff_requested is True:\n # Takeoff requested.\n self.drone.takeoff()\n time.sleep(3)\n self.flags_handler.takeoff_requested = False\n elif self.flags_handler.landing_requested is True:\n # Landing requested.\n self.drone.land()\n time.sleep(3)\n self.flags_handler.landing_requested = False\n except TypeError as error:\n self.logger.error(error)\n\n\nif __name__ == '__main__':\n test = Controller()\n print(test.get_up_param())\n"
},
{
"alpha_fraction": 0.527856171131134,
"alphanum_fraction": 0.5630664229393005,
"avg_line_length": 37.46285629272461,
"blob_id": "0fbb297b7d6b75ba66f7eaec6d59eb8b302605b0",
"content_id": "4a07bc91545eb2001c7957ac2bdf1058054766e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6735,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 175,
"path": "/util/image_comp_tool.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "# import the necessary packages\nfrom skimage.measure import compare_ssim\nimport numpy as np\nimport cv2\nfrom hallopy import utils\n\n\nclass ImageTestTool:\n \"\"\"This class contain tools that helps test functionality\"\"\"\n\n @staticmethod\n def compare_imaged(img1, img2):\n \"\"\"This function compare 2 images.\n\n Return SSIM: Represents the structural similarity index between the two input images.\n This value can fall into the range [-1, 1] with a value of one being a “perfect match”.\n \"\"\"\n\n # load the two input images\n imageA = img1\n imageB = img2\n\n # convert the images to grayscale\n grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)\n grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)\n\n # compute the Structural Similarity Index (SSIM) between the two\n # images, ensuring that the difference image is returned\n (score, diff) = compare_ssim(grayA, grayB, full=True)\n # diff = (diff * 255).astype(\"uint8\")\n return score\n # print(\"SSIM: {}\".format(score))\n #\n # # threshold the difference image, followed by finding contours to\n # # obtain the regions of the two input images that differ\n # thresh = cv2.threshold(diff, 0, 255,\n # cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n # cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n # cv2.CHAIN_APPROX_SIMPLE)\n # cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n #\n # # loop over the contours\n # for c in cnts:\n # # compute the bounding box of the contour and then draw the\n # # bounding box on both input images to represent where the two\n # # images differ\n # (x, y, w, h) = cv2.boundingRect(c)\n # cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)\n # cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)\n #\n # # show the output images\n # cv2.imshow(\"Original\", imageA)\n # cv2.imshow(\"Modified\", imageB)\n # cv2.imshow(\"Diff\", diff)\n # cv2.imshow(\"Thresh\", thresh)\n # cv2.waitKey(0)\n\n @staticmethod\n def detect_faces(img):\n \"\"\"Function for detecting faces.\n\n :returns faces: array with detected faces coordination's.\n \"\"\"\n\n face_detector = cv2.CascadeClassifier(utils.get_full_path('hallopy/config/haarcascade_frontalface_default.xml'))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return face_detector.detectMultiScale(gray, 1.3, 5)\n\n @staticmethod\n def draw_black_recs(img, obj_coord):\n # Black rectangle over faces to remove skin noises.\n for (x, y, w, h) in obj_coord:\n img[y:y + h, x:x + w, :] = 0\n\n @staticmethod\n def clip_roi(img, roi):\n clipped = img[0:int(roi['cap_region_y_end'] * img.shape[0]),\n int(roi['cap_region_x_begin'] * img.shape[1]):img.shape[1]] # clip the ROI\n return clipped\n\n @staticmethod\n def get_max_area_contour(input_image):\n # Get the contours.\n expected_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(expected_gray, (41, 41), 0)\n thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=2)\n _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Find the biggest area\n try:\n if len(contours) > 0:\n max_area_contour = max(contours, key=cv2.contourArea)\n return max_area_contour\n except ValueError as error:\n print(error)\n\n @staticmethod\n def get_contour_area(contour):\n return cv2.contourArea(contour)\n\n @staticmethod\n def get_center_of_mass(contour):\n \"\"\"Find contours center of mass. 
\"\"\"\n M = cv2.moments(contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n return cX, cY\n\n @staticmethod\n def get_middle_finger_edge_coord(contour):\n \"\"\"Function for calculating middle finger edge coordination.\n :type contour: collection.iter\n \"\"\"\n\n temp_y = 1000\n for point in contour: # find highest point in contour, and track that point\n if point[0][1] < temp_y:\n temp_y = point[0][1]\n\n return point[0][0], point[0][1]\n\n @staticmethod\n def get_contour_extreme_points(contour):\n c = contour\n try:\n # determine the most extreme points along the contour\n extLeft = tuple(c[c[:, :, 0].argmin()][0])\n extRight = tuple(c[c[:, :, 0].argmax()][0])\n extTop = tuple(c[c[:, :, 1].argmin()][0])\n extBot = tuple(c[c[:, :, 1].argmax()][0])\n except TypeError as error:\n extLeft = 0, 0\n extRight = 0, 0\n extTop = 0, 0\n extBot = 0, 0\n\n return extLeft, extRight, extTop, extBot\n\n @staticmethod\n def draw_contours(image, contours):\n cv2.drawContours(image, [contours], -1, (0, 255, 255), 2)\n\n @staticmethod\n def draw_tracking_points(image, points):\n\n # draw the outline of the object, then draw each of the\n # extreme points, where the left-most is red, right-most\n # is green, top-most is blue, and bottom-most is teal\n # determine the most extreme points along the contour\n c = points.reshape(-1, 1, 2)\n if points.size > 0:\n # only ext_contour points have been given.\n # ext_left = tuple(c[c[:, :, 0].argmin()][0])\n # ext_right = tuple(c[c[:, :, 0].argmax()][0])\n ext_top = tuple(c[c[:, :, 1].argmin()][0])\n ext_bot = tuple(c[c[:, :, 1].argmax()][0])\n # palm_center = points[4]\n # cv2.circle(image, ext_left, 8, (0, 0, 255), -1)\n # cv2.putText(image,'ext_left',ext_left, cv2.FONT_HERSHEY_COMPLEX, .5, (0, 0, 255))\n\n # cv2.circle(image, ext_right, 8, (0, 255, 0), -1)\n # cv2.putText(image,'ext_right',ext_right, cv2.FONT_HERSHEY_COMPLEX, .5, (0, 255, 0))\n\n cv2.circle(image, ext_top, 8, (255, 0, 0), -1)\n cv2.putText(image, 'ext_top', ext_top, cv2.FONT_HERSHEY_COMPLEX, .5, (255, 0, 0))\n\n cv2.circle(image, ext_bot, 8, (255, 255, 0), -1)\n cv2.putText(image, 'ext_bot', ext_bot, cv2.FONT_HERSHEY_COMPLEX, .5, (255, 255, 0))\n # cv2.circle(image, palm_center, 8, (255, 255, 255), thickness=-1)\n"
},
{
"alpha_fraction": 0.674369752407074,
"alphanum_fraction": 0.6796218752861023,
"avg_line_length": 31.827587127685547,
"blob_id": "ca0f4b66886985da6031d13fa676b2be76a0c4b4",
"content_id": "ce30578d799dd7d6d4ec0d0b92a8b3da599c9e43",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 952,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 29,
"path": "/tests/test_faceProcessor.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nimport logging\n\nfrom hallopy.controller import FaceProcessor\nfrom hallopy import utils\nfrom util.image_comp_tool import ImageTestTool\n\n\nclass TestFaceProcessor:\n def test_face_covered_frame(self):\n \"\"\"Test if faces are detected and covered. \"\"\"\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/face_and_hand.jpg')\n test_image = cv2.imread(test_path)\n\n expected = test_image.copy()\n expected_faces = ImageTestTool.detect_faces(expected)\n ImageTestTool.draw_black_recs(expected, expected_faces)\n\n face_processor = FaceProcessor()\n face_processor.logger.setLevel(logging.DEBUG)\n # Insert image with face.\n face_processor.face_covered_frame = expected\n\n # run\n ssim = ImageTestTool.compare_imaged(face_processor.face_covered_frame, expected)\n # print(\"SSIM: {}\".format(ssim))\n assert ssim >= 0.93\n"
},
{
"alpha_fraction": 0.6561780571937561,
"alphanum_fraction": 0.6699923276901245,
"avg_line_length": 37.32352828979492,
"blob_id": "8a389a6f6684c37a5fbd4b674a506e4c7a63400c",
"content_id": "53479012a3d38671b83053922bc6198a4c4ddc52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1307,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 34,
"path": "/tests/test_frameHandler.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import logging\nimport cv2\nimport numpy as np\nfrom hallopy.controller import FrameHandler\nfrom hallopy import utils\nfrom util.image_comp_tool import ImageTestTool\n\n\nclass TestFrameHandler:\n \"\"\"TestFrameHandler tests FrameHandler functionality. \"\"\"\n\n def test_input_frame(self):\n \"\"\"Test if input frame preprocessed correctly. \"\"\"\n\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/face_and_hand.jpg')\n test_image = cv2.imread(test_path)\n # Because image loaded from local, and not received from web-cam, a flip is needed,\n # inside frame_handler, a frame is supposed to be received from web-cam, hence it is flipped after receiving it.\n test_image = cv2.flip(test_image, 1) # type: np.ndarray\n\n expected = test_image.copy()\n expected = cv2.bilateralFilter(expected, 5, 50, 100) # smoothing filter\n expected = cv2.flip(expected, 1)\n\n frame_handler = FrameHandler()\n frame_handler.logger.setLevel(logging.DEBUG)\n\n # run\n # range [-1, 1] with a value of one being a “perfect match”.\n frame_handler.input_frame = test_image\n ssim = ImageTestTool.compare_imaged(frame_handler.input_frame, expected)\n # print(\"SSIM: {}\".format(ssim))\n assert ssim >= 0.95\n"
},
{
"alpha_fraction": 0.5810472965240479,
"alphanum_fraction": 0.5900550484657288,
"avg_line_length": 40.31766891479492,
"blob_id": "08f5401b9a3964e5c5c0f9c077460c7ef86355d5",
"content_id": "b7bb833addd7a5ed75799319761c9f9b6d660ef6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21981,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 532,
"path": "/tests/test_controller.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import cv2\n\nfrom hallopy.controller import Controller\nimport tellopy\n\n\nclass TestController:\n\n @staticmethod\n def handler(event, sender, data, **args):\n \"\"\"Drone events handler, for testing. \"\"\"\n drone_handler = sender\n if event is drone_handler.EVENT_FLIGHT_DATA:\n print(data)\n\n @staticmethod\n def init_drone():\n \"\"\"Drone initiation function for testing. \"\"\"\n drone = tellopy.Tello()\n\n try:\n drone.subscribe(drone.EVENT_FLIGHT_DATA, TestController.handler)\n drone.connect()\n drone.wait_for_connection(60.0)\n\n except Exception as ex:\n print(ex)\n drone.quit()\n return None\n return drone\n\n def test_controller_initiation(self):\n \"\"\"Test if controller params initiated with 0. \"\"\"\n\n # setup\n controller = Controller()\n\n # run\n assert controller.get_up_param() == 0\n assert controller.get_down_param() == 0\n assert controller.get_left_param() == 0\n assert controller.get_right_param() == 0\n assert controller.get_forward_param() == 0\n assert controller.get_backward_param() == 0\n assert controller.get_rotate_left_param() == 0\n assert controller.get_rotate_right_param() == 0\n\n def test_start(self):\n \"\"\"Test if final image created correctly.\n\n :except final_image: a image with ROI drawn on it and the detected hand.\n \"\"\"\n\n # setup\n controller = Controller()\n\n # run\n controller.start()\n\n def test_move_left(self):\n \"\"\"Test if drone moves left properly.\n\n this test test if controller.move_left is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. 
Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # For testing.\n move_left = controller.get_left_param()\n controller.logger.debug(\"move_left: {}\".format(move_left))\n if move_left < 0:\n assert False\n break\n elif move_left > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_move_right(self):\n \"\"\"Test if drone moves right properly.\n\n this test test if controller.move_right is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # For testing.\n move_right = controller.get_right_param()\n controller.logger.debug(\"move_right: {}\".format(move_right))\n if move_right < 0:\n assert False\n break\n elif move_right > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_move_up(self):\n \"\"\"Test if drone moves up properly.\n\n this test test if controller.move_up is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. 
Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # For testing.\n move_up = controller.get_up_param()\n controller.logger.debug(\"move_up: {}\".format(move_up))\n if move_up < 0:\n assert False\n break\n elif move_up > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_move_down(self):\n \"\"\"Test if drone moves down properly.\n\n this test test if controller.move_down is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # For testing.\n move_down = controller.get_down_param()\n controller.logger.debug(\"move_down: {}\".format(move_down))\n if move_down < 0:\n assert False\n break\n elif move_down > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_rotate_left(self):\n \"\"\"Test if drone rotates left properly.\n\n this test test if controller.rotate_left is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. 
Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # Testing.\n rotate_left = controller.get_rotate_left_param()\n controller.logger.debug(\"rotate_left: {}\".format(rotate_left))\n if rotate_left < 0:\n assert False\n break\n elif rotate_left > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_rotate_right(self):\n \"\"\"Test if drone rotates right properly.\n\n this test test if controller.rotate_right is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # Testing.\n rotate_right = controller.get_rotate_right_param()\n controller.logger.debug(\"rotate_right: {}\".format(rotate_right))\n if rotate_right < 0:\n assert False\n break\n elif rotate_right > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_move_forward(self):\n \"\"\"Test if drone moves forward properly.\n\n this test test if controller.move_forward is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. 
Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # Testing.\n move_forward = controller.get_forward_param()\n controller.logger.debug(\"move_forward: {}\".format(move_forward))\n if move_forward < 0:\n assert False\n break\n elif move_forward > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_move_backward(self):\n \"\"\"Test if drone moves backward properly.\n\n this test test if controller.move_backward is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # Testing.\n move_backward = controller.get_backward_param()\n controller.logger.debug(\"move_backward: {}\".format(move_backward))\n if move_backward < 0:\n assert False\n break\n elif move_backward > 100:\n assert False\n break\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n\n def test_all_movements(self):\n \"\"\"Test if drone moves properly.\n\n this test test if controller is corresponding to detected hand movements.\n \"\"\"\n\n # setup\n controller = Controller()\n controller.logger.setLevel('DEBUG')\n\n # run\n camera = cv2.VideoCapture(0)\n cv2.namedWindow('Controller')\n while controller.flags_handler.quit_flag is False:\n ret, frame = camera.read()\n\n # A good practice is to mock all this processing in pipe.\n # Controller processing pipe:\n # 1. Draw ROI on frame.\n controller.frame_handler.input_frame = frame\n # 2. Cover faces, to remove detection noises.\n controller.face_processor.face_covered_frame = controller.frame_handler.input_frame\n # 3. 
Remove background from a covered-faces-frame.\n controller.back_ground_remover.detected_frame = controller.face_processor.face_covered_frame\n # 4. Detect a hand.\n controller.detector.input_frame_for_feature_extraction = controller.back_ground_remover.detected_frame\n # 5. Extract features, and track detected hand\n controller.extractor.extract = controller.detector\n\n inner_image = controller.extractor.get_drawn_extreme_contour_points()\n if inner_image is not None:\n # Draw detected hand on outer image.\n outer_image = controller.frame_handler.input_frame\n outer_image[0: inner_image.shape[0],\n outer_image.shape[1] - inner_image.shape[1]: outer_image.shape[1]] = inner_image\n cv2.imshow('Controller', outer_image)\n\n # Testing.\n controller.flags_handler.hand_control = True\n controller.get_drone_commands()\n rotate_right = controller.rotate_right\n rotate_left = controller.rotate_left\n move_right = controller.move_right\n move_left = controller.move_left\n move_down = controller.move_down\n move_up = controller.move_up\n move_forward = controller.move_forward\n move_backward = controller.move_backward\n controller.logger.debug(\n \"move_backward: {}, move_forward: {}, move_up: {}, move_down: {}, move_left: {}, move_right: {}, rotate_left: {}, rotate_right: {}\".format(\n move_backward, move_forward, move_up, move_down, move_left, move_right, rotate_left,\n rotate_right))\n\n controller.flags_handler.keyboard_input = cv2.waitKey(1)\n\n camera.release()\n cv2.destroyWindow('Controller')\n"
},
{
"alpha_fraction": 0.5825279951095581,
"alphanum_fraction": 0.5991495847702026,
"avg_line_length": 40.06349182128906,
"blob_id": "8d04c12f092a983f218236288264e88e8a2c1ab4",
"content_id": "4d7c902143a424864b0e01cf2d52a65ea222a92d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2587,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 63,
"path": "/tests/test_tracker.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom hallopy.controller import FlagsHandler, Tracker\nfrom util.image_comp_tool import ImageTestTool\n\n\nclass TestTracker:\n \"\"\"Unittests for a Tracker object. \"\"\"\n\n def test_track(self):\n \"\"\"Test if tracker object tracks correctly after given set of points to track, and a frame.\"\"\"\n\n # setup\n cv2.namedWindow('test_track')\n flags_handler = FlagsHandler()\n tracker = None\n\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n cap = cv2.VideoCapture(0)\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n max_area_contour = ImageTestTool.get_max_area_contour(back_ground_removed_clipped)\n\n extLeft, extRight, extTop, extBot = ImageTestTool.get_contour_extreme_points(max_area_contour)\n palm_center = ImageTestTool.get_center_of_mass(max_area_contour)\n\n if tracker is None:\n points = np.array([extTop, palm_center])\n\n else:\n points = tracker.points_to_track\n tracker.track(points, back_ground_removed_clipped)\n points = tracker.points_to_track\n\n ImageTestTool.draw_tracking_points(back_ground_removed_clipped, points)\n cv2.circle(back_ground_removed_clipped, palm_center, 8, (255, 255, 255), thickness=-1)\n cv2.imshow('test_track', back_ground_removed_clipped)\n keyboard_input = cv2.waitKey(1)\n flags_handler.keyboard_input = keyboard_input\n # run\n if flags_handler.background_capture_required is True:\n tracker = None\n if keyboard_input == ord('t'):\n tracker = Tracker(flags_handler, points, back_ground_removed_clipped)\n\n # teardown\n cap.release()\n cv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.5940098166465759,
"alphanum_fraction": 0.6102154850959778,
"avg_line_length": 42.17216110229492,
"blob_id": "54db0ffb4098a636342c5a99c70f1a2b4fa89006",
"content_id": "05c75c17c3e43ca9fc52544199f8ab32461be891",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11786,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 273,
"path": "/tests/test_extractor.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom hallopy import utils\nfrom hallopy.controller import FlagsHandler, Detector, Extractor, BackGroundRemover, FrameHandler, FaceProcessor\nfrom util.image_comp_tool import ImageTestTool\n\n\nclass TestExtractor:\n \"\"\"Test extractor functionality. \"\"\"\n\n def test_extract_center_of_mass(self):\n \"\"\"Test if extract find center of mass. \"\"\"\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n expected_path = utils.get_full_path(\n 'docs/material_for_testing/back_ground_removed_and_center_of_mass_discovered.jpg')\n expected_image = cv2.imread(expected_path)\n # Because image loaded from local, and not received from web-cam, a flip is needed.\n test_image = cv2.flip(test_image, 1)\n\n # todo: use mockito here to mock detector\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n detector.input_frame_for_feature_extraction = test_image\n\n # run\n extractor.extract = detector\n result_image = test_image.copy()\n cv2.circle(result_image, extractor.palm_center_point, 5, (255, 0, 0), thickness=5)\n ssim = ImageTestTool.compare_imaged(result_image, expected_image)\n # print(\"SSIM: {}\".format(ssim))\n assert ssim >= 0.95\n\n def test_get_contour_extreme_point(self):\n \"\"\"Test if middle finger edge was found correctly. \"\"\"\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n\n max_area_contour = ImageTestTool.get_max_area_contour(test_image)\n expected_extLeft, expected_extRight, expected_extTop, expected_extBot = ImageTestTool.get_contour_extreme_points(\n max_area_contour)\n\n # todo: use mockito here to mock detector\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n detector.input_frame_for_feature_extraction = test_image\n\n # run\n extractor.extract = detector\n\n assert expected_extLeft == extractor.ext_left\n assert expected_extRight == extractor.ext_right\n assert expected_extTop == extractor.ext_top\n assert expected_extBot == extractor.ext_bot\n\n def test_contour_extreme_point_tracking(self):\n \"\"\"Test for tracking extreme_points without optical flow (e.g until calibrated). 
\"\"\"\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n\n # todo: use mockito here to mock preprocessing elements\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n # Background model preparations.\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n\n cap = cv2.VideoCapture(0)\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n image = extractor.get_drawn_extreme_contour_points()\n cv2.imshow('test_contour_extreme_point_tracking', image)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n def test_palm_angle_calculation(self):\n \"\"\"Test if angle is calculated correctly.\n\n Usage:\n 1. press 'b': to calibrate back_ground_remover.\n 2. insert hand into frame, so that middle_finger is aligned with the Y axe.\n 3. rotate hand 15 degrees left. (degrees should go above 90).\n 4. rotate hand 15 degrees right. (degrees should go below 90).\n 5. press esc when done.\n \"\"\"\n # setup\n # todo: use mockito here to mock preprocessing elements\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n # Background model preparations.\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n cap = cv2.VideoCapture(0)\n\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n # run\n image = extractor.get_drawn_extreme_contour_points()\n cv2.imshow('test_contour_extreme_point_tracking', image)\n print(extractor.palm_angle_in_degrees)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n def test_5_second_calibration_time(self):\n \"\"\"Test if 5 second calibration time works correctly according to flags_handler.\n\n Usage:\n 1. press 'b': to calibrate back_ground_remover.\n 2. insert hand into frame, center palms_center (white dot) with axes crossing.\n 3. wait for #calibration_time (default 5 sec).\n 4. 
press esc\n\n test: after calibration_time, center circle should be green.\n \"\"\"\n # setup\n # todo: use mockito here to mock preprocessing elements\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n # Background model preparations.\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n cap = cv2.VideoCapture(0)\n\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n # run\n image = extractor.get_drawn_extreme_contour_points()\n cv2.imshow('test_contour_extreme_point_tracking', image)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n def test_max_distance_between_top_ext_point_and_palm_center_point(self):\n \"\"\"Test if max distance is found correctly. \"\"\"\n # setup\n # todo: use mockito here to mock preprocessing elements\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n # Background model preparations.\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n\n cap = cv2.VideoCapture(0)\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n # run\n image = extractor.get_drawn_extreme_contour_points()\n cv2.line(image, extractor.palm_center_point, (extractor.ext_top[0], extractor.palm_center_point[\n 1] - extractor.max_distance_from_ext_top_point_to_palm_center), (255, 255, 255), thickness=2)\n cv2.imshow('test_max_distance_between_top_ext_point_and_palm_center_point', image)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n def test_drawn_correctly(self):\n \"\"\"Test if zero point is drawn correctly.\n\n zero point is the point that responsible for forward/backward commands extraction.\n \"\"\"\n # setup\n # todo: use mockito here to mock preprocessing elements\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n # Background model preparations.\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n\n cap = cv2.VideoCapture(0)\n while flags_handler.quit_flag is False:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), 
np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n # Clip frames ROI.\n back_ground_removed_clipped = ImageTestTool.clip_roi(res,\n {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n # run\n image = extractor.get_drawn_extreme_contour_points()\n cv2.imshow('test_drawn_correctly', image)\n flags_handler.keyboard_input = cv2.waitKey(1)\n"
},
{
"alpha_fraction": 0.7514863014221191,
"alphanum_fraction": 0.7645660042762756,
"avg_line_length": 30.148147583007812,
"blob_id": "11cdadf122495dd7c8d09d72c3322157246e4028",
"content_id": "7f715d2acc0249d4e7dd7d0887cbb12c60de8193",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 841,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 27,
"path": "/Dockerfile",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "# Use the official image as a parent image\nFROM ubuntu:latest\n\n# Ref: https://rtfm.co.ua/en/docker-configure-tzdata-and-timezone-during-build/\nENV TZ=Asia/Jakarta\nRUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone\n\n# Set the working directory\nWORKDIR /usr/src/app\n\n# Install any needed packages specified in requirements.txt\nRUN apt-get update\nRUN apt-get install -y pkg-config\nRUN apt install -y python3-dev\nRUN apt-get install -y python3-pip\nRUN apt-get install -y libavformat-dev libavdevice-dev\nRUN apt-get install -y libsm6 libxext6 libxrender-dev\n# RUN pip install av\nRUN pip3 install av==6.1.2\nRUN pip3 install opencv-python\nRUN pip3 install tellopy\n\n# Copy the rest of your app's source code from your host to your image filesystem.\nCOPY . .\n\n# Run hellopy controller\nCMD [\"python3\",\"./hallopy/hallo.py\"]\n"
},
{
"alpha_fraction": 0.6048110127449036,
"alphanum_fraction": 0.6323024034500122,
"avg_line_length": 29.63157844543457,
"blob_id": "0420413cfc58e6479c2240f9ae164da73d45156a",
"content_id": "ef50f12e118302c36fcc3288f33fee52c446a44c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 38,
"path": "/hallopy/icontroller.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "class Icontroller(object):\n \"\"\"Extractor interface.\n\n This module contain the methods a Controller need.\n\n \"\"\"\n\n def get_up_param(self):\n \"\"\"Return up parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_down_param(self):\n \"\"\"Return down parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_left_param(self):\n \"\"\"Return left parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_right_param(self):\n \"\"\"Return right parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_rotate_left_param(self):\n \"\"\"Return rotate left parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_rotate_right_param(self):\n \"\"\"Return rotate right parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_forward_param(self):\n \"\"\"Return move forward parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n\n def get_backward_param(self):\n \"\"\"Return move backward parameter (int between 0..100). \"\"\"\n raise NotImplementedError\n"
},
{
"alpha_fraction": 0.6918325424194336,
"alphanum_fraction": 0.712079644203186,
"avg_line_length": 28.73469352722168,
"blob_id": "a63297bcbd56cab61b6fa9e21e6955382c2d2703",
"content_id": "4f8b30ae5e13c110c703fb5e5abfefcd7b7d6da3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2914,
"license_type": "permissive",
"max_line_length": 174,
"num_lines": 98,
"path": "/README.md",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "# Hallo - a hand controlled Tello\n\nDJI Tello controller using hand gestures python app\n\n## Docker installation in 3 steps\n\nTested on ubuntu 19.10\n\n### step 1\n\n```shell script\ngit clone https://github.com/GalBrandwine/HalloPy\ncd HalloPy\ngit submodule update --init --recursive\n```\n\n### step 2\n\n```shell script\ndocker build --network=host --tag hallopy:1.3 .\n```\n\n(make sure you have docker [installed](https://docs.docker.com/get-started/).)\n\n### step 3\n\n(Make sure you're connected to the TELLO wifi)\n\n```shell script\nxhost + && docker run --rm -it --net=host --ipc=host -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix --privileged -v /dev/video0:/dev/video0 --name hallopy hallopy:1.3 \n```\n\nFor flags explanation: [running docker with gui](https://marcosnietoblog.wordpress.com/2017/04/30/docker-image-with-opencv-with-x11-forwarding-for-gui/)\n\n## Non docker users\n\nEnvironment:\n\n* ubuntu 19.10\n* python 3.7\n\nDependency libraries installation:\n\n* pip install av==6.1.2\n* pip install opencv-python\n\n### Run\n\nFrom directory `/HalloPy/`:\n\n```shell script\npython ./hallopy/hallo.py\n```\n\n## Controller Usage\n\n1. Make sure you have all dependency libraries.\n for great openCV installation tutorial refer to:\n <https://www.pyimagesearch.com/2016/10/24/ubuntu-16-04-how-to-install-opencv/>\n2. Turn on Tello drone and connect to it's wifi\n 1. there's no setup.py yet, so in order to run this project, open the project in an IDE and run: hallo.py\n3. Application usage:\n\n ```\n press 'b' - to detect palm ( depend on the environment lights, a Thresh Hold slider tuning may be needed)\n center the detected palm at the center of the detection_frame FOR 5 seconds \n pressing b again will reset calibration.\n ```\n\n After center-circle become GREEN (meaning we are now calibrated):\n\n ```\n press 't' - to take off ( a 3 second hold-up, until drone is in the air)\n after landing - if program is calibrated, press t again to take-off\n \n press 'c' - to toggle drone's control between key-board and detected-palm\n toggling back to keyboard will force drone to hover in the same place\n \n press 'l' - to land ( at any time)\n\n press 'x' - to adjust background' threshold - for thicker palm recognition\n press 'z' - to adjust background' threshold - for thinner palm recognition\n \n press 'esc' to exit program ( only after drone landed)\n ```\n\n4. Video explaining hands movements for controlling the drone can be found [here](https://youtu.be/NSwKCzxFBv4), and [here](https://youtu.be/6THFNt_5LNg)\n\n# Thanks to\n\n* OpenCV - for the greatest computer Vision library in this world ( and others)\n\n* tellopy repo - for making a super friendly drone api\n\n* Adrian and his crew at - <https://www.pyimagesearch.com/> for the best How-to's tutorials\n and email support.\n \n* Izane for his FingerDetection - <https://github.com/lzane/Fingers-Detection-using-OpenCV-and-Python>\n"
},
{
"alpha_fraction": 0.6111975312232971,
"alphanum_fraction": 0.6174183487892151,
"avg_line_length": 35.71428680419922,
"blob_id": "728c68ff5aef964c41da1dc6d19726846e6cd944",
"content_id": "60bf7dd99c57c5979b67e140b6c557d361032b4e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2572,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 70,
"path": "/tests/test_backGroundRemover.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "\n\nimport cv2\n\nfrom hallopy import utils\nfrom hallopy.controller import BackGroundRemover, FlagsHandler\nfrom util.image_comp_tool import ImageTestTool\n\n\nclass TestBackGroundRemover:\n \"\"\"TestBackGroundRemover tests BackgroundRemover functionality. \"\"\"\n\n def test_detected_frame(self):\n \"\"\"Test if input frames background is being removed correctly. \"\"\"\n # setup\n expected_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n expected = cv2.imread(expected_path)\n test_path = utils.get_full_path('docs/material_for_testing/face_and_hand_0.avi')\n cap = cv2.VideoCapture(test_path)\n flags_handler = FlagsHandler()\n back_ground_remover = BackGroundRemover(flags_handler)\n ret = True\n\n # run\n while ret is True:\n ret, frame = cap.read()\n if ret is True:\n back_ground_remover.detected_frame = frame\n\n # write_path = utils.get_full_path('docs')\n # cv2.imwrite(write_path+'/back_ground_removed_frame.jpg',back_ground_remover.detected_frame)\n ssim = ImageTestTool.compare_imaged(back_ground_remover.detected_frame, expected)\n # print(\"SSIM: {}\".format(ssim))\n assert ssim >= 0.95\n\n # teardown\n cap.release()\n cv2.destroyAllWindows()\n\n def test_back_ground_reset(self):\n \"\"\"Test if background model is being reset correctly.\n\n resetting background model is via keyboard input,\n in Controller's flags_handler.\n \"\"\"\n # setup\n # Input from camera.\n cv2.namedWindow('test')\n cap = cv2.VideoCapture(0)\n flags_handler = FlagsHandler()\n back_ground_remover = BackGroundRemover(flags_handler)\n\n # run\n while flags_handler.quit_flag is False:\n \"\"\"\n Inside loop, remove back ground from frame using back_ground_remover,\n here we are testing for background model resetting.\n the reset flag is changed within Controller's flags_handler.\n \n Pressing 'b': will rest background.\n Pressing esc: break loop.\n \"\"\"\n ret, frame = cap.read()\n if ret is True:\n back_ground_remover.detected_frame = frame\n if back_ground_remover.detected_frame is not None:\n cv2.imshow('test', back_ground_remover.detected_frame)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n # teardown\n cap.release()\n cv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.6062108278274536,
"alphanum_fraction": 0.6162706017494202,
"avg_line_length": 40.569698333740234,
"blob_id": "23e09a261b48efdefadb1d0b2d761d710a607a0d",
"content_id": "6da20af894d65a0d6b69bd7cd67df33772e8fbac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6859,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 165,
"path": "/tests/test_detector.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom HalloPy.hallopy.controller import Detector, FlagsHandler, Extractor\nfrom hallopy import utils\nfrom util.image_comp_tool import ImageTestTool\nimport numpy as np\n\n\nclass TestDetector:\n \"\"\"Unittests for a Detector object. \"\"\"\n\n def test_find_largest_contours(self):\n \"\"\"Test if largest contours is found. \"\"\"\n # setup\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n # Because image loaded from local, and not received from web-cam, a flip is needed.\n test_image = cv2.flip(test_image, 1)\n test_image = cv2.bitwise_not(test_image)\n\n max_area_contour = ImageTestTool.get_max_area_contour(test_image)\n expected_area = ImageTestTool.get_contour_area(max_area_contour)\n # Create detector\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n\n # run\n detector.input_frame_for_feature_extraction = test_image\n result_area = cv2.contourArea(detector.max_area_contour)\n\n assert result_area == expected_area\n\n def test_draw_axes(self):\n \"\"\"Test if detected_out_put_center calculated properly. \"\"\"\n # setup\n\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n # Because image loaded from local, and not received from web-cam, a flip is needed.\n test_image = cv2.flip(test_image, 1)\n expected = test_image.copy()\n # Create detector\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n expected_detected_out_put_center = (\n int(expected.shape[1] / 2), int(expected.shape[0] / 2) + detector.horiz_axe_offset)\n\n # run\n detector.input_frame_for_feature_extraction = test_image\n cv2.imshow('expected', expected)\n cv2.imshow('result', detector.input_frame_for_feature_extraction)\n cv2.waitKey()\n assert expected_detected_out_put_center == detector.detected_out_put_center\n\n def test_draw_contour(self):\n \"\"\"Test is contour is being drawn accordingly to flags_handles. \"\"\"\n # setup\n # Input from camera.\n cv2.namedWindow('test_draw_contour')\n\n test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')\n test_image = cv2.imread(test_path)\n # Because image loaded from local, and not received from web-cam, a flip is needed.\n test_image = cv2.flip(test_image, 1)\n expected = test_image.copy()\n flags_handler = FlagsHandler()\n # Set flags_handler in order to perform the test.\n flags_handler.lifted = True\n flags_handler.calibrated = True\n detector = Detector(flags_handler)\n\n # run\n while flags_handler.quit_flag is False:\n \"\"\"\n Inside loop, update self._threshold according to flags_handler,\n \n Pressing 'c': in order to toggle control (suppose to change contour's color between green and red)\n Pressing 'l': to raise 'land' flag in flags_handler, in order to be able to break loop (with esc)\n Pressing esc: break loop.\n \"\"\"\n detector.input_frame_for_feature_extraction = test_image\n cv2.imshow('test_draw_contour', detector.input_frame_for_feature_extraction)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n # teardown\n cv2.destroyAllWindows()\n\n def test_threshold_change(self):\n \"\"\"Test if threshold is changed accordingly to flags_handler. 
\"\"\"\n # setup\n # Input from camera.\n cv2.namedWindow('test_threshold_change')\n cap = cv2.VideoCapture(0)\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n\n # run\n while flags_handler.quit_flag is False:\n \"\"\"\n Inside loop, update self._threshold according to flags_handler,\n \n Pressing 'z': will make threshold thinner.\n Pressing 'x': will make threshold thicker.\n Pressing esc: break loop.\n \"\"\"\n ret, frame = cap.read()\n if ret is True:\n detector.input_frame_for_feature_extraction = frame\n result = detector.input_frame_for_feature_extraction\n cv2.drawContours(result, [detector.max_area_contour], 0, (0, 0, 255), thickness=2)\n cv2.imshow('test_threshold_change', result)\n flags_handler.keyboard_input = cv2.waitKey(1)\n\n # teardown\n cap.release()\n cv2.destroyAllWindows()\n\n def test_detector_extract_and_track(self):\n \"\"\"Test if Detector uses tracker object correctly. \"\"\"\n\n # setup\n # Input from camera.\n cv2.namedWindow('test_detector_extract_and_track')\n cap = cv2.VideoCapture(0)\n flags_handler = FlagsHandler()\n detector = Detector(flags_handler)\n extractor = Extractor(flags_handler)\n\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n while flags_handler.quit_flag is False:\n \"\"\"\n Inside loop, update self._threshold according to flags_handler,\n \n Pressing 'c': in order to toggle control (suppose to change contour's color between green and red)\n Pressing 'l': to raise 'land' flag in flags_handler, in order to be able to break loop (with esc)\n Pressing 'z': will make threshold thinner.\n Pressing 'x': will make threshold thicker. \n Pressing esc: break loop.\n \"\"\"\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n\n # Remove background from input frame.\n fgmask = bg_model.apply(frame, learningRate=0)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n # Clip frames ROI.b\n roi = {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6}\n back_ground_removed_clipped = ImageTestTool.clip_roi(res, roi)\n\n if flags_handler.background_capture_required is True:\n bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)\n flags_handler.background_capture_required = False\n\n # Pipe:\n detector.input_frame_for_feature_extraction = back_ground_removed_clipped\n extractor.extract = detector\n\n cv2.imshow('test_detector_extract_and_track', extractor.get_drawn_extreme_contour_points())\n keyboard_input = cv2.waitKey(1)\n flags_handler.keyboard_input = keyboard_input\n\n # teardown\n cap.release()\n cv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.5381165742874146,
"alphanum_fraction": 0.5829596519470215,
"avg_line_length": 23.77777862548828,
"blob_id": "c7a1848b72f87fdb39ec551d49892b3a539a27d8",
"content_id": "15d2838c55f1b7ab58107e84ad667cc759c7f5c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 223,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 9,
"path": "/runHallopy.sh",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "xhost +\nsudo docker run --rm -it --net=host \\\n --privileged \\\n -e DISPLAY=$DISPLAY \\\n -v /tmp/.X11-unix:/tmp/.X11-unix \\\n -v /dev/video0:/dev/video0 \\\n -v /dev/video1:/dev/video1 \\\n -n hallopy \\\n hallopy:1.3\n"
},
{
"alpha_fraction": 0.6365638971328735,
"alphanum_fraction": 0.642070472240448,
"avg_line_length": 20.11627960205078,
"blob_id": "2e888e98ae2a8ce960901320bbc3f3a8b63a1a7d",
"content_id": "f5a796a9cfda774bd1b654924dab6d66fdffb86b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 908,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 43,
"path": "/hallopy/hallo.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "\"\"\"hallopy script is an example how to use the Hand_recognition_controller. \"\"\"\n\nimport os\nimport sys\n\ncwd = os.getcwd()\nsys.path.insert(0, cwd)\nsys.path.insert(0,cwd+\"/thirdparty/TelloPy\")\nimport tellopy\nfrom hallopy.controller import Controller\n\n\ndef handler(event, sender, data, **args):\n \"\"\"Drone events handler, for testing. \"\"\"\n drone_handler = sender\n if event is drone_handler.EVENT_FLIGHT_DATA:\n print(data)\n\n\ndef init_drone():\n \"\"\"Drone initiation function for testing. \"\"\"\n drone = tellopy.Tello()\n\n try:\n drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)\n drone.connect()\n drone.wait_for_connection(60.0)\n\n except Exception as ex:\n print(ex)\n drone.quit()\n return None\n return drone\n\n\ndef main():\n drone = init_drone()\n controller = Controller(drone)\n controller.start()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7097625136375427,
"alphanum_fraction": 0.7097625136375427,
"avg_line_length": 28.153846740722656,
"blob_id": "9daf5dcabc003e28e5956a083d0d301f6545d79a",
"content_id": "09ddc8454fda4ee41c088cf911c8e7538ab054cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 13,
"path": "/hallopy/utils.py",
"repo_name": "crisdeodates/DJI-Tello_HalloPy",
"src_encoding": "UTF-8",
"text": "\"\"\"This utils.py module holds the location of the project root path,\n\nfor further file opening in the project.\n\"\"\"\nfrom os import path as op\n\nMAIN_DIRECTORY = op.dirname(op.dirname(__file__))\nprint(\"MAIN_DIRECTORY: {}\".format(MAIN_DIRECTORY))\n\n\ndef get_full_path(*path):\n \"\"\"Function returns file path, relative to project root. \"\"\"\n return op.join(MAIN_DIRECTORY, *path)\n"
}
] | 15 |
hulinjuan/MachineLearningInAction-Python3 | https://github.com/hulinjuan/MachineLearningInAction-Python3 | 4285d552315bb0680e89b2e2acfbd8268fdd3a89 | c01f48fca57f9787fd626c5b51857d3e069a8ec6 | 704d54da1f271deb51bcc91e0532562bec0692a9 | refs/heads/master | 2020-12-07T17:03:26.640272 | 2020-01-09T09:04:27 | 2020-01-09T09:04:27 | 232,758,087 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7749999761581421,
"alphanum_fraction": 0.8166666626930237,
"avg_line_length": 15.857142448425293,
"blob_id": "93c38058b9670075fa9b24b4a81926681a0fa5d0",
"content_id": "236efcd57f459f084f1f4b7ea2ef1526a86083c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 230,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 7,
"path": "/README.md",
"repo_name": "hulinjuan/MachineLearningInAction-Python3",
"src_encoding": "UTF-8",
"text": "# MachineLearningInAction-Python3\n\n\n\n\n1. 按照书本敲的代码,以后如果用的到的话,可以直接作为模块来import;\n2. 对书本上一些老的代码进行了小的修改,使代码经过python3.6能够跑通;\n\n\n"
},
{
"alpha_fraction": 0.5919212698936462,
"alphanum_fraction": 0.6043500900268555,
"avg_line_length": 27.073530197143555,
"blob_id": "563f012db921f3fb75d833faa72f5f9509156902",
"content_id": "fc15ea0c31fc2f99db8d296328ee7a1bff903281",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2381,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 68,
"path": "/machinelearninginaction3/Part3 (ch10-ch12) 无监督学习/kMeans.py",
"repo_name": "hulinjuan/MachineLearningInAction-Python3",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 14:51:31 2020\n\n@author: lindsay.hu\n\"\"\"\n\nfrom numpy import *\nfrom math import *\n\n'''k均值算法中要用到的辅助函数'''\n\n#将文本文件导入列表,返回值是一个包含许多其他列表的列表\ndef loadDataSet(filename):\n dataMat = []\n fr = open(filename)\n for line in fr.readlines():\n curLine = line.strip().split('\\t')\n fltLine = list(map(float,curLine))\n dataMat.append(fltLine)\n return dataMat\n\n#计算两个向量的欧几里得距离\ndef distEclud(vecA,vecB):\n return sqrt(sum(power(vecA - vecB,2)))\n\n#函数为给定数据集构建一个包含k个随机质心的集合\ndef randCent(dataSet,k):\n n = shape(dataSet)[1] #数据集列数\n centroids = mat(zeros((k,n))) #质心的0矩阵\n for j in range(n):\n minJ = min(dataSet[:,j]) #第j列的最小值\n rangeJ = float(max(dataSet[:,j]) - minJ) #第j列的极差\n centroids[:,j] = minJ + rangeJ * random.rand(k,1) #随机生成min到max之间的值\n return centroids\n\n'''k均值聚类算法'''\n\n#kMeans()函数,数据集和簇数目是必选参数,用来计算距离和创建初始质心的函数都是可选的\n#簇分配结果矩阵clusterAssment包含两列:一列记录簇索引值,第二列存储误差,\n#这里的误差指当前点到簇质心的距离,后边使用该误差来评价聚类的效果\n\n\n \ndef kMeans(dataSet,k,distMeas=distEclud,createCent=randCent):\n m = shape(dataSet)[0]\n clusterAssment = mat(zeros((m,2)))\n centroids = createCent(dataSet,k)\n clusterChanged = True\n while clusterChanged:\n clusterChanged = False\n for i in range(m):\n minDist = inf\n minIndex = -1\n for j in range(k):\n distJI = distMeas(centroids[j,:],dataSet[i,:])\n if distJI < minDist:\n minDist = distJI\n minIndex=j\n if clusterAssment[i,0] != minIndex:\n clusterChanged = True\n clusterAssment[i,:] = minIndex,minDist**2\n print(centroids)\n #遍历所有的质心,并更新它们的取值\n for cent in range(k):\n ptsInClust = dataSet[nonzero(clusterAssment[:,0].A == cent)[0]]\n centroids[cent,:] = mean(ptsInClust,axis=0)\n return centroids,clusterAssment\n\n\n "
}
] | 2 |
ankitkatoch/DjangoAllAuth | https://github.com/ankitkatoch/DjangoAllAuth | e94497b65b317f0c2ac29e4db443644123cc63bc | 4e955e6d35cffc77d77ba536da52b9a8149ce596 | fbda47421be06e4ffd90c9eb9af386f9c4506343 | refs/heads/master | 2023-05-13T10:22:16.666699 | 2021-06-08T06:50:30 | 2021-06-08T06:50:30 | 374,910,640 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6006600856781006,
"alphanum_fraction": 0.6146864891052246,
"avg_line_length": 40.7931022644043,
"blob_id": "ef2772115ef0ad54fe81c2b1bcb37208e98d9458",
"content_id": "0024bef555696cd3f34e029a72c9587a0f4d63ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1212,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 29,
"path": "/djangoallauthproject/templates/allauth/account/password_reset.html",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% load i18n %}\n{% load account %}\n{% block head_title %}{% trans \"Password Reset\" %}{% endblock %}\n\n{% block content %}\n<section id=\"action_div\" class=\"container rounded shadow-lg p-3 my-5 rounded\" style=\"max-width:35rem;\">\n\t\t<div class=\"row h-100\" style=\"max-width:35rem;\">\n\t\t\t<div class=\"col-md-12 mx-auto\">\n {% if form.errors %}\n\t\t\t\t<p class=\"px-2 text-danger\">Please check your email. This Email doesn't exist in the database.</p>\n\t\t\t{% endif %}\n <h1>{% trans \"Password Reset\" %}</h1>\n {% if user.is_authenticated %}\n {% include \"account/snippets/already_logged_in.html\" %}\n {% endif %}\n\n <p>{% trans \"Forgotten your password? Enter your e-mail address below, and we'll send you an e-mail allowing you to reset it.\" %}</p>\n\n <form method=\"POST\" action=\"{% url 'account_reset_password' %}\" class=\"password_reset\">\n {% csrf_token %}\n <input type=\"email\" class=\"form-control mt-5\" name=\"email\" placeholder=\"E-mail address\" autocomplete=\"email\" required id=\"id_email\"><br>\n <input type=\"submit\" class=\"btn btn-primary\" value=\"{% trans 'Reset My Password' %}\" />\n </form>\n </div>\n </div>\n</section>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.7259474992752075,
"alphanum_fraction": 0.7259474992752075,
"avg_line_length": 25.384614944458008,
"blob_id": "fb2bf7302034324d7b1f07d969ada39b92844361",
"content_id": "94a7c705771209c95f108040286a4025b243fa1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 13,
"path": "/djangoallauthproject/djangoallauthapp/views.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.shortcuts import render\n\n\n# Create your views here.\ndef home(request):\n # params = User.objects.all()\n return render(request, 'home.html')\n\n\ndef user_dashboard(request):\n data = User.objects.all()\n return render(request, 'djangoallauthapp/user_dashboard.html',{'data':data})\n"
},
{
"alpha_fraction": 0.7943925261497498,
"alphanum_fraction": 0.7943925261497498,
"avg_line_length": 20.399999618530273,
"blob_id": "828663641ea37b22bbef60c1ba3171dcdad3afba",
"content_id": "70052ba1e309a48b89ab2d8a64ebece5fa7e1d7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/djangoallauthproject/djangoallauthapp/apps.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass DjangoallauthappConfig(AppConfig):\n name = 'djangoallauthapp'\n"
},
{
"alpha_fraction": 0.6764705777168274,
"alphanum_fraction": 0.6807025074958801,
"avg_line_length": 26.63157844543457,
"blob_id": "c678694354b966cf422dacb4ac1b7f440cd469e6",
"content_id": "af225a4850231648faab1d28bb502aed80aec2fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4726,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 171,
"path": "/djangoallauthproject/djangoallauthproject/settings.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nimport os\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'tewyeecxedao-^@alm&jf!&s$!2=tv0v1%l()^5pq+kwm7*6v6'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'djangoallauthapp',\n 'django.contrib.sites',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.google',\n 'chat',\n 'channels',\n]\n\n# Provider specific settings\nSOCIALACCOUNT_PROVIDERS = {\n 'google': {\n 'SCOPE': [\n 'profile',\n 'email',\n ],\n 'AUTH_PARAMS': {\n 'access_type': 'online',\n }\n }\n}\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n\nROOT_URLCONF = 'djangoallauthproject.urls'\nASGI_APPLICATION = \"djangoallauthproject.asgi.application\"\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'templates', 'allauth')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n\n # `allauth` needs this from django\n 'django.template.context_processors.request',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'djangoallauthproject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# custom settings\nAUTHENTICATION_BACKENDS = [\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 
'allauth.account.auth_backends.AuthenticationBackend',\n]\nSITE_ID = 2\nLOGIN_REDIRECT_URL = \"/\"\nACCOUNT_AUTHENTICATION_METHOD = \"username_email\"\nACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_USERNAME_MIN_LENGTH = 4\nACCOUNT_SIGNUP_REDIRECT_URL = \"/\"\n\n# Mail related stuff here\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\nACCOUNT_FORMS = {\n 'login': 'allauth.account.forms.LoginForm',\n # 'login': 'djangoallauthapp.forms.MyCustomLoginForm',\n 'signup': 'allauth.account.forms.SignupForm',\n # 'signup': 'djangoallauthapp.forms.MyCustomSignupForm',\n 'add_email': 'allauth.account.forms.AddEmailForm',\n 'change_password': 'allauth.account.forms.ChangePasswordForm',\n 'set_password': 'allauth.account.forms.SetPasswordForm',\n 'reset_password': 'allauth.account.forms.ResetPasswordForm',\n 'reset_password_from_key': 'allauth.account.forms.ResetPasswordKeyForm',\n 'disconnect': 'allauth.socialaccount.forms.DisconnectForm',\n}\n\n"
},
{
"alpha_fraction": 0.5956112742424011,
"alphanum_fraction": 0.6144200563430786,
"avg_line_length": 31,
"blob_id": "b2488091638aed27d7de573ff9dc184f822eeaec",
"content_id": "29ab077b701dff24ce8bfcb050d53becfe5d9dd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 10,
"path": "/djangoallauthproject/djangoallauthapp/templates/djangoallauthapp/user_dashboard.html",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n{% block content %}\n\n<h3 class=\"mt-2\">Enjoy chat with the following :-</h3>\n{% for item in data %}\n{% if request.user.username != item.username %}\n<a class=\"btn btn-block btn-success w-50 m-1\" href=\"/chat/{{ item.username }}\">{{ item.username }}</a><br>\n{% endif %}\n{% endfor %}\n{% endblock %}"
},
{
"alpha_fraction": 0.748792290687561,
"alphanum_fraction": 0.748792290687561,
"avg_line_length": 22,
"blob_id": "6171f81570feafbda1624fdef582ec97fb08e66c",
"content_id": "ba5dcffd8058a2952d4dc320a96461c6b04b4fed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 9,
"path": "/djangoallauthproject/djangoallauthapp/urls.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import user_dashboard\napp_name = 'djangoallauthapp'\n\nurlpatterns = [\n path('', user_dashboard, name='user_dashboard')\n\n]\n"
},
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6226415038108826,
"avg_line_length": 25.5,
"blob_id": "a068c89e7217c3601538862aa6a693632833d9d4",
"content_id": "034201ba69fdbe219db6f90ca4403035de9815ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 12,
"path": "/djangoallauthproject/chat/urls.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from django.urls import path, re_path\n\n\nfrom .views import ThreadView, InboxView, IndexView\n\napp_name = 'chat'\nurlpatterns = [\n path(\"global/\", IndexView.as_view()),\n path(\"/\", InboxView.as_view()),\n re_path(r\"^(?P<username>[\\w.@+-]+)\", ThreadView.as_view()),\n # path('', users_list, name='users_list')\n]\n"
},
{
"alpha_fraction": 0.6885714530944824,
"alphanum_fraction": 0.6942856907844543,
"avg_line_length": 30.727272033691406,
"blob_id": "f756b91b50ffe236f75308bee3946c38a8224782",
"content_id": "685395bcde470bdd69d665d38870fdbddad8d19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 11,
"path": "/djangoallauthproject/djangoallauthapp/forms.py",
"repo_name": "ankitkatoch/DjangoAllAuth",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom allauth.account.forms import SignupForm\n\n\n# class MyCustomSignupForm(SignupForm):\n# # organization = forms.CharField(max_length=30, label='Organisation')\n#\n# def signup(self, request, user):\n# user.organization = self.cleaned_data['organization']\n# user.save(active=False)\n# return user\n\n"
}
] | 8 |
zczakon/credit_scorecard | https://github.com/zczakon/credit_scorecard | 79b46b2ef2b318e098d339ef881e07c682094104 | e36d1ec3d184a046a36f75f32d6cf4723cab1411 | b6273e89754206a7cf006b432ac50b47673730b8 | refs/heads/master | 2023-03-01T07:07:08.187473 | 2021-01-15T21:30:29 | 2021-01-15T21:30:29 | 327,283,821 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7515151500701904,
"alphanum_fraction": 0.7575757503509521,
"avg_line_length": 23.75,
"blob_id": "1e935cb4cfd3e4e588c4a760507484a33ebfb7b1",
"content_id": "3a84475a6a9e14e27c4d34ee86e31bad08a31c1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 20,
"path": "/linear_regression.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "from sklearn.linear_model import LinearRegression\nimport pandas as pd\nfrom preprocessor import Preprocessor\nimport numpy as np\n\npd.set_option('display.max_columns', 10)\ndf = pd.read_excel(\"Project 2 - Data.xls\")\n\npreprocessor = Preprocessor(df)\n\nx_train, y_train, x_test, y_test = preprocessor.combine()\n\ntotal_test_defaults = np.sum(y_test)\n\nregr = LinearRegression()\nregr.fit(x_train, y_train)\nprediction =regr.predict(x_test)\nprint(prediction)\nscore = regr.score(x_test, y_test)\nprint(score)\n"
},
{
"alpha_fraction": 0.7588996887207031,
"alphanum_fraction": 0.7653721570968628,
"avg_line_length": 29.850000381469727,
"blob_id": "2c803b85d9234c162cee0b1cf3eccf7ca9993ca6",
"content_id": "66826d9c022d81ec91b85c43b81e1b52e2a76287",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 20,
"path": "/logistic_regression.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "from sklearn.linear_model import LogisticRegression\nimport pandas as pd\nfrom preprocessor import Preprocessor\nimport numpy as np\n\npd.set_option('display.max_columns', 10)\ndf = pd.read_excel(\"Project 2 - Data.xls\")\n\npreprocessor = Preprocessor(df)\nx_train, y_train, x_test, y_test = preprocessor.combine()\n\ntotal_test_defaults = np.sum(y_test)\nprint('Total defaults in test data: ', total_test_defaults)\n\nlogisticRegr = LogisticRegression()\nlogisticRegr.fit(x_train, y_train)\nlogisticRegr.predict(x_test)\npredict_proba = logisticRegr.predict_proba(x_test)[:, 1]\nscore = logisticRegr.score(x_test, y_test)\nprint(score)\n\n"
},
{
"alpha_fraction": 0.5665144324302673,
"alphanum_fraction": 0.5756468772888184,
"avg_line_length": 38.578311920166016,
"blob_id": "fcd7439a3947cc85a0eb32bd77e5c49c3626b182",
"content_id": "b174a2dca671d136be9d157c6ccd7c160ecd6bfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3285,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 83,
"path": "/preprocessor.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport scorecardpy as sc\n\n\nclass Preprocessor:\n def __init__(self, data):\n self.df = data\n\n def adjust_excel(self):\n self.df.drop('Unnamed: 0', axis=1, inplace=True)\n self.df.drop('Unnamed: 1', axis=1, inplace=True)\n self.rename_columns()\n self.df.drop(self.df.tail(3).index, inplace=True)\n return self.df\n\n def rename_columns(self):\n keys = ['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4', 'Unnamed: 5', 'Unnamed: 6', 'Unnamed: 7',\n 'Unnamed: 8', 'Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11', 'Unnamed: 12']\n values = ['ASSESSMENT_YEAR', 'PRODUCT_DEMAND', 'OWNERS_MANAGEMENT', 'ACCESS_CREDIT', 'PROFITABILITY',\n 'SHORT_TERM_LIQUIDITY', 'MEDIUM_TERM_LIQUIDITY', 'GROUP_FLAG', 'TURNOVER', 'INDUSTRY', 'DEFAULT_FLAG']\n d = {}\n for i in range(len(keys)):\n d[keys[i]] = values[i]\n self.df.rename(columns=d, inplace=True)\n self.df.drop(self.df.index[0], inplace=True)\n pass\n\n # preprocessing\n def convert_numbers_to_numeric(self):\n to_convert = []\n for column in self.df.columns:\n if column != 'INDUSTRY':\n to_convert.append(column)\n self.df[to_convert] = self.df[to_convert].apply(pd.to_numeric, errors='coerce')\n\n @staticmethod\n def encode_categorical(train_or_test):\n return sc.one_hot(train_or_test, cols_skip=['TURNOVER', 'PRODUCT_DEMAND', 'ACCESS_CREDIT', 'OWNERS_MANAGEMENT',\n 'SHORT_TERM_LIQUIDITY', 'MEDIUM_TERM_LIQUIDITY', 'PROFITABILITY',\n 'ASSESSMENT_YEAR'], cols_encode='INDUSTRY')\n\n def split(self):\n train, test = sc.split_df(self.df, y='DEFAULT_FLAG', ratio=0.8, seed=186).values()\n return train, test\n\n def woe_transform(self, train, test):\n # includes var filtering and one-hot encoding of 'INDUSTRY' column in all data\n train = sc.var_filter(train, 'DEFAULT_FLAG', var_kp='INDUSTRY')\n self.encode_categorical(train)\n bins = sc.woebin(train, 'DEFAULT_FLAG')\n train_woe = sc.woebin_ply(train, bins)\n train_columns = ['ACCESS_CREDIT', 'ASSESSMENT_YEAR', 'MEDIUM_TERM_LIQUIDITY', 'OWNERS_MANAGEMENT',\n 'PRODUCT_DEMAND',\n 'PROFITABILITY', 'SHORT_TERM_LIQUIDITY', 'TURNOVER', 'DEFAULT_FLAG', 'INDUSTRY']\n test_selected = test[train_columns]\n self.encode_categorical(test_selected)\n test_woe = sc.woebin_ply(test_selected, bins)\n\n return train_woe, test_woe\n\n @staticmethod\n def provide_x_y(train, test):\n train = train.to_numpy()\n test = test.to_numpy()\n\n n = np.shape(train)[1]\n\n x_train = train[:, 1:n]\n y_train = train[:, 0]\n x_test = test[:, 1:n]\n y_test = test[:, 0]\n\n return x_train, y_train, x_test, y_test\n\n def combine(self):\n self.adjust_excel()\n self.convert_numbers_to_numeric()\n train, test = self.split()\n train_woe, test_woe = self.woe_transform(train, test)\n x_train, y_train, x_test, y_test = self.provide_x_y(train_woe, test_woe)\n\n return x_train, y_train, x_test, y_test\n"
},
{
"alpha_fraction": 0.6052705645561218,
"alphanum_fraction": 0.6178861856460571,
"avg_line_length": 31.135135650634766,
"blob_id": "ffb11363e60dd4a3db606987256ca28b6ddd67fa",
"content_id": "3d5a9772636fa615b466c9645252514356d0a9c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3567,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 111,
"path": "/implied_model.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "from preprocessor import Preprocessor\nimport pandas as pd\nimport numpy as np\n\n# TODO ML score metrics should be moved elsewhere as they are general\n\npd.set_option('display.max_columns', 10)\ndf = pd.read_excel(\"Project 2 - Data.xls\")\n\n# custom preprocessing\npreprocessor = Preprocessor(df)\npreprocessor.adjust_excel()\npreprocessor.convert_numbers_to_numeric()\ndf.drop(columns=['ASSESSMENT_YEAR', 'GROUP_FLAG', 'TURNOVER', 'INDUSTRY'], axis=1, inplace=True)\nprint(df.columns)\ny_test = df['DEFAULT_FLAG'].to_numpy() #here test data is all data\n\n\n# print(df.columns)\n\nclass ImpliedModel:\n def __init__(self, data):\n self.data = data\n self.x = data[:, :6]\n self.y = data[:, 6]\n\n weights = 0.01 * np.array([20, 10, 10, 15, 25, 20])\n\n def score(self):\n return (np.sum(np.multiply(self.weights, self.x), axis=1)) / len(self.weights)\n\n def pd(self):\n exp = np.exp(-0.1 * self.score())\n denominator = 1 + exp\n return 1 - (1 / denominator) # probability of reverse of non-default\n\n # simple prediction, but could be done using sigmoid etc.\n def predict(self):\n prob_default = self.pd()\n prediction = []\n for probability in prob_default:\n if probability > 0.5:\n prediction.append(1)\n else:\n prediction.append(0)\n return prediction\n\n # accuracy is (true_positive+true_negative)/total\n def accuracy(self, y_test):\n score = 0\n prediction = self.predict()\n for i in range(len(prediction)):\n if prediction[i] == y_test[i]:\n score += 1\n return (self.true_positive(y_test) + self.true_negative(y_test)) / len(prediction)\n\n def precision(self, y_test):\n true_positive = self.true_positive(y_test)\n false_positive = np.sum(self.predict()) - true_positive\n return np.divide(true_positive, true_positive + false_positive)\n\n def recall(self, y_test):\n true_positive = self.true_positive(y_test)\n negative = len(y_test) - np.sum(self.predict())\n false_negative = negative - self.true_negative(y_test)\n\n return np.divide(true_positive, true_positive + false_negative)\n\n def true_positive(self, y_test):\n true_pos = 0\n prediction = self.predict()\n for i in range(len(prediction)):\n if y_test[i] == 1 & prediction[i] == y_test[i]:\n true_pos += 1\n return true_pos\n\n def true_negative(self, y_test):\n true_neg = 0\n prediction = self.predict()\n for i in range(len(prediction)):\n if y_test[i] == 0 & prediction[i] == y_test[i]:\n true_neg += 1\n return true_neg\n\n def f1_model_score(self, y_test):\n return 2 * np.divide(self.precision(y_test) * self.recall(y_test), self.precision(y_test) + self.recall(y_test))\n\n def model_score(self, y):\n score = 0\n prediction=self.predict()\n for i in range(len(prediction)):\n if prediction[i] == y[i]:\n score += 1\n return score/len(prediction)\n\n\nimplied_model = ImpliedModel(df.to_numpy())\nscore = implied_model.model_score(y_test)\nprecision = implied_model.precision(y_test)\nrecall=implied_model.recall(y_test)\n\npredict_proba = implied_model.pd()\nprediction = implied_model.predict()\nnumber_of_predicted_defaults = np.sum(prediction)\n\nprint('recall:',recall)\nprint('precision:',precision)\nprint('F1 score:',implied_model.f1_model_score(y_test))\nprint('accuracy:',implied_model.accuracy(y_test))\n\nprint('Score:', score)\n"
},
{
"alpha_fraction": 0.6130427122116089,
"alphanum_fraction": 0.6263622045516968,
"avg_line_length": 38.595890045166016,
"blob_id": "cb8c68c71d22d4a24ee78327d60ac7a58314c66f",
"content_id": "07a02d5f9a61234f61ca1b1dc7162ffef02ad673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5781,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 146,
"path": "/calibration_assessment.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom scipy.stats import norm\n\n\nclass Utils:\n def __init__(self, data):\n self.data = data\n\n def divide_to_groups(self, predicted_probability, n=10):\n \"\"\"\n :param predicted_probability: 1d array of PDs (nums between 0&1, 1=certain default)\n :param n: desired number of groups, defaults to 10\n :return: list od 2d arrays of PDs and default flagst\n \"\"\"\n observations = self.data[:, np.shape(self.data)[1] - 1]\n num_of_obs = np.shape(observations)[0]\n\n obs_and_predictions = np.column_stack((observations, predicted_probability))\n group_size = num_of_obs // n\n\n groups = []\n for i in range(n):\n if i <= n - 2:\n groups.append(obs_and_predictions[i * group_size:(i + 1) * group_size, :])\n else:\n groups.append(obs_and_predictions[(n - 1) * group_size:, :])\n return groups\n\n @staticmethod\n def avg_group_proba(group):\n \"\"\"\n :param group: 2d array of PDs and default flags\n :return: average PD in a group\n \"\"\"\n pds = group[:, 1] # sum of probabilities of defaults\n return np.sum(pds) / len(pds)\n\n def sorted_groups(self, predicted_probability, n=10):\n \"\"\"\n :param predicted_probability: 1d array of predicted default probabilities (1=certain default)\n :param n: number of groups\n :return: n groups of pairs in an array [realized default flag, PD](<-somehow in this order) sorted by avg groups PD in descending order\n \"\"\"\n groups = self.divide_to_groups(predicted_probability, n)\n avg_probs = [self.avg_group_proba(group) for group in groups]\n\n groups = np.column_stack((groups, avg_probs))\n\n sorted_groups = groups[groups[:, 1].argsort()]\n sorted_groups = np.flip(sorted_groups, axis=0)\n\n return sorted_groups[:, 0]\n\n\nclass CalibrationMetrics(Utils):\n def __init__(self, data):\n super().__init__(data)\n self.data = data\n\n # calibration accuracy\n def hosmer_lemeshow(self, groups):\n \"\"\"\n :groups: optional, list of 2d arrays of PDs and default flags\n :return: list of hosmer lemeshow statistic values for given groups\n \"\"\"\n hs = []\n for group in groups:\n total_number = np.shape(group)[0]\n number_of_defaults = np.sum(group[:, 0])\n realized_default_rate = number_of_defaults / total_number\n predicted_default_rate = self.avg_group_proba(group)\n hs_stat = np.divide((realized_default_rate - predicted_default_rate) ** 2,\n predicted_default_rate * (1 - predicted_default_rate)) * total_number\n hs.append(hs_stat)\n return hs\n\n def brier_score(self, groups):\n total_borrowers = 0\n bs_group = []\n for group in groups:\n borrowers_in_group = np.shape(group)[0]\n total_borrowers += borrowers_in_group\n number_of_defaults = np.sum(group[:, 0])\n realized_default_rate = number_of_defaults / borrowers_in_group\n predicted_default_rate = self.avg_group_proba(group)\n summed = realized_default_rate * (1 - realized_default_rate) + (\n predicted_default_rate - realized_default_rate) ** 2\n\n bs_group.append(borrowers_in_group * summed)\n bs = (1 / total_borrowers) * np.sum(bs_group)\n return bs\n\n def brier_skill_score(self, groups, total_defaults, num_of_obs=1161):\n pd_observed = total_defaults / num_of_obs\n return 1 - np.divide(self.brier_score(groups), pd_observed * (1 - pd_observed))\n\n def normal_approximation_bounds(self, group, q):\n borrowers_in_group = np.shape(group)[0]\n g_avg = self.avg_group_proba(group)\n upper_bound = norm.ppf((q + 1) / 2) * np.sqrt(np.divide((g_avg * (1 - g_avg)), borrowers_in_group))\n lower_bound = g_avg - upper_bound\n return lower_bound, upper_bound\n\n def 
normal_approximation_test(self, group):\n lower_bound_99, upper_bound_99 = self.normal_approximation_bounds(group, 0.99)\n lower_bound_95, upper_bound_95 = self.normal_approximation_bounds(group, 0.95)\n g_avg = self.avg_group_proba(group)\n if g_avg >= upper_bound_99:\n rating = 'Green'\n elif g_avg <= upper_bound_95:\n rating = 'Red'\n else:\n rating = 'Yellow'\n return rating\n\n\nfrom logistic_regression import predict_proba, x_test, y_test, total_test_defaults\n#from probit_regression import predict_proba, x_test, y_test, total_test_defaults\n\ny_test = np.reshape(y_test, (1161, 1))\ndata = np.concatenate((x_test, y_test), axis=1)\nutils = Utils(data)\ngroups = utils.sorted_groups(predict_proba, n=10)\nprint('lengths', [len(group) for group in groups])\ndefaults_in_groups = [np.sum(group[i][0] for i in range(len(group))) for group in groups]\nprint('defaults in groups:', defaults_in_groups)\nprint('PDs in groups', [utils.avg_group_proba(group) for group in groups])\n\ncalibration_metrics = CalibrationMetrics(utils)\nhs_statistics = calibration_metrics.hosmer_lemeshow(groups)\nbrier_score = calibration_metrics.brier_score(groups)\nbrier_skill_score = calibration_metrics.brier_skill_score(groups, total_test_defaults)\n\nprint('H-S statistics for groups', hs_statistics)\nprint('H-S statistic in total:', np.sum(hs_statistics))\nprint('Brier score: ', brier_score)\nprint('Brier skill score: ', brier_skill_score)\n\nfrom scipy.stats import chisquare\n\np = 1 - chisquare(hs_statistics, 8)[1]\np = \"{:.50f}\".format(float(p))\nprint('p value for H-S:', p)\n\ntraffic_lights = [calibration_metrics.normal_approximation_test(group) for group in groups]\nprint('traffic lights:', traffic_lights)\n"
},
{
"alpha_fraction": 0.6847618818283081,
"alphanum_fraction": 0.6933333277702332,
"avg_line_length": 24.585365295410156,
"blob_id": "ca4a92b0e9cc9398097412ff1a2395688484b899",
"content_id": "13a3e99a32f4ef1d9d9d8aaab825882d3ae3b1ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1050,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 41,
"path": "/probit_regression.py",
"repo_name": "zczakon/credit_scorecard",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom preprocessor import Preprocessor\nfrom statsmodels.discrete.discrete_model import Probit\n\npd.set_option('display.max_columns', 10)\ndf = pd.read_excel(\"Project 2 - Data.xls\")\n\npreprocessor = Preprocessor(df)\nx_train, y_train, x_test, y_test = preprocessor.combine()\n\nmodel = Probit(y_train, x_train)\nprobit_model = model.fit()\npredict_proba = probit_model.predict(x_test)\n\n\ndef predict(predict_proba):\n prediction = []\n for probability in predict_proba:\n if probability > 0.5:\n prediction.append(1)\n else:\n prediction.append(0)\n return prediction\n\n\ndef model_score(prediction, target):\n score = 0\n for i in range(len(prediction)):\n if prediction[i] == target[i]:\n score += 1\n return score / len(prediction)\n\n\nbinary_prediction = predict(predict_proba)\nscore = model_score(binary_prediction, y_test)\n\ntotal_test_defaults=np.sum(binary_prediction)\n\nprint('Predicted number of defaults', np.sum(binary_prediction))\nprint('Score:', score)\n\n"
}
] | 6 |
ZenT3600/Image-Filterer | https://github.com/ZenT3600/Image-Filterer | a9f0c14f1769eb40ab4685a640b10ee7568714f1 | 65d2a377bd24fe40c7b92adfb4189a993051480c | 7a8c0c153c3dd7d1c83dbb923f7d00072014d89f | refs/heads/master | 2020-05-17T18:59:18.902710 | 2019-04-30T05:15:41 | 2019-04-30T05:15:41 | 183,901,494 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5601418614387512,
"alphanum_fraction": 0.5831345915794373,
"avg_line_length": 33.01713180541992,
"blob_id": "dae356aed8b0e015db3c096824a1ce357741d677",
"content_id": "2698b307cd17e0af4a720d5645dbca324f4194dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16353,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 467,
"path": "/ImageFilterer.py",
"repo_name": "ZenT3600/Image-Filterer",
"src_encoding": "UTF-8",
"text": "from openpyxl import *\r\nimport PIL\r\nfrom PIL import Image\r\nfrom PIL import ImageEnhance\r\nimport colorsys\r\nfrom webcolors import *\r\nfrom math import *\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter.ttk import *\r\nfrom datetime import datetime\r\ndef on_button(action, value):\r\n try:\r\n fh = open(\"log.txt\", \"r\")\r\n except:\r\n log = open(\"log.txt\", \"w+\")\r\n log.write(\"Start Of The Log History:\")\r\n log.close()\r\n progressbar.place(x=300, y=215, anchor=\"center\")\r\n disable()\r\n image = txt.get() + variable.get()\r\n #print(image)\r\n #image = input(\"Image Name: \")\r\n #print(image)\r\n print(\"\")\r\n try:\r\n im = PIL.Image.open(image)\r\n rgb_im = im.convert('RGB')\r\n logValue(\": File Name: \", image)\r\n logAction(\": Image Opened Successfully \\n\")\r\n except:\r\n messagebox.showerror(\"Error\", \"It seems like the image name you put in the field doesn't exist. Try writing it again or make sure that the image is in the same path as this program\")\r\n progressbar.place_forget()\r\n enable()\r\n logAction(\": Error Finding File \\n\")\r\n size = im.size\r\n print(\"Width:\", size[0])\r\n print(\"Height:\", size[1])\r\n print(\"\")\r\n #print(size)\r\n nPixel = size[0] * size[1]\r\n progressbar['maximum'] = nPixel\r\n #print(nPixel)\r\n rArr = []\r\n gArr = []\r\n bArr = []\r\n x = 0\r\n y = 0\r\n print(\"Getting colors, this action could take some time depending on the image size\")\r\n for i1 in range (0, size[1]):\r\n for i2 in range (0, size[0]):\r\n #print(\"Coordinate:\", x, y)\r\n r, g, b = rgb_im.getpixel((x, y))\r\n rArr.append(r)\r\n gArr.append(g)\r\n bArr.append(b)\r\n x = x + 1\r\n x = 0\r\n y = y + 1\r\n logAction(\": Image's pixel color analyzed successfully \\n\")\r\n print(\"\")\r\n print(\"The image contains\", nPixel, \"pixels\")\r\n print(\"\")\r\n index = 0\r\n index_color = 0\r\n num = 1\r\n #n_alf = int(len(alfabeto))\r\n #if (size[0] >= len(alfabeto)):\r\n #lettera = floor(size[0] / n_alf)\r\n #l = 0\r\n #for i in range (0, lettera):\r\n #for i in range (0, 26):\r\n #alfabeto.append(alfabeto[l] + alfabeto[i])\r\n #l = l + 1\r\n #print(alfabeto)\r\n if (value <= 1):\r\n logValue(\": Filter Name: \", action)\r\n if (action == \"copy\"):\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"grayscale\"):\r\n img = grayscale(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"sepia\"):\r\n img = sepia(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"negative\"):\r\n img = negative(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"saturate\"):\r\n img = saturate(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"desaturate\"):\r\n img = desaturate(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"lighten\"):\r\n img = lighten(nPixel, size, rArr, gArr, bArr, value)\r\n elif (action == \"darken\"):\r\n img = darken(nPixel, size, rArr, gArr, bArr, value)\r\n logAction(\": Filter Added Successfully \\n\")\r\n logValue(\": Filter Value: \", str(value))\r\n elif (value < 0 or value > 1):\r\n messagebox.showerror(\"Error\", \"Please Enter A Filter Value Between 1 and 0\")\r\n progressbar.place_forget()\r\n enable()\r\n logAction(\": Error With Filter Value Ammount \\n\")\r\n return\r\n print(\"\")\r\n print(\"Process Completed!\")\r\n print(\"\")\r\n img.save(action + \"-\" + str(image))\r\n logAction(\": Process Completed \\n\")\r\n messagebox.showinfo(\"OK!\", \"Process completed, the file was saved\")\r\n 
progressbar.place_forget()\r\n enable()\r\n \r\ndef copy(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = PIL.Image.new( 'RGB', (size[0],size[1]), \"black\")\r\n pixels = img.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n pixels[i2, i1] = (rArr[i3], gArr[i3], bArr[i3])\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n return(img)\r\n\r\ndef grayscale(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = PIL.Image.new( 'RGB', (size[0],size[1]), \"black\")\r\n pixels = img.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n r = rArr[i3]\r\n g = gArr[i3]\r\n b = bArr[i3]\r\n rg = round((r + g + b)/3)\r\n gg = rg\r\n bg = rg\r\n pixels[i2, i1] = (rg, gg, bg)\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n return(img)\r\n\r\ndef sepia(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n sepia = PIL.Image.new( 'RGB', (size[0],size[1]), \"black\")\r\n pixels = sepia.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n r = rArr[i3]\r\n g = gArr[i3]\r\n b = bArr[i3]\r\n rs = round(0.393 * r + 0.769 * g + 0.189 * b)\r\n gs = round(0.349 * r + 0.686 * g + 0.168 * b)\r\n bs = round(0.272 * r + 0.534 * g + 0.131 * b)\r\n if (rs > 255):\r\n pixels[i2, i1] = (255, gs, bs)\r\n else:\r\n pixels[i2, i1] = (rs, gs, bs)\r\n if (gs > 255):\r\n pixels[i2, i1] = (rs, 255, bs)\r\n else:\r\n pixels[i2, i1] = (rs, gs, bs)\r\n if (bs > 255):\r\n pixels[i2, i1] = (rs, gs, 255)\r\n else:\r\n pixels[i2, i1] = (rs, gs, bs)\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n img = PIL.Image.blend(img, sepia, value)\r\n return(img)\r\n\r\ndef negative(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = PIL.Image.new( 'RGB', (size[0],size[1]), \"black\")\r\n pixels = img.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n r = rArr[i3]\r\n g = gArr[i3]\r\n b = bArr[i3]\r\n rn = 255 - r\r\n gn = 255 - g\r\n bn = 255 - b\r\n pixels[i2, i1] = (rn, gn, bn)\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n return(img)\r\n\r\ndef saturate(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n saturate = copy(nPixel, size, rArr, gArr, bArr, value)\r\n pixels = saturate.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n converter = ImageEnhance.Color(saturate)\r\n saturate = converter.enhance(value*2)\r\n img = PIL.Image.blend(img, saturate, value)\r\n return(img)\r\n\r\ndef desaturate(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n desaturate = copy(nPixel, size, rArr, gArr, bArr, value)\r\n pixels = 
desaturate.load()\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n converter = ImageEnhance.Color(desaturate)\r\n desaturate = converter.enhance(value/2)\r\n img = PIL.Image.blend(img, desaturate, value)\r\n return(img)\r\n\r\ndef lighten(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n light = PIL.Image.new( 'RGB', (size[0],size[1]), \"white\")\r\n img = PIL.Image.blend(img, light, value)\r\n return(img)\r\n\r\ndef darken(nPixel, size, rArr, gArr, bArr, value):\r\n maxValue = nPixel\r\n i3 = 0\r\n img = copy(nPixel, size, rArr, gArr, bArr, value)\r\n print(\"Adding Filters\")\r\n for i1 in range (0, size[1]):\r\n index = 0\r\n for i2 in range (0, size[0]):\r\n i3 = i3 + 1\r\n currentValue = nPixel\r\n progressbar[\"value\"] = (maxValue - (maxValue - i3))\r\n progressbar.update()\r\n dark = PIL.Image.new( 'RGB', (size[0],size[1]), \"black\")\r\n img = PIL.Image.blend(img, dark, value)\r\n return(img)\r\n\r\ndef cred_start(sender):\r\n credits()\r\n\r\ndef credits():\r\n messagebox.showinfo(\"About\", \"Credits: \\n Matteo Leggio \\n [email protected]\")\r\n \r\ndef help():\r\n messagebox.showinfo(\"Help\", \"Insert the name of the image you want to add a filter to (it must be in the same directory as this program) in the 'image name' entry, then click the 'OK' button and watch the program do it's work\")\r\n \r\ndef github():\r\n messagebox.showinfo(\"GitHub\", \"Github Repository: \\n github.com/ZenT3600/Image-Filterer\")\r\n \r\ndef copy_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Copy\", \"Simply Copies The Image, Value Doesn't Affect It\")\r\n\r\ndef grayscale_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Grayscale\", \"Turns The Image Gray, Value Doesn't Affect It\")\r\n\r\ndef sepia_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Sepia\", \"It's Grayscale But It Makes It Yellowish\")\r\n \r\ndef negative_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Negative\", \"Inverts The Colors Of The Image, Value Doesn't Affect It\")\r\n \r\ndef saturate_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Saturate\", \"Makes The Image Saturation Go Up\")\r\n \r\ndef desaturate_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Desaturate\", \"Makes The Image Saturation Go Down\")\r\n \r\ndef lighten_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Lighten\", \"Makes The Image Lighter\")\r\n \r\ndef darken_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Darken\", \"Makes The Image Darker\")\r\n \r\ndef value_expl():\r\n print(\"\")\r\n messagebox.showinfo(\"Filter Value\", \"0 = Original Image \\n 1 = Only Filter \\n 0.5 = Half And Half\")\r\n\r\ndef f_quitter():\r\n logAction(\": Quit The Program: \\n\")\r\n win.destroy()\r\n\r\ndef func_quitter(sender):\r\n f_quitter()\r\n \r\ndef on_closing():\r\n logAction(\": Program Closed Prematurely: \\n\")\r\n win.destroy()\r\n\r\ndef func_start(x, y):\r\n action = x\r\n ammount = y\r\n #print(y)\r\n on_button(action, ammount)\r\n \r\n#def bind_func_start(sender, x):\r\n# action = x\r\n# on_button(action)\r\n \r\ndef enable():\r\n 
btn_copy.config(state=NORMAL)\r\n btn_gray.config(state=NORMAL)\r\n btn_sepia.config(state=NORMAL)\r\n btn_nega.config(state=NORMAL)\r\n btn_satu.config(state=NORMAL)\r\n btn_desatu.config(state=NORMAL)\r\n btn_light.config(state=NORMAL)\r\n btn_dark.config(state=NORMAL)\r\n quitter.config(state=NORMAL)\r\n txt.config(state=NORMAL)\r\n\r\ndef disable():\r\n btn_copy.config(state=DISABLED)\r\n btn_gray.config(state=DISABLED)\r\n btn_sepia.config(state=DISABLED)\r\n btn_nega.config(state=DISABLED)\r\n btn_satu.config(state=DISABLED)\r\n btn_desatu.config(state=DISABLED)\r\n btn_light.config(state=DISABLED)\r\n btn_dark.config(state=DISABLED)\r\n quitter.config(state=DISABLED)\r\n txt.config(state=DISABLED)\r\n \r\ndef logAction(action):\r\n now = datetime.now()\r\n log = open(\"log.txt\", \"a+\")\r\n log.write(str(now))\r\n log.write(action)\r\n log.close()\r\n \r\ndef logValue(action, value):\r\n now = datetime.now()\r\n log = open(\"log.txt\", \"a+\")\r\n log.write(str(now))\r\n log.write(action)\r\n log.write(value)\r\n log.write(\"\\n\")\r\n log.close()\r\n \r\nwin = Tk()\r\nlog = open(\"log.txt\", \"a+\")\r\nlog.write(\"*******************\\n\")\r\nlog.close()\r\nwin.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\nwin.resizable(False, False)\r\nwin.title(\"Image Filterer\")\r\nwin.geometry(\"600x400\")\r\n\r\nlbl = Label(win, text=\"Image Filterer\", font=(\"Verdana\", 30, \"bold\"))\r\nlbl.place(x=300, y=25, anchor=\"center\")\r\n\r\nlbl_2 = Label(win, text=\"Image Name\", font=(\"Verdana\", 10, \"bold\"))\r\nlbl_2.place(x=300, y=155, anchor=\"center\")\r\n\r\nlbl_3 = Label(win, text=\"Filter Value\", font=(\"Verdana\", 10, \"bold\"))\r\nlbl_3.place(x=45, y=155, anchor=\"center\")\r\n\r\ndesc = Label(win, text=\"Add a filter to any image you want \\n Consult 'Filters' for a more detailed explanation\", font=(\"Verdana\", 10), justify=CENTER)\r\ndesc.place(x=300, y=80, anchor=\"center\")\r\n\r\ntxt = Entry(win ,width=70)\r\ntxt.place(x=300, y=175, anchor=\"center\")\r\nammount = Entry(win ,width=8)\r\nammount.place(x=45, y=175, anchor=\"center\")\r\n\r\nvariable = StringVar(win)\r\nvariable.set(\".png\")\r\n\r\ndropdown = OptionMenu(win, variable, \".png\", \".png\", \".jpg\", \".jpeg\")\r\ndropdown.place(x=545, y=175, anchor=\"center\")\r\n\r\nprogressbar = Progressbar(win,orient=\"horizontal\",length=300,mode=\"determinate\")\r\n\r\nbtn_copy = Button(win, text=\"Copy\", command=lambda: func_start(\"copy\", float(ammount.get())))\r\nbtn_copy.place(x=300, y=260, anchor=\"center\")\r\n#btn_copy.bind('<Return>', lambda: bind_func_start(\"copy\"))\r\nbtn_gray = Button(win, text=\"Grayscale\", command=lambda: func_start(\"grayscale\", float(ammount.get())))\r\nbtn_gray.place(x=300, y=300, anchor=\"center\")\r\n#btn_gray.bind('<Return>', lambda: bind_func_start(\"grayscale\"))\r\nbtn_sepia = Button(win, text=\"Sepia\", command=lambda: func_start(\"sepia\", float(ammount.get())))\r\nbtn_sepia.place(x=380, y=260, anchor=\"center\")\r\n#btn_sepia.bind('<Return>', lambda: bind_func_start(\"sepia\"))\r\nbtn_nega = Button(win, text=\"Negative\", command=lambda: func_start(\"negative\", float(ammount.get())))\r\nbtn_nega.place(x=380, y=300, anchor=\"center\")\r\n#btn_nega.bind('<Return>', lambda: bind_func_start(\"negative\"))\r\nbtn_satu = Button(win, text=\"Saturate\", command=lambda: func_start(\"saturate\", float(ammount.get())))\r\nbtn_satu.place(x=220, y=260, anchor=\"center\")\r\n#btn_satu.bind('<Return>', lambda: bind_func_start(\"saturate\"))\r\nbtn_desatu = Button(win, text=\"Desaturate\", 
command=lambda: func_start(\"desaturate\", float(ammount.get())))\r\nbtn_desatu.place(x=220, y=300, anchor=\"center\")\r\n#btn_desatu.bind('<Return>', lambda: bind_func_start(\"desaturate\"))\r\nbtn_light = Button(win, text=\"Lighten\", command=lambda: func_start(\"lighten\", float(ammount.get())))\r\nbtn_light.place(x=140, y=260, anchor=\"center\")\r\n#\r\nbtn_dark = Button(win, text=\"Darken\", command=lambda: func_start(\"darken\", float(ammount.get())))\r\nbtn_dark.place(x=460, y=260, anchor=\"center\")\r\n#\r\n\r\n\r\nmenubar = Menu(win)\r\nhelpmenu = Menu(menubar, tearoff=0)\r\nhelpmenu.add_command(label=\"Help\", command=help)\r\nhelpmenu.add_command(label=\"About\", command=credits)\r\nhelpmenu.add_command(label=\"Github\", command=github)\r\nmenubar.add_cascade(label=\"Help & Links\", menu=helpmenu)\r\nfiltersmenu = Menu(menubar, tearoff=0)\r\nfiltersmenu.add_command(label=\"Copy\", command=copy_expl)\r\nfiltersmenu.add_command(label=\"Grayscale\", command=grayscale_expl)\r\nfiltersmenu.add_command(label=\"Sepia\", command=sepia_expl)\r\nfiltersmenu.add_command(label=\"Negative\", command=negative_expl)\r\nfiltersmenu.add_command(label=\"Saturate\", command=saturate_expl)\r\nfiltersmenu.add_command(label=\"Desaturate\", command=desaturate_expl)\r\nfiltersmenu.add_command(label=\"Lighten\", command=lighten_expl)\r\nfiltersmenu.add_command(label=\"Darken\", command=darken_expl)\r\nfiltersmenu.add_command(label=\"Filter Value\", command=value_expl)\r\nmenubar.add_cascade(label=\"Filters\", menu=filtersmenu)\r\nwin.config(menu=menubar)\r\n\r\nquitter = Button(win, width=6, text = \"Quit\", command=f_quitter)\r\nquitter.place(x=560, y=380, anchor=\"center\")\r\nquitter.bind('<Return>', func_quitter)\r\nwin.mainloop()\r\n"
}
] | 1 |
Mrzhangxiaohua/TCVisiable | https://github.com/Mrzhangxiaohua/TCVisiable | 2267b6eeaef360fb3cc0301bdfd28ce3f96cf34a | d56a14de67a4647607b6dfb867b6a65660843d00 | 266469dafcaaa33261224a1ce71cefc7e187737d | refs/heads/master | 2020-04-25T13:54:53.853154 | 2019-04-02T02:46:21 | 2019-04-02T02:46:21 | 172,823,517 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 17,
"blob_id": "b53637cec641d1ef76c146b641743c1154f0e29b",
"content_id": "905a66b645cabbbc903540aaa7553612c5ad4733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/config.py",
"repo_name": "Mrzhangxiaohua/TCVisiable",
"src_encoding": "UTF-8",
"text": "DEBUG = True,\nJSON_AS_ASCII = False"
},
{
"alpha_fraction": 0.4949614703655243,
"alphanum_fraction": 0.5008891820907593,
"avg_line_length": 34.14583206176758,
"blob_id": "e386a62b270aae3c449ed9108bc08469f6d00842",
"content_id": "af95827eb2214865866651e5175d8e328d4593dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1747,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 48,
"path": "/static/js/getLocation.js",
"repo_name": "Mrzhangxiaohua/TCVisiable",
"src_encoding": "UTF-8",
"text": "// 地图js,主界面的地图交互,取学生id\nvar options = {\n size: BMAP_POINT_SIZE_SMALL,\n shape: BMAP_POINT_SHAPE_STAR,\n color: '#D3D120'\n}\n$.ajax({\n url: \"testdb\",\n type: \"post\",\n dataType: \"json\",\n success: function (result) {\n console.log(result)\n var points = []\n var myGeo = new BMap.Geocoder();\n var stuLocation = {}\n\n function getLocation(j) {\n myGeo.getPoint(result[j].bf_NativePlace, function (point) {\n if (point) {\n var random = Math.random() * 0.1\n points.push(new BMap.Point(point.lng - random, point.lat - random));\n }\n return setTimeout(function () {\n // console.log(result[j].bf_StudentID,point)\n // console.log(random)\n stuLocation[(Number(point.lng) - Number(random)) + ',' + (Number(point.lat) - random)] = {\n \"stuId\": result[j].bf_StudentID\n }\n }, 700)\n })\n }\n\n for (var j = 0; j < result.length; j++) {\n getLocation(j)\n }\n\n var pointCollection = new BMap.PointCollection(points, options);\n map.addOverlay(pointCollection);\n console.log(stuLocation)\n\n pointCollection.addEventListener('click', function (e) {\n // console.log(stuLocation[e.point.lng + ',' + e.point.lat])\n var stuId = stuLocation[e.point.lng + ',' + e.point.lat]['stuId'];\n grade(stuId);\n // alert('单击点的坐标为:' + e.point.lng + ',' + e.point.lat + \"学生ID\" + stuLocation[e.point.lng + ',' + e.point.lat]['stuId']); // 监听点击事件\n });\n }\n})\n"
},
{
"alpha_fraction": 0.4635114073753357,
"alphanum_fraction": 0.47668981552124023,
"avg_line_length": 41.76969528198242,
"blob_id": "aab6f66f279e4e46bdcd9c89de838f7065abd6c7",
"content_id": "89391cb713ff049837a66b13a68529c1efd2ce83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7773,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 165,
"path": "/app.py",
"repo_name": "Mrzhangxiaohua/TCVisiable",
"src_encoding": "UTF-8",
"text": "from flask import Flask, current_app, g, render_template, jsonify, request\nimport sqlite3, json\n\napp = Flask(__name__)\napp.config.from_object('config')\nDATABASE = 'data/data.db'\n\n\ndef connect_db():\n return sqlite3.connect(DATABASE)\n\n\[email protected]_request\ndef before_request():\n g.db = connect_db()\n\n\[email protected]_request\ndef teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()\n\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\n\[email protected]('/course')\ndef course():\n seven_select_three = [[\"政治\", \"历史\", \"地理\"],\n [\"政治\", \"历史\", \"物理\"],\n [\"政治\", \"历史\", \"化学\"],\n [\"政治\", \"历史\", \"通用技术\"],\n [\"政治\", \"地理\", \"物理\"],\n [\"政治\", \"地理\", \"化学\"],\n [\"政治\", \"地理\", \"生物\"],\n [\"政治\", \"地理\", \"通用技术\"],\n [\"政治\", \"物理\", \"化学\"],\n [\"政治\", \"物理\", \"生物\"],\n [\"政治\", \"物理\", \"通用技术\"],\n [\"政治\", \"化学\", \"生物\"],\n [\"政治\", \"化学\", \"通用技术\"],\n [\"政治\", \"生物\", \"通用技术\"],\n [\"历史\", \"地理\", \"物理\"],\n [\"历史\", \"地理\", \"化学\"],\n [\"历史\", \"地理\", \"生物\"],\n [\"历史\", \"地理\", \"通用技术\"],\n [\"历史\", \"物理\", \"化学\"],\n [\"历史\", \"物理\", \"生物\"],\n [\"历史\", \"物理\", \"通用技术\"],\n [\"历史\", \"化学\", \"生物\"],\n [\"历史\", \"化学\", \"通用技术\"],\n [\"历史\", \"生物\", \"通用技术\"],\n [\"地理\", \"物理\", \"化学\"],\n [\"地理\", \"物理\", \"生物\"],\n [\"地理\", \"物理\", \"通用技术\"],\n [\"地理\", \"化学\", \"生物\"],\n [\"地理\", \"化学\", \"通用技术\"],\n [\"地理\", \"生物\", \"通用技术\"],\n [\"物理\", \"化学\", \"生物\"],\n [\"物理\", \"化学\", \"通用技术\"],\n [\"物理\", \"生物\", \"通用技术\"],\n [\"化学\", \"生物\", \"通用技术\"]]\n cur = g.db.execute('')\n\n return render_template('course.html')\n\n\[email protected]('/groupdata')\ndef groupData():\n # 找到高三的班级id,一共10个班级\n sql_findClassId = \" select distinct cla_id, cla_Name from student_info where cla_Name like '%高三%' order by cla_id asc \"\n cur = g.db.execute(sql_findClassId)\n fetchData = [[row[0], row[1]] for row in cur.fetchall()[2:-2]]\n entries = [k[0] for k in fetchData]\n className = [k[1] for k in fetchData]\n print(entries)\n print(className)\n # 对每个班级进行成绩的统计,每个人考七门\n sql_findClassStuInf = \" select a.bf_StudentID, a.cla_Name, a.cla_id, a.cla_term, b.exam_numname,b.mes_sub_id, \" \\\n \" b.mes_sub_name,b.exam_term,b.mes_Score, b.mes_Z_Score,b.mes_T_Score, mes_dengdi \" \\\n \" from student_info a left join chengji b on bf_StudentID=mes_StudentID \" \\\n \" where cla_Name like '%高三%' AND mes_Z_Score!='' and mes_T_Score!='' and mes_dengdi!='' \" \\\n \" and cla_term='2018-2019-1' and exam_numname='2017学年第二学期高一二平时成绩1' and cla_id=\"\n kemu = ['语文', '数学', '英语', '物理', '化学', '生物', '政治', '历史', '地理', '技术']\n sumscore = []\n for i in entries:\n cur1 = g.db.execute(sql_findClassStuInf + str(i))\n entries = [dict(bf_StudentID=row[0], cla_Name=row[1], cla_id=row[2], cla_term=row[3],\n exam_numname=row[4], mes_sub_id=row[5], mes_sub_name=row[6], exam_term=row[7],\n mes_Score=row[8], mes_Z_Score=row[9], mes_T_Score=row[10], mes_dengdi=row[11])\n for row in cur1.fetchall()]\n classGrade = [[] for i in range(10)]\n for entrie in entries:\n if entrie['mes_sub_name'] == '语文':\n classGrade[0].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '数学':\n classGrade[1].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '英语':\n classGrade[2].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '物理':\n classGrade[3].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '化学':\n classGrade[4].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '生物':\n classGrade[5].append(entrie['mes_Score'])\n elif 
entrie['mes_sub_name'] == '政治':\n classGrade[6].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '历史':\n classGrade[7].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '地理':\n classGrade[8].append(entrie['mes_Score'])\n elif entrie['mes_sub_name'] == '技术':\n classGrade[9].append(entrie['mes_Score'])\n sumscore.append(classGrade)\n print(sumscore)\n d = {'data': sumscore, 'classId': entries, 'kind': kemu, 'className':className}\n return jsonify(d)\n\n\[email protected]('/group')\ndef group():\n return render_template('group.html')\n\[email protected]('/testdb')\ndef testdb():\n sql_test = \"select distinct exam_numname from chengji where exam_term='2017-2018-1'AND mes_Z_Score!='' and mes_T_Score!='' and mes_dengdi!=''\"\n cur = g.db.execute(sql_test)\n entries = [row[0] for row in cur.fetchall()]\n print(entries)\n return jsonify(entries)\n\n\[email protected]('/getStuId', methods=['GET', 'POST'])\ndef getStuId():\n a = request.args\n if a:\n data = a['data']\n print(data)\n # 找出学生考试完全信息\n sql = \"select a.*,b.EXAM_KIND_NAME from chengji a left join exam_type b on a.exam_type=b.EXAM_KIND_ID where mes_StudentID= \" + data + \" and mes_sub_id !='' and mes_Z_Score!=''\"\n cur = g.db.execute(sql)\n entries = [dict(mes_TestID=row[0], exam_number=row[1], exam_numname=row[2], mes_sub_id=row[3],\n mes_sub_name=row[4], exam_term=row[5], exam_type=row[6], mes_StudentID=row[8],\n mes_Score=row[9], mes_Z_Score=row[10], mes_T_Score=row[11], mes_dengdi=row[12],\n EXAM_KIND_ID=row[13])\n for row in cur.fetchall()]\n # 找出学生考试时间类别,语文数学。。。\n sql1 = \" select a.mes_sub_name from chengji a left join exam_type b on a.exam_type=b.EXAM_KIND_ID \" \\\n \" where mes_StudentID= \" + data + \" and mes_sub_id !='' and mes_Z_Score!='' group by mes_sub_name \"\n cur1 = g.db.execute(sql1)\n entries1 = [dict(mes_sub_name=row[0]) for row in cur1.fetchall()]\n # 找出学生的考试时间\n sql2 = \" select distinct a.exam_numname from chengji a left join exam_type b on a.exam_type=b.EXAM_KIND_ID \" \\\n \" where mes_StudentID= \" + data + \" and mes_sub_id !='' and mes_Z_Score!='' \"\n cur2 = g.db.execute(sql2)\n entries2 = [dict(exam_numname=row[0]) for row in cur2.fetchall()]\n d = {\"data\": entries, \"examType\": entries1, \"examTime\": entries2}\n return jsonify(d)\n return jsonify('fail')\n\n\nif __name__ == '__main__':\n app.run()\n"
}
] | 3 |
lacion/FFXIV-Log-Parser | https://github.com/lacion/FFXIV-Log-Parser | 5fff1436627c6d7e4ac4230987cea30c3849d0e2 | 57f66c0109947586577144b8a829fe5e45b78227 | 82b358ace3b1fc648e0808feb4f6739e9b2d3a1b | refs/heads/master | 2017-10-04T08:28:15.103107 | 2011-05-20T21:47:16 | 2011-05-20T21:47:16 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6031249761581421,
"alphanum_fraction": 0.621874988079071,
"avg_line_length": 23.69230842590332,
"blob_id": "facf9dba01300d69f527b36de0b42d667beab23f",
"content_id": "0ff0d89b3ecc70b113cc6cd58e996fbd6a76aa07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 13,
"path": "/setup.py",
"repo_name": "lacion/FFXIV-Log-Parser",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom cx_Freeze import setup, Executable\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup(\n name = \"FFXIVLogParser\",\n version = \"3.0\",\n description = \"FFXIV Log Parser\",\n executables = [Executable(\"logparse.py\", base = base, compress = True, icon=\"icon.ico\")])"
},
{
"alpha_fraction": 0.739409327507019,
"alphanum_fraction": 0.7471556663513184,
"avg_line_length": 60.656715393066406,
"blob_id": "359e23e1544d2c5aa5a2b9564d5299e792852fdc",
"content_id": "cfdeb0b55e46e8c28037dba7aa29052a1624a55d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8262,
"license_type": "no_license",
"max_line_length": 532,
"num_lines": 134,
"path": "/README.markdown",
"repo_name": "lacion/FFXIV-Log-Parser",
"src_encoding": "UTF-8",
"text": "# Log Parser Basics #\n\nThe logs that FFXIV produces have some issues that prevent deep statistics. A few notes on this:\n\nThere is no date/time for each log entry. Without this basic information it is not possible to gather information such as DPS.\nThere is no unique ID for each monster being attacked. This proves quite troublesome if you are attacking multiple monsters in the same group at the same time. This often shows up when fighting during Behest battles because each group has multiple of the same creature. This doesn't allow clean differentiation between one kill and the next. Because of this there will sometimes be missing or incomplete information. The log parser does what it can to extract the information about these but will often leave areas empty or missing.\nThe log format does not include damage due to damage over time. Because of this you will not see the total damage being done to you or the monster if a DOT is used.\nThe data gathered by the log viewer is reduced to just a small fraction of the overall log. An example output of a log entry being sent would be:\n\nThe website to browse your log data can be found at: <http://www.ffxivbattle.com/>\n\n### Example Data ###\n\n [{\n \"monster\": \"curious galago\",\n \"othermiss\": [[\"That Char\", \"Heavy Strike\"], [\"Some Other\", \"Light Shot\"], \n \"otherhitdamage\": [[\"51\", 0, \"Bleat\", \"Some Other\"]], \n \"othermonstermiss\": 0, \n \"damage\": [[\"219\", 0, \"Light Shot\"], [\"205\", 0, \"Light Shot\"]], \n \"datetime\": \"02/21/11 02:36:56\", \n \"skillpoints\": 331, \n \"exp\": 551, \n \"hitdamage\": [[\"423\", 0, \"Head Butt\"]], \n \"monstermiss\": 0, \n \"miss\": 0, \n \"class\": \"archery\", \n \"otherdamage\": [[\"202\", 0, \"Aura Pulse\", \"Some Other\"], [\"21\", 1, \"Light Slash\", \"That Char\"]]\n }]\n\nBelow we discuss what each of these are and how they are used.\n\n* monster - Name of the monster being attacked.\n* othermiss - Other characters in the party that have missed and the type of attack.\n* otherhitdamage - Attacks on others in your party, damage, critical, type of attack, party member.\n* damage - An array of damage delt by you, Amount of damage, critical, type of attack.\n* datetime - This is the date/time the data was gathered not when it happened. Since the log doesn't contain this we use it mostly for reference.\n* skillpoints - The total number of skill points gained from killing this monster.\n* exp - The total number of experience points gained from killing this monster.\n* hitdamage - Array of hits taken by you, damage taken, critical, type of attack by the monster.\n* miss - the total number of times your character missed when attacking.\n* class - the class your character was using when fighting this monster.\n* otherdamage - An array of damage delt by others in the party, Amount of damage, critical, type of attack, party member.\n\nThe damage is sent as individual values so we can calculate the average for each type of hit(critical or not) \nand the overall. This is also true for the hitdamage which is used to calculate the average as well as totals \nfor each fight. The miss number is used to determine the % accuracy of your hits based on the total number of \nhits you took.\n\n# What Happens to my Data? #\n\nThe log data that is uploaded gets added to a database of all users. This is then used to display the \ninformation listed on the Character Battle Stats page. 
We do not show the actual user data that is \nbeing uploaded and it is not a requirement that you upload the data under your own character name. \nFor consistency you should always use the same character name when uploading your data so we can prune \nand keep out duplicates.\n\nWith the addition of a password the party data is sent as well. The names on the website will only be shown if\nyou log in using character name and password. Otherwise it will show greek gods and titans in place of the names.\n\n# How the Script Works #\n\nThe script parses the logs one entry at a time and determines a code. This code tells you what type of data \nit is reading and then it parses the log text for the details such as damage, monster names and the like. \nThe script has two modes. The windows mode is very simple and it will silently gather data in the background \nevery minute and determine if there is anything new to upload. If it finds new data it will upload it and \nstart from there.\n\nYou can also run it from the command line to have a more detailed view of what the log parser is doing. In \nthis mode it will also let you see more information than the windowed version. You can parse out chat logs, \nbattle information and filter on specific monsters. It will also ask if you want to upload the information. \nIf you do not want to and just wish to view the data you can tell it that you do not want to upload and it \nwill spit out the raw data to your screen.\n\n### Command Line Parameters ###\n\n Usage:\n CharacterName PathToLogFiles LogDataType RunForever[True/False] FilterByMonster[optional]\n Example: \n python logparse.py \"c:\\Users\\\\Documents\\My Games\\Final Fantasy XIV\\user\\\\log\\\" battle false\n Available LogDataTypes:\n battle - view battle logs.\n chat - all chat logs.\n linkshell - linkshell chat logs.\n say - say chat logs.\n party - Party chat logs.\n\nExamples of FilterByMonster: \"ice elemental\" \"fat dodo\" \"warf rat\"\nif you are running the executable version an example would be:\n\nlogparse.exe \"c:\\Users\\\\Documents\\My Games\\Final Fantasy XIV\\user\\\\log\\\" battle false\n\n# Developers (fellow log parsers) #\n\n## Reading Binary Headers ##\n\nThere are a number of things this log parser does that handles just about every type of entry that the logs\noutput. Changes in the 3.2 version of the log parser introduced parsing of the binary header data to get the \noffsets. This offers a dramatic improvement on the quality of the output since it always knows the length of\nthe lines being parsed. The important part of this is the actual reading of the header:\n\n logfile = open(logfilename, 'rb')\n # read in the length of this files records\n headerparts = struct.unpack(\"2l\", logfile.read(8))\n headerlen = headerparts[1] - headerparts[0]\n header = struct.unpack(str(headerlen)+\"l\", logfile.read(headerlen*4))\n # header * 4 bytes for each and another 8 bytes for the header size\n offset = headerlen*4+8\n for headerpos in range(len(header)):\n if headerpos == 0:\n startbyte = offset\n endbyte = header[headerpos]\n else:\n startbyte = offset + header[headerpos-1]\n endbyte = header[headerpos] - header[headerpos-1]\n logfile.seek(startbyte)\n logitem = logfile.read(endbyte)[2:]\n\nUsing the struct import makes this a quick process to read the headers and loop through each log entry. 
Once\nit has been read it passes it to all available language parsers to interpret since there isn't a specific language\ndefined when the app starts up it has to assume every line is in any possible language. From here it hits the\nfunction_map which takes the code and maps it to a function call. Several of the function calls are ignored but all\nare defined so if someone wants to know what each type of log entry does this is a great starting point. Eventually\nall of the log types will be handled but for the statistics I am gathering this is good enough for now.\n\n## Auto-Translate ##\n\nThe auto-translate functionality attempts to convert the binary values in the logs for auto-translate into actual \ntext values. In English this is 99% complete but needs to be moved to a binary file to make for a smaller parser\nand to improve the parsing times. It still is quite fast even with the conversion of the binary values and checking\nbut could be much better. The goal is to eventually have a reference for every language and output auto-translate \nvalues but this is a labor intensive process. To do the conversion I type in game the auto-translate value in chat\nparse that log line along with the value displayed in chat. Then I add the binary value starting with 0x022E along\nwith the actual text into an array. It would be much better to be able to parse out the values from a resource\nfile in game but I haven't found where they store these translations so for now it is a manual process.\n"
},
{
"alpha_fraction": 0.545894980430603,
"alphanum_fraction": 0.5606537461280823,
"avg_line_length": 44.36906433105469,
"blob_id": "9d45b475c47b483105694277c86de8e95d2d8f43",
"content_id": "0bb320e8d541a39b5d7c6cbd044cefa1764309cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137762,
"license_type": "no_license",
"max_line_length": 716,
"num_lines": 2948,
"path": "/logparse.py",
"repo_name": "lacion/FFXIV-Log-Parser",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# encoding: utf-8\n# -*- coding: utf-8 -*-\n\n'''\n\nCopyright (C) 2010-2011 FFXIVBattle.com\nAll rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and \nassociated documentation files (the \"Log Parser\"), to deal in the Software without restriction, \nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, \nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do \nso, subject to the following conditions:\n\n1. Redistributions of source code must retain the above copyright notice, this list of conditions, \nand the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions \nand the following disclaimer in the documentation and/or other materials provided with the distribution, \nand in the same place and form as other copyright, license and disclaimer information.\n\n3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: \n\n\"This product includes software developed by FFXIVBattle.com (http://www.ffxivbattle.com/) and its contributors\", \n\nin the same place and form as other third-party acknowledgments. Alternately, this acknowledgment may appear in \nthe software itself, in the same form and location as other such third-party acknowledgments.\n\n4. Except as contained in this notice, the name of FFXIVBattle.com shall not be used in advertising or otherwise \nto promote the sale, use or other dealings in this Software without prior written authorization from FFXIVBattle.com.\n\nTHIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT \nSHALL FFXIVBATTLE.COM OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, \nOR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS \nOF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER \nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE \nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n'''\n\nimport traceback\n\nimport hashlib\nimport ConfigParser\nimport wx\nimport wx.richtext\nfrom threading import Thread \nimport pickle\nimport datetime\nimport gzip\nimport sys\nimport os\nimport glob\nimport copy\nimport time\nimport json\nimport urllib\nimport urllib2\nimport uuid\nimport shutil\nimport array\nimport struct\n\nfrom subprocess import Popen\nimport wx.lib.agw.hyperlink as hl\n\n# for installations that already have a config move the file to the config directory\ntry:\n if os.path.exists('logparser.cfg'):\n if os.path.exists('config/'):\n shutil.move('logparser.cfg', 'config/logparser.cfg')\nexcept:\n pass\n\n# check to see if the config subdir exists and if the root logparser.cfg does\n# not exist. 
This is so when there is a problem moving the existing config it\n# won't skip it.\nif os.path.exists('config/') and not os.path.exists('logparser.cfg'):\n configfile = 'config/logparser.cfg'\nelse:\n configfile = 'logparser.cfg'\n\nversion = 4.8\ncharactername = \"\"\ndoloop = 0\napp = None\nautotranslatearray = None\ncurrentlanguage = 'en'\n\n# Store the last log parsed\nlastlogparsed = 0\n\nguithread = None\n\ndef nullstrip(s):\n # Return a string truncated at the first null character.\n try:\n s = s[:s.index('\\x00')]\n except ValueError: # No nulls were found, which is okay.\n pass\n return s\n\nclass PasswordDialog(wx.Dialog):\n def __init__(self, parent, id, title, defaultPassword, language):\n wx.Dialog.__init__(self, parent, id, title, size=(285, 160))\n\n if language == 'en':\n wx.StaticText(self, -1, 'Enter Character Password (NOT your ffxiv password)\\r\\n* This is so only you can submit records for your character. If you don\\'t have a password type in a new one to set it.', (5,3), (280, 60))\n self.password = wx.TextCtrl(self, -1, defaultPassword, (5,65), (260, 22), style=wx.TE_PASSWORD)\n self.checkbox = wx.CheckBox(self, -1, \"Save Password\", (5,95), (110, 22))\n \n wx.Button(self, wx.ID_OK, 'Ok', (115, 95), (70, 30))\n wx.Button(self, wx.ID_CANCEL, 'Cancel', (195, 95), (70, 30))\n else:\n wx.StaticText(self, -1, u'文字パスワード(はなく、あなたのFF14パスワード)を入力してください\\r\\n* このためだけでなく、あなたの文字の記録を提出することができますです。あなたはそれを設定するための新しいいずれかのパスワードタイプを持っていない場合。', (5,3), (280, 60))\n self.password = wx.TextCtrl(self, -1, defaultPassword, (5,65), (260, 22), style=wx.TE_PASSWORD)\n self.checkbox = wx.CheckBox(self, -1, u\"パスワードを保存\", (5,95), (110, 22))\n \n wx.Button(self, wx.ID_OK, u'はい', (115, 95), (70, 30))\n wx.Button(self, wx.ID_CANCEL, u'キャンセル', (195, 95), (70, 30))\n\n def SetChecked(self, value):\n self.checkbox.SetValue(value)\n\n def SetValue(self, value):\n self.password.SetValue(value)\n\n def GetChecked(self):\n return self.checkbox.GetValue()\n\n def GetValue(self):\n return self.password.GetValue()\n\nclass ChangeCharacterNameDialog(wx.Dialog):\n def __init__(self, parent, id, title, language):\n wx.Dialog.__init__(self, parent, id, title, size=(320, 200))\n if language == 'en':\n wx.StaticText(self, -1, 'Enter new character name:', (5,3), (305, 15))\n else:\n wx.StaticText(self, -1, u'新キャラクターの名前を入力してください:', (5,3), (305, 15))\n \n self.newcharactername = wx.TextCtrl(self, -1, \"\", (5,20), (300, 22))\n if language == 'en':\n wx.StaticText(self, -1, 'Enter current password:', (5,47), (300, 15))\n else:\n wx.StaticText(self, -1, u'現在のパスワードを入力してください:', (5,47), (300, 15))\n self.password = wx.TextCtrl(self, -1, \"\", (5,65), (300, 22), style=wx.TE_PASSWORD)\n if language == 'en':\n wx.StaticText(self, -1, 'This may take up to 1 hour to appear on the website.\\nChanging your character name can only be performed once an hour so choose wisely.', (5,90), (305, 40))\n else:\n wx.StaticText(self, -1, u'これは、ウェブサイト上で表示されるように1時間かかることがあります。\\n時間はとても賢明な選択一度文字の名前を変更するだけで行うことができます。', (5,90), (305, 40))\n\n if language == 'en':\n wx.Button(self, wx.ID_OK, 'Ok', (158, 135), (70, 30))\n wx.Button(self, wx.ID_CANCEL, 'Cancel', (235, 135), (70, 30))\n else:\n wx.Button(self, wx.ID_OK, u'はい', (158, 135), (70, 30))\n wx.Button(self, wx.ID_CANCEL, u'キャンセル', (235, 135), (70, 30))\n\n def GetNewCharacterName(self):\n return self.newcharactername.GetValue()\n\n def GetPassword(self):\n return self.password.GetValue()\n\nclass ReverseIterator:\n def __init__(self, sequence):\n self.sequence = sequence\n def 
__iter__(self):\n length = len(self.sequence)\n i = length\n while i > 0:\n i = i - 1\n yield self.sequence[i]\n\nclass LogWindowContext(wx.Menu):\n def __init__(self, chatviewer, language):\n wx.Menu.__init__(self)\n self.chatviewer = chatviewer\n if language == 'en':\n copy = self.Append(wx.ID_COPY, 'Copy' ) \n else:\n copy = self.Append(wx.ID_COPY, u'コピー' ) \n self.AppendSeparator()\n if language == 'en':\n selectall = self.Append(wx.ID_SELECTALL, 'Select All' )\n else:\n selectall = self.Append(wx.ID_SELECTALL, u'すべて選択' ) \n copy.Enable(True)\n selectall.Enable(True)\n \n self.Bind(wx.EVT_MENU, self.ExecEvent)\n\n def ExecEvent(self, event):\n if event.GetId() == wx.ID_COPY:\n clipdata = wx.TextDataObject()\n clipdata.SetText(self.chatviewer.logWindow.GetStringSelection())\n wx.TheClipboard.Open()\n wx.TheClipboard.SetData(clipdata)\n wx.TheClipboard.Close()\n elif event.GetId() == wx.ID_SELECTALL:\n self.chatviewer.logWindow.SelectAll()\n\nclass ChatViewer(wx.Frame):\n\n def __init__(self, language):\n if language == 'en':\n wx.Frame.__init__(self, wx.GetApp().TopWindow, title='Chat Viewer', size=(500,400))\n else:\n wx.Frame.__init__(self, wx.GetApp().TopWindow, title=u'チャットビューア', size=(500,400))\n self.language = language\n # this is cleanup from an earlier version. It will be removed after a few versions go by.\n if os.path.exists(os.path.join('chatlogs', '--Everything--.chat')):\n os.remove(os.path.join('chatlogs', '--Everything--.chat'))\n self.currdates = []\n self.chat_types = {\n '01': self.WriteSay, # say\n '02': self.WriteShout, # shout\n '03': self.WriteTell, # sending tell\n '04': self.WriteParty, # party\n '05': self.WriteLinkshell, # linkshell\n '06': self.WriteLinkshell, # linkshell\n '07': self.WriteLinkshell, # linkshell\n '08': self.WriteLinkshell, # linkshell\n '09': self.WriteLinkshell, # linkshell\n '0A': self.WriteLinkshell, # linkshell\n '0B': self.WriteLinkshell, # linkshell\n '0C': self.WriteLinkshell, # linkshell\n '0D': self.WriteTell, # get tell\n '0F': self.WriteLinkshell, # linkshell\n '0E': self.WriteLinkshell, # linkshell\n '0F': self.WriteLinkshell, # linkshell\n '10': self.WriteLinkshell, # linkshell\n '11': self.WriteLinkshell, # linkshell\n '12': self.WriteLinkshell, # linkshell\n '13': self.WriteLinkshell, # linkshell\n '14': self.WriteLinkshell, # linkshell\n '15': self.WriteLinkshell, # linkshell\n '19': self.WriteEmote, # other emote\n '1B': self.WriteEmote # emote\n }\n\n self.SetBackgroundColour((240,240,240))\n try:\n self.SetIcon(wx.Icon(\"icon.ico\", wx.BITMAP_TYPE_ICO))\n except Exception as e:\n print e\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n self.sb = self.CreateStatusBar() # A Statusbar in the bottom of the window\n panel = wx.Panel(self, -1)\n if self.language == 'en':\n static = wx.StaticText(panel, -1, 'Select a date/time to load the chat data.', (5,12), (210, 15))\n wx.StaticText(panel, -1, 'Search', (220,12), (35, 15))\n else:\n static = wx.StaticText(panel, -1, u'選択して日付と時刻は、チャットデータをロードする.', (5,12), (210, 15))\n wx.StaticText(panel, -1, u'検索', (220,12), (35, 15))\n self.loadingMsg = wx.StaticText(panel, -1, '', (390,12), (30, 15))\n self.searchbox = wx.TextCtrl(panel, -1, pos=(260, 9), size=(120, 19), style=wx.TE_PROCESS_ENTER)\n self.searchbox.Bind(wx.EVT_TEXT_ENTER, self.DoSearch)\n self.datelist = wx.ListBox(panel, -1, pos=(0, 40), size=(140, 300))\n self.Bind(wx.EVT_LISTBOX, self.OnDateSelected, self.datelist)\n self.logWindow = wx.richtext.RichTextCtrl(panel, -1, pos=(132,40), size=(250, 300), style=wx.TE_READONLY | 
wx.EXPAND | wx.TE_MULTILINE)\n self.logWindow.Bind(wx.EVT_RIGHT_DOWN, self.CustomMenu)\n self.logWindow.SetBackgroundColour((243, 246, 237))\n self.LoadDates()\n self.Bind(wx.EVT_SIZE, self.OnSize)\n\n def CustomMenu(self, event):\n pos = (event.GetPosition()[0]+self.logWindow.GetPosition()[0], event.GetPosition()[1]+self.logWindow.GetPosition()[1])\n self.PopupMenu(LogWindowContext(self), pos, self.language)\n\n def DoSearch(self, event):\n if self.language == 'en':\n self.loadingMsg.SetLabel(\"Searching...\")\n else:\n self.loadingMsg.SetLabel(u\"検索...\")\n self.logWindow.Clear()\n self.datelist.SetSelection(-1)\n searchval = self.searchbox.GetValue().lower()\n idx = 0.0\n ttllen = self.datelist.GetCount()\n for index in ReverseIterator(range(ttllen - 1)):\n idx = idx + 1\n if self.language == 'en':\n self.loadingMsg.SetLabel(\"Searching... %i%%\" % ((idx / ttllen) * 100.0))\n else:\n self.loadingMsg.SetLabel(u\"検索... %i%%\" % ((idx / ttllen) * 100.0))\n app.Yield()\n filename = os.path.join('chatlogs', self.datelist.GetString(index + 1) + '.chat')\n filesize = os.path.getsize(filename)\n with open(filename, 'rb') as f:\n chatdata = pickle.load(f)\n for chatitem in chatdata:\n if (chatitem[1].lower().find(searchval) > -1) or (chatitem[2].lower().find(searchval) > -1):\n self.WriteDataToDisplay(chatitem, singlevalue=True)\n self.loadingMsg.SetLabel(\"\")\n\n def WriteDataToDisplay(self, chatdata, singlevalue=False):\n self.logWindow.BeginSuppressUndo()\n if singlevalue:\n try:\n try:\n self.chat_types[chatdata[0]](chatdata[1], chatdata[2])\n self.logWindow.ShowPosition(self.logWindow.GetLastPosition())\n except KeyError as e:\n self.WriteSay(chatdata[1], chatdata[2])\n except:\n pass \n else:\n for chatitem in chatdata:\n try:\n try:\n self.chat_types[chatitem[0]](chatitem[1], chatitem[2])\n self.logWindow.ShowPosition(self.logWindow.GetLastPosition())\n except KeyError as e:\n self.WriteSay(chatitem[1], chatitem[2])\n except:\n pass\n self.logWindow.EndSuppressUndo()\n\n def RefreshDisplay(self):\n #self.LoadDates()\n #self.DoDateLoad(self.datelist.GetString(self.datelist.GetSelection()))\n pass\n\n def OnDateSelected(self, event):\n self.DoDateLoad(event.GetString())\n \n def DoDateLoad(self, datestring):\n global app\n if self.language == 'en':\n self.loadingMsg.SetLabel(\"Loading...\")\n else:\n self.loadingMsg.SetLabel(u\"ロード...\")\n self.logWindow.Clear()\n #self.logWindow.Freeze()\n\n if datestring != \"-- Last 20 Logs --\" and datestring != u'-- 20最後にログ --':\n filename = os.path.join('chatlogs', datestring + '.chat')\n filesize = os.path.getsize(filename)\n with open(filename, 'rb') as f:\n chatdata = pickle.load(f)\n self.WriteDataToDisplay(chatdata)\n else:\n idx = 0.0\n ttllen = self.datelist.GetCount()\n if ttllen > 20:\n ttllen = 20\n for index in ReverseIterator(range(ttllen - 1)):\n idx = idx + 1\n if self.language == 'en':\n self.loadingMsg.SetLabel(\"Loading... %i%%\" % ((idx / ttllen) * 100.0))\n else:\n self.loadingMsg.SetLabel(u\"ロード... 
%i%%\" % ((idx / ttllen) * 100.0))\n app.Yield()\n filename = os.path.join('chatlogs', self.datelist.GetString(index + 1) + '.chat')\n filesize = os.path.getsize(filename)\n with open(filename, 'rb') as f:\n chatdata = pickle.load(f)\n self.WriteDataToDisplay(chatdata)\n\n #self.logWindow.Thaw()\n self.loadingMsg.SetLabel(\"\")\n \n def OnSize(self, event):\n event.Skip() \n size = event.GetSize()\n self.logWindow.SetSize((size[0] - 150, size[1] - 102))\n self.datelist.SetSize((130, size[1] - 102))\n \n def LoadDates(self):\n # This loads all date files into the listbox.\n chatdates = [(os.stat(i).st_mtime, i) for i in glob.glob(os.path.join('chatlogs', '*.chat'))]\n chatdates.sort(reverse=True)\n if len(self.currdates) == 0:\n if self.language == 'en':\n self.datelist.Append(\"-- Last 20 Logs --\")\n else:\n self.datelist.Append(u\"-- 20最後にログ --\")\n for date in chatdates:\n filename = os.path.basename(os.path.splitext(date[1])[0])\n self.currdates.append(filename)\n self.datelist.Append(filename)\n else:\n diff = set(chatdates).difference( set(self.currdates) )\n for date in diff:\n filename = os.path.basename(os.path.splitext(date[1])[0])\n self.currdates.append(filename)\n self.datelist.Append(filename)\n\n\n def WriteEmote(self, charname, text):\n self.logWindow.BeginTextColour((80, 50, 50))\n self.logWindow.WriteText(\"%s\\r\" % (text))\n self.logWindow.EndTextColour()\n\n def WriteParty(self, charname, text):\n self.logWindow.BeginTextColour((70, 70, 170))\n self.logWindow.WriteText(\"<%s> %s\\r\" % (charname, text))\n self.logWindow.EndTextColour()\n\n def WriteTell(self, charname, text):\n self.logWindow.BeginTextColour((190, 70, 70))\n if self.language == 'en':\n self.logWindow.WriteText(\"%s whispers %s\\r\" % (charname, text))\n else:\n self.logWindow.WriteText(u\"%s >> %s\\r\" % (charname, text))\n self.logWindow.EndTextColour()\n\n def WriteShout(self, charname, text):\n self.logWindow.BeginTextColour((140, 50, 50))\n if self.language == 'en':\n self.logWindow.WriteText(\"%s shouts %s\\r\" % (charname, text))\n else:\n self.logWindow.WriteText(u\"%s コメント %s\\r\" % (charname, text))\n self.logWindow.EndTextColour()\n \n def WriteLinkshell(self, charname, text):\n self.logWindow.BeginTextColour((50, 140, 50))\n self.logWindow.BeginBold()\n self.logWindow.WriteText(\"<\" + charname + \"> \")\n self.logWindow.EndBold()\n self.logWindow.WriteText(text + \"\\r\")\n self.logWindow.EndTextColour()\n\n def WriteSay(self, charname, text):\n if self.language == 'en':\n self.logWindow.WriteText(\"%s says %s\\r\" % (charname, text))\n else:\n self.logWindow.WriteText(u\"%s 言う %s\\r\" % (charname, text))\n \n\n def OnClose(self, e):\n self.Destroy();\n\nclass MainFrame(wx.Frame):\n def SaveLanguageSetting(self, lang):\n global configfile, currentlanguage\n config = ConfigParser.ConfigParser()\n try:\n config.add_section('Config')\n except ConfigParser.DuplicateSectionError:\n pass\n config.read(configfile)\n self.language = lang\n currentlanguage = lang\n\n config.set('Config', 'language', lang)\n with open(configfile, 'wb') as openconfigfile:\n config.write(openconfigfile)\n\n def SetEnglish(self, event):\n self.SetTitle(\"FFXIV Log Parser\")\n self.filemenu.SetLabel(1, \"&Start\")\n self.filemenu.SetHelpString(1, \" Start Processing Logs\")\n #self.filemenu.SetLabel(4, \"&Parse All Logs\")\n #self.filemenu.SetHelpString(4, \" Start Processing All Logs\")\n self.filemenu.SetLabel(wx.ID_ABOUT, \"&About\")\n self.filemenu.SetHelpString(wx.ID_ABOUT, \" Information about this program\")\n 
self.filemenu.SetLabel(2, \"&Check for New Version\")\n self.filemenu.SetHelpString(2, \" Check for an update to the program\")\n self.filemenu.SetLabel(wx.ID_EXIT, \"E&xit\")\n self.filemenu.SetHelpString(wx.ID_EXIT, \" Terminate the program\")\n self.chatmenu.SetLabel(13, 'Chat &Viewer')\n self.chatmenu.SetHelpString(13, \"Opens the chat viewer window.\")\n\n self.menuBar.SetLabelTop(0, \"&File\")\n self.menuBar.SetLabelTop(1, \"&Language\")\n self.menuBar.SetLabelTop(2, \"&Chat\")\n self.st.SetLabel(\"Select Log Path\")\n self.st2.SetLabel(\"Enter Your Character Name (default is unique id to hide your name)\")\n if self.btnCharChange:\n self.btnCharChange.SetLabel(\"Change\")\n self.btnStart.SetLabel(\"Start\")\n self.lblLogWindow.SetLabel(\"Activity Log\")\n self.charlink.SetLabel(\"test\")\n self.charlink.SetLabel(\"FFXIVBattle.com Character Page\")\n self.charlink.SetURL(\"http://ffxivbattle.com/character.php?charactername=%s&lang=en\" % (self.charname.GetValue()))\n self.SaveLanguageSetting('en')\n\n def SetJapanese(self, event):\n self.SetTitle(u\"FFXIVのログパーサー\")\n self.filemenu.SetLabel(1, u\"開始\")\n self.filemenu.SetHelpString(1, u\"スタート処理のログ\")\n #self.filemenu.SetLabel(4, u\"再解析のログ\")\n #self.filemenu.SetHelpString(4, u\" 再解析のログ\")\n self.filemenu.SetLabel(wx.ID_ABOUT, u\"について\")\n self.filemenu.SetHelpString(wx.ID_ABOUT, u\"このプログラムについての情報\")\n self.filemenu.SetLabel(2, u\"新しいバージョンの確認\")\n self.filemenu.SetHelpString(2, u\"プログラムの更新をチェックする\")\n self.filemenu.SetLabel(wx.ID_EXIT, u\"終了\")\n self.filemenu.SetHelpString(wx.ID_EXIT, u\"終了プログラム\")\n self.chatmenu.SetLabel(13, u'チャットビューア')\n self.chatmenu.SetHelpString(13, u\"が表示されますビューアチャットウィンドウ。\")\n\n self.menuBar.SetLabelTop(0, u\"ファイル\")\n self.menuBar.SetLabelTop(1, u\"言語\")\n self.menuBar.SetLabelTop(2, u\"チャット\")\n self.st.SetLabel(u\"選択してログのパス\")\n self.st2.SetLabel(u\"文字型の名前 (デフォルトでは、名前を非表示にする一意のIDです)\")\n if self.btnCharChange:\n self.btnCharChange.SetLabel(u\"変更\")\n self.btnStart.SetLabel(u\"開始\")\n self.lblLogWindow.SetLabel(u\"アクティビティログ\")\n self.charlink.SetLabel(u\"FFXIVBattle.com文字ページ\")\n self.charlink.SetURL(\"http://ffxivbattle.com/character.php?charactername=%s&lang=jp\" % (self.charname.GetValue()))\n self.SaveLanguageSetting('jp')\n\n def OpenChatViewer(self, event):\n self.chatviewer = ChatViewer(self.language)\n self.chatviewer.Show()\n \n def __init__(self, parent, title):\n global configfile, autotranslatearray, currentlanguage\n wx.Frame.__init__(self, parent, title=title, size=(400,314))\n try:\n self.SetIcon(wx.Icon(\"icon.ico\", wx.BITMAP_TYPE_ICO))\n except Exception as e:\n print e\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n self.sb = self.CreateStatusBar() # A Statusbar in the bottom of the window\n self.salt = None\n # Setting up the menu.\n self.filemenu= wx.Menu()\n\n # wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.\n self.filemenu.Append(1, \"&Start\",\" Start Processing Logs\")\n #self.filemenu.Append(4, \"&Parse All Logs\",\" Start Processing All Logs\")\n self.filemenu.Append(wx.ID_ABOUT, \"&About\",\" Information about this program\")\n self.filemenu.AppendSeparator()\n self.filemenu.Append(2, \"&Check for New Version\",\" Check for an update to the program\")\n self.filemenu.AppendSeparator()\n self.filemenu.Append(wx.ID_EXIT,\"E&xit\",\" Terminate the program\")\n self.Bind(wx.EVT_MENU, self.OnStartCollectingAll, id=1)\n #self.Bind(wx.EVT_MENU, self.OnStartCollectingAll, id=4)\n self.Bind(wx.EVT_MENU, self.OnCheckVersion, id=2)\n self.Bind(wx.EVT_MENU, self.OnExit, 
id=wx.ID_EXIT)\n self.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT)#menuItem)\n\n # Setup language menu\n self.languagemenu= wx.Menu()\n self.englishMenu = self.languagemenu.Append(11, \"&English\",\"Set application to english display\", kind=wx.ITEM_RADIO)\n self.japaneseMenu = self.languagemenu.Append(12, u\"日本語\", u\"日本語表示\", kind=wx.ITEM_RADIO)\n self.Bind(wx.EVT_MENU, self.SetEnglish, id=11)\n self.Bind(wx.EVT_MENU, self.SetJapanese, id=12)\n\n # Setup chat menu\n self.chatmenu= wx.Menu()\n self.chatviewerMenu = self.chatmenu.Append(13, \"Chat &Viewer\",\"Opens the chat viewer window\")\n self.Bind(wx.EVT_MENU, self.OpenChatViewer, id=13)\n\n # Creating the menubar.\n self.menuBar = wx.MenuBar()\n self.menuBar.Append(self.filemenu,\"&File\") # Adding the \"filemenu\" to the MenuBar\n self.menuBar.Append(self.languagemenu,\"&Language\") # Adding the \"filemenu\" to the MenuBar\n self.menuBar.Append(self.chatmenu,\"&Chat\") # Adding the \"filemenu\" to the MenuBar\n self.SetMenuBar(self.menuBar) # Adding the MenuBar to the Frame content.\n\n panel = wx.Panel(self, -1)\n logpath = \"\"\n charactername = hex(uuid.getnode())\n charinconfig = False\n # read defaults\n config = ConfigParser.ConfigParser()\n try:\n config.read(configfile)\n logpath = config.get('Config', 'logpath')\n charactername = config.get('Config', 'charactername')\n charinconfig = True\n except:\n logpath = \"\"\n pass\n if logpath == \"\":\n userdir = os.path.expanduser('~')\n logpath = os.path.join(userdir, \"Documents\\\\My Games\\\\FINAL FANTASY XIV\\\\user\\\\\") \n userdirs = os.listdir(logpath)\n newestdate = None\n try:\n for dir in userdirs:\n l = [(os.stat(i).st_mtime, i) for i in glob.glob(os.path.join(logpath, dir, 'log', '*.log'))]\n l.sort()\n if len(l) > 0:\n if newestdate != None:\n if l[0][0] > newestdate:\n newestdate = l[0][0];\n logpath = os.path.join(logpath, dir, 'log')\n else:\n newestdate = l[0][0];\n logpath = os.path.join(logpath, dir, 'log')\n except:\n logpath = os.path.join(userdir, \"Documents\\\\My Games\\\\FINAL FANTASY XIV\\\\user\\\\\")\n self.st = wx.StaticText(panel, -1, 'Select Log Path', (5,3))\n self.control = wx.TextCtrl(panel, -1, logpath, (5,21), (345, 22))\n self.btnDialog = wx.Button(panel, 102, \"...\", (350,20), (28, 24))\n self.Bind(wx.EVT_BUTTON, self.OnLogSelect, id=102)\n if charinconfig:\n self.st2 = wx.StaticText(panel, -1, 'Enter Your Character Name (default is unique id to hide your name)', (5,53))\n self.charname = wx.TextCtrl(panel, -1, charactername, (5,70), (310, 22))\n self.charname.Disable()\n self.btnCharChange = wx.Button(panel, 150, \"Change\", (320,69), (55, 24))\n self.Bind(wx.EVT_BUTTON, self.OnChangeCharacter, id=150)\n else:\n self.btnCharChange = None\n self.st2 = wx.StaticText(panel, -1, 'Enter Your Character Name (default is unique id to hide your name)', (5,53))\n self.charname = wx.TextCtrl(panel, -1, charactername, (5,70), (370, 22))\n \n self.btnStart = wx.Button(panel, 103, \"Start\", (150,100))\n self.Bind(wx.EVT_BUTTON, self.OnStartCollecting, id=103)\n\n self.Bind(wx.EVT_SIZE, self.OnSize)\n \n self.lblLogWindow = wx.StaticText( panel, -1, \"Activity Log\", (5,120))\n self.logWindow = wx.TextCtrl(panel, -1, \"\", (5,136), (370, 80), style=wx.TE_MULTILINE)\n self.logLayout = wx.BoxSizer( wx.HORIZONTAL )\n self.logLayout.Add( self.lblLogWindow, 0, wx.EXPAND )\n self.logLayout.Add( self.logWindow, 1, wx.EXPAND | wx.BOTTOM | wx.RIGHT )\n\n redir=RedirectText(self.logWindow)\n self.charlink = hl.HyperLinkCtrl(panel, -1, \"FFXIVBattle.com 
Character Page\", (5,216), (22, 80))\n self.charlink.SetURL(\"http://ffxivbattle.com/character.php?charactername=\" + charactername)\n self.language = 'en'\n try:\n configlang = config.get('Config', 'language')\n if configlang == 'jp':\n self.languagemenu.Check(12, True)\n self.SetJapanese(None)\n self.language = 'jp'\n currentlanguage = 'jp'\n except:\n pass\n\n sys.stdout=redir\n self.Show(True)\n '''\n if os.path.exists('autotranslate.gz'):\n print \"Opening autotranslate file...\"\n f = gzip.open('autotranslate.gz', 'rb')\n autotranslatearray = json.loads(f.read())\n f.close()\n print \"Autotranslate loaded.\" \n '''\n\n def OnChangeCharacter(self, event):\n global configfile\n if self.language == 'en':\n changecharnamedlg = ChangeCharacterNameDialog(self, -1, \"Enter New Character Name\", self.language)\n else:\n changecharnamedlg = ChangeCharacterNameDialog(self, -1, u\"新キャラクター名を入力してください\", self.language)\n \n if changecharnamedlg.ShowModal() == wx.ID_OK:\n if not self.salt:\n # extract salt from the dir\n dirparts = self.control.GetValue().split(\"\\\\\")\n # set the salt to the users directory name for the character. Not 100% but good enough to salt with.\n self.salt = \"\"\n if dirparts[len(dirparts)-1] == \"\":\n self.salt = dirparts[len(dirparts) - 3]\n else:\n self.salt = dirparts[len(dirparts) - 2]\n hash = hashlib.md5( self.salt + changecharnamedlg.GetPassword() ).hexdigest()\n results = self.ChangeCharacterName(self.charname.GetValue(), changecharnamedlg.GetNewCharacterName(), hash)\n if results:\n if results[\"code\"] < 0:\n if self.language == 'en':\n dlg = wx.MessageDialog( self, results[\"text\"], \"Error Changing Character Name\", wx.OK)\n else:\n dlg = wx.MessageDialog( self, results[\"text\"], u\"文字の名前の変更中にエラー\", wx.OK)\n dlg.ShowModal() # Show it\n dlg.Destroy() # finally destroy it when finished.\n else:\n self.charname.SetValue(changecharnamedlg.GetNewCharacterName())\n config = ConfigParser.ConfigParser()\n try:\n config.add_section('Config')\n except ConfigParser.DuplicateSectionError:\n pass\n config.read(configfile)\n\n config.set('Config', 'charactername', self.charname.GetValue())\n with open(configfile, 'wb') as openconfigfile:\n config.write(openconfigfile)\n\n\n if self.language == 'en':\n dlg = wx.MessageDialog( self, results[\"text\"], \"Success\", wx.OK)\n else:\n dlg = wx.MessageDialog( self, results[\"text\"], u\"成功\", wx.OK) \n dlg.ShowModal() # Show it\n dlg.Destroy() # finally destroy it when finished.\n else:\n if self.language == 'en':\n dlg = wx.MessageDialog( self, \"Did not understand server response.\", \"Try Again Later\", wx.OK)\n else:\n dlg = wx.MessageDialog( self, u\"サーバの応答を解釈しませんでした。\", u\"てみてください後でもう一度\", wx.OK)\n dlg.ShowModal() # Show it\n dlg.Destroy() # finally destroy it when finished.\n else:\n if self.language == 'en':\n print \"Character name change cancelled.\"\n else:\n print u\"キャラクター名の変更がキャンセルされました。\"\n\n def ChangeCharacterName(self, charactername, newcharactername, hashed_password):\n # call out to verify the password\n response = None\n try:\n encodedname = urllib.urlencode({\"oldcharname\": charactername.encode(\"utf-8\")})\n newencodedname = urllib.urlencode({\"newcharname\": newcharactername.encode(\"utf-8\")})\n response = urllib2.urlopen('http://ffxivbattle.com/updatecharactername.php?%s&%s&password=%s' % (encodedname, newencodedname, hashed_password))\n responsetext = response.read()\n #print responsetext\n return json.loads(responsetext)\n except Exception as e:\n # The result was garbage so skip it.\n print 
type(e)\n print e\n print \"Did not understand the response from the server for the character name change.\"\n return False\n\n def OnSize(self, event):\n event.Skip() \n size = event.GetSize()\n self.logWindow.SetSize((size[0] - 30, size[1] - 240))\n self.charlink.SetPosition((5, size[1] - 100))\n\n def CheckPassword(self, charactername, salt, hashed_password):\n # call out to verify the password\n response = None\n try:\n encodedname = urllib.urlencode({\"charactername\": charactername.encode(\"utf-8\")})\n response = urllib2.urlopen('http://ffxivbattle.com/passwordcheck.php?%s&salt=%s&password=%s' % (encodedname, salt, hashed_password))\n return json.loads(response.read())[\"result\"] == True\n except Exception, e:\n # The result was garbage so skip it.\n print e\n print \"Did not understand the response from the server for the password check.\"\n return False\n \n def GetPassword(self, config, salt):\n pass_stored = \"\"\n try:\n if self.language == 'en':\n passwordentry = PasswordDialog(self, -1, \"Enter password\", pass_stored, self.language)\n else:\n passwordentry = PasswordDialog(self, -1, u\"パスワードを入力してください\", pass_stored, self.language)\n try:\n pass_stored = config.get('Config', 'password')\n passwordentry.SetChecked(True)\n passwordentry.SetValue(pass_stored)\n except ConfigParser.NoOptionError:\n pass\n if passwordentry.ShowModal() == wx.ID_OK:\n if pass_stored != \"\":\n if pass_stored != passwordentry.GetValue():\n password = passwordentry.GetValue()\n if password != \"\":\n hash = hashlib.md5( salt + password ).hexdigest()\n return hash, passwordentry.GetChecked() \n else:\n return pass_stored, passwordentry.GetChecked()\n else:\n password = passwordentry.GetValue()\n if password != \"\":\n hash = hashlib.md5( salt + password ).hexdigest()\n return hash, passwordentry.GetChecked() \n else:\n return \"\", False\n else:\n return \"\", passwordentry.GetChecked()\n finally:\n passwordentry.Destroy()\n \n def OnIdle( self, evt ):\n if self.process is not None:\n stream = self.process.GetInputStream()\n if stream.CanRead():\n text = stream.read()\n self.logWindow.AppendText( text ) \n\n def OnLogSelect(self, e):\n if self.japaneseMenu.IsChecked():\n dlg = wx.DirDialog(self, u\"ログディレクトリを選択してください:\", self.control.GetValue(), style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)\n else:\n dlg = wx.DirDialog(self, \"Choose the Log Directory:\", self.control.GetValue(), style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)\n if dlg.ShowModal() == wx.ID_OK:\n self.control.SetValue(dlg.GetPath())\n dlg.Destroy()\n\n def OnAbout(self,e):\n global version\n \n license = '''Copyright (C) 2010-2011 FFXIVBattle.com All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Log Parser\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution, and in the same place and form as other copyright, license and disclaimer information.\n\n3. 
The end-user documentation included with the redistribution, if any, must include the following acknowledgment: \"This product includes software developed by FFXIVBattle.com (http://www.ffxivbattle.com/) and its contributors\", in the same place and form as other third-party acknowledgments. Alternately, this acknowledgment may appear in the software itself, in the same form and location as other such third-party acknowledgments.\n\n4. Except as contained in this notice, the name of FFXIVBattle.com shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from FFXIVBattle.com.\n\nTHIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FFXIVBATTLE.COM OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n '''\n info = wx.AboutDialogInfo()\n info.SetName('FFXIV Log Parser')\n info.SetVersion(str(version))\n if self.japaneseMenu.IsChecked():\n info.SetDescription(u\"ファイナルファンタジーXIVのログパーサー。\")\n info.AddTranslator(u'H3lls ([email protected]) ご了承ください、私は翻訳の間違いをしたなら、私に知らせてください。')\n else:\n info.SetDescription(\"A log parser for Final Fantasy XIV.\")\n info.AddTranslator('H3lls ([email protected]) PLEASE let me know if I have made any mistakes in translation.')\n info.SetIcon(wx.Icon('icon.ico',wx.BITMAP_TYPE_ICO))\n info.SetCopyright('(C) 2011 ffxivbattle.com')\n info.SetWebSite('http://www.ffxivbattle.com')\n info.SetLicence(license)\n info.AddDeveloper('H3lls ([email protected])')\n\n wx.AboutBox(info)\n\n def OnCheckVersion(self, e):\n if self.japaneseMenu.IsChecked():\n lang = \"JP\"\n else:\n lang = \"EN\"\n if versioncheck(status=1, language=lang):\n Popen(\"setup.exe\", shell=False) # start reloader\n self.OnExit(None)\n return\n\n def OnStartCollectingAll(self, e):\n global lastlogparsed\n lastlogparsed = 0\n self.OnStartCollecting(e)\n \n def OnStartCollecting(self, e):\n global guithread, configfile\n self.filemenu.Enable(1, False)\n #self.filemenu.Enable(4, False)\n self.btnStart.Disable()\n #try:\n config = ConfigParser.ConfigParser()\n try:\n config.add_section('Config')\n except ConfigParser.DuplicateSectionError:\n pass\n config.read(configfile)\n\n # extract salt from the dir\n dirparts = self.control.GetValue().split(\"\\\\\")\n # set the salt to the users directory name for the character. 
Not 100% but good enough to salt with.\n self.salt = \"\"\n if dirparts[len(dirparts)-1] == \"\":\n self.salt = dirparts[len(dirparts) - 3]\n else:\n self.salt = dirparts[len(dirparts) - 2]\n password, savepass = self.GetPassword(config, self.salt)\n if self.CheckPassword(self.charname.GetValue(), self.salt, password):\n if savepass:\n config.set('Config', 'password', password)\n else:\n try:\n config.remove_option('Config', 'password')\n except ConfigParser.NoSectionError:\n pass\n else:\n if self.language == 'en':\n dlg = wx.MessageDialog( self, \"The password provided does not match.\", \"Invalid Password\", wx.OK)\n else:\n dlg = wx.MessageDialog( self, u\"提供されたパスワードが一致しません。\", u\"無効なパスワード\", wx.OK)\n dlg.ShowModal() # Show it\n dlg.Destroy() # finally destroy it when finished.\n self.filemenu.Enable(1, True)\n #self.filemenu.Enable(4, True)\n self.btnStart.Enable()\n return\n \n config.set('Config', 'logpath', self.control.GetValue())\n config.set('Config', 'charactername', self.charname.GetValue())\n with open(configfile, 'wb') as openconfigfile:\n config.write(openconfigfile)\n #except (Exception, e):\n # print e\n try:\n self.charlink.SetURL(\"http://ffxivbattle.com/character.php?charactername=\" + self.charname.GetValue())\n guithread.updatevalues(self.control.GetValue(), self.charname.GetValue(), self.OnStatus, completecallback=self.threadcallback, password=password)\n guithread.daemon = False\n guithread.start()\n except:\n pass\n\n def threadcallback(self):\n if self:\n if self.filemenu:\n self.filemenu.Enable(1, True)\n if self.btnStart:\n self.btnStart.Enable()\n \n def OnClose(self, e):\n self.Destroy();\n\n def OnExit(self,e):\n self.Close(True) # Close the frame.\n\n def OnStatus(self, message):\n try:\n self.sb.PushStatusText(message, 0)\n except:\n pass\n\nclass RedirectText(object):\n def __init__(self,aWxTextCtrl):\n self.out=aWxTextCtrl\n\n def write(self,string):\n try:\n self.out.AppendText(string)\n except:\n pass\n\nclass GUIThread(Thread):\n def __init__(self, logpath, charactername, status): \n self.stopped = 1\n self.logpath = logpath\n self.charactername = charactername\n self.status = status\n self.exitready = 0\n Thread.__init__(self) \n\n def updatevalues(self, logpath, charactername, status, completecallback=None, password=\"\"):\n self.stopped = 0\n self.logpath = logpath\n self.charactername = charactername\n self.status = status\n self.completecallback = completecallback\n self.password = password\n \n def exit(self):\n self.stopped = 1\n\n def exitcomplete(self):\n return self.exitready\n\n def is_running(self):\n if self.stopped:\n return 0\n else:\n return 1\n\n def run(self):\n try:\n en_parser = english_parser()\n jp_parser = japanese_parser()\n en_parser.characterdata[\"charactername\"] = self.charactername\n jp_parser.characterdata[\"charactername\"] = self.charactername\n parsers = [en_parser, jp_parser]\n self.exitready = 0\n self.stopped = 0\n prev = []\n while not self.stopped:\n l = [(os.stat(i).st_mtime, i) for i in glob.glob(os.path.join(self.logpath, '*.log'))]\n l.sort()\n diff = set(l).difference( set(prev) )\n if len(diff) > 0:\n self.status(\"Found \" + str(len(l)) + \" new logs.\")\n prev = l\n if len(diff) == len(l):\n files = [i[1] for i in l]\n else:\n files = [i[1] for i in l[len(l)-len(diff):]]\n readLogFile(files, self.charactername, isrunning=self.is_running, password=self.password, parsers=parsers)\n start = datetime.datetime.now()\n self.status(\"Waiting for new log data...\")\n while (datetime.datetime.now() - 
start).seconds < 5:\n time.sleep(1)\n if self.stopped:\n return\n finally:\n self.exitready = 1\n self.stopped = 1\n if self.completecallback:\n self.completecallback()\n\ndef main():\n #try: \n global doloop, guithread, configfile, lastlogparsed, app, autotranslatearray\n args = sys.argv[1:]\n\n config = ConfigParser.ConfigParser()\n try:\n config.read(configfile)\n lastlogparsed = float(config.get('Config', 'lastlogparsed'))\n except:\n pass\n\n if len(args) < 1:\n try:\n guithread = GUIThread(None, None, None) \n doloop = 1\n app = wx.App()\n configlang = 'en'\n try:\n configlang = config.get('Config', 'language')\n except:\n pass\n if versioncheck(language=configlang):\n Popen(\"setup.exe\", shell=False) # start reloader\n return\n frame = MainFrame(None, \"FFXIV Log Parser\")\n if os.path.exists('autotranslate.gz'):\n print \"Opening autotranslate file...\"\n app.Yield()\n f = gzip.open('autotranslate.gz', 'rb')\n autotranslatearray = json.loads(f.read())\n f.close()\n print \"Autotranslate loaded.\" \n app.MainLoop()\n \n try:\n if guithread:\n guithread.exit()\n except (AttributeError):\n pass\n alivecount = 0\n while 1:\n if guithread:\n if guithread.isAlive():\n time.sleep(1)\n alivecount == alivecount + 1\n if alivecount > 20:\n # Exit anyways the thread is misbehaving\n break\n else:\n break\n else:\n break\n return\n except Exception as e:\n print e\n return\n if args[0] == '?' or args[0] == 'h' or args[0] == '/?' or args[0] == '/h' or args[0] == '/help' or args[0] == 'help' or args[0] == '-h' or args[0] == '-help' or args[0] == '--help' or len(args) < 4:\n print \"\\r\\nUsage: CharacterName password PathToLogFiles RunForever[True/False] FilterByMonster[optional]\"\n print \"Example: python logparse.py mychar mypass \\\"c:\\\\Users\\\\<youruser>\\\\Documents\\\\My Games\\\\Final Fantasy XIV\\\\user\\\\<yourcharid>\\\\log\\\\\\\" true\\r\\n\"\n print \"Examples of FilterByMonster:\\n\\\"ice elemental\\\"\\n\\\"fat dodo\\\"\\n\\\"warf rat\\\"\\n\"\n return\n \n # assign args to nice names\n charactername = args[0]\n password = args[1]\n logpath = args[2]\n logmonsterfilter = None\n if args[3].lower() == \"true\":\n doloop = 1\n if len(args) > 4:\n logmonsterfilter = args[4]\n prev = []\n \n en_parser = english_parser()\n jp_parser = japanese_parser()\n en_parser.characterdata[\"charactername\"] = charactername\n jp_parser.characterdata[\"charactername\"] = charactername\n parsers = [en_parser, jp_parser]\n while 1==1:\n l = [(os.stat(i).st_mtime, i) for i in glob.glob(os.path.join(logpath, '*.log'))]\n l.sort()\n diff = set(l).difference( set(prev) )\n if len(diff) > 0:\n prev = l \n files = [i[1] for i in sorted(diff)]\n try:\n readLogFile(files, charactername, password=password, logmonsterfilter=logmonsterfilter, parsers=parsers)\n except:\n traceback.print_exc()\n \n if not doloop:\n break\n time.sleep(60)\n #except ConfigParser.NoSectionError, e:\n # print \"Program Exception:\"\n # print e\n\"\"\"\n20 = \"ready (inswert combat skill)...\" as well as loot obtain\n42= all SP and EXP gain notices by you \n45= monster defeated message or you defeated\n-46 = crafting success / failure\n50=all my attacks that land \n51= all auto-attacks that mobs land on me, even crits / side attacks \n-52= Hits from left by party member\n-53= mob hits some party member\n54 = monster readying special ability \n55= all friendly AND hostile attacks on/by npc's near me \n56 = all my misses Vs monsters as well as their evades vs me \n57= all misses vs me \n-58= Party member misses\n-59= party member 
evades mob attack\n005c= so far it shows everytime i drain health with lancer speed surge \n-5E= used cure someone else on someone else\n\n61= players other than me casting heals on themselves/PC's as well as HP absorb messages \n67 = appears to be buffs/debuffs on players that have just been removed \n69= status effects just being inflicted upon me via monsters \n-6C= mob no longer stunned\n6D = status affects being inflicted on monsters near you AND players \n\"\"\"\n\ndef HexToByte( hexStr ):\n bytes = []\n \n hexStr = ''.join( hexStr.split(\" \") )\n \n for i in range(0, len(hexStr), 2):\n bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )\n \n return unicode(''.join( bytes), 'utf-8', errors='ignore')\n\n \ndef ByteToHex( byteStr ):\n return ''.join( [ \"%02X \" % ord( x ) for x in byteStr ] ).strip()\n\n'''\ndefaultmonster = {\"datetime\":\"\", \"monster\":\"\", \"monstermiss\":0, \"othermonstermiss\":0, \"damage\":[], \"miss\":0, \"hitdamage\":[], \"otherdamage\":[], \"othermiss\":[], \"otherhitdamage\":[], \"skillpoints\":0, \"class\":\"\", \"exp\":0}\ndefaultcrafting = {\"datetime\":\"\", \"item\":\"\", \"actions\":[], \"ingredients\":[], \"success\":0, \"skillpoints\":0, \"class\":\"\", \"exp\":0}\ncharacterdata = {\"charactername\":\"\", \"deaths\":[]}\nmonsterdata = []\ncraftingdata = []\ngatheringdata = []\n#uploaddata = []\n'''\n\ndebuglevel = 0\nclass ffxiv_parser:\n def __init__(self, language): \n self.language = language\n self.defaultmonster = {\"datetime\":\"\", \"monster\":\"\", \"monstermiss\":0, \"othermonstermiss\":0, \"damage\":[], \"miss\":0, \"hitdamage\":[], \"otherdamage\":[], \"othermiss\":[], \"spells\":[], \"healing\":[], \"otherhealing\":[], \"otherhitdamage\":[], \"skillpoints\":0, \"class\":\"\", \"exp\":0}\n self.defaultcrafting = {\"datetime\":\"\", \"item\":\"\", \"quantity\":0,\"actions\":[], \"ingredients\":[], \"success\":0, \"skillpoints\":0, \"class\":\"\", \"exp\":0}\n self.characterdata = {\"charactername\":\"\"}\n self.deathsdata = {\"charactername\":\"\", \"deaths\":[]}\n self.monsterdata = []\n self.craftingdata = []\n self.gatheringdata = []\n self.currentmonster = copy.deepcopy(self.defaultmonster)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.exptotal = 0\n self.damagepermob = 0\n self.damageavgpermob = 0\n self.craftingcomplete = 0\n self.synthtype = \"\"\n self.progress = []\n self.quality = []\n self.durability = []\n self.defeated = False\n self.expset = False\n self.spset = False\n\n self.function_map = {\n '01': self.parse_chatmessage, # say\n '02': self.parse_chatmessage, # shout\n '03': self.parse_chatmessage, # sending tell\n '04': self.parse_chatmessage, # party\n '05': self.parse_chatmessage, # linkshell\n '06': self.parse_chatmessage, # linkshell\n '07': self.parse_chatmessage, # linkshell\n '08': self.parse_chatmessage, # linkshell\n '09': self.parse_chatmessage, # linkshell\n '0A': self.parse_chatmessage, # linkshell\n '0B': self.parse_chatmessage, # linkshell\n '0C': self.parse_chatmessage, # linkshell\n '0D': self.parse_chatmessage, # get tell\n '0F': self.parse_chatmessage, # linkshell\n '0E': self.parse_chatmessage, # linkshell\n '0F': self.parse_chatmessage, # linkshell\n '10': self.parse_chatmessage, # linkshell\n '11': self.parse_chatmessage, # linkshell\n '12': self.parse_chatmessage, # linkshell\n '13': self.parse_chatmessage, # linkshell\n '14': self.parse_chatmessage, # linkshell\n '15': self.parse_chatmessage, # linkshell\n '19': self.parse_chatmessage, # other emote\n '1B': 
self.parse_chatmessage, # emote\n '1D': self.parse_servermessage,\n '20': self.parse_genericmessage, \n '21': self.parse_invalidcommand,\n '23': self.parse_leve,\n '25': self.parse_npcchat, # battlewarden msg\n '26': self.parse_npcchat, # say text\n '27': self.parse_npcchat, # npc linkshell \n '28': self.parse_invoke, #invoke diety\n '42': self.parse_spexpgain,\n '43': self.parse_spexpgain,\n '44': self.parse_defeated,\n '45': self.parse_defeated,\n '46': self.parse_craftingsuccess,\n '47': self.parse_craftingsuccess,\n '48': self.parse_gathering,\n '49': self.parse_othergathering,\n '50': self.parse_damagedealt,\n '51': self.parse_hitdamage,\n '52': self.parse_otherdamage,\n '53': self.parse_otherhitdamage,\n '54': self.parse_readyability,\n '55': self.parse_otherdamage,\n '56': self.parse_miss,\n '57': self.parse_monstermiss,\n '58': self.parse_othermiss,\n '59': self.parse_othermiss,\n '5A': self.parse_monstermiss,\n '5B': self.parse_othermiss,\n '5C': self.parse_selfcast, #self casting\n '5D': self.parse_otherrecover, # party casting?\n '5E': self.parse_otherrecover, # other casting?\n '5F': self.parse_otherrecover, # recover mp from monster\n '60': self.parse_monstereffect, # monster starts casting\n '61': self.parse_otherrecover,\n '62': self.parse_effect,\n '63': self.parse_othereffect,\n '64': self.parse_partyabilities,\n '65': self.parse_othereffect,\n '66': self.parse_monstereffect,\n '67': self.parse_othereffect,\n '68': self.parse_inflicts,\n '69': self.parse_inflicts,\n '6B': self.parse_inflicts,\n '6A': self.parse_othereffect,\n '6C': self.parse_effect,\n '6D': self.parse_monstereffect # wears off\n }\n\n def lookup(self, d, groupid, id, lang):\n langlookup = ['ja', 'en', 'de', 'fr']\n for row in d:\n if row['groupid'] == groupid:\n for k in row['values']:\n if k['id'] == id:\n return k['translations'][[x for x, y in enumerate(langlookup) if y == lang][0]]\n\n def GetGroupAndIndex(self, bytes ):\n #bytes = bytearray()\n \n #hexStr = ''.join( hexStr.split(\" \") )\n \n #for i in range(0, len(hexStr), 2):\n # bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )\n indexlen = bytes[2]\n indexval = bytes[3:3+indexlen]\n groupid = indexval[0]\n # get value without group id or terminator 0x03\n \n if (indexlen < 4):\n index = indexval[1:-1]\n elif (indexlen < 5):\n index = indexval[2:-1]\n else:\n index = indexval[2:-1]\n index.reverse()\n \n while len(index) < 4:\n index.append(0x00)\n #print ByteToHex2(index)\n index = struct.unpack_from('i', buffer(index))[0]\n if (indexlen < 4):\n index = index - 1\n\n # return tuple with groupid and index\n return groupid, index\n\n def getlanguage(self):\n return self.language\n\n def setLogFileTime(self, logfiletime):\n self.logfiletime = logfiletime\n\n def getlogparts(self, logitem):\n code = logitem[0:2]\n if logitem[2:4] == b'::':\n logvalue = logitem[4:]\n else:\n logvalue = logitem[3:]\n return str(code), logvalue #, 'ascii', errors='ignore'), logvalue #.decode('utf-8'), logvalue\n\n def getlogpartsalt(self, logitem):\n if (logitem.find(':') != -1):\n code = logitem[:logitem.find(':')]\n # trim the first char since it is likely a 0 that was written strangely\n # if its longer than 3 then its likely a crlf on a log border so\n # let it fall out\n # *** NOTE: Since going to the binary read version this should never happen.\n if len(code) > 2:\n code = code[1:]\n logvalue = logitem[logitem.find(':') + 1:]\n else:\n raise ValueError\n return code, logvalue\n \n def contains(self, findterm, text):\n if text.find(findterm) != -1:\n return False\n 
else:\n return True\n \n def between(self, text, starttext, endtext):\n return text[text.find(starttext) +len(starttext):text.find(endtext)]\n \n def echo(self, text, messagetype=0):\n global debuglevel\n if messagetype < debuglevel:\n try:\n print text#.encode('utf-8')\n except:\n pass\n\n def parse_line(self, logitem):\n #print ''.join( [ \"%02X \" % x for x in logitem ] ).strip()\n code, logvalue = self.getlogparts(logitem)\n #print code\n #print self.function_map[code]\n try:\n self.function_map[code](code, logvalue)\n #print logvalue.decode('utf-8')\n except: # Exception as e:\n traceback.print_exc(file=sys.stdout) \n self.echo(\"Could not parse code: %s value: %s\" % (code, ByteToHex(logvalue.decode('utf-8'))), -1)\n\nclass english_parser(ffxiv_parser):\n \n def close(self):\n if len(self.chatlog) == 0:\n return\n if self.prevchatdate != None:\n if not os.path.exists('chatlogs'):\n os.mkdir('chatlogs')\n with open(os.path.join('chatlogs', self.prevchatdate + '.chat'), 'wb') as chatfile:\n pickle.dump(self.chatlog, chatfile)\n self.chatlog = []\n\n def __init__(self): \n ffxiv_parser.__init__(self, \"en\")\n self.prevchatdate = None\n self.chatlog = []\n \n self.craftingcomplete = 0\n self.autotranslateheader = b'\\x02\\x2E'\n\n def monsterIsNM(self, monster):\n NMList = ['alux', 'bardi', 'barometz', 'bloodthirsty wolf', 'bomb baron', 'cactaur jack', 'daddy longlegs', 'dodore', 'downy dunstan', 'elder mosshorn', 'escaped goobbue', 'frenzied aurelia', 'gluttonous gertrude', 'great buffalo', 'haughtpox bloatbelly', 'jackanapes', 'kokoroon quickfingers', 'mosshorn billygoat', 'mosshorn nannygoat', 'nest commander', 'old six-arms', 'phaia', 'prince of pestilence', 'pyrausta', 'queen bolete', 'scurrying spriggan', 'sirocco', 'slippery sykes', 'spitfire', 'unknown soldier', 'uraeus']\n #print \"%s %r\" % (monster.lower(), monster.lower() in NMList)\n return monster.lower() in NMList\n \n def printCrafting(self, currentcrafting): \n #print currentcrafting\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n totalprogress = 0\n finalquality = 0\n finaldurability = 0\n for action in currentcrafting[\"actions\"]:\n if len(action[1]) > 0:\n totalprogress = totalprogress + action[1][0]\n if len(action[2]) > 0:\n finaldurability = finaldurability + action[2][0]\n if len(action[3]) > 0:\n finalquality = finalquality + action[3][0]\n itemsused = \"\"\n if len(currentcrafting[\"ingredients\"]) == 0:\n itemsused = \"Local levequests do not use ingredients.\"\n else:\n inglist = []\n first = True\n for item in currentcrafting[\"ingredients\"]:\n if first:\n itemsused = str(item[1]) + \" x \" + item[0]\n first = False\n else:\n itemsused = itemsused + \", \" + str(item[1]) + \" x \" + item[0]\n if currentcrafting[\"success\"]:\n print \"Completed Recipe for %s as %s\\nQuantity: %i\\nTotal Progress: %i\\nFinal Quality Added: %i\\nFinal Durability Lost: %i\\nIngredients Used: %s\\nExp: %i\\nSkill Points: %i\\nDate Time: %s GMT\\n\" % (currentcrafting[\"item\"], currentcrafting[\"class\"], currentcrafting[\"quantity\"], totalprogress, finalquality, finaldurability, itemsused, currentcrafting[\"exp\"], currentcrafting[\"skillpoints\"], currentcrafting[\"datetime\"])\n else:\n print \"Failed Recipe as %s\\nTotal Progress: %i\\nFinal Quality Added: %i\\nFinal Durability Lost: %i\\nIngredients Used: %s\\nExp: %i\\nSkill Points: %i\\nDate Time: %s GMT\\n\" % (currentcrafting[\"class\"], totalprogress, finalquality, finaldurability, itemsused, 
currentcrafting[\"exp\"], currentcrafting[\"skillpoints\"], currentcrafting[\"datetime\"])\n self.craftingdata.append(currentcrafting)\n #raw_input(\"\")\n return\n\n def printDamage(self, currentmonster):\n #print currentmonster[\"otherhitdamage\"]\n if len(currentmonster[\"damage\"]) > 0:\n hitpercent = 100\n critpercent = 0\n criticalavg = 0\n criticalavgcount = 0\n regularavg = 0\n regularavgcount = 0\n criticaldmgavg = 0\n regulardmgavg = 0\n totaldmgavg = 0\n hitdmgavg = 0\n hitdmgavgcount = 0\n crithitdmgavg = 0\n crithitdmgavgcount = 0\n totalhitdmgavg = 0\n othertotaldmg = 0\n healingavg = 0\n healingavgcount = 0\n absorbavg = 0\n absorbavgcount = 0\n totaldamage = 0\n for otherdamage in currentmonster[\"otherdamage\"]:\n if otherdamage[0] == '':\n continue\n othertotaldmg += int(otherdamage[0])\n for hitdamage in currentmonster[\"hitdamage\"]:\n if hitdamage[0] == '':\n continue\n if hitdamage[1] == True:\n crithitdmgavg = crithitdmgavg + int(hitdamage[0])\n crithitdmgavgcount = crithitdmgavgcount + 1\n else:\n hitdmgavg = hitdmgavg + int(hitdamage[0])\n hitdmgavgcount = hitdmgavgcount + 1\n\n for healing in currentmonster[\"healing\"]:\n if healing[1] == 'heal':\n healingavg = healingavg + int(healing[2])\n healingavgcount = healingavgcount + 1\n if healing[1] == 'absorb':\n absorbavg = absorbavg + int(healing[2])\n absorbavgcount = absorbavgcount + 1\n\n for damage in currentmonster[\"damage\"]:\n if damage[0] == '':\n continue\n totaldamage = totaldamage + int(damage[0])\n if damage[1] == True:\n criticalavg = criticalavg + int(damage[0])\n criticalavgcount = criticalavgcount + 1\n else:\n regularavg = regularavg + int(damage[0])\n regularavgcount = regularavgcount + 1\n if crithitdmgavg != 0:\n crithitdmgavg = crithitdmgavg / crithitdmgavgcount\n if criticalavgcount > 0:\n critpercent = int((float(criticalavgcount) / float(len(currentmonster[\"damage\"]))) * 100)\n if hitdmgavg != 0:\n hitdmgavg = hitdmgavg / hitdmgavgcount\n if crithitdmgavg + hitdmgavg != 0:\n totalhitdmgavg = (crithitdmgavg + hitdmgavg) / (crithitdmgavgcount + hitdmgavgcount)\n if criticalavg != 0:\n criticaldmgavg = criticalavg / criticalavgcount\n if regularavg != 0:\n regulardmgavg = regularavg / regularavgcount\n if criticalavg + regularavg != 0:\n totaldmgavg = (criticalavg + regularavg) / (criticalavgcount + regularavgcount)\n if healingavg != 0:\n healingavg = healingavg / healingavgcount\n if absorbavg != 0:\n absorbavg = absorbavg / absorbavgcount\n if currentmonster[\"miss\"] > 0:\n hitpercent = int((float(currentmonster[\"miss\"]) / float(len(currentmonster[\"damage\"]))) * 100)\n hitpercent = (100 - hitpercent)\n print \"Defeated %s as %s\\nAccuracy: %i%%\\nTotal Damage: %i\\nTotal Avg Dmg: %i\\nCrit Hit %%: %i\\nCrit Avg Dmg: %i%%\\nReg Avg Dmg: %i\\nTotal Hit Dmg Avg: %i\\nCrit Hit Dmg Avg: %i\\nHit Dmg Avg: %i\\nTotal Dmg From Others: %i\\nHealing Avg: %i\\nAbsorb Avg: %i\\nExp: %i\\nSkill Points: %i\\nDate Time: %s GMT\\n\" % (currentmonster[\"monster\"], currentmonster[\"class\"], hitpercent, totaldamage, totaldmgavg, critpercent, criticaldmgavg, regulardmgavg, totalhitdmgavg, crithitdmgavg, hitdmgavg, othertotaldmg, healingavg, absorbavg, currentmonster[\"exp\"], currentmonster[\"skillpoints\"], currentmonster[\"datetime\"])\n self.monsterdata.append(currentmonster)\n self.defeated = False\n self.spset = False\n self.expset = False\n self.currentmonster = copy.deepcopy(self.defaultmonster)\n\n def useitem(self, logitem):\n #print \"Use Item: \" + logitem\n if self.craftingcomplete == 1:\n 
self.printCrafting(self.currentcrafting)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n #print self.currentcrafting[\"datetime\"]\n self.craftingcomplete = 0\n\n if logitem.find(\"Standard Synthesis\") != -1:\n # store previous value if valid:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n #print self.currentcrafting[\"actions\"]\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Standard\"\n elif logitem.find(\"Rapid Synthesis\") != -1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Rapid\"\n elif logitem.find(\"Bold Synthesis\") != -1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Bold\"\n else:\n #print logitem\n # TODO: need to handle all special types or they will be ingredients, setup\n # an array with all traits and abilities and compare.\n if logitem.find(\"You use a\") != -1:\n ingcount = 1\n elif logitem.find(\"Touch Up\") != -1:\n return\n elif logitem.find(\"Preserve\") != -1:\n return\n elif logitem.find(\"Blinding Speed\") != -1:\n return\n else:\n try:\n ingcount = int(logitem.split(\" \")[2])\n except ValueError:\n # this is a special so skip it for now...\n return\n if logitem.find(\" of \") != -1:\n ingredient = logitem[logitem.find(\" of \") +4:-1]\n else:\n ingredient = \" \".join(logitem.split(\" \")[3:])[:-1]\n self.currentcrafting[\"ingredients\"].append([ingredient, ingcount])\n \n def engaged(self, logitem):\n self.echo(\"engaged \" + logitem, 1)\n if self.craftingcomplete == 1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.printCrafting(self.currentcrafting)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n self.craftingcomplete = 0\n self.synthtype = \"\"\n if logitem.find(\"You cannot change classes\") != -1 or logitem.find(\"Levequest difficulty\") != -1:\n return\n self.defeated = False\n self.spset = False\n self.expset = False\n self.currentmonster = copy.deepcopy(self.defaultmonster)\n\n self.currentmonster[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n self.currentmonster[\"monster\"] = logitem[logitem.find(\"The \") +4:logitem.find(\" is\")]\n self.currentmonster[\"monster\"] = self.currentmonster[\"monster\"].split('\\'')[0]\n if logitem.find(\"is engaged.\") != -1 and logitem.find(\"The \") == -1:\n self.currentmonster[\"monster\"] = logitem[:logitem.find(\" is\")]\n if logitem.find(\"group\") != -1:\n # This is a group start, we need to check to see if it is a NM fight.\n tmpmonster = logitem[:logitem.find(\"group\")].split('\\'')[0]\n if self.monsterIsNM(tmpmonster):\n self.currentmonster[\"monster\"] = tmpmonster\n \n def parse_gathering(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"othergathering \" + logitem, 1)\n\n def parse_othergathering(self, code, logitem):\n logitem = 
logitem.decode('utf-8')\n self.echo(\"othergathering \" + logitem, 1)\n\n def parse_leve(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"leve \" + logitem, 1)\n\n def parse_chatmessage(self, code, logitem):\n global autotranslatearray, currentlanguage\n if currentlanguage != 'en':\n #print \"Current Lanuage in EN: \" + currentlanguage\n return\n loopcnt = 0\n while logitem.find(self.autotranslateheader) != -1:\n loopcnt +=1;\n if loopcnt > 100:\n break\n # has autotranslate value\n transstart = int(logitem.find(self.autotranslateheader))\n translen = logitem[transstart + 2]\n transbytes = logitem[transstart:transstart + translen + 3]\n groupid, index = self.GetGroupAndIndex(transbytes)\n result = '(%s)' % (self.lookup(autotranslatearray, str(groupid), str(index), 'en'))\n logitem = logitem[:logitem.find(transbytes)] + bytearray(result, 'utf-8') + logitem[logitem.find(transbytes) + len(transbytes):]\n\n logitem = logitem.decode('utf-8')\n #self.echo(\"chatmessage \" + code + logitem, 1)\n\n if (code == '1B') or (code == '19'):\n user = ' '.join(logitem.split(' ')[0:2]).strip()\n message = logitem.strip()\n else:\n logitemparts = logitem.split(\":\")\n user = logitemparts[0].strip()\n message = unicode(\":\".join(logitemparts[1:]).strip())\n \n try: \n chatdate = time.strftime(\"%d-%m-%y %H-%M-%S\",time.gmtime(self.logfiletime))\n self.prevchatdate = chatdate \n self.chatlog.append((code, nullstrip(user), message))\n self.echo(\"Code: %s User: %s Message: %s\" % (code, user, message), 1)\n except:\n traceback.print_exc(file=sys.stdout)\n\n def parse_npcchat(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"npc chat \" + logitem, 1)\n\n def parse_invalidcommand(self, code, logitem):\n try:\n logitem = logitem.decode('utf-8')\n self.echo(\"invalid command \" + logitem, 1)\n except:\n pass\n\n def parse_monstereffect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_othereffect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"grants you\") != -1: \n effect = logitem[logitem.find(\"effect of \") +10:-1]\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_partyabilities(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"grants\") != -1:\n effect = logitem[logitem.find(\"effect of \") +10:-1]\n if logitem.find(\"inflicts\") != -1:\n monsteraffliction = logitem[logitem.find(\"effect of \") +10:-1]\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_otherabilities(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_readyability(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"ready ability \" + logitem, 1)\n\n def parse_servermessage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"server message \" + logitem, 1)\n\n def parse_invoke(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"invoke \" + logitem, 1)\n\n def parse_inflicts(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"inflicts you\") != -1:\n affliction = logitem[logitem.find(\"effect of \") +10:-1]\n return\n if logitem.find(\"inflicts\") != -1:\n othersaffliction = logitem[logitem.find(\"effect of \") +10:-1] \n self.echo(\"inflicts \" + logitem, 1)\n\n def parse_effect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"effect \" + logitem, 1)\n\n def 
parse_otherrecover(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\" MP\") != -1:\n return\n if logitem.find(\"absorbs\") != -1: \n caster = logitem[:logitem.find(\" absorbs\")]\n spell = \"Absorb\"\n target = caster\n healamount = logitem[logitem.find(\"absorbs \") +8:logitem.find(\" HP\")]\n if int(healamount) == 0:\n return\n self.currentmonster[\"otherhealing\"].append([caster, target, spell, healamount])\n if logitem.find(\"You recover\") != -1:\n usepos = logitem.find(\" uses \")\n caster = logitem[:usepos]\n spell = logitem[usepos + 6: logitem.find(\". \")]\n target = self.characterdata[\"charactername\"]\n healamount = logitem[logitem.find(\"recover \") +8:logitem.find(\" HP\")]\n if int(healamount) == 0:\n return\n self.currentmonster[\"otherhealing\"].append([caster, target, spell, healamount])\n #print self.currentmonster[\"otherhealing\"]\n if logitem.find(\"recovers\") != -1:\n usepos = logitem.find(\" uses \")\n onpos = logitem.find(\" on \")\n caster = logitem[:usepos]\n spell = logitem[usepos + 6: onpos]\n target = logitem[onpos + 4:logitem.find(\". \")]\n healamount = logitem[logitem.find(\"recovers \") +9:logitem.find(\" HP\")]\n if int(healamount) == 0:\n return\n self.currentmonster[\"otherhealing\"].append([caster, target, spell, healamount])\n self.echo(\"otherrecover %s %s\" % (code, logitem), 1)\n\n def parse_selfcast(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\" MP\") != -1:\n return\n if logitem.find(\"You absorb\") != -1:\n monster = logitem[logitem.find(\"from the \") + 9:logitem.find(\".\")]\n if monster == self.currentmonster[\"monster\"]:\n type = \"absorb\"\n healing = logitem[logitem.find(\"absorb \") +7:logitem.find(\" HP\")]\n if int(healing) == 0:\n return\n self.currentmonster[\"healing\"].append([self.characterdata[\"charactername\"], type, healing])\n #print self.currentmonster[\"healing\"]\n return\n if logitem.find(\"You recover\") != -1:\n type = \"heal\"\n healing = logitem[logitem.find(\"recover \") +8:logitem.find(\" HP\")]\n if int(healing) == 0:\n return\n self.currentmonster[\"healing\"].append([self.characterdata[\"charactername\"], type, healing])\n #print self.currentmonster[\"healing\"]\n return\n if logitem.find(\"recovers\") != -1:\n type = \"heal\"\n healing = logitem[logitem.find(\"recovers \") +9:logitem.find(\" HP\")]\n if int(healing) == 0:\n return\n target = logitem[logitem.find(\". 
\") + 2:logitem.find(\" recovers\")]\n self.currentmonster[\"healing\"].append([target, type, healing])\n #print self.currentmonster[\"healing\"]\n return\n self.echo(\"recover %s %s\" % (code, logitem), 1)\n\n def parse_monstermiss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"monstermiss \" + logitem, 1)\n\n def parse_othermiss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"KO'd target\") != -1 or logitem.find(\"too far away\") != -1 or logitem.find(\"guard fails.\") != -1 or logitem.find(\"fails to take effect.\") != -1:\n return\n if logitem.find(\"evades\") != -1:\n if logitem.find(self.currentmonster[\"monster\"] + \" evades\") != -1:\n monster = logitem[:logitem.find(\" evades\")]\n else:\n monster = logitem[logitem.find(\"The \") + 4:logitem.find(\" evades\")]\n if monster == self.currentmonster[\"monster\"]:\n misschar = logitem[logitem.find(\"evades \") + 7:logitem.find(\"'s \")]\n attacktype = logitem[logitem.find(\"'s \") + 3:logitem.find(\".\")]\n self.currentmonster[\"othermiss\"].append([misschar, attacktype])\n else:\n if logitem.find(\"from the\") != -1:\n if logitem.find(self.currentmonster[\"monster\"] + \" from the\") != -1:\n monster = logitem[logitem.find(\"misses \") +7:logitem.find(\" from the\")].split('\\'')[0]\n else:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" from the\")].split('\\'')[0]\n else:\n if logitem.find(\"misses the\") != -1:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\".\")].split('\\'')[0]\n else:\n monster = logitem[logitem.find(\"misses \") +7:logitem.find(\".\")]\n if monster == self.currentmonster[\"monster\"]:\n misschar = logitem[: logitem.find(\"'s \")]\n attacktype = logitem[logitem.find(\"'s \") + 3:logitem.find(\" misses\")]\n self.currentmonster[\"othermiss\"].append([misschar, attacktype])\n # NM monster miss: Uraeus's Body Slam fails.\n if logitem.find(\"fails.\") != -1:\n monster = logitem[:logitem.find('\\'')]\n if monster == self.currentmonster[\"monster\"]:\n self.currentmonster[\"othermonstermiss\"] += 1\n self.echo(\"othermiss \" + logitem, 1)\n\n def parse_miss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"evades\") != -1:\n if logitem.find(\"The \") != -1:\n monster = logitem[logitem.find(\"The \") +4:logitem.find(\" evades\")]\n else:\n monster = logitem[:logitem.find(\" evades\")]\n else:\n if logitem.find(\"from the\") != -1:\n if logitem.find(\"misses the\") != -1:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" from the\")].split('\\'')[0]\n else:\n monster = logitem[logitem.find(\"misses \") +7:logitem.find(\" from the\")].split('\\'')[0]\n else:\n if logitem.find(\"misses the\") != -1:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\".\")].split('\\'')[0]\n else:\n monster = logitem[logitem.find(\"misses \") +7:logitem.find(\".\")].split('\\'')[0]\n \n if monster == self.currentmonster[\"monster\"]:\n self.currentmonster[\"miss\"] += 1\n if logitem.find(\"fails.\") != -1:\n monster = logitem[:logitem.find('\\'')]\n if monster == self.currentmonster[\"monster\"]:\n self.currentmonster[\"monstermiss\"] += 1\n self.echo(\"miss \" + logitem, 1)\n\n def parse_otherhitdamage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"hits \") != -1:\n if logitem.find(\"points\") == -1:\n return\n if logitem.find(\"The\") != -1:\n monsterhit = logitem[logitem.find(\"The \") +4:logitem.find(\" hits\")]\n monster = monsterhit.split('\\'')[0]\n attacktype = 
monsterhit[monsterhit.find(\"'s \")+3:]\n else:\n monsterhit = logitem[:logitem.find(\" hits\")]\n monster = monsterhit.split('\\'')[0]\n attacktype = monsterhit[monsterhit.find(\"'s \")+3:]\n if monster == self.currentmonster[\"monster\"]:\n if logitem.find(\"Critical!\") != -1:\n critical = 1\n else:\n critical = 0\n if logitem.find(\" points\") != -1:\n if logitem.find(\"from the\") != -1:\n hitchar = logitem[logitem.find(\"hits \") + 5:logitem.find(\" from\")]\n else:\n hitchar = logitem[logitem.find(\"hits \") + 5:logitem.find(\" for\")]\n hitdamage = logitem[logitem.find(\"for \") +4:logitem.find(\" points\")]\n self.currentmonster[\"otherhitdamage\"].append([hitdamage, critical, attacktype, hitchar])\n self.echo(\"otherhitdamage \" + logitem, 1)\n\n def parse_otherdamage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"hits the\") != -1:\n if logitem.find(\"from the \") != -1:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" from the\")]\n else:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" for\")]\n else:\n if logitem.find(\"from the \") != -1:\n monster = logitem[logitem.find(\"hits \") +5:logitem.find(\" from the\")]\n else:\n monster = logitem[logitem.find(\"hits \") +5:logitem.find(\" for\")]\n if monster == self.currentmonster[\"monster\"]: \n if logitem.find(\"Critical!\") != -1:\n critical = 1\n else:\n critical = 0\n attackchar = \"\"\n if logitem.find(\"Counter!\") != -1:\n # \"Counter! Par Shadowmaster hits the great buffalo for 419 points of damage\"\n attackchar = logitem[logitem.find(\"! \")+2:logitem.find(\" hits\")]\n attacktype = \"Counter\"\n else:\n if critical:\n attackchar = logitem[10: logitem.find(\"'s \")]\n else:\n attackchar = logitem[: logitem.find(\"'s \")]\n attacktype = logitem[logitem.find(\"'s \") +3:logitem.find(\" hits\")]\n if logitem.find(\" points\") != -1:\n damage = logitem[logitem.find(\"for \") +4:logitem.find(\" points\")]\n self.currentmonster[\"otherdamage\"].append([damage, critical, attacktype, attackchar])\n self.echo(\"otherdamage \" + logitem, 1)\n\n def parse_hitdamage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"hits you\") != -1:\n if logitem.find(\"points\") == -1:\n return\n if logitem.find(\"The \") != -1:\n monsterhit = logitem[logitem.find(\"The \") +4:logitem.find(\" hits\")]\n else:\n monsterhit = logitem[:logitem.find(\" hits\")]\n monster = monsterhit.split('\\'')[0]\n attacktype = monsterhit[monsterhit.find(\"'s \")+3:]\n if monster == self.currentmonster[\"monster\"]:\n hitdamage = logitem[logitem.find(\"for \") +4:logitem.find(\" points\")]\n if logitem.find(\"Critical!\") != -1:\n critical = 1\n else:\n critical = 0\n self.currentmonster[\"hitdamage\"].append([hitdamage, critical, attacktype])\n self.echo(\"hitdamage \" + logitem, 1)\n\n def parse_damagedealt(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"your\") != -1 or logitem.find(\"Your\") != -1:\n if logitem.find(\"hits the\") != -1:\n if logitem.find(\"from the \") != -1:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" from the\")]\n else:\n monster = logitem[logitem.find(\"the \") +4:logitem.find(\" for\")]\n else:\n if logitem.find(\"from the \") != -1:\n monster = logitem[logitem.find(\"hits \") +5:logitem.find(\" from the\")]\n else:\n monster = logitem[logitem.find(\"hits \") +5:logitem.find(\" for\")]\n if monster == self.currentmonster[\"monster\"]: \n if logitem.find(\"Critical!\") != -1:\n critical = 1\n else:\n 
critical = 0\n attacktype = logitem[logitem.find(\"Your \") +5:logitem.find(\" hits\")]\n if logitem.find(\" points\") != -1:\n damage = logitem[logitem.find(\"for \") +4:logitem.find(\" points\")]\n self.currentmonster[\"damage\"].append([damage, critical, attacktype])\n self.echo(\"damagedealt \" + logitem, 1)\n\n def parse_craftingsuccess(self, code, logitem):\n logitem = logitem.decode('utf-8')\n # Crafting success\n if logitem.find(\"You create\") != -1:\n self.currentcrafting[\"quantity\"] = 1\n if logitem.find(\" of \") != -1:\n self.currentcrafting[\"item\"] = logitem[logitem.find(\" of \")+4:-1]\n elif logitem.find(\" a \") != -1:\n self.currentcrafting[\"item\"] = logitem[logitem.find(\" a \")+3:-1]\n else:\n itemparts = logitem.split(' ')\n idx = 0\n for item in itemparts: \n idx += 1;\n try:\n if item == 'an':\n break\n self.currentcrafting[\"quantity\"] = int(item)\n break\n except:\n continue\n self.currentcrafting[\"item\"] = ' '.join(itemparts[idx:])\n if self.currentcrafting[\"item\"].endswith(\".\"):\n self.currentcrafting[\"item\"] = self.currentcrafting[\"item\"][:-1]\n self.currentcrafting[\"success\"] = 1\n self.craftingcomplete = 1\n # botched it\n if logitem.find(\"You botch\") != -1:\n #print \"Crafting Fail: \" + logitem\n self.currentcrafting[\"quantity\"] = 0\n self.currentcrafting[\"success\"] = 0\n self.craftingcomplete = 1\n \n self.echo(\"crafting success \" + logitem, 1)\n\n def parse_defeated(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"defeated \" + logitem, 1)\n #print self.currentmonster\n if self.craftingcomplete == 1:\n #print \"Defeated:\" + logitem\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.printCrafting(self.currentcrafting)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n #print self.currentcrafting[\"datetime\"]\n self.craftingcomplete = 0\n self.synthtype = \"\"\n if logitem.find(\"group\") != -1:\n return\n if logitem.find(\"defeats you\") != -1:\n # You were killed...\n self.deathsdata[\"deaths\"].append({\"datetime\":time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime)), \"class\":self.currentmonster[\"class\"]})\n #self.characterdata[\"deaths\"].append({\"datetime\":time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime)), \"class\":self.currentmonster[\"class\"]})\n #0045::The fat dodo defeats you.\n return\n if logitem.find(\"You defeat the\") != -1:\n monster = logitem[logitem.find(\"defeat the \") +11:logitem.find(\".\")]\n if monster != self.currentmonster[\"monster\"]:\n return\n self.defeated = True\n if logitem.find(\"defeats\") != -1:\n monster = logitem[logitem.find(\"defeats \") +8:logitem.find(\".\")]\n if monster != self.currentmonster[\"monster\"]:\n return\n self.defeated = True\n\n if logitem.find(\"The \") == -1 and logitem.find(\"is defeated\") != -1:\n monster = logitem[:logitem.find(\" is defeated\")]\n if monster != self.currentmonster[\"monster\"]:\n return\n self.defeated = True\n elif logitem.find(\"defeated\") != -1:\n monster = logitem[logitem.find(\"The \") +4:logitem.find(\" is defeated\")].split('\\'')[0]\n #print self.currentmonster\n if monster != self.currentmonster[\"monster\"]:\n return\n self.defeated = True\n if self.monsterIsNM(self.currentmonster[\"monster\"]) and self.defeated:\n self.currentmonster[\"skillpoints\"] = 0\n 
self.currentmonster[\"exp\"] = 0\n self.defeated = False\n self.spset = False\n self.expset = False\n self.printDamage(self.currentmonster)\n\n\n def parse_spexpgain(self, code, logitem):\n logitem = logitem.decode('utf-8')\n pos = logitem.find(\"You gain\")\n if pos > -1:\n points = \"\"\n skill = \"\"\n if logitem.find(\"experience\") != -1:\n points = logitem[9:logitem.find(\"experience\") -1]\n #exptotal += int(points)\n self.currentmonster[\"exp\"] = int(points)\n self.currentcrafting[\"exp\"] = int(points)\n self.expset = True\n elif logitem.find(\"skill\") != -1:\n logitemparts = logitem.split(\" \")\n self.currentmonster[\"skillpoints\"] = int(logitemparts[2])\n self.currentmonster[\"class\"] = logitemparts[3]\n self.currentcrafting[\"skillpoints\"] = int(logitemparts[2])\n self.currentcrafting[\"class\"] = logitemparts[3]\n self.spset = True\n if self.craftingcomplete and self.spset:\n self.parse_defeated(\"\", \"\")\n self.defeated = False\n self.spset = False\n self.expset = False\n \n if self.defeated and self.spset and self.expset:\n self.defeated = False\n self.spset = False\n self.expset = False\n self.printDamage(self.currentmonster)\n\n #if and self.spset and self.defeated:\n # self.engaged(logitem)\n self.echo(\"spexpgain \" + logitem, 1)\n\n def throwaway(self, logitem):\n item = logitem[logitem.find(\"away the \") + 9:logitem.find(\".\")]\n #self.lostitems.append({\"datetime\":time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime)), \"item\":item})\n \n def parse_genericmessage(self, code, logitem):\n try:\n logitem = logitem.decode('utf-8')\n except:\n # specific to: 54 68 65 20 64 61 72 6B 77 69 6E 67 20 64 65 76 69 6C 65 74 20 69 73 20 6D 61 72 6B 65 64 20 77 69 74 68 20 02 12 04 F2 01 29 03 2E\n #print ''.join( [ \"%02X \" % x for x in logitem ] ).strip()\n return\n if logitem.find(\"You throw away\") != -1:\n self.throwaway(logitem)\n if logitem.find(\"is defeated\") != -1:\n self.parse_defeated(code, logitem)\n if logitem.find(\"engaged\") != -1:\n self.engaged(logitem)\n elif logitem.find(\"You use\") != -1:\n self.useitem(logitem)\n elif logitem.find(\"Progress\") != -1:\n #print logitem\n # save progress as array of % and it was an increase or decrease\n if logitem.find(\"increases\") != -1:\n self.progress = [int(logitem[logitem.find(\"by \") +3:-2]), 1]\n else:\n self.progress = [int(logitem[logitem.find(\"by \") +3:-2]), 0]\n elif logitem.find(\"Durability\") != -1:\n if logitem.find(\"increases\") != -1:\n self.durability = [int(logitem[logitem.find(\"by \") +3:-1]), 1]\n else:\n self.durability = [int(logitem[logitem.find(\"by \") +3:-1]), 0]\n elif logitem.find(\"Quality\") != -1:\n if logitem.find(\"increases\") != -1:\n self.quality = [int(logitem[logitem.find(\"by \") +3:-1]), 1]\n else:\n self.quality = [int(logitem[logitem.find(\"by \") +3:-1]), 0]\n else:\n pass \n \n self.echo(\"generic \" + logitem, 1)\n\nclass japanese_parser(ffxiv_parser):\n\n def close(self):\n if len(self.chatlog) == 0:\n return\n if self.prevchatdate != None:\n if not os.path.exists('chatlogs'):\n os.mkdir('chatlogs')\n with open(os.path.join('chatlogs', self.prevchatdate + '.chat'), 'wb') as chatfile:\n pickle.dump(self.chatlog, chatfile)\n self.chatlog = []\n \n def __init__(self):\n ffxiv_parser.__init__(self, \"jp\")\n self.prevchatdate = None\n self.chatlog = []\n \n self.craftingcomplete = 0\n self.autotranslateheader = b'\\x02\\x2E'\n\n def monsterIsNM(self, monster):\n NMList = [u'アルシュ', u'アンノウンソルジャー', u'ウラエウス', u'エルダーモスホーン', u'オールドシックスアームズ', u'カクタージャック', 
u'クィーンボリート', u'グゥーブー', u'グルタナスガーティ', u'グレートバッファロー', u'シロッコ', u'ジャッカネイプス', u'スピットファイア', u'スリプリーサイクス', u'ダウニーダンスタン', u'ダディーロングレッグ', u'ドドレ', u'ネストコマンダー', u'バルディ', u'バロメッツ', u'パイア', u'ピュラウスタ', u'フレンジード・オーレリア', u'ブラッディウルフ', u'プリンスオブペスト', u'ボムバロン', u'モスホーン・ナニー', u'モスホーン・ビリー', u'太っ腹のホットポックス', u'弾指のココルン']\n return monster.lower() in NMList\n\n def printCrafting(self, currentcrafting):\n #print currentcrafting\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n totalprogress = 0\n finalquality = 0\n finaldurability = 0\n for action in currentcrafting[\"actions\"]:\n if len(action[1]) > 0:\n totalprogress = totalprogress + action[1][0]\n if len(action[2]) > 0:\n finaldurability = finaldurability + action[2][0]\n if len(action[3]) > 0:\n finalquality = finalquality + action[3][0]\n itemsused = \"\"\n if len(currentcrafting[\"ingredients\"]) == 0:\n itemsused = u'リーヴは食材を使用しないでください。'\n else:\n inglist = []\n first = True\n for item in currentcrafting[\"ingredients\"]:\n if first:\n itemsused = str(item[1]) + \" x \" + item[0]\n first = False\n else:\n itemsused = itemsused + \", \" + str(item[1]) + \" x \" + item[0]\n if currentcrafting[\"success\"]:\n print u\"%sの完成レシピとして%s\\n全体の進行状況: %i\\n最終的な品質が追加されました: %i\\n最終的な耐久性が失わ: %i\\n材料使用: %s\\n経験値: %i\\n修錬値: %i\\n日付時刻: %s GMT\\n\" % (currentcrafting[\"item\"], currentcrafting[\"class\"], totalprogress, finalquality, finaldurability, itemsused, currentcrafting[\"exp\"], currentcrafting[\"skillpoints\"], currentcrafting[\"datetime\"])\n else:\n print u\"%sとして失敗したレシピ\\n全体の進行状況: %i\\n最終的な品質が追加されました: %i\\n最終的な耐久性が失わ: %i\\n材料使用: %s\\n経験値: %i\\n修錬値: %i\\n日付時刻: %s GMT\\n\" % (currentcrafting[\"class\"], totalprogress, finalquality, finaldurability, itemsused, currentcrafting[\"exp\"], currentcrafting[\"skillpoints\"], currentcrafting[\"datetime\"])\n self.craftingdata.append(currentcrafting)\n return\n\n def printDamage(self, currentmonster):\n if len(currentmonster[\"damage\"]) > 0:\n hitpercent = 100\n criticalavg = 0\n criticalavgcount = 0\n regularavg = 0\n regularavgcount = 0\n criticaldmgavg = 0\n regulardmgavg = 0\n totaldmgavg = 0\n hitdmgavg = 0\n hitdmgavgcount = 0\n crithitdmgavg = 0\n crithitdmgavgcount = 0\n totalhitdmgavg = 0\n othertotaldmg = 0\n healingavg = 0\n healingavgcount = 0\n absorbavg = 0\n absorbavgcount = 0\n totaldamage = 0\n for otherdamage in currentmonster[\"otherdamage\"]:\n if otherdamage[0] == '':\n continue\n othertotaldmg += int(otherdamage[0])\n for hitdamage in currentmonster[\"hitdamage\"]:\n if hitdamage[0] == '':\n continue\n if hitdamage[1] == True:\n crithitdmgavg = crithitdmgavg + int(hitdamage[0])\n crithitdmgavgcount = crithitdmgavgcount + 1\n else:\n hitdmgavg = hitdmgavg + int(hitdamage[0])\n hitdmgavgcount = hitdmgavgcount + 1\n\n for healing in currentmonster[\"healing\"]:\n if healing[1] == 'heal':\n healingavg = healingavg + int(healing[2])\n healingavgcount = healingavgcount + 1\n if healing[1] == 'absorb':\n absorbavg = absorbavg + int(healing[2])\n absorbavgcount = absorbavgcount + 1\n\n for damage in currentmonster[\"damage\"]:\n if damage[0] == '':\n continue\n totaldamage = totaldamage + int(damage[0])\n if damage[1] == True:\n criticalavg = criticalavg + int(damage[0])\n criticalavgcount = criticalavgcount + 1\n else:\n regularavg = regularavg + int(damage[0])\n regularavgcount = regularavgcount + 1\n if crithitdmgavg != 0:\n crithitdmgavg = crithitdmgavg / crithitdmgavgcount\n if hitdmgavg != 0:\n hitdmgavg = hitdmgavg / hitdmgavgcount\n if 
crithitdmgavg + hitdmgavg != 0:\n totalhitdmgavg = (crithitdmgavg + hitdmgavg) / (crithitdmgavgcount + hitdmgavgcount)\n if criticalavg != 0:\n criticaldmgavg = criticalavg / criticalavgcount\n if regularavg != 0:\n regulardmgavg = regularavg / regularavgcount\n if criticalavg + regularavg != 0:\n totaldmgavg = (criticalavg + regularavg) / (criticalavgcount + regularavgcount)\n if healingavg != 0:\n healingavg = healingavg / healingavgcount\n if absorbavg != 0:\n absorbavg = absorbavg / absorbavgcount\n if currentmonster[\"miss\"] > 0:\n hitpercent = int((float(currentmonster[\"miss\"]) / float(len(currentmonster[\"damage\"]))) * 100)\n hitpercent = (100 - hitpercent)\n print u\"敗北 %s ⇒ %s\\nヒット %%: %i%%\\n被害総額: %i\\n合計平均ダメージ: %i\\nクリティカルの平均ダメージ: %i\\nレギュラーの平均被害: %i\\n合計ダメージ平均を撮影ヒット: %i\\nクリティカルヒットのダメージの平均: %i\\nダメージ平均ヒット: %i\\nその他から合計ダメージ: %i\\n平均ヒーリング: %i\\n吸収平均: %i\\n経験値: %i\\n修錬値: %i\\n日付時刻: %s GMT\\n\" % (currentmonster[\"monster\"], currentmonster[\"class\"], hitpercent, totaldamage, totaldmgavg, criticaldmgavg, regulardmgavg, totalhitdmgavg, crithitdmgavg, hitdmgavg, othertotaldmg, healingavg, absorbavg, currentmonster[\"exp\"], currentmonster[\"skillpoints\"], currentmonster[\"datetime\"])\n self.monsterdata.append(currentmonster)\n self.defeated = False\n self.spset = False\n self.expset = False\n self.currentmonster = copy.deepcopy(self.defaultmonster)\n\n def useitem(self, logitem):\n #print \"useitem\" + logitem\n if logitem.find(u\"は作業\") != -1:\n # store previous value if valid:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Standard\"\n elif logitem.find(u\"突貫\") != -1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Rapid\"\n elif logitem.find(u\"入魂\") != -1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.progress = []\n self.durability = []\n self.quality = []\n self.synthtype = \"Bold\"\n else:\n # TODO: Need to handle ingredients in Japanese.\n if logitem.find(\"You use a\") != -1:\n ingcount = 1\n elif logitem.find(\"Touch Up\") != -1:\n return\n elif logitem.find(\"Preserve\") != -1:\n return\n elif logitem.find(\"Blinding Speed\") != -1:\n return\n else:\n try:\n ingcount = int(logitem.split(\" \")[2])\n except ValueError:\n # this is a special so skip it for now...\n return\n if logitem.find(\" of \") != -1:\n ingredient = logitem[logitem.find(\" of \") +4:-1]\n else:\n ingredient = \" \".join(logitem.split(\" \")[3:])[:-1]\n self.currentcrafting[\"ingredients\"].append([ingredient, ingcount])\n \n def engaged(self, logitem):\n if self.craftingcomplete == 1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.printCrafting(self.currentcrafting)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n self.craftingcomplete = 0\n self.synthtype = \"\"\n # TODO: Find the equivelant in japanese\n if logitem.find(\"You cannot change classes\") != -1 or logitem.find(\"Levequest difficulty\") != -1:\n return\n self.defeated = False\n 
self.spset = False\n self.expset = False\n self.currentmonster = copy.deepcopy(self.defaultmonster)\n \n self.currentmonster[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n if logitem.find(u\"の一群を占有した\") != -1:\n # this is a party engage\n self.currentmonster[\"monster\"] = logitem[:logitem.find(u\"の一群を占有した\")]\n else:\n self.currentmonster[\"monster\"] = logitem[:logitem.find(u\"を占有した\")]\n \n def parse_gathering(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"othergathering \" + logitem, 1)\n\n def parse_othergathering(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"othergathering \" + logitem, 1)\n\n def parse_leve(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"leve \" + logitem, 1)\n\n def parse_chatmessage(self, code, logitem):\n #print \"here\" + code + logitem\n global autotranslatearray, currentlanguage\n if currentlanguage != 'jp':\n #print \"Current Lanuage in JP: \" + currentlanguage\n return\n #print \"starting chat msg\"\n loopcnt = 0\n while logitem.find(self.autotranslateheader) != -1:\n loopcnt +=1;\n if loopcnt > 100:\n break\n # has autotranslate value\n transstart = int(logitem.find(self.autotranslateheader))\n translen = logitem[transstart + 2]\n transbytes = logitem[transstart:transstart + translen + 3]\n groupid, index = self.GetGroupAndIndex(transbytes)\n result = '(%s)' % (self.lookup(autotranslatearray, str(groupid), str(index), 'ja'))\n logitem = logitem[:logitem.find(transbytes)] + bytearray(result, 'utf-8') + logitem[logitem.find(transbytes) + len(transbytes):]\n\n logitem = logitem.decode('utf-8')\n #self.echo(\"chatmessage \" + code + logitem, 1)\n\n if (code == '1B') or (code == '19'):\n user = ' '.join(logitem.split(' ')[0:2]).strip()\n message = logitem.strip()\n else:\n logitemparts = logitem.split(\":\")\n user = logitemparts[0].strip()\n message = unicode(\":\".join(logitemparts[1:]).strip())\n \n try: \n chatdate = time.strftime(\"%d-%m-%y %H-%M-%S\",time.gmtime(self.logfiletime))\n self.prevchatdate = chatdate \n self.chatlog.append((code, nullstrip(user), message))\n self.echo(\"Code: %s User: %s Message: %s\" % (code, user, message), 1)\n except:\n traceback.print_exc(file=sys.stdout)\n\n def parse_npcchat(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"npc chat \" + logitem, 1)\n\n def parse_invalidcommand(self, code, logitem):\n try:\n logitem = logitem.decode('utf-8')\n self.echo(\"invalid command \" + logitem, 1)\n except:\n pass\n\n def parse_monstereffect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_othereffect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"grants you\") != -1: \n effect = logitem[logitem.find(\"effect of \") +10:-1]\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_partyabilities(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"grants\") != -1:\n effect = logitem[logitem.find(\"effect of \") +10:-1]\n if logitem.find(\"inflicts\") != -1:\n monsteraffliction = logitem[logitem.find(\"effect of \") +10:-1]\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_otherabilities(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"other abilities \" + logitem, 1)\n\n def parse_readyability(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"ready ability \" + logitem, 1)\n\n def parse_servermessage(self, code, 
logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"server message \" + logitem, 1)\n\n def parse_invoke(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"invoke \" + logitem, 1)\n\n def parse_inflicts(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(\"inflicts you\") != -1:\n affliction = logitem[logitem.find(\"effect of \") +10:-1]\n return\n if logitem.find(\"inflicts\") != -1:\n othersaffliction = logitem[logitem.find(\"effect of \") +10:-1] \n self.echo(\"inflicts \" + logitem, 1)\n\n def parse_effect(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"effect \" + logitem, 1)\n\n def parse_otherrecover(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"otherrecover %s %s\" % (code, logitem), 1)\n if logitem.find(\"MP\") != -1:\n return\n if logitem.find(u\"回復した\") != -1:\n caster = logitem[:logitem.find(u\"は\")]\n spell = logitem[logitem.find(u\"「\")+1: logitem.find(u\"」\")]\n target = logitem[logitem.find(u\"⇒ \") + 2:logitem.find(u\"はHP\")]\n healamount = logitem[logitem.find(u\"HPを\") +3:logitem.find(u\"回復した\")]\n if int(healamount) == 0:\n return\n self.currentmonster[\"otherhealing\"].append([caster, target, spell, healamount])\n\n def parse_selfcast(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"recover %s %s\" % (code, logitem), 1)\n if logitem.find(\"MP\") != -1:\n return\n if logitem.find(u\"吸収した\") != -1:\n monster = logitem[logitem.find(u\"は\") + 1:logitem.find(u\"の\")]\n if monster == self.currentmonster[\"monster\"]:\n type = \"absorb\"\n healing = logitem[logitem.find(u\"HPを\") +3:logitem.find(u\"吸収した\")]\n if int(healing) == 0:\n return\n self.currentmonster[\"healing\"].append([self.characterdata[\"charactername\"], type, healing])\n return\n if logitem.find(u\"回復した\") != -1:\n type = \"heal\"\n healing = logitem[logitem.find(u\"HPを\") +3:logitem.find(u\"回復した\")]\n if int(healing) == 0:\n return\n caster = logitem[:logitem.find(u\"は\")]\n target = logitem[logitem.find(u\"は\") + 1:logitem.find(u\"に\")]\n if caster == target:\n self.currentmonster[\"healing\"].append([self.characterdata[\"charactername\"], type, healing])\n else:\n self.currentmonster[\"healing\"].append([target, type, healing])\n return\n\n def parse_monstermiss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"monstermiss \" + logitem, 1)\n\n def parse_othermiss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(u\"行動不能状態\") != -1 or logitem.find(u\"目標が遠すぎます。\") != -1 or logitem.find(\"guard fails.\") != -1 or logitem.find(u\"効果がなかった\") != -1:\n return \n if logitem.find(u\"攻撃を外してしまった\") != -1:\n attacker = logitem[:logitem.find(u\"は\")]\n defender = logitem[logitem.find(u\"は\") +1:logitem.find(u\"に\")]\n \n if defender == self.currentmonster[\"monster\"]:\n misschar = logitem[:logitem.find(u\"は\")]\n attacktype = logitem[logitem.find(u\"「\") +1:logitem.find(u\"」\")]\n self.currentmonster[\"othermiss\"].append([misschar, attacktype])\n return\n elif attacker == self.currentmonster[\"monster\"]:\n self.parse_monstermiss(code, logitem)\n return\n\n self.echo(\"othermiss \" + logitem, 1)\n\n def parse_miss(self, code, logitem):\n logitem = logitem.decode('utf-8')\n monster = logitem[logitem.find(u\"は\") +1:logitem.find(u\"に\")]\n if monster == self.currentmonster[\"monster\"]:\n self.currentmonster[\"miss\"] += 1\n return\n self.echo(\"miss \" + logitem, 1)\n\n def parse_otherhitdamage(self, code, logitem):\n logitem = 
logitem.decode('utf-8')\n attacker = logitem[:logitem.find(u\"は\")]\n defender = logitem[logitem.find(u\"は\") +1:logitem.find(u\"に\")]\n attacktype = logitem[logitem.find(u\"「\") +1:logitem.find(u\"」\")]\n\n if defender == self.currentmonster[\"monster\"] and attacktype == u\"攻撃\":\n # The monster did damage to itself, jumping djigga im looking at you...\n return\n \n if attacker == self.currentmonster[\"monster\"]:\n if logitem.find(u\"クリティカル!\") != -1:\n critical = 1\n else:\n critical = 0\n if logitem.find(u\"ダメージを与えた\") != -1:\n if critical:\n hitdamage = int(logitem[logitem.find(u\"クリティカル! \") +9:logitem.find(u\"ダメージを与えた\")])\n else:\n hitdamage = int(logitem[logitem.find(u\"⇒ \") +2:logitem.find(u\"ダメージを与えた\")])\n self.currentmonster[\"otherhitdamage\"].append([hitdamage, critical, attacktype, defender])\n return\n self.echo(\"otherhitdamage \" + logitem, 1)\n\n def parse_otherdamage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n # this one is tricky, the only way to tell if it is damage or a hit is to look at the\n # order of the names and compare to see if it is the monster. Pain in the butt because\n # they both come in from code 55... There are also quite a few variants that do not exist\n # in the english version of the logs.\n if logitem.find(u\"に命中した\") != -1:\n # this is useless because it just shows the value (may be nice for other effects or something later.\n return\n attacker = logitem[:logitem.find(u\"は\")]\n defender = logitem[logitem.find(u\"は\") +1:logitem.find(u\"に\")]\n attacktype = logitem[logitem.find(u\"「\") +1:logitem.find(u\"」\")]\n # this is a hit, not damage redirect to the right method.\n if attacker == self.currentmonster[\"monster\"] or attacktype == u\"攻撃\":\n self.parse_otherhitdamage(code, logitem)\n return\n if logitem.find(u\"クリティカル!\") != -1:\n critical = 1\n else:\n critical = 0\n # Spell Resistance\n if logitem.find(u\"魔法に抵抗し\") != -1:\n try:\n damage = int(logitem[logitem.find(u\"ダメージは\") + 6:logitem.find(u\"に半減された\")])\n except ValueError:\n return\n self.currentmonster[\"otherdamage\"].append([damage, critical, attacktype, attacker])\n return\n if logitem.find(u\"に軽減された\") != -1:\n try:\n damage = int(logitem[logitem.find(u\"ダメージは\") + 6:logitem.find(u\"に軽減された\")])\n except ValueError:\n return\n self.currentmonster[\"otherdamage\"].append([damage, critical, attacktype, attacker])\n return\n if logitem.find(u\"のMP\") != -1:\n # no use for MP drain right now. later when i do healing it will be good.\n return\n if logitem.find(u\"ダメージを与えた\") != -1:\n try:\n if critical:\n damage = int(logitem[logitem.find(u\"クリティカル! 
\") +9:logitem.find(u\"ダメージを与えた\")])\n else:\n # leg hit\n if logitem.find(u\"の脚部\") != -1:\n damage = int(logitem[logitem.find(u\"の脚部に\") +5:logitem.find(u\"のダメージを与えた\")])\n else:\n damage = int(logitem[logitem.find(u\"⇒ \") +2:logitem.find(u\"ダメージを与えた\")])\n self.currentmonster[\"otherdamage\"].append([damage, critical, attacktype, attacker])\n return\n except ValueError:\n return\n \n self.echo(\"otherdamage code %s: %s \" % (code, logitem), 1)\n\n def parse_hitdamage(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(u\"ダメージを与えた。\") != -1:\n if logitem.find(u\"⇒ \") == -1:\n return\n #monsterhit = logitem[logitem.find(u\"⇒ \") +2:logitem.find(\" hits\")]\n monster = logitem.split(u\"は\")[0]\n attacktype = logitem[logitem.find(u\"「\")+1:logitem.find(u\"」\")]\n if monster == self.currentmonster[\"monster\"]:\n if logitem.find(u\"クリティカル!\") != -1:\n critical = 1\n else:\n critical = 0\n if critical:\n hitdamage = logitem[logitem.find(u\"クリティカル! \") +9:logitem.find(u\"ダメージを与えた。\")]\n else:\n hitdamage = logitem[logitem.find(u\"⇒ \") +2:logitem.find(u\"ダメージを与えた。\")]\n self.currentmonster[\"hitdamage\"].append([int(hitdamage), critical, attacktype])\n return\n self.echo(\"hitdamage \" + logitem, 1)\n\n def parse_damagedealt(self, code, logitem):\n logitem = logitem.decode('utf-8')\n # we can ignore From the left / right / back because of the formatting\n # may want to record that later but not really needed for any useful stats\n monster = logitem[logitem.find(u\"は\") +1:logitem.find(u\"に\")]\n if monster == self.currentmonster[\"monster\"]: \n if logitem.find(u\"クリティカル!\") != -1:\n critical = 1\n else:\n critical = 0\n attacktype = logitem[logitem.find(u\"「\") +1:logitem.find(u\"」\")]\n if critical:\n damage = logitem[logitem.find(u\"クリティカル! 
\") +9:logitem.find(u\"ダメージを与えた。\")]\n else:\n damage = logitem[logitem.find(u\"⇒ \") +2:logitem.find(u\"ダメージを与えた。\")]\n try:\n self.currentmonster[\"damage\"].append([int(damage), critical, attacktype])\n except:\n if logitem.find(u\"打ち消した\") != -1:\n return\n return\n self.echo(\"damagedealt \" + logitem, 1)\n\n def parse_craftingsuccess(self, code, logitem):\n logitem = logitem.decode('utf-8')\n # Crafting success\n if logitem.find(u\"完成させた\") != -1:\n #print \"Crafting Success: \" + logitem\n self.currentcrafting[\"item\"] = logitem[logitem.find(u'「')+1:logitem.find(u'」')]\n # TODO: Get created count with -> ×\n self.currentcrafting[\"success\"] = 1\n self.craftingcomplete = 1\n # botched it\n if logitem.find(u\"製作に失敗した\") != -1:\n self.currentcrafting[\"success\"] = 0\n self.craftingcomplete = 1\n \n self.echo(\"crafting success \" + logitem, 1)\n\n def parse_defeated(self, code, logitem):\n logitem = logitem.decode('utf-8')\n self.echo(\"defeated \" + logitem, 1)\n if self.craftingcomplete == 1:\n if self.synthtype != \"\":\n self.currentcrafting[\"actions\"].append([self.synthtype, self.progress, self.durability, self.quality])\n self.printCrafting(self.currentcrafting)\n self.currentcrafting = copy.deepcopy(self.defaultcrafting)\n self.currentcrafting[\"datetime\"] = time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime))\n self.craftingcomplete = 0\n self.synthtype = \"\"\n if logitem.find(u\"一群\") != -1:\n return\n #if logitem.find(\"defeats you\") != -1:\n # # You were killed...\n # self.deathsdata[\"deaths\"].append({\"datetime\":time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime)), \"class\":self.currentmonster[\"class\"]})\n # #self.characterdata[\"deaths\"].append({\"datetime\":time.strftime(\"%m/%d/%y %H:%M:%S\",time.gmtime(self.logfiletime)), \"class\":self.currentmonster[\"class\"]})\n # #0045::The fat dodo defeats you.\n # return\n if logitem.find(u\"を倒した。\") != -1:\n monster = logitem[:logitem.find(u\"を倒した。\")]\n if monster != self.currentmonster[\"monster\"]:\n return\n self.defeated = True\n if self.monsterIsNM(self.currentmonster[\"monster\"]) and self.defeated:\n self.currentmonster[\"skillpoints\"] = 0\n self.currentmonster[\"exp\"] = 0\n self.defeated = False\n self.spset = False\n self.expset = False\n self.printDamage(self.currentmonster)\n\n def parse_spexpgain(self, code, logitem):\n logitem = logitem.decode('utf-8')\n if logitem.find(u\"の経験値\") != -1:\n points = logitem[logitem.find(u\"は\")+1:logitem.find(u\"の経験値\")]\n self.currentmonster[\"exp\"] = int(points)\n self.currentcrafting[\"exp\"] = int(points)\n self.expset = True\n\n elif logitem.find(u\"修錬\") != -1:\n sp = logitem[logitem.find(u\"値\") + 1:logitem.find(u\"を得\")]\n self.currentmonster[\"skillpoints\"] = int(sp)\n self.currentmonster[\"class\"] = logitem[logitem.find(u\"「\") + 1:logitem.find(u\"」\")]\n self.currentcrafting[\"skillpoints\"] = int(sp)\n self.currentcrafting[\"class\"] = logitem[logitem.find(u\"「\") + 1:logitem.find(u\"」\")]\n self.spset = True\n \n if self.spset and self.craftingcomplete:\n self.parse_defeated(\"\", \"\")\n self.defeated = False\n self.spset = False\n self.expset = False\n\n if self.defeated and self.spset and self.expset:\n self.defeated = False\n self.spset = False\n self.expset = False\n self.printDamage(self.currentmonster)\n\n self.echo(\"spexpgain \" + logitem, 1)\n \n def throwaway(self, logitem):\n item = logitem[logitem.find(\"away the \") + 9:logitem.find(\".\")]\n #self.lostitems.append({\"datetime\":time.strftime(\"%m/%d/%y 
%H:%M:%S\",time.gmtime(self.logfiletime)), \"item\":item})\n\n def parse_genericmessage(self, code, logitem):\n try:\n logitem = logitem.decode('utf-8')\n except:\n # specific to: 54 68 65 20 64 61 72 6B 77 69 6E 67 20 64 65 76 69 6C 65 74 20 69 73 20 6D 61 72 6B 65 64 20 77 69 74 68 20 02 12 04 F2 01 29 03 2E\n return\n if logitem.find(\"You throw away\") != -1:\n self.throwaway(logitem)\n elif logitem.find(u\"を占有した\") != -1:\n self.engaged(logitem)\n elif logitem.find(u\"開始した\") != -1:\n self.useitem(logitem)\n elif logitem.find(u\"作業進捗\") != -1:\n # save progress as array of % and it was an increase or decrease\n self.progress = [int(logitem[logitem.find(u\"作業進捗 \") +5:logitem.find(u\"%\")]), 1]\n elif logitem.find(u\"素材耐用\") != -1:\n # TODO: Figure out if there is ever an increase rather than 減少した\n if logitem.find(u\"上昇した\") != -1:\n self.durability = [int(logitem[logitem.find(u\"が \") +2:logitem.find(u\"上昇した\")]), 1]\n else:\n self.durability = [int(logitem[logitem.find(u\"が \") +2:logitem.find(u\"減少した\")]), 0]\n elif logitem.find(u\"目標品質\") != -1:\n if logitem.find(u\"上昇した\") != -1:\n self.quality = [int(logitem[logitem.find(u\"が \") +2:logitem.find(u\"上昇した\")]), 1]\n else:\n #print logitem\n #⇒ 目標品質度が 11低下した……\n self.quality = [int(logitem[logitem.find(u\"が \") +2:logitem.find(u\"低下した\")]), 0]\n else:\n pass\n \n self.echo(\"generic \" + logitem, 1)\n\ndef readLogFile(paths, charactername, logmonsterfilter = None, isrunning=None, password=\"\", parsers=[]):\n global configfile, lastlogparsed\n config = ConfigParser.ConfigParser()\n config.read(configfile)\n try:\n config.add_section('Config')\n except ConfigParser.DuplicateSectionError:\n pass\n logfile = None\n logsparsed = 0\n for logfilename in paths:\n try:\n # have to read ALL of the files in case something was missed due to a restart when a read was in the middle.\n # can't guess where a fight may start since NM fights are VERY VERY long 2000+ hits.\n logfiletime = os.stat(logfilename).st_mtime\n logsparsed = logsparsed + 1\n for parser in parsers:\n parser.setLogFileTime(logfiletime)\n logfile = open(logfilename, 'rb')\n # read in the length of this files records\n headerparts = struct.unpack(\"2l\", logfile.read(8))\n headerlen = headerparts[1] - headerparts[0]\n header = struct.unpack(str(headerlen)+\"l\", logfile.read(headerlen*4))\n # header * 4 bytes for each and another 8 bytes for the header size\n offset = headerlen*4+8\n for headerpos in range(len(header)):\n if headerpos == 0:\n startbyte = offset\n endbyte = header[headerpos]\n else:\n startbyte = offset + header[headerpos-1]\n endbyte = header[headerpos] - header[headerpos-1]\n logfile.seek(startbyte)\n logitem = logfile.read(endbyte)[2:]\n for parser in parsers:\n try:\n parser.parse_line(bytearray(logitem))\n except UnicodeDecodeError:\n pass\n except:\n traceback.print_exc(file=sys.stdout)\n if isrunning:\n if not isrunning():\n return\n continue\n for parser in parsers:\n parser.close()\n\n finally: \n if logfile:\n logfile.close()\n lastlogparsed = logfiletime\n config.set('Config', 'lastlogparsed', lastlogparsed)\n with open(configfile, 'wb') as openconfigfile:\n config.write(openconfigfile)\n if os.path.exists('newinstall'):\n os.remove('newinstall')\n # uncomment for debugging to disable uploads\n return\n if logsparsed > 0:\n uploadToDB(password, parsers)\n else:\n print \"No new log data to parse. 
Don't you have some leves to do?\"\n\ndef uploadDeaths(header, deathdata):\n if len(deathdata[\"deaths\"]) > 0:\n #print deathdata\n #return\n if header[\"language\"] == \"en\":\n print \"Uploading deaths data.\"\n else:\n print \"アップロードの死亡データ。\"\n header[\"deaths\"] = deathdata\n jsondata = json.dumps(header)\n #print jsondata\n url = doUpload(jsondata, 'http://ffxivbattle.com/postdeaths.php')\n if url == None:\n return\n if header[\"language\"] == \"en\":\n print \"Total New Character Deaths: %d\\n\" % int(url[\"deaths\"])\n else:\n print u\"合計新キャラクター死亡: %d\" % int(url[\"deaths\"])\n\ndef uploadBattles(header, battledata):\n if len(battledata) > 0:\n end = 100\n totalbattlerecords = 0\n recordsimported = 0\n updatedrecords = 0\n url = None\n for start in range(0, len(battledata), 100):\n if end > len(battledata):\n end = len(battledata)\n tmpbattledata = header\n tmpbattledata[\"battle\"] = battledata[start:end]\n if header[\"language\"] == \"en\":\n print \"Uploading battle data. Records %d to %d.\" % (start, end)\n else:\n print \"アップロードの戦闘データ。レコード%d〜%d。\" % (start, end)\n jsondata = json.dumps(tmpbattledata)\n url = doUpload(jsondata, 'http://ffxivbattle.com/postbattles.php')\n if url == None:\n return\n end = end+100\n try:\n totalbattlerecords = int(url[\"totalbattlerecords\"])\n recordsimported = recordsimported + int(url[\"recordsimported\"])\n updatedrecords = updatedrecords + int(url[\"updatedrecords\"])\n except:\n if parser.getlanguage() == \"en\":\n print \"Did not understand the response from the server.\"\n else:\n print u\"サーバーからの応答を理解できませんでした。\"\n if header[\"language\"] == \"en\":\n print \"\\nTotal Global Battle Records: %d\" % totalbattlerecords\n print \"Records Sent (Duplicates ignored): %d\" % recordsimported\n print \"Records Uploaded To Website: %d\" % updatedrecords\n if int(updatedrecords) > 0:\n print \"\\nYour data has been uploaded, you can view it at: \\n\\n%s\" % url[\"url\"] \n else:\n print \"\\nNo new records. You can view your data at: \\n\\n%s\\n\" % url[\"url\"] \n else:\n print u\"\\n合計グローバルバトルレコード: %d\" % totalbattlerecords\n print u\"レコード送信(無視される重複): %d\" % recordsimported\n print u\"ウェブサイトにアップロードされたレコード: %d\" % updatedrecords\n if int(updatedrecords) > 0:\n print u\"\\nあなたのデータはあなたがそれを見ることができる、アップロードされています: \\n\\n%s\" % url[\"url\"] \n else:\n print u\"\\nいいえ、新しいレコード。あなたはあなたのデータを表示することができます: \\n\\n%s\\n\" % url[\"url\"] \n \ndef uploadCrafting(header, craftingdata):\n if len(craftingdata) > 0:\n #print craftingdata\n #return\n end = 100\n craftingcount = 0\n url = None\n for start in range(0, len(craftingdata), 100):\n if end > len(craftingdata):\n end = len(craftingdata)\n tmpcraftingdata = header\n tmpcraftingdata[\"crafting\"] = craftingdata[start:end]\n if header[\"language\"] == \"en\":\n print \"Uploading crafting data. 
Records %d to %d.\" % (start, end)\n else:\n print \"アップロードは、データを意図的に作成。レコード%d〜%d。\" % (start, end)\n jsondata = json.dumps(tmpcraftingdata)\n url = doUpload(jsondata, 'http://ffxivbattle.com/postcrafting.php')\n if url == None:\n return\n end = end+100\n try:\n craftingcount = int(url[\"craftingcount\"])\n except:\n if parser.getlanguage() == \"en\":\n print \"Did not understand the response from the server.\"\n else:\n print u\"サーバーからの応答を理解できませんでした。\"\n if header[\"language\"] == \"en\":\n print \"Crafting Records Uploaded To Website: %d\\n\" % craftingcount\n else:\n print u\"ウェブサイトにアップロード記録クラフト: %d\\n\" % craftingcount\n \ndef uploadToDB(password=\"\", parsers=[]):\n for parser in parsers:\n header = {\"version\":version,\"language\":parser.getlanguage(),\"password\":password, \"character\":parser.characterdata} \n uploadDeaths(header, parser.deathsdata)\n uploadCrafting(header, parser.craftingdata)\n uploadBattles(header, parser.monsterdata)\n \n # Clear records for next run\n parser.monsterdata = []\n parser.craftingdata = []\n parser.gatheringdata = []\n parser.deathsdata[\"deaths\"] = []\n\ndef doUpload(jsondata, url):\n try:\n #url = 'http://ffxivbattle.com/postlog-test.php'\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n values = {'jsondata' : jsondata }\n #print values\n headers = { 'User-Agent' : \"H3lls Log Parser v %s\" % (str(version)),\n 'Content-Type': 'text/plain; charset=utf-8' }\n req = urllib2.Request(url, jsondata, headers)\n response = urllib2.urlopen(req)\n jsonresults = response.read()\n try:\n return json.loads(jsonresults)\n except:\n print \"There was an issue uploading to the server see below:\"\n print jsonresults\n return None\n except Exception as e:\n print \"There was a problem uploading your data.\"\n print e\n\ndef doAppUpdate():\n try:\n response = urllib2.urlopen('http://ffxivbattle.com/setup.exe');\n file_size = int(response.info().getheader('Content-Length').strip())\n dialog = wx.ProgressDialog ( 'Progress', 'Downloading New Installer Version.', maximum = file_size, style = wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME )\n chunk_size = 8192\n bytes_so_far = 0\n setupfile = 'setup.exe'\n f = open(setupfile, 'wb')\n while 1:\n chunk = response.read(chunk_size)\n f.write(chunk)\n bytes_so_far += len(chunk)\n (keep_going, skip) = dialog.Update ( bytes_so_far )\n if not keep_going:\n dialog.Destroy()\n f.close()\n os.remove(setupfile)\n return 0\n if not chunk:\n break\n f.close()\n return 1\n except Exception, e:\n return 0\n\ndef versioncheck(status=0, language=\"en\"):\n response = None\n try:\n response = urllib2.urlopen('http://ffxivbattle.com/logparserversion-2.php');\n except:\n # There was a problem reading the version page skip it.\n if language==\"jp\":\n print u\"リモートのバージョン番号を読み取ることができません。\"\n else:\n print \"Unable to read the remote version number.\"\n return 0\n try:\n versiondata = json.loads(response.read())\n if versiondata[\"version\"] > version:\n if language==\"jp\":\n verdialog = wx.MessageDialog(None, u'新しいバージョンでは、ダウンロードし、インストールすることをご希望の利用可能ですか?\\r\\n変更: \\r\\n%s' % (versiondata[\"changetext\"]), u'バージョン %d 対応' % (versiondata[\"version\"]), \n wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n else:\n verdialog = wx.MessageDialog(None, 'A new version is available would you like to download and install it?\\r\\nChanges: \\r\\n%s' % (versiondata[\"changetext\"]), 'Version %d Available' % (versiondata[\"version\"]), \n wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n if 
verdialog.ShowModal() == wx.ID_YES:\n return doAppUpdate()\n elif status:\n if language==\"jp\":\n okdlg = wx.MessageDialog(None, u'現在、最新のバージョンを実行している。', u'最新バージョン', wx.OK)\n else:\n okdlg = wx.MessageDialog(None, 'You are currently running the latest version.', 'Latest Version', wx.OK)\n okdlg.ShowModal()\n except ValueError, e:\n # The result was garbage so skip it.\n traceback.print_exc()\n if language==\"jp\":\n print u\"リモートのバージョン番号を理解していないか。\"\n else:\n print \"Did not understand the remote version number.\"\n return 0\n return 0\n\nif __name__ == '__main__':\n try:\n main()\n except Exception, e:\n print e\n\n\n\n"
}
] | 3 |
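The `uploadBattles` routine embedded in the parser above sends its battle records to the server in slices of 100 per request so a single upload never grows too large. A minimal Python 3 sketch of that chunk-and-post pattern — the payload key, headers, and function name here are illustrative, not a drop-in replacement for the script's `doUpload`:

```python
import json
import urllib.request

def post_in_batches(records, url, batch_size=100):
    """POST `records` to `url` as JSON, at most `batch_size` items per request."""
    for start in range(0, len(records), batch_size):
        batch = records[start:start + batch_size]
        payload = json.dumps({"battle": batch}).encode("utf-8")
        req = urllib.request.Request(url, data=payload,
                                     headers={"Content-Type": "application/json"})
        with urllib.request.urlopen(req) as resp:
            # report which slice was sent and the HTTP status it got back
            print("batch %d-%d -> HTTP %d" % (start, start + len(batch), resp.status))
```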
Dylan-TerMolen/downloader | https://github.com/Dylan-TerMolen/downloader | 458eb26b687d419d688d810e88621834a26d733e | b3b8ab5e221d897b74aa22cc734b4e527da47491 | bed9cea38720132d3b6ef075a815a7ff63928470 | refs/heads/master | 2021-01-07T04:14:56.864992 | 2020-02-19T09:03:38 | 2020-02-19T09:03:38 | 241,575,650 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5823438167572021,
"alphanum_fraction": 0.5869901776313782,
"avg_line_length": 26.28169059753418,
"blob_id": "3203c5e739d31914ce802715d345d0b51daf7c41",
"content_id": "ee1e8b381ba4ef54c59f0014e9d2e6e39d417279",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1937,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 71,
"path": "/downloader.py",
"repo_name": "Dylan-TerMolen/downloader",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nimport os\nimport requests\nimport multiprocessing\n\n\ndef download_range_of_bytes(start_bytes, end_bytes, url, out_file):\n \"\"\"Download a range of the contents of a file\n given a url and the range of bytes to read\"\"\"\n\n headers = {'Range': 'bytes=%d-%d' % (start_bytes, end_bytes)}\n req = requests.get(url, headers=headers, stream=True)\n with open(out_file, \"r+b\") as file:\n file.seek(start_bytes)\n file.write(req.content)\n\n\ndef download_file(url, nThreads):\n \"\"\"Download a file concurrently given a url\n and a number of threads to use\"\"\"\n\n req = requests.head(url)\n file_size = int(req.headers[\"Content-Length\"])\n size_per_thread = file_size // nThreads\n try:\n filename = url.split(\"/\")[-1]\n except:\n filename = \"outfile\"\n\n with open(filename, \"wb\") as outfile:\n outfile.write(b'\\0' * file_size)\n try:\n for i in range(nThreads):\n start_byte = size_per_thread * i\n end_byte = start_byte + size_per_thread\n p = multiprocessing.Process(\n target=download_range_of_bytes,\n args=(start_byte, end_byte, url, filename)\n )\n p.start()\n\n print(filename, \"was downloaded successfully\")\n\n except requests.exceptions.RequestException as e:\n # Aim for atomic downloads\n os.remove(filename)\n sys.exit(1)\n\n\ndef main():\n # Parse command line arguments\n # If only url provided, then use 1 process\n try:\n url = sys.argv[1]\n if len(sys.argv) > 2:\n nThreads = int(sys.argv[3])\n else:\n nThreads = 1\n except:\n print(\"You failed to provide the required arguments\")\n print(\"Run as ./downloader <URL>\")\n print(\"or run as ./downloader <URL> -c numThreads\")\n sys.exit(1)\n\n download_file(url, nThreads)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.8151408433914185,
"alphanum_fraction": 0.8160211443901062,
"avg_line_length": 188.3333282470703,
"blob_id": "acae454bfe537d247c62dd5535c27173ff25cde6",
"content_id": "6289108585b06fc73074216fbbc699f20a7befc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1136,
"license_type": "no_license",
"max_line_length": 828,
"num_lines": 6,
"path": "/README.md",
"repo_name": "Dylan-TerMolen/downloader",
"src_encoding": "UTF-8",
"text": "# CLI Concurrent File Downloader\n\nThis program was written in Python3 and uses multiprocessing to concurrently download files. \nThis program functions by separating the file into chunks the size of file_size divided by num_threads in order to insure that each process is downloading an equal piece of the file. I was initially using the Python threading module but switched to the multiprocessing module because I believed that it was more scalable. A major bottleneck is the fact that all computers have a finite number of CPUs and so there can only be a finite number of processes. It can be greater than the number of CPUs, but it is still finite. I believe that this method is sufficiently scalable because if you were working with a faster computer or a distributed system then the program would be able to take advantage of the processing power whereas the threads would be running in the same memory space as each other and would not be as powerful.\n\nThis program was complete as part of Illumio's Internship Interview Process but I thought it was a fun project that displayed some of my knowledge in concurrency and networking.\n"
}
] | 2 |
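downloader.py above splits the target file into equal byte ranges and fetches each range in its own process via an HTTP `Range` header. A minimal sketch of the same ranged-fetch idea, with the worker processes joined before reporting completion — the function names and worker bookkeeping are illustrative, not taken from the repo:

```python
import multiprocessing
import requests

def fetch_range(url, start, end, out_file):
    # Ask the server for only bytes start..end (inclusive) of the resource.
    headers = {"Range": "bytes=%d-%d" % (start, end)}
    chunk = requests.get(url, headers=headers).content
    with open(out_file, "r+b") as f:
        f.seek(start)
        f.write(chunk)

def fetch(url, out_file, n_workers=4):
    size = int(requests.head(url).headers["Content-Length"])
    with open(out_file, "wb") as f:
        f.truncate(size)                      # pre-allocate the full file
    step = size // n_workers
    procs = []
    for i in range(n_workers):
        start = i * step
        end = size - 1 if i == n_workers - 1 else start + step - 1
        p = multiprocessing.Process(target=fetch_range,
                                    args=(url, start, end, out_file))
        p.start()
        procs.append(p)
    for p in procs:                           # wait for every range to land
        p.join()
```

Joining the workers before declaring success avoids reporting a finished download while some byte ranges are still being written.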
lenvladymyr/VkBot | https://github.com/lenvladymyr/VkBot | 7b15aae8b1dca742ec3070ddde5faf3ce563dfce | 4414898c7da5dc66bba9f53f65dcb9273502c247 | 234a459c5ba5ed5e82caebe28a862abf164ad223 | refs/heads/master | 2016-09-10T13:06:22.197888 | 2014-07-16T13:41:03 | 2014-07-16T13:41:03 | 21,670,472 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.35185185074806213,
"alphanum_fraction": 0.6481481194496155,
"avg_line_length": 17,
"blob_id": "f993cf75e1ecaf7ea09cef6a476dc4c030973349",
"content_id": "7273442bdb37717fcf6eb26cb734fb7481cdfc44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 3,
"path": "/settings.py",
"repo_name": "lenvladymyr/VkBot",
"src_encoding": "UTF-8",
"text": "token = ''\n#groupId = '68585467'\ngroupId = '57953524'\n"
},
{
"alpha_fraction": 0.6867191195487976,
"alphanum_fraction": 0.698287844657898,
"avg_line_length": 42.95918273925781,
"blob_id": "836a703e7dbf1389f1222e8deb3a24d1a8b4b160",
"content_id": "e7272a3227908c11835a3ae37f5f288748a70372",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2444,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 49,
"path": "/vk.py",
"repo_name": "lenvladymyr/VkBot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport requests, json, Image\nfrom StringIO import StringIO\nfrom settings import token, groupId\n\nclass VkBot:\n\t\n\tdef __init__(self, token, groupId):\n\t\tself.token = token\n\t\tself.groupId = groupId\n\t\n\tdef requestVkApi(self, method, payload):\n\t\t\"\"\"\tФункция для запроса к API\"\"\"\n\t\turl = 'https://api.vk.com/method/%s' %method\n\t\tres = requests.get(url, params=payload, timeout=5)\n\t\treturn res.text\n\t\t\n\tdef groupMembers(self):\n\t\t\"\"\"Функция для получения подписчиков паблика\"\"\"\n\t\tidInGroup = json.loads(self.requestVkApi('groups.getMembers', {'group_id':self.groupId,'access_token':self.token}))[u'response'][u'users']\n\t\treturn idInGroup\n\n\tdef inviteGroupMembers(self, idUser, captchaKey=None, captchaSid=None):\n\t\t\"\"\"Функция для приглашения в друзья подписчиков паблика\"\"\"\n\t\tidInvite = json.loads(self.requestVkApi('friends.add', {'user_id':idUser,'access_token':self.token,'captcha_key':captchaKey,'captcha_sid':captchaSid}))\n\t\tif 'error' in idInvite:\n\t\t\t#print idInvite['error']['error_code'], idInvite['error']['error_msg'], 'id =',idInvite['error']['request_params'][3]['value']\n\t\t\tif idInvite['error']['error_code']==14:\n\t\t\t\tcaptcha = Image.open(StringIO(requests.get(str(idInvite['error']['captcha_img'])).content))\n\t\t\t\tcaptcha.show()\n\t\t\t\traw = raw_input(\"Введите каптчу, пожалуйста: \")\n\t\t\t\tprint \"Повторим запрос: \", self.inviteGroupMembers(idUser,str(raw),str(idInvite['error']['captcha_img'][34:46]))\n\t\t\t\t#requestVkApi('friends.add', {'user_id':str(id),'access_token':token,'captcha_key':str(raw),'captcha_sid':str(idInvite['error']['captcha_img'][34:46])})\n\t\telse:\n\t\t\tif idInvite['response']==1:\n\t\t\t\tprint \"Отправлена заявка на добавление пользователя %s в друзья\" % idUser\n\t\t\telif idInvite['response']==2:\n\t\t\t\tprint \"Вы уже отправляли заявку на добавления пользователя %s\" %idUser\n\t\t\telif idInvite['response'] == 4:\n\t\t\t\tprint \"Повторная отправка заявки пользователю %s\" % idUser\n\nif __name__ == '__main__':\n\tvkBot = VkBot(token,groupId)\t\n\tprint \"id подписчиков паблика %s:\\n\" %groupId\n\tprint vkBot.groupMembers()\n\tprint \"\\n\"\n\tfor id in vkBot.groupMembers():\n\t\tvkBot.inviteGroupMembers(id)\n\t#vkBot.inviteGroupMember('248194698')\n\t\n\n\t\n\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 11.727272987365723,
"blob_id": "19b5f59633b73ebd250144025cbeca169a09bac3",
"content_id": "51cc6c65b7bb8fb6b5321666c90c5deb4b24c7cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 11,
"path": "/README.md",
"repo_name": "lenvladymyr/VkBot",
"src_encoding": "UTF-8",
"text": "VkBot\n=====\nDownload\n\ngit clone https://github.com/lenvladymyr/VkBot\n\nStart script:\n\npython vk.py\n\nThis script adds the members of Vk group\n"
}
] | 3 |
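When vk.py hits VK API error code 14 it recovers the captcha sid by slicing a fixed character range out of the captcha image URL (`captcha_img[34:46]`), which breaks if the URL format ever changes. The error payload for that code also carries the sid as its own field; a hedged sketch of reading it from the parsed JSON instead — the field names are assumed from the error object the script already inspects, and the `retry` callable is illustrative:

```python
def handle_captcha(error, retry):
    """Retry a VK API call using the captcha data carried in the error object.

    `error` is the parsed ['error'] dict from the API response; `retry` is a
    callable accepting (captcha_key, captcha_sid).  Assumes the payload
    includes 'captcha_sid' and 'captcha_img' for error code 14.
    """
    if error.get("error_code") != 14:
        return None
    sid = error.get("captcha_sid")          # read the sid directly, no URL slicing
    print("Solve the captcha at:", error.get("captcha_img"))
    key = input("captcha text: ")
    return retry(key, sid)
```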
kevbotmckrier/TwilioLookupScanner | https://github.com/kevbotmckrier/TwilioLookupScanner | f6ff1688591e4067ba0d2688cc686cb45d48be65 | 7295ac04467c074b44bf91ba48676a972be2f334 | 9e4a323d525aa98c4422bc76877ff1089d34a3dc | refs/heads/master | 2021-01-09T06:28:41.486357 | 2016-09-19T23:00:56 | 2016-09-19T23:00:56 | 68,653,693 | 0 | 0 | null | 2016-09-19T23:00:00 | 2016-08-19T20:18:13 | 2016-08-19T20:21:34 | null | [
{
"alpha_fraction": 0.6423633098602295,
"alphanum_fraction": 0.642991840839386,
"avg_line_length": 31.46938705444336,
"blob_id": "5d33da44efbd590b82b91270f12fd2e566c24815",
"content_id": "26165000760d820d86d28c13b88840cea677def5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 49,
"path": "/scanner.py",
"repo_name": "kevbotmckrier/TwilioLookupScanner",
"src_encoding": "UTF-8",
"text": "from twilio.rest.lookups import TwilioLookupsClient\nfrom urllib.parse import quote\nimport requests\n\n# Your Account Sid and Auth Token from twilio.com/user/account\naccount_sid = input(\"Account SID?\")\nauth_token = input(\"Auth Token?\")\naddon = input(\"Addon? (leave blank for none)\")\n\nclient = TwilioLookupsClient(account_sid, auth_token)\n\n# Load the CSV file\nCSVScanner = open(\"Input.csv\", \"r\")\n\nstack = []\n\nfor line in CSVScanner:\n if(line.startswith('+')):\n phoneNumber = line.rstrip()\n else:\n phoneNumber = str('+' + line).rstrip()\n #print(phoneNumber)\n try:\n if(addon):\n number = client.phone_numbers.get(quote(phoneNumber), include_carrier_info=True, addOns='whitepages_pro_caller_identity')\n else:\n number = requests.get('https://lookups.twilio.com/v1/PhoneNumbers/' + quote(phoneNumber) + '?Type=carrier&AddOns=' + addon, auth=(account_sid,auth_token)).json()\n \n except Exception as e:\n print(\"Error On Number: \" + phoneNumber)\n print(e)\n\n try:\n print(number['add_ons']['results'])\n processedNumber = phoneNumber +\"|\"+ number['carrier']['type'] +\"|\"+ number['carrier']['name'] + \"|\" + str(number['add_ons'])\n print(processedNumber)\n stack.append(processedNumber)\n except Exception as e:\n print(e)\n # print(\"Error on Processing Number: \" + phoneNumber)\n stack.append(\"Error on Processing Number: \" + phoneNumber)\n\n#With the stack finished write it to a new CSV\nf = open('Output.txt', 'w')\n\nfor item in stack:\n f.write(\"%s\\n\" % item)\n\nf.close()\n"
}
] | 1 |
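scanner.py queries Twilio Lookup either through the legacy helper client or through the v1 REST endpoint directly. A minimal sketch of the direct REST variant with basic auth and simple error handling — the credentials and example number are placeholders, and the helper function is illustrative rather than part of the repo:

```python
import requests
from urllib.parse import quote

def lookup_carrier(number, account_sid, auth_token):
    """Return (number, line type, carrier name) from Twilio Lookup v1, or None."""
    url = ("https://lookups.twilio.com/v1/PhoneNumbers/"
           + quote(number) + "?Type=carrier")
    resp = requests.get(url, auth=(account_sid, auth_token))
    if resp.status_code != 200:
        return None                          # invalid number or auth failure
    carrier = resp.json().get("carrier") or {}
    return number, carrier.get("type"), carrier.get("name")

# lookup_carrier("+15005550006", "ACxxxxxxxx", "your_auth_token")
```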
wgqsjtu/codexp | https://github.com/wgqsjtu/codexp | 7370a6f236de2553a90c75ab5faeb10009836908 | 05624347900ae0a1f2a93f893013be7fc0f0456c | a4ab64296c6dc68a601b04129ff3a2272b311996 | refs/heads/master | 2023-01-03T14:08:53.756413 | 2020-10-24T13:42:19 | 2020-10-24T13:42:19 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4886516332626343,
"alphanum_fraction": 0.5067615509033203,
"avg_line_length": 29.689319610595703,
"blob_id": "bfe20a42b68ef3003c47403822b38c89ccf52f40",
"content_id": "0926d8203dbb6ace591a896544d7eb32f6fb6a24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12645,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 412,
"path": "/codexp.py",
"repo_name": "wgqsjtu/codexp",
"src_encoding": "UTF-8",
"text": "from urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\nimport os\nimport re\nimport json\nimport argparse\nimport glob\nimport time\nfrom adapter import *\nfrom string import Formatter\n\n\nclass SafeFormatter(Formatter):\n def get_value(self, key, args, kwargs):\n if key not in kwargs:\n return \"{%s}\" % key\n else:\n return kwargs[key]\n\n\nform = SafeFormatter()\n\nbase_url = 'http://127.0.0.1:42024' # Set destination URL here\n\nconf_pro = {\n \"once\": {\n \"base\": \"/home/faymek/codexp\",\n \"inpath\": \"{base}/seq\",\n \"output\": \"{base}/result/{$inname}_{$modename}_{para}\"\n },\n \"iter\": [\n \"input | $mode | para\",\n \"{inpath}/*.yuv | QP | 27,32,37,42\"\n ],\n \"each\": {\n \"$inname\": \"os.path.basename(state['input']).split('.')[0]\",\n \"$modename\": \"state['$mode'].replace('$','')\",\n \"$mode\": {\n \"QP\": \"-q {para}\",\n \"RATE\": \"--RateControl=1 --TargetBitrate={para}000\",\n \"$QPIF\": \"modeQPIF(state)\"\n },\n \"$meta\": {\n \"InputBitDepth\": \"8\",\n \"InputChromaFormat\": \"420\",\n \"FrameRate\": \"30\",\n \"SourceWidth\": \"1920\",\n \"SourceHeight\": \"1080\",\n \"$FramesToBeEncoded\": \"str(calcAllFrames(state))\",\n \"$IntraPeriod\": \"'32' if meta['FrameRate'] == '30' else '64'\",\n \"Level\": \"3.1\"\n }\n },\n \"shell\": [\n \"x265 --preset fast\",\n \"--input {input} --fps 25 --input-res 3840x2160\",\n \"--output {output}.bin\",\n \"--psnr --ssim --csv {output}.csv --csv-log-level 2\",\n \" -f 250 {$mode}\"\n ]\n}\n\ndefault_sys1 = {\n \"$inname\": \"os.path.basename(state['input']).split('.')[0]\",\n \"$modename\": \"state['$mode'].replace('$','')\"\n}\n\n\ndef calcAllFrames(state):\n meta = state['meta'][state['input']]\n return readyuv420(state['input'],\n meta[\"InputBitDepth\"], meta[\"SourceWidth\"], meta[\"SourceHeight\"])\n\n\ndef modeQPIF(state):\n # QPIF 32.7 -> QP32 qpif0.3*nframes\n if not \"FramesToBeEncoded\" in state['meta'][state['input']]:\n print(\"In QPIF mode, no meta information find. Use meta.\")\n return \"\"\n nframes = eval(state['meta'][state['input']][\"FramesToBeEncoded\"])\n para = float(state['para'])\n qp = int(para)\n qpif = int((qp + 1 - para)*nframes)\n return \"--QP={} --QPIncrementFrame={}\".format(qp, qpif)\n\n\ndef post(addr, pf):\n request = Request(base_url+addr, urlencode(pf).encode())\n return urlopen(request).read().decode()\n\n\ndef get(addr):\n request = Request(base_url+addr)\n return urlopen(request).read().decode()\n\n\ndef loadconf(fn=None):\n if not fn:\n fn = getlatestjob()\n if not os.path.exists(fn):\n print(\"The Job doesn't exist. 
Use new.\")\n exit(0)\n with open(fn, \"r\") as f:\n conf = json.load(f)\n return conf\n\n\ndef saveconf(conf, fn=None):\n if not fn:\n fn = getlatestjob()\n with open(fn, \"w\") as f:\n json.dump(conf, f, indent=4)\n\n\ndef getabspath(s):\n return os.path.abspath(os.path.expanduser(s))\n\n\ndef readyuv420(filename, bitdepth, W, H):\n if bitdepth == '8':\n bytesPerPixel = 1\n elif bitdepth == '10':\n bytesPerPixel = 2\n pixelsPerFrame = int(H) * int(W) * 3 // 2\n bytesPerFrame = bytesPerPixel * pixelsPerFrame\n fp = open(filename, 'rb')\n fp.seek(0, 2)\n totalframe = fp.tell() // bytesPerFrame\n return str(totalframe)\n\n\ndef getlatestjob():\n jobs = sorted(glob.glob(\"job*.json\"))\n return jobs[-1] if jobs else \"\"\n\n\ndef readcfg(fn):\n meta = {}\n with open(fn, \"r\") as f:\n for line in f:\n k, v = line.replace(':', ' ').split()\n meta[k] = v\n return meta\n\n\ndef new(template=\"conf_win_x265\"):\n lastjob = getlatestjob().split('.')[0]\n idx = int(lastjob[3:]) + 1 if lastjob else 1 # get next job id\n curjob = \"job%03d.json\" % idx\n with open(curjob, \"w\") as f:\n json.dump(conf_pro, f, indent=4)\n print(\"[ok] %s newly created.\" % curjob)\n\n\ndef meta_fn():\n conf = loadconf()\n\n for file in conf['meta']:\n filename = os.path.basename(file)\n meta = conf[\"each\"][\"$meta\"].copy()\n items = filename[:-4].split(\"_\")[1:]\n for item in items:\n if re.match(r\"^[0-9]*x[0-9]*$\", item):\n meta[\"SourceWidth\"], meta[\"SourceHeight\"] = item.split(\"x\")\n elif re.match(r\"^[0-9]*fps$\", item):\n meta[\"FrameRate\"] = item.split(\"fps\")[0]\n elif re.match(r\"^[0-9]*bit\", item):\n meta[\"InputBitDepth\"] = item.split(\"bit\")[0]\n elif item in [\"444\", \"440\", \"422\", \"411\", \"420\", \"410\", \"311\"]:\n meta[\"InputChromaFormat\"] = item\n elif re.match(r\"^[0-9]*$\", item):\n meta[\"FrameRate\"] = item\n\n state = {'input': file, 'meta': {file: meta}} # using for eval context\n new_meta = {}\n for key, value in meta.items():\n if \"$\" in key:\n new_meta[key[1:]] = str(eval(value))\n else:\n new_meta[key] = value\n conf[\"meta\"][file] = new_meta\n\n if file.endswith('.yuv'):\n cfg = file.replace(\".yuv\", \".cfg\")\n with open(cfg, \"w\") as autocfg:\n for key, value in new_meta.items():\n autocfg.write('{0:30}: {1}\\n'.format(key, value))\n\n saveconf(conf)\n print(\"[meta+%3d] Auto parsing finished. 
Please check.\" %\n len(conf[\"meta\"]))\n\n\ndef start(force=False):\n conf = loadconf()\n\n key_sys0 = ['$mode', '$meta']\n # TODO: default sys key, peform simple func\n # key_sys1 = ['$inname', '$modename']\n\n # get all {$var} in key_exec, include key_iter\n key_exec = key_sys0\n key_once_exec = []\n key_once_str = []\n for key in conf[\"once\"].keys():\n if \"$\" in key:\n key_once_exec.append(key)\n else:\n key_once_str.append(key)\n key_exec.extend(key_once_exec)\n for key in conf[\"each\"].keys():\n if \"$\" in key:\n key_exec.append(key)\n\n it_sheet = []\n for v in conf[\"iter\"]:\n it_sheet.append(v.replace(' ', '').split('|'))\n key_iter = it_sheet[0]\n key_exec.extend(key_iter)\n\n state = {k: \"{%s}\" % k for k in key_exec} # keep the same after format\n state.update(conf[\"once\"])\n\n for key in key_once_exec:\n state[key] = eval(conf[\"once\"][key])\n\n for key in key_once_str:\n v = conf[\"once\"][key]\n t = v.format(**state)\n if '\\\\' in v or '/' in v:\n t = getabspath(t)\n os.makedirs(os.path.dirname(t), exist_ok=True)\n state[key] = t\n\n # get sheet(2D) -> table(3D)\n it_table = [] # 3D array\n for p1 in it_sheet[1:]:\n t1 = []\n for p2 in p1:\n t2 = []\n for p3 in p2.split(','):\n p3 = p3.format(**state)\n if '*' in p3:\n t2.extend(sorted(glob.glob(p3, recursive=True)))\n else:\n t2.append(p3)\n t1.append(t2)\n it_table.append(t1)\n\n # get table(3D) ->paras(2D), using eval trick\n # 1,2|3,4,5|6|7,8 -> 1367,1368,1467,1468,...\n paras = []\n for p in it_table:\n tuples = ','.join([\"t%d\" % t for t in range(len(p))])+','\n fors = ' '.join(['for t{0} in p[{0}]'.format(t)\n for t in range(len(p))])\n trick = \"[({}) {}]\".format(tuples, fors)\n paras.extend(eval(trick, {\"p\": p}))\n\n if len(paras) == 0:\n print(\"Maybe the wrong file glob.\")\n\n # get meta, get files list\n if 'meta' not in conf or len(conf['meta']) == 0:\n files = []\n for p in it_table:\n files.extend(p[0])\n conf['meta'] = {k: {} for k in list(set(files))}\n saveconf(conf)\n meta_fn() # from filename\n conf = loadconf()\n\n # get tasks iterately by using it_dict\n tasks = {}\n cmd = form.format(' '.join(conf[\"shell\"]), **state)\n print(cmd)\n compute = conf[\"each\"]\n for values in paras:\n context = {k: v for k, v in zip(key_iter, values)}\n state.update(context)\n meta = conf['meta'][state['input']]\n state.update(meta)\n # print(state)\n\n # compute {$each}\n for k, v in compute.items():\n if type(v) is str:\n if k.startswith('$'):\n state[k] = eval(v)\n else:\n state[k] = v.format(**state)\n\n # regxp cmd to get options\n cmd_tmp = cmd.format(**state)\n opt_cfgs = re.findall(r\"-c +([^ ]+.cfg)\", cmd_tmp)\n opt_frames = re.findall(r\"-f +(\\d+) +\", cmd_tmp)\n\n # get meta, guess -c **/*.cfg\n for cfg in opt_cfgs:\n if not os.path.exists(cfg):\n print(\"%s not found. 
You may use meta to parse filename.\" % cfg)\n return\n cfgname = os.path.basename(cfg).split('.')[0]\n if (cfgname.split('_')[0]) == (state['$inname'].split('_')[0]):\n state['meta'] = readcfg(cfg)\n conf['meta'][state['input']] = state['meta']\n\n # get nframes\n nframes = \"0\"\n if len(opt_frames) > 0:\n nframes = opt_frames[-1]\n else:\n nframes = conf['meta'][state['input']].get(\n 'FramesToBeEncoded', '0')\n\n # process sys0.mode\n if '$mode' in key_iter:\n key = state['$mode']\n value = compute['$mode'][key]\n if \"$\" in key:\n state['$mode'] = eval(value)\n else:\n state['$mode'] = value.format(**state)\n\n shell = cmd.format(**state)\n output = state[\"output\"].format(**state)\n tasks[output] = {\"status\": \"0/%s\" % nframes, \"shell\": shell}\n\n conf[\"tasks\"] = tasks\n saveconf(conf)\n print(\"[task+%3d] Tasks generated.\" % len(tasks))\n\n\ndef run(core=4):\n try:\n print(get(\"/id\"))\n fn = getlatestjob()\n pf = {'fpath': fn, 'core': core}\n print(post(\"/add\", pf))\n except:\n print(\"Server Not Running. Try python3 server.py\")\n\n\ndef show():\n history = loadconf(fn=\"history.json\")\n recent = sorted(history.keys(), reverse=True)\n tasks = history[recent[0]]\n count = {\"wait\": 0, \"excute\": 0, \"finish\": 0}\n print('\\n---Analyze recent tasks.---')\n print(\"EXP @\", recent[0])\n\n # read log\n HASLOG = False\n if HASLOG:\n fnfix = \"%s\"\n results = []\n sample = fnfix % next(iter(tasks))\n enctype = log_getEnctype(sample)\n\n for tkey, tvalue in tasks.items():\n status = \"wait\"\n cur, total = tvalue[\"status\"].split('/')\n fn = fnfix % tkey\n if os.path.exists(fn):\n status, cur, result = log_adapter(fn, enctype)\n if result:\n results.append(result)\n tvalue[\"status\"] = \"%3d/%3d\" % (int(cur), int(total))\n count[status] += 1\n print(\"[{}] {}\".format(tvalue[\"status\"], tkey.split(\"/\")[-1]))\n print('Total %d tasks, %d wait, %d excute, %d finish.' %\n (len(tasks), count[\"wait\"], count[\"excute\"], count[\"finish\"]))\n with open(\"result.csv\", \"w\") as f:\n f.write(\",\".join(LOG_KEYS[enctype])+\"\\n\")\n for result in results:\n f.write(','.join(result)+'\\n')\n print(\"result.csv generated.\")\n saveconf(history, fn=\"history.json\")\n else:\n fnfix = \"%s.png\"\n results = []\n\n for tkey, tvalue in tasks.items():\n status = \"wait\"\n fn = fnfix % tkey\n if os.path.exists(fn):\n status = \"finish\"\n else:\n print(fn)\n count[status] += 1\n print('Total %d tasks, %d wait, %d excute, %d finish.' %\n (len(tasks), count[\"wait\"], count[\"excute\"], count[\"finish\"]))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Media Encoding Experiment Manager. Copyright @ 2016-2020')\n parser.add_argument(\n \"verb\", choices=['start', 'new', 'meta', 'run', 'show'])\n parser.add_argument(\"--force\", action='store_true', default=False,\n help=\"new force overwrite experiments.json\")\n parser.add_argument(\"--core\", type=int, default=4,\n help=\"run with n concurrent process\")\n args = parser.parse_args()\n dict_func = {'new': new, 'start': start,\n 'meta': meta_fn, 'run': run, 'show': show}\n if args.verb == 'start':\n start(args.force)\n elif args.verb == 'run':\n run(args.core)\n else:\n dict_func[args.verb]()\n\n"
},
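codexp.py above expands its iteration table into parameter combinations by building a comprehension string and calling `eval` on it ("1,2|3,4,5|6 -> 136,137,..."). The standard-library `itertools.product` expresses the same cross product without string evaluation; a small sketch with made-up sample data:

```python
import itertools

p = [["a.yuv", "b.yuv"], ["QP"], ["27", "32", "37"]]

# Equivalent of the eval-built comprehension in codexp.start():
paras = list(itertools.product(*p))
# -> [('a.yuv', 'QP', '27'), ('a.yuv', 'QP', '32'), ...]
```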
{
"alpha_fraction": 0.5211743116378784,
"alphanum_fraction": 0.5361680388450623,
"avg_line_length": 34.8649787902832,
"blob_id": "bae0386bdbb5ee9a7e7064bae4c6513d70e0c4bf",
"content_id": "6dc7e0171de77e8e5f14308028af243594cd2ecf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17474,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 474,
"path": "/cbatch.py",
"repo_name": "wgqsjtu/codexp",
"src_encoding": "UTF-8",
"text": "import os\r\nimport sys\r\nimport time\r\nimport re\r\nimport glob\r\nimport json\r\nimport argparse\r\nimport subprocess\r\nfrom multiprocessing import Pool\r\nfrom urllib.parse import urlencode\r\nfrom urllib.request import Request, urlopen\r\n\r\nTASKS = []\r\nHOST = {\r\n \"host.local\": {\r\n \"ip\": \"127.0.0.1\",\r\n \"vtm.base\": \"/home/faymek/VTM\"\r\n },\r\n \"host.enc\": {\r\n \"ip\": \"172.16.7.84\",\r\n \"vtm.base\": \"/home/enc/faymek/VTM\",\r\n \"hpm.base\": \"/home/enc/faymek/hpm-phase-2\"\r\n },\r\n \"host.4gpu\": {\r\n \"ip\": \"10.243.65.72\"\r\n }\r\n}\r\n\r\nTRANSLATE = {}\r\nLOG_KEYS = {\r\n \"VTM\": ['Frames', '|', 'Bitrate', 'Y-PSNR', 'U-PSNR', 'V-PSNR', 'YUV-PSNR', 'Time'],\r\n \"HM\": ['Frames', '|', 'Bitrate', 'Y-PSNR', 'U-PSNR', 'V-PSNR', 'YUV-PSNR', 'Time'],\r\n \"HPM\": ['Y-PSNR', 'U-PSNR', 'V-PSNR', 'Y-MSSSIM', 'Bits', 'Bitrate', 'Frames', 'Time']\r\n}\r\n\r\nMETA = {\r\n \"InputBitDepth\": \"8\",\r\n \"InputChromaFormat\": \"\",\r\n \"FrameRate\": \"\",\r\n \"SourceWidth\": \"\",\r\n \"SourceHeight\": \"\",\r\n \"AllFrames\": \"\",\r\n \"PixelFormat\": \"\"\r\n}\r\n\r\n\r\nclass BaseConf:\r\n def __init__(self):\r\n pass\r\n\r\n\r\ndef post(addr, pf):\r\n request = Request(Conf.baseurl+addr, urlencode(pf).encode())\r\n return urlopen(request).read().decode()\r\n\r\n\r\ndef get(addr):\r\n request = Request(Conf.baseurl+addr)\r\n return urlopen(request).read().decode()\r\n\r\n\r\n# return status curframe results\r\ndef log_vtm(fn):\r\n with open(fn, \"r\") as f:\r\n lines = list(f.readlines())\r\n nline = len(lines)\r\n if nline < 10:\r\n return \"wait\", 0, None\r\n elif lines[-2] and lines[-2].split()[0] == \"finished\":\r\n values = lines[-4].split()\r\n values.append(lines[-1].split()[2]) # Total Time\r\n return \"finish\", nline-15, values\r\n else:\r\n return \"excute\", nline-10, None\r\n\r\n\r\ndef log_hm(fn):\r\n with open(fn, \"r\") as f:\r\n lines = list(f.readlines())\r\n nline = len(lines)\r\n if nline < 68:\r\n return \"wait\", 0, None\r\n elif lines[-1] and lines[-1].split()[-1] == \"sec.\":\r\n values = lines[-21].split()\r\n values.append(lines[-1].split()[2]) # Total Time\r\n return \"finish\", nline-92, values\r\n else:\r\n return \"excute\", nline-68, None\r\n\r\n\r\ndef log_hpm(fn):\r\n with open(fn, \"r\") as f:\r\n lines = list(f.readlines())\r\n nline = len(lines)\r\n if lines[0].startswith(\"Note\"):\r\n nline -= 1\r\n if nline < 48:\r\n return \"wait\", 0, None\r\n elif lines[-2] and lines[-2].split()[-1] == \"frames/sec\":\r\n cl = lines[-12:-6] + lines[-5:-4]\r\n values = [v.split()[-1] for v in cl]\r\n values.append(lines[-4].split()[-2]) # Total Time\r\n return \"finish\", nline-62, values\r\n else:\r\n return \"excute\", nline-48, None\r\n\r\n\r\ndef log_getEnctype(fn):\r\n enctype = \"\"\r\n with open(fn, \"r\") as f:\r\n lines = list(f.readlines())\r\n nline = len(lines)\r\n if nline > 1:\r\n if lines[1].startswith(\"VVCSoftware: VTM Encoder Version\"):\r\n enctype = \"VTM\"\r\n elif lines[1].startswith(\"HM software: Encoder Version\"):\r\n enctype = \"HM\"\r\n elif lines[1].startswith(\"HPM version\"):\r\n enctype = \"HPM\"\r\n return enctype\r\n\r\n\r\ndef log_adapter(fn, enctype=\"\"):\r\n if not enctype: # interpret\r\n enctype = log_getEnctype(fn)\r\n dict_func = {\r\n \"VTM\": log_vtm,\r\n \"HM\": log_hm,\r\n \"HPM\": log_hpm\r\n }\r\n return dict_func[enctype](fn)\r\n\r\n\r\ndef readyuv420(filename, bitdepth, W, H):\r\n if bitdepth == '8':\r\n bytesPerPixel = 1\r\n elif bitdepth == '10':\r\n bytesPerPixel = 
2\r\n pixelsPerFrame = int(H) * int(W) * 3 // 2\r\n bytesPerFrame = bytesPerPixel * pixelsPerFrame\r\n fp = open(filename, 'rb')\r\n fp.seek(0, 2)\r\n totalframe = fp.tell() // bytesPerFrame\r\n return str(totalframe)\r\n\r\n\r\ndef meta_fn(fn, calcFrames=False):\r\n meta = META.copy()\r\n items = fn[:-4].split(\"_\")[1:]\r\n for item in items:\r\n if re.match(r\"^[0-9]*x[0-9]*$\", item):\r\n meta[\"SourceWidth\"], meta[\"SourceHeight\"] = item.split(\"x\")\r\n elif re.match(r\"^[0-9]*fps$\", item):\r\n meta[\"FrameRate\"] = item.split(\"fps\")[0]\r\n elif re.match(r\"^[0-9]*bit\", item):\r\n meta[\"InputBitDepth\"] = item.split(\"bit\")[0]\r\n elif item in [\"444\", \"440\", \"422\", \"411\", \"420\", \"410\", \"311\"]:\r\n meta[\"InputChromaFormat\"] = item\r\n elif re.match(r\"^[0-9]*$\", item):\r\n meta[\"FrameRate\"] = item\r\n if calcFrames:\r\n meta[\"AllFrames\"] = readyuv420(\r\n fn, meta[\"InputBitDepth\"], meta[\"SourceWidth\"], meta[\"SourceHeight\"])\r\n if not meta[\"InputChromaFormat\"]:\r\n meta[\"InputChromaFormat\"] = Conf.cf\r\n meta[\"PixelFormat\"] = \"yuv{}p\".format(meta[\"InputChromaFormat\"])\r\n if meta[\"InputBitDepth\"] == \"10\":\r\n meta[\"PixelFormat\"] = meta[\"PixelFormat\"]+\"10le\"\r\n return meta\r\n\r\n\r\ndef getabspath(s):\r\n return os.path.abspath(os.path.expanduser(s))\r\n\r\n\r\ndef yuvopt(fn):\r\n opt = \"-i %s\" % fn\r\n if fn.endswith(\".yuv\"):\r\n yuvinfo = \" -s {SourceWidth}x{SourceHeight} -pix_fmt {PixelFormat} \"\r\n opt = yuvinfo.format(**meta_fn(fn)) + opt\r\n return opt\r\n\r\n\r\ndef metric(enc, ref, mode='psnr', onlykey=False):\r\n shellmap = {\r\n \"psnr\": \"ffmpeg -v info %s %s -filter_complex psnr -f null -y - 2>&1\",\r\n \"ssim\": \"ffmpeg -v info %s %s -filter_complex ssim -f null -y - 2>&1\",\r\n \"vmaf\": \"ffmpeg -v info %s %s -filter_complex libvmaf -f null -y - 2>&1\",\r\n }\r\n cmd = shellmap[mode] % (yuvopt(enc), yuvopt(ref))\r\n line = list(os.popen(cmd).readlines())[-1]\r\n line = line.replace(\"score: \", \"vmaf:\")\r\n items = line[:-1].split(' ')[4:]\r\n if onlykey:\r\n return [item.split(':')[0] for item in items if ':' in item]\r\n data = [item.split(':')[1] for item in items if ':' in item]\r\n info = \"\"\r\n if \"PSNR\" in line:\r\n info = ' '.join(items[:3])\r\n elif \"SSIM\" in line:\r\n info = ' '.join(items[0:5:2])\r\n elif \"VMAF\" in line:\r\n info = items[-1]\r\n return data, info\r\n\r\n\r\ndef measure(inpath, outpath, mode='psnr'):\r\n results = []\r\n inglob = inpath + \"/*\"\r\n for fin in sorted(glob.glob(inglob)):\r\n inname = '_'.join(os.path.basename(fin).split('_')[:-1])\r\n outglob = \"{}/{}*\".format(outpath, inname)\r\n for fout in sorted(glob.glob(outglob)):\r\n outname = os.path.basename(fout)\r\n data, info = metric(fin, fout, mode)\r\n results.append([outname]+data)\r\n print(\"%-48s %s\" % (outname, info))\r\n with open(\"measure.csv\", \"w\") as f:\r\n if results:\r\n keys = metric(fin, fout, mode, onlykey=True)\r\n f.write('fn,'+','.join(keys)+'\\n')\r\n f.writelines([','.join(item)+'\\n' for item in results])\r\n else:\r\n print(\"No matches.\")\r\n\r\n\r\ndef yuv1stframe(inpath, outpath):\r\n shell = \"ffmpeg -y -f rawvideo -video_size {SourceWidth}x{SourceHeight} -pixel_format {PixelFormat} -i {fin} -vframes 1 {fout}.yuv\"\r\n inglob = \"{inpath}/*.yuv\".format(inpath=inpath)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n fout = \"{outpath}/{inname}\".format(outpath=outpath, inname=inname)\r\n meta = meta_fn(fin)\r\n cmd = 
shell.format(fin=fin, **meta, fout=fout)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef convert(inpath, outpath, mode=\"png2yuv\"):\r\n fmtmap = {\r\n \"png2yuv\": \"ffmpeg -y -i {fin} -pix_fmt {PixelFormat} {fout}.yuv\",\r\n \"png2rgb\": \"ffmpeg -y -i {fin} -pix_fmt rgb24 {fout}.rgb\",\r\n \"yuv2png\": \"ffmpeg -y -f rawvideo -pixel_format {PixelFormat} -video_size {SourceWidth}x{SourceHeight} -i {fin} -vframes 1 {fout}.png\",\r\n \"rgb2png\": \"ffmpeg -y -f rawvideo -pixel_format rgb24 -video_size {SourceWidth}x{SourceHeight} -i {fin} -vframes 1 {fout}.png\"\r\n }\r\n shell = fmtmap[mode]\r\n fmt1, fmt2 = mode.split('2')\r\n inglob = \"{}/*.{}\".format(inpath, fmt1)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n fout = \"{}/{}\".format(outpath, inname)\r\n meta = meta_fn(fin)\r\n cmd = shell.format(fin=fin, **meta, fout=fout)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef hpmenc(inpath, outpath, qplist=[]):\r\n base = Conf.host[\"hpm.base\"]\r\n shell = ' '.join([\r\n \"{base}/bin/app_encoder\",\r\n \"--config {base}/cfg/encode_AI.cfg\",\r\n \"-i {fin} -w {SourceWidth} -h {SourceHeight} -z 50 -f 1 -d {InputBitDepth} -q {qp} \",\r\n \"-o {fout}.bin -r {fout}.yuv > {fout}.log\"\r\n ])\r\n inglob = \"{inpath}/*.yuv\".format(inpath=inpath)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n meta = meta_fn(fin)\r\n for qp in qplist:\r\n fout = \"{}/{}_{:02d}\".format(outpath, inname, qp)\r\n cmd = shell.format(base=base, fin=fin, **meta, fout=fout, qp=qp)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef vtmenc(inpath, outpath, qplist=[56]):\r\n base = Conf.host[\"vtm.base\"]\r\n shell = ' '.join([\r\n \"{base}/bin/EncoderAppStatic\",\r\n \"-c {base}/cfg/encoder_intra_vtm.cfg\",\r\n \"-i {fin} -wdt {SourceWidth} -hgt {SourceHeight} -fr 30 -f 1 -q {qp} --InputChromaFormat={InputChromaFormat}\",\r\n \"--InputBitDepth={InputBitDepth} --OutputBitDepth={InputBitDepth} --ConformanceMode \",\r\n \"-b {fout}.bin -o {fout}.yuv > {fout}.log\"\r\n ])\r\n inglob = \"{inpath}/*.yuv\".format(inpath=inpath)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n meta = meta_fn(fin)\r\n for qp in qplist:\r\n fout = \"{}/{}_{:02d}\".format(outpath, inname, qp)\r\n cmd = shell.format(base=base, fin=fin, **meta, fout=fout, qp=qp)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef vtmencrgb(inpath, outpath, qplist=[56]):\r\n base = Conf.host[\"vtm.base\"]\r\n shell = ' '.join([\r\n \"{base}/bin/EncoderAppStatic\",\r\n \"-c {base}/cfg/encoder_intra_vtm.cfg\",\r\n \"-i {fin} -wdt {SourceWidth} -hgt {SourceHeight} -fr 30 -f 1 -q {qp} --InputChromaFormat=444\",\r\n \"--InputBitDepth={InputBitDepth} --OutputBitDepth={InputBitDepth} --ConformanceMode\",\r\n \"--InputColourSpaceConvert=RGBtoGBR --SNRInternalColourSpace=1 --OutputInternalColourSpace=0\", # improve ~0.05dB\r\n \"-b {fout}.bin -o {fout}.rgb > {fout}.log\"\r\n ])\r\n inglob = \"{inpath}/*.rgb\".format(inpath=inpath)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n meta = meta_fn(fin)\r\n for qp in qplist:\r\n fout = \"{}/{}_{:02d}\".format(outpath, inname, qp)\r\n cmd = shell.format(base=base, fin=fin, **meta, fout=fout, qp=qp)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef hpmcrop(inpath, outpath):\r\n shell = ' '.join([\r\n \"ffmpeg -y -f rawvideo -pixel_format yuv420p10le -video_size {TrueWidth}x{TrueHeight}\",\r\n \"-i {fin} -filter:v 'crop={SourceWidth}:{SourceHeight}:0:0' -vframes 1\",\r\n \"-f rawvideo -pix_fmt {PixelFormat} -s {SourceWidth}x{SourceHeight} 
{fout}.yuv\"\r\n ])\r\n inglob = \"{inpath}/*.yuv\".format(inpath=inpath)\r\n for fin in glob.glob(inglob):\r\n inname = os.path.basename(fin).split('.')[0]\r\n fout = \"{outpath}/{inname}\".format(outpath=outpath, inname=inname)\r\n meta = meta_fn(fin)\r\n meta[\"TrueWidth\"] = -(int(meta['SourceWidth'])//-8)*8\r\n meta[\"TrueHeight\"] = -(int(meta['SourceHeight'])//-8)*8\r\n cmd = shell.format(fin=fin, **meta, fout=fout)\r\n TASKS.append(cmd)\r\n\r\n\r\ndef show(inpath, outpath):\r\n print('--- Analyze encode logs. ---')\r\n tasks = sorted(glob.glob(inpath+\"/*.log\"))\r\n count = {\"wait\": 0, \"excute\": 0, \"finish\": 0}\r\n enctype = log_getEnctype(tasks[0])\r\n results = []\r\n for fn in tasks:\r\n inname = os.path.basename(fn).split('.')[0]\r\n status, cur, result = log_adapter(fn, enctype)\r\n if result:\r\n results.append([inname]+result)\r\n count[status] += 1\r\n if status != \"finish\":\r\n print(\"[{}] {}.log\".format(status, inname))\r\n print('Total %d tasks, %d wait, %d excute, %d finish.' %\r\n (len(tasks), count[\"wait\"], count[\"excute\"], count[\"finish\"]))\r\n with open(\"enclog.csv\", \"w\") as f:\r\n f.write('fn,'+','.join(LOG_KEYS[enctype])+\"\\n\")\r\n for result in results:\r\n f.write(','.join(result)+'\\n')\r\n print(\"enclog.csv generated.\")\r\n\r\n\r\ndef netop(inpath, outpath, op):\r\n cmds = [\r\n \"rm /home/medialab/faymek/iir/datasets/cli/*\",\r\n \"mv %s/* /home/medialab/faymek/iir/datasets/cli/\" % inpath,\r\n \"/home/medialab/miniconda3/envs/iir/bin/python /home/medialab/faymek/iir/codes/test_%s.py -opt ~/faymek/iir/codes/options/test/%s.yml\" % (\r\n op[-1], op),\r\n \"mv /home/medialab/faymek/iir/results/test/%s/* %s/\" % (op, outpath)\r\n ]\r\n TASKS.extend(cmds)\r\n\r\n\r\ndef call_script(script):\r\n desc = script.split('/')[-1]\r\n stamp = time.strftime(\"%m-%d %H:%M\", time.localtime())\r\n print(\"- [%s] start :\" % stamp, desc)\r\n re = subprocess.run(script, shell=True, capture_output=True)\r\n # print(re.stderr)\r\n stamp = time.strftime(\"%m-%d %H:%M\", time.localtime())\r\n print(\"- [%s] finish:\" % stamp, desc)\r\n\r\n\r\nif __name__ == '__main__':\r\n funcMap = {'yuv1stframe': yuv1stframe, 'netop': netop,\r\n 'hpmenc': hpmenc, 'hpmcrop': hpmcrop,\r\n 'vtmenc': vtmenc, 'vtmencrgb': vtmencrgb, 'show': show}\r\n\r\n parser = argparse.ArgumentParser(\r\n description='Media Encoding Batch Utils. 
Copyright @ 2016-2020')\r\n parser.add_argument(\"verb\")\r\n parser.add_argument(\"inpath\")\r\n parser.add_argument(\"outpath\", default=\"./\")\r\n parser.add_argument(\"--host\", type=str, default=\"off\",\r\n help=\"run in which machine\")\r\n parser.add_argument(\"--core\", type=int, default=4,\r\n help=\"run with n concurrent process\")\r\n parser.add_argument(\"--wait\", type=int, default=5,\r\n help=\"check for every n seconds\")\r\n parser.add_argument(\"--qps\", type=str, default=\"56,\",\r\n help=\"encode qp list\")\r\n parser.add_argument(\"--op\", type=str, default=\"x2d\", choices=[\"x2d\", \"x2u\", \"x4d\", \"x4u\"],\r\n help=\"network operation\")\r\n parser.add_argument(\"--cf\", type=str, default=\"420\", choices=[\"420\", \"422\", \"444\"],\r\n help=\"chroma format\")\r\n\r\n args = parser.parse_args()\r\n args.inpath = getabspath(args.inpath)\r\n args.outpath = getabspath(args.outpath)\r\n args.qps = eval(args.qps)\r\n Conf = BaseConf()\r\n Conf.cf = args.cf\r\n if args.host == \"off\":\r\n Conf.host = HOST[\"host.local\"]\r\n else:\r\n Conf.host = HOST[\"host.\"+args.host]\r\n\r\n os.makedirs(args.outpath, exist_ok=True)\r\n if '2' in args.verb:\r\n convert(args.inpath, args.outpath, args.verb)\r\n elif args.verb in ['psnr', 'ssim', 'vmaf']:\r\n measure(args.inpath, args.outpath, args.verb)\r\n sys.exit()\r\n elif args.verb in ['show']:\r\n funcMap[args.verb](args.inpath, args.outpath)\r\n sys.exit()\r\n elif args.verb in ['hpmenc', 'vtmenc', 'vtmencrgb']:\r\n funcMap[args.verb](args.inpath, args.outpath, args.qps)\r\n elif args.verb == 'netop':\r\n netop(args.inpath, args.outpath, args.op)\r\n else:\r\n funcMap[args.verb](args.inpath, args.outpath)\r\n with open(\"tasks.json\", \"w\") as f:\r\n json.dump(TASKS, f, indent=4)\r\n\r\n if args.host == \"off\": # run instant, without server\r\n print('Excute the %d shell script with %d process.\\n' %\r\n (len(TASKS), args.core))\r\n RunPool = Pool(args.core)\r\n RunPool.map_async(call_script, TASKS)\r\n RunPool.close()\r\n RunPool.join()\r\n\r\n else: # server mode, check server\r\n Conf.baseurl = 'http://{}:42024'.format(Conf.host[\"ip\"])\r\n try:\r\n print(\"Host %s : \" % args.host+get(\"/id\"))\r\n except:\r\n print(\"Host %s : Server Not Running!\" % args.host)\r\n sys.exit()\r\n\r\n remdir = get(\"/path\")\r\n key = remdir.split('/')[-1]\r\n print(\"Job key: %s\" % key)\r\n remin = remdir + \"/inpath\"\r\n remout = remdir + \"/outpath\"\r\n\r\n if args.host != \"local\":\r\n for i in range(len(TASKS)):\r\n TASKS[i] = TASKS[i].replace(\r\n args.inpath, remin).replace(args.outpath, remout)\r\n with open(\"tasks.json\", \"w\") as f:\r\n json.dump(TASKS, f, indent=4)\r\n print(\"\\n--- SCP uploading ---\\n\")\r\n os.system(\"scp tasks.json {}:{}/\".format(args.host, remdir))\r\n os.system(\"scp -r {}/* {}:{}/\".format(args.inpath, args.host, remin))\r\n else:\r\n os.system(\"cp tasks.json {}/\".format(remdir))\r\n\r\n pf = {'fpath': remdir+\"/tasks.json\", 'core': args.core, 'key': key}\r\n print(post(\"/add\", pf))\r\n while True:\r\n left = get(\"/busy?\"+key)\r\n stamp = time.strftime(\"%m-%d %H:%M\", time.localtime())\r\n print(\"- [%s]: %s jobs left\" % (stamp, left))\r\n if left == \"0\":\r\n break\r\n time.sleep(args.wait)\r\n\r\n if args.host != \"local\":\r\n print(\"\\n--- SCP downloading ---\\n\")\r\n os.system(\"scp -r {}:{}/* {}\".format(args.host, remout, args.outpath))\r\n\r\n print(\"--- Job done. ---\")\r\n"
},
{
"alpha_fraction": 0.6401383876800537,
"alphanum_fraction": 0.6830856800079346,
"avg_line_length": 22.511960983276367,
"blob_id": "c194bb04c9c6b0bc3b7779167a8406cfe45772f3",
"content_id": "3c8c10a4112e163296729d2d36662bfa1cf99b70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7641,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 209,
"path": "/README.md",
"repo_name": "wgqsjtu/codexp",
"src_encoding": "UTF-8",
"text": "# codexp编码实验管理工具\n\n## 批处理版本 cbatch.py\n\n```\npython cbatch.py verb inpath outpath --opt options\n```\n\n对inpath目录中的每个合适的文件,执行verb,输出到outpath目录,有时需手动修改代码。\n\n针对YUV文件,会自动从文件名推断尺寸、位深等信息,如Campfire_1920x1080_10bit_420.yuv。\n\n运行模式包括:本地快速运行,本地服务器模式,远程服务器模式。服务器模式下,可以一直提交新的任务(bug)。\n\n快速任务verb包括:\n- 格式转换类,使用ffmpeg进行格式转换\n - png2yuv, yuv2png, png2rgb, rgb2png\n- 指标测量类,使用ffmpeg进行指标测量,从输入文件名匹配输出文件名,并汇总输出measure.csv到当前目录\n - psnr, ssim, vmaf\n- vtmenc:使用 VVC VTM 编码器进行编码,需配置路径和命令行\n- hpmenc:使用 AVS HPM 编码器进行编码,需配置路径和命令行\n- hpmcrop:使用ffmpeg切去HPM编码后的自动padding\n- show:分析输入目录的编码器log,输出enclog.csv到当前目录\n- run: 检查并提交用户给定的任务列表\n\n\n选项--opt包括:\n\n- --cf:强制使用指定的色度采样格式,如 420|422|444\n- --qps:编码类应用使用的QP列表,应为python中的列表或元组,如 27, | 27,32 | \"range(27,43,5)\" \n- --core:指定同时执行任务的多进程数量,默认 4,服务器上需另外设置\n- --host:指定任务在哪台机器运行,默认off:本机快速运行,local:本机服务器模式,其他自定义服务器,需配置iptables和ssh无密码连接,并在远端运行python server.py\n- --wait:服务器模式下,提交任务后定时检查运行情况的时间间隔秒数\n\n使用注意:\n\n- 无需安装任何python包\n- 运行前请先修改 host.local 中的编码器路径信息\n- 一般需使用绝对路径,可以软链接到你的实验目录,如`ln ~/codexp/cbatch.py ~/test/cbatch.py`,这样即可在实验目录使用相对路径。\n- 目前server仅支持Linux发行版,使用server模式需先在代码里配置多线程数量,如`RunPool(32)`\n\n\n\n## 开发计划\n\n[todo] Server Enhance, const overload for time measurement\n[todo] Data processing\n[todo] Batch Pipeline\n[201023] Dir-level batch operation\n[200919] Adapter to diffenent codec\n[200822] Grammer newly designed\n\n### features\n- show pid\n- gallery and job template\n- local server mode & remote mode\n- Sample. run several shells to check.\n- RD-plot\n\n### Server\n- server linux ?windows\n- client linux ?windows\n- file transfer\n - client2server \n - master2slave scp\n- \n\n## 复杂配置版本 codexp.py\n\n管理任务,对每个Job起一个标识符,排序检索已完成的任务及其结果。清理运行的结果。\n\n分为client和server,先在本地写好json/toml配置文件,之后运行:\n\n- new: 创建实验配置`jobxxx.json`\n- start:根据配置,补充生成tasks,获得yuv信息,编码帧数\n- meta: 推断输入文件的基本信息,主要用来维护yuv\n- run:将实验配置提交到服务器\n- show:查看运行情况,分析运行结果,生成表格\n- clean:清除某次任务\n\n\n### new\n\n生成序号自增的配置文件,例如`job001.json`,用户编辑文件,之后的命令将自动使用最新配置文件。gallery中提供了系列常用配置作为模板,也可以将自定义配置移入其中。new命令会自动匹配对应的文件,生成新的job,默认模板为`conf_pro.json`。\n\n```shell\npython codexp.py new\npython codexp.py new HM\n```\n\n配置文件的语法进行了重新设计,借鉴前端Vue模板写法,将模板量分为once, iter, each三类,按照这个顺序进行计算,填充生成shell指令。以$开头的键,其值可以是单行的python语句。\n\n为方便使用,系统提供了其他复杂功能:`$mode`中可以自定义编码实验模式,如QP/RATE/QPIF;`$meta`中指定了yuv文件的缺省信息,系统也会从.cfg或者yuv文件名中获取信息。\n\n模板示例`linux-HMseq.json`:\n\n```json\n{\n \"once\": {\n \"exe_name\": \"CUBayeP\",\n \"base\": \"~/HM\",\n \"inpath\": \"/home/medialab/workspace/HDD/HMSeq\",\n \"$timestamp\": \"time.strftime('%y_%m_%d_%H_%M_%S', time.localtime(time.time()))\",\n \"LocBinfiles\": \"{base}/result/{$timestamp}_{exe_name}\",\n \"output\": \"{$inname}_{para}\"\n },\n \"iter\": [\n \"input | mode | para\",\n \"{inpath}/**/*.yuv | QP | 22,27,32,37\"\n ],\n \"each\": {\n \"$inname\": \"os.path.basename(state['input']).split('.')[0]\",\n \"$cfgname\": \"os.path.basename(state['input']).split('.')[0].split('_')[0]\"\n },\n \"shell\": [\n \"{base}/exe/{exe_name} -c {base}/cfg/encoder_intra_main.cfg\",\n \"-c {base}/cfg/per-sequence/{$cfgname}.cfg\",\n \"--InputFile={input} --BitstreamFile={LocBinfiles}/yuvbin/{output}_{exe_name}.hevc\",\n \"--QP={para}\",\n \"> {LocBinfiles}/logs/{output}.log\",\n \" -f 10\"\n ]\n}\n```\n\n### start\n\n```shell\npython codexp.py start test.json\n```\n\n检查`jobxxx.json`的语法,补充生成tasks,获得yuv的元信息,编码帧数等信息。\n\n```json\n\"meta\": {\n \"055.yuv\": {\n \"SourceWidth\": \"1920\",\n \"SourceHeight\": \"1080\",\n \"FramesToBeEncoded\": \"150\",\n }\n},\n\"tasks\": {\n \"052_QP_32.log\": {\n \"status\": 
\"0/150\",\n \"shell\": \"EncoderAppStatic -c vtm.cfg -c 052.cfg > 052_QP_32.log \"\n }\n}\n```\n\n语法解析过程:\n\nkey_sys0为保留功能键,如`$mode`,`$meta`,实现较为复杂的功能。(未实现)key_sys1为默认功能键,如`$inname`获得输入文件`/seq/abc.yuv`的名称`abc`,这类含义是默认定义的,但是也可以被用户重写。需要检测值中所有涉及的键,现有模式是运行所有$键。state是状态字典,不断更新来填充字符串中的字段。\n\n新:引入safe_subsitude。\nTODO:给出未解析的字段\n\n1. 占位:key_sys0, key_sys1, key_iter, key_each; state: 占位 + once \n2. 执行{$once}, 更新state, 填充{once}, 更新state, \n3. 获取iter_paras\n4. 处理sys0\n5. 参数迭代key_iter,计算{$each}, key_sys1, 处理sys0, 填充\n\n尝试获得meta信息\n- 必)从iter.input获取文件列表\n- 选)推断shell中的-c **/*.cfg选项\n- 选)使用meta方法推断文件名,默认{$meta}\n\n尝试获得nframes编码帧数信息\n- 必)初始化为0\n- 选)从{meta}中获得\n- 选)推断shell中的-f n选项\n\n### meta\n\n从文件名推断输入文件的基本信息。\n\n### run\n\n本地将`jobxxx.json`提交到服务器,服务器端会检查tasks中的任务status,然后加入运行队列。参数有:\n\n- `--core 4` :同时运行的任务数,初始设置为4。\n- `--overload const`:负载恒定,队列不满时会填充假任务,直到所有指定任务运行结束\n- `--retry 5`:自动运行失败的任务,最大5次。\n- `--kill 1053`:结束某任务。\n\n\n### show\n\n检查task中的任务status,给出运行情况。如果任务全为success,则统计得出结果.csv。管理历史运行数据。参数有:\n\n- `--type hm|vtm|hpm|x265|uavs3e`: 根据编码器类型解析log,或自定义数据解析方法。\n- `--sort name|time`:按时间或文件名顺序排列结果。\n- `--grep string`:检索包含string的结果。\n\n```\ntitle: exp 01\ndate: 2020-03-09 10:00:00 -> 2020-03-10 08:30:17\npid\t\tstatus\t\tframe\t\tlog\n1033\tsuccess\t\t[300/300]\tCampfire_QP27\n1034\texcuting\t[75/300]\tCampfire_QP32\n1035\twait\t\t[0/300]\t\tCampfire_QP37\n1036\tfail\t\t[158/300]\tCampfire_QP42\n------\n1 success, 1 excuting, 1 wait, 1 fail.\n```\n\n### clean\n\n清除某一次任务,传入标识符。"
},
{
"alpha_fraction": 0.4887574017047882,
"alphanum_fraction": 0.5088757276535034,
"avg_line_length": 33.25675582885742,
"blob_id": "66a1703324fc14a0efc1eec647d92632490e42d0",
"content_id": "d6d6bb608691c7897be8680931a081d4c3a55378",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2535,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 74,
"path": "/adapter.py",
"repo_name": "wgqsjtu/codexp",
"src_encoding": "UTF-8",
"text": "TRANSLATE = {}\nLOG_KEYS = {\n \"VTM\": ['Total Frames', '|', 'Bitrate', 'Y-PSNR', 'U-PSNR', 'V-PSNR', 'YUV-PSNR', 'Total Time'],\n \"HM\": ['Total Frames', '|', 'Bitrate', 'Y-PSNR', 'U-PSNR', 'V-PSNR', 'YUV-PSNR', 'Total Time'],\n \"HPM\": ['PSNR Y(dB)', 'PSNR U(dB)', 'PSNR V(dB)', 'MsSSIM_Y', 'Total bits(bits)', 'bitrate(kbps)', 'Encoded frame count', 'Total encoding time']\n}\n\n# return status curframe results\ndef log_vtm(fn):\n with open(fn, \"r\") as f:\n lines = list(f.readlines())\n nline = len(lines)\n if nline < 10:\n return \"wait\", 0, None\n elif lines[-2] and lines[-2].split()[0] == \"finished\":\n values = lines[-4].split()\n values.append(lines[-1].split()[2]) # Total Time\n return \"finish\", nline-15, values\n else:\n return \"excute\", nline-10, None\n\n\ndef log_hm(fn):\n with open(fn, \"r\") as f:\n lines = list(f.readlines())\n nline = len(lines)\n if nline < 68:\n return \"wait\", 0, None\n elif lines[-1] and lines[-1].split()[-1] == \"sec.\":\n values = lines[-21].split()\n values.append(lines[-1].split()[2]) # Total Time\n return \"finish\", nline-92, values\n else:\n return \"excute\", nline-68, None\n\ndef log_hpm(fn):\n with open(fn, \"r\") as f:\n lines = list(f.readlines())\n nline = len(lines)\n if lines[0].startswith(\"Note\"):\n nline -= 1\n if nline < 48:\n return \"wait\", 0, None\n elif lines[-2] and lines[-2].split()[-1] == \"frames/sec\":\n cl = lines[-12:-6] + lines[-5:-4]\n values = [v.split()[-1] for v in cl]\n values.append(lines[-4].split()[-2]) # Total Time\n return \"finish\", nline-62, values\n else:\n return \"excute\", nline-48, None\n\ndef log_getEnctype(fn):\n enctype = \"\"\n with open(fn, \"r\") as f:\n lines = list(f.readlines())\n nline = len(lines)\n if nline>1:\n if lines[1].startswith(\"VVCSoftware: VTM Encoder Version\"):\n enctype = \"VTM\"\n elif lines[1].startswith(\"HM software: Encoder Version\"):\n enctype = \"HM\"\n elif lines[1].startswith(\"HPM version\"):\n enctype = \"HPM\"\n return enctype\n\ndef log_adapter(fn, enctype=\"\"):\n if not enctype: # interpret\n enctype = log_getEnctype(fn)\n dict_func = {\n \"VTM\": log_vtm,\n \"HM\": log_hm,\n \"HPM\": log_hpm\n }\n return dict_func[enctype](fn)\n"
}
] | 4 |
AuthEceSoftEng/emb-ntua-workshop | https://github.com/AuthEceSoftEng/emb-ntua-workshop | c67ecc54f4353f3607b7834bcb91654ba8933bcf | 41e03e26db38caf3d2b9c500d56be1a1327d8c84 | cb61427f15361b0dfb86af176c86d8b8a89d41cf | refs/heads/main | 2023-03-30T17:09:02.975363 | 2021-04-04T21:08:35 | 2021-04-04T21:08:35 | 352,953,094 | 14 | 9 | null | null | null | null | null | [
{
"alpha_fraction": 0.7471410632133484,
"alphanum_fraction": 0.7528589367866516,
"avg_line_length": 41.5405387878418,
"blob_id": "cfaf8145b6b4f6eecd02aacfba52041d6e866cc9",
"content_id": "589687f08fdf01e17f01ac8d2c94899b38556849",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1574,
"license_type": "permissive",
"max_line_length": 243,
"num_lines": 37,
"path": "/README.md",
"repo_name": "AuthEceSoftEng/emb-ntua-workshop",
"src_encoding": "UTF-8",
"text": "# emb-ntua-workshop\n\nThis workshop discusses introductory concepts of machine learning and data mining following a hands-on approach using popular tools and libraries such as [scikit-learn](https://scikit-learn.org/stable) and [matplotlib](https://matplotlib.org).\n\nThe sessions of the workshop include:\n- 1_BrainRun: Application on Behavioral Biometrics\n- 2_RiskFactors: Application on Behavioral Biometrics\n\nInstructions for running the workshop at repl.it:\n1. Navigate to https://repl.it\n2. Click on `Start coding` and login with your GitHub account\n3. Select `Import from GitHub`\n4. In the text field write `AuthEceSoftEng/emb-ntua-workshop`\n5. Click on 'Import from GitHub'\n\nAfter repl.it opens up, run the following commands at the console (on the right): \n`python -m poetry lock` \n`python -m poetry install`\n\nYou 're all set! Open any script to see the code and run it using commands (e.g. `python3 1_BrainRun/code.py`). \nYou can also write `python` in the console to run the interpreter and execute the commands one-by-one.\n\n## Machine Learning Useful Resources\n- [Practical Machine Learning in R](https://leanpub.com/practical-machine-learning-r)\n- [Google Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course)\n- [scikit-learn](https://scikit-learn.org/stable/)\n- [Tensorflow](https://www.tensorflow.org/)\n- [PyTorch](https://pytorch.org/)\n- [Matplotlib](https://matplotlib.org/)\n\n## Contact Info\n\nFor any questions fell free to contact us:\n\n- Dr. Andreas Symeonidis (email: [email protected])\n- Michail Papamichail (email: [email protected])\n- Thomas Karanikiotis (email: [email protected])\n"
},
{
"alpha_fraction": 0.6253077983856201,
"alphanum_fraction": 0.6378101706504822,
"avg_line_length": 25.796955108642578,
"blob_id": "fa03918535097e9240f3cd94ba6b813df30f79f8",
"content_id": "00d7a677b1e1fe9cadd6a0842b9ed76fa1ea54e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5279,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 197,
"path": "/2_RiskFactors/code.py",
"repo_name": "AuthEceSoftEng/emb-ntua-workshop",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, tree\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import confusion_matrix\n\npd.options.mode.chained_assignment = None\n\ndef read_data():\n # Read data\n data = pd.read_csv(\"2_RiskFactors/data/riskFactorData.csv\", sep=\";\")\n return data\n\ndef get_statistics(data):\n # Get data statistics\n print(\"\\n\")\n print(data[[\"Year\", \"BMI\", \"SBP\", \"FAT\"]].describe())\n print(\"\\n\")\n print(data[[\"Country\", \"Sex\"]].describe())\n print(\"\\n\")\n print(data[[\"BMI\", \"SBP\", \"FAT\"]].corr())\n\ndef evaluate(y_true, y_pred):\n\n print(confusion_matrix(y_true, y_pred))\n\n\ndef knnModel(data):\n # Define year\n year = 2008\n \n # Filter data\n data = data[(data[\"Year\"] == year)]\n\n # Keep selected columns\n x_train = data[[\"BMI\", \"SBP\", \"FAT\"]]\n y_train = data[\"Sex\"].to_list()\n\n # Define test data\n test_BMI = 23.5\n test_SBP = 126.5\n test_FAT = 4.4\n test_data = [test_BMI, test_SBP, test_FAT]\n\n for k in range(2, 10):\n # Create kNN classifier\n model = KNeighborsClassifier(n_neighbors=k)\n # Train kNN classifier\n model.fit(x_train, y_train)\n # Predict new sample\n prediction = model.predict([test_data])[0]\n prediction_prob = model.predict_proba([test_data])[0]\n\n print(\"\\n\")\n print(str(k) + \"-Nearest Neighbors\")\n print(\"Female: \" + str(prediction_prob[0]))\n print(\"Male: \" + str(prediction_prob[1]))\n print(\"Model Prediction: \" + prediction)\n\ndef svmModel(data):\n # Keep selected columns\n x_train = data[[\"BMI\", \"SBP\", \"FAT\"]]\n y_train = data[\"AtRisk\"].to_list()\n\n # Define test data\n test_BMI = 29.5\n test_SBP = 126.5\n test_FAT = 4.4\n test_data = [test_BMI, test_SBP, test_FAT]\n\n # Create SVM classifier\n model = svm.SVC(kernel=\"rbf\", gamma=0.3)\n # Train SVM classifier\n model.fit(x_train, y_train)\n \n # Predict new sample\n prediction = model.predict([test_data])[0]\n print(\"Model Prediction: \" + prediction)\n \n x_train[\"pred\"] = model.predict(x_train)\n evaluate(data[\"AtRisk\"].to_list(), x_train[\"pred\"].to_list())\n\ndef gaussianModel(data):\n # Keep selected columns\n x_train = data[[\"BMI\", \"SBP\", \"FAT\"]]\n y_train = data[\"AtRisk\"].to_list()\n\n # Define test data\n test_BMI = 26.5\n test_SBP = 136.5\n test_FAT = 5.8\n test_data = [test_BMI, test_SBP, test_FAT]\n\n # Create Gaussian classifier\n model = GaussianNB()\n # Train Gaussian classifier\n model.fit(x_train, y_train)\n # Predict new sample\n prediction = model.predict([test_data])[0]\n print(\"Model Prediction: \" + prediction)\n\ndef decisionTreesModel(data):\n # Keep selected columns\n x_train = data[[\"BMI\", \"SBP\", \"FAT\"]]\n y_train = data[\"AtRisk\"].to_list()\n\n # Define test data\n test_BMI = 25.5\n test_SBP = 136.5\n test_FAT = 4.8\n test_data = [test_BMI, test_SBP, test_FAT]\n\n # Create Decision Tree classifier\n model = tree.DecisionTreeClassifier()\n # Train Decision Tree classifier\n model.fit(x_train, y_train)\n # Predict new sample\n prediction = model.predict([test_data])[0]\n print(\"Model Prediction: \" + prediction)\n\ndef show_clusters(data_frame, labels, x_label, y_label, title):\n\n plt.scatter(data_frame[x_label], data_frame[y_label], s=3, c = [val + 1 for val in labels])\n plt.title(title)\n plt.xlabel(x_label)\n 
plt.ylabel(y_label)\n plt.show()\n\ndef compute_silhouette(data_frame, labels):\n\n return silhouette_score(data_frame, labels)\n\ndef perform_kmeans(data_frame, number_of_clusters):\n\n print(\"\\n --- Perform KMeans ---\")\n kmeans_model = KMeans(n_clusters=number_of_clusters, random_state=0).fit(data_frame[[\"BMI\", \"FAT\"]])\n\n print(\"Silhouette:\", compute_silhouette(data_frame[[\"BMI\", \"FAT\"]], kmeans_model.labels_))\n\n return kmeans_model\n\ndef perform_dbscan(data_frame, eps, min_samples):\n\n print(\"\\n --- Perform DBSCAN ---\")\n dbscan_model = DBSCAN(eps=eps, min_samples=min_samples).fit(data_frame[[\"BMI\", \"FAT\"]])\n\n n_clusters_ = len(set(dbscan_model.labels_)) - (1 if -1 in dbscan_model.labels_ else 0)\n print(\"Number of clusters:\", n_clusters_)\n if(n_clusters_ > 1):\n print(\"Silhouette:\", compute_silhouette(data_frame[[\"BMI\", \"FAT\"]], dbscan_model.labels_))\n\n return dbscan_model\n\n\n# Read data\nprint(\"Reading data...\")\ndata = read_data()\nprint(\"Reading ended...\")\n\n# Get general statistics\n# get_statistics(data)\n\n# ### CLASSIFICATION ###\n\n# Get sex by kNN\n# knnModel(data)\n\n# Get country by SVM\n# svmModel(data)\n\n# Get risk by NaiveBayes\n# gaussianModel(data)\n\n# Get risk by DecisionTrees\n# decisionTreesModel(data)\n\n\n### CLUSTERING ###\n\n# KMeans Clustering\n\n# filtered_data = data[data[\"Sex\"] == \"male\"]\n# model = perform_kmeans(filtered_data, 2)\n# show_clusters(filtered_data, model.labels_, \"BMI\", \"FAT\", \"KMeans\")\n\n\n# DBSCAN Clustering\n\n# filtered_data = data[data[\"Sex\"] == \"male\"]\n# model = perform_dbscan(filtered_data, 0.12, 20)\n# show_clusters(filtered_data, model.labels_, \"BMI\", \"FAT\", \"DBSCAN\")\n"
},
{
"alpha_fraction": 0.6528083086013794,
"alphanum_fraction": 0.6691984534263611,
"avg_line_length": 34.482269287109375,
"blob_id": "bf83e65d6f7af09df570ec788febccee926d761c",
"content_id": "de42f465443cc3d6c73ba988c5f08d128a2687d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5003,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 141,
"path": "/1_BrainRun/code.py",
"repo_name": "AuthEceSoftEng/emb-ntua-workshop",
"src_encoding": "UTF-8",
"text": "import json\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\nfrom utilities import GesturesVisualizer as GV\nfrom utilities import FeaturesExtractor as FE\n\ndef read_data():\n # Read data\n users = pd.read_csv(\"1_BrainRun/data/users.csv\", sep=\";\")\n devices = pd.read_csv(\"1_BrainRun/data/devices.csv\", sep=\";\")\n games = pd.read_csv(\"1_BrainRun/data/games.csv\", sep=\";\")\n gestures = pd.read_csv(\"1_BrainRun/data/gestures.csv\", sep=\";\")\n\n # Transform string representation of list into a python list\n gestures[\"data\"] = gestures[\"data\"].apply(lambda x: json.loads(x.replace(\"'\", \"\\\"\")))\n\n return users, devices, games, gestures\n\ndef get_statistics(users, devices, games, gestures):\n # Get data statistics\n print(\"\\n\")\n print(\"Number of users: \", len(users.index))\n print(\"Number of devices: \", len(devices.index))\n print(\"Number of games: \", len(games.index))\n print(\"Number of gestures: \", len(gestures.index))\n\n # Print statistics about age, num_of_devices, num_of_games and num_of_gestures for each user\n print(\"\\n\", users[[\"age\", \"num_of_devices\", \"num_of_games\", \"num_of_gestures\"]].describe())\n\n # Print statistics about devices os\n print(\"\\n\", devices[[\"os\"]].describe())\n\n # Print statistics about game types\n print(\"\\n\", games[[\"game_type\"]].describe())\n\n # Print statistics about gesture types\n print(\"\\n\", gestures[[\"type\"]].describe())\n\ndef create_pie_charts(users, devices, games, gestures):\n\n # Percentage of playing time per game type\n types = games[\"game_type\"].tolist()\n plt.subplot(221).pie(Counter(types).values(), labels=Counter(types).keys(), autopct='%1.1f%%', shadow=True)\n\n # Percentage of swipes and taps\n types = gestures[\"type\"].tolist()\n plt.subplot(222).pie(Counter(types).values(), labels=Counter(types).keys(), autopct='%1.1f%%', shadow=True)\n\n # Percentage of male and female users\n types = users[\"gender\"].tolist()\n plt.subplot(223).pie(Counter(types).values(), labels=Counter(types).keys(), autopct='%1.1f%%', shadow=True)\n\n # Percentage of male and female users\n types = devices[\"os\"].tolist()\n plt.subplot(224).pie(Counter(types).values(), labels=Counter(types).keys(), autopct='%1.1f%%', shadow=True)\n \n plt.show()\n\ndef create_line_charts(users, gestures):\n\n # Plot users experience points\n # plt.subplot(211).plot(x=list(np.arange(1, len(users) + 1)), y=list(users[\"xp\"]))\n plt.subplot(211).plot(list(np.arange(1, len(users) + 1)), list(users[\"xp\"]), \"-\")\n plt.subplot(211).set_xlabel(\"Users\")\n plt.subplot(211).set_ylabel(\"Experience points\")\n plt.subplot(211).set_title(\"Experience points per user\")\n\n # Plot number of data points sampled from each swipe\n swipes = gestures[(gestures[\"type\"] == \"swipe\")]\n lengths = swipes[\"data\"].apply(lambda x: len(x))\n \n plt.subplot(212).plot(list(np.arange(1, len(lengths) + 1)), list(lengths))\n plt.subplot(212).set_xlabel(\"Swipes\")\n plt.subplot(212).set_ylabel(\"Number of data points\")\n plt.subplot(212).set_title(\"Number of data points sampled from each swipe\")\n \n plt.subplots_adjust(left = 0.15, top = 0.95, hspace = 0.55)\n plt.show()\n\ndef create_boxplot(devices):\n\n devices[\"os\"] = devices.set_index(\"os\").index\n devices.boxplot(column=[\"width\", \"height\"], by=\"os\", rot=45)\n plt.show()\n\ndef visualize_gestures(users, devices):\n\n uid = users[(users[\"_id\"] == \"5b5b2b94ed261d61ede3d085\")].iloc[0][\"_id\"]\n 
devIds = devices[(devices[\"user_id\"] == uid)].iloc[:][\"device_id\"]\n\n gests = pd.DataFrame()\n for devId in devIds.index:\n gests = gests.append(gestures[(gestures[\"device_id\"] == devIds.loc[devId])])\n gests = gests.reset_index(drop=True)\n\n gestureVisualizer = GV.GesturesVisualizer(gests.loc[0:25], deviceWidth=411, deviceHeight=798)\n gestureVisualizer.plot_gestures()\n\ndef get_features(users, devices):\n\n # Get user info\n uid = users[(users[\"_id\"] == \"5b5b2b94ed261d61ede3d085\")].iloc[0][\"_id\"]\n # Get devices info for the user\n devIds = devices[(devices[\"user_id\"] == uid)].iloc[:][\"device_id\"]\n\n # Get gestures\n gests = pd.DataFrame()\n for devId in devIds.index:\n gests = gests.append(gestures[(gestures[\"device_id\"] == devIds.loc[devId])])\n gests = gests.reset_index(drop=True)\n\n # Get calculated features for a certain swipe\n featuresExtractor = FE.FeaturesExtractor(gests)\n features_info = featuresExtractor.get_swipe_features(gests.loc[2])\n print(json.dumps(features_info, indent = 2))\n\n\n# Read data\nprint(\"Reading data...\")\nusers, devices, games, gestures = read_data()\nprint(\"Reading ended...\")\n\n# Get general statistics \nget_statistics(users, devices, games, gestures)\n\n# Create pie charts \ncreate_pie_charts(users, devices, games, gestures)\n\n# Create line charts \ncreate_line_charts(users, gestures)\n\n# Create box plot\ncreate_boxplot(devices)\n\n# Create box plot\nvisualize_gestures(users, devices)\n\n# Get Features\nget_features(users, devices)\n"
},
{
"alpha_fraction": 0.5240437388420105,
"alphanum_fraction": 0.5453552007675171,
"avg_line_length": 40.6136360168457,
"blob_id": "6895577bbbf324e4a99182813abbcb6e32a8938c",
"content_id": "1a473434ea6dd616318ce76c3d7c80235544abbb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1830,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 44,
"path": "/1_BrainRun/utilities/GesturesVisualizer.py",
"repo_name": "AuthEceSoftEng/emb-ntua-workshop",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\n\nclass GesturesVisualizer():\n\n def __init__(self, gestures, deviceWidth=360, deviceHeight=640):\n self.gestures = gestures\n self.width = deviceWidth\n self.height = deviceHeight\n\n def plot_gestures(self):\n fig = plt.figure(figsize=(3.75, 2.5 * (self.height / self.width)))\n ax = fig.add_axes([0.15, 0.05, 0.55, 0.85])\n labels = OrderedDict()\n for i, _ind in enumerate(self.gestures.index):\n labels[\"gesture_\" + str(i)] = np.random.rand(1, 3)\n x_data = []\n y_data = []\n if(len(self.gestures.iloc[i][\"data\"]) == 0):\n continue\n x_data.append(self.gestures.iloc[i][\"data\"][0][\"x0\"])\n y_data.append(self.gestures.iloc[i][\"data\"][0][\"y0\"])\n if(self.gestures.iloc[i][\"type\"] == \"swipe\"):\n for d in self.gestures.iloc[i][\"data\"]:\n x_data.append(d[\"moveX\"])\n y_data.append(d[\"moveY\"])\n keys = list(labels.keys())\n if(self.gestures.iloc[i][\"type\"] == \"tap\"):\n plt.scatter(x_data, y_data, label=keys[i], color = labels[keys[i]][0])\n else:\n plt.plot(x_data, y_data, label=keys[i], color = labels[keys[i]][0])\n\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = OrderedDict(zip(labels, handles))\n plt.xlim(0, self.width)\n plt.ylim(0, self.height)\n plt.xlabel('X - Dimension')\n plt.ylabel('Y - Dimension')\n plt.gca().invert_yaxis()\n plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.01, 0.5), loc=\"center left\")\n ax.xaxis.tick_top()\n ax.xaxis.set_label_position('top') \n plt.show()"
},
{
"alpha_fraction": 0.5386309027671814,
"alphanum_fraction": 0.5490392446517944,
"avg_line_length": 37.43077087402344,
"blob_id": "57dbd0686e729edb5bf4ba0fc1f3462610429281",
"content_id": "c29c2cc75338a5a8bff9aa55cd99a639ae79f871",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4996,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 130,
"path": "/1_BrainRun/utilities/FeaturesExtractor.py",
"repo_name": "AuthEceSoftEng/emb-ntua-workshop",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score\n\nclass FeaturesExtractor():\n def __init__(self, gestures, fake_swipes_limit=30): \n gestures = gestures[(gestures[\"t_stop\"] != -1) & (gestures[\"t_start\"] != -1)]\n gestures[\"duration\"] = gestures[\"t_stop\"] - gestures[\"t_start\"]\n gestures = gestures[(gestures[\"duration\"] > 0)]\n\n self.taps = gestures[(gestures[\"type\"] == \"tap\")]\n self.swipes = gestures[(gestures[\"type\"] == \"swipe\") & (gestures[\"duration\"] >= fake_swipes_limit)]\n self.fake_swipes = gestures[(gestures[\"type\"] == \"swipe\") & (gestures[\"duration\"] < fake_swipes_limit)]\n \n print(\"\\n==== Gestures Stats ====\")\n print(\"Taps: \", len(self.taps.index))\n print(\"Swipes: \", len(self.swipes.index))\n print(\"Fake swipes: \", len(self.fake_swipes.index), \"\\n\")\n\n def get_tap_features(self, tap):\n info = {}\n info[\"type\"] = \"tap\"\n info[\"horizontal_position\"] = tap[\"data\"][0][\"x0\"]\n info[\"vertical_position\"] = tap[\"data\"][0][\"y0\"]\n return info\n\n def get_swipe_features(self, swipe):\n info = {}\n info[\"type\"] = \"swipe\"\n\n times = []\n num_data = len(swipe[\"data\"])+1\n if(num_data==2):\n times.append(swipe[\"t_start\"])\n times.append(swipe[\"t_stop\"])\n elif(num_data>2):\n step = (swipe[\"t_stop\"]-swipe[\"t_start\"])/(num_data-1)\n times.append(swipe[\"t_start\"])\n prev = swipe[\"t_start\"]\n for i in range(0,num_data-2):\n times.append(prev+step)\n prev += step\n times.append(swipe[\"t_stop\"])\n for i in range(0,len(times)):\n times[i] -= swipe[\"t_start\"]\n \n x_positions = []\n y_positions = []\n # Get horizontal and vertical starting points\n x_positions.append(swipe[\"data\"][0][\"x0\"])\n y_positions.append(swipe[\"data\"][0][\"y0\"])\n\n for d in swipe[\"data\"]:\n x_positions.append(d[\"moveX\"])\n y_positions.append(d[\"moveY\"])\n\n horizontal_length = x_positions[-1] - x_positions[0]\n vertical_length = y_positions[-1] - y_positions[0]\n info[\"horizontal_trace_length\"] = np.abs(horizontal_length)\n info[\"vertical_trace_length\"] = np.abs(vertical_length)\n if(np.abs(horizontal_length)>np.abs(vertical_length)):\n if(horizontal_length>0):\n info[\"direction\"] = \"right\"\n else:\n info[\"direction\"] = \"left\"\n else:\n if(vertical_length>0):\n info[\"direction\"] = \"up\"\n else:\n info[\"direction\"] = \"down\"\n\n # Get statistics of trace\n info[\"trace_stats\"] = self.perform_linear_regression(x_positions, y_positions)\n\n info[\"swipe_horizontal_acceleration\"] = (swipe[\"data\"][-1][\"vx\"] - swipe[\"data\"][0][\"vx\"])/((swipe[\"t_stop\"] - swipe[\"t_start\"])*0.001)\n info[\"swipe_vertical_acceleration\"] = (swipe[\"data\"][-1][\"vy\"] - swipe[\"data\"][0][\"vy\"])/((swipe[\"t_stop\"] - swipe[\"t_start\"])*0.001)\n\n mean_x = 0\n mean_y = 0\n for x in x_positions:\n mean_x += x\n for y in y_positions:\n mean_y += y\n mean_x /= len(x_positions)\n mean_y /= len(y_positions)\n info[\"mean_x\"] = mean_x\n info[\"mean_y\"] = mean_y\n \n return info\n\n def get_fake_swipe_features(self, fake_swipe):\n info = {}\n info[\"type\"] = \"fake_swipe\"\n\n info[\"fs_horizontal_position\"] = fake_swipe[\"data\"][0][\"x0\"]\n info[\"fs_vertical_position\"] = fake_swipe[\"data\"][0][\"y0\"]\n\n return info\n\n def calculate_features(self):\n features = []\n for ind in self.taps.index:\n features.append(self.get_tap_features(self.taps.loc[ind]))\n for ind in self.swipes.index:\n 
features.append(self.get_swipe_features(self.swipes.loc[ind]))\n for ind in self.fake_swipes.index:\n features.append(self.get_fake_swipe_features(self.fake_swipes.loc[ind]))\n\n return features\n\n def perform_linear_regression(self, x_pos, y_pos):\n \n x_train = np.array(x_pos).reshape(-1, 1)\n y_train = np.array(y_pos).reshape(-1, 1)\n \n # Create linear regression object\n regr = linear_model.LinearRegression()\n # Train the model using the training sets\n regr.fit(x_train, y_train)\n # Predict based on the constructed model\n pred = regr.predict(x_train)\n \n info = {}\n info[\"slope\"] = regr.coef_[0][0]\n info[\"mean_squared_error\"] = mean_squared_error(y_train, pred)\n info[\"mean_abs_error\"] = mean_absolute_error(y_train, pred)\n info[\"median_abs_error\"] = median_absolute_error(y_train, pred)\n info[\"coef_determination\"] = r2_score(y_train, pred)\n \n return info\n"
}
] | 5 |
MattPaul25/HitData | https://github.com/MattPaul25/HitData | 9dfcf69a3814637a62497bd0481da370c8677b49 | dd141fdd7a3978cb3c62b2230452ae9f7aedda7e | 7f973c271ce2e20fb8141b4ae1956623e5c1e749 | refs/heads/master | 2021-01-01T05:08:32.932658 | 2016-04-21T18:04:34 | 2016-04-21T18:04:34 | 56,697,322 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5694474577903748,
"alphanum_fraction": 0.580281674861908,
"avg_line_length": 36.217742919921875,
"blob_id": "03911cbdb5ccc42967b3842e6864be50cb9637f5",
"content_id": "83515bcdfdf785cdd2c4859e867252aba77fb0d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4615,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 124,
"path": "/Run_Through.py",
"repo_name": "MattPaul25/HitData",
"src_encoding": "UTF-8",
"text": "import csv\n\n\nclass Columns(object):\n \"\"\"this class takes in a series of columns and can contian a lambda expression to have a logical 'where' for the columns\"\"\"\n def __init__(self, predicate=''):\n self.columns = []\n self.predicate = predicate\n self.set_lambda(self.predicate)\n\n def add_columns(self, column):\n self.columns.append(column)\n\n def set_lambda(self, predicate):\n if callable(predicate):\n self.predicate = predicate\n else:\n self.predicate = ''\n\n def __iter__(self):\n return iter(self.columns)\n\n\n\nclass ColumnObject(object):\n \"\"\"class contains information about a column, like where it sits within tbl arr and its name.\n This object stores an option predicate -- that acts like a where clause\"\"\"\n def __init__(self, name, lookup_name, file_name, predicate=''):\n self.name = name\n self.lookup_name = lookup_name\n self.file_name = file_name\n self.predicate = predicate\n self.set_lambda(self.predicate)\n self.lookup_index = self.get_index()\n\n def set_lambda(self, predicate):\n if callable(predicate):\n self.predicate = predicate\n else:\n self.predicate = ''\n\n def get_index(self):\n \"\"\" gets the index of a column in a single column csv file or text file\n :rtype: int\n \"\"\"\n index_num = 0\n with open(self.file_name, 'r') as lookup_file:\n file_reader = csv.reader(lookup_file)\n for line in file_reader:\n if line[0] == self.lookup_name:\n return index_num\n else:\n index_num += 1\n return -1\n\n\nclass DataManipulation(object):\n \"\"\"this class manipulates data passed to it and outputs a result\"\"\"\n def __init__(self, columns, file_in, file_out, delimiter='|'):\n print('this may take a bit')\n self.columns = columns\n self.file_in = file_in\n self.file_out = file_out\n self.delimiter = delimiter\n self.rec_count = self.filter_data()\n\n def filter_data(self):\n counter = 0\n with open(self.file_in, encoding='ISO-8859-1') as tsvfile, open(self.file_out, 'w') as csvout:\n\n tsv_reader = csv.reader(tsvfile, delimiter='\\t')\n csv_writer = csv.writer(csvout)\n\n for line in tsv_reader:\n column_ok = True\n try:\n result_str = ''\n for clm in self.columns:\n if column_ok:\n result_str = result_str + (line[clm.lookup_index]) + self.delimiter\n if clm.predicate != '':\n column_ok = clm.predicate(line[clm.lookup_index])\n rec = result_str\n else:\n break\n if column_ok:\n csv_writer.writerow([rec]) # brackets wrap the string as a list object\n counter += 1\n\n except Exception as e:\n print(e)\n\n return counter\n\n def count_records(self, file_in):\n cnt = 0\n with open(file_in, encoding=\"ISO-8859-1\") as filein:\n reader = csv.reader(filein)\n for line in reader:\n cnt += 1\n return cnt\n\nlookup_fil = 'columns.csv'\n\nc = Columns()\n\nc.add_columns(ColumnObject('pagename', 'pagename', lookup_fil, predicate=lambda n: n == 'fbn:root:front:channel'))\nc.add_columns(ColumnObject('page_event', 'post_page_event', lookup_fil, predicate=lambda n: n == '0'))\nc.add_columns(ColumnObject('date_time', 'date_time', lookup_fil))\nc.add_columns(ColumnObject('referrer', 'referrer', lookup_fil))\nc.add_columns(ColumnObject('first_hit_page', 'first_hit_pagename', lookup_fil))\nc.add_columns(ColumnObject('article', 'post_prop12', lookup_fil))\nc.add_columns(ColumnObject('events', 'event_list', lookup_fil))\nc.add_columns(ColumnObject('ref_domain', 'ref_domain', lookup_fil))\nc.add_columns(ColumnObject('ref_type', 'ref_type', lookup_fil))\nc.add_columns(ColumnObject('unique_id1', 'post_visid_high', lookup_fil))\nc.add_columns(ColumnObject('unique_id2', 
'post_visid_low', lookup_fil))\n\n\n# myColumns = [column1, column2, column3, column4, column5, column6, column7, column8, column9, column10, column11]\nprint(str(DataManipulation(c, \"hit_data.tsv\", \"12-21.csv\")))\nprint(str(DataManipulation(c, \"hit_data2.tsv\", \"3-7.csv\")))\n#filter_data(myColumns, \"hit_data.tsv\", \"12-21.csv\")))\n#filter_data(myColumns, \"hit_data2.tsv\", \"3-7.csv\")))\n"
}
] | 1 |
aghanti7/banti_telugu_ocr | https://github.com/aghanti7/banti_telugu_ocr | e9182db6369c21326b49f3ad78785552405efd6a | 6b15e37fd0ad93e5e2ac90b8822b06e64230f05f | 8f77423f9bcf28044343b8f2939bc2873675786a | refs/heads/master | 2021-04-29T20:26:30.313952 | 2017-01-16T09:06:12 | 2017-01-16T09:06:12 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.556009829044342,
"alphanum_fraction": 0.575633704662323,
"avg_line_length": 27.465116500854492,
"blob_id": "4c32c427008fefecdede952d18364fa348efb0f1",
"content_id": "8f16314dc0436848f2a9540bff0b2bc2fd9efdf2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1223,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 43,
"path": "/scripts/tile.py",
"repo_name": "aghanti7/banti_telugu_ocr",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom math import ceil\nfrom scipy.ndimage.interpolation import zoom\n\n\ndef normalize(img, make_white):\n maxx, minn = img.max(), img.min()\n img -= minn\n img /= maxx - minn\n if make_white and np.mean(img) < .5:\n img = 1 - img\n return img\n\n\ndef tile_raster_images(images,\n zm=1,\n margin_width=1,\n margin_color=.1,\n make_white=False,\n global_normalize=False):\n n_images = images.shape[0]\n w = n_images // int(np.sqrt(n_images))\n h = ceil(float(n_images) / w)\n\n if global_normalize:\n images = normalize(images, make_white)\n else:\n images = [normalize(img, make_white) for img in images]\n\n if zm != 1:\n images = zoom(images, zoom=(1, zm, zm), order=0)\n\n pad_axes = (0, h*w - n_images), (0, 1), (0, 1)\n pad_width = (margin_width * np.array(pad_axes)).tolist()\n pad_fill = (margin_color * np.array(pad_axes)).tolist()\n images = np.pad(images, pad_width, 'constant', constant_values=pad_fill)\n\n t2 = np.vstack([np.hstack([images[i * w + j] for j in range(w)])\n for i in range(h)])\n t2 = t2[:-margin_width, :-margin_width]\n t2 = (255 * t2).astype(\"uint8\")\n\n return t2"
},
{
"alpha_fraction": 0.571349561214447,
"alphanum_fraction": 0.5837942361831665,
"avg_line_length": 31.576576232910156,
"blob_id": "f7e099df687e16e53376d1e662844bb44f52c6fb",
"content_id": "3a5cf40fd5b4984c6222261ebf35b49421c48fb0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3616,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 111,
"path": "/scripts/classify_paint.py",
"repo_name": "aghanti7/banti_telugu_ocr",
"src_encoding": "UTF-8",
"text": "import ast\nimport os\nimport pickle\nimport sys\n\nfrom PIL import Image as im\nimport numpy as np\nfrom theanet.neuralnet import NeuralNet\n\nfrom banti.iast_unicodes import LabelToUnicodeConverter\nfrom banti.scaler import ScalerFactory\nfrom banti.proglyph import ProGlyph, Space\nfrom banti.processedpage import ProcessedPage\nfrom scripts.tile import tile_raster_images\n\n############################################# Arguments\n\nif len(sys.argv) < 5:\n print(\"Usage:\"\n \"\\n{0} neuralnet_params.pkl inpt.box/tiff scaler_params.scl codes.lbl \"\n \"\\n\\te.g:- {0} 0default.pkl sample_images/praasa.box \"\n \"scalings/relative48.scl labellings/alphacodes.lbl\"\n \"\".format(sys.argv[0]))\n sys.exit()\n\nnnet_prms_file_name = sys.argv[1]\ninput_file_name = sys.argv[2]\nscaler_prms_file = sys.argv[3]\nlabelings_file_name = sys.argv[4]\n\n############################################# Load Params\n\nwith open(scaler_prms_file, 'r') as sfp:\n scaler_prms = ast.literal_eval(sfp.read())\n\nwith open(nnet_prms_file_name, 'rb') as nnet_prms_file:\n nnet_prms = pickle.load(nnet_prms_file)\n\nwith open(labelings_file_name, encoding='utf-8') as labels_fp:\n labellings = ast.literal_eval(labels_fp.read())\n\n# print(labellings)\nchars = LabelToUnicodeConverter(labellings).onecode\n\n############################################# Init Network\nProGlyph.scaler = ScalerFactory(scaler_prms)\nprocd_page = ProcessedPage(input_file_name)\n\nnnet_prms['training_params']['BATCH_SZ'] = 1\nntwk = NeuralNet(**nnet_prms)\ntester = ntwk.get_data_test_model(go_nuts=True)\n\n############################################# Image saver\ndir_name = os.path.basename(nnet_prms_file_name)[:-7] + '/'\nif not os.path.exists(dir_name):\n os.makedirs(dir_name)\nnamer = (dir_name + '{:03d}_{}_{:02d}.png').format\nprint(\"Look for me in :\", dir_name)\n\n\ndef saver(outs, ch, debug=True):\n saver.index += 1\n for i, out in enumerate(outs):\n global_normalize = False\n if out.ndim == 2:\n n_nodes = out.shape[1]\n w = n_nodes // int(np.sqrt(n_nodes))\n h = np.ceil(float(n_nodes) / w)\n extra = np.full((1, w*h-n_nodes), 0)\n out = np.concatenate((out, extra), 1).reshape((1, h, w))\n elif out.ndim == 4:\n out = out[0]\n if out.shape[-1] * out.shape[-2] < 65:\n global_normalize = True\n\n if debug:\n print(\"{:6.2f} {:6.2f} {:6.2f} {} GNM:{}\".format(\n out.max(), out.mean(), out.min(), out.shape,\n global_normalize))\n\n im.fromarray(tile_raster_images(out, zm=2,\n make_white=True,\n global_normalize=global_normalize)\n ).save(namer(saver.index, chars[ch], i), compress_level=1)\n\n if debug:\n print()\n\nsaver.index = 0\n\n############################################# Read glyphs & classify\nprint(\"Classifying...\")\nfor line_pglyphs in procd_page.file_glyphs:\n for pglyph in line_pglyphs:\n if pglyph is Space:\n continue\n\n scaled_glp = pglyph.scaled\n img = scaled_glp.pix.astype('float32').reshape((1,)+scaled_glp.pix.shape)\n\n if ntwk.takes_aux():\n dtopbot = scaled_glp.dtop, scaled_glp.dbot\n aux_data = np.array([[dtopbot, dtopbot]], dtype='float32')\n logprobs_or_feats, preds, *layer_outs = tester(img, aux_data)\n else:\n logprobs_or_feats, preds, *layer_outs = tester(img)\n\n saver(layer_outs, np.argmax(logprobs_or_feats))\n print(\"Saved images of {} glyphs\".format(saver.index))\n\nprint(\"Look for me in :\", dir_name)\n"
}
] | 2 |
abyshrimat/C103 | https://github.com/abyshrimat/C103 | 1afb93c2e74a252f1853e571b6f65f1cb5140bf5 | 13b4b457a868a6eac6cdabb77e719b21a13936af | 551efeb59a395bad4117a1edf4d7406c2dfdaf6c | refs/heads/main | 2023-08-07T07:52:23.987694 | 2021-09-25T23:46:18 | 2021-09-25T23:46:18 | 405,752,340 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5927505493164062,
"alphanum_fraction": 0.6183369159698486,
"avg_line_length": 31.64285659790039,
"blob_id": "fccc15b3ae3f071123c55e16abe7ac355ca32488",
"content_id": "4e10eb2bb7357e376b13a77bbd8a30112035c551",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 469,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 14,
"path": "/C103datavisualization.py",
"repo_name": "abyshrimat/C103",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport plotly.express as px\r\n\r\ndf1 = pd.read_csv(\"CovidData.csv\")\r\nfig1 = px.scatter(df1, x = \"date\", y = \"cases\", color = \"country\", title = 'Covid cases')\r\nfig1.show()\r\n\r\ndf2 = pd.read_csv(\"CovidData.csv\")\r\nfig2 = px.line(df2, x = \"date\", y = \"cases\", color = \"country\", title = 'Covid cases')\r\n#fig2.show()\r\n\r\ndf3 = pd.read_csv(\"CovidData.csv\")\r\nfig3 = px.bar(df3, x = \"date\", y = \"cases\", color = \"country\", title = 'Covid cases')\r\n#fig3.show()"
}
] | 1 |
nastiag67/kdd_trading | https://github.com/nastiag67/kdd_trading | 5f1768944c91c42546a331e6057a4a4aa5ba4605 | 5a1476a36967acc3e1213b796ab9cb1f5a8adc37 | 03bd87741014f9b7a728d23a515c2c8a0a9aeeeb | refs/heads/main | 2023-09-03T01:27:17.851300 | 2021-10-11T22:48:31 | 2021-10-11T22:48:31 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.817460298538208,
"alphanum_fraction": 0.817460298538208,
"avg_line_length": 125,
"blob_id": "b1239d2ee643c5c3430818939d58c6783c57f7ca",
"content_id": "3bf2909b9d9115710f1bac23538b8f6c4f6bf686",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 237,
"num_lines": 2,
"path": "/README.md",
"repo_name": "nastiag67/kdd_trading",
"src_encoding": "UTF-8",
"text": "# kdd_trading\nThis educational project is aimed to analyze and research association between fundamental ratios and growth of the companies in the S&P index using classical ml and black box models in order to interprete them from XAI point of view.\n"
},
{
"alpha_fraction": 0.667913556098938,
"alphanum_fraction": 0.6793432831764221,
"avg_line_length": 35.732826232910156,
"blob_id": "6895f8e7468b32c9f6a9fa452e09930cff39d017",
"content_id": "cf2362fd9f942bde1c61e2525758083db79b76c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4812,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 131,
"path": "/data/data_loader.py",
"repo_name": "nastiag67/kdd_trading",
"src_encoding": "UTF-8",
"text": "# pip install yfinance --upgrade --no-cache-dir\n# pip install pandas_datareader\n\n\nimport datetime\nimport requests\nimport time\n\n\nimport pandas as pd\nimport pandas_datareader.data as web\n\n\nimport yfinance as yf\n\n\nfrom dateutil.relativedelta import relativedelta\n\n\ntoday = datetime.datetime.now().date()\noldest_day = today - relativedelta(years=2)\n\n\n# getting list of tickers of companies from S&P500\ntable = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\ndf = table[0]\ndf.to_csv('S&P500-Info.csv')\ndf.to_csv(\"S&P500-Symbols.csv\", columns=['Symbol'])\n\n\nlist_of_tickers = df.Symbol\n\n\n# uploading prices of S&P500 over the last 2 years\nSP500 = web.DataReader(['sp500'], 'fred', oldest_day, today)\n\n\n# creating feature matrix\ntotal_df = pd.DataFrame()\nbad_count = 0\nfor ticker in list_of_tickers:\n # request data\n company = yf.Ticker(ticker)\n df_quarterly_financials = company.quarterly_financials\n df_quarterly_balance_sheet = company.quarterly_balance_sheet\n df_quarterly_cashflow = company.quarterly_cashflow\n df_earnings = company.quarterly_earnings\n hist = company.history(period=\"2y\")\n\n # time\n start = df_quarterly_cashflow.columns[3]\n future_date = start + datetime.timedelta(days=252)\n finish = start + datetime.timedelta(days=252) if future_date <= today else today\n\n # variables\n try:\n total_liab = df_quarterly_balance_sheet.loc['Total Liab'][start]\n total_equity = df_quarterly_balance_sheet.loc['Total Stockholder Equity'][start]\n total_assets = df_quarterly_balance_sheet.loc['Total Assets'][start]\n current_assets = df_quarterly_balance_sheet.loc['Total Current Assets'][start]\n cash = df_quarterly_balance_sheet.loc['Cash'][start]\n net_income = df_quarterly_cashflow.loc['Net Income'][start]\n earnings = df_earnings['Earnings'][0]\n number_of_stocks = df_quarterly_balance_sheet.loc['Common Stock'][start]\n start_close = hist.loc[start]['Close']\n finish_close = hist.loc[finish]['Close']\n operating_cashflow = df_quarterly_cashflow.loc['Total Cash From Operating Activities'][start]\n interest_expense = df_quarterly_financials.loc['Interest Expense'][start]\n ebit = df_quarterly_financials.loc['Ebit'][start]\n total_revenue = df_quarterly_financials.loc['Total Revenue'][start]\n except KeyError:\n bad_count += 1\n print(f'Bad company {ticker}. 
Overall number {bad_count}')\n continue\n\n # ratios\n try:\n # https://www.investopedia.com/terms/d/debtequityratio.asp\n debt_to_equity = total_liab / total_equity\n\n # https://www.investopedia.com/terms/c/currentratio.asp\n current_ratio = current_assets / total_liab\n\n # https://www.investopedia.com/terms/c/cash-ratio.asp\n cash_ratio = cash / total_liab\n\n # https://www.investopedia.com/terms/r/returnonassets.asp\n return_on_assets = net_income / total_assets\n\n # https://www.investopedia.com/terms/e/eps.asp\n eps = earnings / number_of_stocks\n\n # https://www.investopedia.com/terms/p/price-to-salesratio.asp\n price_to_sales = start_close / finish_close\n\n # https://www.investopedia.com/terms/p/price-to-cash-flowratio.asp\n price_to_cashflow = start_close / (operating_cashflow / number_of_stocks)\n\n # https://www.investopedia.com/terms/s/shareholderequityratio.asp\n shareholder_equity = total_equity / total_assets\n\n # https://www.investopedia.com/terms/i/interestcoverageratio.asp\n interest_coverage = ebit / interest_expense\n\n # https://www.investopedia.com/terms/n/net_margin.asp\n net_profit_margin = net_income / total_revenue\n\n except TypeError:\n bad_count += 1\n print(f'Bad company {ticker}. Overall number {bad_count}')\n continue\n\n # targets\n regression_target = start_close / finish_close # prrice_change, regression\n target = (regression_target < 1) # overweight, for classification\n SP_change = SP500.loc[start]['sp500'] / SP500.loc[finish]['sp500']\n SP_target = SP_change > regression_target # overweight, for classification\n\n df_company = pd.DataFrame(\n {'debt_to_equity': debt_to_equity, 'current_ratio': current_ratio, 'cash_ratio': cash_ratio,\n 'return_on_assets': return_on_assets, 'eps': eps, 'price_to_sales': price_to_sales,\n 'price_to_cashflow': price_to_cashflow, 'shareholder_equity': shareholder_equity,\n 'interest_coverage': interest_coverage, 'net_profit_margin': net_profit_margin,\n 'regression_target': regression_target, 'target': target, 'SP_target': SP_target}, index=[ticker])\n\n total_df = pd.concat([total_df, df_company])\n # print(ticker, len(total_df))\n time.sleep(1)\n\n\n# In the end, bad_count = 152\n"
}
] | 2 |
DanyalKh/Integrations | https://github.com/DanyalKh/Integrations | f3b127fac7b2ef92a9c444a8da36717aead3a3a9 | 0807a65bb849370c392cb6dae78cc0dd4e70afe3 | a229bb294df82ca20c580d45a9c2510378322f3c | refs/heads/master | 2021-01-04T10:03:53.979748 | 2020-02-14T12:09:38 | 2020-02-14T12:09:38 | 240,498,700 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4839767813682556,
"alphanum_fraction": 0.48902347683906555,
"avg_line_length": 33.17241287231445,
"blob_id": "b140d5081af5eee92368a88630e89afb43d6f981",
"content_id": "a9b0acbda934759dcf5690c6d400297e4e2fea4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3963,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 116,
"path": "/rest_api.py",
"repo_name": "DanyalKh/Integrations",
"src_encoding": "UTF-8",
"text": "# @frappe.whitelist()\n# def send_sms_sim(self=None, parameters=None, short_url=None, smsdata=None, data=None):\n# print(\"in queue\")\n# try:\n# import http.client\n#\n# if isinstance(data, unicode):\n# self = json.loads(self)\n# else:\n# self = self\n#\n# if isinstance(data, unicode):\n# d = json.loads(data)\n# else:\n# d = data\n#\n# req_data = {}\n# url = ''\n# for p in parameters:\n# url = p.get('sms_gateway_url')\n# post = p.get('post')\n# req_data = {\n# \"send_from\": p.get('send_from')\n# }\n# break\n#\n# conn = http.client.HTTPSConnection(\"myapt.pk\")\n# # req_data[\"send_from\"] = \"00923154503880\"\n# req_data['send_to'] = d['MessageTo']\n# req_data['send_text'] = d['MessageText']\n#\n# # payload = {\"send_to\": data['send_to'], \"send_text\": data['send_text'], \"send_from\": data['send_from']}\n# payload = json.dumps(req_data)\n# headers = {'content-type': \"application/json\"}\n#\n# conn.request(\"POST\", url, payload, headers)\n#\n# res = conn.getresponse()\n# # d = res.read()\n#\n# if res.status == 200:\n# logs = frappe.new_doc(\"SMS Logs\")\n# logs.sms_status = 'Sent'\n# try:\n# logs.reference_type = self.doctype\n# logs.reference_name = self.name\n# except Exception as ex:\n# logs.reference_type = self.get('doctype')\n# logs.reference_name = self.get('name')\n# logs.message_to = d['MessageTo']\n# logs.message_text = d['MessageText']\n# logs.sms_api = 'Sim'\n# logs.short_url = short_url\n# logs.date_time = frappe.utils.now()\n# logs.save(ignore_permissions=True)\n# return res.status\n#\n# except Exception as ex:\n# # sendsmsresponse(self=self, url=url, smsdata=smsdata, data=data)\n# print(ex)\n#\n# @frappe.whitelist()\n# def send_sms_long_code(self=None, parameters=None, short_url=None, smsdata=None, data=None):\n# print(\"in queue\")\n# try:\n# # session = requests.Session()\n# request_data = {}\n# url = ''\n# for p in parameters:\n# url = p.get('sms_gateway_url')\n# post = p.get('post')\n# request_data = {\n# \"originator\": p.get('originator'),\n# \"username\": p.get('username'),\n# \"password\": p.get('password'),\n# \"action\": p.get('action')\n# }\n# break\n#\n# if isinstance(data, unicode):\n# self = json.loads(self)\n# else:\n# self = self\n#\n# if isinstance(data, unicode):\n# d = json.loads(data)\n# else:\n# d = data\n#\n#\n# request_data['messagedata'] = d['MessageText']\n# request_data['recipient'] = d['MessageTo']\n# req = json.dumps(request_data)\n#\n# response = requests.post(url, data=req, headers={'content-type': \"application/json\"})\n#\n# print(response.status_code)\n# if response.status_code == 200:\n# logs = frappe.new_doc(\"SMS Logs\")\n# logs.sms_status = 'Sent'\n# try:\n# logs.reference_type = self.doctype\n# logs.reference_name = self.name\n# except Exception as ex:\n# logs.reference_type = self.get('doctype')\n# logs.reference_name = self.get('name')\n# logs.message_to = d['MessageTo']\n# logs.message_text = d['MessageText']\n# logs.sms_api = 'Long Code'\n# logs.short_url = short_url\n# logs.date_time = frappe.utils.now()\n# logs.save(ignore_permissions=True)\n# return response\n#\n# except Exception as ex:\n# print(ex)"
},
{
"alpha_fraction": 0.4885568916797638,
"alphanum_fraction": 0.49989405274391174,
"avg_line_length": 36.01176452636719,
"blob_id": "8b17168d0753c32599ad00064fd30f3e5ef1dc9f",
"content_id": "612a5794f7411feec42ad93ffc32ccda619ac936",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9438,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 255,
"path": "/SMPP_test2.py",
"repo_name": "DanyalKh/Integrations",
"src_encoding": "UTF-8",
"text": "import logging\nimport sys\nimport time\n\nimport smpplib.gsm\nimport smpplib.client\nimport smpplib.consts\n\n# if you want to know what's happening\nlogging.basicConfig(level='DEBUG')\n\n# number list\nnum_list = ['923322184147', '923333023982']\ndel_count = 0\nclient = smpplib.client.Client(\"smsctp3.eocean.us\", 28555)\n\n# Print when obtain message_id\nclient.set_message_sent_handler(\n lambda pdu: on_sent(pdu))\nclient.set_message_received_handler(\n lambda pdu: on_rec(pdu))\n\ndef on_rec(pdu):\n global del_count\n sys.stdout.write('delivered {}\\n'.format(pdu.short_message))\n del_count += 1\n\ndef on_sent(pdu):\n global del_count\n sys.stdout.write('sent {} {}\\n'.format(pdu.sequence, pdu.message_id))\n # del_count += 1\n\nclient.connect()\nclient.bind_transceiver(system_id='maven99095', password='NBgvftrV')\n\nfor num in num_list:\n\n pdu = client.send_message(\n source_addr_ton=0,\n source_addr='99095',\n dest_addr_ton=1,\n destination_addr=num,\n short_message='hello12'.encode(),\n\n )\n\nwhile len(num_list) != del_count:\n client.poll()\nclient.unbind()\nclient.disconnect()\n\n\n\n# @frappe.whitelist()\n# def send_sms_short_code(self=None, smsdata=None, short_url=None, data=None, parameters=None):\n# import xmltodict\n# print(\"in queue\")\n# global del_count\n# try:\n#\n# if isinstance(parameters, unicode):\n# parameters = json.loads(parameters)\n# else:\n# parameters = parameters\n#\n# if isinstance(data, unicode):\n# d = json.loads(data)\n# else:\n# d = data\n#\n# del_count = 0\n# username = ''\n# password = ''\n# client = ''\n# source_addr = ''\n# source_addr_ton = ''\n# dest_addr_ton = ''\n# destination_addr = ''\n# short_message = ''\n# registered_delivery = ''\n# alert_on_message_delivery = ''\n#\n# request_data = {}\n# url = ''\n# for p in parameters:\n# url = p.get('sms_gateway_url')\n# post = p.get('post')\n# username = p.get('username')\n# password = p.get('password')\n# source_addr = str(p.get('originator'))\n# source_addr_ton = 0\n# dest_addr_ton = 1\n# destination_addr = str(d['MessageTo'])\n# short_message = str(d['MessageText'])\n# registered_delivery = True\n# alert_on_message_delivery = True\n#\n# # request_data = {\n# # \"source_addr_ton\" : 0,\n# # \"source_addr\" : p.get('originator'),\n# # \"dest_addr_ton\" : 1,\n# # \"destination_addr\": d['MessageTo'],\n# # \"short_message\": d['MessageText'],\n# # \"registered_delivery\": True,\n# # \"alert_on_message_delivery\": True,\n# # }\n# break\n#\n# # request_data['short_message'] = d['MessageText']\n# # request_data['destination_addr'] = d['MessageTo']\n# # req = json.dumps(request_data)\n#\n# def sent_handler(pdu):\n# # global del_count\n# print (\"sent: \", pdu.sequence, pdu.message_id, pdu.status)\n#\n# def received_handler(pdu):\n# global del_count\n# print (\"delivered: \", pdu.short_message)\n# logs = ''\n# log_doc = frappe.get_all(\"SMS Logs\", filters={'message_to': destination_addr}, order_by=\"creation desc\", limit=1)\n#\n# if len(log_doc) > 0:\n# for l in log_doc:\n# logs = frappe.get_doc('SMS Logs', l.name)\n#\n# if len(log_doc) == 0:\n# logs = frappe.new_doc(\"SMS Logs\")\n#\n# logs.sms_status = 'Sent'\n# id = ''\n# date = ''\n# time = ''\n# msg = pdu.short_message.split(' ')[07]\n# if msg == 'stat:DELIVRD' or msg == 'stat:ACCEPTD':\n# id = \"{0}\".format(pdu.short_message.split(' ')[0].split(':')[1])\n# date = datetime.strptime(pdu.short_message.split(' ')[6].split(':')[1], '%y%m%d%H%M').date()\n# time = datetime.strptime(pdu.short_message.split(' ')[6].split(':')[1], 
'%y%m%d%H%M').time()\n# try:\n# logs.reference_type = self.doctype\n# logs.reference_name = self.name\n#\n# except Exception as ex:\n# logs.reference_type = self.get('doctype')\n# logs.reference_name = self.get('name')\n#\n# logs.message_to = destination_addr\n# logs.message_text = short_message\n# logs.sms_api = 'Short Code'\n# logs.confirmation_msg = ''\n# logs.sms_date = date\n# logs.sms_time = time\n# logs.id = id\n# logs.short_url = short_url\n# logs.response = pdu.short_message\n# logs.date_time = frappe.utils.now()\n# logs.save(ignore_permissions=True)\n# del_count += 1\n# # return response\n#\n# try:\n# client = smpplib.client.Client(url, 28555)\n# client.set_message_sent_handler(lambda pdu: sent_handler(pdu))\n# # client.set_message_sent_handler(sent_handler)\n# client.set_message_received_handler(lambda pdu: received_handler(pdu))\n# client.connect()\n# client.bind_transceiver(system_id=username, password=password)\n#\n# for num in d['num_list']:\n# print(short_message)\n# # for part in parts:\n# pdu = client.send_message(\n# source_addr_ton=source_addr_ton,\n# source_addr=source_addr,\n# dest_addr_ton=dest_addr_ton,\n# destination_addr=str(num),\n# short_message=short_message,\n# registered_delivery=registered_delivery,\n# alert_on_message_delivery=alert_on_message_delivery,\n# )\n# # pdu = client.send_message(request_data)\n# # time.sleep(4)\n# # client.poll()\n#\n# logs = frappe.new_doc(\"SMS Logs\")\n# logs.sms_status = 'Sent'\n# try:\n# logs.reference_type = self.doctype\n# logs.reference_name = self.name\n# except Exception as ex:\n# logs.reference_type = self.get('doctype')\n# logs.reference_name = self.get('name')\n# logs.message_to = d['MessageTo']\n# logs.message_text = short_message\n# logs.sms_api = 'Short Code'\n# logs.short_url = short_url\n# logs.sms_status = 'Sent'\n# logs.date_time = frappe.utils.now()\n# logs.save(ignore_permissions=True)\n# # time.sleep(4)\n# while len(d['num_list']) != del_count:\n# client.poll()\n#\n# client.unbind()\n# client.disconnect()\n#\n# except Exception as e:\n# print(e)\n# # finally:\n# # # print \"==client.state====\", client.state\n# # if client.state in [smpplib.consts.SMPP_CLIENT_STATE_BOUND_TX]:\n# # # if bound to transmitter\n# # try:\n# # client.unbind()\n# # except smpplib.exceptions.UnknownCommandError as ex:\n# # # https://github.com/podshumok/python-smpplib/issues/2\n# # try:\n# # client.unbind()\n# # except smpplib.exceptions.PDUError as ex:\n# # pass\n# #\n# # finally:\n# # if client:\n# # # print \"==client.state====\", client.state\n# # client.disconnect()\n# # print \"==client.state====\", client.state\n#\n# # response = requests.post(url, data=request_data, headers={'content-type': \"application/json\"})\n#\n# # response = requests.post(\"http://smsctp3.eocean.us:24555/api?action=sendmessage&username=maven_99095&password=msol4466&recipient=923322184147&originator=99095&messagedata=Test123.\")\n#\n# # print(response.status_code)\n# # result = xmltodict.parse(response._content)\n# # msg = result.get('response').get('data').get('acceptreport').get('statusmessage') + ' for ' + result.get('response').get('data').get('acceptreport').get('recipient')\n# # if response.status_code == 200:\n# # logs = frappe.new_doc(\"SMS Logs\")\n# # logs.sms_status = 'Sent'\n#\n# # try:\n# # logs.reference_type = self.doctype\n# # logs.reference_name = self.name\n# # except Exception as ex:\n# # logs.reference_type = self.get('doctype')\n# # logs.reference_name = self.get('name')\n#\n# # logs.message_to = d['MessageTo']\n# # 
logs.message_text = d['MessageText']\n# # logs.sms_api = 'Short Code'\n# # logs.confirmation_msg = msg\n# # logs.date_time = frappe.utils.now()\n# # logs.save(ignore_permissions=True)\n# # return response\n#\n# except Exception as ex:\n# print(ex)\n"
},
{
"alpha_fraction": 0.7758620977401733,
"alphanum_fraction": 0.7758620977401733,
"avg_line_length": 28,
"blob_id": "2a79ea1bd06ea1605838d51ec7bdb3cecfa5e428",
"content_id": "5fe4c743b1140aacf07764bdefada135ee10d80f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 2,
"path": "/README.md",
"repo_name": "DanyalKh/Integrations",
"src_encoding": "UTF-8",
"text": "# Integrations\nIntegrations of REST, SOAP and SMPP API's.\n"
},
{
"alpha_fraction": 0.5085483193397522,
"alphanum_fraction": 0.5247124433517456,
"avg_line_length": 30.851484298706055,
"blob_id": "6c373dbb011124a5cbdf3bf6858233978b62dcec",
"content_id": "2cc431f32ac3b6034c8853191ca4a1e09ad09a72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3217,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 101,
"path": "/Soap_api.py",
"repo_name": "DanyalKh/Integrations",
"src_encoding": "UTF-8",
"text": "from zeep import Client, Transport\n# from requests import Session\nfrom zeep.cache import SqliteCache\n# from requests.auth import HTTPBasicAuth\n\ndef soap_api():\n\n wsdl = \"http://cbs.zong.com.pk/ReachCWSv2/CorporateSMS.svc?wsdl\"\n # session = Session()\n # session.auth = HTTPBasicAuth('923120825064', 'Zong@123')\n client = Client(wsdl, transport=Transport(cache=SqliteCache()))\n # , session = session\n\n request_data = {\n 'loginId' : '923120825064',\n 'loginPassword' : 'Zong@123',\n 'Destination': '923322184147',\n 'Message': 'Hello Paelu',\n 'Mask': 'MyApt',\n 'UniCode': 0,\n 'ShortCodePrefered': 'n'\n }\n\n response = client.service.QuickSMS(request_data)\n if response.find('Submitted Successfully'):\n pass\n print response\n\nif __name__ == '__main__':\n try:\n\n soap_api()\n\n except Exception as e:\n print e\n\n# @frappe.whitelist()\n# def send_sms_masking(self=None, parameters=None, short_url=None, smsdata=None, data=None):\n# from zeep import Client, Transport\n# from zeep.cache import SqliteCache\n#\n# print(\"in queue\")\n# try:\n#\n# request_data = {}\n# url = ''\n# for p in parameters:\n# url = p.get('sms_gateway_url')\n# post = p.get('post')\n# request_data = {\n# \"loginId\": p.get('loginId'),\n# \"loginPassword\": p.get('loginPassword'),\n# \"Mask\": p.get('Mask'),\n# \"UniCode\": p.get('UniCode'),\n# \"ShortCodePrefered\": p.get('ShortCodePrefered')\n# }\n# break\n#\n# # url = 'http://cbs.zong.com.pk/ReachCWSv2/CorporateSMS.svc?wsdl'\n# client = Client(url, transport=Transport(cache=SqliteCache()))\n# # request_data = {\n# # 'loginId': '923120825064',\n# # 'loginPassword': 'Zong@123',\n# # 'Mask': 'MyApt',\n# # 'UniCode': 0,\n# # 'ShortCodePrefered': 'n'\n# # }\n#\n# if isinstance(data, unicode):\n# self = json.loads(self)\n# else:\n# self = self\n#\n# if isinstance(data, unicode):\n# d = json.loads(data)\n# else:\n# d = data\n#\n# request_data['Destination'] = d['MessageTo']\n# request_data['Message'] = d['MessageText']\n# response = client.service.QuickSMS(request_data)\n# print(response)\n# if response.find(\"Successfully\"):\n# logs = frappe.new_doc(\"SMS Logs\")\n# logs.sms_status = 'Sent'\n# try:\n# logs.reference_type = self.doctype\n# logs.reference_name = self.name\n# except Exception as ex:\n# logs.reference_type = self.get('doctype')\n# logs.reference_name = self.get('name')\n# logs.message_to = d['MessageTo']\n# logs.message_text = d['MessageText']\n# logs.sms_api = 'Masking'\n# logs.short_url = short_url\n# logs.date_time = frappe.utils.now()\n# logs.save(ignore_permissions=True)\n# return response\n#\n# except Exception as e:\n# pass\n"
}
] | 4 |
minsoo-jeong/ScenePlace | https://github.com/minsoo-jeong/ScenePlace | 6300c41fbdf359a7cd377afe6596434f0e2de8f1 | 0ab9e6bbff0ea8abb29ac5f0803d2449ab656941 | d3196b5c36ff5e9ca90557c4b18dcfe55b8da375 | refs/heads/master | 2022-07-19T11:18:20.510034 | 2019-05-15T07:12:54 | 2019-05-15T07:12:54 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5718954205513,
"alphanum_fraction": 0.6094771027565002,
"avg_line_length": 30.210525512695312,
"blob_id": "86404f6bfc5ba49578544f07efcd64c3c37c7b54",
"content_id": "0284d83f9beeebd6eac497abfcedb4a8bcaede81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 612,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 19,
"path": "/place47_dataset.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "import csv\r\nimport os\r\n\r\n\r\ndir='/data/place/filelist'\r\nPLACE47_TRAIN_DATA_TXT='places47_train_data.txt'\r\nNEW_PLACE47_TRAIN_DATA_TXT='place47_train.txt'\r\nprint(os.listdir(dir))\r\n\r\nf=open(os.path.join(dir,PLACE47_TRAIN_DATA_TXT),'r')\r\nnf=open(os.path.join('data',NEW_PLACE47_TRAIN_DATA_TXT),'w')\r\n\r\nl=f.readlines()\r\nprint(len(l))\r\nprint(l[:2])\r\nprint(os.path.splitext('/'.join(l[0].split(' ')[0].split('/')[2:]))[0]+'.jpg')\r\nprint(l[0].split(' ')[1].rstrip('\\n'))\r\nfor row in l:\r\n nf.write(' '.join([os.path.splitext('/'.join(row.split(' ')[0].split('/')[2:]))[0]+'.jpg',row.split(' ')[1].rstrip('\\n')])+'\\n')\r\n"
},
{
"alpha_fraction": 0.49217846989631653,
"alphanum_fraction": 0.5207675099372864,
"avg_line_length": 42.401336669921875,
"blob_id": "0e96648c58c732ef44409b0b07b36c440f33ef56",
"content_id": "9e77a0a72d58a770f6a1b071e347c03cf6496617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12977,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 299,
"path": "/stage.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from utils.utils import AverageMeter, accuracy\nimport numpy as np\nimport torch\nimport time\nimport logging\nimport csv\n\n\ndef apply_category(category, target):\n classes = [category[t] for t in target.flatten()]\n classes = np.array(classes).reshape(target.shape).tolist()\n return classes\n\n\ndef validate(val_loader, model, criterion, print_freq):\n log = logging.getLogger('my')\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n end = time.time()\n with torch.no_grad():\n for i, (input, target, path) in enumerate(val_loader):\n target = target.cuda(async=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n '''\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n '''\n\n log.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n log.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, print_freq):\n log = logging.getLogger('my')\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target, path) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n target = target.cuda(async=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n '''\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n '''\n 
log.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time\n , data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n\ndef validate_video(val_loader, model, criterion, print_freq):\n log = logging.getLogger('my')\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n rank_target = AverageMeter()\n score_target = AverageMeter()\n prob_target = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n end = time.time()\n with torch.no_grad():\n for i, (input, target, path) in enumerate(val_loader):\n target = target.cuda(async=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # compute top5 score, prob and target rank, score, prob\n scroe_top5, _ = output.data.topk(k=5)\n score = output.data[:, target].diag().reshape((-1, 1))\n prob = torch.nn.functional.softmax(output.data, dim=1)\n prob_top5, cls_top5 = prob.topk(k=47)\n rank = cls_top5.eq(target.reshape(-1, 1)).argmax(dim=1, keepdim=True)\n prob_top5 = prob_top5[:, :5]\n cls_top5 = cls_top5[:, :5]\n prob = prob[:, target].diag().reshape((-1, 1))\n\n rank_target.update(torch.mean(rank.type(torch.float32), dim=0).item(), input.size(0))\n prob_target.update(torch.mean(prob, dim=0).item(), input.size(0))\n score_target.update(torch.mean(score, dim=0).item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n '''\n print('Test-Video : [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\t'\n 'Rank {rank.val:.2f} ({rank.avg:.2f})\\t'\n 'Prob {prob.val:.5f} ({prob.avg:.5f})\\t'\n 'Score {score.val:.5f} ({score.avg:.5f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target))\n '''\n log.info('Test-Video : [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\t'\n 'Rank {rank.val:.2f} ({rank.avg:.2f})\\t'\n 'Prob {prob.val:.5f} ({prob.avg:.5f})\\t'\n 'Score {score.val:.5f} ({score.avg:.5f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target))\n '''\n for n, info in enumerate(zip(target, path, scene, rank, prob, score, cls_top5, prob_top5, scroe_top5)):\n print(info)\n if n == 10:\n break\n '''\n log.info(\" * Prec@1 {top1.avg:.3f}\\tPrec@5 {top5.avg:.3f}\\t\"\n \" At target\\tRank {rank.avg:.2f}\\tProb {prob.avg:.5f}\\tScore {score.avg:.5f}\"\n .format(top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target))\n '''\n print(\" * Prec@1 {top1.avg:.3f}\\tPrec@5 
{top5.avg:.3f}\\t\"\n \" At target\\tRank {rank.avg:.2f}\\tProb {prob.avg:.5f}\\tScore {score.avg:.5f}\"\n .format(top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target))\n '''\n return rank_target.avg, prob_target.avg, score_target.avg\n\n\ndef validate_video_csv(val_loader, model, criterion, category, csv_file, print_freq):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n rank_target = AverageMeter()\n score_target = AverageMeter()\n prob_target = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n end = time.time()\n\n f = open(csv_file, 'w', newline='', encoding='cp949')\n\n wrt = csv.writer(f)\n wrt.writerow(['path', 'target', 'rank', 'prob', 'score', 'top5', 'top5_prob', 'top5_score'])\n\n with torch.no_grad():\n for i, (input, target, path) in enumerate(val_loader):\n target = target.cuda(async=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n\n # compute top5 score, prob and target rank, score, prob\n scroe_top5, _ = output.data.topk(k=5)\n score = output.data[:, target].diag().reshape((-1, 1))\n prob = torch.nn.functional.softmax(output.data, dim=1)\n prob_top5, cls_top5 = prob.topk(k=47)\n rank = cls_top5.eq(target.reshape(-1, 1)).argmax(dim=1, keepdim=True)\n prob_top5 = prob_top5[:, :5]\n cls_top5 = cls_top5[:, :5]\n\n category_target = apply_category(category, target)\n category_top5 = apply_category(category, cls_top5)\n prob = prob[:, target].diag().reshape((-1, 1))\n\n rank_target.update(torch.mean(rank.type(torch.float32), dim=0).item(), input.size(0))\n prob_target.update(torch.mean(prob, dim=0).item(), input.size(0))\n score_target.update(torch.mean(score, dim=0).item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n for n, info in enumerate(\n zip(path, category_target, rank.tolist(), prob.tolist(), score.tolist(),\n category_top5, prob_top5.tolist(), scroe_top5.tolist())):\n info = [info[0], info[1], info[2][0], round(info[3][0], 4), round(info[4][0], 4),\n ' '.join(info[5]),\n ' '.join(list(map(lambda x: str(round(x, 4)), info[6]))),\n ' '.join(list(map(lambda x: str(round(x, 4)), info[7])))]\n wrt.writerow(info)\n\n if i % print_freq == 0:\n print('Test-Video : [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\t'\n 'Rank {rank.val:.2f} ({rank.avg:.2f})\\t'\n 'Prob {prob.val:.5f} ({prob.avg:.5f})\\t'\n 'Score {score.val:.5f} ({score.avg:.5f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target))\n\n summary = \" * Prec@1 {top1.avg:.3f}\\tPrec@5 {top5.avg:.3f}\\tAt target\\tRank {rank.avg:.2f}\\tProb {prob.avg:.5f}\\tScore {score.avg:.5f}\".format(\n top1=top1, top5=top5, rank=rank_target, prob=prob_target, score=score_target)\n print(summary)\n wrt.writerow([summary])\n\n return rank_target.avg, prob_target.avg, score_target.avg\n"
},
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.6236905455589294,
"avg_line_length": 30.233766555786133,
"blob_id": "838daf7dc3fd89060b629b4d5653364e47f8591d",
"content_id": "2916f121db49fe676a5e01e2d233794c49387fad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2482,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 77,
"path": "/csv_to_txt.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision.datasets.folder import has_file_allowed_extension\r\nfrom datasets import SceneImageFolder, ListFromTxt\r\nimport sys\r\nimport csv\r\nimport os\r\nimport re\r\n\r\nGERNE='movie'\r\nVIDEO='The_Last_Blossom'\r\nVIDEO_ROOT = '/data/korea/{}/{}'.format(GERNE,VIDEO)\r\nSCENE_TO_CLASS_CSV = VIDEO_ROOT + '.csv'\r\nOUT_TXT = 'data/{}/{}.txt'.format(GERNE,VIDEO)\r\n\r\ndef atoi(text):\r\n return int(text) if text.isdigit() else text\r\n\r\n\r\ndef natural_keys(text):\r\n '''\r\n alist.sort(key=natural_keys) sorts in human order\r\n http://nedbatchelder.com/blog/200712/human_sorting.html\r\n (See Toothy's implementation in the comments)\r\n '''\r\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\r\n\r\n\r\ndef make_dataset(dir, class_to_idx, extensions):\r\n images = []\r\n dir = os.path.expanduser(dir)\r\n for target in sorted(class_to_idx.keys(), key=natural_keys):\r\n d = os.path.join(dir, target)\r\n if not os.path.isdir(d):\r\n continue\r\n\r\n for root, _, fnames in sorted(os.walk(d)):\r\n for fname in sorted(fnames, key=natural_keys):\r\n if has_file_allowed_extension(fname, extensions):\r\n path = os.path.join(root, fname)\r\n item = (path, class_to_idx[target])\r\n images.append(item)\r\n\r\n return images\r\n\r\n\r\n\r\n\r\ndef _find_classes(dir):\r\n if sys.version_info >= (3, 5):\r\n # Faster and available in Python 3.5 and above\r\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\r\n else:\r\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\r\n classes.sort(key=natural_keys)\r\n class_to_idx = {classes[i]: i for i in range(len(classes))}\r\n return classes, class_to_idx\r\n\r\n\r\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', 'webp']\r\n\r\nclasses, class_to_idx = _find_classes(VIDEO_ROOT)\r\nsamples = make_dataset(VIDEO_ROOT, class_to_idx, IMG_EXTENSIONS)\r\n\r\nscene2class = [int(row[1]) for row in csv.reader(open(SCENE_TO_CLASS_CSV, 'r'))]\r\nprint(classes)\r\nprint(class_to_idx)\r\nprint(scene2class)\r\nprint(len(samples))\r\n\r\nl = ['{} {}\\n'.format(path, scene2class[scene]) for path, scene in samples]\r\nprint(l[0])\r\nprint(os.path.abspath(os.path.dirname(OUT_TXT)))\r\nif not os.path.exists(os.path.abspath(os.path.dirname(OUT_TXT))):\r\n print(os.path.abspath(os.path.dirname(OUT_TXT)))\r\n os.makedirs(os.path.abspath(os.path.dirname(OUT_TXT)))\r\n\r\nf = open(OUT_TXT, 'w')\r\nf.writelines(l)\r\n"
},
{
"alpha_fraction": 0.6212674975395203,
"alphanum_fraction": 0.6544789671897888,
"avg_line_length": 28.567567825317383,
"blob_id": "ccbd962a157e1835fc35ed77f075ce2b66757c13",
"content_id": "13fdbb828282d036df1874028e33161321b5a6cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3282,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 111,
"path": "/train_place47.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision.datasets import ImageFolder\nfrom torchvision.transforms import transforms as trn\n\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import variable as V\n\nfrom torchvision import models\nfrom collections import OrderedDict\nimport csv\nimport torch\nimport shutil\n\nimport nets\n\nimport os\nimport sys\n\nfrom utils.utils import save_checkpoint, adjust_learning_rate, init_logger\nfrom stage import train, validate, validate_video\n\nfrom datasets import ListFromTxt, SceneImageFolder\n\n\nPLACE47_TRAIN_FILE = 'data/place47_train.txt'\nPLACE47_TRAIN_ROOT = '/data/place/data_large'\n\nPLACE47_VALID_FILE = 'data/place47_valid.txt'\nPLACE47_VALID_ROOT = '/data/place/val_256'\n\nVIDEO_ROOT = '/data/korea/movie/New_world'\nSCENE_TO_CLASS_CSV = VIDEO_ROOT + '.csv'\n# MODEL_CKPT = 'ckpts/resnet50_latest.pth.tar'\n\nEPOCH = 30\nLEARNING_RATE = 0.0001\nMOMENTUM = 0.9\nWEIGHT_DECAY = 0.0001\n\nTRAIN_PRINT_FREQ = 600\nVALID_PRINT_FREQ = 3\nVIDEO_PRINT_FREQ = 3\n\n\nSAVE = 'resnet50'\n\n\n\ndef main():\n start_epoch = 0\n best_prec1 = 0\n log = init_logger('logs/{}.txt'.format(SAVE))\n\n model = nets.Resnet50(47)\n\n for n, p in model.named_modules():\n if isinstance(p, torch.nn.Linear):\n torch.nn.init.xavier_normal(p.weight)\n model = model.cuda()\n model = torch.nn.DataParallel(model)\n\n normalize = trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n train_trn = trn.Compose([\n trn.RandomSizedCrop(224),\n trn.RandomHorizontalFlip(),\n trn.ToTensor(),\n normalize\n ])\n valid_trn = trn.Compose([\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize\n ])\n train_loader = DataLoader(ListFromTxt(PLACE47_TRAIN_FILE, PLACE47_TRAIN_ROOT, train_trn)\n , batch_size=64, shuffle=True, num_workers=4, pin_memory=True)\n valid_loader = DataLoader(ListFromTxt(PLACE47_VALID_FILE, PLACE47_VALID_ROOT, valid_trn)\n , batch_size=512, shuffle=False, num_workers=4, pin_memory=True)\n video_loader = DataLoader(SceneImageFolder(VIDEO_ROOT, SCENE_TO_CLASS_CSV, valid_trn)\n , batch_size=512, shuffle=False, num_workers=4, pin_memory=True)\n\n criterion = torch.nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.Adam(model.parameters(), LEARNING_RATE, weight_decay=WEIGHT_DECAY)\n\n for epoch in range(start_epoch, EPOCH):\n # adjust_learning_rate(optimizer, epoch)\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, TRAIN_PRINT_FREQ)\n # evaluate on validation set\n prec1 = validate(valid_loader, model, criterion, VALID_PRINT_FREQ)\n # evaluate on validation video set\n validate_video(video_loader, model, criterion, VIDEO_PRINT_FREQ)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n\n save_checkpoint({\n 'epoch': epoch + 1,\n # 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n }, is_best, 'ckpts/{}_ep{}'.format(SAVE, epoch))\n\n\n # validate(valid_loader, model, criterion)\n\n\nif __name__ == '__main__':\n a='1003'.zfill(2)\n print(a)\n\n #main()\n"
},
{
"alpha_fraction": 0.875,
"alphanum_fraction": 0.875,
"avg_line_length": 47,
"blob_id": "2832166099107d14a81f0533224829dc91d274b5",
"content_id": "a78b929a206573bc15fc385c691ce6a15c2ac9fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 1,
"path": "/README.md",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "Scene recognition for korea broadcasting videos\n"
},
{
"alpha_fraction": 0.518311619758606,
"alphanum_fraction": 0.5642458200454712,
"avg_line_length": 30.260000228881836,
"blob_id": "e13c0e907841dc64706fded67e5ea6190be58f91",
"content_id": "03bd6508e50bfcdb602e59c2152a899079a06761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1611,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 50,
"path": "/nets.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision import models\r\nfrom pooling import RMAC\r\nimport torch\r\n\r\n\r\nclass Resnet50(torch.nn.Module):\r\n def __init__(self, num_classes):\r\n super(Resnet50, self).__init__()\r\n self.base = torch.nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])\r\n self.pool = torch.nn.AvgPool2d(kernel_size=7, stride=1)\r\n self.fc = torch.nn.Sequential(\r\n torch.nn.Linear(2048, 512),\r\n torch.nn.ReLU(inplace=True),\r\n torch.nn.Linear(512,47)\r\n )\r\n def forward(self, x):\r\n x = self.base(x)\r\n x = self.pool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x\r\n\r\nclass Resnet50_RMAC(torch.nn.Module):\r\n def __init__(self, num_classes):\r\n super(Resnet50_RMAC, self).__init__()\r\n self.base = torch.nn.Sequential(*list(models.resnet50(pretrained=True).children())[:-2])\r\n self.pool = RMAC()\r\n self.fc = torch.nn.Sequential(\r\n torch.nn.Linear(2048, 512),\r\n torch.nn.ReLU(inplace=True),\r\n torch.nn.Linear(512,47)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.base(x)\r\n x = self.pool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x\r\n\r\nif __name__=='__main__':\r\n from PIL import Image\r\n from torchvision.transforms import transforms as trn\r\n transform=trn.Compose([trn.Resize((224,224)),trn.ToTensor()])\r\n im=Image.open('/data/place/val_256/Places365_val_00036439.jpg')\r\n im=transform(im)\r\n\r\n print(Resnet50_RMAC(47))\r\n a=Resnet50_RMAC(47)\r\n a(im.unsqueeze(0))"
},
{
"alpha_fraction": 0.5385677814483643,
"alphanum_fraction": 0.5462616086006165,
"avg_line_length": 35.27206039428711,
"blob_id": "ec564968e0cf95e4e01e28e824ed4c8794c99c6d",
"content_id": "e6d95aa95f7c17fac6b5daada11cfabc1a2a5479",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5069,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 136,
"path": "/datasets.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision.datasets.folder import default_loader, ImageFolder, make_dataset\r\nimport torchvision.transforms as trn\r\nfrom torch.utils.data import Dataset\r\nimport numpy as np\r\nimport os\r\nimport csv\r\nimport sys\r\n\r\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', 'webp']\r\n\r\n\r\nclass ListFromTxt(Dataset):\r\n r\"\"\"\r\n get Dataset from txt file like\r\n root/xxx/yyy/zzz.jpg 00\r\n root/xxx/yyy/zzz.jpg 01\r\n root/xxx/yyy/zzz.jpg 02\r\n \"\"\"\r\n\r\n def __init__(self, file, root, transform=None):\r\n super(ListFromTxt, self).__init__()\r\n self.transform = trn.ToTensor() if transform == None else transform\r\n self.loader = default_loader\r\n self.root = root\r\n self.file = file\r\n f = open(file, 'r')\r\n l = f.readlines()\r\n self.samples = [[os.path.join(root, row.split(' ')[0]), int(row.split(' ')[1].rstrip())] for row in l]\r\n f.close()\r\n\r\n def __getitem__(self, index):\r\n path, cls = self.samples[index]\r\n sample = self.transform(self.loader(path))\r\n return sample, cls, path\r\n\r\n def __len__(self):\r\n return len(self.samples)\r\n\r\n def __repr__(self):\r\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\r\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\r\n fmt_str += ' Data Root: {}\\n'.format(self.root)\r\n fmt_str += ' Data File: {}\\n'.format(self.file)\r\n tmp = ' Transforms (if any): '\r\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n return fmt_str\r\n\r\n\r\nclass ListFromTxt_toy(Dataset):\r\n r\"\"\"\r\n get Dataset from txt file like\r\n root/xxx/yyy/zzz.jpg 00\r\n root/xxx/yyy/zzz.jpg 01\r\n root/xxx/yyy/zzz.jpg 02\r\n \"\"\"\r\n\r\n def __init__(self, file, root, cnt=None, transform=None):\r\n super(ListFromTxt_toy, self).__init__()\r\n self.transform = trn.ToTensor() if transform == None else transform\r\n self.loader = default_loader\r\n self.root = root\r\n self.file = file\r\n f = open(file, 'r')\r\n l = f.readlines()\r\n self.samples = np.array([[os.path.join(root, row.split(' ')[0]), int(row.split(' ')[1].rstrip())] for row in l])\r\n if cnt is not None and cnt <= len(self.samples):\r\n idx = np.random.choice(len(self.samples), cnt, replace=False)\r\n self.samples = self.samples[idx, :]\r\n f.close()\r\n\r\n def __getitem__(self, index):\r\n path, cls = self.samples[index]\r\n sample = self.transform(self.loader(path))\r\n return sample, cls # , path\r\n\r\n def __len__(self):\r\n return len(self.samples)\r\n\r\n def __repr__(self):\r\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\r\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\r\n fmt_str += ' Data Root: {}\\n'.format(self.root)\r\n fmt_str += ' Data File: {}\\n'.format(self.file)\r\n tmp = ' Transforms (if any): '\r\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\r\n return fmt_str\r\n\r\n\r\nclass SceneImageFolder(ImageFolder):\r\n def __init__(self, root, csv_file, transform=None, loader=default_loader):\r\n classes, class_to_idx = self._find_classes(root)\r\n samples = make_dataset(root, class_to_idx, IMG_EXTENSIONS)\r\n if len(samples) == 0:\r\n raise (RuntimeError(\"Found 0 files in subfolders of: \" + root + \"\\n\"\r\n \"Supported extensions are: \" + \",\".join(\r\n IMG_EXTENSIONS)))\r\n self.root = root\r\n self.loader = loader\r\n self.extensions = IMG_EXTENSIONS\r\n self.classes = classes\r\n self.class_to_idx = class_to_idx\r\n self.samples = samples\r\n 
self.targets = [s[1] for s in samples]\r\n\r\n self.transform = transform\r\n\r\n self.imgs = self.samples\r\n self.scene2class = [int(row[1]) for row in csv.reader(open(csv_file, 'r'))]\r\n\r\n def __getitem__(self, index):\r\n path, scene = self.samples[index]\r\n sample = self.loader(path)\r\n target = self.scene2class[scene]\r\n\r\n if self.transform is not None:\r\n sample = self.transform(sample)\r\n\r\n return sample, target, path, scene\r\n\r\n def _find_classes(self, dir):\r\n if sys.version_info >= (3, 5):\r\n # Faster and available in Python 3.5 and above\r\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\r\n else:\r\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\r\n classes.sort(key=int)\r\n class_to_idx = {classes[i]: i for i in range(len(classes))}\r\n return classes, class_to_idx\r\n\r\n\r\nif __name__ == '__main__':\r\n # dt = ListFromTxt('data/place47_train.txt', '/data/place/data_large')\r\n dt = ListFromTxt_toy('data/place47_train.txt', '/data/place/data_large', cnt=1000)\r\n a = dt.__getitem__(1)\r\n print(a)\r\n print(dt)\r\n"
},
{
"alpha_fraction": 0.5773088932037354,
"alphanum_fraction": 0.5952957272529602,
"avg_line_length": 26.343137741088867,
"blob_id": "69fd52236c0a64d6635801b2b72f3cebcc521198",
"content_id": "bf9390a6d8c6ac6d509574b34d300491df9abfa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2891,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 102,
"path": "/utils/utils.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "import torch\r\nimport shutil\r\nimport os\r\nimport logging\r\nif __name__!='__main__':\r\n from .TlsSMTPHandler import TlsSMTPHandler\r\nimport time\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n\r\ndef accuracy(output, target, topk=(1,)):\r\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res\r\n\r\n\r\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\r\n torch.save(state, filename + '-latest.pth.tar')\r\n if is_best:\r\n #torch.save(state, filename + '_best.pth.tar')\r\n shutil.copyfile(filename + '-latest.pth.tar', filename + '-best.pth.tar')\r\n\r\n\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch):\r\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\r\n print(optimizer.param_groups[0].keys())\r\n lr = float(optimizer.param_groups[0]['lr']) * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n\r\ndef init_logger(log_path):\r\n os.environ['TZ'] = 'Asia/Seoul'\r\n time.tzset()\r\n base_dir = os.path.abspath(os.path.dirname(log_path))\r\n if not os.path.exists(base_dir):\r\n os.makedirs(base_dir)\r\n\r\n log = logging.getLogger('my')\r\n log.setLevel(logging.INFO)\r\n\r\n stream_handler = logging.StreamHandler()\r\n formatter = logging.Formatter('[ %(asctime)s ] %(message)s')\r\n stream_handler.setFormatter(formatter)\r\n\r\n file_handler = logging.FileHandler(log_path)\r\n file_handler.setFormatter(formatter)\r\n\r\n smpt_handler = TlsSMTPHandler((\"smtp.naver.com\", 587), '[email protected]', ['[email protected]'], 'Error found!',\r\n ('jms8167', 's011435a!'))\r\n smpt_handler.setLevel(logging.ERROR)\r\n smpt_handler.setFormatter(formatter)\r\n\r\n log.addHandler(stream_handler)\r\n log.addHandler(file_handler)\r\n log.addHandler(smpt_handler)\r\n\r\n return log\r\n\r\n\r\nif __name__=='__main__':\r\n import numpy as np\r\n\r\n a=np.random.randn(20).reshape(4,5)\r\n b=torch.tensor(a)\r\n ans=torch.Tensor([2,3,2,4]).type(torch.LongTensor)\r\n print(ans)\r\n print(b[:,ans].diag().reshape(-1,1))\r\n print(b)\r\n print(b.topk(k=2))\r\n c=torch.nn.functional.softmax(b,dim=1)\r\n print(c)\r\n print(c.topk(k=2))\r\n"
},
{
"alpha_fraction": 0.6220302581787109,
"alphanum_fraction": 0.6537076830863953,
"avg_line_length": 24.711538314819336,
"blob_id": "ed0b48faf67b43e6af929f4c6d75d76b96a24662",
"content_id": "b2b7fe19dcf557d093b99e15334df92dcc99c860",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1389,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 52,
"path": "/read.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision.datasets import ImageFolder\r\nfrom torchvision.transforms import transforms as trn\r\n\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.autograd import variable as V\r\n\r\nfrom torchvision import models\r\nfrom collections import OrderedDict\r\nimport csv\r\nimport torch\r\nimport shutil\r\n\r\nimport nets\r\n\r\nct = 'categories.txt'\r\nct_f = open(ct, 'r')\r\na = ct_f.readlines()\r\na = list(map(lambda x: x.rstrip().split(' ')[1], a))\r\nprint(a)\r\ngt = 'Miss_hammurabi_E09.csv'\r\nf = open(gt, 'r')\r\nrdr = csv.reader(f)\r\nscene_cls = [r[1] for r in rdr]\r\nprint(scene_cls)\r\n\r\ntest_trn = trn.Compose([trn.Resize(224),\r\n trn.ToTensor(),\r\n trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\ntest_folder = ImageFolder('Miss_Hammurabi_E09', transform=test_trn, target_transform=lambda x: int(scene_cls[x]))\r\ntest_loader = DataLoader(test_folder, batch_size=4, shuffle=False)\r\nprint(test_folder)\r\nim = iter(test_loader).__next__()\r\nprint(im[1])\r\nprint(a[10])\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\n\r\nmodel = nets.Resnet50(num_classes=47)\r\nmodel = model.cuda()\r\nout = model(V(im[0].cuda()))\r\n\r\nout_prob=torch.nn.functional.softmax(out.data)\r\nprint(out_prob)\r\nprint(out.size())\r\nscore = criterion(out, im[1].cuda())\r\nprint(out)\r\nprint(score)\r\n'''\r\nwith torch.no_grad():\r\n for i in test_loader: \r\n print(i[1])\r\n'''\r\n"
},
{
"alpha_fraction": 0.6370656490325928,
"alphanum_fraction": 0.6703668236732483,
"avg_line_length": 30.393939971923828,
"blob_id": "7e164d7389c80b9a463eada4d857fa0b80555e11",
"content_id": "1475dd9be42c723bfdfac1e3b9e9bdaadf7331f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2072,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 66,
"path": "/valid_video.py",
"repo_name": "minsoo-jeong/ScenePlace",
"src_encoding": "UTF-8",
"text": "from torchvision.transforms import transforms as trn\nfrom torchvision.datasets.folder import ImageFolder\nfrom torch.utils.data import DataLoader\n\nfrom datasets import ListFromTxt, SceneImageFolder\nfrom utils.utils import init_logger\nfrom stage import validate, validate_video_csv\nimport nets\n\nimport torch\nimport csv\nimport os\n\nGENRE = 'drama'\nVIDEO = 'Miss_Hammurabi_E09'\n\n\nVIDEO_VALID_FILE = 'data/{}/{}.txt'.format(GENRE, VIDEO)\nVIDEO_ROOT = '/data/korea/{}/{}'.format(GENRE, VIDEO)\nSCENE_TO_CLASS_CSV = VIDEO_ROOT + '.csv'\nMODEL_CKPT = 'ckpts/resnet50_ep8-latest.pth.tar'\n\n\nPRINT_FREQ = 1\nSAVE = 'validation-video-resnet50-rmac'\n\nPLACE47_VALID_FILE = 'data/place47_valid.txt'\nPLACE47_VALID_ROOT = '/data/place/val_256'\n\nCATEGORY = 'data/place47_category.txt'\nf = open(CATEGORY, 'rt', encoding='utf-8')\nCATEGORY = f.read().split('\\n')\nf.close()\n\nOUT_CSV = 'out/{}.csv'.format(VIDEO)\n\n\ndef main():\n init_logger('logs/{}.txt'.format(SAVE))\n\n\n model = nets.Resnet50(47).cuda()\n\n model = torch.nn.DataParallel(model)\n model.load_state_dict(torch.load(MODEL_CKPT)['state_dict'])\n\n normalize = trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n video_trn = trn.Compose([\n trn.CenterCrop(224),\n trn.ToTensor(),\n normalize\n ])\n valid_loader = DataLoader(ListFromTxt(PLACE47_VALID_FILE, PLACE47_VALID_ROOT, video_trn)\n , batch_size=64, shuffle=False, num_workers=4, pin_memory=True)\n # video_loader = DataLoader(SceneImageFolder(VIDEO_ROOT, SCENE_TO_CLASS_CSV, video_trn)\n # , batch_size=512, shuffle=False, num_workers=4, pin_memory=True)\n video_loader = DataLoader(ListFromTxt(VIDEO_VALID_FILE, '', video_trn)\n , batch_size=64, shuffle=False, num_workers=4, pin_memory=True)\n criterion = torch.nn.CrossEntropyLoss().cuda()\n\n # prec1 = validate(valid_loader, model, criterion, 30)\n prec1 = validate_video_csv(video_loader, model, criterion, CATEGORY, OUT_CSV, PRINT_FREQ)\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 10 |
aalroas/StajProject | https://github.com/aalroas/StajProject | a20d6814bb7821ff770afb5a6dff9832f456057b | f079a61df2143a95f3222429ee18d3a86d279013 | 41df20b617ed41921a0900ee23ecd6683fc4e01e | refs/heads/master | 2023-06-10T10:34:06.514657 | 2021-07-08T18:35:40 | 2021-07-08T18:35:40 | 384,213,253 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5416409373283386,
"alphanum_fraction": 0.5570635199546814,
"avg_line_length": 35.84090805053711,
"blob_id": "0e3c60f1397c9123e2137b9ac301c5127c866577",
"content_id": "d74915d98020600324e8dc0f979802058e1350c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1621,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 44,
"path": "/staj/migrations/0001_initial.py",
"repo_name": "aalroas/StajProject",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.5 on 2021-07-08 15:51\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport staj.models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Course',\n fields=[\n ('id', models.CharField(max_length=50, primary_key='True', serialize=False)),\n ('name', models.CharField(max_length=50)),\n ('date', models.DateField(blank=True)),\n ('time', models.TimeField(blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.CharField(max_length=100, primary_key='True', serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('image', models.ImageField(upload_to=staj.models.Student.user_directory_path)),\n ],\n ),\n migrations.CreateModel(\n name='Attendance',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateField(blank=True)),\n ('time', models.TimeField(blank=True)),\n ('status', models.BooleanField(default='False')),\n ('course_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='staj.course')),\n ('student_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='staj.student')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.4726867377758026,
"alphanum_fraction": 0.5183946490287781,
"avg_line_length": 32.5,
"blob_id": "1d0a2b54b62e51e1cb020d5ba0064ed6ad26992a",
"content_id": "22c141b04ac93f60391601fb48c2c79f13896213",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 897,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 26,
"path": "/datasetCreator.py",
"repo_name": "aalroas/StajProject",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport os\r\nfaceDetect= cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\ncap = cv2.VideoCapture('http://192.168.1.20:8080/video')\r\n\r\nid= input('Enter user id ')\r\nName=input('Enter student\\'s name:')\r\nsampleNum=0\r\nwhile(True):\r\n ret, img = cap.read()\r\n #rgb_image = img[:, :, ::-1]\r\n gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces=faceDetect.detectMultiScale(gray,1.3,5)\r\n \r\n for(x,y,w,h) in faces:\r\n yol=os.mkdir('students/'+ id)\r\n cv2.imwrite(yol+str(Name+'.'+'.png')+'/')\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\r\n cv2.waitKey(100); \r\n cv2.imshow('face',img)\r\n cv2.waitKey(1)\r\n if(sampleNum>0):\r\n break\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n"
},
{
"alpha_fraction": 0.544095516204834,
"alphanum_fraction": 0.5627016425132751,
"avg_line_length": 43.28095245361328,
"blob_id": "1ffc2d50a219a2be4bffc9e0e245de594de874c0",
"content_id": "bad02a9c439787f9f0a189fc11ae86d43e12d805",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9386,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 210,
"path": "/realtime.py",
"repo_name": "aalroas/StajProject",
"src_encoding": "UTF-8",
"text": "import cv2\nimport math\nfrom sklearn import neighbors\nimport os\nimport os.path\nimport pickle\nfrom PIL import Image, ImageDraw\nimport face_recognition\nfrom face_recognition.face_recognition_cli import image_files_in_folder\nimport numpy as np\nimport glob\nimport sqlite3\nfrom sqlite3 import Error\nfrom datetime import datetime,timedelta\nimport time\n#Veri Tabanı bağlantısı\nconn = sqlite3.connect(r\"db.sqlite3\")\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'JPG'}\n\n#Yüzleri öğretme \ndef train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):\n X = []\n y = []\n #yüzlerin bulunduğu klasörü alıyor\n for class_dir in os.listdir(train_dir):\n if not os.path.isdir(os.path.join(train_dir, class_dir)):\n continue\n #klösördeki öğrenci yüzleri için yüz belirleme işlemi gerçekleştiriliyor.\n for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):\n image = face_recognition.load_image_file(img_path)\n face_bounding_boxes = face_recognition.face_locations(image)\n\n #resimde birden fazla yüz olup olmadığı kontrol ediliyor.\n if len(face_bounding_boxes) != 1:\n if verbose:\n print(\"Image {} not suitable for training: {}\".format(img_path, \"Didn't find a face\" if len(face_bounding_boxes) < 1 else \"Found more than one face\"))\n else:\n X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])\n y.append(class_dir)\n\n # KNN sınıflandırıcısında ağırlık olarak kaç komşu kullanılacak\n if n_neighbors is None:\n n_neighbors = int(round(math.sqrt(len(X))))\n if verbose:\n print(\"Chose n_neighbors automatically:\", n_neighbors)\n\n # KNN sınıflandırıcı ile öğretme işlemi yapılıyor.\n knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')\n knn_clf.fit(X, y)\n\n #DOSYA YAZDIRIYOR\n if model_save_path is not None:\n with open(model_save_path, 'wb') as f:\n pickle.dump(knn_clf, f)\n return knn_clf\n\n\n#Tanıma yani tahmin işlemini gerçekleştiriyor.\ndef predict(X_frame, knn_clf=None, model_path=None, distance_threshold=0.5):\n if knn_clf is None and model_path is None:\n raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n\n # Öğrenilmiş model yüklemesi yapılıyor.\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n \n\n #resimdeki yüzlerin lokasyonları belirleniyor.\n X_face_locations = face_recognition.face_locations(X_frame) \n\n if len(X_face_locations) == 0:\n return []\n\n #bilinen yüzlerle karşılaştırma \n faces_encodings = face_recognition.face_encodings(X_frame, known_face_locations=X_face_locations)\n\n # KNN ile en iyi eşleşmeyi belirliyor.\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\n\n #Belirlenen yüzleri tanınan ve tanınmayan olarak döndürüyor.\n return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]\n\n\ndef student_count(knn_clf=None, model_path=None, distance_threshold=0.5):\n if knn_clf is None and model_path is None:\n raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n\n closest_distances, indices_count = knn_clf.kneighbors(n_neighbors=1)\n return indices_count\n\n\ndef show_prediction_labels_on_image(frame, predictions 
, course_name, course_id, nowـdate, nowـtime):\n\n pil_image = Image.fromarray(frame)\n draw = ImageDraw.Draw(pil_image)\n for name, (top, right, bottom, left) in predictions:\n top *= 2\n right *= 2\n bottom *= 2\n left *= 2\n\n name = name.encode(\"UTF-8\")\n student_no = str(name, \"utf-8\")\n \n if student_no == \"unknown\": \n \n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))\n text_width, text_height = draw.textsize(name)\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\n display_text = \"Sorry We Cant recognize you please contact ogr for updating your new Photo \"\n draw.text((left + 6, bottom - text_height - 5), display_text, fill=(255, 255, 255, 255))\n else:\n cur.execute(\"SELECT status FROM staj_attendance WHERE student_id_id=? AND course_id_id=?\", (student_no,course_id,))\n rows = cur.fetchall()\n if len(rows)==0:\n \n draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 0))\n text_width, text_height = draw.textsize(name)\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 255, 0), outline=(0, 255, 0))\n display_text = \"ID:\" + student_no + \"Course:\" + course_name + \"iyi Dersler\"\n draw.text((left + 6, bottom - text_height - 15), display_text, fill=(0, 0, 0))\n #veri tabanına yazılıyor.\n sqlite_insert_query = \"\"\"INSERT INTO staj_attendance (date, time, status, course_id_id, student_id_id) VALUES (?, ?, ?, ?, ?);\"\"\"\n data_tuple = (nowـdate, nowـtime, 1, course_id, student_no)\n cur.execute(sqlite_insert_query, data_tuple)\n conn.commit()\n \n print(\"Python Variables inserted successfully\")\n \n else: \n \n draw.rectangle(((left, top), (right, bottom)), outline=(0, 255, 0))\n text_width, text_height = draw.textsize(name)\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 255, 0), outline=(0, 255, 0))\n display_text = \"ID :\" + student_no + \"Course:\" + course_name + \"its done\"\n draw.text((left + 6, bottom - text_height - 10), display_text, fill=(0, 0, 0))\n \n del draw\n opencvimage = np.array(pil_image)\n return opencvimage\n\n \nif __name__ == \"__main__\":\n \n while '1' == '1':\n now = datetime.now()\n last_student_count = len(student_count(model_path=\"students.clf\"))\n folder_num = glob.glob(\"students/*\")\n len(folder_num)\n print(\"Number of subfolders in students are:\"+str(len(folder_num)))\n if str(last_student_count) < str(folder_num):\n if now.strftime(\"%H:%M:%S\") == str('13:35:00') :\n # if sub folder count in students folder == students.clf count + \"unknown\"\n print(\"Training KNN classifier...\")\n classifier = train(\"students\", model_save_path=\"students.clf\", n_neighbors=2)\n print(\"Training complete!\")\n cur = conn.cursor()\n now = datetime.now()\n nowـdate = now.strftime(\"%Y-%m-%d\")\n nowـtime = now.strftime(\"%H:%M:%S\")\n print(nowـdate)\n print(nowـtime)\n \n cur.execute(\"SELECT * FROM staj_course WHERE date=? 
AND time=?\", (nowـdate,nowـtime,))\n rows = cur.fetchall()\n \n if len(rows)==0:\n print('no class in this time to take attendance')\n else: \n for row in rows:\n course_id = row[0]\n course_name = row[1]\n course_date = row[2]\n course_time = row[3]\n \n # process one frame in every 30 frames for speed\n process_this_frame = 29\n print('Setting cameras up...')\n\n url = 'http://192.168.1.20:8080/video'\n cap = cv2.VideoCapture(0)\n \n \n while 1 > 0:\n ret, frame = cap.read()\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame,'ERCIYES UNIVERSITESI', (50, 50), font, 1,(0, 255, 255),2,cv2.LINE_4)\n \n if ret:\n \n img = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n process_this_frame = process_this_frame + 1\n if process_this_frame % 30 == 0:\n predictions = predict(img, model_path=\"students.clf\")\n frame = show_prediction_labels_on_image(frame, predictions , course_name, course_id, nowـdate, nowـtime)\n cv2.imshow('camera', frame)\n if ord('q') == cv2.waitKey(10):\n cap.release()\n cv2.destroyAllWindows()\n exit(0)\n time.sleep(1)\n\n\n # gelemen ogrenciler 30 de icerinndeki gelmeyenn ogrenciler attanced ekle ve satus 0 olarak eklenmeli"
},
{
"alpha_fraction": 0.6626728177070618,
"alphanum_fraction": 0.6672810912132263,
"avg_line_length": 33.967742919921875,
"blob_id": "be577fac63de3dc5260aa8ce070c522ef413ca13",
"content_id": "b4ea54e2555ffe63ba56b6d6c979d66b96e15887",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 31,
"path": "/staj/admin.py",
"repo_name": "aalroas/StajProject",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n \nfrom .models import Student, Course, Attendance\nfrom django.utils.html import format_html\n# Register your models here.\n \nclass StudentAdmin(admin.ModelAdmin):\n list_display = ('image_tag','id', 'name')\n search_fields = ('name', 'id')\n ordering = ['id']\n def image_tag(self,obj):\n return format_html('<img src=\"{0}\" style=\"width: 45px; height:45px;\" />'.format(obj.image.url))\n\nclass CourseAdmin(admin.ModelAdmin):\n list_display = ('id', 'name','time','date')\n search_fields = ('id', 'name')\n ordering = ['id']\n\nclass AttendanceAdmin(admin.ModelAdmin):\n list_display = ('student','course','status','date','time')\n list_filter = ('status',)\n def student(self, obj):\n if obj.student_id_id:\n return Student.objects.get(id=obj.student_id_id).name\n def course(self, obj):\n if obj.course_id_id:\n return Course.objects.get(id=obj.course_id_id).name\n\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(Attendance, AttendanceAdmin)\n "
},
{
"alpha_fraction": 0.7022256255149841,
"alphanum_fraction": 0.7122026085853577,
"avg_line_length": 42.400001525878906,
"blob_id": "d2f52ecf32da6c471ea291c2c8be0853d31fd5ca",
"content_id": "ce2ffd19bf377ec7ed908bd2ebed82cf02b1b683",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1303,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 30,
"path": "/staj/models.py",
"repo_name": "aalroas/StajProject",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n \nclass Course(models.Model):\n id = models.CharField(primary_key='True', max_length=50)\n name = models.CharField(max_length=50)\n date = models.DateField(blank=True)\n time = models.TimeField(blank=True)\n def __str__(self):\n return self.name\n# sorgulama = selcet id from courses where date = \"biglisayardaki_tarih\" and time = \"biilgisayrdki_simmdi_timme\"\n# sorgulama loop ogreneri liste scrip facce ,ogrenci cam onunndde gelirse - o ogrenci listesi bakacak eger tandik ise \n# course id , student id , date ve tarih, 1 sataus olarak boyle sql db e yazduracak.\n \nclass Student(models.Model):\n id = models.CharField(primary_key='True', max_length=100)\n name = models.CharField(max_length=200)\n def user_directory_path(instance, filename):\n return 'students/{0}/{1}'.format(instance.id,filename)\n image = models.ImageField(upload_to=user_directory_path)\n def __str__(self):\n return self.name\n\nclass Attendance(models.Model):\n course_id = models.ForeignKey(Course, on_delete=models.CASCADE)\n student_id = models.ForeignKey(Student, on_delete=models.CASCADE)\n date = models.DateField(blank=True)\n time = models.TimeField(blank=True)\n status = models.BooleanField(default='False')\n\n"
}
] | 5 |
Neburam/DoItAgain | https://github.com/Neburam/DoItAgain | 2a3a3d936c2a71bae1800bd76b7eb064b678b214 | ab5aa7a5bb0098c1381088609316c9285d5da5b6 | 3dbd34e6848d6ef173c7afee7c9efa25a57849ba | refs/heads/main | 2023-01-11T16:44:36.676656 | 2020-11-13T17:33:36 | 2020-11-13T17:33:36 | 312,641,768 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.612500011920929,
"alphanum_fraction": 0.6196428537368774,
"avg_line_length": 23.39130401611328,
"blob_id": "a16710ba5e744c491e9f9b900afe203cae486580",
"content_id": "7478eaf5e44f3f5b75f23eeb3e54c98cf768a104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 23,
"path": "/solution.py",
"repo_name": "Neburam/DoItAgain",
"src_encoding": "UTF-8",
"text": "import zipfile\nfrom os import getcwd, listdir, chdir\nimport shutil\nName=\"\"\nOrigin=getcwd()+\"/\"\nwhile True:\n listing=listdir()\n print(listing)\n f=\"\"\n for f in listing:\n if 'zip' in f.split('.'):break\n if f==\"\":break\n d=f.split('.')[0]\n try:\n with zipfile.ZipFile(f,'r') as zip_ref:\n zip_ref.extractall()\n except: break\n if d !=\"unzipMe200\":chdir(d)\n else :break\nwith zipfile.ZipFile(\"FinalWork.zip\",'r') as zip_ref:\n zip_ref.extractall()\n\nshutil.move(\"Congratulations.pdf\", Origin+\"Congratulations.pdf\")"
}
] | 1 |
gautamgiri-dev/drone_detection | https://github.com/gautamgiri-dev/drone_detection | f3701ba041dbf3e940c1106513113b12dcc061be | 199fa141a2f751f77087c172abb217e9e5db7d5a | 95b4a3651f5aa10fa77813f0e4cc805c748c7006 | refs/heads/main | 2023-06-24T12:10:03.849567 | 2021-07-28T03:12:31 | 2021-07-28T03:12:31 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8382353186607361,
"alphanum_fraction": 0.8382353186607361,
"avg_line_length": 33,
"blob_id": "6dce6e7632a99487eae223121253f4dfe24fdc4d",
"content_id": "a47aaf2dae6ce51a2838ec6303081f6aa5aae507",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 68,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 2,
"path": "/README.md",
"repo_name": "gautamgiri-dev/drone_detection",
"src_encoding": "UTF-8",
"text": "# drone_detection\nDrone Detection Program with Python Deep Learning\n"
},
{
"alpha_fraction": 0.6612359285354614,
"alphanum_fraction": 0.6764044761657715,
"avg_line_length": 29.169490814208984,
"blob_id": "f9765835bf566ff35c84397954d71d9d9691907b",
"content_id": "e4d37661aa334c8c46d0cd94c8169970ad2733e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1780,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 59,
"path": "/app.py",
"repo_name": "gautamgiri-dev/drone_detection",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom flask import Flask, render_template, request, jsonify, make_response\nfrom waitress import serve\nfrom werkzeug.utils import secure_filename\nimport os\nfrom tensorflow.keras.models import load_model\nfrom uuid import uuid4\nimport numpy as np\n\nTEMPLATE_PATH = os.path.join(os.getcwd(), 'templates')\nMODEL_PATH = os.path.join(os.getcwd(), 'model', 'model.h5')\nUPLOAD_PATH = os.path.join(os.getcwd(), 'static', 'uploads')\nRESULTS_PATH = os.path.join(os.getcwd(), 'results')\nREQUIRED_PATHS = [UPLOAD_PATH]\nfor path in REQUIRED_PATHS:\n os.makedirs(path, exist_ok=True)\n\nmodel = load_model(MODEL_PATH)\n\napp = Flask(__name__)\napp.config['UPLOAD_PATH'] = UPLOAD_PATH\n\n# Home Route\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n# API Endpoint\[email protected]('/api/detect', methods=['POST'])\ndef detect():\n if request.method == 'POST':\n file = request.files['file']\n filename = secure_filename(file.filename)\n FILENAME = uuid4().hex\n filename = os.path.join(app.config['UPLOAD_PATH'], FILENAME)\n file.save(filename)\n boxes, confidences = predict(filename)\n return make_response(jsonify({'image': FILENAME, 'boxes': boxes, 'confidences': confidences}), 200)\n else:\n return 'Method not allowed', 400\n\n# Util Function\ndef predict(image, target_size=(300,300)):\n # preprocessing\n image = cv2.imread(image, 1)\n image = cv2.resize(image, target_size)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(\"float\") / 255.0\n image = np.expand_dims(image, axis=0)\n \n # prediction and post processing\n results = model.predict(image)\n boxes, confidences = results\n\n return boxes.tolist(), confidences.tolist()\n\n\nif __name__ == '__main__':\n serve(app)\n"
}
] | 2 |
nbrick/qudot | https://github.com/nbrick/qudot | cc07a3d00d2709ab9ff4ee7a6926ad841ff9415d | f0095c17536274701272cfe1578cddfb8a3d3c2c | 518722038e5c4cd531300e7e8573a688b4ac52e8 | refs/heads/master | 2021-01-15T20:48:08.449507 | 2015-06-16T00:18:33 | 2015-06-16T00:18:33 | 32,690,928 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5153915882110596,
"alphanum_fraction": 0.5335747599601746,
"avg_line_length": 32.0523681640625,
"blob_id": "336dec8e087ef815480e09505e7e82d98778f6d1",
"content_id": "c088ccefbaa70fdca4d24f6287fa014e26039adb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 13326,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 401,
"path": "/main.cpp",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <cmath>\n#include <limits>\n#include <vector>\n#include <fstream>\n\n\n/* PHYSICAL CONSTANTS */\n\nconstexpr double e = 1.60217657e-19; // Coulombs\nconstexpr double k_B = 1.3806488e-23; // Joules per Kelvin\n\n\n/* USER CONFIGURATION */\n\n// Note: If the simulation halts, try reducing the Runge-Kutta evolution step\n// size, listed below. A starting point is 1e-1 but for complex systems this\n// may need to be reduced as far as 1e-2, or perhaps further. Larger step size\n// means a faster simulation.\n\n/* Semiclassical simulation properties */\nconstexpr double h = 0.3e-1; // Runge-Kutta evolution step size\nconstexpr double convergence_criterion = 1e-5;\n\n/* Temperature */\nconstexpr double temp = 4.2; // Kelvin\n\n/* Dot orbital energies */\nconstexpr double dot_lumo = -5.1*e; // Joules\nconstexpr double single_electron_energies[] = {\n dot_lumo + 0.000*e, dot_lumo + 0.000*e,\n dot_lumo + 0.231*e, dot_lumo + 0.231*e,\n dot_lumo + 0.231*e, dot_lumo + 0.231*e,\n dot_lumo + 0.479*e, dot_lumo + 0.479*e,\n\n}; // Joules\n\n/* Electronic properties of leads (s: source; d: drain) */\nconstexpr double d_fermi_energy = -5.1*e; // Joules\ndouble source_dos (double energy) {\n // Density of states can be an arbitrary function of energy.\n if (energy > 0.0) {} // This suppresses an \"unused variable\" g++ warning.\n return 1;\n}\n\nconstexpr double s_fermi_energy = -5.1*e; // Joules\ndouble drain_dos (double energy) {\n if (energy > 0.0) {}\n return 1;\n}\n\n/* Tunnel widths (by dot level) in arbitrary units */\nconstexpr double source_widths[] {\n 3.0, 3.0,\n 6.0, 6.0,\n 6.0, 6.0,\n 12.0, 12.0,\n};\n\nconstexpr double drain_widths[] {\n 0.5, 0.5,\n 1.0, 1.0,\n 1.0, 1.0,\n 2.0, 2.0,\n};\n\n/* Dot-system capacitances */\nconstexpr double gate_capacitance = 1e-20; // Farads\nconstexpr double source_capacitance = 1.6e-18; // Farads\nconstexpr double drain_capacitance = 1.6e-18; // Farads\nconstexpr double extra_capacitance = 0.1e-18; // Farads\n\n/* Voltage-space to be explored */\nconstexpr double v_g_min = -50; // Volts\nconstexpr double v_g_max = 200; // Volts\nconstexpr int v_g_steps = 200; // (y axis resolution)\n\nconstexpr double v_sd_min = -1.1; // Volts\nconstexpr double v_sd_max = 1.1; // Volts\nconstexpr int v_sd_steps = 200; // (x axis resolution)\n\n/* (end of user configuration) */\n\n\n/* COMPILE-TIME CALCULATIONS */\n\nconstexpr int n_levels = sizeof(single_electron_energies)\n /sizeof(single_electron_energies[0]);\n\nconstexpr int n_source_rates = sizeof(source_widths)\n /sizeof(source_widths[0]);\n\nconstexpr int n_drain_rates = sizeof(drain_widths)\n /sizeof(drain_widths[0]);\n\nstatic_assert(n_levels == n_source_rates && n_levels == n_drain_rates,\n \"Wrong number of tunnelling rates inputted by user.\");\n\nconstexpr double total_capacitance = gate_capacitance + source_capacitance\n + drain_capacitance + extra_capacitance;\n\n\n/* TYPES */\n\ntypedef struct {\n double gate;\n double sd; // Source-drain\n} v_pair;\n\ntypedef unsigned long cfg;\nconstexpr cfg n_configs = 1 << n_levels; // 2^n_levels\n\ntypedef struct {\n cfg to; // Row\n cfg from; // Column\n double value;\n} matrix_elem;\n\ntypedef double mu_spectrum[n_levels];\n\n\n/* BINARY REPRESENTATION OF CONFIGURATIONS */\n\nbool occupied (cfg config, int level) {\n return (config >> level) & 1;\n}\n\nint sum_occupation (cfg config) {\n int sum = 0;\n for (int level = 0; level < n_levels; ++level)\n sum += (int)occupied(config, level);\n return sum;\n}\n\ncfg flipped_occupation (cfg 
config, int level) {\n return config ^ (1 << level);\n}\n\n\n/* THE PHYSICS */\n\n/* Thermodynamics */\n\ndouble fermi (double energy, double chem_pot) {\n double exponent = (energy - chem_pot)/(k_B*temp);\n if (exponent > log(std::numeric_limits<double>::max()))\n return 0.0;\n else if (exponent < log(std::numeric_limits<double>::min()))\n return 1.0;\n else\n return 1.0/(exp(exponent) + 1);\n}\n\n/* Energy shifts in leads owing to applied bias */\n\nconstexpr double s_shift (double energy, double v_sd) {\n return energy - e*v_sd/2.0;\n}\n\nconstexpr double d_shift (double energy, double v_sd) {\n return energy + e*v_sd/2.0;\n}\n\n/* Tunnel rates */\n\ndouble in_from_source (double mu, double v_sd, int level) {\n return source_widths[level]\n * source_dos(mu - s_shift(0.0, v_sd))\n * fermi(mu, s_shift(s_fermi_energy, v_sd));\n}\n\ndouble in_from_drain (double mu, double v_sd, int level) {\n return drain_widths[level]\n * drain_dos(mu - d_shift(0.0, v_sd))\n * fermi(mu, d_shift(d_fermi_energy, v_sd));\n}\n\ndouble out_to_source (double mu, double v_sd, int level) {\n return source_widths[level]\n * source_dos(mu - s_shift(0.0, v_sd))\n * (1 - fermi(mu, s_shift(s_fermi_energy, v_sd)));\n}\n\ndouble out_to_drain (double mu, double v_sd, int level) {\n return drain_widths[level]\n * drain_dos(mu - d_shift(0.0, v_sd))\n * (1 - fermi(mu, d_shift(d_fermi_energy, v_sd)));\n}\n\ndouble current_through_level (double mu, bool occupied, double v_sd, int level)\n{\n if (occupied)\n return -e*(out_to_drain(mu, v_sd, level)\n - out_to_source(mu, v_sd, level));\n else\n return -e*(in_from_source(mu, v_sd, level)\n - in_from_drain(mu, v_sd, level));\n}\n\n/* Charging energy (constant interaction) */\n\ndouble chemical_potential (bool occupied, double single_electron_energy,\n int n_electrons_on_dot, v_pair voltage) {\n\n return single_electron_energy\n + ((e/total_capacitance)\n *((n_electrons_on_dot + (occupied ? -1 : 1)*0.5)*e\n - gate_capacitance*voltage.gate\n - (source_capacitance - drain_capacitance)*voltage.sd/2.0));\n}\n\n/* Rate matrix elements (Beenakker rate equations) */\n\nmatrix_elem diag (cfg config, mu_spectrum spectrum, double v_sd) {\n double value = 0.0;\n for (int level = 0; level < n_levels; ++level) {\n auto mu = spectrum[level];\n if (occupied(config, level))\n value -= (out_to_source(mu, v_sd, level)\n + out_to_drain(mu, v_sd, level));\n else\n value -= (in_from_source(mu, v_sd, level)\n + in_from_drain(mu, v_sd, level));\n }\n return { config, config, value };\n}\n\nmatrix_elem offdiag (cfg to, cfg from, int level, double mu, double v_sd) {\n double value;\n if (occupied(from, level))\n value = out_to_source(mu, v_sd, level)\n + out_to_drain(mu, v_sd, level);\n else\n value = in_from_source(mu, v_sd, level)\n + in_from_drain(mu, v_sd, level);\n return { to, from, value };\n}\n\n\n/* MISCELLANEOUS HELPER */\n\nv_pair voltage_pair_from_index (int index) {\n /* \n * This function is defined such that we iterate through voltage-space\n * like so:\n *\n * ┌┐┌┐┌┐┌┐┌finish\n * │││││││││\n * │││││││││\n * start┘└┘└┘└┘└┘\n *\n * where y: gate voltage; x: source-drain voltage. 
In this way, we ensure\n * our weights guesses will mostly be almost correct.\n */\n v_pair voltage;\n\n if ((int)floor((double)index/(double)v_g_steps) % 2 == 0)\n voltage.gate = v_g_min\n + ((index % v_g_steps)\n *(v_g_max - v_g_min)/(double)v_g_steps);\n else\n voltage.gate = v_g_max\n - (((index % v_g_steps) + 1)\n *(v_g_max - v_g_min)/(double)v_g_steps);\n\n voltage.sd = v_sd_min\n + (floor((double)index/(double)v_g_steps)\n *(v_sd_max - v_sd_min)/(double)(v_sd_steps));\n\n return voltage;\n}\n\n\n/* THE SIMULATION */\n\nint main () {\n\n std::ofstream outfile (\"output.csv\", std::ofstream::out);\n outfile << n_levels << \"\\n\"; // Needed for visualization later.\n\n /* 'Guess' that the dot is initially empty; w = { 1, 0, 0, ... }. */\n std::vector<double> guess (n_configs);\n guess[0] = 1;\n\n /* Iterate through points in voltage-space. */\n for (int voltage_index = 0;\n voltage_index < v_g_steps*v_sd_steps;\n ++voltage_index) {\n\n /* Choose a point in voltage-space. */\n auto voltage = voltage_pair_from_index(voltage_index);\n outfile << voltage.gate << \" ; \" << voltage.sd;\n std::cout << \"v_g:\" << voltage.gate << \" ; \"\n << \"v_sd:\" << voltage.sd;\n\n /* For each possible config, find all the chemical potentials. */\n std::vector<mu_spectrum> mu;\n mu.reserve(n_configs);\n for (cfg config = 0; config < n_configs; ++config) {\n for (int level = 0; level < n_levels; ++level) {\n mu[config][level] = chemical_potential(\n occupied(config, level), single_electron_energies[level],\n sum_occupation(config), voltage);\n }\n }\n\n /* Generate the rate matrix M. */\n // Use a std::vector because we might decide later to make more matrix\n // elements non-zero, e.g. for radiative decay.\n std::vector<matrix_elem> matrix;\n for (cfg to = 0; to < n_configs; ++to) {\n // TODO: Only store matrix elements greater than some tolerance.\n // For \"to\" on the following line only, read \"away\".\n matrix.push_back(diag(to, mu[to], voltage.sd));\n for (int level = 0; level < n_levels; ++level) {\n cfg from = flipped_occupation(to, level);\n matrix.push_back(\n offdiag(to, from, level, mu[from][level], voltage.sd));\n }\n }\n\n /* Iterate on dw/dt = Mw until steady state is found. */\n bool converged = false;\n int cycles = 0;\n while (!converged) {\n ++cycles;\n converged = true; // To be &&'d.\n /* Runge-Kutta (RK4) iteration happens here. 
*/\n double k_1[n_configs] = { 0.0 };\n cfg elem = 0; // This is a summation variable.\n for (cfg config = 0; config < n_configs; ++config) {\n // We assume that matrix elements are ordered by value of\n // matrix[elem].to (as below).\n while (elem < matrix.size() && matrix[elem].to == config) {\n k_1[config] += matrix[elem].value*guess[matrix[elem].from];\n ++elem;\n }\n }\n double k_2[n_configs] = { 0.0 };\n elem = 0; // Reset summation variable.\n for (cfg config = 0; config < n_configs; ++config) {\n while (elem < matrix.size() && matrix[elem].to == config) {\n k_2[config] += matrix[elem].value\n * (guess[matrix[elem].from]\n + (k_1[matrix[elem].from] * h/2.0));\n ++elem;\n }\n }\n double k_3[n_configs] = { 0.0 };\n elem = 0;\n for (cfg config = 0; config < n_configs; ++config) {\n while (elem < matrix.size() && matrix[elem].to == config) {\n k_3[config] += matrix[elem].value\n * (guess[matrix[elem].from]\n + (k_2[matrix[elem].from] * h/2.0));\n ++elem;\n }\n }\n double k_4[n_configs] = { 0.0 };\n elem = 0;\n for (cfg config = 0; config < n_configs; ++config) {\n while (elem < matrix.size() && matrix[elem].to == config) {\n k_4[config] += matrix[elem].value\n * (guess[matrix[elem].from]\n + (k_3[matrix[elem].from] * h));\n ++elem;\n }\n }\n for (cfg config = 0; config < n_configs; ++config) {\n double increment = (k_1[config]\n + 2*k_2[config]\n + 2*k_3[config]\n + k_4[config])\n * h/6.0;\n guess[config] += increment;\n converged = converged && (increment < convergence_criterion);\n }\n }\n // Print the number of Runge-Kutta iterations required for convergence.\n std::cout << \" ; \" << cycles << \" cycles\\n\";\n\n /* Find weighted-average current and write it to file. */\n double current = 0.0;\n for (cfg config = 0; config < n_configs; ++config) {\n for (int level = 0; level < n_levels; ++level) {\n current += guess[config]*current_through_level(\n mu[config][level], occupied(config, level),\n voltage.sd, level);\n }\n }\n outfile << \" ; \" << current/e;\n\n /* Write weights vector to file for later viewing. */\n for (cfg config = 0; config < n_configs; ++config) {\n if (guess[config] > 1e-3)\n outfile << \" ; \"<< config << \" ; \" << guess[config];\n }\n \n outfile << \"\\n\";\n }\n}\n"
},
{
"alpha_fraction": 0.5412228107452393,
"alphanum_fraction": 0.5540759563446045,
"avg_line_length": 31.836502075195312,
"blob_id": "ba822373ba9aa8fc2dcd00d634f219fff6d22064",
"content_id": "01e2a9a772eb118bec22cdcce1d25818f965d8ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8636,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 263,
"path": "/ui.py",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "from math import ceil, log2\nimport csv\n\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport matplotlib.gridspec as gridspec\nimport matplotlib.cm as cm\nfrom matplotlib.widgets import Slider\n\nimport data_handling as dat\n\n\nwith open(\"output.csv\", newline=\"\") as csv_file:\n print(\"Processing CSV... (This might take a few seconds.)\")\n lines = csv.reader(csv_file, delimiter=\";\", quoting=csv.QUOTE_NONNUMERIC)\n n_levels = int(lines.__next__()[0])\n lines = sorted(sorted(lines, key=lambda x: x[1]), key=lambda x: x[0])\n v_sd_range = np.asarray(sorted(list(set([line[1] for line in lines]))))\n v_g_range = np.asarray(sorted(list(set([line[0] for line in lines]))))\n voltage_area = (v_sd_range, v_g_range)\n iw_list = []\n for line in lines:\n current = line[2]\n weights = []\n for index, configuration in enumerate(line[3::2]):\n weights.append((int(configuration), line[2*index + 4]))\n iw_list.append((current, weights))\n\n\n# Define some colors in the format required by matplotlib.\nred = np.array([[1, 0, 0]])\nblack = np.array([[0, 0, 0]])\nwhite = np.array([[1, 1, 1]])\n\nwhitespace = \"\".join([\"\\n\" for _ in range(100)])\n\n\ndef i_sd(v_g=0):\n gs = gridspec.GridSpec(1, 1)\n axes = plot.subplot(gs[0])\n data = dat.get_i_vs_v_sd(iw_list, v_g, voltage_area)\n axes.plot(v_sd_range, data)\n\n axes.set_xlim([v_sd_range[0], v_sd_range[-1]])\n\n axes.set_title(r\"at $V_\\mathrm{g} = \" + \"%.2f\" % v_g + \"\\ \\mathrm{V}$\")\n axes.set_ylabel(r\"$\\sigma / \\mathrm{arb.}$\")\n axes.set_xlabel(r\"$V_\\mathrm{sd} / \\mathrm{V}$\")\n\n plot.show(block=False)\n\n\ndef diff_cond(v_g=0):\n gs = gridspec.GridSpec(1, 1)\n axes = plot.subplot(gs[0])\n data = dat.get_diff_conductance_vs_v_sd(iw_list, v_g, voltage_area)\n axes.plot(v_sd_range, data)\n\n axes.set_xlim([v_sd_range[0], v_sd_range[-1]])\n\n axes.set_title(r\"at $V_\\mathrm{g} = \" + \"%.2f\" % v_g + \"\\ \\mathrm{V}$\")\n axes.set_ylabel(r\"$\\frac{\\mathrm{d}\\sigma} \"\n + r\"{\\mathrm{d}V_\\mathrm{sd}} / \\mathrm{arb.}$\")\n axes.set_xlabel(r\"$V_\\mathrm{sd} / \\mathrm{V}$\")\n\n plot.show(block=False)\n\n\ndef mean_occupation(v_g=0):\n gs = gridspec.GridSpec(1, 1)\n axes = plot.subplot(gs[0])\n data = dat.get_mean_occupation_vs_v_sd(iw_list, v_g, voltage_area)\n axes.plot(v_sd_range, data)\n\n axes.set_xlim([v_sd_range[0], v_sd_range[-1]])\n\n axes.set_title(r\"at $V_\\mathrm{g} = \" + \"%.2f\" % v_g + \"\\ \\mathrm{V}$\")\n axes.set_ylabel(r\"Mean dot occupation number\")\n axes.set_xlabel(r\"$V_\\mathrm{sd} / \\mathrm{V}$\")\n\n plot.show(block=False)\n\n\ndef heatmap(axes=None):\n if axes is None:\n show = True\n gs = gridspec.GridSpec(1, 1)\n heatmap_axes = plot.subplot(gs[0])\n else:\n show = False\n heatmap_axes = axes\n\n current_function, extent = dat.get_plottable_diff_conductance_in_v_space(\n iw_list, voltage_area)\n heatmap_ = heatmap_axes.imshow(current_function, extent=extent,\n interpolation=\"nearest\", aspect=\"auto\",\n cmap=cm.binary)\n\n heatmap_axes.set_xlim([v_sd_range[0], v_sd_range[-1]])\n heatmap_axes.set_ylim([v_g_range[0], v_g_range[-1]])\n\n heatmap_axes.set_xlabel(r\"$V_\\mathrm{sd}/\\mathrm{V}$\")\n heatmap_axes.set_ylabel(r\"$V_\\mathrm{g}/\\mathrm{V}$\")\n heatmap_axes.set_title(\n r\"$\\frac{\\partial I}{\\partial V_\\mathrm{sd}} /\\mathrm{arb.\\ units}$\",\n y=1.04)\n\n heatmap_axes.locator_params(axis=\"x\", nbins=5)\n\n plot.subplots_adjust(bottom=0.15)\n\n if show:\n plot.show(block=False)\n\n\ndef pretty_bin(number, max_number):\n width = ceil(log2(max_number))\n return 
\"\".join([\"*\" if dat.bit(number, index)\n else \"|\"\n for index in range(width)])\n\n\ndef ui(v_sd=0, v_g=0):\n \"\"\"Display a graphical user interface for data exploration.\"\"\"\n\n # Define the layout of the UI and add \"plots\" to UI regions.\n gs = gridspec.GridSpec(\n 6, 1,\n height_ratios=[10, 2, 10, 2, 1, 1])\n heatmap_axes = plot.subplot(gs[0])\n line_plot_axes = plot.subplot(gs[2])\n v_sd_slider_axes = plot.subplot(gs[4])\n v_g_slider_axes = plot.subplot(gs[5])\n\n # Slider UI elements\n # ------------------\n v_sd_slider = Slider(v_sd_slider_axes, r\"$V_\\mathrm{sd}/\\mathrm{V}$\",\n v_sd_range[0], v_sd_range[-1],\n valinit=v_sd)\n v_g_slider = Slider(\n v_g_slider_axes, r\"$V_\\mathrm{g}/\\mathrm{V}$\",\n v_g_range[0], v_g_range[-1],\n valinit=v_g)\n\n # Use an object to store interactive voltages.\n # This makes it simpler to keep track of slider changes persistently.\n class Voltages:\n sd = v_sd\n g = v_g\n\n v = Voltages()\n\n # Conductance heat map\n # --------------------\n heatmap(heatmap_axes)\n\n # We will add a small cross marking the current position in v_g-v_sd space\n # to the heatmap plot. To keep track of its position, we use the following\n # object.\n class PointAnnotation():\n point = heatmap_axes.scatter(v_sd, v_g, marker=\"+\")\n\n annotation = PointAnnotation()\n\n def replot_point_annotation(v_sd_, v_g_):\n annotation.point.remove()\n annotation.point = heatmap_axes.scatter(v_sd_, v_g_, marker=\"+\")\n\n # We also add lines annotating values of v_sd and v_g on the two large\n # plots. v_g_line is persistent and has its y_data updated in redraw().\n # There is no persistent equivalent v_sd_line, because the line plot is\n # completely erased and replotted on each redraw() call; we will make a\n # new axvline each time.\n line_plot_axes.axvline(x=v.sd)\n v_g_line = heatmap_axes.axhline(y=v.g)\n\n # Labels\n # ------\n def label_figures():\n line_plot_axes.set_xlabel(r\"$V_\\mathrm{sd}/\\mathrm{V}$\")\n line_plot_axes.set_ylabel(r\"Current $I / \\mathrm{arb.}$\")\n\n # Slider update actions\n # ---------------------\n def redraw():\n # Move annotations.\n replot_point_annotation(v.sd, v.g)\n v_g_line.set_ydata(v.g)\n\n # Retrieve from memory and plot current.\n i_vs_v_sd = dat.get_i_vs_v_sd(iw_list, v.g, voltage_area)\n line_plot_axes.clear()\n line_plot_axes.plot(v_sd_range,\n [current for current in i_vs_v_sd],\n \"black\")\n line_plot_axes.set_xlim([v_sd_range[0], v_sd_range[-1]])\n line_plot_axes.axvline(x=v.sd)\n\n current, weights = dat.get_iw_tuple(iw_list, v.sd, v.g, voltage_area)\n\n print(whitespace)\n\n v.sd, v.g = dat.get_voltage_pair_from_index(\n dat.get_index_from_voltage_pair(v.sd, v.g, voltage_area),\n voltage_area)\n\n occupancy_weights = np.zeros(n_levels)\n\n for configuration, weight in sorted([(configuration, weight)\n for configuration, weight in weights],\n key=lambda x: x[1], reverse=False):\n\n weight_bar = \"\".join([\"=\" if point < weight\n else \" \"\n for point\n in np.linspace(0, 1 - 1e-10, 40)])\n\n print(\" \" + pretty_bin(configuration, 2**n_levels),\n \"%.3f\" % weight, weight_bar)\n\n occupancy_weights[dat.sum_bits(configuration)] += weight\n\n print(\"\\nabove: configuration weights ; below: occupancy weights\\n\")\n\n for non_occupancy, weight in enumerate(occupancy_weights[::-1]):\n\n if (weight > 1e-3):\n\n weight_bar = \"\".join([\"=\" if point < weight\n else \" \"\n for point\n in np.linspace(0, 1 - 1e-10, 40)])\n\n print(\"\", n_levels - non_occupancy - 1, \":\",\n \"%.3f\" % weight, weight_bar)\n\n 
print(\"\\nv_g/V =\", \"%.3f\" % v.g, \"; v_sd/V =\", \"%.3f\" % v.sd)\n print(\"mean occupation =\", \"%.3f\" % dat.mean_occupation(weights))\n print(\"current =\", \"%.3f\" % (current), \"arb. units\")\n\n print(\"\\n>>> \", end=\"\")\n\n label_figures()\n\n def update_v_sd(v_sd_):\n v.sd = v_sd_\n redraw()\n\n def update_v_g(v_g_):\n v.g = v_g_\n redraw()\n\n # Set sliders to listen for clicks.\n v_sd_slider.on_changed(update_v_sd)\n v_g_slider.on_changed(update_v_g)\n\n # Draw with default v_g, v_sd values.\n redraw()\n\n # Launch the matplotlib window.\n plot.show(block=False)\n\nprint(\"Try for instance ui(), heatmap(), i_sd(0.1) or diff_cond(0.1).\")\n"
},
{
"alpha_fraction": 0.7558411359786987,
"alphanum_fraction": 0.764602780342102,
"avg_line_length": 34.66666793823242,
"blob_id": "d2a3eb2faf2ec09fc29f230806201b6a761aa3d2",
"content_id": "d6a1a6f42dc94f6547ee6aa332ef17cea617246e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1712,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 48,
"path": "/README.md",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "# qudot\n\nThis program simulates current flowing through a nanocrystal on the single-\nelectron level, taking into account the capacitance of the nanocrystal and the\nsingle-particle energies of electrons in the nanocrystal. It is assumed that the\nnanocrystal has a discrete electron spectrum, i.e. that it constitutes a\nquantum dot.\n\nThe core program (`main.cpp`) implements rate equations similar to those in\n[a paper of C. W. J. Beenakker](http://dx.doi.org/10.1103/PhysRevB.44.1646).\nA custom data visualisation tool (based on `matplotlib`) is also included.\n\nFor more details, see\n[the project report](https://github.com/nbrick/qudot-doc/raw/master/report.pdf).\n\n## Requirements\n\n* GCC\n* Python 3\n* matplotlib\n\n### Installing the requirements on Debian-based Linuxes (including Ubuntu)\n\n```\nsudo apt-get install gcc python3 python3-matplotlib\n```\n\n### Installing elsewhere\n\nThis project was developed on Linux but there's no reason it couldn't be run on\nWindows, following changes to some of the boilerplate. Please message me at\n`nebricks`(whirlpool-symbol)`gmail.com` if you need help, or make a pull request\nif you get a Windows version working.\n\n## Usage\n\n* Set the desired simulation parameters in the `USER CONFIGURATION` section of\n `main.cpp`.\n* Execute `./run.sh`. This operation could take anywhere from under a second to\n several hours, depending on the parameters set in `main.cpp`. The results of\n the computation are stored in `output.csv` but are not meant to be\n human-readable.\n* Execute `./view.sh`. This launches a Python interpreter with the output data\n already loaded. To view the data graphically, type in `ui()` or `heatmap()`.\n\n## License\n\nThis project is open-sourced under the MIT License.\n"
},
{
"alpha_fraction": 0.5443037748336792,
"alphanum_fraction": 0.5822784900665283,
"avg_line_length": 25.33333396911621,
"blob_id": "c241f11ce376a51e13cf175961edea36e94846ec",
"content_id": "5cec2d3c848f428d1bd18d7e37a8d3b34536ff96",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 79,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 3,
"path": "/run.sh",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\ng++ -std=c++11 -O3 -Werror -Wall -Wextra -o main main.cpp && ./main\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6071428656578064,
"avg_line_length": 8.333333015441895,
"blob_id": "641f497bbae9fcab061dd3598c2c5b7b94b9057e",
"content_id": "9f8c573edac1b8e809b944b44eed00b2d17c1109",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 28,
"license_type": "permissive",
"max_line_length": 16,
"num_lines": 3,
"path": "/view.sh",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\npython3 -i ui.py\n"
},
{
"alpha_fraction": 0.6310827136039734,
"alphanum_fraction": 0.6392943859100342,
"avg_line_length": 32.896907806396484,
"blob_id": "362c1421e2c6678c526b2ce5641ffc57ef7b302f",
"content_id": "f5b4dc40d05b910ed702c56ac4d875a5e9919a76",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3288,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 97,
"path": "/data_handling.py",
"repo_name": "nbrick/qudot",
"src_encoding": "UTF-8",
"text": "from math import floor, ceil, log2\n\nimport numpy as np\n\n\ndef bit(number, index):\n \"\"\"Return the indexth bit of number.\"\"\"\n return (number >> index) & 1\n\ndef sum_bits(number):\n \"\"\"Return the sum of digits in the binary representation of number.\"\"\"\n if number == 0:\n return 0\n sum_ = 0\n for i in range(ceil(log2(number)) + 1):\n sum_ += bit(number, i)\n return sum_\n\n\ndef get_voltage_pair_from_index(n, voltage_area):\n v_sd_range, v_g_range = voltage_area\n v_sd = v_sd_range[n % len(v_sd_range)]\n v_g = v_g_range[floor(n / len(v_sd_range))]\n return v_sd, v_g\n\n\ndef index_of_closest_element(ascending_list, datum):\n \"\"\"Return index of the list element whose value is closest to datum.\"\"\"\n old_delta = abs(ascending_list[0] - datum)\n for index, element in enumerate(ascending_list[1:]):\n delta = abs(element - datum)\n if delta > old_delta:\n return index\n old_delta = delta\n return len(ascending_list) - 1\n\n\ndef get_index_from_voltage_pair(v_sd, v_g, voltage_area):\n # It is assumed that v_sd_range, v_g_range are ascending.\n v_sd_range, v_g_range = voltage_area\n small_number = index_of_closest_element(v_sd_range, v_sd)\n big_number = index_of_closest_element(v_g_range, v_g)*len(v_sd_range)\n index = big_number + small_number\n return index\n\n\ndef get_iw_tuple(iw_list, v_sd, v_g, voltage_area):\n index = get_index_from_voltage_pair(v_sd, v_g, voltage_area)\n return iw_list[index]\n\n\ndef get_iw_vs_v_sd(iw_list, v_g, voltage_area):\n v_sd_range, _ = voltage_area\n start = get_index_from_voltage_pair(v_sd_range[0], v_g, voltage_area)\n end = get_index_from_voltage_pair(v_sd_range[-1], v_g, voltage_area) + 1\n return iw_list[start:end]\n\n\ndef get_i_vs_v_sd(iw_list, v_g, voltage_area):\n return np.asarray([iw_tuple[0] for iw_tuple\n in get_iw_vs_v_sd(iw_list, v_g, voltage_area)])\n\n\ndef get_diff_conductance_vs_v_sd(iw_tuple, v_g, voltage_area):\n return np.gradient(get_i_vs_v_sd(iw_tuple, v_g, voltage_area))\n\n\ndef get_i_vs_v_g(iw_list, v_sd, voltage_area):\n v_sd_range, v_g_range = voltage_area\n start = get_index_from_voltage_pair(v_sd, v_g_range[0], voltage_area)\n end = get_index_from_voltage_pair(v_sd, v_g_range[-1], voltage_area) + 1\n step = len(v_sd_range)\n return np.asarray([ie_tuple[0] for ie_tuple in iw_list[start:end:step]])\n\n\ndef get_diff_conductance_vs_v_g(iw_list, v_sd, voltage_area):\n return np.gradient(get_i_vs_v_g(iw_list, v_sd, voltage_area))\n\n\ndef get_plottable_diff_conductance_in_v_space(iw_list, voltage_area):\n # Informed by http://stackoverflow.com/questions/6323737/\n x, y = voltage_area\n # 0th element of ie_tuple is the current.\n z = np.asarray([get_diff_conductance_vs_v_sd(iw_list, v_g, voltage_area)\n for v_g in voltage_area[1]])\n nrows, ncols = len(y), len(x)\n grid = z.reshape((nrows, ncols))\n return grid, (x.min(), x.max(), y.max(), y.min())\n\n\ndef mean_occupation(weights):\n return sum(weight*sum_bits(config) for config, weight in weights)\n\n\ndef get_mean_occupation_vs_v_sd(iw_list, v_g, voltage_area):\n return np.asarray([mean_occupation(weights) for current, weights\n in get_iw_vs_v_sd(iw_list, v_g, voltage_area)])\n"
}
] | 6 |
sadunalpdag/ratan_lessons | https://github.com/sadunalpdag/ratan_lessons | 9adb47129dacca31eb0f22a1274049b1ea4e5de8 | 84c70dc938f6c027700680fb5671c2b7775f6301 | aa434b17dd1a1fe676f621bd0e5e765fc9f9c373 | refs/heads/master | 2020-05-18T09:08:33.559752 | 2019-05-01T13:07:58 | 2019-05-01T13:07:58 | 184,315,998 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.611940324306488,
"alphanum_fraction": 0.6567164063453674,
"avg_line_length": 15.75,
"blob_id": "2501ee34fe122b003fcc3fad5085024474068db5",
"content_id": "3173d4f339b60eb3d37ff83caca27742c694dff5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 8,
"path": "/4.py",
"repo_name": "sadunalpdag/ratan_lessons",
"src_encoding": "UTF-8",
"text": "num1 =int(input(\"enter 1. number\"))\n\n\nnum2 = int(input(\"enter 2. number\"))\n\nadd = num1+num2\n\nprint(\"addition of two number is\" , add)\n"
},
{
"alpha_fraction": 0.5322033762931824,
"alphanum_fraction": 0.5898305177688599,
"avg_line_length": 12.615385055541992,
"blob_id": "e04400d4a6b9e802f53d0bec4bbaf28e5b8a455d",
"content_id": "786ed18a9433dbc288f562c5972924bf26af8c47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 65,
"path": "/5.py",
"repo_name": "sadunalpdag/ratan_lessons",
"src_encoding": "UTF-8",
"text": "a=10\nif a>20:\n print (\"true statemet\")\nelse:\n print (\"false statement\")\n\nif True:\n print (\"true statement\")\n\nprint(\"ratan\"),print(\"ratana\") if 10>20 else print (\"durga\"),print(\"durgara\")\n\n{print(\"ratan\"),print(\"ratana\")} if 10>20 else {print (\"durga\"),print(\"durgara\")}\n\na=10\n\nif a==10:\n print(\"true statement\")\nelif a==20:\n print(\"false statement1\")\nelse:\n print(\"false stateement2\")\n\n\n\nfor data in range(10):\n\n print(data)\n\n\n\nfor data in range(2,10):\n print(data)\n\nfor data in range(2,10,5):\n print(data)\n\nL=[10,20,30]\n\nfor x in L:\n print(x)\n\nt=(10,20,30)\nfor x in t:\n print(x)\n\nfor x in range(10):\n print (x)\n print(\"normal\")\nelse:\n print (\"normal\")\n\nfor x in range(10):\n if x==3:\n break\n print(x)\nelse:\n print(\"dkamls\")\n\nimport os\n\nfor x in range(-10,10):\n print(x)\n os.exit(0)\nelse:\n print(\"normal execution\")\n"
}
] | 2 |
vishalgolcha/ML--Clustering-News-Data | https://github.com/vishalgolcha/ML--Clustering-News-Data | f831de5b96710ac78199bd1962e51be2f98d18f8 | 88ea7919d8ef6230ea9f728e8240b5762bf40e08 | f4c007abd5342a3ae4b974d10f3174d69de52bf5 | refs/heads/master | 2021-01-11T19:26:37.924494 | 2017-01-18T17:59:03 | 2017-01-18T17:59:03 | 79,367,581 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5427766442298889,
"alphanum_fraction": 0.5847848653793335,
"avg_line_length": 33.83928680419922,
"blob_id": "04cf7a062e5c0116a8fa9490072e81d487c0204f",
"content_id": "53ae8080d06bcd756061eac53a8d9e80aaf634e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3904,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 112,
"path": "/secndhalf.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "\n# print freq('Mumbai',tdata[0],0)\nfor k in range(len(tdata)):\n # print doc\n # print 'The doc is \"' + doc + '\"'\n tf_vector = [ freq(word,tdata[k],k) for word in vocabulary]\n # print tf_vector\n # tf_vector_string = ', '.join(format(freq, 'd') for freq in tf_vector)\n # print 'The tf vector for Document %d is [%s]' % ((mydoclist.index(doc)+1), tf_vector_string)\n doc_term_matrix.append(tf_vector)\n \n \n# print 'All combined, here is our master document term matrix: '\n# print doc_term_matrix\n\ndoc_term_matrix_l2 = []\nfor vec in doc_term_matrix:\n doc_term_matrix_l2.append(l2_normalizer(vec))\n\n# print 'A regular old document term matrix: ' \n# print np.matrix(doc_term_matrix)\n# print '\\nA document term matrix with row-wise L2 norms of 1:'\n# print np.matrix(doc_term_matrix_l2)\n\nmy_idf_vector = [idf(word, tdata) for word in vocabulary]\n\n# print 'Our vocabulary vector is [' + ', '.join(list(vocabulary)) + ']'\n# print 'The inverse document frequency vector is [' + ', '.join(format(freq, 'f') for freq in my_idf_vector) + ']'\n\nmy_idf_matrix = build_idf_matrix(my_idf_vector)\n\ndoc_term_matrix_tfidf = []\n\n#performing tf-idf matrix multiplication\nfor tf_vector in doc_term_matrix:\n doc_term_matrix_tfidf.append(np.dot(tf_vector, my_idf_matrix))\n\n#normalizing\ndoc_term_matrix_tfidf_l2 = []\nfor tf_vector in doc_term_matrix_tfidf:\n doc_term_matrix_tfidf_l2.append(l2_normalizer(tf_vector))\n \n# print vocabulary\n# print np.matrix(doc_term_matrix_tfidf_l2) \npca = PCA( n_components=2 )\n# print pca.fit_transform(np.matrix(doc_term_matrix_tfidf_l2))\nred_matrix=pca.fit_transform(np.matrix(doc_term_matrix_tfidf_l2))\ndist = 1 - cosine_similarity(red_matrix)\n\nnum_clusters = 15\nkm = KMeans(n_clusters=num_clusters,n_init=30)\nkm.fit(dist)\nclusters = km.labels_.tolist()\nprint clusters\n \n\n\npos=red_matrix\nxs, ys = pos[:, 0], pos[:, 1]\n#set up colors per clusters using a dict\ncluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e',5:'#800000',6:'#808080',7:'#FFC300',\n 8:'#7d5147', 9:'#FF5733',10:'#05de40',11:'#34495e',12:'#f9e79f',13:'#a569bd',14:'#9c640c' }\n\n#set up cluster names using a dict\ncluster_names = {0: '0', \n 1: '1', \n 2: '2', \n 3: '3', \n 4: '4',\n 5: '5',\n 6:'6',\n 7:'7',\n 8:'8',\n 9:'9',\n 10:'10',\n 11:'11',\n 12:'12',\n 13:'13',\n 14:'14'\n }\n\n# matplotlib inline\ndf = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=titles))\ngroups = df.groupby('label')\n\nfig, ax = plt.subplots(figsize=(25, 16)) # set size\nax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\n# grouped = frame['rank'].groupby(frame['cluster'])\n\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, \n label=cluster_names[name], color=cluster_colors[name], \n mec='none')\n ax.set_aspect('auto')\n ax.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off')\n ax.tick_params(\\\n axis= 'y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelleft='off')\n \nax.legend(numpoints=1) #show legend with only 1 point\nfor i in range(len(df)):\n ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=6)\nplt.savefig('start4.png', dpi=400)\nplt.show()\n\n"
},
{
"alpha_fraction": 0.680789589881897,
"alphanum_fraction": 0.687048614025116,
"avg_line_length": 28.26760482788086,
"blob_id": "62fd16989defb99b5eda329c0bf15ebb9ee31566",
"content_id": "6b3c239977618f3767e77e04c6edc912d7da23fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2077,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 71,
"path": "/named_entity.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "import nltk\nimport json\n\n#queries dbpedia for additonal entities\ndef get_more_entities(sample):\n\ta=get_keywords(sample)\n\tb=get_entity(sample)\n\tc=list(set(a)^set(b))\n\tmore=[]\n\tfor i in range(len(c)):\n\t\ttry :\n\t\t\tannotations = spotlight.annotate('http://spotlight.sztaki.hu:2222/rest/annotate',\\\n\t\t\t\tc[i],confidence=0.4,support=20,spotter='Default')\n\t\t\t# print annotations\n\t\t\tb.append(c[i])\n\t\texcept :\n\t\t\tpass\n\n\treturn list(set(b))\n\n\n\ndef get_keywords(sample):\n\tsentences = nltk.sent_tokenize(sample)\n\ttokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n\t# print tokenized_sentences \n\ttagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n\t# print tagged_sentences\n\tfilt=['NN','NNP','NNS','FW']\n\td= [extra[i][0] for extra in tagged_sentences for i in range(len(extra)) if extra[i][1] in filt ]\n\treturn list(set(d))\n\n#detect named entities \ndef get_entity(sample):\n\tsentences = nltk.sent_tokenize(sample)\n\ttokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n\t# print tokenized_sentences \n\ttagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n\t# print tagged_sentences\n\tchunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\t# print chunked_sentences\n\n\t# tags=['NN','NNP','NNS','FW']\n\t# d= [extra[i][0] for extra in tagged_sentences for i in range(len(extra)) if extra[i][1] in tags ]\n\t# keywords.extend(list(set(d)))\n\tdef extract_entity_names(t):\n\t entity_names = []\n\n\t if hasattr(t, 'label') and t.label:\n\t if t.label()=='NE':\n\t entity_names.append(' '.join([child[0] for child in t]))\n\t else:\n\t for child in t:\n\t entity_names.extend(extract_entity_names(child))\n\n\t return entity_names\n\n\tentity_names = []\n\n\tfor tree in chunked_sentences:\n\t # Print results per sentence\n\t # print extract_entity_names(tree)\n\t entity_names.extend(extract_entity_names(tree))\n\n\t# Print all entity names\n\t# print entity_names\n\n\t# Print unique entity names\n\tx=set(entity_names)\n\t# for y in x \n\treturn list(x)"
},
{
"alpha_fraction": 0.69010990858078,
"alphanum_fraction": 0.7010989189147949,
"avg_line_length": 21.799999237060547,
"blob_id": "a197c7af7c1a19f84c3f6b55c13bc46d39090764",
"content_id": "59a847a1fd7b4ca91befa71c1fdd2d9ff7addb3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 20,
"path": "/pandas1.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "import quandl\nimport pandas as pd \n\napi_key= 'bjy2zSTV6VgrdnoNFNdk'\n# df= quandl.get('FMAC/HPI_AK',authtoken=api_key)\n\nprint(df.head())\nfiddy_states=pd.read_html('https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States')\n# print(fiddy_states[0][1])\n\nfor abbv in fiddy_states[0][1][1:]:\n\tquery=\"FMAC/HPI_\"+str(abbv)\n\tdf=quandl.get(query,authtoken=api_key)\n\n\tif main_df.empty:\n\t\tmain_df=df\n\telse:\n\t\tmain_df=main_df.join(df)\n\nprint(main_df.head())"
},
{
"alpha_fraction": 0.623826265335083,
"alphanum_fraction": 0.6402581930160522,
"avg_line_length": 19.45783042907715,
"blob_id": "056e95a1d51a8b8c710b2b7b0a3a316f8755863f",
"content_id": "f3844d4507672eeddf3c0128cfc162ad46edeaf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1704,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 83,
"path": "/suggestion.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "import nltk\nfrom nltk import ngrams\nimport json\nimport pymongo\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\n\n\n\n\nclient =MongoClient('mongodb://digi1:[email protected]:27017')\ndb=client.links \ncollection=db.toi_feed\n# word=\"gistai\"\n\nstop = stopwords.words('english')\nm=[')','(','we','he','she']\nstop.extend(m)\n\n# tags=db.cumulative\n\nwords=[]\nsent=\"\"\ncnt =0 \nfor x in collection.find():\n\tfor i in range(len(x['VisualData'])):\n\t\tcnt+=1\n\t\t# (x['VisualData'][i]['dtext'])\n\t\twords.extend(set(nltk.word_tokenize(x['VisualData'][i]['dtext'])))\n\t\t# print '\\n'\n# print cnt\nred=0\n# filt=[\"AT\",\"WP\",\"PRP\"]\n\nfor i in words :\n\t# print nltk.pos_tag(i) \n\tif i in stop :\n\t\tred+=1\n\t\twords.remove(i)\n# print words\n# print red\n\n#\n\nmix=(list(set(words)))\nlo= [x.lower() for x in mix] \n\ntup=enumerate(list(set(lo)))\ntup = [reversed(x) for x in tup]\nkeyval=dict(tup)\n# print keyval\n\nlosos=[]\n\ndocwords={}\n\n# for t in words:\n# \tfor x in collection.find():\t\n# \t\tfor i in range(len(x['VisualData'])):\t\n# \t\t\tif x['VisualData'][i]['dtext'].find(t) != -1 :\n# \t\t\t\tdocwords[t]+=1\n# [ for t in words for x in collection.find() for i in range(len(x['VisualData'])) if t in x['VisualData'][i]['dtext']]\t\t\t\t\n\ndef idf(word,n_doc):\n return 1+np.log( n_doc/float(1+docwords[word]))\n\nfor x in collection.find():\t\n\tfor i in range(len(x['VisualData'])):\t\n\t\tsos=x['VisualData'][i]['dtext']\n\t\tsos=nltk.word_tokenize(sos)\n\t\tfor k in sos:\n\t\t\tdocwords[k]+=1\n\t\tsos=[keyval[d.lower()] for d in sos if d not in stop]\n\t\t\n\t\t# print sos\n\t\t# print '\\n'\n\tlosos.append(list(set(sos)).sort())\n\n# calculate idf \ns=raw_input()\nk=s.split(' ')\n# take input \n# get all documents which have the term and simultaneously score them \n\n\n\n\n"
},
{
"alpha_fraction": 0.6766612529754639,
"alphanum_fraction": 0.6965153813362122,
"avg_line_length": 26.741573333740234,
"blob_id": "32c583db2df543bd71ff7811d9448a809dfeef27",
"content_id": "fde4bfa9154671665009ead706b736b32a3d7118",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2468,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 89,
"path": "/sim.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "from skimage.measure import compare_ssim as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\n\n# im=[]\n# n=glob.glob(\"/home/ubuntu/imtag/modi/*.png\")\n# n.extend(\"/home/ubuntu/imtag/modi/*.cms\")\n# n.extend(\"/home/ubuntu/imtag/modi/*.jpg\")\n# n.extend(\"/home/ubuntu/imtag/modi/*.jpeg\")\n# #make a dict \n# ima={}\n\n# for i in range(len(im)):\n# \tima[im[i]]=i\n\n# visited={}\n\n#calculates Mean squared error\ndef mse(imageA, imageB):\n\t# the 'Mean Squared Error' between the two images is the\n\t# sum of the squared difference between the two images;\n\t# NOTE: the two images must have the same dimension\n\terr = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\n\terr /= float(imageA.shape[0] * imageA.shape[1])\n\t\n\t# return the MSE, the lower the error, the more \"similar\"\n\t# the two images are\n\treturn err\n \ndef compare_images(imageA, imageB):\n\t# compute the mean squared error and structural similarity\n\t# index for the images\n\tm = mse(imageA, imageB)\n\ts = ssim(imageA, imageB)\n \treturn s \n\ndim =(100,100)\n\n\n# for i in range(len(im)):\n# \tvisited[i]=i\n\n# for i in range(len(im)):\n# \tone = cv2.imread(im[i])\n# \tone = cv2.resize(one, dim, interpolation = cv2.INTER_AREA)\n# \tif visited[i]==i:\n# \t\tfor j in (i+1,len(im)):\n# \t\t\ttwo=cv2.imread(im[j])\n# \t\t\ttwo=cv2.resize(two, dim, interpolation = cv2.INTER_AREA)\n# \t\t\ts=compare_ssim(one,two)\n# \t\t\tif s>=0.9 :\n\n\n\noriginal = cv2.imread(\"copy1.cms\")\ncontrast = cv2.imread(\"copy2.png\")\nshopped = cv2.imread(\"copy1.cms\")\n\n\noriginal1 = cv2.resize(original, dim, interpolation = cv2.INTER_AREA)\ncontrast1 = cv2.resize(original, dim, interpolation = cv2.INTER_AREA)\nshopped1 = cv2.resize(original, dim, interpolation = cv2.INTER_AREA)\n \n# convert the images to grayscale\noriginal = cv2.cvtColor(original1, cv2.COLOR_BGR2GRAY)\ncontrast = cv2.cvtColor(contrast1, cv2.COLOR_BGR2GRAY)\nshopped = cv2.cvtColor(shopped1, cv2.COLOR_BGR2GRAY)\n\n# initialize the figure\nfig = plt.figure(\"Images\")\nimages = (\"Original\", original), (\"Contrast\", contrast), (\"Photoshopped\", shopped)\n \n# loop over the images\nfor (i, (name, image)) in enumerate(images):\n\t# show the image\n\tax = fig.add_subplot(1, 3, i + 1)\n\tax.set_title(name)\n\tplt.imshow(image, cmap = plt.cm.gray)\n\tplt.axis(\"off\")\n \n# show the figure\nplt.show()\n \n# compare the images\ncompare_images(original, original, \"Original vs. Original\")\ncompare_images(original, contrast, \"Original vs. Contrast\")\ncompare_images(original, shopped, \"Original vs. Photoshopped\")"
},
{
"alpha_fraction": 0.5864973664283752,
"alphanum_fraction": 0.6133127808570862,
"avg_line_length": 28.16609001159668,
"blob_id": "55767d2a3b4f7783109b74f0c9ddd33678de94e8",
"content_id": "b04a147303dbaa5e455cf41b16661eafd3934293",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8428,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 289,
"path": "/types.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \nimport nltk \nimport pymongo\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\n# from collections import counter\nfrom named_entity import get_more_entities\nfrom named_entity import get_keywords\nfrom named_entity import get_entity \nimport string \nimport math\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\n# from sklearn.cluster import KMeans\nfrom sklearn.externals import joblib\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nclient =MongoClient('mongodb://digi1:[email protected]:27017')\ndb=client.links \ncollection=db.toi_feed\n\n# titles =[\"Trump\",\"Hillary\",\"Bill\"]\ntitles=[]\ndata=[] \n\n# str1 = \"Donald John Trump is an American businessman , television personality , author and politician\"\n# str2 = \"Hillary Diane Rodham Clinton is an American politician and the presumptive nominee of the Democratic Party for President of the United States in the 2016 election\"\n# str3 = \"Bill Maher is an American comedian, writer , producer , political commentator , actor , media critic , and television host \"\n\nfor x in collection.find():\n # titles.append(x['title'])\n # d=get_keywords(x['title'])\n # b=[]\n # for i in range(len(d)):\n # if i<3:\n # b.append(d[i])\n # delimiter=' '\n # k=delimiter.join(b)\n titles.append('')\n # for i in d :\n\n if x['text']== '':\n data.append(x['title'])\n else :\n data.append(x['text'])\n\ndef build_lexicon(corpus):\n lexicon = set()\n for doc in corpus:\n lexicon.update([word for word in doc])\n return lexicon\n\n# def tf(term, document):\n# return freq(term, document)\n\n\n\n\ndef freq(term, document,num):\n # return document.count(term)\n cnt =0\n mulfact=1 \n # print len(document)\n for i in range(len(document)):\n if term in entity_collection[num]:\n mulfact=3\n if term==document[i] :\n cnt+=1\n\n return cnt*mulfact\n\n\ndef l2_normalizer(vec):\n denom = np.sum([el**2 for el in vec])\n return [(el / math.sqrt(denom)) for el in vec]\n\ndef numDocsContaining(word, doclist):\n doccount = 0\n for j in range(len(doclist)):\n if freq(word,doclist[j],j) > 0:\n doccount +=1\n return doccount \n\ndef idf(word, doclist):\n n_samples = len(doclist)\n df = numDocsContaining(word, doclist)\n # print 1+np.log(n_samples / float(1+df))\n return 1+np.log(n_samples / float(1+df))\n\ndef build_idf_matrix(idf_vector):\n idf_mat = np.zeros((len(idf_vector), len(idf_vector)))\n np.fill_diagonal(idf_mat, idf_vector)\n return idf_mat\n\n\n\nstop = stopwords.words('english')\n# print stop \n\n\t\t\t\t\t\t\n\n# save all text as list here \n# str= \"abc.\\nabc.\\nabc\"\t\n\npdata=[x.replace(\"\\n\",\"\") for x in data] \n\nentity_collection=[ list(set(get_more_entities(x)+get_keywords(x))) for x in pdata ]\n# print \"entity\"\n# print entity_collection\n# print \"collection\"\n\npdata=[ x.replace(\".\",\" \") for x in data ]\n# str=str.replace\n# print str\n#replace the full stops with spaces \n\n\ndoc_count={}\n\ntdata=[ nltk.word_tokenize(x) for x in pdata ]\n# print tdata\n\nfor i in tdata :\n # print i \n for j in i :\n # print j \n if j in stop:\n i.remove(j)\n# tdata=[ i.remove(j) for i in tdata for j in i if j in stop ]\n# print tdata \n\nvocabulary = build_lexicon(tdata)\n# print \"vocab\"\n# print vocabulary\n# print \"bingo\"\ndoc_term_matrix = []\n# print tf.items()\n\n# print freq('Mumbai',tdata[0],0)\nfor k in range(len(tdata)):\n # print 
doc\n # print 'The doc is \"' + doc + '\"'\n tf_vector = [ freq(word,tdata[k],k) for word in vocabulary]\n # print tf_vector\n # tf_vector_string = ', '.join(format(freq, 'd') for freq in tf_vector)\n # print 'The tf vector for Document %d is [%s]' % ((mydoclist.index(doc)+1), tf_vector_string)\n doc_term_matrix.append(tf_vector)\n \n \n# print 'All combined, here is our master document term matrix: '\n# print doc_term_matrix\n\ndoc_term_matrix_l2 = []\nfor vec in doc_term_matrix:\n doc_term_matrix_l2.append(l2_normalizer(vec))\n\n# print 'A regular old document term matrix: ' \n# print np.matrix(doc_term_matrix)\n# print '\\nA document term matrix with row-wise L2 norms of 1:'\n# print np.matrix(doc_term_matrix_l2)\n\nmy_idf_vector = [idf(word, tdata) for word in vocabulary]\n\n# print 'Our vocabulary vector is [' + ', '.join(list(vocabulary)) + ']'\n# print 'The inverse document frequency vector is [' + ', '.join(format(freq, 'f') for freq in my_idf_vector) + ']'\n\nmy_idf_matrix = build_idf_matrix(my_idf_vector)\n\ndoc_term_matrix_tfidf = []\n\n#performing tf-idf matrix multiplication\nfor tf_vector in doc_term_matrix:\n doc_term_matrix_tfidf.append(np.dot(tf_vector, my_idf_matrix))\n\n#normalizing\ndoc_term_matrix_tfidf_l2 = []\nfor tf_vector in doc_term_matrix_tfidf:\n doc_term_matrix_tfidf_l2.append(l2_normalizer(tf_vector))\n \n# print vocabulary\n# print np.matrix(doc_term_matrix_tfidf_l2) \npca = PCA( n_components=2 )\n# print pca.fit_transform(np.matrix(doc_term_matrix_tfidf_l2))\nred_matrix=pca.fit_transform(np.matrix(doc_term_matrix_tfidf_l2))\ndist = 1 - cosine_similarity(red_matrix)\n\nnum_clusters = 15\nkm = KMeans(n_clusters=num_clusters,n_init=30)\nkm.fit(dist)\nclusters = km.labels_.tolist()\nprint clusters\n \n\n\npos=red_matrix\nxs, ys = pos[:, 0], pos[:, 1]\n#set up colors per clusters using a dict\ncluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e',5:'#800000',6:'#808080',7:'#FFC300',\n 8:'#7d5147', 9:'#FF5733',10:'#05de40',11:'#34495e',12:'#f9e79f',13:'#a569bd',14:'#9c640c' }\n\n#set up cluster names using a dict\ncluster_names = {0: '0', \n 1: '1', \n 2: '2', \n 3: '3', \n 4: '4',\n 5: '5',\n 6:'6',\n 7:'7',\n 8:'8',\n 9:'9',\n 10:'10',\n 11:'11',\n 12:'12',\n 13:'13',\n 14:'14'\n }\n\n# matplotlib inline\ndf = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=titles))\ngroups = df.groupby('label')\n\nfig, ax = plt.subplots(figsize=(25, 16)) # set size\nax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\n# grouped = frame['rank'].groupby(frame['cluster'])\n\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, \n label=cluster_names[name], color=cluster_colors[name], \n mec='none')\n ax.set_aspect('auto')\n ax.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off')\n ax.tick_params(\\\n axis= 'y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelleft='off')\n \nax.legend(numpoints=1) #show legend with only 1 point\nfor i in range(len(df)):\n ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=6)\nplt.savefig('start4.png', dpi=400)\nplt.show()\n\n# print km\n# clusters=km.labels_\n# print clusters \n\n# h = .02 \n\n# x_min, x_max = dist[:, 
0].min() - 1, dist[:, 0].max() + 1\n# y_min, y_max = dist[:, 1].min() - 1, dist[:, 1].max() + 1\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n# Z = km.predict(np.c_[xx.ravel(), yy.ravel()])\n# print type(Z)\n# Z = Z.reshape(xx.shape)\n\n# plt.figure(1)\n# plt.clf()\n# plt.imshow(Z, interpolation='nearest',\n# extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n# cmap=plt.cm.Paired,\n# aspect='auto', origin='lower')\n\n# plt.plot(dist[:, 0],dist[:, 1], 'k.', markersize=2)\n# # Plot the centroids as a white X\n# centroids = kmeans.cluster_centers_\n# plt.scatter(centroids[:, 0], centroids[:, 1],\n# marker='x', s=169, linewidths=3,\n# color='w', zorder=10)\n# plt.title('K-means clustering on the digits dataset (PCA-reduced data)\\n'\n# 'Centroids are marked with white cross')\n# plt.xlim(x_min, x_max)\n# plt.ylim(y_min, y_max)\n# plt.xticks(())\n# plt.yticks(())\n# plt.show()"
},
{
"alpha_fraction": 0.7829670310020447,
"alphanum_fraction": 0.7829670310020447,
"avg_line_length": 27.076923370361328,
"blob_id": "3a5eeaa402ce6e4d88410447e840144dc0ccf6cc",
"content_id": "d43379c95fd00d3b4b22b9480ff5c90722504d7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 13,
"path": "/readme.txt",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "bitri.py \t: code for data clustering including bigrams and trigrams for voaculary \n\nnamed_entity: fetches named entities and keywords\n\nsearch : fetches images from internal toi database depending on the given query\n\nsim.py : checks similarity between two images \n\ntfidf.py : code for tfidf \n\ntitled :similar to bitri.py \n\ntypes :data cluster on type of articles ."
},
{
"alpha_fraction": 0.5848056674003601,
"alphanum_fraction": 0.6139575839042664,
"avg_line_length": 15.880597114562988,
"blob_id": "2be0d1f9a5443f2d6430254325fb4bb9f99100fc",
"content_id": "86c1b771f00c4c9f5e737b88108481aedec4e38b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1132,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 67,
"path": "/search.py",
"repo_name": "vishalgolcha/ML--Clustering-News-Data",
"src_encoding": "UTF-8",
"text": "import nltk \nimport pymongo\nfrom pymongo import MongoClient\nimport string \nimport math\nfrom nltk import ngrams\nimport urllib\n\nclient =MongoClient('mongodb://digi1:[email protected]:27017')\ndb=client.links \ncollection=db['toi_feed_2']\n\na= raw_input().split(',')\n\n# def ng():\nt= a[0].split(' ')\n\nlinks=[]\n\nbigrams=[]\ntrigrams=[]\n\n#get ngrams \nb = ngrams(a[0].split(),2)\nf = [e for e in b]\nf = [list(e) for e in f]\nf = [' '.join(h) for h in f]\na.extend(f)\n\nb = ngrams(a[0].split(),3)\nf = [e for e in b]\nf = [list(e) for e in f]\nf = [' '.join(h) for h in f]\na.extend(f)\n\na.extend(f)\na=list(set(a))\n\ncut=0 \n\n#retrieve modi images from database depending on query\n\nfor x in collection.find({'VisualData': { '$exists': True }}):\n\t# print x\n\t# print \"\\n\"\n\t# cut+=1\n\tfor i in x['VisualData']:\n\t\tflag=0\n\t\tfor j in a :\n\t\t\tif j in i['dtext']:\n\t\t\t\tflag=1\n\t\t\t\tbreak\n\t\tif flag==1:\n\t\t\tlinks.extend(i['vsrc'].split('<__>'))\n\t\t\t# print i['vsrc'].split('<__>')\n\nprint a \n\nfor i in links:\n\tif i=='' or i==' ':\n\t\tlinks.remove(i)\n\nprint links \ncnt=0\nfor link in links:\n\tcnt+=1\n\turllib.urlretrieve(link,\"/home/ubuntu/imtag/modi/\"+str(cnt)+link[-5:])\n\n"
}
] | 8 |
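The scripts in ML--Clustering-News-Data above (tfidf.py, bitri.py, titled) build the tf-idf weights, the L2 normalisation and the PCA/KMeans step by hand before plotting the clusters. As a point of reference only, the same flow can be written with scikit-learn's built-in classes; the sketch below is not the repository's code, and the sample documents and the 2-cluster setting are invented for illustration (the scripts themselves use 15 clusters on news articles).

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

# hypothetical stand-ins for the news articles the scripts cluster
docs = [
    "election rally draws a large crowd",
    "prime minister speaks at the rally",
    "home team wins the cricket match",
    "captain scores a century in the match",
]

# unigram/bigram/trigram tf-idf; rows are L2-normalised by default (norm='l2')
tfidf = TfidfVectorizer(ngram_range=(1, 3), stop_words="english")
X = tfidf.fit_transform(docs).toarray()

# reduce to 2 components so the clusters can be drawn on a 2-D scatter plot
red_matrix = PCA(n_components=2).fit_transform(X)

# cluster the reduced vectors (the repository uses n_clusters=15, n_init=30)
km = KMeans(n_clusters=2, n_init=30, random_state=0)
print(km.fit_predict(red_matrix))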
jacarolan/BNL_QCD_ML | https://github.com/jacarolan/BNL_QCD_ML | 29cb2b72c4c9124b82dfaca9a22283c4ed095c1e | c724fb0a0419f50fea755b07eb37177d2ce4154c | 49940044ac19772159b8c50b84c729c70ff1d0cb | refs/heads/master | 2022-12-06T05:31:20.984261 | 2020-07-31T21:39:49 | 2020-07-31T21:39:49 | 274,747,103 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5234330892562866,
"alphanum_fraction": 0.581818163394928,
"avg_line_length": 35.14285659790039,
"blob_id": "08c06a6657fa84431548283fcec706b61dc37af4",
"content_id": "c75d518be8451781a2cad981cb43723c588e9b4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8855,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 245,
"path": "/ML/Python/FakeData Conglomerate.py",
"repo_name": "jacarolan/BNL_QCD_ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.ensemble import GradientBoostingRegressor \nimport matplotlib.pyplot as plt\nimport os.path\nfrom os import path\nfrom sklearn.neural_network import MLPRegressor\n\ndef LoadRawVariables():\n c2pt = []\n ts = []\n taus = []\n xs = []\n ys = []\n zs = []\n c3pt_S = []\n c3pt_V = []\n c3pt_A = []\n c2pt_OTHER = []\n \n \n for tau in range(0, 49, 8):\n for x in range(0, 25, 8):\n for y in range(0, 25, 8):\n for z in range(0, 25, 8):\n for sample in range(748, 800, 16): #1421, 16):\n fname = \"../Data/T\" + str(tau) + \"/x\" + str(x) + \"y\" + str(y) + \"z\" + str(z) + \"/nuc3pt.dat.\" + str(sample)\n if path.exists(fname):\n with open(fname) as fp:\n for i, line in enumerate(fp):\n if i >= 7 and i <= 70: # The start of Gauss -> Point 2pt correlation functions\n c2pt_OTHER.append([float(x) for x in line.rstrip().split()[1:5]])\n if i >= 5182 and i <= 5245: # The start of Gauss -> Gauss 2pt correlation functions\n c2pt.append([float(x) for x in line.rstrip().split()[1:5]])\n ts.append(i - 5182)\n taus.append(tau)\n xs.append(x)\n ys.append(y)\n zs.append(z)\n elif i >= 10154 and i <= 10217:\n c3pt_S.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i >= 10229 and i <= 10292:\n c3pt_V.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i >= 19979 and i <= 20042:\n c3pt_A.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i > 20042:\n break\n \n return ts, taus, xs, ys, zs, c2pt, c3pt_S, c3pt_V, c3pt_A, c2pt_OTHER\n\nts, taus, xs, ys, zs, c2pt, c3pt_S, c3pt_V, c3pt_A, c2pt_OTHER = LoadRawVariables()\n\nc2pt_factor_raw = sum(np.array(c2pt)) / len(c2pt)\nN_factor = np.sqrt(c2pt_factor_raw[0] ** 2 + c2pt_factor_raw[1] ** 2)\n\nfor i in range(len(c2pt)):\n for j in range(len(c2pt[i])):\n c2pt[i][j] /= N_factor\n for j in range(len(c3pt_S[i])):\n c3pt_S[i][j] /= N_factor\n c3pt_V[i][j] /= N_factor\n c3pt_A[i][j] /= N_factor\n c2pt_OTHER[i][j] /= N_factor\n\n# features = np.array([np.array([ts[i], taus[i], xs[i], ys[i], zs[i], c2pt[i][0], c2pt[i][1]]) for i in range(len(ts))])\nfeatures_unshifted = np.array([[taus[i]] + [c2pt[i + j][0] for j in range(64)] + [c2pt[i + j][1] for j in range(64)] for i in range(0, len(ts), 64)])\nfeatures = []\nfor f in features_unshifted:\n shift = int(f[0])\n features.append(np.roll(f[1:], -shift))\n\nfeatures = np.array(features)\n\nlabels_S_up = np.array([sum(c3pt_S[i:i+64][0]) / 64 for i in range(0, len(c3pt_S), 64)])\nlabels_A_up = np.array([sum(c3pt_A[i:i+64][0]) / 64 for i in range(0, len(c3pt_A), 64)])\nlabels_V_up = np.array([sum(c3pt_V[i:i+64][0]) / 64 for i in range(0, len(c3pt_V), 64)])\n\nlabels_S_down = np.array([sum(c3pt_S[i:i+64][2]) / 64 for i in range(0, len(c3pt_S), 64)])\nlabels_A_down = np.array([sum(c3pt_A[i:i+64][2]) / 64 for i in range(0, len(c3pt_A), 64)])\nlabels_V_down = np.array([sum(c3pt_V[i:i+64][2]) / 64 for i in range(0, len(c3pt_V), 64)])\n\nlabelFrac = 0.5\nBCFrac = 0.1\n\nc2pt_footer = \"ENDPROP\\n\"\nc3pt_footer = \"END_NUC3PT\\n\"\nc2pt_header = \"\"\"STARTPROP\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0 \nSINK: POINT\nMOM: 0 0 0\nOPER: NUC_G5C_PP5\n\"\"\"\nc3pt_V_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: G4\nOP_MOM: 0 0 0\nFACT: 1.000000e+00 0.000000e+00\nPROJ: PPAR\nQUARKS: up down\n\"\"\"\nc3pt_S_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: 
G0\nOP_MOM: 0 0 0\nFACT: 1.000000e+00 0.000000e+00\nPROJ: PPAR\nQUARKS: up down\n\"\"\"\nc3pt_A_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: G5G3\nOP_MOM: 0 0 0\nFACT: 0.000000e+00 1.000000e+00\nPROJ: PPAR_5Z\nQUARKS: up down\n\"\"\"\n\n\n\nlabelEnd = int(len(labels_S_up) * labelFrac)\nBCEnd = int(len(labels_S_up) * (BCFrac + labelFrac))\n\n### Scalar Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_S_up[:labelEnd], labels_S_down[:labelEnd]\nX_bc, Y_bc_up, Y_bc_down = features[labelEnd:BCEnd], labels_S_up[labelEnd:BCEnd], labels_S_down[labelEnd:BCEnd]\nX_test, Y_test_up, Y_test_down = features[BCEnd:], labels_S_up[BCEnd:], labels_S_down[BCEnd:]\n\ngbr_up = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_up.fit(X_train, Y_train_up)\n\ny_bc_pred = gbr_up.predict(X_bc)\n\nbiasCrxn_up = np.average(Y_bc_up - y_bc_pred)\n\ngbr_down = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_down.fit(X_train, Y_train_down)\n\ny_bc_pred = gbr_down.predict(X_bc)\n\nbiasCrxn_down = np.average(Y_bc_down - y_bc_pred)\n\ntrials = 0\nerrors = []\nraw_RMS = 0\nML_samples = []\nDM_samples = []\nfor i in range(len(X_test)):\n testImg = X_test[i]\n pred_up = gbr_up.predict([testImg])[0] + biasCrxn_up\n pred_down = gbr_down.predict([testImg])[0] + biasCrxn_down\n fakeName = \"../Data/FakeData/FakeData\" + str(i) + \".txt\"\n if not os.path.exists(fakeName):\n \twith open(fakeName, 'w+'): pass\n fakeDataFile = open(fakeName, \"r+\")\n fakeDataFile.truncate(0)\n fakeDataFile.write(c2pt_header)\n for j in range(64):\n \tfakeDataFile.write(str(j) + \" \" + str(X_test[i][j]) + \" \" + str(X_test[i][j + 64]) + \"\\n\")\n fakeDataFile.write(c2pt_footer)\n fakeDataFile.write(c3pt_S_header)\n for j in range(64):\n \tfakeDataFile.write(str(j) + \" \" + str(pred_up) + \" 0.0 \" + str(pred_down) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n### Vector Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_V_up[:labelEnd], labels_V_down[:labelEnd]\nX_bc, Y_bc_up, Y_bc_down = features[labelEnd:BCEnd], labels_V_up[labelEnd:BCEnd], labels_V_down[labelEnd:BCEnd]\nX_test, Y_test_up, Y_test_down = features[BCEnd:], labels_V_up[BCEnd:], labels_V_down[BCEnd:]\n\ngbr_up = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_up.fit(X_train, Y_train_up)\n\ny_bc_pred = gbr_up.predict(X_bc)\n\nbiasCrxn_up = np.average(Y_bc_up - y_bc_pred)\n\ngbr_down = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_down.fit(X_train, Y_train_down)\n\ny_bc_pred = gbr_down.predict(X_bc)\n\nbiasCrxn_down = np.average(Y_bc_down - y_bc_pred)\n\ntrials = 0\nerrors = []\nraw_RMS = 0\nML_samples = []\nDM_samples = []\nfor i in range(len(X_test)):\n testImg = X_test[i]\n pred_up = gbr_up.predict([testImg])[0] + biasCrxn_up\n pred_down = gbr_down.predict([testImg])[0] + biasCrxn_down\n fakeName = \"../Data/FakeData/FakeData\" + str(i) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n fakeDataFile.write(c3pt_V_header)\n for j in range(64):\n \tfakeDataFile.write(str(j) + \" \" + str(pred_up) + \" 0.0 \" + str(pred_down) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n### Axial Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_A_up[:labelEnd], labels_A_down[:labelEnd]\nX_bc, Y_bc_up, Y_bc_down = features[labelEnd:BCEnd], labels_A_up[labelEnd:BCEnd], 
labels_A_down[labelEnd:BCEnd]\nX_test, Y_test_up, Y_test_down = features[BCEnd:], labels_A_up[BCEnd:], labels_A_down[BCEnd:]\n\ngbr_up = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_up.fit(X_train, Y_train_up)\n\ny_bc_pred = gbr_up.predict(X_bc)\n\nbiasCrxn_up = np.average(Y_bc_up - y_bc_pred)\n\ngbr_down = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\ngbr_down.fit(X_train, Y_train_down)\n\ny_bc_pred = gbr_down.predict(X_bc)\n\nbiasCrxn_down = np.average(Y_bc_down - y_bc_pred)\n\ntrials = 0\nerrors = []\nraw_RMS = 0\nML_samples = []\nDM_samples = []\nfor i in range(len(X_test)):\n testImg = X_test[i]\n pred_up = gbr_up.predict([testImg])[0] + biasCrxn_up\n pred_down = gbr_down.predict([testImg])[0] + biasCrxn_down\n fakeName = \"../Data/FakeData/FakeData\" + str(i) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n fakeDataFile.write(c3pt_V_header)\n for j in range(64):\n \tfakeDataFile.write(str(j) + \" \" + str(pred_up) + \" 0.0 \" + str(pred_down) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n"
},
{
"alpha_fraction": 0.6770833134651184,
"alphanum_fraction": 0.6973379850387573,
"avg_line_length": 28.305084228515625,
"blob_id": "635fb47f7f1e329b64eea725693efd5955548c90",
"content_id": "005b1801732e50f08ec46e403cbbf8afa7ed2c13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1728,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 59,
"path": "/ML/Data/PNDME_3pt_2pt_ML_data/read_data.py",
"repo_name": "jacarolan/BNL_QCD_ML",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport numpy as np\nfrom sklearn import ensemble\nimport matplotlib.pyplot as plt\n\n# Read data\ndata = np.load('data-axial.npy', allow_pickle=True).tolist()\n# data['train'/'test']['input'/'output']\n# inputs are vectors with 40 elements, and outputs are single numbers\n\nprint(data['train']['input'].shape)\nprint(data['train']['output'].shape)\nprint(data['test']['input'].shape)\nprint(data['test']['output'].shape)\n\n# Train regression algorithm\nregr = ensemble.GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)\nregr.fit(data['train']['input'], data['train']['output'])\n\n# Make predictions\ny_pred = regr.predict(data['test']['input'])\n\n# Calculate error\npred_err = data['test']['output'] - y_pred\npred_quality = np.std(pred_err) / np.std(data['train']['output'])\n\n# Here pred_quality (prediction quality) is square-root of \n# the mean square error normalized by the standard deviation\n# of raw data. It becomes 0 for a perfect prediction, and \n# pred_quality > 1 indicates no prediction.\n\nprint(\"Prediction quality = \", pred_quality)\n# Expected output: \"Prediction quality = 0.5220197021694335\"\n\nX_test = data['test']['input']\nY_test = data['test']['output']\n\ntrials = 0\nerrors = []\nraw_RMS = 0\nML_samples = []\nDM_samples = []\nfor i in range(len(X_test)):\n testImg = X_test[i]\n testLabel = Y_test[i]\n pred = regr.predict([testImg])\n errors.append(pred - testLabel)\n ML_samples.append(pred[0])\n DM_samples.append(testLabel)\n \nprint(\"Prediction quality:\", np.std(errors) / np.std(Y_test))\n\nplt.hist(DM_samples, bins=20)\nplt.hist(ML_samples, bins=20)\nplt.legend([\"Raw Data\", \"ML Prediction\"])\nplt.xlabel(\"Real part of c3pt\")\nplt.ylabel(\"Prediction count\")\nplt.show()"
},
{
"alpha_fraction": 0.5248627066612244,
"alphanum_fraction": 0.5779733657836914,
"avg_line_length": 44.21768569946289,
"blob_id": "c3b7be4cc3499da7175333f24c49adade90b066c",
"content_id": "41db992bac8ceee2fa09e419d98acbf12719c862",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13293,
"license_type": "no_license",
"max_line_length": 260,
"num_lines": 294,
"path": "/ML/Python/Fake Data Sliced.py",
"repo_name": "jacarolan/BNL_QCD_ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.ensemble import GradientBoostingRegressor \nimport matplotlib.pyplot as plt\nimport os.path\nfrom os import path\nfrom sklearn.neural_network import MLPRegressor\n\ndef LoadRawVariables():\n c2pt = []\n ts = []\n taus = []\n xs = []\n ys = []\n zs = []\n c3pt_S = []\n c3pt_V = []\n c3pt_A = []\n c2pt_OTHER = []\n sample_num = []\n \n \n for tau in range(0, 49, 8):\n for x in range(0, 25, 8):\n for y in range(0, 25, 8):\n for z in range(0, 25, 8):\n for sample in range(748, 1421, 16):\n fname = \"../Data/T\" + str(tau) + \"/x\" + str(x) + \"y\" + str(y) + \"z\" + str(z) + \"/nuc3pt.dat.\" + str(sample)\n if path.exists(fname):\n with open(fname) as fp:\n for i, line in enumerate(fp):\n if i >= 7 and i <= 70: # The start of Gauss -> Point 2pt correlation functions\n c2pt_OTHER.append([float(x) for x in line.rstrip().split()[1:5]])\n if i >= 5182 and i <= 5245: # The start of Gauss -> Gauss 2pt correlation functions\n c2pt.append([float(x) for x in line.rstrip().split()[1:5]])\n ts.append(i - 5182)\n taus.append(tau)\n sample_num.append(sample)\n xs.append(x)\n ys.append(y)\n zs.append(z)\n elif i >= 10154 and i <= 10217:\n c3pt_S.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i >= 10229 and i <= 10292:\n c3pt_V.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i >= 19979 and i <= 20042:\n c3pt_A.append([float(x) for x in line.rstrip().split()[1:5]])\n elif i > 20042:\n break\n \n return ts, taus, xs, ys, zs, c2pt, c3pt_S, c3pt_V, c3pt_A, c2pt_OTHER, sample_num\n\nts, taus, xs, ys, zs, c2pt, c3pt_S, c3pt_V, c3pt_A, c2pt_OTHER, sample_num = LoadRawVariables()\n\nc2pt_factor_raw = sum(np.array(c2pt)) / len(c2pt)\nN_factor = np.sqrt(c2pt_factor_raw[0] ** 2 + c2pt_factor_raw[1] ** 2)\n\nfor i in range(len(c2pt)):\n for j in range(len(c2pt[i])):\n c2pt_OTHER[i][j] /= N_factor\n c2pt[i][j] /= N_factor\n for j in range(len(c3pt_S[i])):\n c3pt_S[i][j] /= N_factor\n c3pt_V[i][j] /= N_factor\n c3pt_A[i][j] /= N_factor\n\n## Features is a <# Data> x <Size Data> 2D array for all lists of features (same for each time slice)\n## Labels is a 64 x <# Data> 2D array for all 64 sets of lists of labels\n\nfeatures_unshifted = np.array([[taus[i]] + [c2pt[i + j][0] for j in range(64)] + [c2pt[i + j][1] for j in range(64)] for i in range(0, len(ts), 64)])\nfeatures = []\n\nlabels_S_up = np.array([[c3pt_S[i+j][0] for i in range(0, len(c3pt_S), 64)] for j in range(64)])\nlabels_A_up = np.array([[c3pt_A[i+j][0] for i in range(0, len(c3pt_A), 64)] for j in range(64)])\nlabels_V_up = np.array([[c3pt_V[i+j][0] for i in range(0, len(c3pt_V), 64)] for j in range(64)])\n\nlabels_S_down = np.array([[c3pt_S[i+j][2] for i in range(0, len(c3pt_S), 64)] for j in range(64)])\nlabels_A_down = np.array([[c3pt_A[i+j][2] for i in range(0, len(c3pt_A), 64)] for j in range(64)])\nlabels_V_down = np.array([[c3pt_V[i+j][2] for i in range(0, len(c3pt_V), 64)] for j in range(64)])\n\nfor i in range(len(features_unshifted)):\n shift = int(features_unshifted[i, 0])\n features.append(np.append(np.roll(features_unshifted[i, 1:65], -shift), np.roll(features_unshifted[i, 65:], -shift)))\n labels_S_up[:, i] = np.roll(labels_S_up[:, i], -shift)\n labels_A_up[:, i] = np.roll(labels_A_up[:, i], -shift)\n labels_V_up[:, i] = np.roll(labels_V_up[:, i], -shift)\n labels_S_down[:, i] = np.roll(labels_S_down[:, i], -shift)\n labels_A_down[:, i] = np.roll(labels_A_down[:, i], -shift)\n labels_V_down[:, i] = np.roll(labels_V_down[:, i], -shift)\n\nfeatures = np.array(features)\n\n\ndef 
shuffle64Block(arr, perm):\n new_arr = arr.reshape((len(arr) // 64, 64))\n np.take(new_arr, perm, axis=0, out=new_arr)\n return new_arr.reshape(len(new_arr) * 64)\n\nf_len = features.shape[0]\n\nperm = np.append(np.append(np.append(np.append(np.arange(0, f_len, 5), np.arange(1, f_len, 5)), np.arange(2, f_len, 5)), np.arange(3, f_len, 5)), np.arange(4, f_len, 5))\nprint(perm)\nnp.take(features, perm, axis=0, out=features)\nnp.take(labels_S_up, perm, axis=1, out=labels_S_up)\nnp.take(labels_A_up, perm, axis=1, out=labels_A_up)\nnp.take(labels_V_up, perm, axis=1, out=labels_V_up)\nnp.take(labels_S_down, perm, axis=1, out=labels_S_down)\nnp.take(labels_A_down, perm, axis=1, out=labels_A_down)\nnp.take(labels_V_down, perm, axis=1, out=labels_V_down)\nxs = shuffle64Block(np.array(xs), perm)\nys = shuffle64Block(np.array(ys), perm)\nzs = shuffle64Block(np.array(zs), perm)\nts = shuffle64Block(np.array(ts), perm)\ntaus = shuffle64Block(np.array(taus), perm)\nsample_num = shuffle64Block(np.array(sample_num), perm)\n\nc2pt_footer = \"ENDPROP\\n\"\nc3pt_footer = \"END_NUC3PT\\n\"\nc2pt_header = \"\"\"STARTPROP\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS\nMOM: 0 0 0\nOPER: NUC_G5C_PP\n\"\"\"\nc3pt_V_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: G4\nOP_MOM: 0 0 0\nFACT: 1.000000e+00 0.000000e+00\nPROJ: PPAR\nQUARKS: up down\n\"\"\"\nc3pt_S_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: G0\nOP_MOM: 0 0 0\nFACT: 1.000000e+00 0.000000e+00\nPROJ: PPAR\nQUARKS: up down\n\"\"\"\nc3pt_A_header = \"\"\"START_NUC3PT\nMASSES: 1.000000e-03 1.000000e-03 1.000000e-03\nSOURCE: GAUSS 70 600 0\nSINK: GAUSS 9\nSNK_MOM: 0 0 0\nOPER: G5G3\nOP_MOM: 0 0 0\nFACT: 0.000000e+00 1.000000e+00\nPROJ: PPAR_5Z\nQUARKS: up down\n\"\"\"\n\nlabelFrac = 0.2\n\nlabelEnd = int(len(labels_S_up[0]) * labelFrac)\n\n## Writing Fake Data\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_S_up[:, :labelEnd], labels_S_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_S_up[:, labelEnd:], labels_S_down[:, labelEnd:]\n\ngbr_up = list(range(64))\ngbr_down = list(range(64))\nfor i in range(64):\n gbr_up[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_up[i].fit(X_train, Y_train_up[i])\n \n gbr_down[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_down[i].fit(X_train, Y_train_down[i])\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/FakeData/FakeData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n if not os.path.exists(fakeName):\n with open(fakeName, 'w+'): pass\n fakeDataFile = open(fakeName, \"r+\")\n fakeDataFile.truncate(0)\n fakeDataFile.write(c2pt_header)\n testImg = X_test[i]\n for t in range(64):\n \tfakeDataFile.write(str(t) + \" \" + str(X_test[i][t] * N_factor) + \" \" + str(X_test[i][t + 64] * N_factor) + \"\\n\")\n fakeDataFile.write(c2pt_footer)\n fakeDataFile.write(c3pt_S_header)\n for t in range(64):\n pred_up = gbr_up[t].predict([testImg])[0]\n pred_down = gbr_down[t].predict([testImg])[0]\n fakeDataFile.write(str(t) + \" \" + str(pred_up * N_factor) + \" 0.0 \" + str(pred_down * N_factor) + \" 
0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n### Vector Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_V_up[:, :labelEnd], labels_V_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_V_up[:, labelEnd:], labels_V_down[:, labelEnd:]\n\ngbr_up = list(range(64))\ngbr_down = list(range(64))\nfor i in range(64):\n gbr_up[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_up[i].fit(X_train, Y_train_up[i])\n \n gbr_down[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_down[i].fit(X_train, Y_train_down[i])\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/FakeData/FakeData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n testImg = X_test[i]\n fakeDataFile.write(c3pt_V_header)\n for t in range(64):\n pred_up = gbr_up[t].predict([testImg])[0]\n pred_down = gbr_down[t].predict([testImg])[0]\n fakeDataFile.write(str(t) + \" \" + str(pred_up * N_factor) + \" 0.0 \" + str(pred_down * N_factor) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n### Axial Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_A_up[:, :labelEnd], labels_A_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_A_up[:, labelEnd:], labels_A_down[:, labelEnd:]\n\ngbr_up = list(range(64))\ngbr_down = list(range(64))\nfor i in range(64):\n gbr_up[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_up[i].fit(X_train, Y_train_up[i])\n \n gbr_down[i] = GradientBoostingRegressor(learning_rate=0.1, n_estimators=100, max_depth=3)\n gbr_down[i].fit(X_train, Y_train_down[i])\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/FakeData/FakeData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n testImg = X_test[i]\n fakeDataFile.write(c3pt_A_header)\n for t in range(64):\n pred_up = gbr_up[t].predict([testImg])[0]\n pred_down = gbr_down[t].predict([testImg])[0]\n fakeDataFile.write(str(t) + \" \" + str(pred_up * N_factor) + \" 0.0 \" + str(pred_down * N_factor) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n\n## Writing Real data\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_S_up[:, :labelEnd], labels_S_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_S_up[:, labelEnd:], labels_S_down[:, labelEnd:]\n\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/RealData/RealData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n if not os.path.exists(fakeName):\n with open(fakeName, 'w+'): pass\n fakeDataFile = open(fakeName, \"r+\")\n fakeDataFile.truncate(0)\n fakeDataFile.write(c2pt_header)\n testImg = X_test[i]\n for t in range(64):\n fakeDataFile.write(str(t) + \" \" + str(X_test[i][t] * N_factor) + \" \" + str(X_test[i][t + 64] * N_factor) + \"\\n\")\n fakeDataFile.write(c2pt_footer)\n fakeDataFile.write(c3pt_S_header)\n for t in range(64):\n 
fakeDataFile.write(str(t) + \" \" + str(Y_test_up[t][i] * N_factor) + \" 0.0 \" + str(Y_test_down[t][i] * N_factor) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n \n### Vector Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_V_up[:, :labelEnd], labels_V_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_V_up[:, labelEnd:], labels_V_down[:, labelEnd:]\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/RealData/RealData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n testImg = X_test[i]\n fakeDataFile.write(c3pt_V_header)\n for t in range(64):\n fakeDataFile.write(str(t) + \" \" + str(Y_test_up[t][i] * N_factor) + \" 0.0 \" + str(Y_test_down[t][i] * N_factor) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)\n\n### Axial Charge\n\nX_train, Y_train_up, Y_train_down = features[:labelEnd], labels_A_up[:, :labelEnd], labels_A_down[:, :labelEnd]\nX_test, Y_test_up, Y_test_down = features[labelEnd:], labels_A_up[:, labelEnd:], labels_A_down[:, labelEnd:]\n\nfor i in range(len(X_test)):\n fakeName = \"../Data/RealData/RealData_x\" + str(xs[64 * labelEnd + 64 * i]) + \"y\" + str(ys[64 * labelEnd + 64 * i]) + \"z\" + str(zs[64 * labelEnd + 64 * i]) + \"samp\" + str(sample_num[64 * labelEnd + 64 * i]) + \"t\" + str(taus[64 * labelEnd + 64 * i]) + \".txt\"\n fakeDataFile = open(fakeName, \"a\")\n testImg = X_test[i]\n fakeDataFile.write(c3pt_A_header)\n for t in range(64):\n fakeDataFile.write(str(t) + \" \" + str(Y_test_up[t][i] * N_factor) + \" 0.0 \" + str(Y_test_down[t][i] * N_factor) + \" 0.0\\n\")\n fakeDataFile.write(c3pt_footer)"
},
{
"alpha_fraction": 0.6786961555480957,
"alphanum_fraction": 0.6926658749580383,
"avg_line_length": 30.851852416992188,
"blob_id": "523d7d12554cdce828d90d5c565500027e20fffa",
"content_id": "3dab50bc116e959cdd35f902cbdde93bb3a343c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 859,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/ML/Data/PNDME_3pt_2pt_ML_data/correlate_plots.py",
"repo_name": "jacarolan/BNL_QCD_ML",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport numpy as np\nfrom sklearn import ensemble\nfrom scipy.stats import pearsonr\n\n# Read data\ndata = np.load('data-axial.npy', allow_pickle=True).tolist()\n# data['train'/'test']['input'/'output']\n# inputs are vectors with 40 elements, and outputs are single numbers\n\nc2pt_avgs = np.array([sum(x) / len(x) for x in data['train']['input']])\n\ncorr, _ = pearsonr(c2pt_avgs, data['train']['output'])\n\nprint(\"Axial correlation at 10a separation: \", np.sqrt(corr))\n\n# Read data\ndata = np.load('data-vector.npy', allow_pickle=True).tolist()\n# data['train'/'test']['input'/'output']\n# inputs are vectors with 40 elements, and outputs are single numbers\n\nc2pt_avgs = np.array([sum(x) / len(x) for x in data['train']['input']])\n\ncorr, _ = pearsonr(c2pt_avgs, data['train']['output'])\n\nprint(\"Vector correlation at 10a separation: \", np.sqrt(corr))"
}
] | 4 |
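The FakeData scripts in BNL_QCD_ML above pair a per-channel GradientBoostingRegressor with a simple additive bias correction: the mean residual measured on a small held-out bias-correction split (biasCrxn_up / biasCrxn_down) is added to every later prediction. Below is a minimal, self-contained sketch of that scheme; the synthetic features, split sizes and seed are made up for illustration and only mirror the structure of the real c2pt/c3pt data.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 5))                              # synthetic stand-in for the c2pt feature vectors
y = X @ rng.normal(size=5) + 0.1 * rng.normal(size=300)    # synthetic target (stand-in for a c3pt value)

X_train, y_train = X[:150], y[:150]     # labelled training fraction
X_bc, y_bc = X[150:200], y[150:200]     # held-out bias-correction fraction
X_test, y_test = X[200:], y[200:]       # remaining fraction treated as unlabelled

gbr = GradientBoostingRegressor(learning_rate=0.05, n_estimators=50, max_depth=3)
gbr.fit(X_train, y_train)

# mean residual on the bias-correction split ...
bias_crxn = np.average(y_bc - gbr.predict(X_bc))

# ... is added back onto every subsequent prediction
y_pred = gbr.predict(X_test) + bias_crxn
print("prediction quality:", np.std(y_test - y_pred) / np.std(y_test))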
Ethan0507/placement-predictor | https://github.com/Ethan0507/placement-predictor | 5564cd9fe21eb3e7878f3b877cce443b999ade5e | 2e3cdfff378be16f11047f2353beee0355a6cb58 | 2f88a9154f8918eb54525ca06b7d884337a04d9f | refs/heads/master | 2023-03-19T05:00:55.427604 | 2021-03-06T14:00:01 | 2021-03-06T14:00:01 | 341,251,802 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6595582365989685,
"alphanum_fraction": 0.6694592833518982,
"avg_line_length": 25.816326141357422,
"blob_id": "29c0a8201dedaddf4e8a97312c25aae2c93795dc",
"content_id": "386f5dc1b549ad3f9be48af22c5410057b41d8d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1313,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 49,
"path": "/server/app.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "const express = require(\"express\");\nconst bodyParser = require(\"body-parser\");\nconst mongoose = require(\"mongoose\");\nconst fs = require(\"fs\");\nconst path = require(\"path\");\n\nconst HttpError = require('./models/http-error');\nconst studentRoutes = require('./routes/student-routes');\nconst tpoRoutes = require('./routes/tpo-routes');\nconst adminRoutes = require('./routes/admin-routes');\nconst sharedRoutes = require('./routes/shared-routes');\n\nconst app = express();\n\napp.use(bodyParser.json());\n\napp.use((req, res, next) => {\n res.setHeader(\"Access-Control-Allow-Origin\", \"*\");\n res.setHeader(\n \"Access-Control-Allow-Headers\",\n \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\n );\n res.setHeader(\"Access-Control-Allow-Methods\", \"GET, POST, PATCH, DELETE\");\n\n next();\n});\n\n// Push the routes here\napp.use('/api/student', studentRoutes);\napp.use('/api/tpo', tpoRoutes);\napp.use('/api/admin', adminRoutes);\napp.use('/api', sharedRoutes);\n\napp.use((req, res, next) => {\n const error = new HttpError(\"Could not find this route.\", 404);\n throw error;\n});\n\nmongoose\n .connect(\n `mongodb+srv://Ethan:[email protected]/placement-prediction?retryWrites=true&w=majority`\n )\n .then(() => {\n console.log(\"Connected\");\n app.listen(5000);\n })\n .catch((err) => {\n console.log(err);\n });"
},
{
"alpha_fraction": 0.7099391222000122,
"alphanum_fraction": 0.7099391222000122,
"avg_line_length": 21.409090042114258,
"blob_id": "b7c3e1a15ec721038e442d2058dabf41cfded74e",
"content_id": "ee0dffa7257719b1e90f73517fa4ab9fe8b6304e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 22,
"path": "/server/routes/student-routes.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "const express = require('express');\nconst { check } = require('express-validator');\n\nconst studentController = require('../controllers/student-controller');\nconst checkAuth = require(\"../middleware/check-auth\");\n\nconst router = express.Router();\n\nrouter.use((req, res, next) => {\n res.locals.accessRole = \"student\";\n checkAuth(req, res, next);\n});\n\nrouter.get('/', studentController.getStudentDetailsById)\n\nrouter.post(\n '/',\n studentController.updateDetails\n);\n\n\nmodule.exports = router;\n"
},
{
"alpha_fraction": 0.6247947216033936,
"alphanum_fraction": 0.6247947216033936,
"avg_line_length": 23.85714340209961,
"blob_id": "dff951c23b71826a6c24a26a33322608f93fad87",
"content_id": "0e17fcd9c0943769029c356dc92c7f2ac5c464b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1218,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 49,
"path": "/client/src/routes.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst ViewStatus = React.lazy(() => import(\"./views/student/ViewStatus\"));\n\nconst ViewDashboard = React.lazy(() => import(\"./views/tpo/ViewDashboard\"));\nconst ViewStudents = React.lazy(() => import(\"./views/tpo/ViewStudents\"));\n\nconst ViewUsers = React.lazy(() => import(\"./views/admin/ViewUsers\"));\nconst ViewStudentDetails = React.lazy(() =>\n import(\"./views/admin/ViewStudentDetails\")\n);\n\nconst routes = [\n { path: \"/\", exact: true, name: \"Home\" },\n { path: \"/student\", name: \"Student\", component: ViewStatus, exact: true },\n { path: \"/student/view-status\", name: \"View Status\", component: ViewStatus },\n {\n path: \"/tpo\",\n component: ViewDashboard,\n exact: true,\n },\n {\n path: \"/tpo/view-dashboard\",\n name: \"View Dashboard\",\n component: ViewDashboard,\n },\n {\n path: \"/tpo/view-students\",\n name: \"View Students\",\n component: ViewStudents,\n },\n {\n path: \"/admin\",\n component: ViewUsers,\n exact: true,\n },\n {\n path: \"/admin/view-users\",\n name: \"View Users\",\n component: ViewUsers,\n },\n {\n path: \"/admin/view-student-details\",\n name: \"View Student Details\",\n component: ViewStudentDetails,\n },\n];\n\nexport default routes;\n"
},
{
"alpha_fraction": 0.5811362862586975,
"alphanum_fraction": 0.589305579662323,
"avg_line_length": 24.40566062927246,
"blob_id": "d6d733914959e449fd282c3b50f71668f3169b3e",
"content_id": "1b3d11d3df993c09be0e864ac5b474698393c580",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2693,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 106,
"path": "/server/controllers/student-controller.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "const { validationResult } = require('express-validator');\nconst mongoose = require('mongoose');\n\nconst HttpError = require('../models/http-error');\nconst User = require('../models/user');\nconst StudentDetail = require('../models/studentdetail');\n\nconst getStudentDetailsById = async (req, res, next) => {\n\n const studentId = res.locals.userData.userId;\n let student;\n\n try {\n student = await StudentDetail.find({ userId: studentId });\n } catch (err) {\n const error = new HttpError(\n 'Something went wrong, could not find details for student.',\n 500\n );\n return next(error);\n }\n \n if (!student) {\n const error = new HttpError(\n 'Could not find details for the provided student-id.',\n 404\n );\n return next(error);\n }\n \n res.status(200).json({ student: student[0] });\n};\n\nconst updateDetails = async (req, res, next) => {\n\n const { name,\n gender,\n xPercentage,\n xiiPercentage,\n degreePercentage,\n etestP,\n mbaP,\n xiiBoard,\n xBoard,\n specialisation,\n workex,\n hscStream,\n degreeT,\n yearOfGrad,\n placement_status } = req.body;\n \n let userDetails;\n\n try {\n userDetails = await User.findById(res.locals.userData.userId);\n } catch (err) {\n const error = new HttpError(\n 'Updating Details failed, please try again!',\n 500\n );\n return next(error);\n }\n \n if (!userDetails) {\n const error = new HttpError('Could not find user for provided id.', 404);\n return next(error);\n }\n \n \n const StudentDetails = new StudentDetail({\n name: name,\n gender: gender,\n xPercentage: xPercentage,\n xiiPercentage: xiiPercentage,\n degreePercentage: degreePercentage,\n etestP: etestP,\n mbaP: mbaP,\n xiiBoard: xiiBoard,\n xBoard: xBoard,\n specialisation: specialisation,\n workex: workex,\n hscStream: hscStream,\n degreeT: degreeT,\n yearOfGrad: yearOfGrad,\n placement_status: placement_status,\n userId: userDetails\n });\n \n try {\n const sess = await mongoose.startSession();\n sess.startTransaction();\n await StudentDetails.save({ session: sess }); \n await sess.commitTransaction();\n } catch (err) {\n const error = new HttpError(\n 'Updating details failed, please try again later!',\n 500\n );\n return next(err);\n }\n \n res.status(201).json({ details: StudentDetails });\n}\n\nexports.updateDetails = updateDetails;\nexports.getStudentDetailsById = getStudentDetailsById;\n"
},
{
"alpha_fraction": 0.6846985816955566,
"alphanum_fraction": 0.6867594122886658,
"avg_line_length": 52.94444274902344,
"blob_id": "abb0a2defe01f91cb04b2fd57460f2e673cd1fd9",
"content_id": "82311526f573033327d5736d99f04c1ab73af27f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1941,
"license_type": "no_license",
"max_line_length": 251,
"num_lines": 36,
"path": "/ml-python/app.py",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, jsonify, json, Response\nfrom flask_cors import CORS, cross_origin\nimport predict\n\napp = Flask(__name__)\n\nCORS(app, supports_credentials=True)\n\[email protected]('/')\ndef home():\n response = Response('Hello', headers={\n 'Access-Control-Allow-Origin' : '*',\n 'Access-Control-Allow-Headers' : 'Origin, X-Requested-With, Content-Type, Accept, Authorization',\n 'Access-Control-Allow-Methods' : 'GET, POST'\n }, mimetype='text/json')\n # response.headers['Access-Control-Allow-Origin'] = '*'\n # response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n # response.headers['Access-Control-Allow-Methods'] = 'GET, POST'\n return response\n\[email protected]('/predict', methods=['POST'])\n@cross_origin()\ndef getPlacementPrediction():\n received = request.get_json()\n # response = Response(predict.predict([received['gender'], received['xPercentage'], received['xiiPercentage'], received['degreePercentage'], received['workex'], received['etestP'], received['specialisation'], received['mbaP']]), headers={\n # 'Access-Control-Allow-Origin' : '*',\n # 'Access-Control-Allow-Headers' : 'Origin, X-Requested-With, Content-Type, Accept, Authorization',\n # 'Access-Control-Allow-Methods' : 'GET, POST'\n # }, mimetype='text/json')\n # response.headers['Access-Control-Allow-Origin'] = '*'\n # response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n # response.headers['Access-Control-Allow-Methods'] = 'GET, POST'\n return predict.predict([received['gender'], float(received['xPercentage']), float(received['xiiPercentage']), float(received['degreePercentage']), received['workex'], float(received['etestP']), received['specialisation'], float(received['mbaP'])])\n\nif __name__ == '__main__':\n app.run(port=7000, debug=True, use_reloader=False)"
},
{
"alpha_fraction": 0.6158088445663452,
"alphanum_fraction": 0.6227940917015076,
"avg_line_length": 31.783132553100586,
"blob_id": "2bb4d58777a0b32a61e2358ca396002002523d0e",
"content_id": "08d4b9fd00ec34219de45f5c4d990f29c8eb5d1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2720,
"license_type": "no_license",
"max_line_length": 269,
"num_lines": 83,
"path": "/server/controllers/tpo-controller.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "const HttpError = require('../models/http-error');\nconst StudentDetail = require('../models/studentdetail');\n\n\n\nconst getStudentDetailsById = async (req, res, next) => {\n\n const studentId = req.params.student;\n\n let student;\n try {\n student = await StudentDetail.find({ userId: studentId });\n } catch (err) {\n const error = new HttpError(\n 'Something went wrong, could not find details for student.',\n 500\n );\n return next(error);\n }\n \n if (!student) {\n const error = new HttpError(\n 'Could not find details for the provided student-id.',\n 404\n );\n return next(error);\n }\n \n res.json({ student: student[0] });\n};\n\nconst getStudentDetails = async (req, res, next) => {\n let students;\n\n try {\n students = await StudentDetail.find({});\n } catch (err) {\n const error = new HttpError(\n 'Something went wrong, could not find students.',\n 500\n );\n return next(error);\n }\n \n if (!students) {\n const error = new HttpError(\n 'Could not find students.',\n 404\n );\n return next(error);\n }\n \n res.json({ students: students });\n}\n\n\nconst getDashboardDetails = async (req, res, next) => {\n\n let students_placed, students_unplaced, m_students_placed, m_students_unplaced, hr_students_placed, hr_students_unplaced;\n try {\n students_placed = await StudentDetail.find({ placement_status : \"placed\" });\n students_unplaced = await StudentDetail.find({ placement_status : \"unplaced\" });\n\n m_students_placed = await StudentDetail.find({ $and: [ { specialisation : \"Mkt&Fin\" }, { placement_status : \"placed\" } ] });\n m_students_unplaced = await StudentDetail.find({ $and: [ { specialisation : \"Mkt&Fin\" }, { placement_status : \"unplaced\" } ] });\n hr_students_placed = await StudentDetail.find({ $and: [ { specialisation : \"Mkt&HR\" }, { placement_status : \"placed\" } ] });\n hr_students_unplaced = await StudentDetail.find({ $and: [ { specialisation : \"Mkt&HR\" }, { placement_status : \"unplaced\" } ] });\n } catch (err) {\n const error = new HttpError(\n 'Something went wrong, could not find students.',\n 500\n );\n return next(error);\n }\n\n res.status(200).json({ total_placed : students_placed.length, total_unplaced : students_unplaced.length, m_placed : m_students_placed.length, m_unplaced : m_students_unplaced.length, hr_placed : hr_students_placed.length, hr_unplaced : hr_students_unplaced.length });\n\n\n}\n\nexports.getStudentDetailsById = getStudentDetailsById;\nexports.getStudentDetails = getStudentDetails;\nexports.getDashboardDetails = getDashboardDetails;"
},
{
"alpha_fraction": 0.5251798629760742,
"alphanum_fraction": 0.5274516940116882,
"avg_line_length": 21.96521759033203,
"blob_id": "038ec44a5a8e2e39f57e51ae8340d912934e7e35",
"content_id": "355a4996f3ed02ee2b6854518bc0dcb3fc9eea8b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2641,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 115,
"path": "/client/src/containers/TheSidebar.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { useContext } from \"react\";\nimport { useSelector, useDispatch } from \"react-redux\";\nimport {\n CCreateElement,\n CSidebar,\n CSidebarBrand,\n CSidebarNav,\n CSidebarNavDivider,\n CSidebarNavTitle,\n CSidebarMinimizer,\n CSidebarNavDropdown,\n CSidebarNavItem,\n} from \"@coreui/react\";\n\nimport CIcon from \"@coreui/icons-react\";\nimport { freeSet } from \"@coreui/icons\";\n\nimport { AuthContext } from \"../context/auth-context\";\n\nconst TheSidebar = () => {\n const dispatch = useDispatch();\n\n const auth = useContext(AuthContext);\n let show = useSelector((state) => state.sidebarShow);\n\n if (auth.isLoggedIn && auth.userRole === \"student\") {\n show = false;\n }\n\n let navigation;\n\n switch (auth.userRole) {\n case \"tpo\":\n navigation = [\n {\n _tag: \"CSidebarNavTitle\",\n _children: [\"TPO Panel\"],\n },\n {\n _tag: \"CSidebarNavItem\",\n name: \"View Dashboard\",\n to: \"/tpo/view-dashboard\",\n icon: \"cil-chart-line\",\n },\n {\n _tag: \"CSidebarNavItem\",\n name: \"View Students\",\n to: \"/tpo/view-students\",\n icon: \"cil-people\",\n },\n ];\n break;\n\n case \"admin\": {\n navigation = [\n {\n _tag: \"CSidebarNavTitle\",\n _children: [\"Admin Panel\"],\n },\n {\n _tag: \"CSidebarNavItem\",\n name: \"View Users\",\n to: \"/admin/view-users\",\n icon: \"cil-people\",\n },\n {\n _tag: \"CSidebarNavItem\",\n name: \"View Student Details\",\n to: \"/admin/view-student-details\",\n icon: \"cil-notes\",\n },\n ];\n break;\n }\n\n default:\n navigation = [];\n break;\n }\n\n return (\n <CSidebar\n show={show}\n onShowChange={(val) => dispatch({ type: \"set\", sidebarShow: val })}\n >\n <CSidebarBrand className=\"d-md-down-none\" to=\"/\">\n {/* <CIcon\n className=\"c-sidebar-brand-full\"\n name=\"logo-negative\"\n height={35}\n /> */}\n <h4 className=\"c-sidebar-brand-full\">Placement Predictor</h4>\n <CIcon\n className=\"c-sidebar-brand-minimized\"\n name=\"sygnet\"\n height={35}\n />\n </CSidebarBrand>\n <CSidebarNav>\n <CCreateElement\n items={navigation}\n components={{\n CSidebarNavDivider,\n CSidebarNavDropdown,\n CSidebarNavItem,\n CSidebarNavTitle,\n }}\n />\n </CSidebarNav>\n <CSidebarMinimizer className=\"c-d-md-down-none\" />\n </CSidebar>\n );\n};\n\nexport default React.memo(TheSidebar);\n"
},
{
"alpha_fraction": 0.4592438340187073,
"alphanum_fraction": 0.4621921479701996,
"avg_line_length": 31.03333282470703,
"blob_id": "476bf4cd1f65b4ebd582c3150421f10d8c8f1328",
"content_id": "8fb8a05f016e836896cc2dfe9cc138fab44df8a4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5766,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 180,
"path": "/client/src/views/pages/login/Login.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { useContext, useEffect, useState } from \"react\";\nimport { useHistory } from \"react-router-dom\";\n\nimport { useFormik } from \"formik\";\nimport * as Yup from \"yup\";\nimport {\n CButton,\n CCard,\n CCardBody,\n CCardGroup,\n CCol,\n CContainer,\n CForm,\n CInput,\n CInputGroup,\n CInputGroupPrepend,\n CInputGroupText,\n CRow,\n CSpinner\n} from \"@coreui/react\";\n\nimport CIcon from \"@coreui/icons-react\";\n\nimport { AuthContext } from \"../../../context/auth-context\";\n\nconst validationSchema = Yup.object({\n username: Yup.string().required(\"Username Required!\"),\n password: Yup.string().required(\"Password required!\"),\n});\n\nconst Login = () => {\n const authContext = useContext(AuthContext);\n\n const history = useHistory();\n\n const [isLoading, setLoading] = useState(false);\n\n useEffect(() => {\n if (authContext.isLoggedIn) {\n if (authContext.userRole === \"student\") {\n history.push(\"/student/view-status\");\n } else if (authContext.userRole === \"tpo\") {\n history.push(\"/tpo/view-dashboard\");\n } else {\n history.push(\"/admin/view-users\");\n }\n }\n }, []);\n\n const { handleSubmit, handleChange, values, errors, touched } = useFormik({\n initialValues: {\n username: \"\",\n password: \"\",\n },\n validationSchema,\n async onSubmit(values, actions) {\n setLoading(true);\n try {\n const response = await fetch(\"http://localhost:5000/api/login\", {\n method: \"POST\",\n body: JSON.stringify({\n username: values.username,\n password: values.password,\n }),\n headers: {\n \"Content-Type\": \"application/json\",\n },\n });\n\n if (response.ok) {\n const responseData = await response.json();\n authContext.login(\n responseData.userId,\n responseData.token,\n responseData.role,\n null\n );\n if (responseData.role === \"student\") {\n history.push(\"/student/view-status\");\n } else if (responseData.role === \"tpo\") {\n history.push(\"/tpo/view-dashboard\");\n } else {\n history.push(\"/admin/view-users\");\n }\n } else {\n actions.setSubmitting(false);\n setLoading(false);\n actions.setErrors({ username: \"Invalid username or password.\", password: \"Invalid username or password.\" });\n }\n } catch (err) {\n actions.setSubmitting(false);\n setLoading(false);\n actions.setErrors({ username: err.message, password: err.message });\n }\n },\n });\n\n return (\n <div className=\"c-app c-default-layout flex-row align-items-center\">\n <CContainer>\n <CRow className=\"justify-content-center\">\n <CCol md=\"8\">\n <CCardGroup>\n <CCard className=\"p-4\">\n <CCardBody>\n <CForm onSubmit={handleSubmit}>\n <h1>Login</h1>\n <p className=\"text-muted\">Sign In to your account</p>\n <CInputGroup className=\"mb-3\">\n <CInputGroupPrepend>\n <CInputGroupText>\n <CIcon name=\"cil-user\" />\n </CInputGroupText>\n </CInputGroupPrepend>\n <CInput\n name=\"username\"\n invalid={errors.username}\n type=\"text\"\n placeholder=\"Username\"\n autoComplete=\"username\"\n value={values.username}\n onChange={handleChange}\n />\n </CInputGroup>\n {errors.username ? (\n <p style={{ color: \"red\" }}>{errors.username}</p>\n ) : null}\n <CInputGroup className=\"mb-4\">\n <CInputGroupPrepend>\n <CInputGroupText>\n <CIcon name=\"cil-lock-locked\" />\n </CInputGroupText>\n </CInputGroupPrepend>\n <CInput\n name=\"password\"\n invalid={errors.password && touched.password}\n type=\"password\"\n placeholder=\"Password\"\n autoComplete=\"current-password\"\n value={values.password}\n onChange={handleChange}\n />\n </CInputGroup>\n {errors.password && touched.password ? 
(\n <p style={{ color: \"red\" }}>{errors.password}</p>\n ) : null}\n <CRow>\n <CCol xs=\"6\">\n {!isLoading && <CButton color=\"primary\" type=\"submit\" className=\"px-4\">\n Login\n </CButton>}\n {isLoading && <CSpinner color=\"info\" />}\n </CCol>\n </CRow>\n </CForm>\n </CCardBody>\n </CCard>\n <CCard\n className=\"text-white bg-primary py-5 d-md-down-none\"\n style={{ width: \"44%\" }}\n >\n <CCardBody className=\"text-center\">\n <div>\n <h2>Placement Predictor</h2>\n <p>\n Tracking the porbability of your placements made easier\n than ever.\n </p>\n </div>\n </CCardBody>\n </CCard>\n </CCardGroup>\n </CCol>\n </CRow>\n </CContainer>\n </div>\n );\n};\n\nexport default Login;\n"
},
{
"alpha_fraction": 0.7498286366462708,
"alphanum_fraction": 0.7498286366462708,
"avg_line_length": 18.197368621826172,
"blob_id": "148b4d8b42be295059e29924e286153811786cca",
"content_id": "3cd299400e2db3e13d7683bbec9a0f3f27f08ac3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1459,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 76,
"path": "/README.md",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "# Placement Predictor\n\nSimple overview of use/purpose.\n\n## Description\n\nA machine learning based web application that allows students to predict the status of their placement based on their respective profiles.\nThe training and placement officer (TPO) can view the prediction results in an interactive dashboard, and also filter and search for students based on various properties.\nAn admin can easily manipulate and verify the data available in the application, if need be.\n\n### Installing\n\n(Python Server setup)\n\nOpen up a terminal in VScode (make sure you're in the 'ml-python' directory)\n\n* Installing virtualenv\n```\npip install virtualenv\n```\n* Creating a virtual environment\n```\nvirtualenv venv\n```\n* Activating virtual environment in Windows\n```\nvenv/Scripts/activate\n```\n* Installing dependencies in virtual environment\n```\npip install -r .\\requirements.txt\n```\n* Running the Flask server\n```\npython app.py\n```\n\n\n\n(React server setup)\n\nOpen another terminal in the same or different VS code window and navigate to 'client' directory\n\n* Move into the client repo\n```\ncd client\n```\n\n* Install necessary dependencies\n```\nnpm install\n```\n\n* Running the React server\n```\nnpm start\n```\n\n(NodeJS server setup)\n\nOpen another terminal in the same or different VS code window and navigate to 'server' directory\n\n* Move into the 'server' repo\n```\ncd client\n```\n\n* Install necessary dependencies\n```\nnpm install\n```\n\n* Running the NodeJS server\n```\nnpm start\n```\n"
},
{
"alpha_fraction": 0.40791377425193787,
"alphanum_fraction": 0.4140012860298157,
"avg_line_length": 38.21917724609375,
"blob_id": "93ac8e06fa4e7d3893bd4e2881d46bcba82ec807",
"content_id": "6626b61f27c0417a5cfcad96b38e11432d48cd0d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 20041,
"license_type": "permissive",
"max_line_length": 281,
"num_lines": 511,
"path": "/client/src/views/student/ViewStatus.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { useContext, useEffect, useState } from \"react\";\nimport { Formik, Form, Field } from \"formik\";\nimport * as Yup from \"yup\";\nimport {\n CAlert,\n CCol,\n CSelect,\n CCardFooter,\n CButton,\n CCardHeader,\n CCardBody,\n CCard,\n CFormGroup,\n CLabel,\n CInput,\n CInvalidFeedback,\n CSpinner,\n} from \"@coreui/react\";\nimport CIcon from \"@coreui/icons-react\";\nimport \"./ViewStatus.css\";\nimport { AuthContext } from \"src/context/auth-context\";\n\n\nconst axios = require('axios');\n\nconst SignupSchema = Yup.object().shape({\n name: Yup.string().required(\"Required\"),\n gender: Yup.string().required(\"Required\"),\n xPercentage: Yup.number(\"Must be a number\")\n .lessThan(100, \"Please enter valid percentage!\")\n .required(\"Required\"),\n xiiPercentage: Yup.number(\"Must be a number\")\n .lessThan(100, \"Please enter valid percentage!\")\n .required(\"Required\"),\n degreePercentage: Yup.number(\"Must be a number\")\n .required(\"Required\")\n .lessThan(100, \"Please enter valid percentage!\"),\n etestP: Yup.number(\"Must be a number\")\n .lessThan(100, \"Please enter valid percentage\")\n .required(\"Required\"),\n mbaP: Yup.number(\"Must be a number\")\n .lessThan(100, \"Please enter valid percentage\")\n .required(\"Required\"),\n xiiBoard: Yup.string(\"Must be string\").required(\"Required\"),\n xBoard: Yup.string(\"Must be string\").required(\"Required\"),\n hscStream: Yup.string(\"Must be string\").required(\"Required\"),\n degreeT: Yup.string(\"Must be string\").required(\"Required\"),\n workex: Yup.string(\"Must be string\").required(\"Required\"),\n specialisation: Yup.string(\"Must be string\").required(\"Required\"),\n yearOfGrad: Yup.number(\"Must be a valid year\").required(\"Required\"),\n});\n\nconst ViewStatus = () => {\n const auth = useContext(AuthContext);\n\n const [isLoading, setLoading] = useState(false);\n\n const [studentDetails, setStudentDetails] = useState({\n name: \"\",\n gender: \"\",\n xPercentage: \"\",\n xiiPercentage: \"\",\n degreePercentage: \"\",\n etestP: \"\",\n mbaP: \"\",\n xiiBoard: \"\",\n xBoard: \"\",\n specialisation: \"\",\n workex: \"\",\n hscStream: \"\",\n degreeT: \"\",\n yearOfGrad: \"\",\n placement_status: \"\",\n });\n\n const [errorOccured, setErrorOccured] = useState(false);\n\n useEffect(() => {\n (async () => {\n setErrorOccured(false);\n setLoading(true);\n try {\n const response = await fetch(\"http://localhost:5000/api/student/\", {\n method: \"GET\",\n headers: {\n Authorization: \"Bearer \" + auth.token,\n },\n });\n\n let responseData;\n\n if (response.ok) {\n setLoading(false);\n responseData = await response.json();\n if (!!responseData.student) {\n setStudentDetails(responseData.student);\n }\n }\n } catch (err) {\n setLoading(false);\n setErrorOccured(true);\n }\n })();\n }, []);\n\n return (\n <div>\n <Formik\n enableReinitialize={true}\n initialValues={studentDetails}\n validationSchema={SignupSchema}\n onSubmit={async (values, actions) => {\n // same shape as initial values\n setLoading(true);\n try {\n var data = JSON.stringify({\"gender\":values.gender,\"xPercentage\":values.xPercentage,\"xiiPercentage\":values.xiiPercentage,\"degreePercentage\":values.degreePercentage,\"workex\":values.workex,\"etestP\":values.etestP,\"specialisation\":values.specialisation,\"mbaP\":values.mbaP});\n\n var config = {\n method: 'post',\n url: 'http://localhost:7000/predict',\n headers: { \n 'Content-Type': 'application/json', \n 'Authorization': 'Basic Og=='\n },\n data : data\n };\n\n 
axios(config)\n .then(async function (res) {\n const placement_data = res.data;\n const response = await fetch(\"http://localhost:5000/api/student/\", {\n method: \"POST\",\n body: JSON.stringify({\n name: values.name,\n gender: values.gender,\n xPercentage: values.xPercentage,\n xiiPercentage: values.xiiPercentage,\n degreePercentage: values.degreePercentage,\n etestP: values.etestP,\n mbaP: values.mbaP,\n xiiBoard: values.xiiBoard,\n xBoard: values.xBoard,\n specialisation: values.specialisation,\n workex: values.workex,\n hscStream: values.hscStream,\n degreeT: values.degreeT,\n yearOfGrad: values.yearOfGrad,\n placement_status: placement_data\n }),\n headers: {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + auth.token,\n },\n });\n \n if (response.ok) {\n setLoading(false);\n window.location.reload();\n }\n })\n .catch(function (error) {\n setLoading(false);\n console.log(error);\n });\n \n } catch (err) {\n actions.setSubmitting(false);\n setLoading(false);\n setErrorOccured(true);\n }\n }}\n >\n {({ errors, values, touched }) => (\n <div>\n {errorOccured && (\n <CAlert color=\"warning\" closeButton>\n Some error occurred, please try again!\n </CAlert>\n )}\n <CCard className=\"card-container\">\n <CCardHeader style={{ fontWeight: \"bold\" }}>\n Check Placement Status\n {/* <small> validation feedback</small> */}\n </CCardHeader>\n <Form>\n <CCardBody>\n {!!studentDetails.name &&\n studentDetails.placement_status === \"placed\" && (\n <CAlert name=\"status\" color=\"success\">\n Placed\n </CAlert>\n )}\n {!!studentDetails.name &&\n studentDetails.placement_status === \"unplaced\" && (\n <CAlert name=\"status\" color=\"warning\">\n Unplaced\n </CAlert>\n )}\n <CFormGroup row>\n <CCol xs=\"12\" xl=\"4\">\n <CLabel htmlFor=\"name\">Name</CLabel>\n <Field\n name=\"name\"\n invalid={!!errors.name}\n as={CInput}\n value={values.name}\n placeholder=\"John Doe\"\n disabled={!!studentDetails.name}\n />\n <CInvalidFeedback>\n {!!errors.name && !!touched.name ? (\n <div>{errors.name}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xs=\"12\" xl=\"4\">\n <CLabel htmlFor=\"gender\">Gender</CLabel>\n <Field\n id=\"select\"\n invalid={!!errors.gender && !!touched.gender}\n name=\"gender\"\n as={CSelect}\n disabled={!!studentDetails.gender}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Male\">Male</option>\n <option value=\"Female\">Female</option>\n {/* <option value=\"Others\">Others</option> */}\n </Field>\n <CInvalidFeedback>\n {errors.gender && touched.gender ? (\n <div>{errors.gender}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"yearOfGrad\">Graduation Year</CLabel>\n <Field\n id=\"select\"\n invalid={!!errors.yearOfGrad && !!touched.yearOfGrad}\n name=\"yearOfGrad\"\n as={CSelect}\n disabled={!!studentDetails.yearOfGrad}\n >\n <option value=\"\">Select a year</option>\n <option value=\"2020\">2020</option>\n <option value=\"2021\">2021</option>\n <option value=\"2022\">2022</option>\n <option value=\"2022\">2023</option>\n <option value=\"2023\">2024</option>\n <option value=\"2024\">2025</option>\n <option value=\"2025\">2026</option>\n </Field>\n <CInvalidFeedback>\n {errors.yearOfGrad && touched.yearOfGrad ? 
(\n <div>{errors.yearOfGrad}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n\n <CFormGroup row>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"xPercentage\">Xth Percentage</CLabel>\n <Field\n name=\"xPercentage\"\n invalid={!!errors.xPercentage && !!touched.xPercentage}\n as={CInput}\n value={values.xPercentage}\n placeholder=\"0-100\"\n disabled={!!studentDetails.xPercentage}\n />\n <CInvalidFeedback>\n {errors.xPercentage && touched.xPercentage ? (\n <div>{errors.xPercentage}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n\n <CCol xl=\"6\">\n <CLabel htmlFor=\"xBoard\">Xth Board</CLabel>\n <Field\n invalid={!!errors.xBoard && !!touched.xBoard}\n name=\"xBoard\"\n as={CSelect}\n disabled={!!studentDetails.xBoard}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Central\">Central</option>\n <option value=\"Others\">Others</option>\n </Field>\n <CInvalidFeedback>\n {errors.xBoard && touched.xBoard ? (\n <div>{errors.xBoard}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xiiPercentage\">XIIth Percentage</CLabel>\n <Field\n name=\"xiiPercentage\"\n invalid={\n !!touched.xiiPercentage && !!errors.xiiPercentage\n }\n as={CInput}\n value={values.xiiPercentage}\n placeholder=\"0-100\"\n disabled={!!studentDetails.xiiPercentage}\n />\n <CInvalidFeedback>\n {errors.xiiPercentage && touched.xiiPercentage ? (\n <div>{errors.xiiPercentage}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xiiBoard\">XIIth Board</CLabel>\n <Field\n invalid={!!errors.xiiBoard && !!touched.xiiBoard}\n name=\"xiiBoard\"\n as={CSelect}\n disabled={!!studentDetails.xiiBoard}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Central\">Central</option>\n <option value=\"Others\">Others</option>\n </Field>\n <CInvalidFeedback>\n {errors.xiiBoard && touched.xiiBoard ? (\n <div>{errors.xiiBoard}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"hscStream\">XIIth Stream</CLabel>\n <Field\n invalid={!!errors.hscStream && !!touched.hscStream}\n name=\"hscStream\"\n as={CSelect}\n disabled={!!studentDetails.hscStream}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Science\">Science</option>\n <option value=\"Commerce\">Commerce</option>\n <option value=\"Arts\">Arts</option>\n </Field>\n <CInvalidFeedback>\n {errors.hscStream && touched.hscStream ? (\n <div>{errors.hscStream}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n\n <CFormGroup row>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"degreePercentage\">\n Enter Degree Percentage\n </CLabel>\n <Field\n name=\"degreePercentage\"\n as={CInput}\n invalid={\n !!touched.degreePercentage &&\n !!errors.degreePercentage\n }\n value={values.degreePercentage}\n placeholder=\"0-100\"\n disabled={!!studentDetails.degreePercentage}\n />\n <CInvalidFeedback>\n {errors.degreePercentage && touched.degreePercentage ? 
(\n <div>{errors.degreePercentage}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"degreeT\">Degree T</CLabel>\n <Field\n invalid={!!errors.degreeT && !!touched.degreeT}\n name=\"degreeT\"\n as={CSelect}\n disabled={!!studentDetails.degreeT}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Science & Technology\">\n Science & Technology\n </option>\n <option value=\"Commerce & Mgmt\">Commerce & Mgmt</option>\n <option value=\"Others\">Others</option>\n </Field>\n <CInvalidFeedback>\n {errors.degreeT && touched.degreeT ? (\n <div>{errors.degreeT}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n\n <CFormGroup row>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"workex\">Work Experience</CLabel>\n <Field\n invalid={!!errors.workex && !!touched.workex}\n name=\"workex\"\n as={CSelect}\n disabled={!!studentDetails.workex}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Yes\">Yes</option>\n <option value=\"No\">No</option>\n </Field>\n <CInvalidFeedback>\n {errors.workex && touched.workex ? (\n <div>{errors.workex}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"etestP\">Enter ETest Percentage</CLabel>\n <Field\n name=\"etestP\"\n invalid={!!touched.etestP && !!errors.etestP}\n as={CInput}\n value={values.etestP}\n placeholder=\"0-100\"\n disabled={!!studentDetails.etestP}\n />\n <CInvalidFeedback>\n {errors.etestP && touched.etestP ? (\n <div>{errors.etestP}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n\n <CFormGroup row>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"specialisation\">Specialisation</CLabel>\n <Field\n invalid={\n !!errors.specialisation && !!touched.specialisation\n }\n name=\"specialisation\"\n as={CSelect}\n disabled={!!studentDetails.specialisation}\n >\n <option value=\"\">Select an option</option>\n <option value=\"Mkt&HR\">Mkt&HR</option>\n <option value=\"Mkt&Fin\">Mkt&Fin</option>\n <option value=\"Others\">Others</option>\n </Field>\n <CInvalidFeedback>\n {errors.specialisation && touched.specialisation ? (\n <div>{errors.specialisation}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n <CCol xl=\"6\">\n <CLabel htmlFor=\"mbaP\">Enter MBA Percentage</CLabel>\n <Field\n name=\"mbaP\"\n invalid={!!touched.mbaP && !!errors.mbaP}\n as={CInput}\n value={values.mbaP}\n placeholder=\"0-100\"\n disabled={!!studentDetails.mbaP}\n />\n <CInvalidFeedback>\n {errors.mbaP && touched.mbaP ? (\n <div>{errors.mbaP}</div>\n ) : null}\n </CInvalidFeedback>\n </CCol>\n </CFormGroup>\n </CCardBody>\n {!studentDetails.name && (\n <CCardFooter>\n {!isLoading && (\n <CButton type=\"submit\" size=\"sm\" color=\"success\">\n <CIcon name=\"cil-scrubber\" /> Submit\n </CButton>\n )}\n {!isLoading && (\n <CButton\n type=\"reset\"\n size=\"sm\"\n color=\"danger\"\n className=\"ml-1\"\n >\n <CIcon name=\"cil-ban\" /> Reset\n </CButton>\n )}\n {isLoading && <CSpinner color=\"info\" />}\n </CCardFooter>\n )}\n </Form>\n </CCard>\n </div>\n )}\n </Formik>\n <br />\n <br />\n </div>\n );\n};\n\nexport default ViewStatus;\n"
},
{
"alpha_fraction": 0.37397539615631104,
"alphanum_fraction": 0.3816204369068146,
"avg_line_length": 36.09941482543945,
"blob_id": "68f2e2be260ce848feed41712fdae597ced0506d",
"content_id": "8f09feaef71e13ff80a99fc7205460afda924309",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12688,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 342,
"path": "/client/src/views/admin/ViewStudentDetails.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { useState, useContext, useEffect } from \"react\";\nimport {\n CLabel,\n CCol,\n CSelect,\n CInput,\n CDataTable,\n CBadge,\n CButton,\n CCollapse,\n CCardBody,\n CFormGroup,\n} from \"@coreui/react\";\nimport { AuthContext } from \"src/context/auth-context\";\nimport { Formik, Form, Field } from \"formik\";\n\nconst ViewStudentDetails = () => {\n const auth = useContext(AuthContext);\n const [db_values, setDBValues] = useState([]);\n\n useEffect(() => {\n (async () => {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/studentdetails\",\n {\n method: \"GET\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n }\n );\n\n let responseData;\n if (response.ok) {\n responseData = await response.json();\n }\n\n if (responseData) {\n setDBValues(responseData);\n }\n } catch (err) {}\n })();\n }, []);\n\n const [details, setDetails] = useState([]);\n // const [items, setItems] = useState(usersData)\n\n const toggleDetails = (index) => {\n const position = details.indexOf(index);\n let newDetails = details.slice();\n if (position !== -1) {\n newDetails.splice(position, 1);\n } else {\n newDetails = [...details, index];\n }\n setDetails(newDetails);\n };\n\n const fields = [\n // { key: \"_id\", _style: { width: \"20%\" } },\n\n { key: \"name\", _style: { width: \"40%\" } },\n { key: \"userId\", _style: { width: \"20%\" } },\n { key: \"placement_status\", _style: { width: \"20%\" } },\n {\n key: \"show_details\",\n label: \"\",\n _style: { width: \"1%\" },\n sorter: false,\n filter: false,\n },\n ];\n\n const getBadge = (placement_status) => {\n switch (placement_status) {\n case \"placed\":\n return \"success\";\n case \"unplaced\":\n return \"danger\";\n default:\n return \"primary\";\n }\n };\n\n return (\n <CDataTable\n items={db_values.studentdetails}\n fields={fields}\n columnFilter\n tableFilter\n footer\n itemsPerPageSelect\n itemsPerPage={5}\n hover\n sorter\n pagination\n scopedSlots={{\n status: (item) => (\n <td>\n <CBadge color={getBadge(item.placement_status)}>\n {item.placement_status}\n </CBadge>\n </td>\n ),\n show_details: (item, index) => {\n return (\n <td className=\"py-2\">\n <CButton\n color=\"primary\"\n variant=\"outline\"\n shape=\"square\"\n size=\"sm\"\n onClick={() => {\n toggleDetails(index);\n }}\n >\n {details.includes(index) ? 
\"Cancel\" : \"Show\"}\n </CButton>\n </td>\n );\n },\n details: (item, index) => {\n // console.log(item);\n return (\n <CCollapse show={details.includes(index)}>\n <CCardBody>\n <Formik\n initialValues={{\n name: item.name,\n gender: item.gender,\n placement_status: item.placement_status,\n yearOfGrad: item.yearOfGrad,\n xBoard: item.xBoard,\n xPercentage: item.xPercentage,\n xiiPercentage: item.xiiPercentage,\n xiiBoard: item.xiiBoard,\n hscStream: item.hscStream,\n etestP: item.etestP,\n degreeT: item.degreeT,\n degreePercentage: item.degreePercentage,\n specialisation: item.specialisation,\n mbaP: item.mbaP,\n workex: item.workex,\n }}\n onSubmit={async (values) => {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/studentdetails/update/\" +\n item._id,\n {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n body: JSON.stringify(values),\n }\n );\n\n let responseData;\n if (response.ok) {\n alert(\"Updated successfully!\");\n }\n // window.location.reload();\n } catch (err) {}\n }}\n >\n <Form>\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"name\">Name</CLabel>\n <Field name=\"name\" as={CInput}></Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"gender\">Gender</CLabel>\n <Field name=\"gender\" as={CSelect}>\n {/* <option value=\"\">Select a role</option> */}\n <option value=\"Male\">Male</option>\n <option value=\"Female\">Female</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"placement_status\">\n Placement Status\n </CLabel>\n <Field name=\"placement_status\" as={CSelect}>\n <option value=\"placed\">Placed</option>\n <option value=\"unplaced\">Unplaced</option>\n </Field>\n </CCol>\n </CFormGroup>\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"yearOfGrad\">Graduation Year</CLabel>\n <Field name=\"yearOfGrad\" as={CSelect}>\n {/* <option value=\"\">Select a year</option> */}\n <option value=\"2020\">2020</option>\n <option value=\"2021\">2021</option>\n <option value=\"2022\">2022</option>\n <option value=\"2022\">2023</option>\n <option value=\"2023\">2024</option>\n <option value=\"2024\">2025</option>\n <option value=\"2025\">2026</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xBoard\">Xth Board</CLabel>\n <Field name=\"xBoard\" as={CSelect}>\n <option value=\"\">Select an option</option>\n <option value=\"Central\">Central</option>\n <option value=\"Others\">Others</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xPercentage\">Xth Percentage</CLabel>\n <Field name=\"xPercentage\" as={CInput}></Field>\n </CCol>\n </CFormGroup>\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xiiBoard\">XIIth Board</CLabel>\n <Field name=\"xiiBoard\" as={CSelect}>\n {/* <option value=\"\">Select an option</option> */}\n <option value=\"Central\">Central</option>\n <option value=\"Others\">Others</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"xiiPercentage\">\n XIIth Percentage\n </CLabel>\n <Field name=\"xiiPercentage\" as={CInput}></Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"hscStream\">XIIth Stream</CLabel>\n <Field name=\"hscStream\" as={CSelect}>\n {/* <option value=\"\">Select an option</option> */}\n <option value=\"Science\">Science</option>\n <option value=\"Commerce\">Commerce</option>\n <option value=\"Arts\">Arts</option>\n </Field>\n </CCol>\n </CFormGroup>\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"degreeT\">DegreeT</CLabel>\n <Field 
name=\"degreeT\" as={CSelect}>\n {/* <option value=\"\">Select an option</option> */}\n <option value=\"Science & Technology\">\n Science & Technology\n </option>\n <option value=\"Commerce & Mgmt\">\n Commerce & Mgmt\n </option>\n <option value=\"Others\">Others</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"etestP\">EtestP</CLabel>\n <Field name=\"etestP\" as={CInput}></Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"degreePercentage\">\n Degree Percentage\n </CLabel>\n <Field name=\"degreePercentage\" as={CInput}></Field>\n </CCol>\n </CFormGroup>\n <CFormGroup row>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"specialisation\">\n MBA specialisation\n </CLabel>\n <Field name=\"specialisation\" as={CSelect}>\n <option value=\"Mkt&HR\">Mkt&HR</option>\n <option value=\"Mkt&Fin\">Mkt&Fin</option>\n <option value=\"Others\">Others</option>\n </Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"mbaP\">MBA Percentage</CLabel>\n <Field name=\"mbaP\" as={CInput}></Field>\n </CCol>\n <CCol xl=\"4\">\n <CLabel htmlFor=\"workex\">Work Experience</CLabel>\n <Field name=\"workex\" as={CSelect}>\n <option value=\"Yes\">Yes</option>\n <option value=\"No\">No</option>\n </Field>\n </CCol>\n </CFormGroup>\n <CButton type=\"submit\" size=\"sm\" color=\"info\">\n Update\n </CButton>\n <CButton\n size=\"sm\"\n color=\"danger\"\n className=\"ml-1\"\n onClick={async () => {\n if (\n window.confirm(\n \"Are you sure you want to delete the student details?\"\n )\n ) {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/studentdetails/delete/\" +\n item._id,\n {\n method: \"DELETE\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n }\n );\n\n let responseData;\n if (response.ok) {\n alert(\"Delete successful!\");\n }\n window.location.reload();\n } catch (err) {}\n }\n }}\n >\n Delete\n </CButton>\n </Form>\n </Formik>\n </CCardBody>\n </CCollapse>\n );\n },\n }}\n />\n );\n};\n\nexport default ViewStudentDetails;\n"
},
{
"alpha_fraction": 0.3700818419456482,
"alphanum_fraction": 0.3738290071487427,
"avg_line_length": 30.107362747192383,
"blob_id": "8209b2a18793c4a3e877fdb49136b6b2ff9bd651",
"content_id": "045b909f1292237c26da448821089c6a1b479da3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 10141,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 326,
"path": "/client/src/views/admin/ViewUsers.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { useState, useContext, useEffect } from \"react\";\nimport {\n CLabel,\n CCol,\n CSelect,\n CInput,\n CDataTable,\n CBadge,\n CButton,\n CCollapse,\n CCardBody,\n CModal,\n CModalHeader,\n CModalBody,\n CModalFooter,\n CForm,\n CFormGroup,\n CTooltip,\n} from \"@coreui/react\";\n\nimport { CIcon } from \"@coreui/icons-react\";\n\nimport { AuthContext } from \"src/context/auth-context\";\nimport { Formik, Form, Field } from \"formik\";\nimport * as Yup from \"yup\";\n\nconst ViewUsers = () => {\n //Add modal\n const [modal, setModal] = useState(false);\n const toggle = () => {\n setModal(!modal);\n };\n\n const auth = useContext(AuthContext);\n const [db_values, setDBValues] = useState([]);\n\n useEffect(() => {\n (async () => {\n try {\n const response = await fetch(\"http://localhost:5000/api/admin/users\", {\n method: \"GET\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n });\n\n let responseData;\n if (response.ok) {\n responseData = await response.json();\n }\n\n if (responseData) {\n setDBValues(responseData.users);\n }\n } catch (err) {}\n })();\n }, []);\n\n const [details, setDetails] = useState([]);\n // const [items, setItems] = useState(usersData)\n\n const toggleDetails = (index) => {\n const position = details.indexOf(index);\n let newDetails = details.slice();\n if (position !== -1) {\n newDetails.splice(position, 1);\n } else {\n newDetails = [...details, index];\n }\n setDetails(newDetails);\n };\n\n const fields = [\n { key: \"username\", _style: { width: \"40%\" } },\n { key: \"password\", _style: { width: \"20%\" } },\n { key: \"role\", _style: { width: \"20%\" } },\n {\n key: \"show_details\",\n label: \"\",\n _style: { width: \"1%\" },\n sorter: false,\n filter: false,\n },\n ];\n\n const getBadge = (placement_status) => {\n switch (placement_status) {\n case \"placed\":\n return \"success\";\n case \"unplaced\":\n return \"danger\";\n default:\n return \"primary\";\n }\n };\n\n return (\n <>\n <CButton\n className=\"float-right ml-4\"\n shape=\"pill\"\n color=\"info\"\n onClick={toggle}\n >\n Add User\n </CButton>\n <CModal show={modal} onClose={toggle}>\n <Formik\n initialValues={{ username: \"\", password: \"\", role: \"\" }}\n onSubmit={async (values) => {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/users/new\",\n {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n body: JSON.stringify(values),\n }\n );\n\n let responseData;\n if (response.ok) {\n alert(\"User Added Successfully!\");\n }\n window.location.reload();\n } catch (err) {}\n }}\n >\n <Form>\n <CModalHeader closeButton>Enter details of new user</CModalHeader>\n\n <CModalBody>\n <CFormGroup>\n <Field\n as={CInput}\n type=\"string\"\n placeholder=\"Username\"\n name=\"username\"\n />\n <br />\n <Field\n as={CInput}\n type=\"password\"\n placeholder=\"Password\"\n name=\"password\"\n />\n <br />\n <Field\n as={CSelect}\n type=\"string\"\n placeholder=\"Role\"\n name=\"role\"\n >\n <option value=\"\">Select a role</option>\n <option value=\"student\">Student</option>\n <option value=\"tpo\">TPO</option>\n <option value=\"admin\">Admin</option>\n </Field>\n </CFormGroup>\n </CModalBody>\n <CModalFooter>\n <CButton color=\"primary\" type=\"submit\">\n Add\n </CButton>{\" \"}\n <CButton color=\"secondary\" onClick={toggle}>\n Cancel\n </CButton>\n </CModalFooter>\n </Form>\n </Formik>\n </CModal>\n <CDataTable\n 
items={db_values}\n fields={fields}\n columnFilter\n tableFilter\n footer\n itemsPerPageSelect\n itemsPerPage={5}\n hover\n sorter\n pagination\n scopedSlots={{\n status: (item) => (\n <td>\n <CBadge color={getBadge(item.status)}>{item.status}</CBadge>\n </td>\n ),\n show_details: (item, index) => {\n return (\n <td className=\"py-2\">\n <CButton\n color=\"primary\"\n variant=\"outline\"\n shape=\"square\"\n size=\"sm\"\n onClick={() => {\n toggleDetails(index);\n }}\n >\n {details.includes(index) ? \"Cancel\" : \"Show\"}\n </CButton>\n </td>\n );\n },\n details: (item, index) => {\n return (\n <CCollapse show={details.includes(index)}>\n <CCardBody>\n <Formik\n enableReinitialize={true}\n initialValues={{\n username: item.username,\n password: \"\",\n role: item.role,\n }}\n onSubmit={async (values) => {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/users/update/\" +\n item._id,\n {\n method: \"POST\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n body: JSON.stringify(values),\n }\n );\n\n let responseData;\n if (response.ok) {\n alert(\"User Updated Successfully!\");\n }\n window.location.reload();\n } catch (err) {}\n }}\n >\n <Form>\n <CFormGroup row>\n <CCol xs=\"12\" xl=\"6\">\n <CLabel htmlFor=\"username\">Username</CLabel>\n <Field name=\"username\" as={CInput} key={item._id +'_username'}></Field>\n </CCol>\n <CCol xs=\"12\" xl=\"6\">\n <CLabel htmlFor=\"password\">\n Password{\" \"}\n <CTooltip content=\"Only enter text, if you want to update password. Else, please leave blank.\">\n <CIcon name={\"cilLightbulb\"} />\n </CTooltip>\n </CLabel>\n <Field name=\"password\" as={CInput} key={item._id +'_password'}></Field>\n </CCol>\n </CFormGroup>\n <CFormGroup row>\n <CCol xs=\"12\" xl=\"6\">\n <CLabel htmlFor=\"role\">Role</CLabel>\n <Field\n as={CSelect}\n type=\"string\"\n placeholder=\"Role\"\n name=\"role\"\n key={item._id+'_role'}\n >\n <option value=\"\">Select a role</option>\n <option value=\"student\">Student</option>\n <option value=\"tpo\">TPO</option>\n <option value=\"admin\">Admin</option>\n </Field>\n </CCol>\n </CFormGroup>\n\n <CButton type=\"submit\" size=\"sm\" color=\"info\">\n Update\n </CButton>\n <CButton\n size=\"sm\"\n color=\"danger\"\n className=\"ml-1\"\n onClick={async () => {\n if (\n window.confirm(\n \"Are you sure you want to delete this user?\"\n )\n ) {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/admin/users/delete/\" +\n item._id,\n {\n method: \"DELETE\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n }\n );\n\n if (response.ok) {\n alert(\"Delete successful!\");\n }\n window.location.reload();\n } catch (err) {}\n }\n }}\n >\n Delete\n </CButton>\n </Form>\n </Formik>\n </CCardBody>\n </CCollapse>\n );\n },\n }}\n />\n </>\n );\n};\n\nexport default ViewUsers;\n"
},
{
"alpha_fraction": 0.6711007356643677,
"alphanum_fraction": 0.688393771648407,
"avg_line_length": 32.797752380371094,
"blob_id": "f592b879e4e6f8bfeebd0b79499ae6aeb0a5967e",
"content_id": "eeeb0689c69abdca7cfeae559d46703d5482a3b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3007,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 89,
"path": "/ml-python/predict.py",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import warnings\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn \nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nwarnings.filterwarnings('ignore')\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nimport pickle\n\n\n#Loading the single csv file to a variable named 'placement'\nplacement=cereal_df =pd.read_csv(r'Placement_Data_Full_Class.csv')\nplacement_copy=placement.copy()\n\n# Cleaning the data\nplacement_copy['salary'].fillna(value=0, inplace=True)\nplacement_copy.drop(['sl_no','ssc_b','hsc_b'], axis = 1,inplace=True) \n\n#clearing outliers\nQ1 = placement_copy['hsc_p'].quantile(0.25)\nQ3 = placement_copy['hsc_p'].quantile(0.75)\nIQR = Q3 - Q1 #IQR is interquartile range. \n\nfilter = (placement_copy['hsc_p'] >= Q1 - 1.5 * IQR) & (placement_copy['hsc_p'] <= Q3 + 1.5 *IQR)\nplacement_filtered=placement_copy.loc[filter]\n\n\n# Make copy to avoid changing original data \nobject_cols=['gender','workex','specialisation','status']\n\n# Apply label encoder to each column with categorical data\nlabel_encoder = LabelEncoder()\nfor col in object_cols:\n placement_filtered[col] = label_encoder.fit_transform(placement_filtered[col])\n\ndummy_hsc_s=pd.get_dummies(placement_filtered['hsc_s'], prefix='dummy')\ndummy_degree_t=pd.get_dummies(placement_filtered['degree_t'], prefix='dummy')\nplacement_coded = pd.concat([placement_filtered,dummy_hsc_s,dummy_degree_t],axis=1)\nplacement_coded.drop(['hsc_s','degree_t','salary'],axis=1, inplace=True)\n\nfeature_cols=['gender','ssc_p','hsc_p','hsc_p','workex','etest_p','specialisation','mba_p',\n 'dummy_Arts','dummy_Commerce','dummy_Science','dummy_Comm&Mgmt','dummy_Others','dummy_Sci&Tech']\n\nX=placement_coded.drop(['status'],axis=1)\ny=placement_coded.status\n\n# Train-Test split\nX_train, X_test, y_train, y_test = train_test_split(X, y,train_size=0.8,random_state=1)\n\n\n# Log regression model creation\nlogreg = LogisticRegression(solver='liblinear', random_state=0).fit(X_train, y_train)\n# gender, ssc, hsc, degree, workex, e-test, specialisation, mba\ndef predict(details):\n\n if details[0] ==\"Male\":\n details[0] = 1\n else:\n details[0] = 0\n # 1 ip_l.append(float(input(\"Enter ssc_p\")))\n # 2 p_l.append(float(input(\"Enter hsc_p\")))\n # 3 ip_l.append(float(input(\"Enter degree %\")))\n if details[4] ==\"yes\" or \"Yes\":\n details[4] = 1\n else:\n details[4] = 0\n # 5 ip_l.append(float(input(\"Enter e test score\")))\n if details[6] != \"\":\n details[6] = 1\n else:\n details[6] = 0\n \n details = details + [0, 0, 0, 0, 0, 0]\n ip=[details]\n\n\n y_pred=logreg.predict(ip)\n if (y_pred[0]==1):\n print(\"Prediction for this request was: 'placed'\")\n return \"placed\"\n else:\n print(\"Prediction for this request was: 'unplaced'\")\n return \"unplaced\""
},
{
"alpha_fraction": 0.4494030773639679,
"alphanum_fraction": 0.4619101881980896,
"avg_line_length": 26.700786590576172,
"blob_id": "3a80b5d5acdc9d6dcf33e84fb4bb1ea9188a081e",
"content_id": "3e4b4134aab49e6bb43df100abdef225647d857d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3518,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 127,
"path": "/client/src/views/tpo/ViewDashboard.js",
"repo_name": "Ethan0507/placement-predictor",
"src_encoding": "UTF-8",
"text": "import React, { lazy, useContext, useEffect, useState } from \"react\";\nimport { CRow, CCol, CWidgetBrand, CCard, CCardHeader } from \"@coreui/react\";\nimport CIcon from \"@coreui/icons-react\";\nimport { CChartDoughnut } from \"@coreui/react-chartjs\";\nimport { CWidgetProgressIcon } from \"@coreui/react\";\nimport { AuthContext } from \"src/context/auth-context\";\nconst ViewDashboard = () => {\n const auth = useContext(AuthContext);\n\n const [db_values, setDBValues] = useState({\n total_placed: 0,\n total_unplaced: 0,\n m_placed: 0,\n m_unplaced: 0,\n hr_placed: 0,\n hr_unplaced: 0,\n });\n\n useEffect(() => {\n (async () => {\n try {\n const response = await fetch(\n \"http://localhost:5000/api/tpo/view-dashboard\",\n {\n method: \"GET\",\n headers: {\n \"Content-Type\": \"application/json\",\n Authorization: \"Bearer \" + auth.token,\n },\n }\n );\n\n let responseData;\n if (response.ok) {\n responseData = await response.json();\n setDBValues(responseData);\n }\n } catch (err) {}\n })();\n }, []);\n\n return (\n <>\n <CRow>\n <CCol sm=\"6\" lg=\"6\">\n <CWidgetProgressIcon\n header={db_values.total_placed}\n text=\"Total Placed Students\"\n color=\"gradient-success\"\n value=\"100\"\n inverse\n >\n <CIcon name=\"cil-userFollow\" height=\"36\" />\n </CWidgetProgressIcon>\n </CCol>\n\n <CCol sm=\"6\" lg=\"6\">\n <CWidgetProgressIcon\n header={db_values.total_unplaced}\n text=\"Total Unplaced Students\"\n color=\"gradient-warning\"\n value=\"100\"\n inverse\n >\n <CIcon name=\"cil-basket\" height=\"36\" />\n </CWidgetProgressIcon>\n </CCol>\n </CRow>\n <CRow>\n <CCol>\n <CCard>\n <CCardHeader>Marketing and Finance</CCardHeader>\n <CChartDoughnut\n datasets={[\n {\n backgroundColor: [\"#41B883\", \"#E46651\"],\n data: [db_values.m_placed, db_values.m_unplaced],\n },\n ]}\n labels={[\"Placed\", \"Unplaced\"]}\n options={{\n tooltips: {\n enabled: true,\n },\n }}\n />\n <br />\n <CWidgetBrand\n rightHeader={db_values.m_placed}\n rightFooter=\"PLACED\"\n leftHeader={db_values.m_unplaced}\n leftFooter=\"UNPLACED\"\n ></CWidgetBrand>\n </CCard>\n </CCol>\n <CCol>\n <CCard>\n <CCardHeader>Marketing and HR</CCardHeader>\n <CChartDoughnut\n datasets={[\n {\n backgroundColor: [\"#41B883\", \"#E46651\"],\n data: [db_values.hr_placed, db_values.hr_unplaced],\n },\n ]}\n labels={[\"Placed\", \"Unplaced\"]}\n options={{\n tooltips: {\n enabled: true,\n },\n }}\n />\n <br />\n <CWidgetBrand\n rightHeader={db_values.hr_placed}\n rightFooter=\"PLACED\"\n leftHeader={db_values.hr_unplaced}\n leftFooter=\"UNPLACED\"\n ></CWidgetBrand>\n </CCard>\n </CCol>\n </CRow>\n </>\n );\n};\n\nexport default ViewDashboard;\n"
}
] | 14 |
tejasghutukade/dashcam | https://github.com/tejasghutukade/dashcam | b3a27ef41a5067baa36fb277a1781f8c66023396 | 1652d38b917d33f5726c48d5d994d8e6474e3538 | 6ec3f6a6aa330915ac6cdb5e2468ed5b80e25a23 | refs/heads/master | 2023-04-04T05:17:12.344484 | 2021-04-18T14:47:11 | 2021-04-18T14:47:11 | 312,849,400 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6343114972114563,
"alphanum_fraction": 0.6576372981071472,
"avg_line_length": 27.913043975830078,
"blob_id": "c3864f81b739b45f6887764f83dbe6c5aa5e409f",
"content_id": "0e172780cfd132b0f539c1adc0edbca4f7c05475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 46,
"path": "/streamCamera2.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import io\nimport picamera\nimport socket\n\n\n# An output (as far as picamera is concerned), is just a filename or an object\n# which implements a write() method (and optionally the flush() and close()\n# methods)\nclass MyOutput(object):\n def __init__(self, filename, sock):\n self.output_file = io.open(filename, 'wb')\n #self.output_sock = sock.makefile('wb')\n\n def write(self, buf):\n self.output_file.write(buf)\n #print(buf)\n #self.output_sock.write(buf)\n\n def flush(self):\n self.output_file.flush()\n print(\"FLsh\")\n #self.output_sock.flush()\n\n def close(self):\n self.output_file.close()\n print(\"close\")\n #self.output_sock.close()\n\n\n# Connect a socket to a remote server on port 8000\nsock = socket.socket()\n#sock.connect(('10.0.0.13', 5000))\n\nwith picamera.PiCamera() as camera:\n camera.resolution = (640, 480)\n camera.framerate = 24\n\n # Construct an instance of our custom output splitter with a filename\n # and a connected socket\n my_output = MyOutput('output1.h264', sock)\n\n # Record video to the custom output (we need to specify the format as\n # the custom output doesn't pretend to be a file with a filename)\n camera.start_recording(my_output, format='h264')\n camera.wait_recording(60)\n camera.stop_recording()"
},
{
"alpha_fraction": 0.5810055732727051,
"alphanum_fraction": 0.6052141785621643,
"avg_line_length": 25.850000381469727,
"blob_id": "67fa5b471ed5fc4f352a82bd60fa41a570dc8b5e",
"content_id": "2d9a9e4ceaaff222fc4ce88c40c4efea6201be8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 40,
"path": "/testupload.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import pysftp\nimport os\n\ncommand = \"sudo nslookup aatuploadserver | tail -2 | head -1 | awk '{print $2}'\"\nresult = os.popen(command)\n\nipaddress = list(result)\nif ipaddress:\n print(ipaddress[0].strip())\n\nmyHostname = \"aatuploadserver.local\"\nmyUsername = \"pi\"\nmyPassword = \"aatracking\"\n\nmyfiles = os.listdir(\"./\")\n# print(list(myfiles))\n\n# if os.path.exists('./video_1601221500s.h264'):\n# print(\"file exist\")\n# else:\n# print(\"file doesn not exist\")\n\n# for y in range(10):\n# for i in range(10):\n# if(i == 5):\n# print(\"5 found breaking now\")\n# break\n# print(i)\ncnopts = pysftp.CnOpts()\ncnopts.hostkeys = None\nwith pysftp.Connection(host=myHostname, username=myUsername, password=myPassword, cnopts=cnopts) as sftp:\n\n for _file in myfiles:\n if(\".h264\" in _file):\n print(_file)\n remoteFilepath = '../../media/pi/aatstorage/' + _file\n localFilepath = _file\n sftp.put(localFilepath, remoteFilepath)\n print(\"uploaded file -\" + _file)\n os.remove(_file)\n"
},
{
"alpha_fraction": 0.5641025900840759,
"alphanum_fraction": 0.5641025900840759,
"avg_line_length": 26.399999618530273,
"blob_id": "ad0eca877c5b0ba890941f5484d27f3ef16eac82",
"content_id": "00522f77e280ec513e9693627875d51c3996581f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 10,
"path": "/location.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import gpsd-json\n\n\n\ndef getPosition(gps):\n nx=gpsd.next()\n if nx['class'] == 'TPV':\n latitude = getattr(nx, 'lat', \"Unknown\")\n longitude = getattr(nx, 'lan', \"Unknown\")\n print(\"Your Position : lon = \" + str(longitude) + \", lat =\" + str(latitude))"
},
{
"alpha_fraction": 0.556506872177124,
"alphanum_fraction": 0.573630154132843,
"avg_line_length": 30.58108139038086,
"blob_id": "66f6695ba47f45699e986ae7e309bf168f699856",
"content_id": "7845cdd5d585ecbbbfb05a488bcb6df42f5d93a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2336,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 74,
"path": "/video.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "from picamera import PiCamera\nimport os\nimport time\nimport json\n\n\ndataLogFile = 'dataLog.json'\nconfigFile = '/home/pi/cameraProject/cameraConfig.json'\nlengthOfVideo = 60\nframeRate = 30\n\nclass RecordVideo:\n def __init__(self, *args, **kwargs):\n self.dataLogFile = kwargs['dataLogFile']\n self.configFile = kwargs['configFile']\n self.camera = PiCamera()\n self.lengthOfVideo = 180\n self.camera.framerate = 30\n self.camera.resolution = (800,600)\n self.camera.annotate_text = kwargs['annotate_text']\n if os.path.exists(dataLogFile):\n pass\n else:\n data = { \n 'videoFiles': []\n }\n with open(self.dataLogFile, 'w') as outfile: \n json.dump(data, outfile)\n\n with open(self.configFile) as json_file:\n config = json.load(json_file)\n self.camera.resolution = (config['resolution']['x'],config['resolution']['y'])\n self.lengthOfVideo = config['interval'] * 60\n self.camera.frameRate = config['framerate']\n\n\n def getStorageleft(self):\n path = '/'\n st = os.statvfs(path)\n # free blocks available * fragment size\n bytes_avail = (st.f_bavail * st.f_frsize)\n gigabytes = bytes_avail / 1024 / 1024 / 1024\n \n return gigabytes\n\n def getFilename(self):\n storageLeft = self.getStorageleft()\n timestamp = int(time.time()) \n fname = 'video_'+str(timestamp)+'.h264'\n \n a_file = open(self.dataLogFile, \"r\") # read dataLog File\n json_object = json.load(a_file)\n a_file.close()\n videoFiles = json_object['videoFiles']\n if storageLeft < 1:\n if os.path.exists(videoFiles[0]):\n os.remove(videoFiles[0])\n \n del videoFiles[0]\n #delete file\n videoFiles.append(fname)\n \n json_object['videoFiles'] = videoFiles\n a_file = open(dataLogFile, \"w\")\n json.dump(json_object, a_file,indent = 4)\n a_file.close() \n filename = '/home/pi/cameraProject/'+fname\n \n return filename\n\n def startVideo(self,annotate_text):\n self.camera.start_preview(alpha=200)\n filename = self.getFilename()\n self.camera.start_recording(filename)"
},
{
"alpha_fraction": 0.5521281361579895,
"alphanum_fraction": 0.5660685300827026,
"avg_line_length": 29.369369506835938,
"blob_id": "24474ef2a4369f3b8d3f7878b8a6ab0fabcd2f08",
"content_id": "5ee9063b35ee1a3d71960d76244adc70f3396755",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6743,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 222,
"path": "/camera.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nfrom picamera import PiCamera\nimport time\nimport datetime\nimport json\nimport os\nimport serial\nfrom gps import *\nimport subprocess\nimport pysftp\nfrom wifiConnect import Finder\n\nsubprocess.call('sudo systemctl stop gpsd.socket' , shell=True)\nsubprocess.call('sudo gpsd /dev/serial0 -F /var/run/gpsd.sock',shell=True)\n\ndataLogFile = 'dataLog.json'\n\nlengthOfVideo = 60\nframeRate = 30\n\nmyHostname = \"10.130.247.140\"\nmyUsername = \"pi\"\nmyPassword = \"aatracking\"\n\nmediaStorageLocation = '../../media/pi/aatstorage/'\n\n\ndef uploadCallback(a,b):\n    print(\"\\r\"+str(a/1000000) + \"Mb uploaded of \" + str(b/1000000)+\"MB\",end='', flush=True)\n\ndef uploadFiles():\n    print(\"u have 5 seconds to turn on the ignition\")\n    time.sleep(5)\n    server_name = \"OakOne\"\n    password = \"ganesha2301\"\n    interface_name = \"wlan0\" # i. e wlp2s0 \n    F = Finder(server_name=server_name,password=password,interface=interface_name)\n    counter = 0\n    response = F.run()\n    while (response == False):\n        counter += 1\n        if(counter < 60):\n            time.sleep(2)\n            print('waiting for a second to try again')\n            response = F.run()\n        else:\n            break\n    print(\"**************************** did come here\")\n    if (response==True) : \n        print(\"Starting Upload\")\n        a_file = open(dataLogFile, \"r\") # read dataLog File\n        json_object = json.load(a_file)\n        a_file.close()\n        videoFiles = json_object['videoFiles']\n        _videoFiles = videoFiles\n        cnopts = pysftp.CnOpts()\n        cnopts.hostkeys = None\n        print(\"**************************** hostkeys none\")\n        with pysftp.Connection(host=myHostname,username=myUsername,password=myPassword,cnopts=cnopts) as sftp:\n            cnt = 0\n            for _file in videoFiles:\n                if os.path.exists(_file):\n                    print(\"starting upload -\" + _file)\n                    remoteFilepath = mediaStorageLocation + _file\n                    localFilepath = _file\n                    #del _videoFiles[cnt]\n                    sftp.put(localFilepath,remoteFilepath,uploadCallback)\n                    os.remove(_file)\n                    print(\"Uploaded File \" + _file)\n\n            myfiles= os.listdir(\"./\")\n            print(myfiles)\n            for __file in myfiles: \n                if(\".h264\" in __file):\n                    print(__file)\n                    remoteFilepath = mediaStorageLocation + __file\n                    localFilepath = __file\n                    sftp.put(localFilepath,remoteFilepath)\n                    print(\"uploaded file -\" + _file) \n\n            sftp.close()\n        json_object['videoFiles'] = _videoFiles\n        a_file = open(dataLogFile, \"w\")\n        json.dump(json_object, a_file,indent = 4)\n        a_file.close() \n        return True\n    else :\n        return False\n\n\ndef getPositionData(gps):\n    nx = gpsd.next() \n    position = \"\"\n    # For a list of all supported classes and fields refer to:\n    # https://gpsd.gitlab.io/gpsd/gpsd_json.html\n    if nx['class'] == 'TPV':\n        latitude = getattr(nx,'lat', \"Unknown\")\n        longitude = getattr(nx,'lon', \"Unknown\")\n        speed = getattr(nx,'speed',\"Unknown\")\n        time = getattr(nx,'time',\"Unknown\")\n        alt = getattr(nx,'alt',\"Unknown\")\n        position = \"Your position: lon = \" + str(longitude) + \", lat = \" + str(latitude)+\", speed =\"+ str(speed) + \", time = \" + str(time) + \", alt = \" + str(alt)\n    \n    return position\n\ndef getStorageleft():\n    path = '/'\n    st = os.statvfs(path)\n    # free blocks available * fragment size\n    bytes_avail = (st.f_bavail * st.f_frsize)\n    gigabytes = bytes_avail / 1024 / 1024 / 1024\n    \n    return gigabytes\n\n\ndef getFilename():\n    storageLeft = getStorageleft()\n    timestamp = int(time.time()) \n    fname = 'video_'+str(timestamp)+'.h264'\n    \n    a_file = open(dataLogFile, \"r\") # read dataLog File\n    json_object = json.load(a_file)\n    a_file.close()\n    videoFiles = json_object['videoFiles']\n    if storageLeft < 1:\n        if os.path.exists(videoFiles[0]):\n            os.remove(videoFiles[0])\n        \n        del videoFiles[0]\n        #delete file\n    videoFiles.append(fname)\n    \n    json_object['videoFiles'] = videoFiles\n    a_file = open(dataLogFile, \"w\")\n    json.dump(json_object, a_file,indent = 4)\n    a_file.close() \n    filename = '/home/pi/cameraProject/'+fname\n    \n    return filename\n\nif os.path.exists(dataLogFile):\n    pass\nelse:\n\n    data = { \n        'videoFiles': []\n    }\n    with open(dataLogFile, 'w') as outfile: \n        json.dump(data, outfile)\n\n\ngpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)\n\nstorageLeft = getStorageleft()\n\nshouldStart = True\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(17, GPIO.IN)\nGPIO.setup(18,GPIO.OUT,initial = GPIO.HIGH)\nGPIO.setup(24,GPIO.OUT,initial = GPIO.HIGH)\nprint(\"GPIO 18 set to high\")\n\ntry:\n    ignition = GPIO.input(17) \n    print(\"Ignition - \" + str(ignition))\n\n    if (shouldStart):\n        camera = PiCamera()\n        path = '/'\n        st = os.statvfs(path)\n\n        with open('/home/pi/cameraProject/cameraConfig.json') as json_file:\n            config = json.load(json_file)\n            camera.resolution = (config['resolution']['x'],config['resolution']['y'])\n            lengthOfVideo = config['interval'] * 60\n            frameRate = config['framerate']\n\n        # camera.resolution = (800,600)\n        now = datetime.datetime.now()\n        camera.annotate_text = now.strftime('%Y-%m-%dT%H:%M:%S')\n        \n        camera.framerate = frameRate\n        count = 0\n        while ignition: #While Ignition Is on\n            \n            ignition = GPIO.input(17) # Check for ignition \n\n            timestamp = int(time.time()) \n            filename = getFilename()\n            print('Starting - ' + filename)\n            camera.start_preview(alpha=200)\n            camera.start_recording(filename)\n\n            for i in range(lengthOfVideo):\n                position = getPositionData(gpsd)\n                now = datetime.datetime.now()\n                camera.annotate_text = now.strftime('%Y-%m-%dT%H:%M:%S') + \" \" + position\n                time.sleep(1)\n                ignition = GPIO.input(17) # Check for ignition \n                if(ignition==False):\n                    break\n            \n            camera.stop_preview()\n            camera.stop_recording()\n            print('Finished - ' + filename)\n            st = os.statvfs(path)\n\n    #Handle Upload\n    isUpolad = uploadFiles() \n    GPIO.output(18,GPIO.LOW)\n\nexcept KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:\n    print(\"Keyboard interrupt\")\n    GPIO.cleanup() \n\nexcept Exception as e:\n    print(e) \n\nfinally:\n    print(\"clean up\") \n    GPIO.cleanup()\n\n"
},
{
"alpha_fraction": 0.4823538064956665,
"alphanum_fraction": 0.4920741617679596,
"avg_line_length": 35.741756439208984,
"blob_id": "84bf0be45920e77f1f9df57b30cebb1cc3b4e517",
"content_id": "0f264691c4be9fc4498904130648eb77e02c54d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13374,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 364,
"path": "/aatDashCam.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nfrom picamera import PiCamera\nimport time\nimport datetime\nimport json\nimport os\nimport serial\nfrom gps import *\nimport subprocess\nimport pysftp\nfrom wifiConnect import Finder\nfrom pathlib import Path\n\ndataLogFile = 'dataLog.json'\n\nlengthOfVideo = 60\nframeRate = 30\n\nmyHostname = \"aatuploadserver.local\"\nmyUsername = \"pi\"\nmyPassword = \"aatracking\"\n\nmediaStorageLocation = '../../media/pi/aatstorage/'\n\n\nclass AatDashCam:\n def __init__(self):\n print(\"this is init\")\n self.ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n print(\"Root Directory\" + self.ROOT_DIR)\n #self.home = str(Path.home())+\"/cameraProject/\"\n self.home = self.ROOT_DIR + \"/\"\n # Inititalize GPS\n gpsd = gps(mode=WATCH_ENABLE | WATCH_NEWSTYLE)\n self.gpsd = gpsd\n \n # Fetch Intial Storage\n self.storageLeft = self.getStorageleft()\n\n # Initialize GPIO pins\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(17, GPIO.IN)\n # HiGH for keeping the power ON\n GPIO.setup(18, GPIO.OUT, initial=GPIO.HIGH)\n # HIGH for LED to imdicate processing\n GPIO.setup(24, GPIO.OUT, initial=GPIO.HIGH)\n self.ignitionStatus = GPIO.input(17)\n self.ledON = True\n # Initialize camera\n self.camera = PiCamera()\n self.initDataLogFile()\n self.lengthOfVideo = 60 # in seconds\n print('Init Successfull')\n self.processStarted = True\n\n def intiCameraConfiguration(self):\n print(self.home + 'cameraConfig.json')\n if os.path.exists(self.home + 'cameraConfig.json'):\n with open(self.home + 'cameraConfig.json') as json_file:\n print(json_file)\n config = json.load(json_file)\n\n self.camera.resolution = (\n config['resolution']['x'], config['resolution']['y'])\n self.lengthOfVideo = config['interval'] * 60\n frameRate = config['framerate']\n self.camera.framerate = frameRate\n\n def initDataLogFile(self):\n if os.path.exists(dataLogFile):\n pass\n else:\n data = {\n 'videoFiles': []\n }\n with open(dataLogFile, 'w') as outfile:\n json.dump(data, outfile)\n\n def getStorageleft(self):\n path = '/'\n st = os.statvfs(path)\n # free blocks available * fragment size\n bytes_avail = (st.f_bavail * st.f_frsize)\n gigabytes = bytes_avail / 1024 / 1024 / 1024\n\n return gigabytes\n\n def getFilename(self):\n self.storageLeft = self.getStorageleft()\n timestamp = int(time.time())\n fname = 'video_'+str(timestamp)+'.h264'\n\n a_file = open(dataLogFile, \"r\") # read dataLog File\n json_object = json.load(a_file)\n a_file.close()\n videoFiles = json_object['videoFiles']\n if self.storageLeft < 1:\n if os.path.exists(videoFiles[0]):\n os.remove(videoFiles[0])\n\n del videoFiles[0]\n # delete file\n videoFiles.append(fname)\n\n json_object['videoFiles'] = videoFiles\n a_file = open(dataLogFile, \"w\")\n json.dump(json_object, a_file, indent=4)\n a_file.close()\n filename = self.home + fname\n print(\"filename \" + filename)\n return filename\n\n def getPositionData(self):\n try:\n nx = self.gpsd.next()\n position = \"\"\n # For a list of all supported classes and fields refer to:\n # https://gpsd.gitlab.io/gpsd/gpsd_json.html\n if nx['class'] == 'TPV':\n latitude = getattr(nx, 'lat', \"Unknown\")\n longitude = getattr(nx, 'lon', \"Unknown\")\n speed = getattr(nx, 'speed', \"Unknown\")\n time = getattr(nx, 'time', \"Unknown\")\n alt = getattr(nx, 'alt', \"Unknown\")\n position = \"Your position: lon = \" + str(longitude) + \", lat = \" + str(\n latitude)+\", speed =\" + str(speed) + \", time = \" + str(time) + \", alt = \" + str(alt)\n print(position)\n return position\n except BaseException as e:\n 
print(str(e))\n subprocess.call('sudo systemctl stop gpsd.socket', shell=True)\n subprocess.call(\n 'sudo gpsd /dev/serial0 -F /var/run/gpsd.sock', shell=True)\n gpsd = gps(mode=WATCH_ENABLE | WATCH_NEWSTYLE)\n self.gpsd = gpsd\n print(\"re initilizing socket\")\n\n return \"\"\n\n def uploadCallback(self, a, b):\n print(\"\\r\"+str(b/1000000)+\" MB \" + \" - \" +\n str(a/1000000)+\" MB\", end='', flush=True)\n if self.ledON:\n GPIO.output(24, GPIO.LOW)\n self.ledON = False\n else:\n GPIO.output(24, GPIO.HIGH)\n self.ledON = True\n\n def startUploadingDebug(self):\n print(\"u have 5 seconds to turn on the ignition\")\n time.sleep(5)\n\n interface_name = \"wlan0\" # i. e wlp2s0\n server_name = \"WHE-BELL\"\n password = \"Martin123\"\n F = Finder(server_name=server_name, password=password,\n interface=interface_name)\n response = F.run()\n counter = 0\n while (response == False):\n counter += 1\n self.ignitionStatus = GPIO.input(17)\n if self.ignitionStatus:\n break\n\n if(counter < 30):\n time.sleep(2)\n print('waiting for a second to try again')\n response = F.run()\n else:\n break\n\n if (response == True):\n time.sleep(10)\n\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n print(\"**************************** hostkeys none\")\n with pysftp.Connection(host=myHostname, username=myUsername, password=myPassword, cnopts=cnopts) as sftp:\n print(\"=========================> pysftp connection successfull\")\n myfiles = os.listdir(self.home)\n for __file in myfiles: \n if(\".h264\" in __file):\n print(__file) \n remoteFilepath = mediaStorageLocation + __file\n localFilepath = self.home + __file\n sftp.put(localFilepath, remoteFilepath,\n self.uploadCallback)\n\n #self.ignitionStatus = GPIO.input(17)\n #if self.ignitionStatus:\n # break\n\n os.remove(self.home+__file)\n print(\"\\nuploaded file -\" + __file)\n \n print(\"file upload successfull\")\n sftp.close()\n self.processStarted = False\n print(\"this is a good time to turn on the ignition\")\n time.sleep(10)\n return True\n else:\n return False\n \n def startUploading(self):\n print(\"u have 5 seconds to turn on the ignition\")\n time.sleep(5)\n\n interface_name = \"wlan0\" # i. 
e wlp2s0\n server_name = \"OakOne\"\n password = \"ganesha2301\"\n F = Finder(server_name=server_name, password=password,\n interface=interface_name)\n response = F.run()\n counter = 0\n while (response == False):\n counter += 1\n self.ignitionStatus = GPIO.input(17)\n if self.ignitionStatus:\n break\n\n if(counter < 10):\n time.sleep(2)\n print('waiting for a second to try again - ' + str(counter))\n response = F.run()\n else:\n print(\"breaking here\")\n break\n\n if (response == True and self.ignitionStatus == False):\n time.sleep(10)\n\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n print(\"**************************** hostkeys none\")\n with pysftp.Connection(host=myHostname, username=myUsername, password=myPassword, cnopts=cnopts) as sftp:\n print(\"=========================> pysftp connection successfull\")\n myfiles = os.listdir(self.home)\n for __file in myfiles: \n if(\".h264\" in __file):\n print(__file) \n remoteFilepath = mediaStorageLocation + __file\n localFilepath = self.home + __file\n sftp.put(localFilepath, remoteFilepath,\n self.uploadCallback)\n\n #self.ignitionStatus = GPIO.input(17)\n #if self.ignitionStatus:\n # break\n\n os.remove(self.home+__file)\n print(\"\\nuploaded file -\" + __file)\n \n print(\"file upload successfull\")\n sftp.close()\n self.processStarted = False\n print(\"this is a good time to turn on the ignition\")\n time.sleep(10)\n return True\n else:\n return False\n \n def runDebug(self):\n try:\n self.ignitionStatus = GPIO.input(17) # Check for ignition\n if(self.ignitionStatus):\n self.intiCameraConfiguration()\n self.ignitionStatus = GPIO.input(\n 17) # Check for ignition\n print(\"Ignition Status\" + str(self.ignitionStatus))\n filename = self.getFilename()\n self.camera.start_preview(alpha=200)\n self.camera.start_recording(filename)\n now = datetime.datetime.now()\n self.camera.annotate_text = now.strftime(\n '%Y-%m-%dT%H:%M:%S')\n print(str(self.lengthOfVideo))\n for i in range(self.lengthOfVideo):\n position = self.getPositionData()\n now = datetime.datetime.now()\n self.camera.annotate_text = now.strftime(\n '%Y-%m-%dT%H:%M:%S') + \" \" + position\n time.sleep(1)\n self.ignitionStatus = GPIO.input(\n 17) # Check for ignition\n if(self.ignitionStatus == False):\n break\n\n self.camera.stop_preview()\n self.camera.stop_recording() \n\n isUploaded = self.startUploadingDebug()\n print(\" is Uploaded folag \" + str(isUploaded)) \n\n GPIO.output(18, GPIO.LOW)\n except Exception as e:\n print(e)\n finally:\n print(\"clean up\")\n GPIO.cleanup()\n \n def run(self):\n try:\n #pos = self.getPositionData()\n # for i in range(100):\n # print(str(pos))\n # pos = self.getPositionData()\n # time.sleep(1)\n\n while self.processStarted:\n self.ignitionStatus = GPIO.input(17) # Check for ignition\n if(self.ignitionStatus):\n self.intiCameraConfiguration()\n while self.ignitionStatus:\n self.ignitionStatus = GPIO.input(\n 17) # Check for ignition\n print(\"Ignition Status\" + str(self.ignitionStatus))\n filename = self.getFilename()\n self.camera.start_preview(alpha=200)\n self.camera.start_recording(filename)\n now = datetime.datetime.now()\n self.camera.annotate_text = now.strftime(\n '%Y-%m-%dT%H:%M:%S')\n print(str(self.lengthOfVideo))\n for i in range(self.lengthOfVideo):\n position = self.getPositionData()\n now = datetime.datetime.now()\n self.camera.annotate_text = now.strftime(\n '%Y-%m-%dT%H:%M:%S') + \" \" + position\n time.sleep(1)\n self.ignitionStatus = GPIO.input(\n 17) # Check for ignition\n if(self.ignitionStatus == False):\n break\n\n 
self.camera.stop_preview()\n self.camera.stop_recording()\n\n isUploaded = self.startUploading()\n self.processStarted = False\n print(\" is Uploaded flag \" + str(isUploaded))\n\n GPIO.output(18, GPIO.LOW)\n except Exception as e:\n print(e)\n finally:\n print(\"clean up\")\n GPIO.cleanup()\n #subprocess.call('sudo shutdown' , shell=True)\n\n\nif __name__ == \"__main__\":\n p1 = subprocess.Popen('sudo systemctl stop gpsd.socket',\n stdout=subprocess.PIPE, shell=True)\n p1.wait()\n p2 = subprocess.Popen(\n 'sudo gpsd /dev/serial0 -F /var/run/gpsd.sock', stdout=subprocess.PIPE, shell=True)\n p2.wait()\n\n aat = AatDashCam()\n aat.run()\n"
},
{
"alpha_fraction": 0.6192959547042847,
"alphanum_fraction": 0.6401564478874207,
"avg_line_length": 27.22222137451172,
"blob_id": "5c3374665572121c09d594ad4a6f707d279204b4",
"content_id": "60af05b14fa626ae20dca130b8421903acc864a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 27,
"path": "/newwificonnect.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\nimport time\n\n\n\nif __name__ == \"__main__\":\n p1 = subprocess.Popen(\"sudo systemctl stop dnsmasq\", shell=True)\n p1.wait()\n print(\"stop dnsmasq\")\n #time.sleep(2)\n p2 = subprocess.Popen(\"sudo systemctl stop hostapd\", shell=True)\n p2.wait()\n print(\"stop hostapd\")\n #time.sleep(2)\n p3 = subprocess.Popen(\"sudo dhclient -r\", shell=True)\n p3.wait()\n print(\"stop dhclient -r\")\n #time.sleep(2)\n p4 = subprocess.Popen(\"sudo systemctl restart dhcpcd\", shell=True)\n p4.wait()\n print(\"stop restart dhcpcd\")\n #time.sleep(2)\n p5 = subprocess.Popen(\"sudo wpa_supplicant -B -i wlan0 -c wpa_supplicant_WHE-BELL.conf\", shell=True)\n p5.wait()\n print(\"stop connect wpasupplicant\")\n #time.sleep(2)\n \n"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6210106611251831,
"avg_line_length": 18.91666603088379,
"blob_id": "b87405dc39ca7c62e54255dff33c61071788fc38",
"content_id": "a8aa8e3b19f06f24771b54fddaa7001bda909825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 752,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 36,
"path": "/testIgnitionStatus.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\n\nGPIO.setmode(GPIO.BCM)\n\n\nGPIO.setup(17, GPIO.IN) # Ignition Status\nGPIO.setup(18,GPIO.OUT,initial = GPIO.HIGH) #Power continuation Output High=KeepON, Low=SHutDown\n\n\n\n\ntry:\n ignition = GPIO.input(17)\n while ignition:\n ignition = GPIO.input(17)\n print(\"ignition Status - Ignition ON\")\n time.sleep(1)\n if(not ignition): \n break \n\n print('Ignition Turned OFF')\n print(\"Upload will start\")\n time.sleep(60)\nexcept KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:\n print(\"Keyboard interrupt\")\n #GPIO.cleanup()\n \nexcept Exception as e:\n print(e)\n #GPIO.cleanup()\n\nfinally:\n print(\"clean up\") \n GPIO.cleanup() \n \n "
},
{
"alpha_fraction": 0.48089173436164856,
"alphanum_fraction": 0.48782315850257874,
"avg_line_length": 36.328670501708984,
"blob_id": "0933ff14cba7a6d1c2dfe04b79cc4759480e9b85",
"content_id": "51d2ef2efd939d7ea156c5a9b355718e90a82190",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5338,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 143,
"path": "/wifiConnect.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\nfrom pathlib import Path\nimport json\nimport time\n\nclass Finder:\n def __init__(self, *args, **kwargs):\n self.interface_name = \"wlan0\"\n self.main_dict = {}\n self.skip = False\n self.home = str(Path.home())+\"/cameraProject/\"\n self.server_name = kwargs['server_name']\n self.password = kwargs['password']\n self.interface_name = kwargs['interface']\n check = self.searchForavailableWIFI(server_name=self.server_name)\n self.isAlreadyConnected = False\n if check:\n if(os.path.exists(self.home + 'wifiConfig.json')):\n with open(self.home+'wifiConfig.json', 'rb') as json_file:\n config = json.load(json_file)\n settings = config[\"settings\"]\n self.interface_name = \"wlan0\" # i. e wlp2s0\n if settings:\n for setting in settings:\n self.server_name = setting[\"servername\"]\n self.password = setting[\"password\"]\n check = self.searchForavailableWIFI(\n server_name=self.server_name)\n if not check:\n print(server_name + \" WIFI found\")\n # self.run()\n break\n else:\n print(server_name + \" wifi not found\")\n else:\n self.isAlreadyConnected = True\n print(\"already connected to \" + self.server_name)\n\n def run(self):\n if self.isAlreadyConnected:\n return self.isAlreadyConnected\n \n checkCommand = \"iwgetid -r\"\n checkResult = os.popen(checkCommand)\n checkResult = list(checkResult)\n if checkResult:\n if self.server_name == checkResult[0].strip():\n print(checkResult)\n self.skip = True\n\n if self.skip == False:\n command = \"\"\"sudo iwlist wlan0 scan | grep -ioE 'ssid:\"(.*{}.*)'\"\"\"\n result = os.popen(command.format(self.server_name))\n result = list(result)\n\n if \"Device or resource busy\" in result:\n return None\n else:\n ssid_list = [item.lstrip('SSID:').strip('\"\\n')\n for item in result]\n print(\"Successfully get ssids {}\".format(str(ssid_list)))\n\n for name in ssid_list:\n try:\n result = self.connect(name)\n check = self.searchForavailableWIFI(server_name=\"Oak560\")\n count = 0\n while check == False:\n result = self.connect(name)\n count = count + 1\n time.sleep(5)\n check = self.searchForavailableWIFI(server_name=\"Oak560\")\n if count == 5:\n break\n \n return result\n except Exception as exp:\n print(\"Couldn't connect to name : {}. 
{}\".format(name, exp))\n return False\n else:\n if result:\n print(\"Successfully connected to {}\".format(name))\n return True\n return False\n else:\n print(\"Already Connected\")\n return True\n\n def connection(self, name):\n try:\n os.system(\"sudo nmcli d wifi connect {} password {}\".format(\n name, self.password))\n except:\n raise\n else:\n return True\n def connect(self,name):\n try:\n p1 = subprocess.Popen(\"sudo systemctl stop dnsmasq\", shell=True)\n p1.wait()\n print(\"stop dnsmasq\")\n #time.sleep(2)\n p2 = subprocess.Popen(\"sudo systemctl stop hostapd\", shell=True)\n p2.wait()\n print(\"stop hostapd\")\n #time.sleep(2)\n p3 = subprocess.Popen(\"sudo dhclient -r\", shell=True)\n p3.wait()\n print(\"stop dhclient -r\")\n #time.sleep(2)\n p4 = subprocess.Popen(\"sudo systemctl restart dhcpcd\", shell=True)\n p4.wait()\n print(\"stop restart dhcpcd\")\n time.sleep(2)\n print(\"sudo wpa_supplicant -B -i wlan0 -c wpa_supplicant_OakOne.conf\")\n p5 = subprocess.Popen(\"sudo wpa_supplicant -B -i wlan0 -c wpa_supplicant_OakOne.conf\", shell=True)\n p5.wait()\n time.sleep(5)\n print(\"stop connect wpasupplicant\")\n return True\n except:\n raise\n else:\n return False\n \n \n def searchForavailableWIFI(self, server_name):\n process = os.popen(\"sudo iw dev wlan0 scan | grep SSID\")\n preprocessed = process.read()\n if(server_name in preprocessed):\n return False\n else:\n return True\n\n\nif __name__ == \"__main__\":\n home = str(Path.home())\n interface_name = \"wlan0\" # i. e wlp2s0\n server_name = \"OakOne\"\n password = \"ganesha2301\"\n F = Finder(server_name=server_name, password=password,\n interface=interface_name)\n"
},
{
"alpha_fraction": 0.6218130588531494,
"alphanum_fraction": 0.6657223701477051,
"avg_line_length": 32.66666793823242,
"blob_id": "444095aa620791dae5bfaf698fa6ca580b9ad294",
"content_id": "a2043514a326f12a8bdaa5bffdd430b3d2615aef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 21,
"path": "/testconnectionsettings.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\n\nif __name__ == \"__main__\":\n p0 = subprocess.Popen(\"sudo killall wpa_supplicant\", shell=True)\n p0.wait()\n p1 = subprocess.Popen(\"sudo systemctl restart dhcpcd\", shell=True)\n p1.wait()\n print(\"stop systemctl restart dhcpcd\")\n #time.sleep(2)\n p2 = subprocess.Popen(\"sudo ifconfig wlan0 192.168.4.0\", shell=True)\n p2.wait()\n print(\"stop ifconfig wlan0 192.168.4.0\")\n #time.sleep(2)\n p3 = subprocess.Popen(\"sudo systemctl restart dnsmasq\", shell=True)\n p3.wait()\n print(\"stop restart dnsmasq\")\n #time.sleep(2)\n p4 = subprocess.Popen(\"sudo systemctl restart hostapd\", shell=True)\n p4.wait()\n print(\"stop rsystemctl restart hostapd\")"
},
{
"alpha_fraction": 0.6931079030036926,
"alphanum_fraction": 0.7061118483543396,
"avg_line_length": 23.74193572998047,
"blob_id": "b49556e2378d2fc59e6c71585be0c863abc27037",
"content_id": "15f0abe3af55e87a4209552cdad7bec23d76e951",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 31,
"path": "/testConnectWIFI.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import subprocess\n\nimport time\n\n#ifdown = subprocess.check_output('sudo ifconfig wlan0 down')\n\n#time.sleep(30)\n\n#ifup =subprocess.check_output('sudo ifconfig wlan0 up')\n\ntry:\n output = subprocess.check_output('sudo iwgetid',stderr=subprocess.STDOUT,shell=True)\n print(output)\n \nexcept Exception as e:\n print(e)\n output = \"\"\n \n\n#print(\"OakOne\" in str(output))\n\nisConnected = \"OakOne\" in str(output)\n\nprint(\"is connected \" + str(isConnected))\n\nif(isConnected == False):\n newoutput = subprocess.check_output('sudo iwlist wlan0 scan|grep SSID',shell=True)\n print(newoutput)\n if(\"OakOne\" in str(newoutput)):\n newnewoutput = subprocess.check_output('sudo iwconfig wlan0 essid OakOne key ganesha2301',shell=True)\n print(newnewoutput)\n\n\n"
},
{
"alpha_fraction": 0.5651564598083496,
"alphanum_fraction": 0.5730966925621033,
"avg_line_length": 38.64814758300781,
"blob_id": "ca9d00dae78626aaf57f4c70140ac94274cb5f8c",
"content_id": "89ca2a25907ad9703ef5464f0d61ac3006c0c66d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2141,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 54,
"path": "/upload.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import os\nimport pysftp\nfrom pathlib import Path\n\n\nclass Upload:\n def __init__(self, *args, **kwargs):\n self.myHostname = kwargs['myHostname']\n self.myUsername = kwargs['myUsername']\n self.myPassword = kwargs['myPassword']\n self.media_storage = kwargs['mediaStorageLocation']\n self.main_dict = {}\n self.home = str(Path.home())+\"/cameraProject/\"\n self.local_file_path = kwargs['local_file_path']\n #self.sftp = pysftp.Connection(host=myHostname, username=myUsername, password=myPassword)\n self.readFilesandUpload()\n\n def readFilesandUpload(self):\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n print(\"**************************** hostkeys none\")\n with pysftp.Connection(host=self.myHostname, username=self.myUsername, password=self.myPassword, cnopts=cnopts) as sftp:\n print(\"=========================> pysftp connection successfull\")\n myfiles = os.listdir(self.home)\n for __file in myfiles:\n print(__file)\n if(\".h264\" in __file):\n print(__file)\n remoteFilepath = mediaStorageLocation + __file\n localFilepath = self.home + __file\n sftp.put(localFilepath, remoteFilepath,\n self.uploadCallback)\n os.remove(self.home+__file)\n print(\"\\nuploaded file -\" + __file)\n print(\"file upload successfull\")\n sftp.close()\n\n def uploadCallback(self, a, b):\n print(\"\\r\"+str(a/1000000) + \"Mb uploaded of \" +\n str(b/1000000)+\"MB\", end='', flush=True)\n\n def closeConnection(self):\n self.sftp.close()\n\n\nif __name__ == \"__main__\":\n myHostname = \"aatuploadserver.local\"\n myUsername = \"pi\"\n myPassword = \"aatracking\"\n mediaStorageLocation = '../../media/pi/aatstorage/'\n local_file_path = './'\n U = Upload(myHostname=myHostname, myUsername=myUsername, myPassword=myPassword,\n mediaStorageLocation=mediaStorageLocation, local_file_path=local_file_path)\n U.readFilesandUpload()\n"
},
{
"alpha_fraction": 0.5927602052688599,
"alphanum_fraction": 0.5995475053787231,
"avg_line_length": 28.16666603088379,
"blob_id": "eed434df4b98804d601e884b55a2dc28513c9f9e",
"content_id": "f9d66b1df76fa5770efc756e22b14bf0d7ec57fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 30,
"path": "/testwifi.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "\nimport time\nimport os\nfrom wifiConnect import Finder\n\n\nif __name__ == \"__main__\":\n # Server_name is a case insensitive string, and/or regex pattern which demonstrates\n # the name of targeted WIFI device or a unique part of it.\n server_name = \"COMFAST_5G\"\n password = \"ganesha2301\"\n interface_name = \"wlan0\" # i. e wlp2s0 \n \n \n process = os.popen(\"sudo iw dev wlan0 scan | grep SSID\")\n preprocessed = process.read()\n print(preprocessed)\n if(server_name in preprocessed):\n print(\"Wifi FOund\")\n else:\n print(\"wifi not found\")\n\n process.close()\n #F = Finder(server_name=server_name,\n # password=password,\n # interface=interface_name)\n #response = F.run()\n #while (response == False):\n # time.sleep(1)\n # print('waiting for a second to try again')\n # response = F.run()\n "
},
{
"alpha_fraction": 0.6218323707580566,
"alphanum_fraction": 0.623781681060791,
"avg_line_length": 30.121212005615234,
"blob_id": "6d5fb9de235eb443f0e7d0f3cd27d9f92f6e396b",
"content_id": "c2a7d4c0ed805fdb7b5ce1e97c19fa338c4c6435",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1026,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 33,
"path": "/library_gps.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "from gps import *\nimport time\nimport subprocess\n\nsubprocess.call('sudo systemctl stop gpsd.socket' , shell=True)\nsubprocess.call('sudo gpsd /dev/serial0 -F /var/run/gpsd.sock',shell=True)\n\nrunning = True\n\ndef getPositionData():\n nx = gpsd.next()\n # print(nx)\n # For a list of all supported classes and fields refer to:\n # https://gpsd.gitlab.io/gpsd/gpsd_json.html\n if nx['class'] == 'TPV':\n latitude = getattr(nx,'lat', \"Unknown\")\n longitude = getattr(nx,'lon', \"Unknown\")\n speed = getattr(nx,'speed',\"Unknown\")\n time = getattr(nx,'time',\"Unknown\")\n alt = getattr(nx,'alt',\"Unknown\")\n print(\"Your position: lon = \" + str(longitude) + \", lat = \" + str(latitude)+\", speed =\"+ str(speed) + \", time = \" + str(time) + \", alt = \" + str(alt))\n\ngpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)\n\ntry:\n print(\"Application started!\")\n while True:\n getPositionData()\n time.sleep(2)\n\nexcept (KeyboardInterrupt):\n running = False\n print(\"Applications closed!\")"
},
{
"alpha_fraction": 0.6164693832397461,
"alphanum_fraction": 0.6295643448829651,
"avg_line_length": 24.612903594970703,
"blob_id": "211897039a2f052c11a9ab8e6563cc095396ed6d",
"content_id": "f1928542a4b439fa83a61a831f8eb73eee23ec28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3971,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 155,
"path": "/dashcam.py",
"repo_name": "tejasghutukade/dashcam",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nfrom picamera import PiCamera\nimport time\nimport datetime\nimport json\nimport os\nimport serial\nfrom gps import *\nimport subprocess\n\n\nsubprocess.call('sudo systemctl stop gpsd.socket' , shell=True)\nsubprocess.call('sudo gpsd /dev/serial0 -F /var/run/gpsd.sock',shell=True)\n\ndataLogFile = '/home/pi/cameraProject/dataLog.json'\n\ndef where_json(file_name):\n return os.path.exists(file_name)\n\n\nif where_json(dataLogFile):\n pass\n\nelse:\n\n data = { \n 'videoFiles': {}\n }\n with open(dataLogFile, 'w') as outfile: \n json.dump(data, outfile)\n\n\n# constants\ninterval = 3\nresolutionx=640\nresolutiony=720\nframerate = 30\n#intialize\ncamera = PiCamera()\ngpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)\nGPIO.setmode(GPIO.BCM)\n\n\n# get settings\nwith open('/home/pi/cameraProject/cameraConfig.json') as json_file:\n config = json.load(json_file)\n resolutionx = config['resolution']['x']\n resolutiony = config['resolution']['y']\n interval = config['interval']\n interval = interval * 60\n framerate = config['framerate']\n \n# init gpio pins\nGPIO.setup(17, GPIO.IN) # Ignition Status\n\nGPIO.setup(18,GPIO.OUT,initial = GPIO.HIGH) #Power continuation Output High=KeepON, Low=SHutDown\n\n#PIO.setup(24,GPIO.OUT,initial = GPIO.HIGH)\n#GPIO.cleanup()\n\n\n\n#inti camera\ncamera.resolution = (resolutionx,resolutiony)\ncamera.framerate = framerate\nprint(\"interval - \" + str(interval))\n\n\n#functions\ndef getPositionData(gps):\n nx = gpsd.next()\n print(nx)\n position = \"\"\n # For a list of all supported classes and fields refer to:\n # https://gpsd.gitlab.io/gpsd/gpsd_json.html\n if nx['class'] == 'TPV':\n latitude = getattr(nx,'lat', \"Unknown\")\n longitude = getattr(nx,'lon', \"Unknown\")\n speed = getattr(nx,'speed',\"Unknown\")\n time = getattr(nx,'time',\"Unknown\")\n alt = getattr(nx,'alt',\"Unknown\")\n position = \"Your position: lon = \" + str(longitude) + \", lat = \" + str(latitude)+\", speed =\"+ str(speed) + \", time = \" + str(time) + \", alt = \" + str(alt)\n \n return position\n\n\ndef getStorageleft():\n path = '/'\n st = os.statvfs(path)\n # free blocks available * fragment size\n bytes_avail = (st.f_bavail * st.f_frsize)\n gigabytes = bytes_avail / 1024 / 1024 / 1024\n \n return gigabytes\n\n\ndef getFilename():\n timestamp = int(time.time()) \n filename = '/home/pi/cameraProject/video_'+str(timestamp)+'.h264'\n \n return filename\n\n\ndef getAnnotationText():\n position = getPositionData(gpsd)\n print(position)\n \n return position\n\ndef connectToWIFI():\n output = subprocess.check_output('sudo iwgetid',shell=True)\n isConnected = \"OakOne\" in str(output)\n if(isConnected == False):\n newoutput = subprocess.check_output('sudo iwlist wlan0 scan|grep SSID',shell=True) \n \n if(\"OakOne 5G\" in str(newoutput)):\n newnewoutput = subprocess.check_output('sudo iwconfig wlan0 essid OakOne key ganesha2301',shell=True)\n\n# Logic\ntry : \n #ignition = GPIO.input(17)\n ignition = False\n while ignition :\n \n #camera.start_preview(alpha=200)\n fname = getFilename()\n camera.start_recording(fname)\n\n for i in range(interval):\n #ignition = GPIO.input(17)\n ignition = True\n annot = getAnnotationText()\n camera.annotate_text = annot\n print(\"Ignition - \" + str(ignition))\n time.sleep(1)\n if(not ignition):\n camera.stop_recording()\n break\n \n camera.stop_recording()\n #camera.stop_preview() \n print('Done Processing')\n #GPIO.output(24,GPIO.LOW) \n #GPIO.output(18,GPIO.LOW) \nexcept KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:\n 
print(\"Keyboard interrupt\")\n #GPIO.cleanup()\n \nexcept Exception as e:\n print(e)\n #GPIO.cleanup()\n\nfinally:\n print(\"clean up\") \n GPIO.cleanup()\n\n"
}
] | 15 |
exblematique/Project | https://github.com/exblematique/Project | 75a7efd3d3bddb5467bb0bb264722374e79e56bd | 5428e93645a4e038f552111df0908f83110e7be6 | 635014dbfde417d9ad11833cb52634733db2424c | refs/heads/master | 2020-04-28T09:32:46.464236 | 2019-05-15T10:48:27 | 2019-05-15T10:48:27 | 175,170,498 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.567556619644165,
"alphanum_fraction": 0.592839777469635,
"avg_line_length": 24.29787254333496,
"blob_id": "55c6cbc54ae3690b385c9f34f191ef3a508a522a",
"content_id": "340af7b3ca90cd8bd7120749a8db4e0767dc51a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4944,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 188,
"path": "/2_RFID/MQTT_RFID_26.04/MQTT_RFID_26.04..ino",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\r\nMQTT_Switch_Example.ino\r\nExample for controlling a light using an MQTT switch\r\nby: Alex Wende, SparkFun Electronics\r\n\r\nThis sketch connects the ESP32 to a MQTT broker and subcribes to the topic\r\nroom/light. When the button is pressed, the client will toggle between\r\npublishing \"on\" and \"off\".\r\n******************************************************************************/\r\n\r\n//Last modification in 26.04.19: Cards are working and sending information via MQTT. \r\n\r\n#include <WiFi.h>\r\n#include <PubSubClient.h>\r\n#include <SPI.h>\r\n#include <MFRC522.h>\r\n#define SS_PIN 33\r\n#define RST_PIN 27\r\nMFRC522 mfrc522(SS_PIN, RST_PIN); // Create MFRC522 instance.\r\n \r\n\r\nconst char *ssid = \"Test\"; // name of your WiFi network\r\nconst char *password = \"qwerty1234\"; // password of the WiFi network\r\n\r\nconst byte LIGHT_PIN = 21; // Pin to control the light with\r\nconst byte SWITCH_PIN = 14; \r\nconst char *ID = \"Boss1\"; // Name of our device, must be unique\r\nconst char *SUB_TOPIC = \"getPC\"; // Topic to subcribe to\r\nconst char *PUB_TOPIC = \"sendPC\"; // Topic to publish the light state to\r\nString previousID = \"\";\r\n\r\nIPAddress broker(192,168,56,101); // IP address of your MQTT broker eg. 192.168.1.50\r\nWiFiClient wclient;\r\n\r\nPubSubClient client(wclient); // Setup MQTT client\r\nbool state=0;\r\n\r\nvoid callback(char* topic, byte* payload, unsigned int length) {\r\n String response;\r\n\r\n for (int i = 0; i < length; i++) {\r\n response += (char)payload[i];\r\n }\r\n Serial.println('\\n');\r\n Serial.print(\"Message arrived \");\r\n Serial.print(topic);\r\n Serial.print(\" \");\r\n Serial.println(response);\r\n digitalWrite(LIGHT_PIN, HIGH);\r\n delay(5000);\r\n digitalWrite(LIGHT_PIN, LOW);\r\n \r\n\r\n}\r\n// Connect to WiFi network\r\nvoid setup_wifi() {\r\n Serial.print(\"\\nConnecting to \");\r\n Serial.println(ssid);\r\n\r\n WiFi.begin(ssid, password); // Connect to network\r\n\r\n while (WiFi.status() != WL_CONNECTED) { // Wait for connection\r\n delay(500);\r\n Serial.print(\".\");\r\n }\r\n\r\n Serial.println();\r\n Serial.println(\"WiFi connected\");\r\n Serial.print(\"IP address: \");\r\n Serial.println(WiFi.localIP());\r\n}\r\n\r\n// Reconnect to client\r\nvoid reconnect() {\r\n // Loop until we're reconnected\r\n while (!client.connected()) {\r\n Serial.print(\"Attempting MQTT connection...\");\r\n // Attempt to connect\r\n if (client.connect(ID)) {\r\n Serial.println(\"connected\");\r\n Serial.print(\"Publishing to: \");\r\n Serial.println(PUB_TOPIC);\r\n Serial.println('\\n');\r\n client.subscribe(SUB_TOPIC);\r\n Serial.println(\"connected\");\r\n Serial.print(\"Subcribed to: \");\r\n Serial.println(SUB_TOPIC);\r\n Serial.println('\\n');\r\n \r\n\r\n } else {\r\n Serial.println(\" try again in 5 seconds\");\r\n // Wait 5 seconds before retrying\r\n delay(5000);\r\n }\r\n }\r\n}\r\n\r\nvoid setup() {\r\n Serial.begin(115200); // Start serial communication at 115200 baud\r\n pinMode(LIGHT_PIN,OUTPUT); // Configure SWITCH_Pin as an input\r\n digitalWrite(LIGHT_PIN,LOW); // enable pull-up resistor (active low)\r\n delay(100);\r\n setup_wifi(); // Connect to network\r\n client.setServer(broker, 1883);\r\n SPI.begin(); // Initiate SPI bus\r\n mfrc522.PCD_Init(); // Initiate MFRC522\r\n client.setCallback(callback);\r\n}\r\n\r\nvoid sendMessage(){\r\n \r\n String content= \"\";\r\n byte letter;\r\n for (byte i = 0; i < mfrc522.uid.size; i++) 
\r\n {\r\n content.concat(String(mfrc522.uid.uidByte[i] < 0x10 ? \" 0\" : \"\"));\r\n content.concat(String(mfrc522.uid.uidByte[i], HEX));\r\n }\r\n if(!previousID.equals(content)){\r\n previousID=content;\r\n Serial.println();\r\n Serial.print(\"UID tag :\");\r\n Serial.print(content.c_str());\r\n client.publish(PUB_TOPIC, content.c_str() ); \r\n delay(1000);\r\n }\r\n // Halt PICC\r\n mfrc522.PICC_HaltA();\r\n\r\n // Stop encryption on PCD\r\n mfrc522.PCD_StopCrypto1();\r\n}\r\n\r\nvoid loop() {\r\n\r\n if (!client.connected()) // Reconnect if connection is lost\r\n {\r\n reconnect();\r\n }\r\n client.loop();\r\n if ( ! mfrc522.PICC_IsNewCardPresent()) \r\n {\r\n previousID=\"\";\r\n return;\r\n }\r\n // Select one of the cards\r\n if ( ! mfrc522.PICC_ReadCardSerial()) \r\n {\r\n return;\r\n }\r\n sendMessage(); \r\n\r\n \r\n }\r\n\r\n\r\n\r\n/*\r\n if (!client.connected()) // Reconnect if connection is lost\r\n {\r\n reconnect();\r\n }\r\n client.loop();\r\n\r\n // if the switch is being pressed\r\n if(digitalRead(SWITCH_PIN) == 0) \r\n {\r\n state = !state; //toggle state\r\n if(state == 1) // ON\r\n {\r\n client.publish(PUB_TOPIC, \"on\");\r\n Serial.println((String)PUB_TOPIC + \" => on\");\r\n }\r\n else // OFF\r\n {\r\n client.publish(PUB_TOPIC, \"off\");\r\n Serial.println((String)PUB_TOPIC + \" => off\");\r\n }\r\n\r\n while(digitalRead(SWITCH_PIN) == 0) // Wait for switch to be released\r\n {\r\n // Let the ESP handle some behind the scenes stuff if it needs to\r\n yield(); \r\n delay(20);\r\n }\r\n }\r\n}*/\r\n"
},
{
"alpha_fraction": 0.4829157292842865,
"alphanum_fraction": 0.5027159452438354,
"avg_line_length": 21.206226348876953,
"blob_id": "5491620139b757e756ba943abda6ca11df103eba",
"content_id": "e17744b5d2079a05c7acee3cbfdc2ee5d9d3cfd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5707,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 257,
"path": "/2_RFID/settings.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDebugging\n\"\"\"\n\nDEBUG = True\n\n\"\"\"\nTime configuration in seconds\n\"\"\"\nCONFIG_SEND_DELAY = 0.2\n\nSYNC_DELAY = 20\n\n\"\"\"\nModule configuration file location\n\"\"\"\nMODULE_CONFIG_FILE = 'config/moduleConfig.json'\nTABLE_CONFIG_FILE = 'config/tableConfig.json'\n\n\"\"\"\nColors used in application\n\"\"\"\nCOLOR_DICT = {\n \"vlow\": {\n \"id\": 0,\n \"color\": (0.1, 0.8, 1.0, 1.0) # white-ish\n },\n \"vmedium\": {\n \"id\": 1,\n \"color\": (0.6, 0.3, 1.0, 1.0) # light purple\n },\n \"vhigh\": {\n \"id\": 2,\n \"color\": (0.2, 0.1, 1.0, 1.0) # Dark blue\n },\n \"lnormal\": {\n \"id\": 3,\n \"color\": (0, 1, 0, 1), # Green\n },\n \"lhigh\": {\n \"id\": 4,\n \"color\": (1, 1, 0, 1), # Yellow\n },\n \"lstressed\": {\n \"id\": 5,\n \"color\": (1, 0, 0, 1) # Red\n }\n}\n\n\"\"\"\nEnums\n\"\"\"\n\n# Grid voltages\n\n\nclass Voltages:\n ERROR, LOW, MEDIUM, HIGH, ADAPTIVE = range(-1, 4)\n\n @staticmethod\n def enum_to_color(e):\n if e is Voltages.ERROR:\n return (1.0, .0, .0, 1.0)\n elif e is Voltages.LOW:\n return COLOR_DICT[\"vlow\"][\"color\"]\n elif e is Voltages.MEDIUM:\n return COLOR_DICT[\"vmedium\"][\"color\"]\n elif e is Voltages.HIGH:\n return COLOR_DICT[\"vhigh\"][\"color\"]\n raise Exception('Cannot convert this to color')\n\n @staticmethod\n def enum_to_flow_color(e):\n if e is Voltages.ERROR:\n return (1, 1, 1, 1)\n elif e is Voltages.LOW:\n return COLOR_DICT[\"vlow\"][\"color\"]\n elif e is Voltages.MEDIUM:\n return COLOR_DICT[\"vmedium\"][\"color\"]\n elif e is Voltages.HIGH:\n return COLOR_DICT[\"vhigh\"][\"color\"]\n raise Exception('Cannot convert this to color')\n\n @staticmethod\n def str_to_enum(s):\n if s == \"error\":\n return Voltages.ERROR\n elif s == \"low\":\n return Voltages.LOW\n elif s == \"medium\":\n return Voltages.MEDIUM\n elif s == \"high\":\n return Voltages.HIGH\n elif s == None:\n return Voltages.ADAPTIVE\n raise Exception('Cannot convert this to enum')\n\n @staticmethod\n def enum_to_str(e):\n if e is Voltages.ERROR:\n return \"Error\"\n elif e is Voltages.LOW:\n return \"Low\"\n elif e is Voltages.MEDIUM:\n return \"Medium\"\n elif e is Voltages.HIGH:\n return \"High\"\n elif e is Voltages.ADAPTIVE:\n return \"Adaptive\"\n raise Exception('Cannot convert this to string')\n\n\nclass Roles:\n PRODUCTION, CONSUMPTION = range(2)\n\n @staticmethod\n def str_to_enum(s):\n if s == \"production\":\n return Roles.PRODUCTION\n elif s == \"consumption\":\n return Roles.CONSUMPTION\n raise Exception('Cannot convert this to enum')\n\n @staticmethod\n def enum_to_str(e):\n if e == Roles.PRODUCTION:\n return \"production\"\n elif e == Roles.CONSUMPTION:\n return \"consumption\"\n raise Exception('Cannot convert this to enum')\n\n\n# FlowSegment speed\nclass Speed:\n NORMAL, FAST, FASTER, FASTEST = range(4)\n\n# FlowSegment direction\n\n\nclass Direction:\n FORWARDS, BACKWARDS = range(2)\n\n# FlowSegment load\n\n\nclass Load:\n NORMAL, HIGH, CRITICAL = range(3)\n\n# FlowSegment state\n\n\nclass State:\n OFF, ERROR, PASSIVE, ACTIVE = range(4)\n\n\n\"\"\"\nBoundaries\n\"\"\"\nPOWER_SPEED_BOUNDARIES = {\n Speed.NORMAL: 50,\n Speed.FAST: 200,\n Speed.FASTER: 300\n}\n\nVOLTAGE_POWER_LOAD_BOUNDARIES = {\n Voltages.LOW: {\n Load.CRITICAL: 300, # capacity, power > capacity -> critical load\n Load.HIGH: .75 # high modifier, power > capacity * high modifier -> high load\n },\n Voltages.MEDIUM: {\n Load.CRITICAL: 500, # capacity, critical load\n Load.HIGH: .80 # x% of capacity, high load\n },\n Voltages.HIGH: {\n Load.CRITICAL: 1300, # capacity, 
critical load\n Load.HIGH: .90 # x% of capacity, high load\n }\n}\n\n\"\"\"\nHelper functions\n\"\"\"\n\n\ndef GET_LOAD(voltage, power):\n power = abs(power)\n high_mod = VOLTAGE_POWER_LOAD_BOUNDARIES[voltage][Load.HIGH]\n capacity = VOLTAGE_POWER_LOAD_BOUNDARIES[voltage][Load.CRITICAL]\n if power <= high_mod * capacity:\n return Load.NORMAL\n elif power <= capacity:\n return Load.HIGH\n else:\n return Load.CRITICAL\n\n\ndef GET_SPEED(power):\n if power <= POWER_SPEED_BOUNDARIES[Speed.NORMAL]:\n return Speed.NORMAL\n elif power <= POWER_SPEED_BOUNDARIES[Speed.FAST]:\n return Speed.FAST\n elif power <= POWER_SPEED_BOUNDARIES[Speed.FASTER]:\n return Speed.FASTER\n else:\n return Speed.FASTEST\n\n\n\"\"\"\nTable Section settings, per type.\nexample: x in TABLE_PART[type]['module_locations']\n\"\"\"\nTABLE_PART = {\n 1: {\n 'module_locations': [\n {\n 'position': (0, 2)\n },\n {\n 'position': (1, 1)\n },\n {\n 'position': (3, 1)\n },\n {\n 'position': (4, 2)\n },\n {\n 'position': (3, 3)\n },\n {\n 'position': (1, 3)\n }\n ],\n },\n 2: {\n 'module_locations': [\n {\n 'position': (0, 2)\n },\n {\n 'position': (0, 0)\n },\n {\n 'position': (-1, -1)\n },\n {\n 'position': (3, 2)\n },\n {\n 'position': (-1, -1)\n },\n {\n 'position': (0, 4)\n }\n ]\n }\n}\n"
},
{
"alpha_fraction": 0.6090138554573059,
"alphanum_fraction": 0.6290447115898132,
"avg_line_length": 25.617021560668945,
"blob_id": "d14fcebdaa29dea0a1c7d1ba126bbc12b7cb95c7",
"content_id": "058ef24d3e9d161f0f2a914610025875aa3e8827",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2596,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 94,
"path": "/1_BlinkLED/Getinformation_12.3/Getinformation_12.3.ino",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include <WiFi.h>\r\n#include <PubSubClient.h>\r\n\r\nconst char *ssid = \"Test\"; // name of your WiFi network\r\nconst char *password = \"qwerty1234\"; // password of the WiFi network\r\n\r\nconst byte LIGHT_PIN = 21; // Pin to control the light with\r\nconst char *ID = \"Boss1\"; // Name of our device, must be unique\r\nconst char *TOPIC = \"getPC\"; // Topic to subcribe to\r\nconst char *STATE_TOPIC = \"sendPC\"; // Topic to publish the light state to\r\n\r\nIPAddress broker(192,168,56,101); // IP address of your MQTT broker eg. 192.168.1.50\r\nWiFiClient wclient;\r\n\r\nPubSubClient client(wclient); // Setup MQTT client\r\n\r\n// Handle incomming messages from the broker\r\nvoid callback(char* topic, byte* payload, unsigned int length) {\r\n String response;\r\n\r\n for (int i = 0; i < length; i++) {\r\n response += (char)payload[i];\r\n }\r\n Serial.print(\"Message arrived \");\r\n Serial.print(topic);\r\n Serial.print(\" \");\r\n Serial.println(response);\r\n if(response == \"ON\") // Turn the light on\r\n {\r\n digitalWrite(LIGHT_PIN, HIGH);\r\n client.publish(STATE_TOPIC,\"OK\");\r\n }\r\n else if(response == \"OFF\") // Turn the light off\r\n {\r\n digitalWrite(LIGHT_PIN, LOW);\r\n client.publish(STATE_TOPIC,\"OK\");\r\n }\r\n}\r\n\r\n// Connect to WiFi network\r\nvoid setup_wifi() {\r\n Serial.print(\"\\nConnecting to \");\r\n Serial.println(ssid);\r\n\r\n WiFi.begin(ssid, password); // Connect to network\r\n\r\n while (WiFi.status() != WL_CONNECTED) { // Wait for connection\r\n delay(500);\r\n Serial.print(\".\");\r\n }\r\n\r\n Serial.println();\r\n Serial.println(\"WiFi connected\");\r\n Serial.print(\"IP address: \");\r\n Serial.println(WiFi.localIP());\r\n}\r\n\r\n// Reconnect to client\r\nvoid reconnect() {\r\n // Loop until we're reconnected\r\n while (!client.connected()) {\r\n Serial.print(\"Attempting MQTT connection...\");\r\n // Attempt to connect\r\n if(client.connect(ID)) {\r\n client.subscribe(TOPIC);\r\n Serial.println(\"connected\");\r\n Serial.print(\"Subcribed to: \");\r\n Serial.println(TOPIC);\r\n Serial.println('\\n');\r\n\r\n } else {\r\n Serial.println(\" try again in 5 seconds\");\r\n // Wait 5 seconds before retrying\r\n delay(5000);\r\n }\r\n }\r\n}\r\n\r\nvoid setup() {\r\n Serial.begin(115200); // Start serial communication at 115200 baud\r\n pinMode(LIGHT_PIN, OUTPUT); // Configure LIGHT_PIN as an output\r\n delay(100);\r\n setup_wifi(); // Connect to network\r\n client.setServer(broker, 1883);\r\n client.setCallback(callback);// Initialize the callback routine\r\n}\r\n\r\nvoid loop() {\r\n if (!client.connected()) // Reconnect if connection is lost\r\n {\r\n reconnect();\r\n }\r\n client.loop();\r\n}\r\n"
},
{
"alpha_fraction": 0.6979866027832031,
"alphanum_fraction": 0.7046979665756226,
"avg_line_length": 15.55555534362793,
"blob_id": "ac0bed8b965a897652a5f21884f2ff5265bb95bc",
"content_id": "8c6f126d8a321805fbbc6d41c0bed2ffd1c13321",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/final_prog/PC/test_scripts/test_dummy.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom dummySerial import DummySerial\n\nsys.path.append(\"./dummy\")\n\n\ndef test_valid_serial():\n assert DummySerial.name[:8] == '/dev/pts'\n"
},
{
"alpha_fraction": 0.6472727060317993,
"alphanum_fraction": 0.6654545664787292,
"avg_line_length": 20.153846740722656,
"blob_id": "75d6968cd9cad93baee336d7e4a781e632d20186",
"content_id": "e67d142fa0a2890b623a35bf9a032aa58597cf0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 13,
"path": "/install_MQTT_broker.sh",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ `whoami` != 'root' ]\nthen\n echo 'Sorry, you must be a root'\n\nelse\n apt-get install mosquitto -y\n apt-get install mosquitto-clients -y\n echo \"listener 1883\" >> /etc/mosquitto/mosquitto.conf\n pip install paho-mqtt\n pip3 install paho-mqtt\nfi\n"
},
{
"alpha_fraction": 0.5359400510787964,
"alphanum_fraction": 0.5474986433982849,
"avg_line_length": 31.66666603088379,
"blob_id": "95dde0d2412d8271a998976fbb24f09485df1c01",
"content_id": "e96693c0f97f9c8ee812776aebeb8ab9904f44c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11074,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 339,
"path": "/final_prog/PC/table_section.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from settings import *\nfrom file_writer import read_contents_from_file\nfrom flow_segment import *\nfrom logger import log\nfrom module import Module\n\n\ndef load_table_info():\n \"\"\"\n Load tables from the config/tableConfig.json file.\n \"\"\"\n table_sections = []\n\n data = read_contents_from_file(TABLE_CONFIG_FILE)\n for table_info in data.get(\"tableParts\"):\n table_sections.append(TableSection(\n int(table_info.get(\"id\")),\n int(table_info.get(\"type\")),\n tuple(table_info.get(\"startPosition\"))))\n\n return table_sections\n\n\nclass TableSection(object):\n \"\"\"\n Table Section object, contains flow structure\n \"\"\"\n\n def __init__(self, id, table_type, start_pos):\n super(TableSection, self).__init__()\n \"\"\"\n Init Table Section, set id, table type and flows based on table type\n \"\"\"\n\n # Set fields\n self.id = id\n self.type = table_type\n self.voltage = Voltages.ERROR\n self.connected = False\n self.modules = []\n self.pos = (0, 0)\n self.fake_disabled = False # This is being used for fake syncing table sections to surrounding table sections (for battery)\n\n # Set flows\n if table_type == 1: # final table design\n self.flows = [\n FlowSegment((0, 2), (1, 2)),\n FlowSegment((1, 2), (1, 1)),\n FlowSegment((1, 1), (2, 1)),\n NeighborFlowSegment((2, 1), (2, 0)),\n FlowSegment((2, 1), (3, 1)),\n FlowSegment((3, 1), (3, 2)),\n FlowSegment((3, 2), (4, 2)),\n FlowSegment((3, 2), (3, 3)),\n FlowSegment((3, 3), (2, 3)),\n NeighborFlowSegment((2, 3), (2, 4)),\n FlowSegment((2, 3), (1, 3)),\n FlowSegment((1, 3), (1, 2))\n ]\n elif table_type == 2:\n self.flows = [\n FlowSegment((0, 4), (1, 4)),\n FlowSegment((1, 4), (1, 3)),\n FlowSegment((1, 3), (1, 2)),\n FlowSegment((1, 2), (2, 2)),\n FlowSegment((2, 2), (3, 2)),\n FlowSegment((3, 2), (2, 2)),\n FlowSegment((2, 2), (1, 2)),\n FlowSegment((1, 2), (0, 2)),\n FlowSegment((1, 2), (1, 1)),\n FlowSegment((1, 1), (1, 0)),\n FlowSegment((1, 0), (0, 0))\n ]\n\n else: # Unknown table type\n self.type = 1\n log('Unknown table type initiated', table_type)\n self.flows = []\n\n for flow in self.flows:\n flow.table_section = self\n\n # Set position of flow segments relative to starting position of\n # table section\n self.set_position(start_pos)\n \n def add_module(self, module):\n self.modules.append(module)\n \n def remove_module(self, module):\n self.modules.remove(module)\n\n def set_position(self, position):\n if position is None:\n self.pos = None\n return\n\n position_increase = (position[\n 0] - self.pos[0], position[1] - self.pos[1]) if self.pos is not None else position\n self.pos = position\n for flow_segment in self.flows:\n flow_segment.start_pos = (flow_segment.start_pos[\n 0] + position_increase[0], flow_segment.start_pos[1] + position_increase[1])\n flow_segment.end_pos = (flow_segment.end_pos[\n 0] + position_increase[0], flow_segment.end_pos[1] + position_increase[1])\n\n for module in self.get_placed_modules():\n if hasattr(module, 'position') and module.position is not None:\n module.position = (module.position[\n 0] + position_increase[0], module.position[1] + position_increase[1])\n\n def get_placed_modules(self, module_type=None, no_error_state=False):\n \"\"\"\n Returns placed modules based on given module type, default on all modules\n \"\"\"\n module_type = module_type if module_type else Module\n return [module for module in self.modules if isinstance(module, module_type)]\n\n def get_flows(self, flow_type=None):\n \"\"\"\n Returns flows based on given flow type, default on all flows\n \"\"\"\n 
flow_type = flow_type if flow_type else FlowSegment\n return [f for f in self.flows if isinstance(f, flow_type)]\n\n def get_voltage(self, string=False):\n return Voltages.enum_to_str(self.voltage) if string else self.voltage\n\n def clear_table(self):\n \"\"\"\n Remove all modules and neighbors from the table\n \"\"\"\n for module in self.get_placed_modules():\n module.set_table_section(None)\n\n def get_remaining_power(self, excluded_table_sections=None, split=False):\n \"\"\"\n Get power of this table section and all sections behind it\n \"\"\"\n power = 0\n for module in self.get_placed_modules(DefaultModule):\n power += module.remaining_power\n\n return power\n\n def update(self):\n preferred_voltage = {}\n for module in self.get_placed_modules():\n if module.voltage is None or module.voltage is Voltages.ADAPTIVE:\n continue\n\n if module.voltage not in preferred_voltage:\n preferred_voltage[module.voltage] = 0\n\n preferred_voltage[module.voltage] += 1\n\n if len(preferred_voltage) > 0:\n self.voltage = max(preferred_voltage, key=preferred_voltage.get)\n else:\n for f in self.flows:\n f.state = State.OFF\n \n self.fake_disabled = False\n\n def update_after_calculation(self):\n \"\"\"\n Returns:\n False if the table section is already disabled.\n True if the whole table section gets disabled.\n Load.CRITICAL if the flow segments get set to Load.CRITICAL\n Load.HIGH if the flow segments get set to Load.HIGH\n \"\"\"\n prefer_load = None\n if not self.is_enabled():\n return False\n\n # Flash critical when there is not enough power\n remaining_power = self.get_remaining_power()\n if remaining_power < -200:\n return self.disable()\n\n elif remaining_power < -100:\n for flow_segment in self.flows:\n if flow_segment.direction is not None:\n prefer_load = Load.CRITICAL\n\n elif remaining_power < 0:\n for flow_segment in self.flows:\n if flow_segment.direction is not None:\n prefer_load = Load.HIGH\n\n log(\"Remaining power of <TS #\" + str(self.id) + \"> = \" + str(self.get_remaining_power()))\n\n # Disable flow segments if no power is flowing\n should_disable = True\n for flow_segment in self.flows:\n if flow_segment.direction is not None:\n should_disable = False\n break\n\n if should_disable:\n return self.disable()\n\n if prefer_load is not None and not self.is_synced_to_result(prefer_load):\n self.sync_to_result(prefer_load)\n return prefer_load\n\n return False\n\n def sync_to_result(self, result):\n if not self.is_enabled():\n return False\n\n if self.is_synced_to_result(result):\n return False\n\n if result is True:\n if self.is_battery_placed_on_table_section():\n if self.fake_disabled: return False\n\n self.fake_disabled = True\n return True\n\n return self.disable()\n elif result is Load.CRITICAL:\n for flow_segment in self.flows:\n if flow_segment.direction is not None:\n flow_segment.load = Load.CRITICAL\n\n return True\n elif result is Load.HIGH:\n for flow_segment in self.flows:\n if flow_segment.direction is not None:\n flow_segment.load = Load.HIGH\n\n return True\n return False\n\n def is_synced_to_result(self, result):\n is_synced = True\n if result is True:\n return not self.is_enabled()\n\n if result is Load.CRITICAL:\n for flow_segment in self.flows:\n if flow_segment.direction is not None and flow_segment.load is not Load.CRITICAL:\n is_synced = False\n break\n\n elif result is Load.HIGH:\n for flow_segment in self.flows:\n if flow_segment.direction is not None and flow_segment.load is not Load.HIGH and flow_segment.load is not Load.CRITICAL:\n is_synced = False\n 
break\n\n return is_synced\n\n def disable(self):\n if not self.is_enabled():\n return False\n\n self.voltage = Voltages.ERROR\n\n for flow_segment in self.flows:\n flow_segment.reset()\n flow_segment.state = State.OFF\n\n log(\"<TS #\" + str(self.id) + \"> disabled\")\n\n return True\n \n def is_battery_placed_on_table_section(self):\n for module in self.get_placed_modules():\n if module.voltage is Voltages.ADAPTIVE:\n return True\n\n return False\n \n def enable(self):\n if self.is_enabled():\n return False\n \n for flow_segment in self.flows:\n flow_segment.reset()\n \n self.update()\n return True\n\n def is_enabled(self):\n any_segment_active = False\n for flow_segment in self.flows:\n if flow_segment.state is not State.OFF:\n any_segment_active = True\n break\n\n return any_segment_active\n\n def update_speed_after_calculation(self):\n desired_speed = max(Speed.NORMAL, GET_SPEED(\n self.get_remaining_power()), self.get_speed())\n\n if desired_speed is not self.get_speed():\n self.set_speed(desired_speed)\n return True\n\n return False\n\n def get_speed(self):\n speed = Speed.NORMAL\n for flow_segment in self.flows:\n if flow_segment.speed > speed:\n speed = flow_segment.speed\n\n return speed\n\n def set_speed(self, speed):\n for flow_segment in self.flows:\n flow_segment.speed = speed\n\n def activate(self):\n for f in self.flows:\n f.activate()\n\n def get_header_byte(self):\n voltage = self.voltage if self.voltage is not Voltages.ERROR else 0\n voltage = voltage << 6\n byte = hexify(voltage)\n return [byte]\n\n def get_flow_bytes(self):\n # Get header byte and byte array\n header_byte = self.get_header_byte()\n flow_byte_array = []\n for f in self.flows:\n flow_byte_array += f.get_byte()\n return header_byte + flow_byte_array\n\n def __repr__(self):\n return 'table section {0}, {1} voltage'.format(self.id, self.get_voltage(True))\n"
},
{
"alpha_fraction": 0.7033898234367371,
"alphanum_fraction": 0.7217513918876648,
"avg_line_length": 26.31999969482422,
"blob_id": "1f868184061bff95e0f3339b23530ecf6d7fb116",
"content_id": "f367ed296c883856bd11e98242c7b651b6a05264",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 25,
"path": "/final_prog/Arduino/boss/protocol.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef PROTOCOL_H\r\n#define PROTOCOL_H\r\n\r\n#include <core/MyMessage.h>\r\n\r\n//Message types as described in mysensors library: https://www.mysensors.org/download/serial_api_20\r\nenum MessageType {\r\n\t// outgoing messages \r\n\tREBOOT_BOSS_AND_HELPER_MSG = V_VAR1,\r\n\tMODULE_CHANGE_MSG = V_VAR2,\r\n\tNEIGHBOR_CHANGE_MSG = V_VAR3,\r\n\r\n\t// incoming messages \r\n\tFLOW_CONFIG_CHANGE_MSG = V_VAR4,\r\n\tTIME_SYNC_MSG = V_VAR5,\r\n\tCOLOR_CHANGE_MSG = V_RGB,\r\n};\r\n\r\n//Sends change module (placed or removed) message to main-controller\r\nvoid change_module(uint8_t location, uint32_t module_id);\r\n\r\n//Sends neighbor connected/removed message to main-controller\r\nvoid change_neighbor(uint8_t side, uint32_t table_section_id);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.6431535482406616,
"alphanum_fraction": 0.6763485670089722,
"avg_line_length": 29.25,
"blob_id": "14eee17be3d8f5231ee1448bfe446c6b595e783a",
"content_id": "150b417d7672847a909fbdb7ddb59eb046205096",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 8,
"path": "/final_prog/PC/tests/test_flask_api.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom flask_api import FlowSegmentColorView\n\nclass TestFlaskApi(unittest.TestCase):\n\n def test_format_to_rgb(self):\n hex = \"ff0000\"\n assert FlowSegmentColorView.format_to_rgb(hex) == {'b': 0, 'g': 0, 'r': 1.0}"
},
{
"alpha_fraction": 0.6728624701499939,
"alphanum_fraction": 0.6728624701499939,
"avg_line_length": 17.214284896850586,
"blob_id": "380702e4a73b06918ccd3094ad3b57e104eace02",
"content_id": "a8e19e38cd40cf3737d1a076dae33c1743fa454a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 14,
"path": "/final_prog/Arduino/boss/load.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef LOAD_H\r\n#define LOAD_H\r\n\r\n// Load properties \r\nenum Load {\r\n\tLOAD_NORMAL, //leds by default green\r\n\tLOAD_HIGH, //leds by default orange\r\n\tLOAD_CRITICAL, //leds by default red\r\n};\r\n\r\n//Converts load to string\r\nconst char *load_to_string(Load load);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.6053582429885864,
"alphanum_fraction": 0.6068536043167114,
"avg_line_length": 28.72222137451172,
"blob_id": "aa606d6961bc892b0de13cabc5b70525734cc03a",
"content_id": "4b019428e528b57ed6d2c4b3dac78a498cb03f82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8025,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 270,
"path": "/2_RFID/module.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from settings import *\nfrom file_writer import read_contents_from_file, save_module_config\nfrom logger import log\nimport time\n\n\ndef load_module_info():\n \"\"\"\n Read and return modules from the config/moduleConfig.json file. \n \"\"\"\n\n data = read_contents_from_file(MODULE_CONFIG_FILE)\n modules = []\n\n # Load all configuration types\n config_types = [\n ModuleConfigurationType( # id, name, min_value, max_value, role, voltage\n config[\"id\"],\n config[\"name\"],\n config[\"min\"],\n config[\"max\"],\n Roles.str_to_enum(config[\"role\"]),\n Voltages.str_to_enum(config[\"voltage\"])\n ) for config in data[\"configTypes\"]\n ]\n\n for c in config_types:\n log('config', c)\n\n # Load default modules\n for module in data[\"modules\"]:\n # Create a list of module configurations\n module_configs = [\n ModuleConfiguration( # config_type, value\n next((t for t in config_types if t.id == mc[\"type\"])),\n mc[\"value\"]\n ) for mc in module[\"configurations\"]\n ]\n # Create the module\n module = DefaultModule( # id, name, voltage, configurations\n module[\"id\"],\n module[\"name\"],\n Voltages.str_to_enum(module[\"voltage\"]),\n module_configs)\n\n modules.append(module)\n\n # Load transformer modules\n for module in data[\"transformers\"]:\n # Get linked transformer\n linked = next((m for m in modules if m.id == module[\"linked\"]), None)\n\n # Create the module\n module = TransformerModule( # id, name, voltage, linked module\n module[\"id\"],\n module[\"name\"],\n Voltages.str_to_enum(module[\"voltage\"]),\n linked)\n\n modules.append(module)\n\n # Load transformer modules\n for module in data[\"wireModules\"]:\n # Get linked transformer\n linked = next((m for m in modules if m.id == module[\"linked\"]), None)\n\n # Create the module\n module = WireModule( # id, name, voltage, linked module\n module[\"id\"],\n module[\"name\"],\n Voltages.str_to_enum(module[\"voltage\"]),\n linked)\n\n modules.append(module)\n\n # Load transformer modules\n for module in data[\"importModules\"]:\n # Create the module\n module = ImportExportModule( # id, name, voltage\n module[\"id\"],\n module[\"name\"],\n Voltages.str_to_enum(module[\"voltage\"]))\n\n modules.append(module)\n\n [log('loaded module:', module) for module in modules]\n\n return modules\n\n\nclass ModuleConfigurationType(object):\n \"\"\"\n Module configuration type is a configuration for a module, like an electric\n car or wind turbine. 
Each configuration has its own min and max value.\n \"\"\"\n\n def __init__(self, id, name, min_value, max_value, role, voltage):\n super(ModuleConfigurationType, self).__init__()\n self.id = id\n self.name = name\n self.min_value = min_value\n self.max_value = max_value\n self.role = role # production or consumption\n self.voltage = voltage\n\n def get_role(self):\n return self.role # production or consumption\n\n def __repr__(self):\n return '{0} <{1}-{2}>)'.format(self.name, self.min_value, self.max_value)\n\n\nclass ModuleConfiguration(object):\n \"\"\"\n Module configuration object, has module config type and value\n \"\"\"\n\n def __init__(self, config_type, value):\n super(ModuleConfiguration, self).__init__()\n self.config_type = config_type\n self.value = int(float(value))\n\n def get_value(self):\n return self.value\n\n def get_name(self):\n return self.config_type.name\n\n def set_value(self, value):\n self.value = int(float(value))\n\n def get_voltage(self):\n return self.config_type.voltage\n\n def get_role(self, string=False):\n return Roles.enum_to_str(self.config_type.role) if string else self.config_type.role\n\n def get_min_value(self):\n return self.config_type.min_value\n\n def get_max_value(self):\n return self.config_type.max_value\n\n def get_config_id(self):\n return self.config_type.id\n\n\nclass Module(object):\n \"\"\"\n Module object, contains id, name, voltage.\n \"\"\"\n\n def __init__(self, id, name, voltage):\n super(Module, self).__init__()\n self.id = id\n self.name = name\n self.voltage = voltage\n self.table_section = None\n self.time_placed = None\n self.position = None\n self.priority = 1\n\n def get_voltage(self, string=False):\n return Voltages.enum_to_str(self.voltage) if string else self.voltage\n\n def set_table_section(self, table):\n if self.table_section is not None:\n self.table_section.remove_module(self)\n\n self.table_section = table\n \n if self.table_section is not None:\n self.table_section.add_module(self)\n \n self.time_placed = time.time()\n\n def __repr__(self):\n return '{0} ({1})'.format(self.name, self.id)\n\n\nclass DefaultModule(Module):\n \"\"\"\n Default module, like house or windmill. 
Has configurations to define the \n production and/or consumption of a module.\n \"\"\"\n\n def __init__(self, id, name, voltage, configurations):\n super(DefaultModule, self).__init__(id, name, voltage)\n self.configurations = configurations\n self.remaining_power = 0\n\n if voltage is Voltages.ADAPTIVE:\n self.priority = 0\n self.reset_power()\n\n def get_production(self):\n return sum([c.get_value() for c in self.configurations if c.get_role() is Roles.PRODUCTION])\n\n def get_consumption(self):\n return sum([c.get_value() for c in self.configurations if c.get_role() is Roles.CONSUMPTION])\n\n def reset_power(self):\n self.remaining_power = -self.get_power()\n\n def get_power(self):\n return self.get_consumption() - self.get_production()\n\n def get_configurations(self):\n return self.configurations\n\n def get_configuration(self, config_id):\n config = next(\n (c for c in self.configurations if c.get_config_id() == config_id), None)\n return config\n\n def save_configuration(self, config_id, value):\n save_module_config(MODULE_CONFIG_FILE, self.id, config_id, value)\n\n for config in self.configurations:\n if config.get_config_id() is config_id:\n config.set_value(value)\n break\n\n\nclass ConnectionModule(Module):\n \"\"\"\n Connection module, is linked with another transformer module\n \"\"\"\n\n def __init__(self, id, name, voltage, linked_module):\n super(ConnectionModule, self).__init__(id, name, voltage)\n self.linked_module = linked_module\n if linked_module is not None:\n linked_module.set_linked_module(self)\n\n def set_linked_module(self, linked_module):\n self.linked_module = linked_module\n\n\nclass TransformerModule(ConnectionModule):\n \"\"\"\n Transformer module, is linked with another transformer module\n \"\"\"\n\n def __init__(self, id, name, voltage, linked_module):\n super(TransformerModule, self).__init__(\n id, name, voltage, linked_module)\n\nclass WireModule(ConnectionModule):\n \"\"\"\n Transformer module, is linked with another transformer module\n \"\"\"\n\n def __init__(self, id, name, voltage, linked_module):\n super(WireModule, self).__init__(id, name, voltage, linked_module)\n self.index = 0\n\n def set_table_section(self, table):\n super(WireModule, self).set_table_section(table)\n self.index = 0\n self.linked_module.index = 0\n\nclass ImportExportModule(ConnectionModule):\n \"\"\"\n Import/export module\n \"\"\"\n\n def __init__(self, id, name, voltage):\n linked = ConnectionModule(0, \"non-existing\", Voltages.ERROR, None)\n super(ImportExportModule, self).__init__(id, name, voltage, linked)\n"
},
{
"alpha_fraction": 0.707454264163971,
"alphanum_fraction": 0.7158930897712708,
"avg_line_length": 21.700000762939453,
"blob_id": "be89616cd72893852c264a4d59e90f24b69309d7",
"content_id": "019f848932f0e93aa2ccd419852d54717923f28b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 30,
"path": "/final_prog/Arduino/boss/grid.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef GRID_H\r\n#define GRID_H\r\n\r\n#include \"config.h\"\r\n#include \"flow-segment.h\"\r\n#include \"voltage.h\"\r\n\r\n//Grid properties\r\nstruct Grid {\r\n\tVoltage voltage;\r\n\tFlowSegment flow_segments[FLOW_SEGMENT_COUNT];\r\n};\r\n\r\n//Parsed grid properties\r\nstruct ParsedGrid {\r\n\tuint8_t : 6; //empty bits in header byte\r\n\tuint8_t voltage: 2; //2 bits for voltage in header byte\r\n\tParsedFlowSegment flow_segments[FLOW_SEGMENT_COUNT]; //rest of bytes for flow segments\r\n};\r\n\r\n//Converts parsed grid to normal grid\r\nGrid grid_from_parsed_grid(const ParsedGrid *parsed_grid);\r\n\r\n//Converts byte grid to normal grid\r\nGrid grid_from_bytes(const uint8_t *b);\r\n\r\n//Prints grid information\r\nvoid grid_print(const Grid *grid);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.7482993006706238,
"alphanum_fraction": 0.7482993006706238,
"avg_line_length": 27.399999618530273,
"blob_id": "58d6ff4df75345cfd7e4095e0ae7959189687e09",
"content_id": "59731fb0cc40c113e609c07bcac7351df21dab83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 441,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/final_prog/Arduino/boss/flow-segment-state.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef FLOW_SEGMENT_STATE_H\r\n#define FLOW_SEGMENT_STATE_H\r\n\r\nenum FlowSegmentState {\r\n\tFLOW_SEGMENT_STATE_OFF, //leds off\r\n\tFLOW_SEGMENT_STATE_ERROR, //leds red blinking\r\n\tFLOW_SEGMENT_STATE_PASSIVE, //leds only background color\r\n\tFLOW_SEGMENT_STATE_ACTIVE, //leds background and foreground color\r\n};\r\n\r\n\r\n// Converts flow_segment state to string\r\nconst char *flow_segment_state_to_string(FlowSegmentState flow_segment_state);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.6535617113113403,
"alphanum_fraction": 0.6609575748443604,
"avg_line_length": 31.363636016845703,
"blob_id": "16901ea60f6a8d27f79063b85afa2cf8c7c405f5",
"content_id": "523254f68e6ee1eb8ff0b305728295f855fa7ce8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2569,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 77,
"path": "/1_BlinkLED/First.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nPython MQTT Subscription client - No Username/Password\r\nThomas Varnish (https://github.com/tvarnish), (https://www.instructables.com/member/Tango172)\r\nWritten for my Instructable - \"How to use MQTT with the Raspberry Pi and ESP8266\"\r\n\"\"\"\r\nimport paho.mqtt.client as mqtt\r\nfrom time import time, sleep\r\n\r\n# Don't forget to change the variables for the MQTT broker!\r\nmqtt_topic_receve = \"sendPC\"\r\nmqtt_topic_send = \"getPC\"\r\nmqtt_broker_ip = \"localhost\"\r\n\r\nclient = mqtt.Client()\r\n\r\n\r\n#Variable needs for the program\r\nok = False #Confirmation of reception\r\nsysTime = time() #Time of last step for the program\r\n\r\n# These functions handle what happens when the MQTT client connects\r\n# to the broker, and what happens then the topic receives a message\r\ndef on_connect(client, userdata, flags, rc):\r\n # rc is the error code returned when connecting to the broker\r\n print(\"Connected!\"+str(rc))\r\n # Once the client has connected to the broker, subscribe to the topic\r\n client.subscribe(mqtt_topic_receve)\r\n \r\ndef on_message(client, userdata, msg):\r\n global ok\r\n # This function is called everytime the topic is published to.\r\n # If you want to check each message, and do something depending on\r\n # the content, the code to do this should be run in this function\r\n\r\n message = str(msg.payload)\r\n print(\"Topic: \"+ msg.topic + \"\\nMessage: \" + message)\r\n\r\n if message == \"OK\":\r\n ok = True\r\n # The message itself is stored in the msg variable\r\n # and details about who sent it are stored in userdata\r\n\r\n# Here, we are telling the client which functions are to be run\r\n# on connecting, and on receiving a message\r\nclient.on_connect = on_connect\r\nclient.on_message = on_message\r\n\r\n# Once everything has been set up, we can (finally) connect to the broker\r\n# 1883 is the listener port that the MQTT broker is using\r\nclient.connect(mqtt_broker_ip, 1883)\r\n\r\n\r\n# Once we have told the client to connect, let the client object run itself\r\nclient.loop_start()\r\n\r\nwhile True:\r\n client.publish(mqtt_topic_send, \"ON\")\r\n print(\"ON\")\r\n sysTime = time()\r\n while not ok:\r\n if time() - sysTime > 2:\r\n client.publish(mqtt_topic_send, \"ON\")\r\n print(\"ON\")\r\n sysTime = time()\r\n ok = False\r\n sleep(5)\r\n client.publish(mqtt_topic_send, \"OFF\")\r\n print(\"OFF\")\r\n sysTime = time()\r\n while not ok:\r\n if time() - sysTime > 2:\r\n client.publish(mqtt_topic_send, \"OFF\")\r\n print(\"OFF\")\r\n sysTime = time()\r\n ok = False\r\n sleep(5)\r\nclient.disconnect()\r\n"
},
{
"alpha_fraction": 0.697604775428772,
"alphanum_fraction": 0.7145708799362183,
"avg_line_length": 22.439023971557617,
"blob_id": "819a8eb288d8e86ae0668e8e5a509df2f43cd5d0",
"content_id": "30e68f52b9d5965735a876cca67e35da63eec281",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 41,
"path": "/final_prog/Arduino/boss/rfid.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef RFID_H\r\n#define RFID_H\r\n\r\n#include <MFRC522.h>\r\n\r\n// Default authentication key \r\nextern MFRC522::MIFARE_Key default_key;\r\n \r\nstruct RFID {\r\n\tMFRC522 mfrc522;\r\n\tbool tag_present;\r\n};\r\n\r\nstruct RFID_message {\r\n\tuint8_t sensor_id;\r\n\tbool tag_present;\r\n\tuint32_t tag_id;\r\n};\r\n\r\n//Creates RFID object\r\nRFID RFID_create(uint8_t ss_pin, uint8_t rst_pin);\r\n\r\n//Initializes RFID module\r\nvoid RFID_init(RFID *RFID);\r\n\r\n//Checks if RFID state has changed (tag placed or removed)\r\nbool RFID_state_changed(RFID *RFID);\r\n\r\n//Starts authenticated session with RFID tag. This must be called before any read operation on the tag\r\nbool RFID_start_auth(RFID *RFID);\r\n\r\n//Stops authenticated session with RFID tag. This must be called to end an authenticated session, otherwise no RFID tags can be detected\r\nvoid RFID_stop_auth(RFID *RFID);\r\n\r\n//Reads RFID tag ID (UID)\r\nbool RFID_tag_read_id(RFID *RFID, RFID_message *msg);\r\n\r\n// Prints RFID message\r\nvoid RFID_message_print(const RFID_message *msg);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.5158730149269104,
"alphanum_fraction": 0.5158730149269104,
"avg_line_length": 14.75,
"blob_id": "842b0e1fad4443666152a0fd9e87c68ce1dd3ca5",
"content_id": "12b596033b1b8e5129a0015b34c58cf4c5f8a96f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 8,
"path": "/final_prog/PC/logger.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from settings import DEBUG\n\n\ndef log(*args):\n if DEBUG:\n for val in args:\n print(val),\n print('')\n"
},
{
"alpha_fraction": 0.6344239115715027,
"alphanum_fraction": 0.6429587602615356,
"avg_line_length": 28.29166603088379,
"blob_id": "8a2a89c524e9350baaa9ebddc744513b8748a162",
"content_id": "0e20f7e06cb98b6853e384fa23d28f0337383365",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1406,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 48,
"path": "/final_prog/PC/test_scripts/test_voltage_settings.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nimport pytest\nfrom helper_fns import INVALID_VALUE\n\nfrom settings import Voltages, COLOR_DICT\n\nsys.path.append(\"../\")\n\n\ndef test_enum_to_color():\n fn = Voltages.enum_to_color\n assert fn(Voltages.ERROR) == (1.0, 0.0, 0.0, 1.0)\n assert fn(Voltages.LOW) == COLOR_DICT[\"vlow\"][\"color\"]\n assert fn(Voltages.MEDIUM) == COLOR_DICT[\"vmedium\"][\"color\"]\n assert fn(Voltages.HIGH) == COLOR_DICT[\"vhigh\"][\"color\"]\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n\n\ndef test_enum_to_flow_color():\n fn = Voltages.enum_to_flow_color\n assert fn(Voltages.ERROR) == (1, 1, 1, 1)\n assert fn(Voltages.LOW) == COLOR_DICT[\"vlow\"][\"color\"]\n assert fn(Voltages.MEDIUM) == COLOR_DICT[\"vmedium\"][\"color\"]\n assert fn(Voltages.HIGH) == COLOR_DICT[\"vhigh\"][\"color\"]\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n\n\ndef test_str_to_enum():\n fn = Voltages.str_to_enum\n assert fn(\"error\") == Voltages.ERROR\n assert fn(\"low\") == Voltages.LOW\n assert fn(\"medium\") == Voltages.MEDIUM\n assert fn(\"high\") == Voltages.HIGH\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n\n\ndef test_enum_to_str():\n fn = Voltages.enum_to_str\n assert fn(Voltages.ERROR) == \"Error\"\n assert fn(Voltages.LOW) == \"Low\"\n assert fn(Voltages.MEDIUM) == \"Medium\"\n assert fn(Voltages.HIGH) == \"High\"\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n"
},
{
"alpha_fraction": 0.6790352463722229,
"alphanum_fraction": 0.6790352463722229,
"avg_line_length": 21.45833396911621,
"blob_id": "086d142a9a64bd2db2749613d27563f49e7310a7",
"content_id": "4be7883df0d142d5a9c35517cbbb62ac01454c94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 24,
"path": "/Software_currently_implement/test_scripts/test_role_settings.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nimport pytest\nfrom helper_fns import INVALID_VALUE\n\nfrom settings import Roles\n\nsys.path.append(\"../\")\n\n\ndef test_str_to_enum():\n fn = Roles.str_to_enum\n assert fn(\"production\") == Roles.PRODUCTION\n assert fn(\"consumption\") == Roles.CONSUMPTION\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n\n\ndef test_enum_to_str():\n fn = Roles.enum_to_str\n assert fn(Roles.PRODUCTION) == \"production\"\n assert fn(Roles.CONSUMPTION) == \"consumption\"\n with pytest.raises(Exception):\n fn(INVALID_VALUE)\n"
},
{
"alpha_fraction": 0.7307692170143127,
"alphanum_fraction": 0.7307692170143127,
"avg_line_length": 25,
"blob_id": "cea36827937b2dc67321b866493040ca86ab89e1",
"content_id": "804ed401cf1ca8efb118f8b969eb482b8e12c73f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 1,
"path": "/final_prog/PC/test_scripts/helper_fns.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "INVALID_VALUE = 'invalid'\n"
},
{
"alpha_fraction": 0.7295645475387573,
"alphanum_fraction": 0.735676109790802,
"avg_line_length": 26.45652198791504,
"blob_id": "093c7866cd774f79e3b427c2d9be060da4b52678",
"content_id": "88e387c097f3dd6f8dcea27b756b59acce09fa66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1309,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 46,
"path": "/final_prog/Arduino/boss/ledstrip.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef LEDSTRIP_H\r\n#define LEDSTRIP_H\r\n\r\n#include \"flow-segment-state.h\"\r\n#include \"load.h\"\r\n#include \"grid.h\"\r\n\r\nextern bool testReady;\r\n\r\nstruct LedstripSegment {\r\n\tFlowSegmentState state;\r\n\tLoad load;\r\n\tbool direction;\r\n\tuint8_t speed; \r\n\tuint8_t position;\r\n};\r\n\r\n// Sets up the led strip\r\nvoid ledstrip_setup();\r\n\r\n// Updates the led strip animation\r\n// @param force Set this to true to force an update. Defaults to false\r\nvoid ledstrip_update(bool force = false);\r\n\r\n// Calculates animation position of led strip segment\r\n// @param segment Pointer to the LED strip segment\r\nuint8_t ledstrip_calculate_position(LedstripSegment *segment);\r\n\r\n// Shows active or passive flow_segment on led strip segment\r\n// @param segment The segment to show the new flow segment on\r\n// @param first_led The first position to start the flow\r\nvoid ledstrip_show_flow_segment(LedstripSegment *segment, uint8_t first_led);\r\n\r\n// Shows error indication on led strip segment\r\nvoid ledstrip_show_error(LedstripSegment *segment, uint8_t first_led);\r\n\r\n// Sets grid to visualize with led strip\r\nvoid ledstrip_set_grid(const Grid *grid);\r\n\r\n// Sets the LED strip colors\r\nvoid ledstrip_set_color(uint8_t id, uint32_t rgb);\r\n\r\n// Tests ledstrip by color, segments and individual leds\r\nbool ledstrip_test(bool testReady);\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.6297872066497803,
"alphanum_fraction": 0.6297872066497803,
"avg_line_length": 17.58333396911621,
"blob_id": "b19756b740c0ece4e2da7c070d41d751dc7b3d40",
"content_id": "d832eea7da90febe1b82d359cf7411e0fa7b0d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/final_prog/Arduino/boss/voltage.cpp",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include \"voltage.h\"\r\n\r\nconst char *voltage_string[] = {\r\n\t[VOLTAGE_LOW] = \"Low\",\r\n\t[VOLTAGE_MEDIUM] = \"Medium\",\r\n\t[VOLTAGE_HIGH] = \"High\",\r\n};\r\n\r\nconst char *voltage_to_string(Voltage voltage)\r\n{\r\n\treturn voltage_string[voltage];\r\n}\r\n"
},
{
"alpha_fraction": 0.648419201374054,
"alphanum_fraction": 0.6794496774673462,
"avg_line_length": 26,
"blob_id": "b042034ecdcac81846878f2074ac8e5d8f5a07aa",
"content_id": "032d558ea947008461d04c1657973b90e08d19a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3416,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 122,
"path": "/final_prog/Arduino/boss/config.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef CONFIG_H\r\n#define CONFIG_H\r\n\r\n/* ---------------To setup table ------------------\r\n * You need change only this file\r\n * With the following variables: \r\n */\r\n#define TABLE_ID \"3\"\r\n#define SSID_WIFI \"Test\"\r\n#define PASSWORD_WIFI \"qwerty1234\"\r\n#define MQTT_SERVER_IP 192, 168, 56, 101\r\n#define MQTT_SERVER_PORT 1883 //The default port is 1883\r\n/* ----------- The setup is finished -------------*/\r\n\r\n\r\n#define FASTLED_INTERNAL\r\n#include <FastLED.h>\r\n\r\n#include \"rfid.h\"\r\n#include \"sensor-info.h\"\r\n\r\n// MySensors radio defines\r\n#define MY_NODE_ID TABLE_SECTION_ID\r\n\r\n// Table section ID set node id between 1-254\r\n// You only need to change TABLE_ID\r\n#define TABLE_SECTION_ID int(TABLE_ID)\r\n\r\n\r\n// Enable the MY_DEBUG define to show debug messages\r\n#define MY_DEBUG\r\n\r\n/**************************************\r\n* Ethernet Configuration *\r\n***************************************/\r\n#define MY_GATEWAY_MQTT_CLIENT\r\n#define MY_GATEWAY_ESP32\r\n\r\n/** Configuration of WiFi */\r\n#define MY_WIFI_SSID SSID_WIFI\r\n#define MY_WIFI_PASSWORD PASSWORD_WIFI\r\n#define MY_HOSTNAME \"Boss\" TABLE_ID\r\n\r\n/** MQTT Configuration **/\r\n#define MY_MQTT_CLIENT_ID \"Boss\" TABLE_ID\r\n#define MY_MQTT_PUBLISH_TOPIC_PREFIX \"sendToPc/\" TABLE_ID\r\n#define MY_MQTT_SUBSCRIBE_TOPIC_PREFIX \"getFromPc/\" TABLE_ID\r\n#define MY_CONTROLLER_IP_ADDRESS MQTT_SERVER_IP\r\n#define MY_PORT MQTT_SERVER_PORT\r\n/*************************************/\r\n\r\n\r\n// Variables for serial communications\r\n#define BAUDRATE 115200\r\n#define TX_PIN 17\r\n#define RX_PIN 16\r\n\r\n// ESP32 helper reset pin\r\n#define HELPER_RST_PIN A3\r\n\r\n// Shared RFID reset pin\r\n#define RFID_RST_PIN A0\r\n\r\n// RFID data pins \r\n#define RFID0_SDA_PIN 13\r\n#define RFID1_SDA_PIN 12\r\n#define RFID2_SDA_PIN 27\r\n#define RFID3_SDA_PIN 33\r\n\r\n// Number of RFIDs on this Arduino\r\n#define RFID_COUNT 4\r\n\r\n// Delay in ms between RFID checks\r\n#define RFID_CHECK_DELAY 75\r\n\r\n// Led strip data pin\r\n#define LEDSTRIP_DATA_PIN 14\r\n\r\n// Led strip clock pin\r\n#define LEDSTRIP_CLK_PIN 32\r\n\r\n// Led strip type with clock pin (currently SK9822 is used, not APA102 even though it says on the label because of chinese clone manufacturers)\r\n// if you do use APA102 make sure that they are not clones, because of the difference in CLOCK signals ------ you can add more types, see: https://github.com/FastLED/FastLED/wiki/Overview \r\n#define LEDSTRIP_TYPE SK9822\r\n//define LEDSTRIP_TYPE APA102\r\n//#define LEDSTRIP_TYPE WS2801\r\n//#define LEDSTRIP_TYPE DOTSTAR\r\n\r\n// Led strip types without clock pin ---- you can add more types, see: https://github.com/FastLED/FastLED/wiki/Overview\r\n//#define LEDSTRIP_TYPE_WITHOUT_CLOCK WS2812\r\n//#define LEDSTRIP_TYPE_WITHOUT_CLOCK WS2812B\r\n//#define LEDSTRIP_TYPE_WITHOUT_CLOCK WS2811\r\n//#define LEDSTRIP_TYPE_WITHOUT_CLOCK NEOPIXEL\r\n\r\n// Number of flow_segments\r\n#define FLOW_SEGMENT_COUNT 12\r\n\r\n// Number of LEDs in each flow_segment\r\n#define FLOW_SEGMENT_LENGTH 5\r\n\r\n// Adjust brightness of LEDs\r\n#define LED_BRIGHTNESS 20 \r\n\r\n// Total number of LEDS\r\n#define LED_COUNT (FLOW_SEGMENT_COUNT * FLOW_SEGMENT_LENGTH)\r\n\r\n// Array of RFID sensors with grid positions\r\nextern const SensorInfo sensor_info[];\r\n\r\n// Create RFIDs\r\nextern RFID RFIDs[];\r\n\r\n// LED strip colors\r\nextern CRGB off_color;\r\nextern CRGB error_color;\r\nextern CRGB voltage_colors[];\r\nextern CRGB 
load_colors[];\r\n\r\n// Edit module id if you want to edit test module\r\n#define TEST_MODULE_ID 439560267\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.6274809241294861,
"alphanum_fraction": 0.6664122343063354,
"avg_line_length": 26.29166603088379,
"blob_id": "4428b6b13b431ca349a452461970a27670b9bfe0",
"content_id": "a0e39e094ee2d8638b6928711dd84abb1e03fbbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1310,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 48,
"path": "/final_prog/PC/test_scripts/test_grid.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom flow_segment import FlowSegment\nfrom grid import Grid\nfrom table_section import TableSection\n\nsys.path.append(\"../\")\n\n# Create grid for testing\ngrid = Grid(None)\ntablesection = TableSection(0, 1, (0, 0))\nflows = tablesection.get_flows()\n\nfor flow in flows:\n grid.add_flow_segment(flow)\n\n\ndef test_distance_between_correct():\n \"\"\"\n Test if the distance_between method uses the correct formula to calculate distance between two points\n \"\"\"\n node1 = (1, 1)\n node2 = (2, 3)\n # Distance between 1,1 and 2,3 = square root of 1*1+2*2 (5) according to\n # Pythagoras\n assert grid.distance_between(node1, node2) == 5 ** 0.5\n\n\ndef test_get_neighbour_works():\n \"\"\"\n Test if correct neighbours are returned for top-left node for table type 1.\n \"\"\"\n node = (1, 1)\n assert grid.neighbors(node) == [(2, 1), (1, 2)]\n\n\ndef test_get_flow_segment():\n \"\"\"\n Test if a flow segment is returned when you want to retrieve an existing flow segment by start- and endpoints\n \"\"\"\n assert isinstance(grid.flow_find_segments((0, 0), (1, 1))[0], FlowSegment)\n\n\ndef test_get_no_flow_segment():\n \"\"\"\n Test if no flow segment is returned when you use points of a non-existing flow segment\n \"\"\"\n assert len(grid.flow_find_segments((2402, 141), (5151, 566))) is 0\n"
},
{
"alpha_fraction": 0.5514011383056641,
"alphanum_fraction": 0.5533205270767212,
"avg_line_length": 38.3504524230957,
"blob_id": "cad71ff8ab9993b65b3f2730db54771ce60439e2",
"content_id": "253f952fe93972ca342baa5ed096918ea073a5b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13025,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 331,
"path": "/final_prog/PC/smart_grid_table.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from settings import *\nfrom logger import log\nfrom table_section import *\nfrom flow_segment import *\nfrom module import *\nfrom grid import Grid\nimport datetime\n\nimport json\n\n\nclass SmartGridTable(object):\n \"\"\"\n SmartGridTable\n \"\"\"\n\n def __init__(self, grid):\n super(SmartGridTable, self).__init__()\n self.table_sections = []\n self.modules = []\n self.load_modules()\n self.load_table_info()\n self.grid = grid\n self.last_recalculation_request_time = None\n\n def load_modules(self):\n self.modules = load_module_info()\n print self.modules\n\n def load_table_info(self):\n self.table_sections = load_table_info()\n\n def get_module(self, id):\n return next((module for module in self.modules if module.id == id), None)\n\n def get_modules(self):\n return self.modules\n\n def get_table_section(self, id):\n return next((tp for tp in self.get_table_sections() if tp.id == id), None)\n\n def get_table_sections(self):\n return [tp for tp in self.table_sections if tp.connected]\n\n def write_table_config(self):\n data = {\n \"tableParts\": []\n }\n for table_section in self.table_sections:\n data[\"tableParts\"].append({\n \"id\": table_section.id,\n \"type\": table_section.type,\n \"startPosition\": table_section.pos\n })\n\n with open('config/tableConfig.json', 'w') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=True)\n\n def table_connected(self, table_id, location_id, payload):\n \"\"\"\n A Table Section first connection to the main controller\n \"\"\"\n table_section = next(\n (tp for tp in self.table_sections if tp.id == table_id), None)\n\n if not table_section.connected:\n log('New Table Section {0} connected'.format(table_id))\n table_section.connected = True\n\n # Add flow segments of table section to grid.\n for flow_segment in table_section.get_flows():\n self.grid.add_flow_segment(flow_segment)\n else:\n log('duplicate Table Section', table_section.id)\n table_section.clear_table()\n\n def module_config_changed(self, module_id, config_id, value):\n \"\"\"\n Module configuration changed\n \"\"\"\n module = self.get_module(module_id)\n if module is not None:\n module.save_configuration(config_id, value)\n if module.table_section is not None:\n self.last_recalculation_request_time = datetime.datetime.now()\n return True\n return False\n\n def module_placed(self, table_id, location_id, module_id):\n \"\"\"\n A module is placed or removed from a Table Section\n \"\"\"\n\n table_section = next(\n (tp for tp in self.get_table_sections() if tp.id == table_id), None)\n module = next((m for m in self.modules if m.id == module_id), None)\n\n if module is None and module_id is not 0:\n log('Unknown module id \"{0}\" using None instead'.format(module_id))\n\n # Place module on table section\n if table_section:\n unparsed_position = TABLE_PART[table_section.type][\n \"module_locations\"][location_id][\"position\"]\n position = (unparsed_position[\n 0] + table_section.pos[0], unparsed_position[1] + table_section.pos[1])\n \n log('Module {0} placed on tablesection {1}, location {2}, position {3}'.format(\n module, table_id, location_id, position))\n if module is not None:\n module.position = position\n module.set_table_section(table_section)\n\n if hasattr(module, 'linked_module') and module.linked_module.position is None:\n # Get closest position to place a module\n distance = 3\n closest_position = None\n closest_ts = None\n\n for ts in self.table_sections:\n if ts is table_section: continue\n \n for location in TABLE_PART[ts.type][\"module_locations\"]:\n 
unparsed_position = location[\"position\"]\n module_position = (unparsed_position[0] + ts.pos[0], unparsed_position[1] + ts.pos[1])\n\n if self.grid.get_module_on_position(module_position):\n continue\n \n location_distance = self.grid.get_actual_distance_between_nodes(position, module_position)\n\n if location_distance < distance:\n distance = location_distance\n closest_position = module_position\n closest_ts = ts\n\n if closest_position is None:\n log('Tried to place a linked module, but there is no place to set the linked module less than ' + str(distance) + ' units away.')\n else:\n log('Added a linked module on position ' + str(closest_position))\n module.linked_module.position = closest_position\n module.linked_module.set_table_section(closest_ts)\n else:\n module = self.grid.get_module_on_position(position)\n\n \n if hasattr(module, 'linked_module') and module.linked_module.position is not None:\n module.linked_module.position = None\n module.linked_module.set_table_section(None)\n\n if hasattr(module, 'position'):\n module.set_table_section(None)\n module.position = None\n\n self.last_recalculation_request_time = datetime.datetime.now()\n return True\n return False\n\n def calculate(self):\n start = datetime.datetime.now()\n\n self.last_recalculation_request_time = None\n\n self.grid.reset()\n for table_section in self.get_table_sections():\n table_section.update()\n \n self.check_table_sections()\n self.grid.calculate()\n\n # Sync table section load and disabled state\n continue_loop = True\n while continue_loop:\n continue_loop = False\n for table_section in self.get_table_sections():\n result = table_section.update_after_calculation()\n if result is not False:\n continue_loop = True\n\n self.sync_attached_table_sections(table_section, result)\n\n # Sync table section speed\n continue_loop = True\n while continue_loop:\n continue_loop = False\n for table_section in self.get_table_sections():\n result = table_section.update_speed_after_calculation()\n if result:\n continue_loop = True\n\n self.sync_speed_of_attached_table_sections(table_section)\n\n self.grid.give_power_back_to_modules()\n\n end = datetime.datetime.now()\n delta = end - start\n\n log('* Finished calculating in ' + str(delta.total_seconds() * 1000) + 'ms.')\n \n def check_table_sections(self):\n log ('> Checking table sections for transformers and batteries')\n active_table_sections = []\n low_voltage_table_sections = []\n\n for table_section in self.table_sections:\n for module in table_section.modules:\n if not isinstance(module, TransformerModule) and not module.get_configuration(15):\n continue\n\n # Check if any module can reach the transformer module\n should_be_active = True\n for other_module in table_section.modules:\n if other_module is module:\n continue\n path = self.grid.astar(other_module.position, module.position)\n\n if path is None:\n should_be_active = False\n continue\n \n should_be_active = True\n break\n\n if should_be_active:\n active_table_sections.append(table_section)\n\n break\n \n if table_section not in active_table_sections:\n table_section.disable()\n low_voltage_table_sections.append(table_section)\n\n # Enable attached table sections\n while len(active_table_sections):\n active_table_section = active_table_sections[0]\n active_table_section.enable()\n\n for flow_segment in active_table_section.flows:\n if not isinstance(flow_segment, NeighborFlowSegment):\n continue\n \n affected_flow_segments = self.grid.find_flow_segments_on_position(flow_segment.start_pos) + 
self.grid.find_flow_segments_on_position(flow_segment.end_pos)\n\n for affected_flow_segment in affected_flow_segments:\n if affected_flow_segment.table_section is flow_segment.table_section:\n continue\n\n log (\"Trying to sync \" + str(affected_flow_segment.table_section.id))\n \n synced = affected_flow_segment.table_section.enable()\n\n if synced:\n active_table_sections.append(affected_flow_segment.table_section)\n log(\"<TS #\" + str(affected_flow_segment.table_section.id) + \"> has been reactivated as active <TS #\" + str(active_table_section.id) + \"> is attached to it\")\n \n active_table_sections = active_table_sections[1:]\n \n # Enable a low voltage table section if there is a battery module placed\n for table_section in low_voltage_table_sections:\n for module in table_section.modules:\n if module.voltage is not Voltages.ADAPTIVE:\n continue\n \n log (\"<TS #\" + str(table_section.id) + \"> has been re-enabled as there is a battery on it.\")\n table_section.enable()\n break\n \n log('* Finished checking table sections.')\n\n\n def sync_speed_of_attached_table_sections(self, table_section):\n for flow in table_section.flows:\n if not isinstance(flow, NeighborFlowSegment):\n continue\n if not flow.enabled:\n continue\n\n affected_flow_segments = self.grid.find_flow_segments_on_position(\n flow.start_pos) + self.grid.find_flow_segments_on_position(flow.end_pos)\n\n for affected_flow_segment in affected_flow_segments:\n if not affected_flow_segment.enabled:\n continue\n if affected_flow_segment.table_section is table_section:\n continue\n\n if affected_flow_segment.table_section.get_speed() < table_section.get_speed():\n affected_flow_segment.table_section.set_speed(\n table_section.get_speed())\n self.sync_speed_of_attached_table_sections(\n affected_flow_segment.table_section)\n\n def sync_attached_table_sections(self, table_section, result):\n for flow in table_section.flows:\n if not isinstance(flow, NeighborFlowSegment):\n continue\n if not flow.enabled:\n continue\n\n affected_flow_segments = self.grid.find_flow_segments_on_position(\n flow.start_pos) + self.grid.find_flow_segments_on_position(flow.end_pos)\n\n for affected_flow_segment in affected_flow_segments:\n if not affected_flow_segment.enabled:\n continue\n if affected_flow_segment.table_section is table_section:\n continue\n\n affected_result = affected_flow_segment.table_section.sync_to_result(\n result)\n log(\"Affecting \" + str(table_section.id) +\n \" to result \" + str(result))\n\n if affected_result:\n self.sync_attached_table_sections(\n affected_flow_segment.table_section, result)\n log(\"Affected result = \" + str(affected_result))\n\n def get_flow_configurations(self):\n connected_table_sections = self.get_table_sections()\n flow_configs = []\n\n # Get config\n for table_section in connected_table_sections:\n flow_config_string = ''.join(table_section.get_flow_bytes())\n flow_configs.append({\n 'destination': table_section.id,\n 'config': flow_config_string\n })\n\n return flow_configs\n"
},
{
"alpha_fraction": 0.6287557482719421,
"alphanum_fraction": 0.6353759765625,
"avg_line_length": 25.395349502563477,
"blob_id": "3a81ebb28b0599c4695ea2284b097af280863b4e",
"content_id": "f9b5672a41e9542e73a96ad2452404aa6c2dc172",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5891,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 215,
"path": "/final_prog/Arduino/boss/boss.ino",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "/*\r\n * Smart Grid TableController handles messages received by Smart Grid MainController\r\n * It displays a simulation of the electrical grid between consumers and producers \r\n * \r\n * The TableController sends messages to the MainController when a module/other table is placed.\r\n * The MainController receives these messages and does the calculation of the simulation. \r\n * When the calculation is done, the MainController sends the grid/flow configuration back and the TableController shows the simulation\r\n * \r\n * For configurations see config.h\r\n * \r\n * Created by: Joris van Zeeland, Jelle Bouwhuis, Kevin Taartmans, Sam Teuwsen, Willem van der Krol, Derk Wiegerinck\r\n * Contact: [email protected]\r\n * Date: July 11th, 2017\r\n * Commisioned by: Dr. Ballard Asare-Bediako, Dhr. Eddy Luursema \r\n * Location: HAN University of Applied Sciences - Arnhem\r\n */\r\n\r\n#include \"config.h\"\r\n\r\n#include <MySensors.h>\r\n\r\n#include \"ledstrip.h\"\r\n#include \"grid.h\"\r\n#include \"protocol.h\"\r\n#include \"rfid.h\"\r\n#include \"synced-millis.h\"\r\n\r\n//present tablesection to maincontroller\r\nvoid presentation() {\r\n present(TABLE_SECTION_ID, S_CUSTOM);\r\n}\r\n\r\nvoid setup()\r\n{\r\n /* start Serials:\r\n * Serial is used to print something in the serial monitor\r\n * Serial2 is used to receive a piece of information from ESP helper\r\n */\r\n Serial.begin(BAUDRATE);\r\n Serial2.begin(BAUDRATE, SERIAL_8N1, RX_PIN, TX_PIN);\r\n \r\n // start SPI for RFID readers\r\n SPI.begin();\r\n\r\n // initialize RFID readers \r\n for (size_t i = 0; i < RFID_COUNT; i++)\r\n {\r\n RFID_init(&RFIDs[i]);\r\n }\r\n\r\n // initialize led strips\r\n ledstrip_setup();\r\n\r\n //Set RST pin on helper high default status\r\n pinMode(HELPER_RST_PIN, OUTPUT);\r\n digitalWrite(HELPER_RST_PIN, LOW);\r\n delay(5);\r\n digitalWrite(HELPER_RST_PIN, HIGH);\r\n\r\n} // End setup()\r\n\r\nvoid loop()\r\n{\r\n // handle an RFID message on this ESP32\r\n for (int i=0; i<RFID_COUNT; i++){\r\n handle_RFID(&RFIDs[i], i);\r\n }\r\n\r\n /* handle an RFID message received by serial:\r\n * Receives the first byte which contain the sensor_id (config.cpp for more information)\r\n * The receives the second byte for the tag_present\r\n * The lasts bytes it's for the tag_id which is a 4-bytes value\r\n */\r\n if (Serial2.available()){\r\n noInterrupts();\r\n RFID_message RFID_msg;\r\n RFID_msg.sensor_id = Serial2.read() + RFID_COUNT;\r\n RFID_msg.tag_present = (bool) Serial2.read();\r\n \r\n if (RFID_msg.tag_present){\r\n for (int i=0; i<4; i++){\r\n RFID_msg.tag_id = (RFID_msg.tag_id >> 8) | (Serial2.read() << 24);\r\n } \r\n }\r\n interrupts();\r\n handle_RFID_message(&RFID_msg);\r\n }\r\n \r\n ledstrip_update();\r\n} // End loop()\r\n\r\n// Handles incoming message from main-controller. 
This is a built-in function from the MySensors library\r\nvoid receive(const MyMessage &msg)\r\n{\r\n // if TEST_MODULE is placed don't receive messages\r\n if (ledstrip_test(testReady)) {\r\n return;\r\n }\r\n\r\n // Check if it needs a reboot\r\n if (msg.type == REBOOT_BOSS_AND_HELPER_MSG) {\r\n Serial.println(\"Reboot command received from maincontroller\");\r\n reboot_boss_and_helper();\r\n }\r\n\r\n // Check if there is an update for the flow_segment configuration\r\n if (msg.type == FLOW_CONFIG_CHANGE_MSG)\r\n {\r\n Grid grid = grid_from_parsed_grid((const ParsedGrid*) msg.getCustom());\r\n\r\n ledstrip_set_grid(&grid);\r\n\r\n // Force led strip update\r\n ledstrip_update(true);\r\n\r\n#ifdef MY_DEBUG\r\n // If debugging is enabled, this will print the energy grid (grid) info\r\n grid_print(&grid);\r\n#endif\r\n\r\n }\r\n // Check if there is a color change message \r\n else if (msg.type == COLOR_CHANGE_MSG)\r\n {\r\n // convert string to RGB value\r\n char rgbHex[7];\r\n msg.getString(rgbHex);\r\n rgbHex[6] = '\\0';\r\n\r\n uint32_t rgb = strtoul(rgbHex, NULL, 16);\r\n\r\n ledstrip_set_color(msg.sensor, rgb);\r\n\r\n // force led strip update\r\n ledstrip_update(true);\r\n }\r\n // Check if there is a time synchronization message (for cross-table segments flow_segment synchronization)\r\n else if (msg.type == TIME_SYNC_MSG)\r\n {\r\n set_millis(msg.getULong());\r\n }\r\n\r\n\r\n} // End receive()\r\n\r\n// Handles RFID tag detection from boss\r\nvoid handle_RFID(RFID *RFID, uint8_t sensor_id)\r\n{\r\n // Jump out of the function if the RFID state has not changed\r\n if (!RFID_state_changed(RFID))\r\n {\r\n return;\r\n }\r\n\r\n // create RFID message\r\n RFID_message msg = {\r\n .sensor_id = sensor_id,\r\n .tag_present = RFID->tag_present,\r\n };\r\n\r\n // read tag ID if a tag is present\r\n if (msg.tag_present)\r\n {\r\n bool tag_id_read = RFID_start_auth(RFID) &&\r\n RFID_tag_read_id(RFID, &msg);\r\n\r\n RFID_stop_auth(RFID);\r\n \r\n // Jump out of the function if the tag can't be read properly\r\n if (!tag_id_read)\r\n {\r\n return;\r\n }\r\n }\r\n handle_RFID_message(&msg);\r\n} // End handle_RFID()\r\n\r\n// Processes the incoming RFID message\r\nvoid handle_RFID_message(const RFID_message *msg)\r\n{\r\n #ifdef MY_DEBUG\r\n // If debugging is enabled, this prints the RFID message\r\n RFID_message_print(msg);\r\n #endif\r\n\r\n // report change to main controller\r\n const SensorInfo *sensor = &sensor_info[msg->sensor_id];\r\n\r\n if (sensor->type == MODULE_SENSOR)\r\n {\r\n uint32_t module_id = msg->tag_present ? msg->tag_id : 0;\r\n\r\n change_module(sensor->location, module_id);\r\n }\r\n else // If the sensor type is not a module sensor, it must be a table detection sensor\r\n {\r\n uint32_t table_section_id = msg->tag_present ? msg->tag_id : 0;\r\n\r\n change_neighbor(sensor->location, table_section_id);\r\n }\r\n\r\n // if TEST_MODULE is placed execute ledstrip_test\r\n if (msg->tag_id == TEST_MODULE_ID && msg->tag_present == true)\r\n {\r\n Serial.println(\"Test module is placed\");\r\n testReady = false;\r\n testReady = ledstrip_test(testReady);\r\n }\r\n} // End handle_RFID_message()\r\n\r\nvoid reboot_boss_and_helper() {\r\n Serial.println(\"rebooting system\");\r\n ESP.restart();\r\n //wait for it to reboot\r\n} \r\n"
},
{
"alpha_fraction": 0.7128801345825195,
"alphanum_fraction": 0.7128801345825195,
"avg_line_length": 29.05555534362793,
"blob_id": "10885dd8a0ed7bf8db2233eacb81f09ab9e41e4f",
"content_id": "d90ed7a8560b18ee47cf12ad703d4e6fea800aa9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1118,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 36,
"path": "/final_prog/Arduino/boss/flow-segment.cpp",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include <Arduino.h>\r\n\r\n#include \"flow-segment.h\"\r\n\r\n//sets flow segment properties with parsed flow segment properties that are received in FLOW_CONFIG_CHANGE_MSG from maincontroller\r\nFlowSegment flow_segment_from_parsed_flow_segment(const ParsedFlowSegment *parsed_flow_segment)\r\n{\r\n\tFlowSegment flow_segment;\r\n\r\n\tflow_segment.state = (FlowSegmentState) parsed_flow_segment->state;\r\n\tflow_segment.load = (Load) parsed_flow_segment->load;\r\n\tflow_segment.speed = parsed_flow_segment->speed;\r\n\tflow_segment.direction = parsed_flow_segment->direction;\r\n\r\n\treturn flow_segment;\r\n}\r\n\r\n//print the state, load, direction(direction or normal), \r\nvoid flow_segment_print(const FlowSegment *flow_segment)\r\n{\r\n\tSerial.print(\"State: \");\r\n\tSerial.print(flow_segment_state_to_string(flow_segment->state));\r\n\tSerial.print(\", \");\r\n\r\n\tSerial.print(\"Load: \");\r\n\tSerial.print(load_to_string(flow_segment->load));\r\n\tSerial.print(\", \");\r\n\r\n\tSerial.print(\"Reversed: \");\r\n\tSerial.print(flow_segment->direction ? \"yes\" : \"no\");\r\n\tSerial.print(\", \");\r\n\r\n\tSerial.print(\"Speed: \");\r\n\tSerial.print(flow_segment->speed);\r\n\tSerial.println();\r\n}\r\n"
},
{
"alpha_fraction": 0.5875937342643738,
"alphanum_fraction": 0.6196318864822388,
"avg_line_length": 21.661291122436523,
"blob_id": "09caa9353b4478046203fde4a77d437c94bbd279",
"content_id": "948e4b66ef72a9430ceb2f51d7a1f6fdbcbabd9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1467,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 62,
"path": "/3_MySensors/3_MySensors.ino",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#define MY_DEBUG\r\n#define MY_NODE_ID 3\r\n//#define CONFIG_AUTOSTART_ARDUINO 1\r\n/**********************************************\r\n* Ethernet Gateway Transport Defaults */\r\n#define MY_GATEWAY_MQTT_CLIENT\r\n#define MY_GATEWAY_ESP32\r\n\r\n/** Configuration of WiFi */\r\n#define MY_WIFI_SSID \"Test\"\r\n#define MY_WIFI_PASSWORD \"qwerty1234\"\r\n#define MY_HOSTNAME \"TestRight\"\r\n\r\n/** MQTT Configuration **/\r\n#define MY_MQTT_PUBLISH_TOPIC_PREFIX \"sendToPc\"\r\n#define MY_MQTT_SUBSCRIBE_TOPIC_PREFIX \"getFromPc\"\r\n#define MY_MQTT_CLIENT_ID \"Boss2\"\r\n#define MY_CONTROLLER_IP_ADDRESS 192, 168, 56, 101\r\n#define MY_PORT 1883\r\n/**********************************************/\r\n\r\n//#define FASTLED_INTERNAL\r\n//#include <FastLED.h>\r\n//#include <ESP32Wifi.h>\r\n#include <MySensors.h>\r\n//#define MY_HOSTNAME \"Boss2\"\r\n\r\n#define MY_BAUD_RATE 115200\r\n#define OPEN 1\r\n#define CLOSE 0\r\n#define CHILD_ID 1\r\n\r\nMyMessage msg;\r\n\r\nuint8_t value = OPEN;\r\n\r\nvoid presentation() {\r\n present(CHILD_ID, S_DOOR);\r\n}\r\n\r\nvoid setup(){\r\n Serial.begin(115200);\r\n /*Serial.println(\"Started clearing. Please wait...\");\r\n for (uint16_t i=0; i<EEPROM_LOCAL_CONFIG_ADDRESS; i++) {\r\n hwWriteConfig(i,0xFF);\r\n }\r\n Serial.println(\"Clearing done.\");*/\r\n msg.setType(V_TRIPPED);\r\n msg.setSensor(CHILD_ID);\r\n}\r\n\r\nvoid loop() {\r\n /*value = value == OPEN ? CLOSE : OPEN;\r\n send(msg.set(value));\r\n Serial.println(\"\");\r\n delay(1000);\r\n*/}\r\n\r\nvoid receive(const MyMessage &msg)\r\n{\r\n Serial.println(\"Smgth\");\r\n}\r\n"
},
{
"alpha_fraction": 0.7032424807548523,
"alphanum_fraction": 0.7169303297996521,
"avg_line_length": 59.625850677490234,
"blob_id": "9136e0060e6f369ee2df9aa0e502b9456b9c9d64",
"content_id": "d31be4c6ea9dc9d625d4b72e5dd2254747c5823a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8945,
"license_type": "no_license",
"max_line_length": 774,
"num_lines": 147,
"path": "/Software_currently_implement/README.md",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "# Installation\nMake sure you have pip installed. Run `pip2 install -r requirements.txt` from the root of the project to install the modules that are required by Python. The requirements.txt file contains all the modules that the software needs.\n\nTo run the application just execute `sudo python2 smart_grid_app.py` from the root of the project. Sudo is needed to access `/dev/ttyMySensorsGateway`, and should probably be fixed in the future.\n\n# REST API\nThe application contains a REST API so it can be controlled by external applications. It's documented in the file [API.md](API.md).\n\n# Grid\nThe way the flows are generated is by using a grid. This is a total rewrite of the old system of the smart grid table. Basic keywords are graph theory, vertice, nodes and A-Star pathfinding.\n\n## Basic theory\nA node is a point on the table which is represented by a X-coordinate and Y-coordinate, in that order. Starting from the top left, represented by `(0, 0)`, node `(4, 3)` is a point on the table which is 4 to the right and 3 down from the top left.\nIn Python a node is represented as the object 'tuple', which can be compared to an array with 2 elements in it, the only difference is that a tuple is immutable. \n\n\n## Table sections\n### Layout\n| Symbol | Meaning\n| ----------- | -------\n| `x` | A node where a module can be placed\n| `—` and `|` | A single flow segment\n| `=` | Two flow segments that represent the same flow\n\n#### Table section 1\n```\n |\n x — — x\n | |\nx — — x\n | |\n x — — x\n |\n```\n\n#### Table section 2\n```\nx —\n |\n |\nx — = = x\n |\n |\nx —\n```\n\n### Modules\n| Position ID | Point\n| ----------- | ----------\n| 0 | `[0, 2]`\n| 1 | `[1, 1]`\n| 2 | `[3, 1]`\n| 3 | `[4, 2]`\n| 4 | `[3, 3]`\n| 5 | `[1, 3]`\n\n#### Table section 1\n```\n |\n [1,1]> x — — x <[3,1]\n | |\n[0,2]> x — — x <[4,2]\n | |\n [1,3]> x — — x <[3,3]\n |\n```\n\n\n## How to register flow segments to the grid?\nLets say you want to have a new table section which looks like the following:\n\nEach flow segment internally has an id. If there are 10 LED-strips registered on the Arduino, then the ids are 0 through 9. These flow segments should be registered in order.\n\nA normal flow segment should be registered with an instance of `FlowSegment`. A flow segment that is connected to another flow segment that is NOT on the same table section should be registered with `NeighborFlowSegment`. This differentiation is because the Python-code checks the instance type to know if a flow segment is attached to a flow segment of another table section.\n\nThe flow segments should be registered in the file table_section.py. Here is an example of table type 1:\n\n```python\nif table_type == 1:\n self.flows = [\n FlowSegment((0, 2), (1, 2)),\n FlowSegment((1, 2), (1, 1)),\n FlowSegment((1, 1), (2, 1)),\n NeighborFlowSegment((2, 1), (2, 0)),\n FlowSegment((2, 1), (3, 1)),\n FlowSegment((3, 1), (3, 2)),\n FlowSegment((3, 2), (4, 2)),\n FlowSegment((3, 2), (3, 3)),\n FlowSegment((3, 3), (2, 3)),\n NeighborFlowSegment((2, 3), (2, 4)),\n FlowSegment((2, 3), (1, 3)),\n FlowSegment((1, 3), (1, 2))\n ]\n```\n\nThe locations of where a module is placed on the grid should be set in [settings.py](settings.py), in the variable `TABLE_PART`. Please notice that these positions SHOULD NOT take the position of the table section into account. 
Read paragraph [How to change the position of a table section?](#how-to-change-the-position-of-a-table-section) to see how to change the starting position of a table section.\n\n## how to improve neighbouring tables\nCurrently the neighboring tables endpoint gets the neighbouring tables from the tableConfig file where it is hardcoded. This can be improved by creating a function that dynamically gives back the neighbours.\n\n## How to change the pathfinding algorithm?\nThere are two branches for two different pathfinding algorithms. These three are:\n- grid-master\n- grid-match\n\nTo switch to an other branch, use the following command in terminal:\n```shell\ngit checkout [BRANCH]\n```\n\nFor example:\n```shell\ngit checkout grid-match\n```\n\nThe grid master makes consuming modules search for the nearest producing modules. The other one is grid-match, which will traverse modules to see if the nearest relevant module of the nearest relevant module of that module is equal to that module. In simpler words, lets take the following example:\n\n- Module A\n- Module B\n- Module C\n\nThe distance between A-B is 2, the distance between A-C is 3 and the distance between B-C is 1. The matching asks module A: what is the closest module that is relevant to you? It will answer with B, as 2 (A-B) < 3 (A-C). Then it will ask the same question to B, however B answers with C, as 1 (B-C) < 2 (A-B). This will make the traversion continue, as it will ask C what the closest relevant module is. It will answer with B, because 1 (B-C) < 3 (A-C). As B answers that the closest module is C, and C answers that the closest module is B, they have matched together and will have energy flowing. This loop will continue until there are no matches to be made anymore, as there is no power to deliver anymore, or the modules that are left have no way to reach other modules.\n\n## How to change the position of a table section?\nTo change the position of a table section, you have to go to the [config/tableConfig.json](config/tableConfig.json) file. Each table section is added to this configuration file. To change the position, you have to set the `startingPosition` of the table section you want to change. This will make the starting point of the table section different which will also adjust the position of the flow segments. For example, lets say you have a diagonal flow segment going from `(1, 1)` to `(2, 2)`. If your table section starts at `(0, 0)`, then the flow segment will start at `(1, 1)`. When you change the table section to start at `(5, 5)`, the flow segment will start at `(6, 6)` and go to `(7, 7)`.\n\n# The software on the Raspberry Pi\nCurrently this software is being ran on a Raspberry Pi. The Raspberry Pi which this software is installed runs on Raspbian and contains the Git repository under folder `~/smartgrid/Python`. The Raspberry Pi is using the shell `fish`, which means that whenever you go to the project directory in the terminal, you will see, next to the current directory, a branch icon. If you are on the master branch, you would only see a branch icon, otherwise you would also see the name of the branch. If the local repository is equal to the remote repository, the background where the branch is shown is green. 
For more information, read ['The Prompt' of theme 'bobthefish'](https://github.com/oh-my-fish/theme-bobthefish/blob/master/README.md#the-prompt).\n\n# Future\nTo improve performance in the future, there are a few different options, listed below:\n\n## Use multiprocessing to use all processing power\nCurrently a very small bit of multiprocessing is implemented but this could be inproved.\nThe current script does not use threads for the main pathfinding, as the pathfinding can not easily be done in multiple threads. It can not easily be done in multiple threads for the following reason:\n- The pathfinding is dependant on the direction of the flow segments.\n- The reduction and increment of power of modules is harder with multiple threads as you have to take account which thread gets priority.\n\n## Move the Python-software from the Raspberry Pi to a more powerful device\nCalculating the grid currently uses 100 percent of a single processor of the Raspberry Pi, meaning the processor is using its full strength. Other hardware could be more capable of running the Python-software.\n\n### How can this be done?\nThe Python-software is MOSTLY cross-platform. The only thing that is not cross-platform is the reading of the serial connection. The library that is being used for that is [MySensors](https://www.mysensors.org/). The Python-software is reading the serial connection through psuedo-terminal `/dev/ttyMySensorsGateway`. To move the Python-software to a different device, one should probably do the following:\n- Check if there is compability between the device you want to run the Python-software on and the MySensors library.\n- Potentially rewrite the receiving of data as `/dev/ttyMySensorsGateway` might be something of the deprecated [MySensors Raspberry Port](https://github.com/mysensors/Raspberry).\n\nAnother solution is to write a small Python-script that is being run on the Raspberry Pi that reads `/dev/ttyMySensorsGateway`, and sends that data over to another device that do all other things. That other device should then send over the serial information to the Raspberry Pi, which then uses MySensors to send it over to the Arduino.\n\n"
},
{
"alpha_fraction": 0.5652827620506287,
"alphanum_fraction": 0.5808754563331604,
"avg_line_length": 33.56493377685547,
"blob_id": "fedc77c9043f4e204a59056436a9402af3081d68",
"content_id": "4c3b735b75c6f714990733b1c61f1ff1fe1a0a59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5323,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 154,
"path": "/final_prog/PC/mysensors_gateway_connector.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import time\n\nimport paho.mqtt.client as mqtt\n\nfrom logger import log\nfrom smart_grid_messaging import *\n\n\n# Mysensors protocol types\n# ref https://www.mysensors.org/download/serial_api_20\nclass MySenTypes:\n S_CUSTOM = 23\n V_VAR1 = 24\n V_VAR2 = 25\n V_VAR3 = 26\n V_VAR4 = 27\n V_VAR5 = 28\n V_RGB = 40\n\n @staticmethod\n def types():\n return vars(MySenTypes).values()\n\n @staticmethod\n def my_sen_type_to_smart_grid_type(mysen):\n d = {\n MySenTypes.S_CUSTOM: MessageTypes.TABLE_CONNECTED,\n MySenTypes.V_VAR2: MessageTypes.MODULE_PLACED,\n MySenTypes.V_VAR3: MessageTypes.NEIGHBOR_CHANGED\n }\n return d.get(mysen, -1)\n\n\nclass MySenCommands:\n PRESENT, SET, REQ, INTERNAL, STREAM = range(5)\n\n @staticmethod\n def types():\n return vars(MySenCommands).values()\n\n\nclass MySenMessage(object):\n\n def __init__(self, node_id, child_id, command, ack, type, payload):\n self.node_id = node_id\n self.child_id = child_id\n self.command = command\n self.ack = ack\n self.type = type\n self.payload = int(payload) if payload.isdigit() else payload\n\n def __repr__(self):\n return 'command {0}, type {1}, payload {2}'.format(self.command, self.type, self.payload)\n\n\nclass GatewayConnector(object):\n\n def __init__(self, message_func, mqtt_broker_ip, mqtt_broker_port):\n self.message_func = message_func\n self.mqtt_ip = mqtt_broker_ip\n self.mqtt_port = mqtt_broker_port\n self.mqtt_topic_subscribe = 'sendToPc/#' # '/#' to subscribe on all devices connected\n self.mqtt_topic_publish = 'getFromPc' #To enable received by devices connected\n self.create_mqtt_client() #Creating mqtt_client\n \n def create_mqtt_client(self):\n #Configure the client mqtt\n self.mqtt_client = mqtt.Client()\n self.mqtt_client.on_connect = self.on_connect\n self.mqtt_client.on_message = self.handle_incoming_message\n self.mqtt_client.connect(self.mqtt_ip, self.mqtt_port)\n self.mqtt_client.loop_start()\n \n def on_connect(self, client, userdata, flags, rc):\n # rc is the error code returned when connecting to the broker\n\n log('Connected on MQTT broker!'+str(rc))\n client.subscribe(self.mqtt_topic_subscribe)\n \n def send_serial_message(self, table_section_id, payload, command, type, child_id=0):\n ''' This function check the destination of the message:\n - If the message is not send through broadcast (255), the message is send\n - Else the message is send to each table_id between 1 and 254\n '''\n if table_section_id is not 255:\n topic = '{0}/{1}/0/{2}/{3}/0/{4}'.format(\n self.mqtt_topic_publish, table_section_id, child_id, command, type)\n log(\"\\nSend message\\nTopic: \" + topic + \"\\nMessage: \" + str(payload) + \"\\n\\n\")\n self.mqtt_client.publish(topic, str(payload))\n else:\n log(\"Send broadcast:\")\n log(\"Topic: \" + '{0}/255/0/{1}/{2}/0/{3}'.format(self.mqtt_topic_publish, child_id, command, type) + \" and Message: \" + str(payload) + \"\\n\")\n for table_id in range(1,255): \n topic = '{0}/{1}/0/{2}/{3}/0/{4}'.format(\n self.mqtt_topic_publish, table_id, child_id, command, type)\n self.mqtt_client.publish(topic, str(payload)) \n\n def handle_incoming_message(self, client, userdata, msg):\n message = self.validate_data(msg)\n if message:\n if message.node_id is 0:\n log('from gateway: ', message)\n elif message.command in MySenCommands.types() and message.type in MySenTypes.types():\n s_message = SmartMessage(\n MySenTypes.my_sen_type_to_smart_grid_type(message.type),\n (message.node_id, message.child_id, message.payload)\n )\n self.message_func(s_message)\n else:\n log('Unknown message 
received ' + data_line)\n else:\n log('Unknown message received \"' + data_line + '\"')\n \n def start_serial_read(self):\n self.mqtt_client.loop_forever()\n\n @staticmethod\n def validate_data(msg):\n log(\"\\n\\nTopic: \" + msg.topic + \"\\n Message: \" + msg.payload)\n data_array = str(msg.topic).split('/')[1:]\n del(data_array[1])\n data_array.append(str(msg.payload))\n\n # Check if data contains 6 elements and ends with \\n\n if len(data_array) is not 6:\n return None\n\n # Check if each data is a digit except last (which is the payload)\n for data in data_array[:5]:\n if not data.isdigit():\n return None\n\n # Return message object\n return MySenMessage(\n int(data_array[0]),\n int(data_array[1]),\n int(data_array[2]),\n int(data_array[3]),\n int(data_array[4]),\n data_array[5].rstrip() # strip newline\n )\n\n def start_serial_read(self):\n while 1:\n continue\n\n\nif __name__ == \"__main__\":\n\n def msg_func(msg):\n log(msg)\n\n gw_conn = GatewayConnector(msg_func, 'localhost', 1883)\n gw_conn.start_serial_read()\n"
},
{
"alpha_fraction": 0.6059850454330444,
"alphanum_fraction": 0.665835440158844,
"avg_line_length": 17.095237731933594,
"blob_id": "ab40af17f3ad043b0616111d18dac434f11ded51",
"content_id": "abdf008f28e19f3c047504ba14941d0a74b73bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 21,
"path": "/final_prog/Arduino/helper/config.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include \"RFID.h\"\r\n\r\n/* Serial Pins */\r\n#define BAUDRATE 115200\r\n#define TX_PIN 17\r\n#define RX_PIN 16\r\n\r\n/* Shared RFID reset pin */\r\n#define RFID_RST_PIN A0\r\n\r\n// RFID data pins \r\n#define RFID0_SDA_PIN 13\r\n#define RFID1_SDA_PIN 12\r\n#define RFID2_SDA_PIN 27\r\n#define RFID3_SDA_PIN 33\r\n\r\n/* Number of RFIDs on this Arduino */\r\n#define RFID_COUNT 4\r\n\r\n/* RFIDs on this Arduino */\r\nextern RFID RFIDs[];\r\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.6792452931404114,
"avg_line_length": 25.5,
"blob_id": "a1f60b0a37067624fe4ac78267d9f90024d042b8",
"content_id": "2974eb99c34a5c97e32a46ad2c2219dda8626332",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/final_prog/PC/test_scripts/test_helper_functions.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom settings import Voltages, Load, get_load, Speed, get_speed\n\nsys.path.append(\"../\")\n\n\ndef test_get_load():\n assert get_load(Voltages.LOW, 621) == Load.CRITICAL\n assert get_load(Voltages.MEDIUM, 489) == Load.HIGH\n assert get_load(Voltages.HIGH, 2) == Load.NORMAL\n\n\ndef test_get_speed():\n assert get_speed(50) == Speed.NORMAL\n assert get_speed(200) == Speed.FAST\n assert get_speed(300) == Speed.FASTER\n assert get_speed(400) == Speed.FASTEST\n"
},
{
"alpha_fraction": 0.5680205225944519,
"alphanum_fraction": 0.5830047130584717,
"avg_line_length": 24.010255813598633,
"blob_id": "3167cbb41821981b2ee49b51045789b8622ffd2b",
"content_id": "da993d2cdd86340fd95f45cfb2b11eb1a46ff3bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5072,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 195,
"path": "/final_prog/Arduino/boss/ledstrip.cpp",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#define FASTLED_INTERNAL\r\n#include <FastLED.h>\r\n\r\n#include \"config.h\"\r\n#include \"ledstrip.h\"\r\n#include \"synced-millis.h\"\r\n\r\nLedstripSegment segments[FLOW_SEGMENT_COUNT];\r\nCRGB leds[LED_COUNT];\r\nVoltage voltage;\r\nbool testReady = true;\r\n\r\n//initialize FastLed library and ledstrip \r\nvoid ledstrip_setup()\r\n{ \r\n#ifndef LEDSTRIP_TYPE_WITHOUT_CLOCK \r\n FastLED.addLeds<LEDSTRIP_TYPE, LEDSTRIP_DATA_PIN, LEDSTRIP_CLK_PIN, BGR>(leds, LED_COUNT);\r\n#else\r\n FastLED.addLeds<LEDSTRIP_TYPE_WITHOUT_CLOCK, LEDSTRIP_DATA_PIN, GRB>(leds, LED_COUNT);\r\n#endif\r\n\r\n //initialize leds on error(red) color, so that if maincontroller hasn't send flow config message yet, you'll see red led's\r\n for (uint8_t i = 0; i < LED_COUNT; i++) {\r\n leds[i] = error_color;\r\n }\r\n\r\n FastLED.setBrightness(LED_BRIGHTNESS);\r\n FastLED.show();\r\n}\r\n\r\n//update ledstrips according to received flow message\r\nvoid ledstrip_update(bool force)\r\n{\r\n bool update = false;\r\n\r\n for (uint8_t i = 0; i < FLOW_SEGMENT_COUNT; i++) {\r\n uint8_t first_led = i * FLOW_SEGMENT_LENGTH;\r\n uint8_t position = ledstrip_calculate_position(&segments[i]);\r\n\r\n if (!force && position == segments[i].position) {\r\n continue;\r\n }\r\n\r\n segments[i].position = position;\r\n\r\n if (segments[i].state == FLOW_SEGMENT_STATE_OFF) {\r\n for (uint8_t led = 0; led < FLOW_SEGMENT_LENGTH; led++) {\r\n leds[first_led + led] = off_color;\r\n }\r\n } else if (segments[i].state == FLOW_SEGMENT_STATE_ERROR) {\r\n ledstrip_show_error(&segments[i], first_led);\r\n } else {\r\n ledstrip_show_flow_segment(&segments[i], first_led);\r\n }\r\n\r\n update = true;\r\n }\r\n\r\n if (update) {\r\n //update all ledstrips\r\n FastLED.show();\r\n }\r\n}\r\n\r\nuint8_t ledstrip_calculate_position(LedstripSegment *segment)\r\n{\r\n uint32_t now = synced_millis();\r\n\r\n if (segment->state == FLOW_SEGMENT_STATE_OFF) {\r\n return 0;\r\n } else if (segment->state == FLOW_SEGMENT_STATE_ERROR) {\r\n return (now / 200) % 2;\r\n } else if (segment->state == FLOW_SEGMENT_STATE_PASSIVE) {\r\n return 0;\r\n } else if (segment->state == FLOW_SEGMENT_STATE_ACTIVE) {\r\n uint32_t delay = 150 * (4 - segment->speed);\r\n uint8_t position = (now / delay) % FLOW_SEGMENT_LENGTH;\r\n\r\n return segment->direction ? FLOW_SEGMENT_LENGTH - 1 - position : position;\r\n }\r\n}\r\n\r\nvoid ledstrip_show_flow_segment(LedstripSegment *segment, uint8_t first_led)\r\n{\r\n // apply voltage color\r\n for (uint8_t i = 0; i < FLOW_SEGMENT_LENGTH; i++) {\r\n leds[first_led + i] = voltage_colors[voltage];\r\n }\r\n\r\n // apply load color if flow_segment is active\r\n if (segment->state == FLOW_SEGMENT_STATE_ACTIVE) {\r\n leds[first_led + segment->position] = load_colors[segment->load];\r\n }\r\n}\r\n\r\nvoid ledstrip_show_error(LedstripSegment *segment, uint8_t first_led)\r\n{\r\n CRGB color = segment->position == 0 ? 
off_color : error_color;\r\n\r\n for (uint8_t i = 0; i < FLOW_SEGMENT_LENGTH; ++i) {\r\n leds[first_led + i] = color;\r\n }\r\n}\r\n\r\nvoid ledstrip_set_grid(const Grid *grid)\r\n{\r\n voltage = grid->voltage;\r\n for (uint8_t i = 0; i < FLOW_SEGMENT_COUNT; i++) {\r\n const FlowSegment *flow_segment = &grid->flow_segments[i];\r\n\r\n segments[i].state = flow_segment->state;\r\n segments[i].load = flow_segment->load;\r\n segments[i].direction = flow_segment->direction;\r\n segments[i].speed = flow_segment->speed;\r\n }\r\n}\r\n\r\nvoid ledstrip_set_color(uint8_t id, uint32_t rgb)\r\n{\r\n CRGB color = CRGB(rgb);\r\n switch (id) {\r\n case 0:\r\n voltage_colors[VOLTAGE_LOW] = color;\r\n break;\r\n case 1:\r\n voltage_colors[VOLTAGE_MEDIUM] = color;\r\n break;\r\n case 2:\r\n voltage_colors[VOLTAGE_HIGH] = color;\r\n break;\r\n case 3:\r\n load_colors[LOAD_NORMAL] = color;\r\n break;\r\n case 4:\r\n load_colors[LOAD_HIGH] = color;\r\n break;\r\n case 5:\r\n load_colors[LOAD_CRITICAL] = color;\r\n break;\r\n }\r\n}\r\n\r\nbool ledstrip_test(bool testReady) {\r\n if (testReady) {\r\n return false;\r\n } else {\r\n //COLOR TEST\r\n for (uint8_t i = 0; i < LED_COUNT; i++)\r\n {\r\n leds[i] = CRGB::Red;\r\n }\r\n FastLED.show();\r\n delay(1000);\r\n for (uint8_t i = 0; i < LED_COUNT; i++)\r\n {\r\n leds[i] = CRGB::Green;\r\n }\r\n FastLED.show();\r\n delay(1000);\r\n for (uint8_t i = 0; i < LED_COUNT; i++)\r\n {\r\n leds[i] = CRGB::Blue;\r\n }\r\n FastLED.show();\r\n delay(1000);\r\n // FLOW_SEGMENT SEGMENT TEST\r\n for (uint8_t i = 0; i < LED_COUNT; i += FLOW_SEGMENT_LENGTH)\r\n {\r\n for (uint8_t j = 0; j < FLOW_SEGMENT_LENGTH; ++j)\r\n {\r\n leds[i + j] = CRGB::White;\r\n }\r\n FastLED.show();\r\n delay(200);\r\n for (uint8_t j = 0; j < FLOW_SEGMENT_LENGTH; ++j)\r\n {\r\n leds[i + j] = CRGB::Black;\r\n }\r\n FastLED.show();\r\n }\r\n // LED BY LED TEST\r\n for (uint8_t i = 0; i < LED_COUNT; ++i)\r\n {\r\n leds[i] = CRGB::White;\r\n FastLED.show();\r\n delay(50);\r\n }\r\n for (uint8_t i = 0; i < LED_COUNT; ++i)\r\n {\r\n leds[i] = CRGB::Black;\r\n }\r\n Serial.println(\"einde test\");\r\n return true;\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.5450819730758667,
"alphanum_fraction": 0.5737704634666443,
"avg_line_length": 19.16666603088379,
"blob_id": "5bce335298b63837e91445f99e69af05d1dd68ab",
"content_id": "3a98d1a8c42457036354164e77f925578c7e64ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/2_RFID/array.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "\nfrom module import *\nfrom settings import *\nfrom First import *\n\na = load_module_info()\nprint a\n\n#have a list with name of module and ID not in the same column\nfor i in range(len(a)):\n a[i] = str(a[i])\n a[i] = a[i][:-1]\n a[i] = a[i].split(\"(\")\n\n\nID_tag = '23099302756'\nex_tag = False\n\nfor i in a:\n if i[1] == ID_tag:\n ex_tag = True\n print ('The ID of this tag is ' + ID_tag + ' and this is' + i[0])\n\nif ex_tag == False:\n print ('This ID does not exist yet')\n\n\n\n"
},
{
"alpha_fraction": 0.7463414669036865,
"alphanum_fraction": 0.7658536434173584,
"avg_line_length": 36.272727966308594,
"blob_id": "96f6fc2d248c48b9c349cff5044893fe44509fee",
"content_id": "0349c3e105c8e99e9282dd583c5ead03814fb949",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 255,
"num_lines": 11,
"path": "/final_prog/Makefile.inc",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "SOC=unknown\nCPPFLAGS= -DMY_RADIO_RF24 -DMY_GATEWAY_LINUX -DMY_GATEWAY_MQTT_CLIENT -DMY_DEBUG -DLINUX_SPI_SPIDEV -DMY_MQTT_CLIENT_ID=\\\"PC\\\" -DMY_MQTT_SUBSCRIBE_TOPIC_PREFIX=\\\"sendToPc\\\" -DMY_MQTT_PUBLISH_TOPIC_PREFIX=\\\"getFromPc\\\" -DMY_CONTROLLER_IP_ADDRESS=127,0,0,1 \nLDFLAGS=-pthread \nPREFIX=/usr/local\nCC=gcc\nCXX=g++\nBUILDDIR=build\nBINDIR=bin\nGATEWAY_DIR=/usr/local/bin\nINIT_SYSTEM=systemd\nSPI_DRIVER=SPIDEV\n"
},
{
"alpha_fraction": 0.5611510872840881,
"alphanum_fraction": 0.5755395889282227,
"avg_line_length": 12.899999618530273,
"blob_id": "4faa7c7634506d73b4af925d1a73a6011f5594bc",
"content_id": "91f1b24de44732af4d4e21d90cb4aa72c4863c5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 10,
"path": "/Software_currently_implement/.coveragerc",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "[run]\nbranch = True\n\nomit =\n */python2.7/*\n */test_scripts/*\n gui.py\n flask_api.py\n smart_grid_app.py\n daemon_threads.py\n"
},
{
"alpha_fraction": 0.5452865362167358,
"alphanum_fraction": 0.5594577789306641,
"avg_line_length": 18.80769157409668,
"blob_id": "609dc54f0e96ea4be06cdeed2e153a8577a21116",
"content_id": "38de9dfb38a1fc210d9f956ed73521638d1125da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1623,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 78,
"path": "/final_prog/Arduino/helper/helper.ino",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include <SPI.h>\r\n\r\n#include \"config.h\"\r\n\r\nvoid setup()\r\n{\r\n\t/* start SPI for RFID readers */\r\n\tSPI.begin();\r\n\r\n\t/* initialize RFID readers */\r\n\tfor (size_t i = 0; i < RFID_COUNT; i++) {\r\n\t\tRFID_init(&RFIDs[i]);\r\n\t}\r\n \r\n /* start Serial communications (USB then ESP32 boss */\r\n Serial.begin(BAUDRATE);\r\n Serial2.begin(BAUDRATE, SERIAL_8N1, RX_PIN, TX_PIN);\r\n\tSerial.println(\"setup() finished\");\r\n}\r\n\r\nvoid loop()\r\n{\r\n\tfor (size_t i = 0; i < RFID_COUNT; i++) {\r\n\t\thandle_RFID(&RFIDs[i], i);\r\n\t}\r\n}\r\n\r\nvoid handle_RFID(RFID *RFID, uint8_t sensor_id)\r\n{\r\n\t/* skip RFID if state has not changed */\r\n\tif (!RFID_state_changed(RFID)) {\r\n\t\treturn;\r\n\t}\r\n\r\n\t/* create RFID message */\r\n\tRFID_message msg = {\r\n\t\t.sensor_id = sensor_id,\r\n\t\t.tag_present = RFID->tag_present,\r\n\t};\r\n\r\n\t/* read tag ID if tag is present */\r\n\tif (msg.tag_present) {\r\n\t\tbool tag_id_read = RFID_start_auth(RFID) &&\r\n\t\t RFID_tag_read_id(RFID, &msg);\r\n\r\n\t\tRFID_stop_auth(RFID);\r\n\r\n\t\t/* ignore RFID if tag ID could not be read */\r\n\t\tif (!tag_id_read) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\r\n\thandle_RFID_message(&msg);\r\n}\r\n\r\nvoid handle_RFID_message(const RFID_message *msg)\r\n{\r\n\t/* print RFID message */\r\n\tRFID_message_print(msg);\r\n \r\n\t/* send RFID message to serial */\r\n if (msg->tag_present) {\r\n byte buf[6];\r\n buf[0] = msg->sensor_id;\r\n buf[1] = (byte) msg->tag_present;\r\n for (int i=0; i<4; i++){\r\n buf[i+2] = (msg->tag_id >> i*8) & 255;\r\n }\r\n Serial2.write(buf, sizeof(buf));\r\n }\r\n else {\r\n byte buf[2];\r\n buf[0] = msg->sensor_id;\r\n buf[1] = (byte) msg->tag_present;\r\n Serial2.write(buf, sizeof(buf));\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.801980197429657,
"alphanum_fraction": 0.8217821717262268,
"avg_line_length": 16,
"blob_id": "0378666f887766dbce45ab95b21c892600aa703a",
"content_id": "8f26acb69ebfc6c991a8bcb771e84409732b8011",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 6,
"path": "/Software_currently_implement/requirements.txt",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "flask\npyserial\nnumpy\nrequests\ngit+https://github.com/jrialland/python-astar.git\nwebsocket-server==0.4"
},
{
"alpha_fraction": 0.44530245661735535,
"alphanum_fraction": 0.4835447669029236,
"avg_line_length": 11.305429458618164,
"blob_id": "0ae571f83bf8a427f3bf3a54ad1e24648e6e6327",
"content_id": "52cf5672d8f5c2290055ae7beebc8e09bb1506e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5439,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 442,
"path": "/Software_currently_implement/API.md",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "# REST endpoints\n\nAll requests should append a prefix of `/api`. Meaning `/tablesections` would instead be `/api/tablesections`.\n\n## Get all table sections\n\n```\nurl:\t/tablesections/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"table_sections\": [\n\t\t{\n\t\t\t\"id\": 1,\n\t\t\t\"pos\": [\n\t\t\t\t3, 5\n\t\t\t],\n\t\t\t\"type\": 1\n\t\t},\n\t\t{\n\t\t\t\"id\": 2,\n\t\t\t\"pos\": [\n\t\t\t\t5, 3\n\t\t\t],\n\t\t\t\"type\": 2\n\t\t}\n\t]\n}\n```\n## Get information of a given table id\n```\nurl: /tablesections/:id/\nMethod: GET\n```\n\nExample response:\n\n```json\n{\n \"id\": 5,\n \"pos\": [\n\t9,\n\t8\n ],\n \"type\": 1,\n \"voltage\": 0\n}\n```\n \n\n## Get all modules of a given table section\n\n```\nurl:\t/tablesections/:id/modules/\nmethod: GET\n```\n\nExample response:\n\n```json\n{\n\t\"modules\": [\n\t\t{\n\t\t\t\"id\": 2090677472,\n\t\t\t\"locationId\": 0\n\t\t},\n\t\t{\n\t\t\t\"id\": 1945556720,\n\t\t\t\"locationId\": 1\n\t\t}\n\t]\n}\n```\n\n## Get all modules\n\n```\nurl:\t/modules/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"modules\": [\n\t\t{\n\t\t\t\"id\": 3920381746\n\t\t},\n\t\t{\n\t\t\t\"id\": 2310312452\n\t\t}\n\t]\n}\n```\n\n## Get info of a given module\n\n```\nurl:\t/modules/:id/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"id\": 3920381746,\n\t\"name\": \"Nuclear Power Plant 2\",\n\t\"power\": -500,\n\t\"voltage\": \"High\"\n}\n```\n\n## Get configurations of a given module\n\n```\nurl:\t/modules/:id/configs/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"configurations\": [\n\t\t{\n\t\t\t\"id\": 9,\n\t\t\t\"max\": 1200,\n\t\t\t\"min\": 0,\n\t\t\t\"name\": \"Generator\",\n\t\t\t\"role\": \"production\",\n\t\t\t\"value\": 500\n\t\t}\n\t]\n}\n```\n\n## Get given configuration of a given module\n\n```\nurl:\t/modules/:id/configs/:config_id/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"id\": 9,\n\t\"max\": 1200,\n\t\"min\": 0,\n\t\"name\": \"Generator\",\n\t\"role\": \"production\",\n\t\"value\": 500\n}\n```\n\n## Set property of given configuration on a given module\n\n```\nurl:\t/modules/:id/configs/:config_id/\nmethod:\tPUT\n```\n\nExample request content:\n\n```json\n{\n\t\"name\": \"Old Generator\"\n}\n```\n\nExample response:\n\n```json\n{\n\t\"id\": 9,\n\t\"max\": 1200,\n\t\"min\": 0,\n\t\"name\": \"Old Generator\",\n\t\"role\": \"production\",\n\t\"value\": 500\n}\n```\n\n## Set a flow colour\n\nThe color is equivalent to certain voltages and loads.\n```\n| ID | Description |\n| -- | -------------- |\n| 0 | low voltage |\n| 1 | medium voltage |\n| 2 | high voltage |\n| 3 | normal load | \n| 4 | high load |\n| 5 | stressed load |\n```\n\n```\nurl:\t/flowcolor/:id/\nmethod:\tPUT\n```\n\nExample request content:\n\n```json\n{\n\t\"value\": \"ff0000\"\n}\n```\n\n## Get all currently used colors\n\nThe color is equivalent to certain voltages and loads.\n```\n| ID | Description |\n| -- | -------------- |\n| 0 | low voltage |\n| 1 | medium voltage |\n| 2 | high voltage |\n| 3 | normal load |\n| 4 | high load |\n| 5 | stressed load |\n```\n\n```\nurl:\t/flowcolor/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n \"colors\": [\n {\n \"color\": \"19ccff\",\n \"id\": 0\n },\n {\n \"color\": \"994cff\",\n \"id\": 1\n },\n {\n \"color\": \"3319ff\",\n \"id\": 2\n },\n {\n \"color\": \"00ff00\",\n \"id\": 3\n },\n {\n \"color\": \"ffff00\",\n \"id\": 4\n },\n {\n \"color\": \"ff0000\",\n \"id\": 5\n }\n ]\n}\n```\n\n## Get powerboundaries for voltages and 
flows\n\n```\nurl:\t/powerboundaries/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"boundaries\": {\n\t\t\"0\": {\n\t\t\t\"high\": 0.75,\n\t\t\t\"critical\": 300\n\t\t},\n\t\t\"1\": {\n\t\t\t\"high\": 0.75,\n\t\t\t\"critical\": 500\n\t\t},\n\t\t\"2\": {\n\t\t\t\"high\": 0.75,\n\t\t\t\"critical\": 1300\n\t\t}\n\t}\n}\n```\n\n* 0/1/2: voltages. 0 means low, 1 means medium, 2 means high\n* High: modifier for high load\n* Critical: absolute value for critical load\n\n## Set one of the boundaries for load\n\n```\nurl:\t/powerboundaries/\nmethod:\tPUT\n```\n\nExample request content:\n\n```json\n{\n\t\"voltage\": 0,\n\t\"load\": 2,\n\t\"value\": 500\n}\n```\n\n## Get the grid\n\n```\nurl:\t/grid/\nmethod:\tGET\n```\n\nExample response:\n\n```json\n{\n\t\"flow_segments\": [\n\t\t{\n\t\t\t\"start_pos\": [\n\t\t\t 9,\n\t\t\t 10\n\t\t\t],\n\t\t\t\"end_pos\": [\n\t\t\t 10,\n\t\t\t 10\n\t\t\t],\n\t\t\t\"direction\": 0,\n\t\t\t\"enabled\": true,\n\t\t\t\"table_section\": 6\n\t\t}\n\t],\n\t\"modules\": [\n\t {\n \"pos\": [\n 12,\n 11\n ],\n \"remainingPower\": 0,\n \"table_section\": 6\n }\n\t]\n}\n```\n\nWhere `direction` is either 0 (forwards) or 1 (backwards).\n\n## Get the grid data of a specific table id\n```\nurl: /grid/:table_id/\nMethod: GET\n```\n\nExample response:\n\n```json\n{\n\t\"flow_segments\": [\n {\n \"direction\": null,\n \"enabled\": true,\n \"end_pos\": [\n 10,\n 10\n ],\n \"load\": 0,\n \"start_pos\": [\n 9,\n 10\n ],\n \"table_section\": 5\n },\n {\n \"direction\": 1,\n \"enabled\": true,\n \"end_pos\": [\n 10,\n 9\n ],\n \"load\": 0,\n \"start_pos\": [\n 10,\n 10\n ],\n \"table_section\": 5\n }\n ]\n}\n``` \n\n## Get neighboring table\n```\nurl: /api/neighbours/:id/\nmethod: GET\n```\n\nExample response\n\n```json\n{\n \"bottom\": null,\n \"left\": null,\n \"right\": 5,\n \"top\": 2\n}\n```\n\n## Flowsegment\n\n```\nurl:\t/flowsegment/:table_id/:segment_id/\nmethod:\tPUT\n```\n\nWhere id comes from `/grid/` in the \"flow_segments\" array (based on order of the result).\n\nExampe request content:\n\n```json\n{\n\t\"enabled\": \"true\"\n}\n```\n\n## Reboot all table sections\n\n```\nurl:\t/reboot/\nmethod:\tPUT\n```\n"
},
{
"alpha_fraction": 0.4294053316116333,
"alphanum_fraction": 0.46187523007392883,
"avg_line_length": 38.30882263183594,
"blob_id": "f8571d71b606a3ddc3539063e3d9308010de8fb9",
"content_id": "77ddec58cfb7be794d7e96e2cfea2e0344033ca1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2741,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 68,
"path": "/final_prog/Arduino/boss/config.cpp",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include \"config.h\"\r\n#include \"load.h\"\r\n#include \"voltage.h\"\r\n\r\n/* CURRENT TABLE RFID SETUP (AS SEEN FROM THE FRONT OF THE TABLE)\r\n * ************************************************************************************\r\n * NORTH SIDE \r\n * \r\n * T0 D33 \r\n * M1 M2 D13 D27 \r\n * \\|_|/ \\|_|/\r\n * M0___/ \\___M3 HELPER SIDE D12___/ \\___D12 BOSS SIDE\r\n * \\___/ \\___/\r\n * /| |\\ /| |\\\r\n * M5 M4 D27 D13\r\n * T1 D33\r\n * \r\n * SOUTH SIDE \r\n * ************************************************************************************ \r\n * M0/5 = MODULE_SENSORS\r\n * T0/1 = TABLE_SECTION_SENSORS\r\n * \r\n * I/O Arduino pinout boss (as seen from the front of the table):\r\n * \r\n * M4 = D13 (boss)\r\n * M3 = D12 (boss)\r\n * M2 = D27 (boss)\r\n * T0 = D33 (boss)\r\n *\r\n * M1 = D13 (helper)\r\n * M0 = D12 (helper)\r\n * M5 = D27 (helper)\r\n * T1 = D33 (helper)\r\n */\r\n\r\n// initialization of table sensors and locations on the table \r\nconst SensorInfo sensor_info[] = {\r\n [0] = { .type = MODULE_SENSOR, .location = 4 }, //boss --digitalpin 13\r\n [1] = { .type = MODULE_SENSOR, .location = 3 }, //boss --digitalpin 12\r\n [2] = { .type = MODULE_SENSOR, .location = 2 }, //boss --digitalpin 27\r\n [3] = { .type = TABLE_SECTION_SENSOR, .location = 0 }, //boss --digitalpin 33\r\n [4] = { .type = MODULE_SENSOR, .location = 1 }, //helper --digitalpin 13\r\n [5] = { .type = MODULE_SENSOR, .location = 0 }, //helper --digitalpin 12\r\n [6] = { .type = MODULE_SENSOR, .location = 5 }, //helper --digitalpin 27\r\n [7] = { .type = TABLE_SECTION_SENSOR, .location = 1 }, //helper --digitalpin 33\r\n};\r\n\r\n// initialization of RFID objects on the boss (helper RFIDs are send through I2C communication)\r\nRFID RFIDs[RFID_COUNT] = {\r\n RFID_create(RFID0_SDA_PIN, RFID_RST_PIN),\r\n RFID_create(RFID1_SDA_PIN, RFID_RST_PIN),\r\n RFID_create(RFID2_SDA_PIN, RFID_RST_PIN),\r\n RFID_create(RFID3_SDA_PIN, RFID_RST_PIN),\r\n};\r\n\r\n// initialization of color settings with voltage being background color and load being foreground color\r\nCRGB off_color = CRGB::Black;\r\nCRGB error_color = CRGB::Red;\r\nCRGB voltage_colors[] = {\r\n [VOLTAGE_LOW] = CRGB::Grey,\r\n [VOLTAGE_MEDIUM] = CRGB::DarkMagenta,\r\n [VOLTAGE_HIGH] = CRGB::DarkBlue,\r\n};\r\nCRGB load_colors[] = {\r\n [LOAD_NORMAL] = CRGB::Green,\r\n [LOAD_HIGH] = CRGB::Yellow,\r\n [LOAD_CRITICAL] = CRGB::Red,\r\n};\r\n"
},
{
"alpha_fraction": 0.7471697926521301,
"alphanum_fraction": 0.7622641324996948,
"avg_line_length": 24.5,
"blob_id": "88de23e85f52a568c9665cea2699d24420840618",
"content_id": "262dff1c2b85d49a74d18dc0f7f3195a47358bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 10,
"path": "/final_prog/Arduino/boss/synced-millis.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef SYNCED_MILLIS_H\r\n#define SYNCED_MILLIS_H\r\n\r\n//set millis that is received from maincontroller\r\nvoid set_millis(uint32_t received_millis);\r\n\r\n//synchronizes received time from maincontroller with time on table section\r\nuint32_t synced_millis();\r\n\r\n#endif z\r\n"
},
{
"alpha_fraction": 0.5723205208778381,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 25.69444465637207,
"blob_id": "7dae3f7031210f3f2d1adde77de95e8869f1c50d",
"content_id": "a53112b1407d8baba7599b3b57f4ae6f8a188ad9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1922,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 72,
"path": "/Software_currently_implement/websocket.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from websocket_server import WebsocketServer\nimport json\nimport threading\nfrom logger import log\n\n\nclass Websocket():\n server = None\n\n def __init__(self, events):\n self.events = events\n\n def start(self, host='0.0.0.0', port=9001):\n self.server = self.__connect(host, port)\n\n # Start a new thread and keep the connection alive\n thread = threading.Thread(target=self.server.run_forever)\n thread.daemon = True\n thread.start()\n\n def __connect(self, host='0.0.0.0', port=9001):\n \"\"\"\n Create a new websocket server and return it\n \"\"\"\n server = WebsocketServer(port, host)\n server.set_fn_new_client(self.__client_connected)\n server.set_fn_client_left(self.__client_left)\n server.set_fn_message_received(self.__message_received)\n\n return server\n\n @staticmethod\n def __client_connected(client, server):\n \"\"\"\n Called for every client connecting (after handshake)\n \"\"\"\n log(\"New Client(%d) connected.\" % client['id'])\n\n @staticmethod\n def __client_left(client, server):\n \"\"\"\n Called for every client disconnecting\n \"\"\"\n log(\"Client(%d) disconnected\" % client['id'])\n\n @staticmethod\n def __message_received(client, server, message):\n \"\"\"\n Called when a client sends a message\n \"\"\"\n data = json.loads(message)\n\n log(\"Client(%d) said: %s\" % (client['id'], data['type']))\n\n def send_data_message(self, type, args=None):\n \"\"\"\n Sends a message to all connected clients.\n Only listens to MessageTypes.\n \"\"\"\n\n if type not in list(self.events):\n return\n\n if args is None:\n args = {}\n\n log('WEBSOCKET CALL - Type: %d' % type)\n\n data = {'type': type, 'data': args}\n data = json.dumps(data)\n\n self.server.send_message_to_all(data)\n"
},
{
"alpha_fraction": 0.5618436336517334,
"alphanum_fraction": 0.5665110945701599,
"avg_line_length": 29.070175170898438,
"blob_id": "e9aae05bacf67147832371d06223188e73f3b7e2",
"content_id": "ad2ff57c732bb3553ad327f08b8d6a139630fd19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1714,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 57,
"path": "/Software_currently_implement/flow_segment.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from hexifier import hexify\nfrom settings import *\n\n\nclass FlowSegment(object):\n \"\"\"\n Abstract flow segment object\n \"\"\"\n\n def __init__(self, start_pos, end_pos):\n super(FlowSegment, self).__init__()\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.enabled = True\n self.table_section = None\n self.reset()\n\n def reset(self):\n self.direction = None\n self.state = State.PASSIVE if self.enabled else State.OFF\n self.voltage = Voltages.ERROR\n self.load = Load.NORMAL\n self.speed = Speed.NORMAL\n\n def set_force_disabled(self, enabled):\n self.enabled = enabled\n self.state = State.OFF if self.enabled is False else State.ACTIVE\n\n def activate(self):\n \"\"\"\n Activates the flow if it is passive\n \"\"\"\n if self.state is State.PASSIVE:\n self.state = State.ACTIVE\n\n def get_byte(self):\n \"\"\"\n Get byte (hex string) with information about speed, direction, load and state\n \"\"\"\n\n speed = self.speed << 5\n direction = (\n self.direction if self.direction is not None else Direction.FORWARDS) << 4\n load = self.load << 2\n state = self.state\n byte = hexify(speed ^ direction ^ load ^ state)\n return [byte]\n\n def __repr__(self):\n return 'FlowSegment({0}, {1}) [s {2},v {3},l {4}]'.format(self.start_pos, self.end_pos, self.state,\n self.voltage, self.load)\n\n\nclass NeighborFlowSegment(FlowSegment):\n\n def __init__(self, start_pos, end_pos):\n super(NeighborFlowSegment, self).__init__(start_pos, end_pos)\n"
},
{
"alpha_fraction": 0.5519103407859802,
"alphanum_fraction": 0.5603028535842896,
"avg_line_length": 33.42424392700195,
"blob_id": "c74db761354cdc6613d5df7faa7e85371b5b51aa",
"content_id": "dd5546136df9a0a44a53b1bf1ffc96e7df2b754f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17039,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 495,
"path": "/Software_currently_implement/flask_api.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#!/bin/env python2\nimport ast\n\nfrom flask import Flask, request, jsonify\nfrom flask.views import MethodView\n\nfrom smart_grid_messaging import *\nfrom smart_grid_table import *\n\n_smart_grid_table = None\n_add_message_func = None\n\n\nclass InvalidId(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\nclass TableSectionView(MethodView):\n\n def get(self, table_id=None):\n global _smart_grid_table\n\n if table_id is None:\n tps = _smart_grid_table.get_table_sections()\n data = {\n \"table_sections\": [{\n \"id\": tp.id,\n \"type\": tp.type,\n \"pos\": tp.pos,\n \"voltage\": tp.get_voltage()\n } for tp in tps]\n }\n else:\n tp = _smart_grid_table.get_table_section(table_id)\n if tp is None:\n raise InvalidId('Table Section {0} does not exist'.format(\n table_id), status_code=410)\n\n data = {\n \"id\": tp.id,\n \"type\": tp.type,\n \"pos\": tp.pos,\n \"voltage\": tp.get_voltage()\n }\n\n return jsonify(data)\n\n def put(self):\n put_data = request.get_json()\n id, pos = put_data.get('id', None), put_data.get('pos', None)\n\n _smart_grid_table.get_table_section(id).set_position(pos)\n _smart_grid_table.write_table_config()\n return jsonify(request.get_json())\n\n\nclass TableSectionModuleView(MethodView):\n\n def get(self, table_section_id):\n global _smart_grid_table\n tp = _smart_grid_table.get_table_section(table_section_id)\n if tp is None:\n raise InvalidId('Table Section {0} does not exist'.format(\n table_section_id), status_code=410)\n modules = tp.get_placed_modules()\n data = {\n \"modules\": [{\n \"id\": m.id,\n \"locationId\": location,\n \"name\": m.name\n } for location, m in enumerate(modules) if m is not None]\n }\n\n return jsonify(data)\n\n\nclass ModuleView(MethodView):\n\n def get(self, module_id=None):\n global _smart_grid_table\n if module_id is None:\n modules = _smart_grid_table.get_modules()\n data = {\n \"modules\": [{\n \"id\": m.id\n } for m in modules if isinstance(m, DefaultModule)],\n \"transformers\": [{\n \"id\": m.id\n } for m in modules if isinstance(m, ConnectionModule)]\n }\n return jsonify(data)\n else:\n module = _smart_grid_table.get_module(module_id)\n if module is None:\n raise InvalidId('Module {0} does not exist'.format(\n module_id), status_code=410)\n if isinstance(module, DefaultModule):\n data = {\n \"id\": module.id,\n \"name\": module.name,\n \"voltage\": module.get_voltage(string=True),\n \"type\": module.type,\n \"power\": module.get_power(),\n }\n return jsonify(data)\n else:\n raise InvalidId('Module {0} is not a DefaultModule'.format(\n module_id), status_code=410)\n\nclass ModuleConfigView(MethodView):\n\n def get(self, module_id, config_id=None):\n global _smart_grid_table\n module = _smart_grid_table.get_module(module_id)\n if module is None:\n raise InvalidId('Module {0} does not exist'.format(\n module_id), status_code=410)\n\n if config_id is None:\n configs = module.get_configurations()\n data = {\n \"configurations\": [{\n \"id\": config.get_config_id(),\n \"min\": config.get_min_value(),\n \"max\": config.get_max_value(),\n \"value\": config.get_value(),\n \"name\": config.get_name(),\n \"role\": config.get_role(string=True)\n } for config in configs]\n }\n return jsonify(data)\n else:\n config = 
module.get_configuration(config_id)\n if config is None:\n raise InvalidId('Config {0} does not exist'.format(\n config_id), status_code=410)\n\n data = {\n \"id\": config.get_config_id(),\n \"min\": config.get_min_value(),\n \"max\": config.get_max_value(),\n \"value\": config.get_value(),\n \"name\": config.get_name(),\n \"role\": config.get_role(string=True)\n }\n return jsonify(data)\n\n def put(self, module_id, config_id):\n global _smart_grid_table\n\n module = _smart_grid_table.get_module(module_id)\n if module is None:\n raise InvalidId('Module {0} does not exist'.format(\n module_id), status_code=410)\n\n config = module.get_configuration(config_id)\n if config is None:\n raise InvalidId('Config {0} does not exist'.format(\n config_id), status_code=410)\n\n put_data = request.get_json()\n new_value = int(put_data.get('value'))\n\n if new_value > config.get_max_value():\n raise InvalidId('Value {0} for Config {1} was higher than allowed'.format(\n new_value, config_id), status_code=410)\n if new_value < config.get_min_value():\n raise InvalidId('Value {0} for Config {1} was lower than allowed'.format(\n new_value, config_id), status_code=410)\n\n s_message = SmartMessage(\n MessageTypes.CONFIG_CHANGED,\n (module_id, config_id, new_value))\n\n _add_message_func(s_message)\n\n data = {\n \"id\": config.get_config_id(),\n \"min\": config.get_min_value(),\n \"max\": config.get_max_value(),\n \"value\": new_value,\n \"name\": config.get_name(),\n \"role\": config.get_role(string=True)\n }\n return jsonify(data)\n\n\nclass FlowSegmentColorView(MethodView):\n\n def put(self, color_id):\n color_ids = [COLOR_DICT[c]['id'] for c in COLOR_DICT]\n if color_id not in color_ids:\n raise InvalidId('Color id {0} does not exist'.format(\n color_id), status_code=410)\n\n put_data = request.get_json()\n rgb = put_data.get('rgb', '')\n\n if not self.valid_rgb(rgb):\n raise InvalidId('RGB {0} not valid'.format(rgb), status_code=410)\n\n for color in COLOR_DICT:\n if COLOR_DICT[color]['id'] is color_id:\n formatted = self.format_to_rgb(rgb)\n COLOR_DICT[color]['color'] = (formatted['r'], formatted['g'], formatted['b'], 1.0)\n\n s_message = SmartMessage(MessageTypes.COLOR_CHANGED, (color_id, rgb))\n _add_message_func(s_message)\n\n return jsonify(put_data)\n\n def get(self):\n\n data = {\n \"all\": [\n self.format_to_hex(color)\n for color in ALL_COLORS_DICT\n ],\n \"current\": [{\n \"color\": self.format_to_hex(COLOR_DICT[color]['color']),\n \"id\": COLOR_DICT[color]['id']\n } for color in COLOR_DICT]\n }\n return jsonify(data)\n\n def valid_rgb(self, rgb):\n \"\"\"\n RGB should be string of 6 characters. 
+ check for hex string\n \"\"\"\n if len(rgb) is 6:\n try:\n int(rgb, 16)\n return True\n except:\n pass\n return False\n\n @staticmethod\n def format_to_rgb(hex):\n \"\"\"\n Formats a hexadecimal value to a RGB color.\n \"\"\"\n r, g, b = bytearray.fromhex(hex)\n\n r = round(float(r) / 255, 1) if r > 0 else 0\n g = round(float(g) / 255, 1) if g > 0 else 0\n b = round(float(b) / 255, 1) if b > 0 else 0\n\n return {'r': r, 'g': g, 'b': b}\n\n def format_to_hex(self, color):\n \"\"\"\n Formats an RGB value to a hexadecimal color.\n \"\"\"\n return '%02x%02x%02x' % (color[0] * 255, color[1] * 255, color[2] * 255)\n\n\nclass PowerBoundaryView(MethodView):\n\n def get(self):\n data = {\n 'boundaries': VOLTAGE_POWER_LOAD_BOUNDARIES\n }\n\n return jsonify(data)\n\n def put(self):\n put_data = request.get_json()\n voltage, load, value = put_data.get('voltage', None), put_data.get(\n 'load', None), put_data.get('value', None)\n if voltage not in VOLTAGE_POWER_LOAD_BOUNDARIES:\n raise InvalidId('Voltage {0} not supported'.format(\n voltage), status_code=410)\n if load not in VOLTAGE_POWER_LOAD_BOUNDARIES[voltage]:\n raise InvalidId('Load {0} not supported'.format(\n load), status_code=410)\n if not self.validate_value(value, load):\n raise InvalidId('Invalid value {0}'.format(value), status_code=410)\n\n s_message = SmartMessage(\n MessageTypes.POWER_BOUNDARIES_CHANGED, (voltage, load, value))\n _add_message_func(s_message)\n\n return jsonify(put_data)\n\n def validate_value(self, value, load):\n \"\"\"\n value should be > 0 if load is critical,\n value should be between 0-1 if load is high\n \"\"\"\n if load is Load.HIGH:\n value = float(value)\n return True if 0 < value < 1 else False\n if load is Load.CRITICAL:\n value = int(value)\n return True if value > 0 else False\n return False\n\n\nclass Reboot(MethodView):\n\n def put(self):\n s_message = SmartMessage(MessageTypes.RESET_TABLES)\n _add_message_func(s_message)\n return jsonify(enabled=\"\")\n\n\nclass GridView(MethodView):\n\n def get(self, table_id=None):\n global _smart_grid_table\n\n grid = _smart_grid_table.grid\n\n data = {\"flow_segments\": [], \"modules\": []}\n\n # If the table ID is not set, return all segments and modules\n # of the complete setup, else return only the data relevant to\n # the specified table.\n if table_id is None:\n for flow_segment in grid.flow_segments:\n data[\"flow_segments\"].append(self.format_segment(flow_segment))\n\n for grid_module in (module for module in grid.get_modules() if isinstance(module, DefaultModule)):\n data[\"modules\"].append(self.format_module(grid_module))\n else:\n for flow_segment in (segment for segment in grid.flow_segments if segment.table_section.id is table_id):\n data[\"flow_segments\"].append(self.format_segment(flow_segment))\n\n for grid_module in (module for module in grid.get_modules() if isinstance(module, DefaultModule)):\n if grid_module.table_section.id is not None and grid_module.table_section.id is table_id:\n data[\"modules\"].append(self.format_module(grid_module))\n\n return jsonify(data)\n\n def format_segment(self, segment):\n data = {\n \"start_pos\": segment.start_pos,\n \"end_pos\": segment.end_pos,\n \"direction\": segment.direction,\n \"enabled\": segment.enabled,\n \"table_section\": segment.table_section.id,\n \"load\": segment.load\n }\n return data\n\n def format_module(self, module):\n data = {\n \"pos\": module.position,\n \"remainingPower\": module.remaining_power if hasattr(module, 'remaining_power') else 0,\n \"table_section\": 
module.table_section.id if module.table_section else None,\n \"module_id\": module.id,\n \"type\": module.type\n }\n return data\n\nclass FlowSegmentState(MethodView):\n\n def put(self, table_id, flow_id):\n global _smart_grid_table\n grid = _smart_grid_table.grid\n\n put_data = request.get_json()\n enabled = bool(ast.literal_eval(put_data.get('enabled', None).title()))\n grid.disable_flow(table_id, flow_id, enabled)\n _smart_grid_table.calculate()\n\n return jsonify(enabled=enabled)\n\n\nclass NeighboringTables(MethodView):\n\n def get(self, table_id):\n global _smart_grid_table\n neighbours = _smart_grid_table.get_neighbours(table_id)\n\n data = ({\n \"top\": neighbours[0],\n \"bottom\": neighbours[1],\n \"right\": neighbours[2],\n \"left\": neighbours[3]\n })\n return jsonify(data)\n\n\nclass ApiServer(Flask):\n\n def __init__(self, smart_grid_table, add_message_func):\n super(ApiServer, self).__init__(__name__)\n\n # Set smart grid table / message func so views can reach it\n global _smart_grid_table\n _smart_grid_table = smart_grid_table\n global _add_message_func\n _add_message_func = add_message_func\n\n # Add endpoints\n view_func = TableSectionView.as_view('table_sections')\n self.add_endpoint('/api/tablesections/', view_func, ['GET', 'PUT'])\n self.add_endpoint('/api/tablesections/<int:table_id>/', view_func, ['GET'])\n\n view_func = TableSectionModuleView.as_view('table_sections_modules')\n self.add_endpoint(\n '/api/tablesections/<int:table_section_id>/modules/', view_func, ['GET'])\n\n view_func = ModuleView.as_view('modules')\n self.add_endpoint('/api/modules/', view_func, ['GET'])\n self.add_endpoint('/api/modules/<int:module_id>/', view_func, ['GET'])\n\n view_func = ModuleConfigView.as_view('modules_configs')\n self.add_endpoint(\n '/api/modules/<int:module_id>/configs/', view_func, ['GET'])\n self.add_endpoint(\n '/api/modules/<int:module_id>/configs/<int:config_id>/', view_func, ['GET', 'PUT'])\n\n view_func = FlowSegmentColorView.as_view('flow_colors')\n self.add_endpoint('/api/flowcolor/', view_func, ['GET'])\n self.add_endpoint('/api/flowcolor/<int:color_id>/', view_func, ['PUT'])\n\n view_func = PowerBoundaryView.as_view('power_boundaries')\n self.add_endpoint('/api/powerboundaries/', view_func, ['GET', 'PUT'])\n\n view_func = GridView.as_view('grid')\n self.add_endpoint('/api/grid/', view_func, ['GET'])\n self.add_endpoint('/api/grid/<int:table_id>/', view_func, ['GET'])\n\n view_func = Reboot.as_view('Reboot')\n self.add_endpoint('/api/reboot/', view_func, ['PUT'])\n\n view_func = FlowSegmentState.as_view('FlowSegmentState')\n self.add_endpoint('/api/flowsegment/<int:table_id>/<int:flow_id>/', view_func, ['PUT'])\n\n view_func = NeighboringTables.as_view('table_neighbours')\n self.add_endpoint('/api/neighbours/<int:table_id>/', view_func, ['GET'])\n\n @self.errorhandler(InvalidId)\n def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n def add_endpoint(self, url, view_func, methods):\n self.add_url_rule(url, methods=methods, view_func=view_func)\n\n\nif __name__ == \"__main__\":\n def print_msg(msg):\n print(msg)\n\n\n # Import tests values\n from test_scripts.values_for_testing import *\n\n # Create table (also creates modules)\n table = SmartGridTable()\n\n # Connect a table (table_id, child_id, table_type)\n table.table_connected(table_1_id, None, None)\n table.table_connected(table_2_id, None, None)\n table.table_connected(table_3_id, None, None)\n table_section1 = 
table.get_table_section(table_1_id)\n table_section2 = table.get_table_section(table_2_id)\n table_section3 = table.get_table_section(table_3_id)\n\n # Place modules on Table Section (table_id, location_id, module_id)\n table.module_placed(table_1_id, module_location_west, module_low)\n table.module_placed(table_1_id, module_location_northwest, module_low2)\n table.module_placed(table_1_id, module_location_northeast, module_low3)\n\n # connect neighbor. table_neighbor_changed(table_id, location_id,\n # connected_neighbor_id)\n table.table_neighbor_changed(\n table_1_id, table_conn_point_north, table_2_conn_south)\n table.table_neighbor_changed(\n table_2_id, table_conn_point_south, table_1_conn_north)\n\n # connect by transformers. module_placed(table_id, location_id, module_id)\n table.module_placed(table_3_id, module_location_east,\n transformer_high) # Place transformer high\n table.module_placed(table_2_id, module_location_west,\n transformer_mediumH) # Place transformer medium\n\n ApiServer(table, print_msg).run(host='0.0.0.0')"
},
{
"alpha_fraction": 0.6483632326126099,
"alphanum_fraction": 0.6515311598777771,
"avg_line_length": 21.09756088256836,
"blob_id": "78268552a7b4b07fa8f98b494d88f7bb6d04e2cf",
"content_id": "c43ed68605b2eff33b28d48819136b1ceb74b1b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 41,
"path": "/final_prog/Arduino/boss/grid.cpp",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#include <Arduino.h>\r\n\r\n#include \"grid.h\"\r\n\r\n//initialize grid with parsed flow segments\r\nGrid grid_from_parsed_grid(const ParsedGrid *parsed_grid)\r\n{\r\n\tGrid grid;\r\n\r\n\tgrid.voltage = (Voltage) parsed_grid->voltage;\r\n\tfor (size_t i = 0; i < FLOW_SEGMENT_COUNT; i++) {\r\n\t\tgrid.flow_segments[i] = flow_segment_from_parsed_flow_segment(&parsed_grid->flow_segments[i]);\r\n\t}\r\n\r\n\treturn grid;\r\n}\r\n\r\n//Converts byte grid with normal grid\r\nGrid grid_from_bytes(const uint8_t *b)\r\n{\r\n\tconst ParsedGrid *parsed_grid = (const ParsedGrid *) b;\r\n\r\n\treturn grid_from_parsed_grid(parsed_grid);\r\n}\r\n\r\n//Prints grid information\r\nvoid grid_print(const Grid *grid)\r\n{\r\n\tSerial.println(\"Grid ----\");\r\n\r\n\tSerial.print(\" Voltage: \");\r\n\tSerial.println(voltage_to_string(grid->voltage));\r\n\r\n\tfor (size_t i = 0; i < FLOW_SEGMENT_COUNT; i++) {\r\n\t\tSerial.print(\" Flow \");\r\n\t\tSerial.print(i);\r\n\t\tSerial.print(\": \");\r\n\r\n\t\tflow_segment_print(&grid->flow_segments[i]);\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.5877944231033325,
"alphanum_fraction": 0.6263383030891418,
"avg_line_length": 24.94444465637207,
"blob_id": "dfd48691047a34e49dc1da7be67cfd9e1ee152af",
"content_id": "8c02988b3c822b5d9d8e55926539e145e39be767",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 934,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 36,
"path": "/final_prog/PC/test_scripts/test_neighbors.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom grid import Grid\nfrom table_section import TableSection\n\nsys.path.append(\"../\")\n\n# Create grid for testing\ngrid = Grid(None)\ntablesection = TableSection(0, 1, (0, 0))\nflows = tablesection.get_flows()\n\nfor flow in flows:\n grid.add_flow_segment(flow)\n\n\ndef test_generate_colliding_path():\n \"\"\"\n Test if second_path does not go through first_path, as direction of the flow segment of first_path are set forwards.\n \"\"\"\n grid.reset()\n first_path = grid.generate_path((2, 2), (4, 3))\n print(first_path)\n assert first_path[0] == (2, 2)\n assert first_path[-1] == (4, 3)\n\n second_path = grid.generate_path((1, 3), (4, 3))\n print(second_path)\n assert second_path[0] == (1, 3)\n assert second_path[-1] == (4, 3)\n\n third_path = grid.generate_path((3, 2), (2, 2))\n print(third_path)\n assert third_path[0] == (3, 2)\n assert third_path[-1] == (2, 2)\n assert (4, 3) in third_path\n"
},
{
"alpha_fraction": 0.6510995030403137,
"alphanum_fraction": 0.6764397621154785,
"avg_line_length": 25.612716674804688,
"blob_id": "99479de4a26fb6383595c43edcc33e99cf37e48c",
"content_id": "f738396b514c3db7c5c51d46c6748e28cb3fe04e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4775,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 173,
"path": "/Config.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef CONFIG_H\r\n#define CONFIG_H\r\n\r\n#include <FastLED.h>\r\n\r\n#include \"rfid.h\"\r\n#include \"sensor-info.h\"\r\n\r\n// Enable the MY_DEBUG define to show debug messages\r\n#define MY_DEBUG\r\n\r\n\r\n/**************************************\r\n* Ethernet Gateway Transport Defaults\r\n* To use network, use the library <Ethernet.h>\r\n***************************************/\r\n#define MY_GATEWAY_ESP32\r\n\r\n/** Configuration of WiFi */\r\n#define MY_WIFI_SSID \"MySSID\"\r\n#define MY_WIFI_PASSWORD \"MyVerySecretPassword\"\r\n\r\n/** Static IP/MAC address. If not defined, DHCP will be used */\r\n#define MY_HOSTNAME \"Boss1\"\r\n#define MY_IP_ADDRESS 192,168,1,1\r\n#define MY_MAC_ADDRESS 0xDE,0xAD,0xBE,0xEF,0xFE,0x01\r\n\r\n/** IP/subnet of gateway. If not defined, DHCP will be used\r\n#define MY_IP_GATEWAY_ADDRESS 192,168,1,254\r\n#define MY_IP_SUBNET_ADDRESS 255,255,255,0\r\n#define MY_PORT 5003\t//Port open in gateway\r\n*/\r\n\r\n\r\n//#define MY_DEBUG_VERBOSE_GATEWAY\t//Verbose debug prints related to the gateway transport\r\n/**\r\n * @def MY_PORT\r\n * @brief The Ethernet TCP/UDP port to open on controller or gateway.\r\n */\r\n//#ifndef MY_PORT\r\n//#ifdef MY_GATEWAY_MQTT_CLIENT\r\n//#define MY_PORT 1883\r\n//#else\r\n//#define MY_PORT 5003\r\n//#endif\r\n//#endif\r\n\r\n/**\r\n * @def MY_MQTT_CLIENT_PUBLISH_RETAIN\r\n * @brief Enables MQTT client to set the retain flag when publishing specific messages.\r\n */\r\n//#define MY_MQTT_CLIENT_PUBLISH_RETAIN\r\n/**\r\n * @def MY_MQTT_PASSWORD\r\n * @brief Used for authenticated MQTT connections.\r\n *\r\n * Set if your MQTT broker requires username/password.\r\n * Example: @code #define MY_MQTT_PASSWORD \"secretpassword\" @endcode\r\n * @see MY_MQTT_USER\r\n */\r\n//#define MY_MQTT_PASSWORD \"secretpassword\"\r\n//#define MY_MQTT_USER \"username\"\r\n/**\r\n * @def MY_MQTT_CLIENT_ID\r\n * @brief Set client ID for MQTT connections\r\n *\r\n * This define is mandatory for all MQTT client gateways.\r\n * Example: @code #define MY_MQTT_CLIENT_ID \"mysensors-1\" @endcode\r\n */\r\n//#define MY_MQTT_CLIENT_ID \"mysensors-1\"\r\n/**\r\n * @def MY_MQTT_PUBLISH_TOPIC_PREFIX\r\n * @brief Set prefix for MQTT topic to publish to.\r\n *\r\n * This define is mandatory for all MQTT client gateways.\r\n * Example: @code #define MY_MQTT_PUBLISH_TOPIC_PREFIX \"mygateway1-out\" @endcode\r\n */\r\n//#define MY_MQTT_PUBLISH_TOPIC_PREFIX \"mygateway1-out\"\r\n/**\r\n * @def MY_MQTT_SUBSCRIBE_TOPIC_PREFIX\r\n * @brief Set prefix for MQTT topic to subscribe to.\r\n *\r\n * This define is mandatory for all MQTT client gateways.\r\n * Example: @code #define MY_MQTT_SUBSCRIBE_TOPIC_PREFIX \"mygateway1-in\" @endcode\r\n */\r\n//#define MY_MQTT_SUBSCRIBE_TOPIC_PREFIX \"mygateway1-in\"\r\n\r\n\r\n/**\r\n * @def MY_IP_RENEWAL_INTERVAL_MS\r\n * @brief DHCP, default renewal setting in milliseconds.\r\n */\r\n//#ifndef MY_IP_RENEWAL_INTERVAL_MS\r\n//#define MY_IP_RENEWAL_INTERVAL_MS (60*1000ul)\r\n//#endif\r\n\r\n/**\r\n * @def MY_CONTROLLER_IP_ADDRESS\r\n * @brief If this is defined, gateway will act as a client trying to contact controller on\r\n * @ref MY_PORT using this IP address.\r\n * If left un-defined, gateway acts as server allowing incoming connections.\r\n */\r\n//#define MY_CONTROLLER_IP_ADDRESS 192,168,178,254\r\n\r\n/**\r\n * @def MY_CONTROLLER_URL_ADDRESS\r\n * @brief If this is defined, gateway will act as a client (ethernet or MQTT) trying to\r\n * contact controller on the given URL.\r\n */\r\n//#define MY_CONTROLLER_URL_ADDRESS 
\"test.mosquitto.org\"\r\n\r\n\r\n/***********************************\r\n *\tThe rest of original software\r\n ***********************************/\r\n// Table section ID set node id between 1-254\r\n#define TABLE_SECTION_ID 4\r\n\r\n// ID of this I2C slave\r\n#define SLAVE_ID 1\r\n\r\n// Shared RFID reset pin\r\n#define RFID_RST_PIN A0\r\n\r\n// RFID data pins \r\n#define RFID0_SDA_PIN 5\r\n#define RFID1_SDA_PIN 6\r\n#define RFID2_SDA_PIN 7\r\n#define RFID3_SDA_PIN 8\r\n\r\n// Number of RFIDs on this Arduino\r\n#define RFID_COUNT 4\r\n\r\n// Delay in ms between RFID checks\r\n#define RFID_CHECK_DELAY 75\r\n\r\n// Led strip data pin\r\n#define LEDSTRIP_DATA_PIN 3\r\n\r\n// Led strip clock pin\r\n#define LEDSTRIP_CLK_PIN 2\r\n\r\n// Led strip type with clock pin (currently SK9822 is used, not APA102 even though it says on the label because of chinese clone manufacturers)\r\n#define LEDSTRIP_TYPE SK9822\r\n\r\n// Number of flow_segments\r\n#define FLOW_SEGMENT_COUNT 20\r\n\r\n// Number of LEDs in each flow_segment\r\n#define FLOW_SEGMENT_LENGTH 6\r\n\r\n// Adjust brightness of LEDs\r\n#define LED_BRIGHTNESS 20 \r\n\r\n// Total number of LEDS\r\n#define LED_COUNT (FLOW_SEGMENT_COUNT * FLOW_SEGMENT_LENGTH)\r\n\r\n// Array of RFID sensors with grid positions\r\nextern const SensorInfo sensor_info[];\r\n\r\n// Create RFIDs\r\nextern RFID RFIDs[];\r\n\r\n// LED strip colors\r\nextern CRGB off_color;\r\nextern CRGB error_color;\r\nextern CRGB voltage_colors[];\r\nextern CRGB load_colors[];\r\n\r\n// Edit module id if you want to edit test module\r\n#define TEST_MODULE_ID 439560267\r\n\r\n#endif"
},
{
"alpha_fraction": 0.5855262875556946,
"alphanum_fraction": 0.5855262875556946,
"avg_line_length": 29.399999618530273,
"blob_id": "101344d7bd3968883ca2b53a52e65cb4f8b6c1e8",
"content_id": "2076ebd833734f507431c0ed325c1bbe068b8a0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 5,
"path": "/Software_currently_implement/flow.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "class Flow(object):\n def __init__(self, path, power, distance):\n self.path = path\n self.power = power\n self.distance = distance\n"
},
{
"alpha_fraction": 0.7208672165870667,
"alphanum_fraction": 0.7235772609710693,
"avg_line_length": 21.0625,
"blob_id": "89eb03630a0dcce7e294e90201b0870a8274873e",
"content_id": "dd25d333d64c56d566e3109ab4476e2eafd140a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 16,
"path": "/final_prog/Arduino/boss/sensor-info.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef SENSOR_INFO_H\r\n#define SENSOR_INFO_H\r\n\r\n//Sensor types, module sensors for rfids on top of the table, table section sensors for rfids on sides of the table (see config.cpp file for layout)\r\nenum SensorType {\r\n\tMODULE_SENSOR,\r\n\tTABLE_SECTION_SENSOR,\r\n};\r\n\r\n//properties of table sensors\r\nstruct SensorInfo {\r\n\tSensorType type;\r\n\tuint8_t location;\r\n};\r\n\r\n#endif\r\n"
},
{
"alpha_fraction": 0.7941176295280457,
"alphanum_fraction": 0.813725471496582,
"avg_line_length": 16.16666603088379,
"blob_id": "d6dac83b30818447facc283d0614151f003d0853",
"content_id": "45ba4838abc2cad3e8b86b41fbd1845f54f9424e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 6,
"path": "/final_prog/PC/requirements.txt",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "flask\npaho-mqtt\nnumpy\nrequests\ngit+https://github.com/jrialland/python-astar.git\nwebsocket-server==0.4"
},
{
"alpha_fraction": 0.5600141286849976,
"alphanum_fraction": 0.5660067200660706,
"avg_line_length": 35.92190933227539,
"blob_id": "5432f78cdd6e5bad2a3cd19e5e430e26b3563374",
"content_id": "a6e178f27271e4f6c3430d31bc1544f18f37180e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17021,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 461,
"path": "/Software_currently_implement/grid.py",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "from astar import AStar\nimport math\nimport datetime\nfrom settings import Direction, State, Speed, Load, get_speed, get_load, Voltages\nfrom module import DefaultModule, TransformerModule\nfrom logger import log\nfrom flow import Flow\n\n\nclass Grid(AStar, object):\n \"\"\"\n A class to define a network of flow segments and modules for distributing power,\n which allows pathfinding to happen between modules using the A* search algorithm.\n \"\"\"\n\n def __init__(self, smart_grid_app):\n super(Grid, self).__init__()\n self.flow_segments = []\n self.flow_segments_table = {}\n self.exclude_match_list = []\n self.flows = []\n self.smart_grid_app = smart_grid_app\n\n def distance_between(self, n1, n2):\n \"\"\"\n Get the distance between two vertices, using heuristic_cost_estimate\n \"\"\"\n return self.heuristic_cost_estimate(n1, n2)\n\n def get_total_path_distance(self, path):\n \"\"\"\n Get total distance in units of a path\n \"\"\"\n distance = 0\n\n for i, node in enumerate(path):\n if i > len(path) - 2:\n break\n\n next_node = path[i + 1]\n distance += self.distance_between(node, next_node)\n\n return distance\n\n def heuristic_cost_estimate(self, node_1, node_2):\n \"\"\"\n Calculate the distance between two vertices\n \"\"\"\n (x1, y1) = node_1\n (x2, y2) = node_2\n return math.hypot(x2 - x1, y2 - y1)\n\n def get_actual_distance_between_nodes(self, node_1, node_2):\n \"\"\"\n Calculate the distance between two vertices\n \"\"\"\n (x1, y1) = node_1\n (x2, y2) = node_2\n return math.hypot(x2 - x1, y2 - y1)\n\n def neighbors(self, node):\n \"\"\"\n Retrieve the connected vertice(s) of a given vertice\n \"\"\"\n nodes = []\n for flow_segment in self.flow_segments:\n # Only add a neighbor if the flow segment direction allows it to\n if flow_segment.enabled is False or flow_segment.state is State.ERROR or flow_segment.state is State.OFF:\n continue\n\n is_start_pos_equal = flow_segment.start_pos[0] is node[\n 0] and flow_segment.start_pos[1] is node[1]\n is_end_pos_equal = flow_segment.end_pos[0] is node[\n 0] and flow_segment.end_pos[1] is node[1]\n\n if (flow_segment.direction is None or flow_segment.direction is\n Direction.FORWARDS) and is_start_pos_equal:\n nodes.append(flow_segment.end_pos)\n elif (flow_segment.direction is None or flow_segment.direction is\n Direction.BACKWARDS) and is_end_pos_equal:\n nodes.append(flow_segment.start_pos)\n\n # Add transformer locations\n for module in self.get_modules():\n if not isinstance(module, TransformerModule):\n continue\n\n pos = node\n module_pos = module.position\n if module_pos[0] != pos[0] or module_pos[1] != pos[1]:\n continue\n\n # Find linked module position\n if module.linked_module.position is not None:\n nodes.append(module.linked_module.position)\n\n # Remove the duplicate vertices from the final result\n filtered_neighbors = []\n for i in nodes:\n if i not in filtered_neighbors:\n filtered_neighbors.append(i)\n\n return filtered_neighbors\n\n def add_flow_segment(self, table_section_id, flow_segment):\n \"\"\"\n Add flow segment to the list of flow segments\n \"\"\"\n self.flow_segments.append(flow_segment)\n\n # If the table ID doesn't exist in the segments tuple yet, add it first.\n if table_section_id not in self.flow_segments_table:\n self.flow_segments_table[table_section_id] = []\n self.flow_segments_table[table_section_id].append(flow_segment)\n\n def calculate(self):\n \"\"\"\n Calculate how the energy flows to the modules\n \"\"\"\n log('\\n> Recalculating grid')\n\n most_important_modules = 
sorted(self.get_modules(), key=lambda v: v.priority, reverse=True)\n\n # Error out incorrect voltage\n for module in most_important_modules:\n if module.voltage is not module.table_section.voltage and module.voltage is not Voltages.ADAPTIVE:\n self.give_error_module(module)\n\n # Disable modules without attached flow segments\n if len(most_important_modules) >= 2:\n for module in most_important_modules:\n if not isinstance(module, DefaultModule) or module.remaining_power >= 0:\n continue\n\n should_reset_power = True\n\n for other_module in most_important_modules:\n if other_module is module:\n continue\n\n if not isinstance(other_module, DefaultModule) or other_module.remaining_power <= 0:\n continue\n\n unparsed_path = self.astar(module.position, other_module.position)\n\n if unparsed_path is None:\n continue\n\n should_reset_power = False\n break\n\n if should_reset_power:\n log('- [' + module.name + '] has been reset as there are no paths to any other module!')\n module.remaining_power = 0\n\n # Find paths for modules\n for module in most_important_modules:\n if not isinstance(\n module, DefaultModule) or module.remaining_power <= 0:\n continue\n\n while module.remaining_power is None or module.remaining_power > 0:\n producing_module = module\n (consuming_module, path, distance) = self.get_closest_relevant_module(producing_module)\n if consuming_module is None:\n break\n\n from_position = producing_module.position\n to_position = consuming_module.position\n\n power_consumption = min(\n -consuming_module.remaining_power, producing_module.remaining_power)\n consuming_module.remaining_power += power_consumption\n producing_module.remaining_power -= power_consumption\n\n log('- [' + producing_module.name + '] => [' + consuming_module.name + '] Path from ' + str(\n from_position) + ' to ' + str(to_position) + ' (distance: ' + str(distance) + ')')\n\n desired_voltage = producing_module.voltage if producing_module.voltage is not Voltages.ADAPTIVE else consuming_module.voltage\n\n if desired_voltage is Voltages.ADAPTIVE:\n desired_voltage = Voltages.HIGH\n\n self.flows.append(Flow(path, power_consumption, distance))\n self.generate_path(path, power_consumption, desired_voltage)\n\n self.smart_grid_app.reset_flow_config_timer()\n return\n\n def give_power_back_to_modules(self):\n for flow in self.flows:\n if flow.power is None:\n continue\n\n producing_module = self.get_module_on_position(flow.path[0])\n consuming_module = self.get_module_on_position(flow.path[-1])\n\n # Remove path\n if producing_module.table_section.voltage is Voltages.ERROR or consuming_module.table_section.voltage is Voltages.ERROR:\n if producing_module.remaining_power is None or consuming_module.remaining_power is None:\n continue\n\n producing_module.remaining_power += flow.power\n consuming_module.remaining_power -= flow.power\n log(\"V [\" + producing_module.name + \"] X> [\" + consuming_module.name + \"] \" + str(\n flow.power) + \" power has been reverted\")\n flow.power = None\n\n final_flows = [flow for flow in self.flows if flow.power is not None]\n removed_flows = [flow for flow in self.flows if flow.power is None]\n\n for removed_flow in removed_flows:\n for removed_node in removed_flow.path:\n should_remove_node = True\n\n for existing_flow in final_flows:\n for existing_node in existing_flow.path:\n if removed_node[0] is existing_node[0] and removed_node[1] is existing_node[1]:\n should_remove_node = False\n break\n\n if should_remove_node is False: break\n\n if should_remove_node is False: continue\n\n # 
Remove node\n for flow_segment in self.find_flow_segments_on_position(removed_node):\n flow_segment.reset()\n\n if flow_segment.table_section.voltage is Voltages.ERROR:\n flow_segment.state = State.OFF\n else:\n flow_segment.state = State.PASSIVE\n\n self.flows = final_flows\n\n log(\"\\n===== FINAL FLOWS =====\")\n for flow in self.flows:\n from_position = flow.path[0]\n to_position = flow.path[-1]\n producing_module = self.get_module_on_position(from_position)\n consuming_module = self.get_module_on_position(to_position)\n distance = flow.distance\n\n log('- [' + producing_module.name + '] => [' + consuming_module.name + '] Path from ' + str(\n from_position) + ' to ' + str(to_position) + ' (distance: ' + str(distance) + ')')\n\n log(\"\")\n\n def find_flow_segments_on_position(self, node):\n flow_segments = []\n for flow_segment in self.flow_segments:\n if (\n (flow_segment.start_pos[0] is node[0] and flow_segment.start_pos[1] is node[1]) or\n (flow_segment.end_pos[0] is node[0] and flow_segment.end_pos[1] is node[1])\n ): flow_segments.append(flow_segment)\n\n return flow_segments\n\n def give_error_module(self, module):\n \"\"\"\n Show error to the flow segments that are connected to a module\n \"\"\"\n for fs in self.flow_segments:\n if (fs.start_pos[0] != module.position[0]\n or fs.start_pos[1] != module.position[1]) and (\n fs.end_pos[0] != module.position[0]\n or fs.end_pos[1] != module.position[1]):\n continue\n\n fs.state = State.ERROR\n\n def generate_path(self, found_path, power=None, voltage=None):\n \"\"\"\n Returns the shortest path between two vertices\n \"\"\"\n # Assign the directions for the appropriate flow segments to alter\n # future pathfinding\n for i, node in enumerate(found_path):\n if i > len(found_path) - 2:\n break\n\n next_node = found_path[i + 1]\n\n while True:\n flow_segments = self.flow_find_segments(node, next_node)\n\n if len(flow_segments) == 0:\n break\n\n for flow_segment in flow_segments:\n if flow_segment.start_pos == node:\n flow_segment.direction = Direction.FORWARDS\n else:\n flow_segment.direction = Direction.BACKWARDS\n\n flow_segment.state = State.ACTIVE\n flow_segment.load = Load.NORMAL\n flow_segment.speed = Speed.NORMAL\n if voltage:\n flow_segment.voltage = voltage\n\n return found_path\n\n def flow_find_segments(self, start_pos, end_pos, exclude_state=State.ACTIVE):\n \"\"\"\n Find a flow segment based on given starting and ending vertices.\n \"\"\"\n segments = []\n for flow_segment in self.flow_segments:\n if exclude_state is not None and flow_segment.state == exclude_state:\n continue\n\n if (flow_segment.start_pos == start_pos\n and flow_segment.end_pos == end_pos) or (\n flow_segment.start_pos == end_pos\n and flow_segment.end_pos == start_pos):\n segments.append(flow_segment)\n\n return segments\n\n def get_closest_relevant_module(self, for_module, priority=1):\n \"\"\"\n Find a producing module that can give energy to a consuming module, or find a consuming module that can receive energy from a producing module\n \"\"\"\n available_modules = []\n for grid_module in self.get_modules():\n if not isinstance(grid_module, DefaultModule):\n continue\n\n # Grid module should have enough power\n if ((for_module.remaining_power < 0 and grid_module.remaining_power <= 0) or (\n for_module.remaining_power > 0 and grid_module.remaining_power >= 0)) or grid_module.priority != priority:\n continue\n\n available_modules.append(grid_module)\n\n if len(available_modules) is 0:\n return self.get_closest_relevant_module(for_module, priority - 1) if 
priority > 0 else (None, None, None)\n\n closest_module = None\n closest_module_distance = 99999999\n closest_module_path = None\n\n for module in available_modules:\n consuming_module = for_module\n producing_module = module\n\n if consuming_module.remaining_power > 0:\n consuming_module = module\n producing_module = for_module\n\n unparsed_path = self.astar(producing_module.position, consuming_module.position)\n if unparsed_path is None:\n continue\n\n path = list(unparsed_path)\n\n voltage = self.get_path_voltage(path)\n if voltage is Voltages.ERROR:\n continue\n\n distance = self.get_total_path_distance(path)\n\n if module.table_section is not for_module.table_section and (\n module.table_section.type is not 2 and for_module.table_section.type is not 2):\n distance += 5 + 5 * abs(module.table_section.voltage - for_module.table_section.voltage)\n\n set_new_closest_module = False\n\n if distance < closest_module_distance:\n set_new_closest_module = True\n\n elif distance is closest_module_distance:\n if module.remaining_power > closest_module.remaining_power:\n set_new_closest_module = True\n\n if set_new_closest_module:\n closest_module = module\n closest_module_distance = distance\n closest_module_path = path\n\n return (closest_module, closest_module_path, closest_module_distance)\n\n def get_path_voltage(self, path):\n \"\"\"\n Get voltage of the given path\n \"\"\"\n voltage = None\n\n for i, node in enumerate(path):\n module = self.get_module_on_position(node)\n if module is None or module.voltage is Voltages.ADAPTIVE:\n continue\n\n if voltage is None:\n voltage = module.voltage\n elif voltage is not module.voltage:\n voltage = Voltages.ERROR\n break\n\n if not isinstance(module, TransformerModule):\n continue\n\n linked_module = module.linked_module\n\n if linked_module is None:\n continue\n\n next_node = path[i + 1]\n\n if self.get_module_on_position(next_node) is not linked_module:\n continue\n\n voltage = linked_module.voltage\n\n return voltage\n\n def get_module_on_position(self, pos):\n \"\"\"\n Retrieve which module sits on a position\n \"\"\"\n for module in self.get_modules():\n module_pos = module.position\n if module_pos[0] is pos[0] and module_pos[1] is pos[1]:\n return module\n\n return None\n\n def disable_flow(self, table_id, flow_id, enabled):\n \"\"\"\n Disable a flow of the graph structure\n \"\"\"\n self.flow_segments_table[table_id][flow_id].set_force_disabled(enabled)\n\n def reset(self):\n \"\"\"\n Reset the state of the flow segments to allow recalculating\n \"\"\"\n self.flows = []\n\n for flow_segment in self.flow_segments:\n flow_segment.reset()\n\n for module in self.get_modules():\n if not isinstance(module, DefaultModule):\n continue\n\n module.reset_power()\n\n def get_modules(self):\n \"\"\"\n Function to retrieve all modules that are standing on the grid\n \"\"\"\n modules = []\n for module in self.smart_grid_app.smart_grid_table.modules:\n if module.position is not None:\n modules.append(module)\n\n modules = sorted(modules, key=lambda module: module.time_placed, reverse=True)\n return modules\n"
},
{
"alpha_fraction": 0.6995515823364258,
"alphanum_fraction": 0.6995515823364258,
"avg_line_length": 13.928571701049805,
"blob_id": "e9b008acfb0b5aee22056103f623f81ebb976563",
"content_id": "597c20dc0b3cfc4ddb33ed262c783c1f9bea9399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 14,
"path": "/final_prog/Arduino/boss/voltage.h",
"repo_name": "exblematique/Project",
"src_encoding": "UTF-8",
"text": "#ifndef VOLTAGE_H\r\n#define VOLTAGE_H\r\n\r\n//Voltage categories\r\nenum Voltage {\r\n\tVOLTAGE_LOW,\r\n\tVOLTAGE_MEDIUM,\r\n\tVOLTAGE_HIGH,\r\n};\r\n\r\n// Converts voltage to string\r\nconst char *voltage_to_string(Voltage voltage);\r\n\r\n#endif\r\n"
}
] | 50 |
MaungSan/python-tutorials | https://github.com/MaungSan/python-tutorials | e9056111d8fb76205c0014881e081b9dc342b914 | 4f2330d5d44022f88650a5199d82e38f726ceda9 | 06c0ba5ebc400dda43fe3cf30f797bd7739cb40c | refs/heads/main | 2023-06-11T16:38:07.259818 | 2021-07-01T04:32:29 | 2021-07-01T04:32:29 | 381,910,190 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3686274588108063,
"alphanum_fraction": 0.3803921639919281,
"avg_line_length": 18.078947067260742,
"blob_id": "c2de7928fb2353925991e85c6d784e4cee1ce1ec",
"content_id": "a1f10f84c3bcc6d7ad657cc2e931e091e27acfa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 38,
"path": "/68 File Handling.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": "\r\n\r\n#\r\n# f = open('MyData','r')\r\n# # print(f.read())\r\n# # print(f.readline(4),end=\"\")\r\n# f1 = open('abc','a')\r\n# # f1.write(\"Something\")\r\n# # f1.write(\"People\")\r\n# # f1.write('Mobile')\r\n\r\n\r\n\r\n# f = open('MyData','r')\r\n#\r\n# f1 = open('abc','w')\r\n#\r\n# for data in f:\r\n# f1.write(data)\r\n\r\n\r\n\r\nf = open('mumbai.jpg','rb')\r\n\r\nf1 = open('Mumbaii.jpg','wb')\r\n\r\nfor i in f:\r\n f1.write(i)\r\n\r\n\r\n-------------------------------------------------------------------------------- MyData \r\n--------------------------------------------------------------------------------\r\n\r\nMy Name is Navin Reddy\r\nTelusko on Youtube\r\nProgramming and Technology Videos\r\nCorporate Trainer\r\nMumbai\r\nSubscribers\r\nAsus Laptop"
},
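The byte-copy loop in the file-handling script above never closes its file handles. A minimal sketch of the same binary copy using context managers; the source name comes from that file, the destination name is only a placeholder, and the sketch assumes the source file exists:

    with open('mumbai.jpg', 'rb') as src, open('mumbai_copy.jpg', 'wb') as dst:
        for chunk in src:      # iterating a binary file yields newline-delimited byte chunks
            dst.write(chunk)   # both files are closed automatically when the block exits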
{
"alpha_fraction": 0.35476717352867126,
"alphanum_fraction": 0.39246121048927307,
"avg_line_length": 10.189188957214355,
"blob_id": "c52ca89e666a3d697af2b01a313616910e1c2ac1",
"content_id": "07c208aa447cbdbb5f0519c608cda9580b3b9a06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 37,
"path": "/24 Break Continue Pass in Python.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": "# av = 5\r\n#\r\n# x = int(input(\"How many Candies you want?\"))\r\n#\r\n# i = 1\r\n# while i <= x:\r\n#\r\n# if i>av:\r\n# print(\"Out of stock\")\r\n# break\r\n#\r\n#\r\n# print(\"Candy\")\r\n# i+=1\r\n#\r\n# print(\"Bye\")\r\n\r\n\r\n# for i in range(1,101):\r\n#\r\n# if i%3==0 and i%5==0:\r\n# continue\r\n#\r\n# print(i)\r\n#\r\n# print(\"Bye\")\r\n\r\n\r\nfor i in range(1, 101):\r\n\r\n if(i % 2 != 0):\r\n pass\r\n\r\n else:\r\n print(i)\r\n\r\nprint(\"Bye\")\r\n"
},
{
"alpha_fraction": 0.2803531885147095,
"alphanum_fraction": 0.2803531885147095,
"avg_line_length": 19.33333396911621,
"blob_id": "c416eb261217c8a41dfd8dca75401e56f622e70a",
"content_id": "a1b2c34d5f20de3d8c3ac6154dd93d76563a5e8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 21,
"path": "/49 Special Vairable__name___.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": " \r\n\r\n# import Calc\r\n# print(\"Demo Says : \" + __name__)\r\n\r\ndef main():\r\n\r\n print(\"Hello\")\r\n print(\"Welcome User\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n--------------------------------------------------------------------------------\r\n Calc.py\r\n--------------------------------------------------------------------------------\r\n\r\n# print(\"Hello \" + __name__)\r\n\r\nimport demo\r\n\r\nprint(\"Its Time to Calculate\")"
},
{
"alpha_fraction": 0.35410764813423157,
"alphanum_fraction": 0.41926345229148865,
"avg_line_length": 11.576923370361328,
"blob_id": "d2ddb6cd3c131f91699860ad5905f66f8155de98",
"content_id": "1db4967db689b265be0ac943f12f5a02400df585",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 353,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 26,
"path": "/23 For Loop in Python.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": "# x = ['navin',65,2.5]\r\n#\r\n# for i in x:\r\n# print(i)\r\n\r\n\r\n# x = 'NAVIN'\r\n#\r\n# for i in x:\r\n# print(i)\r\n\r\n# for i in [2,6,'Paul']:\r\n# print(i)\r\n\r\n# for i in range(10):\r\n# print(i)\r\n\r\n# for i in range(11,21,5):\r\n# print(i)\r\n\r\n# for i in range(20,10,-1):\r\n# print(i)\r\n\r\nfor i in range(1, 21):\r\n if i % 5 != 0:\r\n print(i)\r\n"
},
{
"alpha_fraction": 0.4533333480358124,
"alphanum_fraction": 0.5333333611488342,
"avg_line_length": 13,
"blob_id": "acfe2a61e4523cd552bdf26065694b9890d4d963",
"content_id": "ae06a4cb020156367d5486ed9b90c4695e72da1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 5,
"path": "/31 Why Numpy Installing Numpy in Pycharm.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": "from numpy import *\r\n\r\narr = array([1, 2, 3, 2, 5, 4], int)\r\n\r\nprint(arr)\r\n"
},
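The NumPy snippet above relies on a wildcard import. The same array can be built with the conventional namespaced import; this sketch assumes NumPy is installed:

    import numpy as np

    arr = np.array([1, 2, 3, 2, 5, 4], int)  # same elements as in the file above
    print(arr)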
{
"alpha_fraction": 0.4575163424015045,
"alphanum_fraction": 0.4771241843700409,
"avg_line_length": 8.5,
"blob_id": "5569dc8ecde15cd5d49bb776a82b46a895435672",
"content_id": "592df17966be3111bfed57dced675fff3b6fd942",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 14,
"path": "/45 Anonymous Function Lambda.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": " \r\n\r\n# def square(a):\r\n# return a * a\r\n#\r\n# result = square(5)\r\n#\r\n# print(result)\r\n\r\n\r\n\r\nf = lambda a,b : a+b\r\n\r\nresult = f(5,6)\r\n\r\nprint(result)\r\n"
},
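The lambda file above only binds a lambda to a name; lambdas are most useful when passed directly to higher-order built-ins. A small sketch using map and sorted, with arbitrary example numbers:

    f = lambda a, b: a + b
    print(f(5, 6))                             # 11, as in the file above

    nums = [3, 1, 4, 1, 5]
    print(list(map(lambda x: x * x, nums)))    # [9, 1, 16, 1, 25]
    print(sorted(nums, key=lambda x: -x))      # [5, 4, 3, 1, 1]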
{
"alpha_fraction": 0.31506848335266113,
"alphanum_fraction": 0.3561643958091736,
"avg_line_length": 10.586206436157227,
"blob_id": "5a846badcfe6d76b973c0e94c85bd7af05f269b7",
"content_id": "d403147b34a0db6cfce2a8d6623eba87d11445d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 29,
"path": "/22 While Loop in Python.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": "# i = 1\r\n#\r\n# while i<=5:\r\n# print(\"Telusko\")\r\n# i=i+1\r\n\r\n# i = 5\r\n#\r\n# while i>=1:\r\n# print(\"Telusko\")\r\n# i=i-1\r\n\r\n# i = 5\r\n#\r\n# while i>=1:\r\n# print(\"Telusko\" , i)\r\n# i=i-1\r\n\r\ni = 1\r\n\r\nwhile i <= 5:\r\n print(\"Telusko \", end=\"\")\r\n j = 1\r\n while j <= 4:\r\n print(\"Rocks \", end=\"\")\r\n j = j+1\r\n\r\n i = i+1\r\n print()\r\n"
},
{
"alpha_fraction": 0.375,
"alphanum_fraction": 0.4027777910232544,
"avg_line_length": 7.333333492279053,
"blob_id": "ce4386425c278ce646d6fd8acdca2010a82d552d",
"content_id": "e12820d36f86cda39d11de7a93e57e1004da35b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 15,
"path": "/42 Factorial.py",
"repo_name": "MaungSan/python-tutorials",
"src_encoding": "UTF-8",
"text": " \r\n\r\ndef fact(n):\r\n\r\n f = 1\r\n\r\n for i in range(1,n+1):\r\n f = f * i\r\n return f\r\n\r\n\r\nx = 4\r\n\r\nresult = fact(x)\r\n\r\n\r\nprint(result)"
}
] | 8 |
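The last file computes 4! with an iterative loop. For comparison, a recursive version and the standard-library helper produce the same result:

    from math import factorial

    def fact(n):
        # base case 0! = 1! = 1, otherwise n * (n-1)!
        return 1 if n <= 1 else n * fact(n - 1)

    print(fact(4))        # 24
    print(factorial(4))   # 24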