code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars) |
---|---|---|
# Generated by Django 2.2.9 on 2020-04-12 18:29
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('contest', '0020_auto_20200411_2339'),
    ]

    operations = [
        migrations.AddField(
            model_name='contesttest',
            name='judge_answer',
            field=models.CharField(blank=True, max_length=1000000),
        ),
    ]
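# To apply this migration (standard Django workflow, not part of the file itself):
#   python manage.py migrate contest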
| [
"django.db.models.CharField"
] | [((345, 393), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(1000000)'}), '(blank=True, max_length=1000000)\n', (361, 393), False, 'from django.db import migrations, models\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from RequirementsDocumentationPanel import RequirementsDocumentationPanel
from PersonasDocumentationPanel import PersonasDocumentationPanel
from cairis.core.armid import *
__author__ = '<NAME>'
class GenerateDocumentationPanel(wx.Panel):
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, GENDOCPANEL_ID)
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        docTypeBox = wx.StaticBox(self, -1, 'Type')
        docTypeSizer = wx.StaticBoxSizer(docTypeBox, wx.HORIZONTAL)
        self.docTypeCtrl = wx.ComboBox(self, GENDOCPANEL_COMBODOCTYPE_ID, choices=['Requirements', 'Personas'], size=wx.DefaultSize, style=wx.CB_READONLY)
        self.docTypeCtrl.SetSelection(0)
        docTypeSizer.Add(self.docTypeCtrl, 1, wx.EXPAND)
        mainSizer.Add(docTypeSizer, 0, wx.EXPAND)
        checkBox = wx.StaticBox(self, -1, 'Sections')
        self.checkSizer = wx.StaticBoxSizer(checkBox, wx.VERTICAL)
        mainSizer.Add(self.checkSizer, 0, wx.EXPAND)
        self.reqPanel = RequirementsDocumentationPanel(self)
        self.perPanel = PersonasDocumentationPanel(self)
        self.checkSizer.Add(self.reqPanel, 1, wx.EXPAND)
        self.checkSizer.Add(self.perPanel, 1, wx.EXPAND)
        self.checkSizer.Show(0, True, True)
        self.checkSizer.Hide(1, True)
        otBox = wx.StaticBox(self, -1, 'Output Type')
        otSizer = wx.StaticBoxSizer(otBox, wx.VERTICAL)
        mainSizer.Add(otSizer, 0, wx.EXPAND)
        self.htmlCheck = wx.CheckBox(self, DOCOPT_HTML_ID, 'HTML')
        self.htmlCheck.SetValue(True)
        otSizer.Add(self.htmlCheck, 0, wx.EXPAND)
        self.rtfCheck = wx.CheckBox(self, DOCOPT_RTF_ID, 'RTF')
        self.rtfCheck.SetValue(False)
        otSizer.Add(self.rtfCheck, 0, wx.EXPAND)
        self.pdfCheck = wx.CheckBox(self, DOCOPT_PDF_ID, 'PDF')
        self.pdfCheck.SetValue(False)
        otSizer.Add(self.pdfCheck, 0, wx.EXPAND)
        mainSizer.Add(wx.StaticText(self, -1, ''), 1, wx.EXPAND)
        buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
        mainSizer.Add(buttonSizer, 0, wx.ALIGN_CENTER)
        buttonSizer.Add(wx.Button(self, GENDOCPANEL_BUTTONGENERATE_ID, 'Generate'))
        buttonSizer.Add(wx.Button(self, wx.ID_CANCEL, 'Cancel'))
        self.SetSizer(mainSizer)
        self.docTypeCtrl.Bind(wx.EVT_COMBOBOX, self.onDocTypeChange)

    def onDocTypeChange(self, evt):
        if self.docTypeCtrl.GetStringSelection() == 'Requirements':
            self.checkSizer.Show(0, True, True)
            self.checkSizer.Show(1, False, True)
        else:
            self.checkSizer.Show(0, False, True)
            self.checkSizer.Show(1, True, True)
        self.checkSizer.Layout()

    def sectionFlags(self):
        if self.docTypeCtrl.GetStringSelection() == 'Requirements':
            return self.reqPanel.sectionFlags()
        else:
            return self.perPanel.sectionFlags()

    def typeFlags(self):
        flags = [
            self.htmlCheck.GetValue(),
            self.rtfCheck.GetValue(),
            self.pdfCheck.GetValue()]
        return flags

    def documentType(self):
        return self.docTypeCtrl.GetStringSelection()
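# Hedged usage sketch (not part of the original CAIRIS file): hosting the panel in a plain
# frame for a quick manual test. The frame title and size below are made up for the example.
if __name__ == '__main__':
    app = wx.App()
    frame = wx.Frame(None, -1, 'Generate Documentation', size=(400, 400))
    panel = GenerateDocumentationPanel(frame)
    frame.Show()
    app.MainLoop()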
| [
"wx.Button",
"wx.ComboBox",
"wx.BoxSizer",
"RequirementsDocumentationPanel.RequirementsDocumentationPanel",
"wx.StaticBoxSizer",
"wx.CheckBox",
"wx.StaticText",
"wx.StaticBox",
"PersonasDocumentationPanel.PersonasDocumentationPanel",
"wx.Panel.__init__"
] | [((1082, 1129), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent', 'GENDOCPANEL_ID'], {}), '(self, parent, GENDOCPANEL_ID)\n', (1099, 1129), False, 'import wx\n'), ((1144, 1168), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1155, 1168), False, 'import wx\n'), ((1187, 1217), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Type"""'], {}), "(self, -1, 'Type')\n", (1199, 1217), False, 'import wx\n'), ((1235, 1279), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['docTypeBox', 'wx.HORIZONTAL'], {}), '(docTypeBox, wx.HORIZONTAL)\n', (1252, 1279), False, 'import wx\n'), ((1302, 1433), 'wx.ComboBox', 'wx.ComboBox', (['self', 'GENDOCPANEL_COMBODOCTYPE_ID'], {'choices': "['Requirements', 'Personas']", 'size': 'wx.DefaultSize', 'style': 'wx.CB_READONLY'}), "(self, GENDOCPANEL_COMBODOCTYPE_ID, choices=['Requirements',\n 'Personas'], size=wx.DefaultSize, style=wx.CB_READONLY)\n", (1313, 1433), False, 'import wx\n'), ((1575, 1609), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Sections"""'], {}), "(self, -1, 'Sections')\n", (1587, 1609), False, 'import wx\n'), ((1630, 1670), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['checkBox', 'wx.VERTICAL'], {}), '(checkBox, wx.VERTICAL)\n', (1647, 1670), False, 'import wx\n'), ((1738, 1774), 'RequirementsDocumentationPanel.RequirementsDocumentationPanel', 'RequirementsDocumentationPanel', (['self'], {}), '(self)\n', (1768, 1774), False, 'from RequirementsDocumentationPanel import RequirementsDocumentationPanel\n'), ((1795, 1827), 'PersonasDocumentationPanel.PersonasDocumentationPanel', 'PersonasDocumentationPanel', (['self'], {}), '(self)\n', (1821, 1827), False, 'from PersonasDocumentationPanel import PersonasDocumentationPanel\n'), ((2014, 2051), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Output Type"""'], {}), "(self, -1, 'Output Type')\n", (2026, 2051), False, 'import wx\n'), ((2064, 2101), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['otBox', 'wx.VERTICAL'], {}), '(otBox, wx.VERTICAL)\n', (2081, 2101), False, 'import wx\n'), ((2162, 2203), 'wx.CheckBox', 'wx.CheckBox', (['self', 'DOCOPT_HTML_ID', '"""HTML"""'], {}), "(self, DOCOPT_HTML_ID, 'HTML')\n", (2173, 2203), False, 'import wx\n'), ((2301, 2340), 'wx.CheckBox', 'wx.CheckBox', (['self', 'DOCOPT_RTF_ID', '"""RTF"""'], {}), "(self, DOCOPT_RTF_ID, 'RTF')\n", (2312, 2340), False, 'import wx\n'), ((2437, 2476), 'wx.CheckBox', 'wx.CheckBox', (['self', 'DOCOPT_PDF_ID', '"""PDF"""'], {}), "(self, DOCOPT_PDF_ID, 'PDF')\n", (2448, 2476), False, 'import wx\n'), ((2629, 2655), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (2640, 2655), False, 'import wx\n'), ((2571, 2598), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', '""""""'], {}), "(self, -1, '')\n", (2584, 2598), False, 'import wx\n'), ((2726, 2784), 'wx.Button', 'wx.Button', (['self', 'GENDOCPANEL_BUTTONGENERATE_ID', '"""Generate"""'], {}), "(self, GENDOCPANEL_BUTTONGENERATE_ID, 'Generate')\n", (2735, 2784), False, 'import wx\n'), ((2804, 2843), 'wx.Button', 'wx.Button', (['self', 'wx.ID_CANCEL', '"""Cancel"""'], {}), "(self, wx.ID_CANCEL, 'Cancel')\n", (2813, 2843), False, 'import wx\n')] |
from django.db import models
import os, datetime
import shutil
from research.models import CurrentResearch
from directory.models import Faculty as Fac
from django.conf import settings
def get_image_path_phd(instance, filename):
return os.path.join("PeopleApp", "static", "UserImages", type(instance).__name__, filename)
def get_image_path(instance, filename):
return os.path.join("PeopleApp", "static", "UserImages", type(instance).__name__, str(instance.pk), filename)
class Designations(models.Model):
designation = models.CharField(max_length=100, blank=False)
def __str__(self):
return self.designation
class Meta:
verbose_name_plural = "Designations"
class Faculty(models.Model):
name = models.CharField(max_length=100, blank=False)
designation = models.ForeignKey('Designations')
additional_info = models.CharField(max_length=200, blank=True, null=True)
directory = models.ForeignKey(Fac, on_delete=models.SET_NULL, blank=True, null=True)
email = models.CharField(primary_key=True, max_length=50)
profile_link = models.CharField(max_length=200, default="#")
profile_picture = models.ImageField(upload_to=get_image_path, blank=True, null=True)
research_areas = models.TextField(blank=True, null=True)
list_position = models.IntegerField(default=1)
phd = models.CharField(max_length=100, blank=True, null=True)
fax = models.CharField(max_length=20, blank=True, null=True)
google_scholar = models.CharField(max_length=200, blank=True, null=True)
webpage = models.CharField(max_length=200, blank=True, null=True)
# phd_students = models.TextField(blank=True, null=True)
research_group = models.TextField(blank=True, null=True)
former_research_group = models.TextField(blank=True, null=True)
professional_experience = models.TextField(blank=True, null=True)
awards_honors = models.TextField(blank=True, null=True)
member_of_professional_bodies = models.TextField(blank=True, null=True)
publications = models.TextField(blank=True, null=True)
invited_talks = models.TextField(blank=True, null=True)
conference_presentations = models.TextField(blank=True, null=True)
conference_attended = models.TextField(blank=True, null=True)
sponsored_projects = models.TextField(blank=True, null=True)
teaching = models.TextField(blank=True, null=True)
patents = models.TextField(blank=True, null=True)
current_research = models.ManyToManyField(CurrentResearch, blank=True)
personal_cv_link = models.TextField(blank=True, null=True, max_length=500)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
path = os.path.join("PeopleApp", "static", "UserImages", type(self).__name__, str(self.pk))
if os.path.exists(path):
shutil.rmtree(path)
return super(Faculty, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Faculty, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Faculty.objects.get(pk=self.pk)
except Faculty.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.profile_picture and self.profile_picture and obj.profile_picture != self.profile_picture:
# delete the old image file from the storage in favor of the new file
obj.profile_picture.delete()
def get_image_path(self):
return str(self.profile_picture.url)[16:]
class Meta:
ordering = ('name',)
verbose_name_plural = "Faculty"
class Staff(models.Model):
name = models.CharField(max_length=100, blank=False)
designation = models.ForeignKey('Designations')
email = models.CharField(primary_key=True, max_length=50)
phone = models.CharField(max_length=12, blank=True, null=True)
profile_picture = models.ImageField(upload_to=get_image_path, blank=True, null=True)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
path = os.path.join("PeopleApp", "static", "UserImages", type(self).__name__, str(self.pk))
if os.path.exists(path):
shutil.rmtree(path)
return super(Staff, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Staff, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Staff.objects.get(pk=self.pk)
except Staff.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.profile_picture and self.profile_picture and obj.profile_picture != self.profile_picture:
# delete the old image file from the storage in favor of the new file
obj.profile_picture.delete()
def get_image_path(self):
return str(self.profile_picture.url)[16:]
class Meta:
verbose_name_plural = "Staff"
class Batch(models.Model):
batch = models.CharField(max_length=20, blank=False)
def __str__(self):
return self.batch
class Meta:
verbose_name_plural = "Batch"
class UndergraduateStudents(models.Model):
rollno = models.CharField(max_length=12, primary_key=True)
name = models.CharField(max_length=100, blank=False)
batch = models.ForeignKey('Batch')
def __str__(self):
return self.rollno
class Meta:
verbose_name_plural = "UndergraduateStudents"
class MscStudents(models.Model):
rollno = models.CharField(max_length=12, primary_key=True)
name = models.CharField(max_length=100, blank=False)
batch = models.ForeignKey('Batch')
email = models.CharField(unique=True, max_length=50, blank=False)
profile_picture = models.ImageField(upload_to=get_image_path, blank=True, null=True)
def __str__(self):
return self.rollno
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
path = os.path.join("PeopleApp", "static", "UserImages", type(self).__name__, str(self.pk))
if os.path.exists(path):
shutil.rmtree(path)
return super(MscStudents, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(MscStudents, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = MscStudents.objects.get(pk=self.pk)
except MscStudents.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.profile_picture and self.profile_picture and obj.profile_picture != self.profile_picture:
# delete the old image file from the storage in favor of the new file
obj.profile_picture.delete()
def get_image_path(self):
return str(self.profile_picture.url)[16:]
class Meta:
verbose_name_plural = "MscStudents"
class PhdStudents(models.Model):
broadcast_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=False)
batch = models.ForeignKey('Batch')
email = models.CharField(max_length=50, blank=True, null=True)
profile_picture = models.ImageField(upload_to=get_image_path_phd, blank=True, null=True)
# At first we considered making this an independent text field rather than a ForeignKey to
# Faculty: if a faculty member leaves after this particular student has already graduated, we
# would want to delete the faculty member from our Faculty table yet still show them as this
# student's supervisor, which only an independent field allows.
# On second thought it is a ForeignKey after all, so the supervisor's profile can be visited
# directly from here. If the faculty member is no longer at the institute, the field is simply
# set to null, which can later be updated to point to another faculty member if needed.
supervisor = models.ForeignKey('Faculty', on_delete=models.SET_NULL, blank=True, null=True)
research = models.TextField(max_length=500, blank=True, null=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(PhdStudents, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = PhdStudents.objects.get(pk=self.pk)
except PhdStudents.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.profile_picture and self.profile_picture and obj.profile_picture != self.profile_picture:
# delete the old image file from the storage in favor of the new file
obj.profile_picture.delete()
def get_image_path(self):
return str(self.profile_picture.url)[16:]
class Meta:
verbose_name_plural = "PhdStudents"
class PhdAlumni(models.Model):
name = models.CharField(max_length=100, blank=False)
emails = models.CharField(max_length=300, blank=True, null=True)
profile_picture = models.ImageField(upload_to=get_image_path_phd, blank=True, null=True)
thesis_title = models.TextField(max_length=400, blank=True)
thesis_link = models.CharField(max_length=200, blank=True, default="#")
date_defended = models.DateField(blank=True, null=True)
supervisor = models.ForeignKey('Faculty', on_delete=models.SET_NULL, blank=True, null=True)
# phd_supervisor = models.CharField(max_length=100, blank=True)
# phd_supervisor_link = models.CharField(max_length=200, blank=True, default="#")
current_position = models.TextField(max_length=400, blank=True)
current_supervisor = models.CharField(max_length=100, blank=True)
current_supervisor_link = models.CharField(max_length=200, blank=True, default="#")
extra_info = models.TextField(max_length=500, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(PhdAlumni, self).save(*args, **kwargs)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = PhdAlumni.objects.get(pk=self.pk)
except PhdAlumni.DoesNotExist:
# object is not in db, nothing to worry about
return
# is the save due to an update of the actual image file?
if obj.profile_picture and self.profile_picture and obj.profile_picture != self.profile_picture:
# delete the old image file from the storage in favor of the new file
obj.profile_picture.delete()
def get_image_path(self):
return str(self.profile_picture.url)[16:]
class Meta:
verbose_name_plural = "PhdAlumni"
YEAR_CHOICES = []
for r in range(1980, 2031):
YEAR_CHOICES.append((r, r))
class Publication(models.Model):
year = models.IntegerField(choices=YEAR_CHOICES, default=datetime.datetime.now().year)
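# Note: datetime.datetime.now() above is evaluated once, when the model class is defined
# (at import time), so the default year is fixed for the life of the process rather than
# recomputed on each save.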
faculty = models.ManyToManyField(Faculty, blank=False)
matter = models.TextField(max_length=5000)
def __str__(self):
return "Entry " + str(self.id)
class Meta:
verbose_name_plural = "Publications"
| [
"os.path.exists",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"datetime.datetime.now",
"django.db.models.AutoField",
"shutil.rmtree",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((536, 581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (552, 581), False, 'from django.db import models\n'), ((742, 787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (758, 787), False, 'from django.db import models\n'), ((806, 839), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Designations"""'], {}), "('Designations')\n", (823, 839), False, 'from django.db import models\n'), ((862, 917), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (878, 917), False, 'from django.db import models\n'), ((934, 1006), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Fac'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), '(Fac, on_delete=models.SET_NULL, blank=True, null=True)\n', (951, 1006), False, 'from django.db import models\n'), ((1019, 1068), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(50)'}), '(primary_key=True, max_length=50)\n', (1035, 1068), False, 'from django.db import models\n'), ((1088, 1133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': '"""#"""'}), "(max_length=200, default='#')\n", (1104, 1133), False, 'from django.db import models\n'), ((1156, 1222), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path', 'blank': '(True)', 'null': '(True)'}), '(upload_to=get_image_path, blank=True, null=True)\n', (1173, 1222), False, 'from django.db import models\n'), ((1244, 1283), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1260, 1283), False, 'from django.db import models\n'), ((1304, 1334), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1323, 1334), False, 'from django.db import models\n'), ((1345, 1400), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1361, 1400), False, 'from django.db import models\n'), ((1411, 1465), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1427, 1465), False, 'from django.db import models\n'), ((1487, 1542), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (1503, 1542), False, 'from django.db import models\n'), ((1557, 1612), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (1573, 1612), False, 'from django.db import models\n'), ((1695, 1734), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1711, 1734), False, 'from django.db import models\n'), ((1763, 1802), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1779, 1802), False, 'from django.db import models\n'), ((1833, 1872), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), 
'(blank=True, null=True)\n', (1849, 1872), False, 'from django.db import models\n'), ((1893, 1932), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1909, 1932), False, 'from django.db import models\n'), ((1969, 2008), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1985, 2008), False, 'from django.db import models\n'), ((2028, 2067), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2044, 2067), False, 'from django.db import models\n'), ((2088, 2127), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2104, 2127), False, 'from django.db import models\n'), ((2159, 2198), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2175, 2198), False, 'from django.db import models\n'), ((2225, 2264), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2241, 2264), False, 'from django.db import models\n'), ((2290, 2329), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2306, 2329), False, 'from django.db import models\n'), ((2345, 2384), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2361, 2384), False, 'from django.db import models\n'), ((2399, 2438), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2415, 2438), False, 'from django.db import models\n'), ((2462, 2513), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['CurrentResearch'], {'blank': '(True)'}), '(CurrentResearch, blank=True)\n', (2484, 2513), False, 'from django.db import models\n'), ((2537, 2592), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'max_length': '(500)'}), '(blank=True, null=True, max_length=500)\n', (2553, 2592), False, 'from django.db import models\n'), ((3945, 3990), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (3961, 3990), False, 'from django.db import models\n'), ((4009, 4042), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Designations"""'], {}), "('Designations')\n", (4026, 4042), False, 'from django.db import models\n'), ((4055, 4104), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(50)'}), '(primary_key=True, max_length=50)\n', (4071, 4104), False, 'from django.db import models\n'), ((4117, 4171), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'blank': '(True)', 'null': '(True)'}), '(max_length=12, blank=True, null=True)\n', (4133, 4171), False, 'from django.db import models\n'), ((4194, 4260), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path', 'blank': '(True)', 'null': '(True)'}), '(upload_to=get_image_path, blank=True, null=True)\n', (4211, 4260), False, 'from django.db import models\n'), ((5575, 5619), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(False)'}), '(max_length=20, blank=False)\n', (5591, 5619), False, 'from django.db 
import models\n'), ((5783, 5832), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'primary_key': '(True)'}), '(max_length=12, primary_key=True)\n', (5799, 5832), False, 'from django.db import models\n'), ((5844, 5889), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (5860, 5889), False, 'from django.db import models\n'), ((5902, 5928), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Batch"""'], {}), "('Batch')\n", (5919, 5928), False, 'from django.db import models\n'), ((6099, 6148), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'primary_key': '(True)'}), '(max_length=12, primary_key=True)\n', (6115, 6148), False, 'from django.db import models\n'), ((6160, 6205), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (6176, 6205), False, 'from django.db import models\n'), ((6218, 6244), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Batch"""'], {}), "('Batch')\n", (6235, 6244), False, 'from django.db import models\n'), ((6257, 6314), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(50)', 'blank': '(False)'}), '(unique=True, max_length=50, blank=False)\n', (6273, 6314), False, 'from django.db import models\n'), ((6337, 6403), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path', 'blank': '(True)', 'null': '(True)'}), '(upload_to=get_image_path, blank=True, null=True)\n', (6354, 6403), False, 'from django.db import models\n'), ((7763, 7797), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7779, 7797), False, 'from django.db import models\n'), ((7809, 7854), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (7825, 7854), False, 'from django.db import models\n'), ((7867, 7893), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Batch"""'], {}), "('Batch')\n", (7884, 7893), False, 'from django.db import models\n'), ((7906, 7960), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), '(max_length=50, blank=True, null=True)\n', (7922, 7960), False, 'from django.db import models\n'), ((7983, 8053), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path_phd', 'blank': '(True)', 'null': '(True)'}), '(upload_to=get_image_path_phd, blank=True, null=True)\n', (8000, 8053), False, 'from django.db import models\n'), ((8695, 8773), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Faculty"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), "('Faculty', on_delete=models.SET_NULL, blank=True, null=True)\n", (8712, 8773), False, 'from django.db import models\n'), ((8789, 8844), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'blank': '(True)', 'null': '(True)'}), '(max_length=500, blank=True, null=True)\n', (8805, 8844), False, 'from django.db import models\n'), ((9845, 9890), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (9861, 9890), False, 'from django.db import models\n'), ((9904, 9959), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)', 
'null': '(True)'}), '(max_length=300, blank=True, null=True)\n', (9920, 9959), False, 'from django.db import models\n'), ((9982, 10052), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path_phd', 'blank': '(True)', 'null': '(True)'}), '(upload_to=get_image_path_phd, blank=True, null=True)\n', (9999, 10052), False, 'from django.db import models\n'), ((10072, 10116), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(400)', 'blank': '(True)'}), '(max_length=400, blank=True)\n', (10088, 10116), False, 'from django.db import models\n'), ((10135, 10192), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'default': '"""#"""'}), "(max_length=200, blank=True, default='#')\n", (10151, 10192), False, 'from django.db import models\n'), ((10213, 10252), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10229, 10252), False, 'from django.db import models\n'), ((10270, 10348), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Faculty"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), "('Faculty', on_delete=models.SET_NULL, blank=True, null=True)\n", (10287, 10348), False, 'from django.db import models\n'), ((10526, 10570), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(400)', 'blank': '(True)'}), '(max_length=400, blank=True)\n', (10542, 10570), False, 'from django.db import models\n'), ((10596, 10640), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (10612, 10640), False, 'from django.db import models\n'), ((10671, 10728), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'default': '"""#"""'}), "(max_length=200, blank=True, default='#')\n", (10687, 10728), False, 'from django.db import models\n'), ((10746, 10790), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'blank': '(True)'}), '(max_length=500, blank=True)\n', (10762, 10790), False, 'from django.db import models\n'), ((11959, 12003), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Faculty'], {'blank': '(False)'}), '(Faculty, blank=False)\n', (11981, 12003), False, 'from django.db import models\n'), ((12017, 12050), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(5000)'}), '(max_length=5000)\n', (12033, 12050), False, 'from django.db import models\n'), ((2871, 2891), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2885, 2891), False, 'import os, datetime\n'), ((4539, 4559), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4553, 4559), False, 'import os, datetime\n'), ((6684, 6704), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6698, 6704), False, 'import os, datetime\n'), ((2905, 2924), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2918, 2924), False, 'import shutil\n'), ((4573, 4592), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (4586, 4592), False, 'import shutil\n'), ((6718, 6737), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (6731, 6737), False, 'import shutil\n'), ((11915, 11938), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11936, 11938), False, 'import os, datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/get_pet_labels.py
# PROGRAMMER: <NAME>
# DATE CREATED: March 18, 2022
# REVISED DATE: March 30, 2022
# PURPOSE: Create the function get_pet_labels that creates the pet labels from
# the image's filename. This function inputs:
# - The Image Folder as image_dir within get_pet_labels function and
# as in_arg.dir for the function call within the main function.
# This function creates and returns the results dictionary as results_dic
# within get_pet_labels function and as results within main.
# The results_dic dictionary has a 'key' that's the image filename and
# a 'value' that's a list. This list will contain the following item
# at index 0 : pet image label (string).
from os import listdir
def get_pet_labels(image_dir):
    """
    Creates a dictionary of pet labels (results_dic) based upon the filenames
    of the image files. These pet image labels are used to check the accuracy
    of the labels that are returned by the classifier function, since the
    filenames of the images contain the true identity of the pet in the image.
    Be sure to format the pet labels so that they are in all lower case letters
    and with leading and trailing whitespace characters stripped from them.
    (ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')
    Parameters:
     image_dir - The (full) path to the folder of images that are to be
                 classified by the classifier function (string)
    Returns:
      results_dic - Dictionary with 'key' as image filename and 'value' as a
      List. The list contains the following item:
         index 0 = pet image label (string)
    """
    result_dic = dict()
    filename_list = listdir(image_dir)
    for filename in filename_list:
        if not filename.startswith('.'):
            if filename not in result_dic:
                pet_label = [word for word in filename.split('_') if word.isalpha()]
                pet_label = [" ".join(pet_label).lower()]
                result_dic[filename] = pet_label
            else:
                print("Warning: Duplicate files exist in directory: {}".format(filename))
    return result_dic
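# Hedged usage sketch (not part of the original project file); "pet_images/" below is an
# assumed folder name used purely for illustration.
if __name__ == "__main__":
    results = get_pet_labels("pet_images/")
    for key in results:
        # e.g. 'Boston_terrier_02259.jpg' -> 'boston terrier'
        print("{:35s} label: {}".format(key, results[key][0]))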
| [
"os.listdir"
] | [((1934, 1952), 'os.listdir', 'listdir', (['image_dir'], {}), '(image_dir)\n', (1941, 1952), False, 'from os import listdir\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
# Creating four arrays (np.arange(1, 20) yields 19 elements, 1 through 19; the random array has 20)
linear = np.arange(1, 20)  # 1 to 19
square = linear ** 2  # 1 to 361
log = np.log(linear)  # 0 to about 2.94
random = np.random.randint(0, 100, 20)  # 20 random integers in 0..99
# Plotting all four arrays with plot
plt.plot(linear)
plt.plot(square)
plt.plot(log)
plt.plot(random)
# Cleaning the figure with clf
# plt.clf()
# The plot function also allows for two parameters to be passed X, y
# plt.plot(log, square)
# Setting title and labels
plt.title('Linear Plot')
plt.xlabel('Index')
plt.ylabel('Linear Y')
# Show displays figures in a separate window
plt.show()
# Closing the used resources
plt.close()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.random.randint",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((130, 146), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (139, 146), True, 'import numpy as np\n'), ((200, 214), 'numpy.log', 'np.log', (['linear'], {}), '(linear)\n', (206, 214), True, 'import numpy as np\n'), ((238, 267), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (255, 267), True, 'import numpy as np\n'), ((317, 333), 'matplotlib.pyplot.plot', 'plt.plot', (['linear'], {}), '(linear)\n', (325, 333), True, 'import matplotlib.pyplot as plt\n'), ((334, 350), 'matplotlib.pyplot.plot', 'plt.plot', (['square'], {}), '(square)\n', (342, 350), True, 'import matplotlib.pyplot as plt\n'), ((351, 364), 'matplotlib.pyplot.plot', 'plt.plot', (['log'], {}), '(log)\n', (359, 364), True, 'import matplotlib.pyplot as plt\n'), ((365, 381), 'matplotlib.pyplot.plot', 'plt.plot', (['random'], {}), '(random)\n', (373, 381), True, 'import matplotlib.pyplot as plt\n'), ((548, 572), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear Plot"""'], {}), "('Linear Plot')\n", (557, 572), True, 'import matplotlib.pyplot as plt\n'), ((573, 592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Index"""'], {}), "('Index')\n", (583, 592), True, 'import matplotlib.pyplot as plt\n'), ((593, 615), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Linear Y"""'], {}), "('Linear Y')\n", (603, 615), True, 'import matplotlib.pyplot as plt\n'), ((662, 672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (670, 672), True, 'import matplotlib.pyplot as plt\n'), ((703, 714), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (712, 714), True, 'import matplotlib.pyplot as plt\n')] |
"""
Simple python (2 and 3) interface to the Eye Tribe eye tracker (http://theeyetribe.com)
See README.md for instructions
Created by <NAME> / <EMAIL> / <EMAIL>, March 2014
Licensed under the MIT License:
Copyright (c) 2014, <NAME>, Technical University of Denmark, DTU Informatics, Cognitive Systems Section
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "<NAME>"
__copyright__ = \
"Copyright (c) 2014, <NAME>, Technical University of Denmark, DTU Informatics, Cognitive Systems Section"
__license__ = "MIT"
__version__ = "0.4"
__email__ = "<EMAIL>"
__status__ = "Alpha"
import sys
if sys.version_info[0] == 2:
import Queue as q
else:
import queue as q
import time
from datetime import datetime
import threading
import socket
import json
import pygame
from random import *
class EyeTribe():
"""
Main class to handle the Eye Tracker interface and values.
Includes the subclass Frame (which holds an entire tracker frame with
both-eye positions), plus the Frame.Eye and Coord subclasses holding single-eye
data and the (x,y) coordinates of eye positions and bounding boxes.
"""
etm_get_init = '{ "category": "tracker", "request" : "get", "values": [ "iscalibrated", "heartbeatinterval" ] }'
etm_calib = '{ "category": "calibration", "request" : "start", "values": { "pointcount": %d } }'
etm_calib_abort = '{ "category": "calibration", "request" : "abort" }'
etm_calib_clear = '{ "category": "calibration", "request" : "clear" }'
etm_cpstart = '{ "category": "calibration", "request" : "pointstart", "values": { "x": %d, "y": %d } }'
etm_cpend = '{ "category": "calibration", "request" : "pointend" }'
etm_get_screenres = '{ "category": "tracker", "request" : "get", "values": [ "screenresw", "screenresh" ] }'
etm_set_push = '{ "category": "tracker", "request" : "set", "values": { "push": true } }'
etm_set_pull = '{ "category": "tracker", "request" : "set", "values": { "push": false } }'
etm_get_frame = '{ "category": "tracker", "request" : "get", "values": [ "frame" ] }'
etm_heartbeat = '{ "category": "heartbeat" }'
etm_tracker_state = '{ "category": "tracker", "request" : "get", "values": [ "trackerstate" ] }'
etm_buffer_size = 4096
def getTrackerState(self):
#'__author__' = '<NAME>'
result = self._tell_tracker(EyeTribe.etm_tracker_state)
return result['values']['trackerstate']
class Coord():
"""Single (x,y) positions relative to screen or bounding box. Used in Frame and Calibration."""
def __init__(self, x=0, y=0, ssep=';', fmt="%d"):
self._x = x
self._y = y
self._ssep = ssep
self._fmt = fmt
@property
def x(self):
"""The horizontal cartesian offset (abcissa)."""
return self._x
@x.setter
def x(self, val):
self._x = val
@property
def y(self):
"""The vertical cartesian offset (ordinate)."""
return self._y
@y.setter
def y(self, val):
self._y = val
def __str__(self):
return (self._fmt + "%s" + self._fmt) % (self._x, self._ssep, self._y)
class Frame():
"""
Holds a complete decoded frame from the eye tracker.
Access members via accessor functions or convert to string via str(...)
"""
class Eye:
"""Single-eye data, including gaze coordinates and pupil size"""
def __init__(self, raw, avg, psize, pcenter, ssep=';'):
self._raw = raw
self._avg = avg
self._psize = psize
self._pcenter = pcenter
self._ssep = ssep
@property
def raw(self):
"""The raw (unfiltered) cartesian eye coordinate vs screen coordinates."""
return self._raw
@raw.setter
def raw(self, val):
self._raw = val
@property
def avg(self):
"""The averaged (filtered) cartesian eye coordinate vs screen coordinates."""
return self._avg
@avg.setter
def avg(self, val):
self._avg = val
@property
def psize(self):
"""A relative estimate of the pupil size."""
return self._psize
@psize.setter
def psize(self, val):
self._psize = val
@property
def pcenter(self):
"""The center coordinate of the eye within the bounding box."""
return self._pcenter
@pcenter.setter
def pcenter(self, val):
self._pcenter = val
def __str__(self):
return "%s%s%s%s%.1f%s%s" % \
(str(self._raw), self._ssep, str(self._avg), self._ssep, self._psize, self._ssep, str(self._pcenter))
def __init__(self, json, ssep=';'):
"""
Creates a frame based on an unpacked version of the eye tracker json string.
The ssep is used for separating values when the frame is converted to
a string, as in a print statement. This is useful for dumping csv files.
"""
self._json = json
self._etime = time.time()
self._time = json['time'] / 1000.0
ts = datetime.strptime(json['timestamp'], "%Y-%m-%d %H:%M:%S.%f")
self._timestamp = int(time.mktime(ts.timetuple())) + int(ts.strftime("%f"))/1000000.0
self._fix = json['fix']
self._state = json['state']
self._raw = EyeTribe.Coord(json['raw']['x'], json['raw']['y'])
self._avg = EyeTribe.Coord(json['avg']['x'], json['avg']['y'])
eye = json['lefteye']
self._lefteye = EyeTribe.Frame.Eye(
EyeTribe.Coord(eye['raw']['x'], eye['raw']['y']),
EyeTribe.Coord(eye['avg']['x'], eye['avg']['y']),
eye['psize'],
EyeTribe.Coord(eye['pcenter']['x'], eye['pcenter']['y'], fmt="%.3f")
)
eye = json['righteye']
self._righteye = EyeTribe.Frame.Eye(
EyeTribe.Coord(eye['raw']['x'], eye['raw']['y']),
EyeTribe.Coord(eye['avg']['x'], eye['avg']['y']),
eye['psize'],
EyeTribe.Coord(eye['pcenter']['x'], eye['pcenter']['y'], fmt="%.3f")
)
self._ssep = ssep
@property
def json(self):
"""The 'original' json dict from the eye tracker -- for the curious or for debugging"""
return self._json
@property
def etime(self):
"""The wall-time epoch at the point when the frame is unpacked on the client."""
return self._etime
@etime.setter
def etime(self, val):
self._etime = val
@property
def time(self):
"""A monotoneous clock value from the tracker."""
return self._time
@time.setter
def time(self, val):
self._time = val
@property
def timestamp(self):
"""The wall-time epoch at the point the eye tracker server created the frame."""
return self._timestamp
@timestamp.setter
def timestamp(self, val):
self._timestamp = val
@property
def fix(self):
"""The fixation flag (True or False) from the eye tracker."""
return self._fix
@fix.setter
def fix(self, val):
self._fix = val
@property
def state(self):
"""The state from the eye tracker (a numeric value)."""
return self._state
@state.setter
def state(self, val):
self._state = val
@property
def avg(self):
"""An averaged fixation coordinate based on both eyes."""
return self._avg
@avg.setter
def avg(self, val):
self._avg = val
@property
def raw(self):
"""The raw (unfiltered) fixation coordinate based on both eyes."""
return self._raw
@raw.setter
def raw(self, val):
self._raw = val
@property
def lefteye(self):
"""Left eye coordinates, pupil position and size."""
return self._lefteye
@lefteye.setter
def lefteye(self, val):
self._lefteye = val
@property
def righteye(self):
"""Right eye coordinates, pupil position and size."""
return self._righteye
@righteye.setter
def righteye(self, val):
self._righteye = val
def eye(self, left=False):
if left:
return self._lefteye
else:
return self._righteye
def __str__(self):
# header = "eT;dT;aT;Fix;State;Rwx;Rwy;Avx;Avy;LRwx;LRwy;LAvx;LAvy;LPSz;LCx;LCy;RRwx;RRwy;RAvx;RAvy;RPSz;RCx;RCy"
st = 'L' if (self._state & 0x10) else '.'
st += 'F' if (self._state & 0x08) else '.'
st += 'P' if (self._state & 0x04) else '.'
st += 'E' if (self._state & 0x02) else '.'
st += 'G' if (self._state & 0x01) else '.'
f = 'F' if self._fix else 'N'
s = "%014.3f%s%07.3f%s%07.3f%s" % (self._etime, self._ssep, self._time, self._ssep, self._timestamp, self._ssep,)
s += "%s%s%s%s%s%s%s" % (f, self._ssep, st, self._ssep, str(self._raw), self._ssep, str(self._avg))
s += "%s%s" % (self._ssep, str(self._lefteye))
s += "%s%s" % (self._ssep, str(self._righteye))
return s
class Calibration():
def __init__(self):
self.result = False
self.deg = None
self.degl = None
self.degr = None
self.pointcount = 0
self.points = None
class CalibrationPoint():
def __init__(self):
self.state = -1
self.cp = EyeTribe.Coord()
self.mecp = EyeTribe.Coord()
self.ad = None
self.adl = None
self.adr = None
self.mep = None
self.mepl = None
self.mepr = None
self.asd = None
self.asdl = None
self.asdr = None
def __init__(self, host='localhost', port=6555, ssep=';', screenindex=0):
"""
Create an EyeTribe connection object that can be used to connect to an eye tracker.
Parameters host and port are the values to use when connecting to the tracker.
The ssep can be used to specify an alternative value for value separators when
printing out a value.
"""
self._host = host
self._port = port
self._sock = None
self._ispushmode = False
self._hbinterval = 0 # Note: this is (converted to a value in) seconds
self._hbeater = None
self._listener = None
self._frameq = q.Queue()
self._replyq = q.Queue()
self._reply_lock = threading.Semaphore() # Keeps track of whether someone needs a reply
self._pmcallback = None
self._ssep = ssep
self._screenindex = screenindex
self._calibres = EyeTribe.Calibration()
def _tell_tracker(self, message):
"""
Send the (canned) message to the tracker and return the reply properly parsed.
Raises an exception if we get an error message back from the tracker (anything status!=200)
"""
if not self._listener:
raise Exception("Internal error; listener is not running so we cannot get replies from the tracker!")
if not self._replyq.empty():
raise Exception("Tracker protocol error; we have a queue reply before asking for something: %s" % (self._replyq.get()))
# lock semaphore to ensure we're the only ones opening a request that expects a reply from the tracker
self._reply_lock.acquire()
self._sock.send(message.encode())
reply = self._replyq.get(True)
# release the lock again now that we have the expected reply
self._reply_lock.release()
sc = reply['statuscode']
# if sc != 200:
# raise Exception("Tracker protocol error (%d) on message '%s'" % (sc, message))
return reply
def connect(self):
"""
Connect an eyetribe object to the actual Eye Tracker by establishing a TCP/IP connection.
Also gets heartbeatinterval information, and sets up the heartbeater and listener threads
"""
def _hbeater_thread():
"""sends heartbeats at the required interval until the connection is closed, but does not read any replies"""
sys.stderr.write("_hbeater starting\n")
while self._sock:
self._sock.send(EyeTribe.etm_heartbeat.encode())
time.sleep(self._hbinterval)
sys.stderr.write("_hbeater ending\n")
return
def _listener_thread():
"""
Listens for replies from the tracker (including heartbeat replies) and dispatches or deletes those as needed
This is the only place where we listen for replies from the tracker
Currently assumes there are continuous heartbeats, otherwise we will time out at some point...
"""
sys.stderr.write("_listener starting\n")
while self._sock:
# Keep going until we're asked to terminate (or we timeout with an error)
try:
r = self._sock.recv(EyeTribe.etm_buffer_size)
# Multiple replies handled assuming non-documented \n is sent from the tracker, but (TODO) not split frames,
for js in r.decode().split("\n"):
if js.strip() != "":
f = json.loads(js)
# handle heartbeat and calibration OK results, and store other stuff to proper queues
sc = f['statuscode']
if f['category'] == "heartbeat":
pass
elif f['category'] == 'calibration' and sc == 800:
pass
elif self._ispushmode and 'values' in f and 'frame' in f['values']:
if sc != 200:
raise Exception("Connection failed, protocol error (%d)", sc)
ef = EyeTribe.Frame(f['values']['frame'])
if self._pmcallback != None:
dont_queue = self._pmcallback(ef)
else:
dont_queue = False
if not dont_queue:
self._frameq.put(ef)
else:
# use semaphore to verify someone is waiting for a reply and give it to them (or fail!)
if self._reply_lock.acquire(False):
self._reply_lock.release()
raise Exception("Connection protocol error; got reply but no-one asked for it: %s" % js)
else:
self._replyq.put(f)
except (socket.timeout, OSError):
if self._sock:
raise Exception("The connection failed with a timeout or OSError; lost tracker connection?")
sys.stderr.write("_listener ending\n")
if self._sock is None:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self._host, self._port))
self._sock.settimeout(30)
try:
# setup listener to picks up replies etc; is needed very early on for comms to work
self._listener = threading.Thread(target=_listener_thread)
self._listener.daemon = True
self._listener.start()
p = self._tell_tracker(EyeTribe.etm_get_init)
self._hbinterval = int(p['values']['heartbeatinterval']) / 1000.0
if self._hbinterval != 0:
self._sock.settimeout(self._hbinterval*2)
# setup heart-beat generator
if self._hbinterval != 0:
self._hbeater = threading.Thread(target=_hbeater_thread)
self._hbeater.daemon = True
self._hbeater.start()
else:
self._hbeater = None
except ValueError:
raise
else:
raise Exception("cannot connect an already connected socket; close it first")
def bind(self, host='localhost', port=6555):
"""(Re)binds a non-connected Eye Tribe object to another host/port."""
if not self._sock is None:
self._host = host
self._port = port
else:
raise Exception("cannot (re)bind a connected socket; close it first")
def close(self, quick=False):
"""
Close TCP/IP connection, returning the object back to its starting condition.
If quick is True, do NOT wait for the listener and heartbeat threads to stop.
"""
if not self._sock.close is None:
_s = self._sock
self._sock = None
_s.close()
if not quick:
# sync for listener to stop
self._listener.join(min((self._hbinterval*3, 30)))
if self._listener.is_alive():
raise Exception("Listener thread did not terminate as expected; protocol error?")
# and for the heartbeater to stop as well
if self._hbinterval != 0:
self._hbeater.join(min((self._hbinterval*3, 10)))
if self._hbeater.is_alive():
raise Exception("HeartBeater thread did not terminate as expected; protocol error?")
self._listener = None
self._hbeater = None
else:
raise Exception("cannot close an already closed connection")
def pushmode(self, callback=None):
"""
Change to push mode, i.e. setup and start receiving tracking data
Requires a connected tracker that also has been calibrated
If callback is not given, frames are just stored to the queue and can be retrieved with
the next() operation; otherwise the callback is invoked with the new frame as parameter.
The callback can return True to indicate that no further processing should be done
on the frame; otherwise the frame will be queued as normal for later retrieval by next().
Note that the callback is called on the sockets listener thread!
"""
# if already in pushmode, do nothing...
if self._ispushmode:
return
if callback!=None:
self._pmcallback = callback
self._tell_tracker(EyeTribe.etm_set_push)
self._ispushmode = True
def pullmode(self):
"""
Change to pull mode, i.e. prompt by calling next() whenever you pull for a frame.
Requires a connected tracker that also has been calibrated
"""
if self._ispushmode:
self._tell_tracker(EyeTribe.etm_set_pull)
self._ispushmode = False
self._pmcallback = None
def next(self, block=True):
"""
Returns the next (queued or pulled) dataset from the eyetracker.
If block is False, and we're in pushmode and the queue is empty, None is returned immediately,
otherwise we will wait for the next frame to arrive and return that
"""
if self._ispushmode:
try:
return self._frameq.get(block)
except q.Empty:
return None
else:
p = self._tell_tracker(EyeTribe.etm_get_frame)
return EyeTribe.Frame(p['values']['frame'])
def get_screen_res(self):
p = self._tell_tracker(EyeTribe.etm_get_screenres)
maxx = p['values']['screenresw']
maxy = p['values']['screenresh']
return (maxx, maxy)
def calibration_start(self, pointcount=9):
"""
(Re)run the calibration procedure with pointcount points.
Call calibration_point_start and calibration_point_end for each point when it displays on the screen.
The result can be retrieved by latest_calibration_result after the calibration has completed.
"""
self._tell_tracker(EyeTribe.etm_calib % pointcount)
def calibration_point_start(self, x, y):
self._tell_tracker(EyeTribe.etm_cpstart % (x, y))
def calibration_point_end(self):
p = self._tell_tracker(EyeTribe.etm_cpend)
#if 'values' in p:
self._calibres.result = p['values']['calibresult']['result']
self._calibres.deg = p['values']['calibresult']['deg']
self._calibres.degl = p['values']['calibresult']['degl']
self._calibres.degr = p['values']['calibresult']['degr']
cps = p['values']['calibresult']['calibpoints']
self._calibres.points = [ EyeTribe.CalibrationPoint() for i in range(len(cps)) ]
for i in range(len(cps)):
self._calibres.points[i].state = cps[i]['state']
self._calibres.points[i].cp = EyeTribe.Coord(cps[i]['cp']['x'], cps[i]['cp']['y'])
self._calibres.points[i].mecp = EyeTribe.Coord(cps[i]['cp']['x'], cps[i]['cp']['y'])
self._calibres.points[i].ad = cps[i]['acd']['ad']
self._calibres.points[i].adl = cps[i]['acd']['adl']
self._calibres.points[i].adr = cps[i]['acd']['adr']
self._calibres.points[i].mep = cps[i]['mepix']['mep']
self._calibres.points[i].mepl = cps[i]['mepix']['mepl']
self._calibres.points[i].mepr = cps[i]['mepix']['mepr']
self._calibres.points[i].asd = cps[i]['asdp']['asd']
self._calibres.points[i].asdl = cps[i]['asdp']['asdl']
self._calibres.points[i].asdr = cps[i]['asdp']['asdr']
if self._calibres.result:
print("NOTICE: Tracker calibrated succesfully, average error is %0.1f deg (L: %0.1f, R: %0.1f)" %
(self._calibres.deg, self._calibres.degl, self._calibres.degr))
else:
print("WARNING: Tracker failed to calibrate")
def calibration_abort(self):
self._tell_tracker(EyeTribe.etm_calib_abort)
def calibration_clear(self):
self._tell_tracker(EyeTribe.etm_calib_clear)
def latest_calibration_result(self):
return self._calibres
if __name__ == "__main__":
"""
Example usage -- this code is only executed if file is run directly
not when imported as a module, but it shows how to use this module:
from peyetribe import EyeTribe
import time
"""
tracker = EyeTribe()
tracker.connect()
n = tracker.next()
print("eT;dT;aT;Fix;State;Rwx;Rwy;Avx;Avy;LRwx;LRwy;LAvx;LAvy;LPSz;LCx;LCy;RRwx;RRwy;RAvx;RAvy;RPSz;RCx;RCy")
tracker.pushmode()
count = 0
while count < 100:
n = tracker.next()
print(n)
count += 1
tracker.pullmode()
tracker.close()
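# Hedged usage sketch (not part of the original example): receiving frames through a
# callback in push mode, as described in the pushmode() docstring. The callback runs on
# the listener thread; returning True tells the library not to queue the frame as well.
#
#   def on_frame(frame):
#       print(frame)       # hand the frame off to your own queue or GUI loop here
#       return True        # handled here, so do not also put it on the internal queue
#
#   tracker = EyeTribe()
#   tracker.connect()
#   tracker.pushmode(on_frame)
#   time.sleep(10)         # collect data for ten seconds
#   tracker.pullmode()
#   tracker.close()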
| [
"json.loads",
"socket.socket",
"datetime.datetime.strptime",
"time.sleep",
"threading.Semaphore",
"sys.stderr.write",
"threading.Thread",
"queue.Queue",
"time.time"
] | [((12144, 12153), 'queue.Queue', 'q.Queue', ([], {}), '()\n', (12151, 12153), True, 'import queue as q\n'), ((12177, 12186), 'queue.Queue', 'q.Queue', ([], {}), '()\n', (12184, 12186), True, 'import queue as q\n'), ((12214, 12235), 'threading.Semaphore', 'threading.Semaphore', ([], {}), '()\n', (12233, 12235), False, 'import threading\n'), ((6361, 6372), 'time.time', 'time.time', ([], {}), '()\n', (6370, 6372), False, 'import time\n'), ((6437, 6497), 'datetime.datetime.strptime', 'datetime.strptime', (["json['timestamp']", '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(json['timestamp'], '%Y-%m-%d %H:%M:%S.%f')\n", (6454, 6497), False, 'from datetime import datetime\n'), ((13911, 13950), 'sys.stderr.write', 'sys.stderr.write', (['"""_hbeater starting\n"""'], {}), "('_hbeater starting\\n')\n", (13927, 13950), False, 'import sys\n'), ((14103, 14140), 'sys.stderr.write', 'sys.stderr.write', (['"""_hbeater ending\n"""'], {}), "('_hbeater ending\\n')\n", (14119, 14140), False, 'import sys\n'), ((14557, 14597), 'sys.stderr.write', 'sys.stderr.write', (['"""_listener starting\n"""'], {}), "('_listener starting\\n')\n", (14573, 14597), False, 'import sys\n'), ((16828, 16866), 'sys.stderr.write', 'sys.stderr.write', (['"""_listener ending\n"""'], {}), "('_listener ending\\n')\n", (16844, 16866), False, 'import sys\n'), ((16924, 16973), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (16937, 16973), False, 'import socket\n'), ((14062, 14090), 'time.sleep', 'time.sleep', (['self._hbinterval'], {}), '(self._hbinterval)\n', (14072, 14090), False, 'import time\n'), ((17220, 17261), 'threading.Thread', 'threading.Thread', ([], {'target': '_listener_thread'}), '(target=_listener_thread)\n', (17236, 17261), False, 'import threading\n'), ((17719, 17759), 'threading.Thread', 'threading.Thread', ([], {'target': '_hbeater_thread'}), '(target=_hbeater_thread)\n', (17735, 17759), False, 'import threading\n'), ((15066, 15080), 'json.loads', 'json.loads', (['js'], {}), '(js)\n', (15076, 15080), False, 'import json\n')] |
import grpc
from github.com.metaprov.modelaapi.pkg.apis.data.v1alpha1.generated_pb2 import LabelingPipeline as MDLabelingPipeline
from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2_grpc import LabelingPipelineServiceStub
from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, \
UpdateLabelingPipelineRequest, \
DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest
from modela.Resource import Resource
from modela.ModelaException import ModelaException
from typing import List, Union
class LabelingPipeline(Resource):
def __init__(self, item: MDLabelingPipeline = MDLabelingPipeline(), client=None, namespace="", name="", version=Resource.DefaultVersion):
super().__init__(item, client, namespace=namespace, name=name, version=version)
class LabelingPipelineClient:
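    """Thin gRPC client for LabelingPipeline resources; each CRUD method returns False if the RPC fails."""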
def __init__(self, stub, modela):
self.modela = modela
self.__stub: LabelingPipelineServiceStub = stub
def create(self, labelingpipeline: LabelingPipeline) -> bool:
request = CreateLabelingPipelineRequest()
request.labelingpipeline.CopyFrom(labelingpipeline.raw_message)
try:
response = self.__stub.CreateLabelingPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def update(self, labelingpipeline: LabelingPipeline) -> bool:
request = UpdateLabelingPipelineRequest()
request.labelingpipeline.CopyFrom(labelingpipeline.raw_message)
try:
self.__stub.UpdateLabelingPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def get(self, namespace: str, name: str) -> Union[LabelingPipeline, bool]:
request = GetLabelingPipelineRequest()
request.namespace = namespace
request.name = name
try:
response = self.__stub.GetLabelingPipeline(request)
return LabelingPipeline(response.labelingpipeline, self)
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def delete(self, namespace: str, name: str) -> bool:
request = DeleteLabelingPipelineRequest()
request.namespace = namespace
request.name = name
try:
response = self.__stub.DeleteLabelingPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def list(self, namespace: str) -> Union[List[LabelingPipeline], bool]:
request = ListLabelingPipelineRequest()
request.namespace = namespace
try:
response = self.__stub.ListLabelingPipelines(request)
return [LabelingPipeline(item, self) for item in response.labelingpipelines.items]
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
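# Example usage (hypothetical channel address and namespace):
# channel = grpc.insecure_channel("localhost:3000")
# client = LabelingPipelineClient(LabelingPipelineServiceStub(channel), modela=None)
# pipelines = client.list("default-tenant")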
| [
"github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.ListLabelingPipelineRequest",
"github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.DeleteLabelingPipelineRequest",
"github.com.metaprov.modelaapi.pkg.apis.data.v1alpha1.generated_pb2.LabelingPipeline",
"github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.CreateLabelingPipelineRequest",
"modela.ModelaException.ModelaException.process_error",
"github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.GetLabelingPipelineRequest",
"github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.UpdateLabelingPipelineRequest"
] | [((714, 734), 'github.com.metaprov.modelaapi.pkg.apis.data.v1alpha1.generated_pb2.LabelingPipeline', 'MDLabelingPipeline', ([], {}), '()\n', (732, 734), True, 'from github.com.metaprov.modelaapi.pkg.apis.data.v1alpha1.generated_pb2 import LabelingPipeline as MDLabelingPipeline\n'), ((1134, 1165), 'github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.CreateLabelingPipelineRequest', 'CreateLabelingPipelineRequest', ([], {}), '()\n', (1163, 1165), False, 'from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, UpdateLabelingPipelineRequest, DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest\n'), ((1412, 1448), 'modela.ModelaException.ModelaException.process_error', 'ModelaException.process_error', (['error'], {}), '(error)\n', (1441, 1448), False, 'from modela.ModelaException import ModelaException\n'), ((1555, 1586), 'github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.UpdateLabelingPipelineRequest', 'UpdateLabelingPipelineRequest', ([], {}), '()\n', (1584, 1586), False, 'from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, UpdateLabelingPipelineRequest, DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest\n'), ((1822, 1858), 'modela.ModelaException.ModelaException.process_error', 'ModelaException.process_error', (['error'], {}), '(error)\n', (1851, 1858), False, 'from modela.ModelaException import ModelaException\n'), ((1978, 2006), 'github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.GetLabelingPipelineRequest', 'GetLabelingPipelineRequest', ([], {}), '()\n', (2004, 2006), False, 'from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, UpdateLabelingPipelineRequest, DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest\n'), ((2289, 2325), 'modela.ModelaException.ModelaException.process_error', 'ModelaException.process_error', (['error'], {}), '(error)\n', (2318, 2325), False, 'from modela.ModelaException import ModelaException\n'), ((2423, 2454), 'github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.DeleteLabelingPipelineRequest', 'DeleteLabelingPipelineRequest', ([], {}), '()\n', (2452, 2454), False, 'from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, UpdateLabelingPipelineRequest, DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest\n'), ((2695, 2731), 'modela.ModelaException.ModelaException.process_error', 'ModelaException.process_error', (['error'], {}), '(error)\n', (2724, 2731), False, 'from modela.ModelaException import ModelaException\n'), ((2847, 2876), 'github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2.ListLabelingPipelineRequest', 'ListLabelingPipelineRequest', ([], {}), '()\n', (2874, 2876), False, 'from github.com.metaprov.modelaapi.services.labelingpipeline.v1.labelingpipeline_pb2 import CreateLabelingPipelineRequest, UpdateLabelingPipelineRequest, DeleteLabelingPipelineRequest, GetLabelingPipelineRequest, ListLabelingPipelineRequest\n'), ((3159, 3195), 'modela.ModelaException.ModelaException.process_error', 'ModelaException.process_error', (['error'], {}), '(error)\n', (3188, 3195), False, 'from modela.ModelaException import 
ModelaException\n')] |
from anytree import Node, RenderTree
from anytree.exporter import DotExporter
launch = Node("<launch_file>")
cf = Node("crazyflie<no>", parent=launch)
hover = Node("hoverStiff", parent=cf)
traj = Node("trajTracking", parent=cf)
land = Node("land", parent=cf)
altitude = Node("AltitudeControllerPhys", parent=hover)
xy_hover = Node("XYControllerPhys", parent=hover)
yaw = Node("YawControllerPhys", parent=hover)
altitude = Node("AltitudeControllerPhys", parent=traj)
xy_traj = Node("XYControllerTrajPhys", parent=traj)
yaw = Node("YawControllerPhys", parent=traj)
xy_hover = Node("XYControllerPhys", parent=land)
yaw = Node("YawControllerPhys", parent=land)
traj_gen = Node("TrajGenerator", parent=cf)
wave = Node("wave_traj", parent=traj_gen)
circle = Node("circle_traj", parent=traj_gen)
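# Optional: the RenderTree import above can print the hierarchy as ASCII art, e.g.
# for pre, _, node in RenderTree(launch):
#     print(pre + node.name)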
# graphviz needs to be installed for the next line!
DotExporter(launch).to_picture("plots/tree.png")
| [
"anytree.exporter.DotExporter",
"anytree.Node"
] | [((90, 111), 'anytree.Node', 'Node', (['"""<launch_file>"""'], {}), "('<launch_file>')\n", (94, 111), False, 'from anytree import Node, RenderTree\n'), ((123, 159), 'anytree.Node', 'Node', (['"""crazyflie<no>"""'], {'parent': 'launch'}), "('crazyflie<no>', parent=launch)\n", (127, 159), False, 'from anytree import Node, RenderTree\n'), ((172, 201), 'anytree.Node', 'Node', (['"""hoverStiff"""'], {'parent': 'cf'}), "('hoverStiff', parent=cf)\n", (176, 201), False, 'from anytree import Node, RenderTree\n'), ((213, 244), 'anytree.Node', 'Node', (['"""trajTracking"""'], {'parent': 'cf'}), "('trajTracking', parent=cf)\n", (217, 244), False, 'from anytree import Node, RenderTree\n'), ((256, 279), 'anytree.Node', 'Node', (['"""land"""'], {'parent': 'cf'}), "('land', parent=cf)\n", (260, 279), False, 'from anytree import Node, RenderTree\n'), ((292, 336), 'anytree.Node', 'Node', (['"""AltitudeControllerPhys"""'], {'parent': 'hover'}), "('AltitudeControllerPhys', parent=hover)\n", (296, 336), False, 'from anytree import Node, RenderTree\n'), ((348, 386), 'anytree.Node', 'Node', (['"""XYControllerPhys"""'], {'parent': 'hover'}), "('XYControllerPhys', parent=hover)\n", (352, 386), False, 'from anytree import Node, RenderTree\n'), ((398, 437), 'anytree.Node', 'Node', (['"""YawControllerPhys"""'], {'parent': 'hover'}), "('YawControllerPhys', parent=hover)\n", (402, 437), False, 'from anytree import Node, RenderTree\n'), ((450, 493), 'anytree.Node', 'Node', (['"""AltitudeControllerPhys"""'], {'parent': 'traj'}), "('AltitudeControllerPhys', parent=traj)\n", (454, 493), False, 'from anytree import Node, RenderTree\n'), ((505, 546), 'anytree.Node', 'Node', (['"""XYControllerTrajPhys"""'], {'parent': 'traj'}), "('XYControllerTrajPhys', parent=traj)\n", (509, 546), False, 'from anytree import Node, RenderTree\n'), ((558, 596), 'anytree.Node', 'Node', (['"""YawControllerPhys"""'], {'parent': 'traj'}), "('YawControllerPhys', parent=traj)\n", (562, 596), False, 'from anytree import Node, RenderTree\n'), ((609, 646), 'anytree.Node', 'Node', (['"""XYControllerPhys"""'], {'parent': 'land'}), "('XYControllerPhys', parent=land)\n", (613, 646), False, 'from anytree import Node, RenderTree\n'), ((658, 696), 'anytree.Node', 'Node', (['"""YawControllerPhys"""'], {'parent': 'land'}), "('YawControllerPhys', parent=land)\n", (662, 696), False, 'from anytree import Node, RenderTree\n'), ((709, 741), 'anytree.Node', 'Node', (['"""TrajGenerator"""'], {'parent': 'cf'}), "('TrajGenerator', parent=cf)\n", (713, 741), False, 'from anytree import Node, RenderTree\n'), ((753, 787), 'anytree.Node', 'Node', (['"""wave_traj"""'], {'parent': 'traj_gen'}), "('wave_traj', parent=traj_gen)\n", (757, 787), False, 'from anytree import Node, RenderTree\n'), ((799, 835), 'anytree.Node', 'Node', (['"""circle_traj"""'], {'parent': 'traj_gen'}), "('circle_traj', parent=traj_gen)\n", (803, 835), False, 'from anytree import Node, RenderTree\n'), ((889, 908), 'anytree.exporter.DotExporter', 'DotExporter', (['launch'], {}), '(launch)\n', (900, 908), False, 'from anytree.exporter import DotExporter\n')] |
import os
from tqdm import tqdm
from utils.file import loadJson, dumpIterable, dumpJson
from utils.general import datasetTraverse, jsonPathListLogTraverse
#####################################################
# Count the dataset files that contain an API call sequence whose length meets the threshold
#####################################################
def statValidJsonReport(dir_path, len_thresh=10,
class_dir=False,
name_prefix=None,
dump_valid_path=None):
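    # class_dir=True: each sub-folder holds its own report files; otherwise each sub-folder holds a single <name_prefix>.json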
valid = invalid = too_short = total = 0
valid_list = []
for folder in os.listdir(dir_path):
folder_path = dir_path+folder+'/'
if class_dir:
items = os.listdir(folder_path)
else:
items = [name_prefix+'.json']
for item in items:
total_length = 0
total += 1
print('#%d'%total, folder_path+item, end=': ')
try:
report = loadJson(folder_path+item)
raw_file_name = report['target']['file']['name']
for process in report['behavior']['processes']:
total_length += len(process['calls'])
if total_length < len_thresh:
too_short += 1
print('too short:', total_length)
else:
valid += 1
valid_list.append({'file':raw_file_name,
'len':total_length,
'rawPath':folder_path+item})
print('valid')
except Exception as e:
invalid += 1
print('Error: ', str(e))
print('Total:', total)
print('Valid:', valid)
print('Invalid:', invalid)
print('Too Short:', too_short)
if dump_valid_path is not None:
dumpIterable(valid_list, title='valid_file_name', path=dump_valid_path)
#####################################################
# Count the dataset files whose API call sequence contains an unhandled __exception__
#####################################################
def statExceptionReport(dir_path, class_dir=False,
name_prefix=None,
exception_call_patience=20,
dump_noexp_path=None):
def statExceptionReportInner(count_, filep_, report_, list_, dict_, **kwargs):
print('# %d'%count_, filep_, end=' ')
if len(dict_) == 0:
dict_ = {
'noexc': 0,
'exc': 0,
'err': 0,
'exc_list': [],
'noexc_list': []
}
apis = report_['apis']
for i in range(len(apis)):
            if apis[i] == '__exception__' and i+1 < len(apis): # only inspect the positions where an exception occurs
                if apis[i+1] == 'NtTerminateProcess' and i+2==len(apis):     # exception immediately followed by terminate at the end of the trace: detection succeeds
print('terminate', end=' ')
                elif apis[i+1] == '__exception__':          # a run of consecutive exceptions begins
j = 1
flag = False
                    while i+j < len(apis):      # check whether the run of consecutive exceptions exceeds the patience value
                        if j == exception_call_patience:        # the run reached the patience value: detection succeeds
flag = True
print('successive exceptions', end=' ')
break
elif apis[i+j] != '__exception__':
break
else:
j += 1
if not flag:
continue
                else:               # every other case is treated as a failed detection
continue
dict_['exc'] += 1
dict_['exc_list'].append(filep_)
print('Exception')
return list_, dict_
dict_['noexc'] += 1
dict_['noexc_list'].append(filep_)
print('Normal')
return list_,dict_
def statExceptionReportFcb(e, list_, dict_):
dict_['err'] += 1
print("Error")
def statExceptionReportFNcb(reporter_, list_, dict_):
print('*'*50)
print("Total:", dict_['noexc']+dict_['exc']+dict_['err'])
print("No Exception:", dict_['noexc'])
print('Exception:', dict_['exc'])
print('Error:', dict_['err'])
print('*' * 50)
if dump_noexp_path is not None:
dumpJson({'has_exception': dict_['exc_list'],
'no_exception': dict_['noexc_list']},
dump_noexp_path)
datasetTraverse(dir_path=dir_path,
exec_kernel=statExceptionReportInner,
class_dir=class_dir,
name_prefix=name_prefix,
success_callback=lambda x,y:None, # 不做success的默认打印
fail_callback=statExceptionReportFcb,
final_callback=statExceptionReportFNcb)
#####################################################
# Count the size of each malware family, where families are derived from the report name
#####################################################
def statMalClassesOnNames(exc_log_file_path=None,
                          exc_log_list_key=None,                # key of the list inside a list-style log file
dump_log_path=None,
scale_stairs=[20,40,50,60,80,100]):
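    # scale_stairs: size thresholds reported at the end, i.e. how many families contain at least that many samples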
def statMalClassesOnNamesInner(count_, filep_, report_, list_, dict_):
print('# %d'%count_, filep_, end=' ')
        family = '.'.join(report_['name'].split('.')[:3])      # the family is the first three dot-separated fields of the name
if family not in dict_:
dict_[family] = [filep_]
else:
            dict_[family].append(filep_)        # each family key maps to the paths of its data files
return list_, dict_
def statMalClassesOnNamesFNcb(reporter_, list_, dict_):
for f,c in dict_.items():
print(f,len(c))
if dump_log_path is not None:
dumpJson(dict_, dump_log_path)
counts = [0]*len(scale_stairs)
for family, f_list in dict_.items():
for i,s in enumerate(scale_stairs):
if len(f_list) >= s:
counts[i] += 1
for s,c in zip(scale_stairs, counts):
print("More than %d items:"%s, c)
jsonPathListLogTraverse(log_file_path=exc_log_file_path,
exec_kernel=statMalClassesOnNamesInner,
list_key=exc_log_list_key,
final_callback=statMalClassesOnNamesFNcb)
if __name__ == '__main__':
# statValidJsonReport(dir_path='F:/result2/cuckoo/analyses/',
# class_dir=False,
# name_prefix='reports/report',
# dump_valid_path='D:/datasets/LargePE-API-raw/reports/valid_file_list.json')
# statExceptionReport(dir_path='E:/LargePE-API-raw/extracted/',
# class_dir=True,
# dump_noexp_path='E:/LargePE-API-raw/reports/exception_stat_log.json')
statMalClassesOnNames(exc_log_file_path='E:/LargePE-API-raw/reports/exception_stat_log.json',
exc_log_list_key='no_exception',
dump_log_path='E:/LargePE-API-raw/reports/class_stat_log.json') | [
"utils.file.dumpJson",
"os.listdir",
"utils.file.dumpIterable",
"utils.file.loadJson",
"utils.general.datasetTraverse",
"utils.general.jsonPathListLogTraverse"
] | [((558, 578), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (568, 578), False, 'import os\n'), ((4540, 4785), 'utils.general.datasetTraverse', 'datasetTraverse', ([], {'dir_path': 'dir_path', 'exec_kernel': 'statExceptionReportInner', 'class_dir': 'class_dir', 'name_prefix': 'name_prefix', 'success_callback': '(lambda x, y: None)', 'fail_callback': 'statExceptionReportFcb', 'final_callback': 'statExceptionReportFNcb'}), '(dir_path=dir_path, exec_kernel=statExceptionReportInner,\n class_dir=class_dir, name_prefix=name_prefix, success_callback=lambda x,\n y: None, fail_callback=statExceptionReportFcb, final_callback=\n statExceptionReportFNcb)\n', (4555, 4785), False, 'from utils.general import datasetTraverse, jsonPathListLogTraverse\n'), ((6203, 6378), 'utils.general.jsonPathListLogTraverse', 'jsonPathListLogTraverse', ([], {'log_file_path': 'exc_log_file_path', 'exec_kernel': 'statMalClassesOnNamesInner', 'list_key': 'exc_log_list_key', 'final_callback': 'statMalClassesOnNamesFNcb'}), '(log_file_path=exc_log_file_path, exec_kernel=\n statMalClassesOnNamesInner, list_key=exc_log_list_key, final_callback=\n statMalClassesOnNamesFNcb)\n', (6226, 6378), False, 'from utils.general import datasetTraverse, jsonPathListLogTraverse\n'), ((1823, 1894), 'utils.file.dumpIterable', 'dumpIterable', (['valid_list'], {'title': '"""valid_file_name"""', 'path': 'dump_valid_path'}), "(valid_list, title='valid_file_name', path=dump_valid_path)\n", (1835, 1894), False, 'from utils.file import loadJson, dumpIterable, dumpJson\n'), ((664, 687), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (674, 687), False, 'import os\n'), ((4390, 4495), 'utils.file.dumpJson', 'dumpJson', (["{'has_exception': dict_['exc_list'], 'no_exception': dict_['noexc_list']}", 'dump_noexp_path'], {}), "({'has_exception': dict_['exc_list'], 'no_exception': dict_[\n 'noexc_list']}, dump_noexp_path)\n", (4398, 4495), False, 'from utils.file import loadJson, dumpIterable, dumpJson\n'), ((5868, 5898), 'utils.file.dumpJson', 'dumpJson', (['dict_', 'dump_log_path'], {}), '(dict_, dump_log_path)\n', (5876, 5898), False, 'from utils.file import loadJson, dumpIterable, dumpJson\n'), ((926, 954), 'utils.file.loadJson', 'loadJson', (['(folder_path + item)'], {}), '(folder_path + item)\n', (934, 954), False, 'from utils.file import loadJson, dumpIterable, dumpJson\n')] |
"""Test suite for Reads Classified display module."""
from random import randint
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.display_modules.volcano import VolcanoDisplayModule
from app.display_modules.volcano.models import VolcanoResult
from app.display_modules.volcano.constants import MODULE_NAME
from app.display_modules.volcano.tests.factory import VolcanoFactory
from app.samples.sample_models import Sample
from app.tool_results.card_amrs import CARDAMRResultModule
from app.tool_results.card_amrs.tests.factory import create_card_amr
from app.tool_results.kraken import KrakenResultModule
from app.tool_results.kraken.tests.factory import create_kraken
from app.tool_results.metaphlan2 import Metaphlan2ResultModule
from app.tool_results.metaphlan2.tests.factory import create_metaphlan2
from .factory import make_tool_doc
class TestVolcanoModule(BaseDisplayModuleTest):
"""Test suite for Volcano diplay module."""
def test_get_volcano(self):
"""Ensure getting a single Volcano behaves correctly."""
        volcano = VolcanoFactory()
        self.generic_getter_test(volcano, MODULE_NAME,
                                 verify_fields=('categories', 'tools'))
def test_add_volcano(self):
"""Ensure Volcano model is created correctly."""
categories = {
f'cat_name_{i}': [
f'cat_name_{i}_val_{j}'
for j in range(randint(3, 6))
] for i in range(randint(3, 6))
}
tool_names = [f'tool_{i}' for i in range(randint(3, 6))]
tools = {
tool_name: make_tool_doc(categories)
for tool_name in tool_names
}
volcano_result = VolcanoResult(tools=tools, categories=categories)
self.generic_adder_test(volcano_result, MODULE_NAME)
def test_run_volcano_sample_group(self): # pylint: disable=invalid-name
"""Ensure Volcano run_sample_group produces correct results."""
def create_sample(i):
"""Create unique sample for index i."""
args = {
'name': f'Sample{i}',
'metadata': {'foobar': f'baz{i}'},
CARDAMRResultModule.name(): create_card_amr(),
KrakenResultModule.name(): create_kraken(),
Metaphlan2ResultModule.name(): create_metaphlan2(),
}
return Sample(**args).save()
self.generic_run_group_test(create_sample,
VolcanoDisplayModule)
| [
"app.tool_results.metaphlan2.Metaphlan2ResultModule.name",
"app.display_modules.volcano.models.VolcanoResult",
"app.samples.sample_models.Sample",
"random.randint",
"app.display_modules.volcano.tests.factory.VolcanoFactory",
"app.tool_results.card_amrs.CARDAMRResultModule.name",
"app.tool_results.metaphlan2.tests.factory.create_metaphlan2",
"app.tool_results.kraken.tests.factory.create_kraken",
"app.tool_results.kraken.KrakenResultModule.name",
"app.tool_results.card_amrs.tests.factory.create_card_amr"
] | [((1096, 1112), 'app.display_modules.volcano.tests.factory.VolcanoFactory', 'VolcanoFactory', ([], {}), '()\n', (1110, 1112), False, 'from app.display_modules.volcano.tests.factory import VolcanoFactory\n'), ((1735, 1784), 'app.display_modules.volcano.models.VolcanoResult', 'VolcanoResult', ([], {'tools': 'tools', 'categories': 'categories'}), '(tools=tools, categories=categories)\n', (1748, 1784), False, 'from app.display_modules.volcano.models import VolcanoResult\n'), ((2204, 2230), 'app.tool_results.card_amrs.CARDAMRResultModule.name', 'CARDAMRResultModule.name', ([], {}), '()\n', (2228, 2230), False, 'from app.tool_results.card_amrs import CARDAMRResultModule\n'), ((2267, 2292), 'app.tool_results.kraken.KrakenResultModule.name', 'KrakenResultModule.name', ([], {}), '()\n', (2290, 2292), False, 'from app.tool_results.kraken import KrakenResultModule\n'), ((2327, 2356), 'app.tool_results.metaphlan2.Metaphlan2ResultModule.name', 'Metaphlan2ResultModule.name', ([], {}), '()\n', (2354, 2356), False, 'from app.tool_results.metaphlan2 import Metaphlan2ResultModule\n'), ((2232, 2249), 'app.tool_results.card_amrs.tests.factory.create_card_amr', 'create_card_amr', ([], {}), '()\n', (2247, 2249), False, 'from app.tool_results.card_amrs.tests.factory import create_card_amr\n'), ((2294, 2309), 'app.tool_results.kraken.tests.factory.create_kraken', 'create_kraken', ([], {}), '()\n', (2307, 2309), False, 'from app.tool_results.kraken.tests.factory import create_kraken\n'), ((2358, 2377), 'app.tool_results.metaphlan2.tests.factory.create_metaphlan2', 'create_metaphlan2', ([], {}), '()\n', (2375, 2377), False, 'from app.tool_results.metaphlan2.tests.factory import create_metaphlan2\n'), ((1503, 1516), 'random.randint', 'randint', (['(3)', '(6)'], {}), '(3, 6)\n', (1510, 1516), False, 'from random import randint\n'), ((1577, 1590), 'random.randint', 'randint', (['(3)', '(6)'], {}), '(3, 6)\n', (1584, 1590), False, 'from random import randint\n'), ((2412, 2426), 'app.samples.sample_models.Sample', 'Sample', ([], {}), '(**args)\n', (2418, 2426), False, 'from app.samples.sample_models import Sample\n'), ((1459, 1472), 'random.randint', 'randint', (['(3)', '(6)'], {}), '(3, 6)\n', (1466, 1472), False, 'from random import randint\n')] |
import numpy as np
import scipy.stats as sp
from matplotlib import pyplot as plt
def kde(mu, tau, bbox=[-5, 5, -5, 5], save_file="", xlabel="", ylabel="", cmap='Blues'):
values = np.vstack([mu, tau])
kernel = sp.gaussian_kde(values)
fig, ax = plt.subplots()
ax.axis(bbox)
ax.set_aspect(abs(bbox[1]-bbox[0])/abs(bbox[3]-bbox[2]))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
xx, yy = np.mgrid[bbox[0]:bbox[1]:300j, bbox[2]:bbox[3]:300j]
positions = np.vstack([xx.ravel(), yy.ravel()])
f = np.reshape(kernel(positions).T, xx.shape)
cfset = ax.contourf(xx, yy, f, cmap=cmap)
# plt.show()
# Display Result
def display_result(data, cmap='Reds'):
x_out = np.concatenate([data for i in range(10)], axis=0)
kde(x_out[:, 0], x_out[:, 1], bbox=[-2, 2, -2, 2], cmap=cmap)
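# Illustrative call (assumed (N, 2) input array): display_result(np.random.randn(500, 2))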
| [
"matplotlib.pyplot.subplots",
"scipy.stats.gaussian_kde",
"numpy.vstack"
] | [((185, 205), 'numpy.vstack', 'np.vstack', (['[mu, tau]'], {}), '([mu, tau])\n', (194, 205), True, 'import numpy as np\n'), ((219, 242), 'scipy.stats.gaussian_kde', 'sp.gaussian_kde', (['values'], {}), '(values)\n', (234, 242), True, 'import scipy.stats as sp\n'), ((258, 272), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (270, 272), True, 'from matplotlib import pyplot as plt\n')] |
import json
import os
from get import get
import re
from readFileError import ReadFileError
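# URL patterns: index 0 matches BV video ids, index 1 matches ss (season) ids, index 2 matches ep (episode) ids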
ruleArr = ["BV[A-Za-z0-9]{10}/?$","ss[0-9]{5}/?$","ep[0-9]{6}/?$"]
class GetCid:
def __init__(self,ruleArr):
self.ruleArr = ruleArr
    # Given a URL containing an aid, return the title of that video
def aidToTopic(self,aidUrl):
res = get(aidUrl)
res = res["data"]["title"].split(" ")[0]
return res
    # Apply the regex selected by num to the URL and return the matched part
def reg(self,num,url):
url = url.split("?")
url = url[0]
result = re.findall(self.ruleArr[num],url)
return result[0].replace("/","") if result != [] else False
    # Core function: resolve the cid(s) and corresponding title(s) for the different kinds of URLs
def getCid(self,url):
if self.reg(0,url):
bid = self.reg(0,url)
getCidUrl = "https://api.bilibili.com/x/web-interface/view?bvid="+bid
inform = get(getCidUrl)
Obj = {"cid":[inform["data"]["cid"]],"title":[inform["data"]["title"]]}
return Obj
elif self.reg(1,url):
ss = self.reg(1,url)
ss = ss.replace("ss","")
getCidUrl = "https://api.bilibili.com/pgc/web/season/section?season_id="+ss
inform = get(getCidUrl)
inform = inform["result"]["main_section"]["episodes"]
length = len(inform)
cidArr = []
titleArr = []
for i in range(length):
cidArr.append(inform[i]["cid"])
titleArr.append(inform[i]["long_title"])
aid = inform[0]["aid"]
aidUrl = "https://api.bilibili.com/x/web-interface/view?aid="+str(aid)
topic = self.aidToTopic(aidUrl)
Obj = {"cid":cidArr,"title":titleArr,"topic":topic}
return Obj
elif self.reg(2,url):
ep = self.reg(2,url)
ep = ep.replace("ep","")
epUrl = "https://www.bilibili.com/bangumi/play/ep%s"%ep
res = get(epUrl)
            # resolve the cid that corresponds to the ep_id
res = re.findall(r'__INITIAL_STATE__=(.*?);\(function\(\)',res)[0]
res = json.loads(res)
cid = res["initEpList"]
counter = 0
for list in cid:
if list["loaded"] == True:
break
counter += 1
cid = res["mediaInfo"]["episodes"][counter]["cid"]
            # fetch the title that corresponds to the ep_id
title = res["h1Title"]
title = title.replace(" ","")
Obj = {"cid":[cid],"title":[title]}
return Obj
else:
return False
# Uses the GetCid class to obtain the cid and title of a video
class GetBilibiliXml:
    # Given the URL of a Bilibili video, download the corresponding danmaku XML file.
    # The input URL can take several forms:
    # ep id: download the danmaku of that bangumi episode
    # BV id: download the danmaku of that video
    # ss id: download the danmaku of every episode of the bangumi, stored in a separate folder
def getXml(self,url,xmlPath):
getCid = GetCid(ruleArr).getCid
inform = getCid(url)
if inform == False:
print("****输入Url错误,无法解析出弹幕xml!****")
return False
# print(inform)
length = len(inform["cid"])
for i in range(length):
cid = inform["cid"][i]
title = inform["title"][i].replace(" ","")
getXmlUrl = "https://comment.bilibili.com/"+str(cid)+".xml"
xml_data = get(getXmlUrl)
# byte = data.encode("iso-8859-1")
# xml_data = byte.decode("utf-8")
if length>1:
file_dir = xmlPath+inform["topic"]+"/"
file_path = file_dir+str(i+1)+"."+title+".xml"
else:
file_dir = xmlPath
file_path = file_dir+title+".xml"
if not os.path.exists(file_dir):
os.makedirs(file_dir)
with open(file_path,"w",encoding="utf-8") as w:
                outputStart = str(i+1)+"."+"Writing file"+": 《"+title+"》..."
                outputEnd = str(i+1)+"."+"Writing file"+": 《"+title+"》...\n****************************************************"
output = outputStart if i!=length-1 else outputEnd
print(output)
w.write(xml_data)
w.close()
    # Parse the JSON file at the given path into a list
def parseJson(self,path):
with open(path,'r') as load_f:
load_dict = json.load(load_f)
load_f.close()
return load_dict
    # Parse the txt file at the given path into a list of lines
def parseTxt(self,path):
with open(path,"r") as load_f:
load_list = load_f.readlines()
return load_list
    # Return the file-type extension of the given path string
def getFileType(self,path):
if not os.path.exists(path):
raise ReadFileError(0)
if path[-1:]=='/':
raise ReadFileError(1)
fileType = path.split(".")
fileType = fileType[-1]
return fileType
    # Read URLs line by line from the given file and download the Bilibili danmaku for each video
def readListToDownload(self,filePath,xmlPath):
fileType = self.getFileType(filePath)
if fileType == "json":
lists = self.parseJson(filePath)
elif fileType == "txt":
lists = self.parseTxt(filePath)
else:
raise ReadFileError(2)
for url in lists:
self.getXml(url,xmlPath)
return
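# Example usage (hypothetical URL and paths):
# downloader = GetBilibiliXml()
# downloader.getXml("https://www.bilibili.com/video/BV1xx411c7XX", "./danmaku/")
# downloader.readListToDownload("./url_list.txt", "./danmaku/")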
| [
"os.path.exists",
"json.loads",
"os.makedirs",
"readFileError.ReadFileError",
"get.get",
"json.load",
"re.findall"
] | [((312, 323), 'get.get', 'get', (['aidUrl'], {}), '(aidUrl)\n', (315, 323), False, 'from get import get\n'), ((514, 548), 're.findall', 're.findall', (['self.ruleArr[num]', 'url'], {}), '(self.ruleArr[num], url)\n', (524, 548), False, 'import re\n'), ((845, 859), 'get.get', 'get', (['getCidUrl'], {}), '(getCidUrl)\n', (848, 859), False, 'from get import get\n'), ((3208, 3222), 'get.get', 'get', (['getXmlUrl'], {}), '(getXmlUrl)\n', (3211, 3222), False, 'from get import get\n'), ((4172, 4189), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (4181, 4189), False, 'import json\n'), ((4480, 4500), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4494, 4500), False, 'import os\n'), ((4520, 4536), 'readFileError.ReadFileError', 'ReadFileError', (['(0)'], {}), '(0)\n', (4533, 4536), False, 'from readFileError import ReadFileError\n'), ((4582, 4598), 'readFileError.ReadFileError', 'ReadFileError', (['(1)'], {}), '(1)\n', (4595, 4598), False, 'from readFileError import ReadFileError\n'), ((1184, 1198), 'get.get', 'get', (['getCidUrl'], {}), '(getCidUrl)\n', (1187, 1198), False, 'from get import get\n'), ((3581, 3605), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (3595, 3605), False, 'import os\n'), ((3623, 3644), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (3634, 3644), False, 'import os\n'), ((5004, 5020), 'readFileError.ReadFileError', 'ReadFileError', (['(2)'], {}), '(2)\n', (5017, 5020), False, 'from readFileError import ReadFileError\n'), ((1928, 1938), 'get.get', 'get', (['epUrl'], {}), '(epUrl)\n', (1931, 1938), False, 'from get import get\n'), ((2063, 2078), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (2073, 2078), False, 'import json\n'), ((1984, 2044), 're.findall', 're.findall', (['"""__INITIAL_STATE__=(.*?);\\\\(function\\\\(\\\\)"""', 'res'], {}), "('__INITIAL_STATE__=(.*?);\\\\(function\\\\(\\\\)', res)\n", (1994, 2044), False, 'import re\n')] |
"""
Quantiphyse - Registration method using DEEDS
Copyright (c) 2013-2018 University of Oxford
"""
import numpy as np
from PySide2 import QtGui, QtCore, QtWidgets
from quantiphyse.data import QpData, NumpyData
from quantiphyse.gui.widgets import Citation
from quantiphyse.gui.options import OptionBox, NumericOption
from quantiphyse.utils import get_plugins
from quantiphyse.utils.exceptions import QpException
from .deeds_wrapper import deedsReg, deedsWarp
CITE_TITLE = "MRF-Based Deformable Registration and Ventilation Estimation of Lung CT"
CITE_AUTHOR = "<NAME>, <NAME>, <NAME> and <NAME>"
CITE_JOURNAL = "IEEE Transactions on Medical Imaging 2013, Volume 32, Issue 7, July 2013, Pages 1239-1248"
CITE_LINK = "http://dx.doi.org/10.1109/TMI.2013.2246577"
RegMethod = get_plugins("base-classes", class_name="RegMethod")[0]
class DeedsRegMethod(RegMethod):
"""
Registration method using Matthias Heinrich's DEEDS code via a Python wrapper
"""
def __init__(self, ivm):
RegMethod.__init__(self, "deeds", ivm, display_name="DEEDS")
self.options_widget = None
@classmethod
def reg_3d(cls, reg_data, ref_data, options, queue):
"""
Basic 3d registration
This is the only method that DEEDS implements - 4d and moco are implemented
        using the default behaviour of making multiple calls to reg_3d
"""
# Handle output space by resampling onto whichever grid we want to output on
output_space = options.pop("output-space", "ref")
order = options.pop("interp-order", 1)
if output_space == "ref":
if not reg_data.grid.matches(ref_data.grid):
reg_data = reg_data.resample(ref_data.grid, suffix="", order=order)
elif output_space == "reg":
if not reg_data.grid.matches(ref_data.grid):
ref_data = ref_data.resample(reg_data.grid, suffix="", order=order)
else:
raise QpException("DEEDS does not support output in transformed space")
# FIXME DEEDS is currently ignoring voxel sizes?
data, trans, log = deedsReg(reg_data.raw(), ref_data.raw(), **options)
qpdata = NumpyData(data, grid=reg_data.grid, name=reg_data.name)
trans_data = np.zeros(list(reg_data.grid.shape) + [len(trans),])
for idx, data in enumerate(trans):
trans_data[..., idx] = data.reshape(reg_data.grid.shape)
qptrans = NumpyData(trans_data, grid=reg_data.grid, name="deeds_warp")
qptrans.metadata["QpReg"] = "deeds"
return qpdata, qptrans, log.decode("UTF-8")
@classmethod
def apply_transform(cls, reg_data, transform, options, queue):
"""
Apply a previously calculated DEEDS transformation
"""
if not isinstance(transform, QpData) or transform.nvols != 3 or transform.metadata["QpReg"] != "deeds":
raise QpException("Transform provided is not a DEEDS transform")
# Handle output space by resampling onto whichever grid we want to output on
output_space = options.pop("output-space", "ref")
order = options.pop("interp-order", 1)
if output_space == "ref":
if not reg_data.grid.matches(transform.grid):
reg_data = reg_data.resample(transform.grid, suffix="", order=order)
elif output_space == "reg":
if not reg_data.grid.matches(transform.grid):
transform = transform.resample(reg_data.grid, suffix="", order=order)
else:
raise QpException("DEEDS does not support output in transformed space")
ux, vx, wx = transform.volume(0), transform.volume(1), transform.volume(2)
npdata, log = deedsWarp(reg_data.raw(), ux, vx, wx)
return NumpyData(npdata, grid=reg_data.grid, name=reg_data.name), log.decode("UTF-8")
def interface(self, generic_options=None):
"""
:return: QtWidgets.QWidget containing DEEDS options
"""
if self.options_widget is None:
self.options_widget = QtWidgets.QWidget()
vbox = QtWidgets.QVBoxLayout()
self.options_widget.setLayout(vbox)
cite = Citation(CITE_TITLE, CITE_AUTHOR, CITE_JOURNAL)
vbox.addWidget(cite)
self.optbox = OptionBox()
self.optbox.add("Regularisation parameter (alpha)", NumericOption(minval=0, maxval=10, default=2), key="alpha")
self.optbox.add("Num random samples per node", NumericOption(intonly=True, minval=1, maxval=100, default=50), key="randsamp")
self.optbox.add("Number of levels", NumericOption(intonly=True, minval=1, maxval=10, default=5), key="levels")
#grid.addWidget(QtWidgets.QLabel("Grid spacing for each level"), 3, 0)
#self.spacing = QtWidgets.QLineEdit()
#grid.addWidget(QtWidgets.QLabel("Search radius for each level"),4, 0)
#self.radius = QtWidgets.QLineEdit()
#grid.addWidget(QtWidgets.QLabel("Quantisation of search step size for each level"),5, 0)
#self.radius = QtWidgets.QLineEdit()
#grid.addWidget(QtWidgets.QLabel("Use symmetric approach"),6, 0)
#self.symm = QtWidgets.QCheckBox()
#self.symm.setChecked(True)
vbox.addWidget(self.optbox)
return self.options_widget
def options(self):
self.interface()
return self.optbox.values()
| [
"quantiphyse.utils.get_plugins",
"quantiphyse.data.NumpyData",
"quantiphyse.gui.widgets.Citation",
"PySide2.QtWidgets.QWidget",
"quantiphyse.utils.exceptions.QpException",
"quantiphyse.gui.options.OptionBox",
"quantiphyse.gui.options.NumericOption",
"PySide2.QtWidgets.QVBoxLayout"
] | [((777, 828), 'quantiphyse.utils.get_plugins', 'get_plugins', (['"""base-classes"""'], {'class_name': '"""RegMethod"""'}), "('base-classes', class_name='RegMethod')\n", (788, 828), False, 'from quantiphyse.utils import get_plugins\n'), ((2156, 2211), 'quantiphyse.data.NumpyData', 'NumpyData', (['data'], {'grid': 'reg_data.grid', 'name': 'reg_data.name'}), '(data, grid=reg_data.grid, name=reg_data.name)\n', (2165, 2211), False, 'from quantiphyse.data import QpData, NumpyData\n'), ((2416, 2476), 'quantiphyse.data.NumpyData', 'NumpyData', (['trans_data'], {'grid': 'reg_data.grid', 'name': '"""deeds_warp"""'}), "(trans_data, grid=reg_data.grid, name='deeds_warp')\n", (2425, 2476), False, 'from quantiphyse.data import QpData, NumpyData\n'), ((2871, 2929), 'quantiphyse.utils.exceptions.QpException', 'QpException', (['"""Transform provided is not a DEEDS transform"""'], {}), "('Transform provided is not a DEEDS transform')\n", (2882, 2929), False, 'from quantiphyse.utils.exceptions import QpException\n'), ((3735, 3792), 'quantiphyse.data.NumpyData', 'NumpyData', (['npdata'], {'grid': 'reg_data.grid', 'name': 'reg_data.name'}), '(npdata, grid=reg_data.grid, name=reg_data.name)\n', (3744, 3792), False, 'from quantiphyse.data import QpData, NumpyData\n'), ((4020, 4039), 'PySide2.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (4037, 4039), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4061, 4084), 'PySide2.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (4082, 4084), False, 'from PySide2 import QtGui, QtCore, QtWidgets\n'), ((4153, 4200), 'quantiphyse.gui.widgets.Citation', 'Citation', (['CITE_TITLE', 'CITE_AUTHOR', 'CITE_JOURNAL'], {}), '(CITE_TITLE, CITE_AUTHOR, CITE_JOURNAL)\n', (4161, 4200), False, 'from quantiphyse.gui.widgets import Citation\n'), ((4261, 4272), 'quantiphyse.gui.options.OptionBox', 'OptionBox', ([], {}), '()\n', (4270, 4272), False, 'from quantiphyse.gui.options import OptionBox, NumericOption\n'), ((1936, 2001), 'quantiphyse.utils.exceptions.QpException', 'QpException', (['"""DEEDS does not support output in transformed space"""'], {}), "('DEEDS does not support output in transformed space')\n", (1947, 2001), False, 'from quantiphyse.utils.exceptions import QpException\n'), ((3510, 3575), 'quantiphyse.utils.exceptions.QpException', 'QpException', (['"""DEEDS does not support output in transformed space"""'], {}), "('DEEDS does not support output in transformed space')\n", (3521, 3575), False, 'from quantiphyse.utils.exceptions import QpException\n'), ((4337, 4382), 'quantiphyse.gui.options.NumericOption', 'NumericOption', ([], {'minval': '(0)', 'maxval': '(10)', 'default': '(2)'}), '(minval=0, maxval=10, default=2)\n', (4350, 4382), False, 'from quantiphyse.gui.options import OptionBox, NumericOption\n'), ((4456, 4517), 'quantiphyse.gui.options.NumericOption', 'NumericOption', ([], {'intonly': '(True)', 'minval': '(1)', 'maxval': '(100)', 'default': '(50)'}), '(intonly=True, minval=1, maxval=100, default=50)\n', (4469, 4517), False, 'from quantiphyse.gui.options import OptionBox, NumericOption\n'), ((4583, 4642), 'quantiphyse.gui.options.NumericOption', 'NumericOption', ([], {'intonly': '(True)', 'minval': '(1)', 'maxval': '(10)', 'default': '(5)'}), '(intonly=True, minval=1, maxval=10, default=5)\n', (4596, 4642), False, 'from quantiphyse.gui.options import OptionBox, NumericOption\n')] |
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import gridspec
import src.utilities as U
from latent_plots import get_models, visualise_latent_space
plt.rcParams['figure.figsize'] = 8, 5
#use true type fonts only
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
if __name__ == '__main__':
model, encoder, decoder = get_models()
    # interpolate between a "6" and an "8" along a straight line, in image space and in latent space
_,_,mnist, label = U.get_mnist()
x1 = mnist[label.argmax(axis=1) == 6][200]
x2 = mnist[label.argmax(axis=1) == 8][200]
x_ims = np.stack([(1 - t) * x1 + t * x2 for t in np.linspace(0,1, 15)])
x_preds, x_entropy, x_bald = model.get_results(x_ims)
z_begin = encoder.predict(x1[None, :]).flatten()
z_end = encoder.predict(x2[None, :]).flatten()
z_lin = np.stack([(1 - t) * z_begin + t * z_end for t in np.linspace(0,1,15)])
z_ims = decoder.predict(z_lin)
z_preds, z_entropy, z_bald = model.get_results(z_ims)
f = plt.figure()
gs = gridspec.GridSpec(4, 1, height_ratios=[1,1,3,3])
ax0 = plt.subplot(gs[0])
ax0.set_axis_off()
ax0.imshow(np.concatenate([im.squeeze() for im in z_ims], axis=1), extent=[-.5, z_ims.shape[0] + .5, 0, 1], cmap='gray_r')
ax1 = plt.subplot(gs[1])
ax1.set_axis_off()
ax1.imshow(np.concatenate([im.squeeze() for im in x_ims], axis=1), extent=[-.5, x_ims.shape[0] + .5, 0, 1], cmap='gray_r')
ax2 = plt.subplot(gs[2])
ax2.plot(z_entropy, label='Latent Space', c ='r')
ax2.plot(x_entropy, label='Image Space', c = 'b')
ax2.legend()
ax3 = plt.subplot(gs[3])
ax3.plot(z_bald, label='Latent Space', c = 'r')
ax3.plot(x_bald, label='Image Space', c = 'b')
ax3.legend()
plt.savefig('my-figure.png')
plt.show()
| [
"matplotlib.pyplot.savefig",
"latent_plots.get_models",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"src.utilities.get_mnist",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((368, 380), 'latent_plots.get_models', 'get_models', ([], {}), '()\n', (378, 380), False, 'from latent_plots import get_models, visualise_latent_space\n'), ((452, 465), 'src.utilities.get_mnist', 'U.get_mnist', ([], {}), '()\n', (463, 465), True, 'import src.utilities as U\n'), ((998, 1010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1008, 1010), True, 'from matplotlib import pyplot as plt\n'), ((1020, 1071), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(4)', '(1)'], {'height_ratios': '[1, 1, 3, 3]'}), '(4, 1, height_ratios=[1, 1, 3, 3])\n', (1037, 1071), False, 'from matplotlib import gridspec\n'), ((1080, 1098), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (1091, 1098), True, 'from matplotlib import pyplot as plt\n'), ((1260, 1278), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (1271, 1278), True, 'from matplotlib import pyplot as plt\n'), ((1441, 1459), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (1452, 1459), True, 'from matplotlib import pyplot as plt\n'), ((1600, 1618), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[3]'], {}), '(gs[3])\n', (1611, 1618), True, 'from matplotlib import pyplot as plt\n'), ((1744, 1772), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""my-figure.png"""'], {}), "('my-figure.png')\n", (1755, 1772), True, 'from matplotlib import pyplot as plt\n'), ((1777, 1787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1785, 1787), True, 'from matplotlib import pyplot as plt\n'), ((614, 635), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(15)'], {}), '(0, 1, 15)\n', (625, 635), True, 'import numpy as np\n'), ((868, 889), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(15)'], {}), '(0, 1, 15)\n', (879, 889), True, 'import numpy as np\n')] |
"""
richtextpy.tests.test_delta
Copyright (c) 2016 <NAME>
"""
from unittest import TestCase, main
from richtextpy import Delta
class TestDelta(TestCase):
def test_constructor(self):
delta = Delta()
self.assertEqual(delta.get_ops(), [])
delta = Delta([])
self.assertEqual(delta.get_ops(), [])
delta = Delta([{'delete': 3}])
self.assertEqual(delta.get_ops(), [{'delete': 3}])
existing_delta = Delta([{'delete': 3}])
delta = Delta(existing_delta)
self.assertEqual(delta.get_ops(), [{'delete': 3}])
delta = Delta({'ops': [{'delete': 3}]})
self.assertEqual(delta.get_ops(), [{'delete': 3}])
delta = Delta('whoops')
self.assertEqual(delta.get_ops(), [])
def test_insert(self):
delta = Delta()
delta.insert('')
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.insert(' ')
self.assertEqual(delta.get_ops(), [{'insert': ' '}])
delta = Delta()
delta.insert('hello')
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}])
delta = Delta()
delta.insert('hello', {})
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}])
delta = Delta()
delta.insert('hello', {'bold': True})
self.assertEqual(delta.get_ops(), [
{'insert': 'hello', 'attributes': {'bold': True}}])
# old Quill format for embeds
delta = Delta()
delta.insert(
1, {'image': 'https://octodex.github.com/images/labtocat.png'})
self.assertEqual(delta.get_ops(), [{'insert': 1, 'attributes': {
'image': 'https://octodex.github.com/images/labtocat.png'}}])
# new Quill format for embeds
delta = Delta()
delta.insert(
{'image': 'https://octodex.github.com/images/labtocat.png'}, {'alt': 'Lab Octocat'})
self.assertEqual(delta.get_ops(), [{'insert': {
'image': 'https://octodex.github.com/images/labtocat.png'}, 'attributes': {'alt': 'Lab Octocat'}}])
def test_delete(self):
delta = Delta()
delta.delete(0)
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.delete(-10)
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.delete('whoops')
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.delete(15)
self.assertEqual(delta.get_ops(), [{'delete': 15}])
delta = Delta()
delta.delete(15)
self.assertEqual(delta.get_ops(), [{'delete': 15}])
def test_retain(self):
delta = Delta()
delta.retain(0)
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.retain(-10)
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.retain('whoops')
self.assertEqual(delta.get_ops(), [])
delta = Delta()
delta.retain(15)
self.assertEqual(delta.get_ops(), [{'retain': 15}])
delta = Delta()
delta.retain(15, {})
self.assertEqual(delta.get_ops(), [{'retain': 15}])
delta = Delta()
delta.retain(15, {'bold': True})
self.assertEqual(delta.get_ops(), [
{'retain': 15, 'attributes': {'bold': True}}])
def test_simple_combines(self):
delta = Delta().insert('hello ').insert('world')
self.assertEqual(delta.get_ops(), [{'insert': 'hello world'}])
delta = Delta().delete(10).delete(5)
self.assertEqual(delta.get_ops(), [{'delete': 15}])
delta = Delta().retain(10).retain(5)
self.assertEqual(delta.get_ops(), [{'retain': 15}])
delta = Delta().retain(10).retain(10).retain(10).delete(5).delete(5).delete(5)
self.assertEqual(delta.get_ops(), [{'retain': 30}, {'delete': 15}])
def test_cant_combine(self):
# differing attributes
delta = Delta().insert('hello ').insert('world', {'bold': True})
self.assertEqual(delta.get_ops(), [{'insert': 'hello '}, {
'insert': 'world', 'attributes': {'bold': True}}])
delta = Delta().insert('world', {'bold': True}).insert('hello ')
self.assertEqual(delta.get_ops(), [
{'insert': 'world', 'attributes': {'bold': True}}, {'insert': 'hello '}])
delta = Delta().retain(10).retain(5, {'bold': True})
self.assertEqual(delta.get_ops(), [{'retain': 10}, {
'retain': 5, 'attributes': {'bold': True}}])
delta = Delta().retain(5, {'bold': True}).retain(10)
self.assertEqual(delta.get_ops(), [
{'retain': 5, 'attributes': {'bold': True}}, {'retain': 10}])
# insert text + insert embed
delta = Delta().insert('hello').insert(
{'image': 'https://octodex.github.com/images/labtocat.png'}, {'alt': 'Lab Octocat'})
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {'insert': {
'image': 'https://octodex.github.com/images/labtocat.png'}, 'attributes': {'alt': 'Lab Octocat'}}])
def test_reorder(self):
delta = Delta().insert('hello').delete(3)
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {'delete': 3}])
delta = Delta().delete(3).insert('hello')
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {'delete': 3}])
delta = Delta().delete(3).delete(3).insert('hello')
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {'delete': 6}])
def test_reorder_and_combine(self):
delta = Delta().insert('hello').delete(3).insert(' world')
self.assertEqual(delta.get_ops(), [
{'insert': 'hello world'}, {'delete': 3}])
delta = Delta().insert('hello').delete(
3).insert(' world', {'bold': True})
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {
'insert': ' world', 'attributes': {'bold': True}}, {'delete': 3}])
delta = Delta().delete(3).delete(3).insert('hello').delete(3)
self.assertEqual(delta.get_ops(), [{'insert': 'hello'}, {'delete': 9}])
def test_length(self):
delta = Delta().retain(10).retain(10).retain(10).delete(5).delete(5).delete(5)
self.assertEqual(delta.length(), 45)
delta = Delta().insert('hello').delete(
3).insert(' world', {'bold': True})
self.assertEqual(delta.length(), 14)
delta = Delta().insert('hello').insert(
{'image': 'https://octodex.github.com/images/labtocat.png'}, {'alt': 'Lab Octocat'})
self.assertEqual(delta.length(), 6)
def test_chop(self):
delta = Delta().retain(10).retain(10).retain(10).delete(5).delete(5).delete(5)
self.assertEqual(delta.get_ops(), [{'retain': 30}, {'delete': 15}])
delta = Delta().delete(5).delete(5).delete(5).retain(10).retain(10).retain(10)
delta.chop()
self.assertEqual(delta.get_ops(), [{'delete': 15}])
def test_compose(self):
# tests replicated from: https://github.com/ottypes/rich-text/blob/master/test/delta/compose.js
# insert + insert
a = Delta().insert('A')
b = Delta().insert('B')
expected = Delta().insert('B').insert('A')
self.assertEqual(a.compose(b), expected)
# insert + retain
a = Delta().insert('A')
b = Delta().retain(1, {'bold': True, 'color': 'red', 'font': None})
expected = Delta().insert('A', {'bold': True, 'color': 'red'})
self.assertEqual(a.compose(b), expected)
# insert + delete
a = Delta().insert('A')
b = Delta().delete(1)
expected = Delta()
self.assertEqual(a.compose(b), expected)
# delete + insert
a = Delta().delete(1)
b = Delta().insert('B')
expected = Delta().insert('B').delete(1)
self.assertEqual(a.compose(b), expected)
# delete + retain
a = Delta().delete(1)
b = Delta().retain(1, {'bold': True, 'color': 'red'})
expected = Delta().delete(1).retain(1, {'bold': True, 'color': 'red'})
self.assertEqual(a.compose(b), expected)
# delete + delete
a = Delta().delete(1)
b = Delta().delete(1)
expected = Delta().delete(2)
self.assertEqual(a.compose(b), expected)
# retain + insert
a = Delta().retain(1, {'color': 'blue'})
b = Delta().insert('B')
expected = Delta().insert('B').retain(1, {'color': 'blue'})
self.assertEqual(a.compose(b), expected)
# retain + retain
a = Delta().retain(1, {'color': 'blue'})
b = Delta().retain(1, {'bold': True, 'color': 'red', 'font': None})
expected = Delta().retain(
1, {'bold': True, 'color': 'red', 'font': None})
self.assertEqual(a.compose(b), expected)
# retain + delete
a = Delta().retain(1, {'color': 'blue'})
b = Delta().delete(1)
expected = Delta().delete(1)
self.assertEqual(a.compose(b), expected)
# insert in middle of text
a = Delta().insert('Hello')
b = Delta().retain(3).insert('X')
expected = Delta().insert('HelXlo')
self.assertEqual(a.compose(b), expected)
# insert and delete ordering
a = Delta().insert('Hello')
b = Delta().insert('Hello')
insertFirst = Delta().retain(3).insert('X').delete(1)
deleteFirst = Delta().retain(3).delete(1).insert('X')
expected = Delta().insert('HelXo')
self.assertEqual(a.compose(insertFirst), expected)
self.assertEqual(b.compose(deleteFirst), expected)
# insert embed
a = Delta().insert(1, {'src': 'http://quilljs.com/image.png'})
b = Delta().retain(1, {'alt': 'logo'})
expected = Delta().insert(
1, {'src': 'http://quilljs.com/image.png', 'alt': 'logo'})
self.assertEqual(a.compose(b), expected)
# delete entire text
a = Delta().retain(4).insert('Hello')
b = Delta().delete(9)
expected = Delta().delete(4)
self.assertEqual(a.compose(b), expected)
# retain more than length of text
a = Delta().insert('Hello')
b = Delta().retain(10)
expected = Delta().insert('Hello')
self.assertEqual(a.compose(b), expected)
# retain empty embed
a = Delta().insert(1)
b = Delta().retain(1)
expected = Delta().insert(1)
self.assertEqual(a.compose(b), expected)
# remove all attributes
a = Delta().insert('A', {'bold': True})
b = Delta().retain(1, {'bold': None})
expected = Delta().insert('A')
self.assertEqual(a.compose(b), expected)
# remove all embed attributes
a = Delta().insert(2, {'bold': True})
b = Delta().retain(1, {'bold': None})
expected = Delta().insert(2)
self.assertEqual(a.compose(b), expected)
# immutability
attr1 = {'bold': True}
attr2 = {'bold': True}
a1 = Delta().insert('Test', attr1)
a2 = Delta().insert('Test', attr1)
b1 = Delta().retain(1, {'color': 'red'}).delete(2)
b2 = Delta().retain(1, {'color': 'red'}).delete(2)
expected = Delta().insert(
'T', {'color': 'red', 'bold': True}).insert('t', attr1)
self.assertEqual(a1.compose(b1), expected)
self.assertEqual(a1, a2)
self.assertEqual(b1, b2)
self.assertEqual(attr1, attr2)
def test_transform(self):
# tests replicated from https://github.com/ottypes/rich-text/blob/master/test/delta/transform.js
# insert + insert
a1 = Delta().insert('A')
b1 = Delta().insert('B')
a2 = Delta(a1)
b2 = Delta(b1)
expected1 = Delta().retain(1).insert('B')
expected2 = Delta().insert('B')
self.assertEqual(a1.transform(b1, True), expected1)
self.assertEqual(a2.transform(b2, False), expected2)
# insert + retain
a = Delta().insert('A')
b = Delta().retain(1, {'bold': True, 'color': 'red'})
expected = Delta().retain(1).retain(1, {'bold': True, 'color': 'red'})
self.assertEqual(a.transform(b, True), expected)
# insert + delete
a = Delta().insert('A')
b = Delta().delete(1)
expected = Delta().retain(1).delete(1)
self.assertEqual(a.transform(b, True), expected)
# delete + insert
a = Delta().delete(1)
b = Delta().insert('B')
expected = Delta().insert('B')
self.assertEqual(a.transform(b, True), expected)
# delete + retain
a = Delta().delete(1)
b = Delta().retain(1, {'bold': True, 'color': 'red'})
expected = Delta()
self.assertEqual(a.transform(b, True), expected)
# delete + delete
a = Delta().delete(1)
b = Delta().delete(1)
expected = Delta()
self.assertEqual(a.transform(b, True), expected)
# retain + insert
a = Delta().retain(1, {'color': 'blue'})
b = Delta().insert('B')
expected = Delta().insert('B')
self.assertEqual(a.transform(b, True), expected)
# retain + retain
a1 = Delta().retain(1, {'color': 'blue'})
b1 = Delta().retain(1, {'bold': True, 'color': 'red'})
a2 = Delta().retain(1, {'color': 'blue'})
b2 = Delta().retain(1, {'bold': True, 'color': 'red'})
expected1 = Delta().retain(1, {'bold': True})
expected2 = Delta()
self.assertEqual(a1.transform(b1, True), expected1)
self.assertEqual(b2.transform(a2, True), expected2)
# retain + retain without priority
a1 = Delta().retain(1, {'color': 'blue'})
b1 = Delta().retain(1, {'bold': True, 'color': 'red'})
a2 = Delta().retain(1, {'color': 'blue'})
b2 = Delta().retain(1, {'bold': True, 'color': 'red'})
expected1 = Delta().retain(1, {'bold': True, 'color': 'red'})
expected2 = Delta().retain(1, {'color': 'blue'})
self.assertEqual(a1.transform(b1, False), expected1)
self.assertEqual(b2.transform(a2, False), expected2)
# retain + delete
a = Delta().retain(1, {'color': 'blue'})
b = Delta().delete(1)
expected = Delta().delete(1)
self.assertEqual(a.transform(b, True), expected)
# alternating edits
a1 = Delta().retain(2).insert('si').delete(5)
b1 = Delta().retain(1).insert('e').delete(5).retain(1).insert('ow')
a2 = Delta(a1)
b2 = Delta(b1)
expected1 = Delta().retain(1).insert('e').delete(1).retain(2).insert('ow')
expected2 = Delta().retain(2).insert('si').delete(1)
self.assertEqual(a1.transform(b1, False), expected1)
self.assertEqual(b2.transform(a2, False), expected2)
# conflicting appends
a1 = Delta().retain(3).insert('aa')
b1 = Delta().retain(3).insert('bb')
a2 = Delta(a1)
b2 = Delta(b1)
expected1 = Delta().retain(5).insert('bb')
expected2 = Delta().retain(3).insert('aa')
self.assertEqual(a1.transform(b1, True), expected1)
self.assertEqual(b2.transform(a2, False), expected2)
# prepend + append
a1 = Delta().insert('aa')
b1 = Delta().retain(3).insert('bb')
expected1 = Delta().retain(5).insert('bb')
a2 = Delta(a1)
b2 = Delta(b1)
expected2 = Delta().insert('aa')
self.assertEqual(a1.transform(b1, False), expected1)
self.assertEqual(b2.transform(a2, False), expected2)
# trailing deletes with differing lengths
a1 = Delta().retain(2).delete(1)
b1 = Delta().delete(3)
expected1 = Delta().delete(2)
a2 = Delta(a1)
b2 = Delta(b1)
expected2 = Delta()
self.assertEqual(a1.transform(b1, False), expected1)
self.assertEqual(b2.transform(a2, False), expected2)
# immutability
a1 = Delta().insert('A')
a2 = Delta().insert('A')
b1 = Delta().insert('B')
b2 = Delta().insert('B')
expected = Delta().retain(1).insert('B')
self.assertEqual(a1.transform(b1, True), expected)
self.assertEqual(a1, a2)
self.assertEqual(b1, b2)
def test_transform_position(self):
# tests replicated from https://github.com/ottypes/rich-text/blob/master/test/delta/transform-position.js
# insert before position
delta = Delta().insert('A')
self.assertEqual(delta.transform_position(2), 3)
# insert after position
delta = Delta().retain(2).insert('A')
self.assertEqual(delta.transform_position(1), 1)
# insert at position
delta = Delta().retain(2).insert('A')
self.assertEqual(delta.transform_position(2, True), 2)
self.assertEqual(delta.transform_position(2, False), 3)
# delete before position
delta = Delta().delete(2)
self.assertEqual(delta.transform_position(4), 2)
# delete after position
delta = Delta().retain(4).delete(2)
self.assertEqual(delta.transform_position(2), 2)
# delete across position
delta = Delta().retain(1).delete(4)
self.assertEqual(delta.transform_position(2), 1)
# insert and delete before position
delta = Delta().retain(2).insert('A').delete(2)
self.assertEqual(delta.transform_position(4), 3)
# insert before and delete across position
delta = Delta().retain(2).insert('A').delete(4)
self.assertEqual(delta.transform_position(4), 3)
# delete before and delete across position
delta = Delta().delete(1).retain(1).delete(4)
self.assertEqual(delta.transform_position(4), 1)
def test_slice(self):
# tests replicated from https://github.com/ottypes/rich-text/blob/master/test/delta/helpers.js
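# slice(start, end) returns the portion of this delta covering document positions
# [start, end), splitting individual ops at the boundaries when needed; both
# arguments are optional (see the "no params" case below).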
# start
slice = Delta().retain(2).insert('A').slice(2)
expected = Delta().insert('A')
self.assertEqual(slice, expected)
# start and end chop
slice = Delta().insert('0123456789').slice(2, 7)
expected = Delta().insert('23456')
self.assertEqual(slice, expected)
# start and end multiple chop
slice = Delta().insert(
'0123', {'bold': True}).insert('4567').slice(3, 5)
expected = Delta().insert('3', {'bold': True}).insert('4')
self.assertEqual(slice, expected)
# start and end
slice = Delta().retain(2).insert(
'A', {'bold': True}).insert('B').slice(2, 3)
expected = Delta().insert('A', {'bold': True})
self.assertEqual(slice, expected)
# no params
delta = Delta().retain(2).insert('A', {'bold': True}).insert('B')
slice = delta.slice()
self.assertEqual(slice, delta)
# split ops
slice = Delta().insert('AB', {'bold': True}).insert('C').slice(1, 2)
expected = Delta().insert('B', {'bold': True})
self.assertEqual(slice, expected)
# split ops multiple times
slice = Delta().insert('ABC', {'bold': True}).insert('D').slice(1, 2)
expected = Delta().insert('B', {'bold': True})
self.assertEqual(slice, expected)
def test_concat(self):
# tests replicated from https://github.com/ottypes/rich-text/blob/master/test/delta/helpers.js
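# concat(other) appends another document delta to this one; adjacent inserts whose
# attributes match are merged into a single op (the "mergeable" case below).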
# empty delta
delta = Delta().insert('Test')
concat = Delta()
expected = Delta().insert('Test')
self.assertEqual(delta.concat(concat), expected)
# unmergeable
delta = Delta().insert('Test')
concat = Delta().insert('!', {'bold': True})
expected = Delta().insert('Test').insert('!', {'bold': True})
self.assertEqual(delta.concat(concat), expected)
# mergeable
delta = Delta().insert('Test', {'bold': True})
concat = Delta().insert('!', {'bold': True}).insert('\n')
expected = Delta().insert('Test!', {'bold': True}).insert('\n')
self.assertEqual(delta.concat(concat), expected)
def test_diff(self):
# tests replicated from https://github.com/ottypes/rich-text/blob/master/test/delta/diff.js
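# diff(other) returns the delta that turns this document into `other`; both operands
# must be documents (insert-only deltas), otherwise an exception is raised.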
# insert
a = Delta().insert('A')
b = Delta().insert('AB')
expected = Delta().retain(1).insert('B')
self.assertEqual(a.diff(b), expected)
# delete
a = Delta().insert('AB')
b = Delta().insert('A')
expected = Delta().retain(1).delete(1)
self.assertEqual(a.diff(b), expected)
# retain
a = Delta().insert('A')
b = Delta().insert('A')
expected = Delta()
self.assertEqual(a.diff(b), expected)
# format
a = Delta().insert('A')
b = Delta().insert('A', {'bold': True})
expected = Delta().retain(1, {'bold': True})
self.assertEqual(a.diff(b), expected)
# embed integer match
a = Delta().insert(1)
b = Delta().insert(1)
expected = Delta()
self.assertEqual(a.diff(b), expected)
# embed integer mismatch
a = Delta().insert(1)
b = Delta().insert(2)
expected = Delta().delete(1).insert(2)
self.assertEqual(a.diff(b), expected)
# embed object match
a = Delta().insert({'image': 'http://quilljs.com'})
b = Delta().insert({'image': 'http://quilljs.com'})
expected = Delta()
self.assertEqual(a.diff(b), expected)
# embed object mismatch
a = Delta().insert({'image': 'http://quilljs.com', 'alt': 'Overwrite'})
b = Delta().insert({'image': 'http://quilljs.com'})
expected = Delta().insert({'image': 'http://quilljs.com'}).delete(1)
self.assertEqual(a.diff(b), expected)
# embed object change
embed = {'image': 'http://quilljs.com'}
a = Delta().insert(embed)
embed['image'] = 'http://github.com'
b = Delta().insert(embed)
expected = Delta().insert({'image': 'http://github.com'}).delete(1)
self.assertEqual(a.diff(b), expected)
# embed false positive
a = Delta().insert(1)
b = Delta().insert(chr(0)) # Placeholder char for embed in diff()
expected = Delta().insert(chr(0)).delete(1)
self.assertEqual(a.diff(b), expected)
# error on non-documents
a = Delta().insert('A')
b = Delta().retain(1).insert('B')
with self.assertRaises(Exception):
a.diff(b)
with self.assertRaises(Exception):
b.diff(a)
# inconvenient indexes
a = Delta().insert('12', {'bold': True}).insert('34', {'italic': True})
b = Delta().insert('123', {'color': 'red'})
expected = Delta().retain(2, {'bold': None, 'color': 'red'}).retain(
1, {'italic': None, 'color': 'red'}).delete(1)
self.assertEqual(a.diff(b), expected)
# combination
a = Delta().insert('Bad', {'color': 'red'}
).insert('cat', {'color': 'blue'})
b = Delta().insert('Good', {'bold': True}
).insert('dog', {'italic': True})
expected = Delta().insert('Good', {'bold': True}).delete(2).retain(
1, {'italic': True, 'color': None}).delete(3).insert('og', {'italic': True})
self.assertEqual(a.diff(b), expected)
# same document
a = Delta().insert('A').insert('B', {'bold': True})
expected = Delta()
self.assertEqual(a.diff(a), expected)
# immutability
attr1 = {'color': 'red'}
attr2 = {'color': 'red'}
a1 = Delta().insert('A', attr1)
a2 = Delta().insert('A', attr1)
b1 = Delta().insert('A', {'bold': True}).insert('B')
b2 = Delta().insert('A', {'bold': True}).insert('B')
expected = Delta().retain(1, {'bold': True, 'color': None}).insert('B')
self.assertEqual(a1.diff(b1), expected)
self.assertEqual(a1, a2)
self.assertEqual(b1, b2)
self.assertEqual(attr1, attr2)
if __name__ == "__main__":
main()
| [
"unittest.main",
"richtextpy.Delta"
] | [((24232, 24238), 'unittest.main', 'main', ([], {}), '()\n', (24236, 24238), False, 'from unittest import TestCase, main\n'), ((206, 213), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (211, 213), False, 'from richtextpy import Delta\n'), ((277, 286), 'richtextpy.Delta', 'Delta', (['[]'], {}), '([])\n', (282, 286), False, 'from richtextpy import Delta\n'), ((350, 372), 'richtextpy.Delta', 'Delta', (["[{'delete': 3}]"], {}), "([{'delete': 3}])\n", (355, 372), False, 'from richtextpy import Delta\n'), ((458, 480), 'richtextpy.Delta', 'Delta', (["[{'delete': 3}]"], {}), "([{'delete': 3}])\n", (463, 480), False, 'from richtextpy import Delta\n'), ((497, 518), 'richtextpy.Delta', 'Delta', (['existing_delta'], {}), '(existing_delta)\n', (502, 518), False, 'from richtextpy import Delta\n'), ((595, 626), 'richtextpy.Delta', 'Delta', (["{'ops': [{'delete': 3}]}"], {}), "({'ops': [{'delete': 3}]})\n", (600, 626), False, 'from richtextpy import Delta\n'), ((703, 718), 'richtextpy.Delta', 'Delta', (['"""whoops"""'], {}), "('whoops')\n", (708, 718), False, 'from richtextpy import Delta\n'), ((809, 816), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (814, 816), False, 'from richtextpy import Delta\n'), ((905, 912), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (910, 912), False, 'from richtextpy import Delta\n'), ((1017, 1024), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (1022, 1024), False, 'from richtextpy import Delta\n'), ((1137, 1144), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (1142, 1144), False, 'from richtextpy import Delta\n'), ((1261, 1268), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (1266, 1268), False, 'from richtextpy import Delta\n'), ((1491, 1498), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (1496, 1498), False, 'from richtextpy import Delta\n'), ((1812, 1819), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (1817, 1819), False, 'from richtextpy import Delta\n'), ((2164, 2171), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2169, 2171), False, 'from richtextpy import Delta\n'), ((2259, 2266), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2264, 2266), False, 'from richtextpy import Delta\n'), ((2356, 2363), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2361, 2363), False, 'from richtextpy import Delta\n'), ((2458, 2465), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2463, 2465), False, 'from richtextpy import Delta\n'), ((2568, 2575), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2573, 2575), False, 'from richtextpy import Delta\n'), ((2705, 2712), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2710, 2712), False, 'from richtextpy import Delta\n'), ((2800, 2807), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2805, 2807), False, 'from richtextpy import Delta\n'), ((2897, 2904), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (2902, 2904), False, 'from richtextpy import Delta\n'), ((2999, 3006), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3004, 3006), False, 'from richtextpy import Delta\n'), ((3109, 3116), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3114, 3116), False, 'from richtextpy import Delta\n'), ((3223, 3230), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3228, 3230), False, 'from richtextpy import Delta\n'), ((7792, 7799), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7797, 7799), False, 'from richtextpy import Delta\n'), ((11862, 11871), 'richtextpy.Delta', 'Delta', (['a1'], {}), '(a1)\n', (11867, 11871), False, 'from richtextpy import Delta\n'), ((11885, 11894), 'richtextpy.Delta', 'Delta', (['b1'], {}), '(b1)\n', (11890, 11894), False, 
'from richtextpy import Delta\n'), ((12879, 12886), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12884, 12886), False, 'from richtextpy import Delta\n'), ((13050, 13057), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13055, 13057), False, 'from richtextpy import Delta\n'), ((13646, 13653), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13651, 13653), False, 'from richtextpy import Delta\n'), ((14665, 14674), 'richtextpy.Delta', 'Delta', (['a1'], {}), '(a1)\n', (14670, 14674), False, 'from richtextpy import Delta\n'), ((14688, 14697), 'richtextpy.Delta', 'Delta', (['b1'], {}), '(b1)\n', (14693, 14697), False, 'from richtextpy import Delta\n'), ((15096, 15105), 'richtextpy.Delta', 'Delta', (['a1'], {}), '(a1)\n', (15101, 15105), False, 'from richtextpy import Delta\n'), ((15119, 15128), 'richtextpy.Delta', 'Delta', (['b1'], {}), '(b1)\n', (15124, 15128), False, 'from richtextpy import Delta\n'), ((15522, 15531), 'richtextpy.Delta', 'Delta', (['a1'], {}), '(a1)\n', (15527, 15531), False, 'from richtextpy import Delta\n'), ((15545, 15554), 'richtextpy.Delta', 'Delta', (['b1'], {}), '(b1)\n', (15550, 15554), False, 'from richtextpy import Delta\n'), ((15892, 15901), 'richtextpy.Delta', 'Delta', (['a1'], {}), '(a1)\n', (15897, 15901), False, 'from richtextpy import Delta\n'), ((15915, 15924), 'richtextpy.Delta', 'Delta', (['b1'], {}), '(b1)\n', (15920, 15924), False, 'from richtextpy import Delta\n'), ((15945, 15952), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15950, 15952), False, 'from richtextpy import Delta\n'), ((19604, 19611), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19609, 19611), False, 'from richtextpy import Delta\n'), ((20805, 20812), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20810, 20812), False, 'from richtextpy import Delta\n'), ((21166, 21173), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21171, 21173), False, 'from richtextpy import Delta\n'), ((21576, 21583), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21581, 21583), False, 'from richtextpy import Delta\n'), ((23621, 23628), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23626, 23628), False, 'from richtextpy import Delta\n'), ((7277, 7284), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7282, 7284), False, 'from richtextpy import Delta\n'), ((7309, 7316), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7314, 7316), False, 'from richtextpy import Delta\n'), ((7468, 7475), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7473, 7475), False, 'from richtextpy import Delta\n'), ((7500, 7507), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7505, 7507), False, 'from richtextpy import Delta\n'), ((7583, 7590), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7588, 7590), False, 'from richtextpy import Delta\n'), ((7723, 7730), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7728, 7730), False, 'from richtextpy import Delta\n'), ((7755, 7762), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7760, 7762), False, 'from richtextpy import Delta\n'), ((7888, 7895), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7893, 7895), False, 'from richtextpy import Delta\n'), ((7918, 7925), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7923, 7925), False, 'from richtextpy import Delta\n'), ((8075, 8082), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8080, 8082), False, 'from richtextpy import Delta\n'), ((8105, 8112), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8110, 8112), False, 'from richtextpy import Delta\n'), ((8322, 8329), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8327, 8329), False, 'from richtextpy 
import Delta\n'), ((8352, 8359), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8357, 8359), False, 'from richtextpy import Delta\n'), ((8389, 8396), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8394, 8396), False, 'from richtextpy import Delta\n'), ((8495, 8502), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8500, 8502), False, 'from richtextpy import Delta\n'), ((8544, 8551), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8549, 8551), False, 'from richtextpy import Delta\n'), ((8720, 8727), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8725, 8727), False, 'from richtextpy import Delta\n'), ((8769, 8776), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8774, 8776), False, 'from richtextpy import Delta\n'), ((8852, 8859), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8857, 8859), False, 'from richtextpy import Delta\n'), ((9017, 9024), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9022, 9024), False, 'from richtextpy import Delta\n'), ((9066, 9073), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9071, 9073), False, 'from richtextpy import Delta\n'), ((9103, 9110), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9108, 9110), False, 'from richtextpy import Delta\n'), ((9218, 9225), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9223, 9225), False, 'from richtextpy import Delta\n'), ((9303, 9310), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9308, 9310), False, 'from richtextpy import Delta\n'), ((9427, 9434), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9432, 9434), False, 'from richtextpy import Delta\n'), ((9463, 9470), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9468, 9470), False, 'from richtextpy import Delta\n'), ((9630, 9637), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9635, 9637), False, 'from richtextpy import Delta\n'), ((9808, 9815), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9813, 9815), False, 'from richtextpy import Delta\n'), ((9879, 9886), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9884, 9886), False, 'from richtextpy import Delta\n'), ((9933, 9940), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9938, 9940), False, 'from richtextpy import Delta\n'), ((10157, 10164), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10162, 10164), False, 'from richtextpy import Delta\n'), ((10194, 10201), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10199, 10201), False, 'from richtextpy import Delta\n'), ((10316, 10323), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10321, 10323), False, 'from richtextpy import Delta\n'), ((10352, 10359), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10357, 10359), False, 'from richtextpy import Delta\n'), ((10390, 10397), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10395, 10397), False, 'from richtextpy import Delta\n'), ((10505, 10512), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10510, 10512), False, 'from richtextpy import Delta\n'), ((10535, 10542), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10540, 10542), False, 'from richtextpy import Delta\n'), ((10572, 10579), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10577, 10579), False, 'from richtextpy import Delta\n'), ((10684, 10691), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10689, 10691), False, 'from richtextpy import Delta\n'), ((10732, 10739), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10737, 10739), False, 'from richtextpy import Delta\n'), ((10785, 10792), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10790, 10792), False, 'from richtextpy import Delta\n'), ((10905, 10912), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', 
(10910, 10912), False, 'from richtextpy import Delta\n'), ((10951, 10958), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10956, 10958), False, 'from richtextpy import Delta\n'), ((11004, 11011), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11009, 11011), False, 'from richtextpy import Delta\n'), ((11170, 11177), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11175, 11177), False, 'from richtextpy import Delta\n'), ((11213, 11220), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11218, 11220), False, 'from richtextpy import Delta\n'), ((11796, 11803), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11801, 11803), False, 'from richtextpy import Delta\n'), ((11829, 11836), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11834, 11836), False, 'from richtextpy import Delta\n'), ((11965, 11972), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11970, 11972), False, 'from richtextpy import Delta\n'), ((12145, 12152), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12150, 12152), False, 'from richtextpy import Delta\n'), ((12177, 12184), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12182, 12184), False, 'from richtextpy import Delta\n'), ((12402, 12409), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12407, 12409), False, 'from richtextpy import Delta\n'), ((12434, 12441), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12439, 12441), False, 'from richtextpy import Delta\n'), ((12595, 12602), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12600, 12602), False, 'from richtextpy import Delta\n'), ((12625, 12632), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12630, 12632), False, 'from richtextpy import Delta\n'), ((12664, 12671), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12669, 12671), False, 'from richtextpy import Delta\n'), ((12780, 12787), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12785, 12787), False, 'from richtextpy import Delta\n'), ((12810, 12817), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12815, 12817), False, 'from richtextpy import Delta\n'), ((12983, 12990), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12988, 12990), False, 'from richtextpy import Delta\n'), ((13013, 13020), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13018, 13020), False, 'from richtextpy import Delta\n'), ((13154, 13161), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13159, 13161), False, 'from richtextpy import Delta\n'), ((13203, 13210), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13208, 13210), False, 'from richtextpy import Delta\n'), ((13242, 13249), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13247, 13249), False, 'from richtextpy import Delta\n'), ((13359, 13366), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13364, 13366), False, 'from richtextpy import Delta\n'), ((13409, 13416), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13414, 13416), False, 'from richtextpy import Delta\n'), ((13472, 13479), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13477, 13479), False, 'from richtextpy import Delta\n'), ((13522, 13529), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13527, 13529), False, 'from richtextpy import Delta\n'), ((13592, 13599), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13597, 13599), False, 'from richtextpy import Delta\n'), ((13831, 13838), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13836, 13838), False, 'from richtextpy import Delta\n'), ((13881, 13888), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13886, 13888), False, 'from richtextpy import Delta\n'), ((13944, 13951), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13949, 13951), 
False, 'from richtextpy import Delta\n'), ((13994, 14001), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (13999, 14001), False, 'from richtextpy import Delta\n'), ((14064, 14071), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14069, 14071), False, 'from richtextpy import Delta\n'), ((14134, 14141), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14139, 14141), False, 'from richtextpy import Delta\n'), ((14332, 14339), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14337, 14339), False, 'from richtextpy import Delta\n'), ((14381, 14388), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14386, 14388), False, 'from richtextpy import Delta\n'), ((14418, 14425), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14423, 14425), False, 'from richtextpy import Delta\n'), ((15393, 15400), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15398, 15400), False, 'from richtextpy import Delta\n'), ((15575, 15582), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15580, 15582), False, 'from richtextpy import Delta\n'), ((15823, 15830), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15828, 15830), False, 'from richtextpy import Delta\n'), ((15861, 15868), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15866, 15868), False, 'from richtextpy import Delta\n'), ((16112, 16119), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16117, 16119), False, 'from richtextpy import Delta\n'), ((16145, 16152), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16150, 16152), False, 'from richtextpy import Delta\n'), ((16178, 16185), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16183, 16185), False, 'from richtextpy import Delta\n'), ((16211, 16218), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16216, 16218), False, 'from richtextpy import Delta\n'), ((16609, 16616), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16614, 16616), False, 'from richtextpy import Delta\n'), ((17075, 17082), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17080, 17082), False, 'from richtextpy import Delta\n'), ((18126, 18133), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18131, 18133), False, 'from richtextpy import Delta\n'), ((18294, 18301), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18299, 18301), False, 'from richtextpy import Delta\n'), ((18746, 18753), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18751, 18753), False, 'from richtextpy import Delta\n'), ((19105, 19112), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19110, 19112), False, 'from richtextpy import Delta\n'), ((19316, 19323), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19321, 19323), False, 'from richtextpy import Delta\n'), ((19564, 19571), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19569, 19571), False, 'from richtextpy import Delta\n'), ((19631, 19638), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19636, 19638), False, 'from richtextpy import Delta\n'), ((19750, 19757), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19755, 19757), False, 'from richtextpy import Delta\n'), ((19790, 19797), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19795, 19797), False, 'from richtextpy import Delta\n'), ((19990, 19997), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19995, 19997), False, 'from richtextpy import Delta\n'), ((20380, 20387), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20385, 20387), False, 'from richtextpy import Delta\n'), ((20412, 20419), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20417, 20419), False, 'from richtextpy import Delta\n'), ((20558, 20565), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20563, 20565), False, 'from 
richtextpy import Delta\n'), ((20591, 20598), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20596, 20598), False, 'from richtextpy import Delta\n'), ((20734, 20741), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20739, 20741), False, 'from richtextpy import Delta\n'), ((20766, 20773), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20771, 20773), False, 'from richtextpy import Delta\n'), ((20889, 20896), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20894, 20896), False, 'from richtextpy import Delta\n'), ((20921, 20928), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20926, 20928), False, 'from richtextpy import Delta\n'), ((20976, 20983), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20981, 20983), False, 'from richtextpy import Delta\n'), ((21099, 21106), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21104, 21106), False, 'from richtextpy import Delta\n'), ((21129, 21136), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21134, 21136), False, 'from richtextpy import Delta\n'), ((21266, 21273), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21271, 21273), False, 'from richtextpy import Delta\n'), ((21296, 21303), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21301, 21303), False, 'from richtextpy import Delta\n'), ((21449, 21456), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21454, 21456), False, 'from richtextpy import Delta\n'), ((21509, 21516), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21514, 21516), False, 'from richtextpy import Delta\n'), ((21675, 21682), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21680, 21682), False, 'from richtextpy import Delta\n'), ((21755, 21762), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21760, 21762), False, 'from richtextpy import Delta\n'), ((22017, 22024), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22022, 22024), False, 'from richtextpy import Delta\n'), ((22096, 22103), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22101, 22103), False, 'from richtextpy import Delta\n'), ((22284, 22291), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22289, 22291), False, 'from richtextpy import Delta\n'), ((22314, 22321), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22319, 22321), False, 'from richtextpy import Delta\n'), ((22521, 22528), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22526, 22528), False, 'from richtextpy import Delta\n'), ((22837, 22844), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22842, 22844), False, 'from richtextpy import Delta\n'), ((23778, 23785), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23783, 23785), False, 'from richtextpy import Delta\n'), ((23818, 23825), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23823, 23825), False, 'from richtextpy import Delta\n'), ((3441, 3448), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3446, 3448), False, 'from richtextpy import Delta\n'), ((3570, 3577), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3575, 3577), False, 'from richtextpy import Delta\n'), ((3676, 3683), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3681, 3683), False, 'from richtextpy import Delta\n'), ((4010, 4017), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (4015, 4017), False, 'from richtextpy import Delta\n'), ((4227, 4234), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (4232, 4234), False, 'from richtextpy import Delta\n'), ((4444, 4451), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (4449, 4451), False, 'from richtextpy import Delta\n'), ((4637, 4644), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (4642, 4644), False, 'from richtextpy import Delta\n'), ((4867, 4874), 
'richtextpy.Delta', 'Delta', ([], {}), '()\n', (4872, 4874), False, 'from richtextpy import Delta\n'), ((5243, 5250), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (5248, 5250), False, 'from richtextpy import Delta\n'), ((5374, 5381), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (5379, 5381), False, 'from richtextpy import Delta\n'), ((6574, 6581), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6579, 6581), False, 'from richtextpy import Delta\n'), ((7348, 7355), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7353, 7355), False, 'from richtextpy import Delta\n'), ((7957, 7964), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (7962, 7964), False, 'from richtextpy import Delta\n'), ((8174, 8181), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8179, 8181), False, 'from richtextpy import Delta\n'), ((8583, 8590), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (8588, 8590), False, 'from richtextpy import Delta\n'), ((9254, 9261), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9259, 9261), False, 'from richtextpy import Delta\n'), ((10111, 10118), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (10116, 10118), False, 'from richtextpy import Delta\n'), ((11256, 11263), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11261, 11263), False, 'from richtextpy import Delta\n'), ((11315, 11322), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11320, 11322), False, 'from richtextpy import Delta\n'), ((11380, 11387), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11385, 11387), False, 'from richtextpy import Delta\n'), ((11915, 11922), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (11920, 11922), False, 'from richtextpy import Delta\n'), ((12246, 12253), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12251, 12253), False, 'from richtextpy import Delta\n'), ((12471, 12478), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (12476, 12478), False, 'from richtextpy import Delta\n'), ((15008, 15015), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15013, 15015), False, 'from richtextpy import Delta\n'), ((15052, 15059), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15057, 15059), False, 'from richtextpy import Delta\n'), ((15149, 15156), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15154, 15156), False, 'from richtextpy import Delta\n'), ((15200, 15207), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15205, 15207), False, 'from richtextpy import Delta\n'), ((15427, 15434), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15432, 15434), False, 'from richtextpy import Delta\n'), ((15478, 15485), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15483, 15485), False, 'from richtextpy import Delta\n'), ((15782, 15789), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (15787, 15789), False, 'from richtextpy import Delta\n'), ((16250, 16257), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16255, 16257), False, 'from richtextpy import Delta\n'), ((16735, 16742), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16740, 16742), False, 'from richtextpy import Delta\n'), ((16868, 16875), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (16873, 16875), False, 'from richtextpy import Delta\n'), ((17199, 17206), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17204, 17206), False, 'from richtextpy import Delta\n'), ((17334, 17341), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17339, 17341), False, 'from richtextpy import Delta\n'), ((18234, 18241), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18239, 18241), False, 'from richtextpy import Delta\n'), ((18513, 18520), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', 
(18518, 18520), False, 'from richtextpy import Delta\n'), ((19845, 19852), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19850, 19852), False, 'from richtextpy import Delta\n'), ((20046, 20053), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20051, 20053), False, 'from richtextpy import Delta\n'), ((20114, 20121), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20119, 20121), False, 'from richtextpy import Delta\n'), ((20452, 20459), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20457, 20459), False, 'from richtextpy import Delta\n'), ((20630, 20637), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (20635, 20637), False, 'from richtextpy import Delta\n'), ((21333, 21340), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21338, 21340), False, 'from richtextpy import Delta\n'), ((21822, 21829), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (21827, 21829), False, 'from richtextpy import Delta\n'), ((22137, 22144), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22142, 22144), False, 'from richtextpy import Delta\n'), ((22396, 22403), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22401, 22403), False, 'from richtextpy import Delta\n'), ((22553, 22560), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22558, 22560), False, 'from richtextpy import Delta\n'), ((22757, 22764), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22762, 22764), False, 'from richtextpy import Delta\n'), ((23094, 23101), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23099, 23101), False, 'from richtextpy import Delta\n'), ((23207, 23214), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23212, 23214), False, 'from richtextpy import Delta\n'), ((23554, 23561), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23559, 23561), False, 'from richtextpy import Delta\n'), ((23858, 23865), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23863, 23865), False, 'from richtextpy import Delta\n'), ((23919, 23926), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23924, 23926), False, 'from richtextpy import Delta\n'), ((23986, 23993), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23991, 23993), False, 'from richtextpy import Delta\n'), ((5505, 5512), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (5510, 5512), False, 'from richtextpy import Delta\n'), ((5686, 5693), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (5691, 5693), False, 'from richtextpy import Delta\n'), ((5866, 5873), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (5871, 5873), False, 'from richtextpy import Delta\n'), ((6432, 6439), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6437, 6439), False, 'from richtextpy import Delta\n'), ((9509, 9516), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9514, 9516), False, 'from richtextpy import Delta\n'), ((9571, 9578), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (9576, 9578), False, 'from richtextpy import Delta\n'), ((14535, 14542), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14540, 14542), False, 'from richtextpy import Delta\n'), ((14801, 14808), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14806, 14808), False, 'from richtextpy import Delta\n'), ((17480, 17487), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17485, 17487), False, 'from richtextpy import Delta\n'), ((17645, 17652), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17650, 17652), False, 'from richtextpy import Delta\n'), ((17810, 17817), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (17815, 17817), False, 'from richtextpy import Delta\n'), ((18068, 18075), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18073, 18075), False, 'from richtextpy 
import Delta\n'), ((18415, 18422), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18420, 18422), False, 'from richtextpy import Delta\n'), ((18861, 18868), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18866, 18868), False, 'from richtextpy import Delta\n'), ((19025, 19032), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19030, 19032), False, 'from richtextpy import Delta\n'), ((19235, 19242), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (19240, 19242), False, 'from richtextpy import Delta\n'), ((22896, 22903), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (22901, 22903), False, 'from richtextpy import Delta\n'), ((6121, 6128), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6126, 6128), False, 'from richtextpy import Delta\n'), ((18644, 18651), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (18649, 18651), False, 'from richtextpy import Delta\n'), ((14589, 14596), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14594, 14596), False, 'from richtextpy import Delta\n'), ((14718, 14725), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (14723, 14725), False, 'from richtextpy import Delta\n'), ((23325, 23332), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (23330, 23332), False, 'from richtextpy import Delta\n'), ((3782, 3789), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (3787, 3789), False, 'from richtextpy import Delta\n'), ((6299, 6306), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6304, 6306), False, 'from richtextpy import Delta\n'), ((6789, 6796), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6794, 6796), False, 'from richtextpy import Delta\n'), ((6953, 6960), 'richtextpy.Delta', 'Delta', ([], {}), '()\n', (6958, 6960), False, 'from richtextpy import Delta\n')] |
from fbs_runtime.application_context.PyQt5 import ApplicationContext
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys
import re
from .background import background
from .Emotion_Detection2 import Emotion_Detection
import numpy as np
import pandas as pd
import pickle
import os
import platform
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, \
QSlider, QStyle, QSizePolicy, QFileDialog
import sys
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
import subprocess
def read_stylesheet(path_to_sheet):
with open(path_to_sheet, 'r') as f:
stylesheet = f.read()
return stylesheet
class Ui_MainWindow(QtCore.QObject):
def setupUi(self, MainWindow, AppContext):
# 'Select Video' button
self.stylesheet_select_unselected = read_stylesheet(AppContext.get_resource('btn_select_unselected.qss'))
self.stylesheet_select_selected = read_stylesheet(AppContext.get_resource('btn_select_selected.qss'))
# 'Process' button
self.stylesheet_process_inactive = read_stylesheet(AppContext.get_resource('btn_process_inactive.qss'))
self.stylesheet_process_active = read_stylesheet(AppContext.get_resource('btn_process_active.qss'))
# Progressbar
self.stylesheet_progressbar_busy = read_stylesheet(AppContext.get_resource('progressbar_busy.qss'))
self.stylesheet_progressbar_finshed = read_stylesheet(AppContext.get_resource('progressbar_finished.qss'))
## Fonts
# Process inactive
self.font_asleep = QtGui.QFont('Metropolis', 18)
# Process active
self.font_awake = QtGui.QFont('Metropolis', 18)
self.font_awake.setBold(True)
# ML Models
self.emotion_model_path = AppContext.get_resource('model.h5') # Path for Emotion Classification Model
self.prototext = AppContext.get_resource('deploy.prototxt.txt') # Prototxt file for face detection
self.model = AppContext.get_resource('res10_300x300_ssd_iter_140000.caffemodel') # Model for face Detection
self.model_path = AppContext.get_resource('finalized_model.sav')
self.loaded_model = pickle.load(open(self.model_path, 'rb'))
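# loaded_model is a pickled classifier (presumably trained offline) whose predict()
# maps concatenated emotion + background probability vectors to a genre id, see Processing()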
# Select Video
font_select = QtGui.QFont('Metropolis', 18)
font_select.setBold(True)
### UI Elements
dsc_logo_img = AppContext.get_resource('dsc_logo1.png')
path_logo_small = AppContext.get_resource('GPLogo2.png')
path_logo = AppContext.get_resource('GenrePrediction2.png')
self.MainWindow = MainWindow
self.MainWindow.setObjectName("Genre Prediction")
self.MainWindow.setStyleSheet("QMainWindow {background:'white'}")
self.MainWindow.setFixedSize(800, 600)
self.MainWindow.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)
self.centralwidget = QtWidgets.QWidget(self.MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.MainWindow.setCentralWidget(self.centralwidget)
self.MainWindow.setWindowIcon(QtGui.QIcon(path_logo_small))
# DSC Logo
dsc_logo = QtWidgets.QLabel(self.centralwidget)
dsc_logo.setPixmap(QtGui.QPixmap(dsc_logo_img).scaled(540, 100, QtCore.Qt.KeepAspectRatio, transformMode = QtCore.Qt.SmoothTransformation))
dsc_logo.setObjectName("dsc_logo")
dsc_logo.setGeometry(300, 10, 1000, 150)
# Application Mini Logo
app_mini_logo = QtWidgets.QLabel(self.centralwidget)
app_mini_logo.setPixmap(QtGui.QPixmap(path_logo_small).scaled(540, 100, QtCore.Qt.KeepAspectRatio, transformMode = QtCore.Qt.SmoothTransformation))
app_mini_logo.setObjectName("app_mini_logo")
app_mini_logo.setGeometry(330, -30, 300, 500)
# Application Name
app_logo = QtWidgets.QLabel(self.centralwidget)
app_logo.setPixmap(QtGui.QPixmap(path_logo).scaled(540, 100, QtCore.Qt.KeepAspectRatio, transformMode = QtCore.Qt.SmoothTransformation))
app_logo.setObjectName("app_logo")
app_logo.setGeometry(170, 285, 700, 150)
# Select Video Button
self.btn_select_video = QtWidgets.QPushButton('Select Video', self.centralwidget)
self.btn_select_video.setStyleSheet(self.stylesheet_select_unselected)
self.btn_select_video.setEnabled(True)
self.btn_select_video.setFixedSize(200, 50)
self.btn_select_video.setFont(font_select)
self.btn_select_video.setShortcut('Ctrl+O')
self.btn_select_video.setGeometry(175, 445, 150, 50)
# Process Button
self.btn_process = QtWidgets.QPushButton('Process', self.centralwidget)
self.btn_process.setEnabled(False)
self.btn_process.setFixedSize(200, 50)
self.btn_process.setFont(self.font_asleep)
self.btn_process.setStyleSheet(self.stylesheet_process_inactive)
self.btn_process.setShortcut('Ctrl+E')
self.btn_process.setGeometry(435, 445, 150, 50)
# self.btn_process.clicked.connect(self.Processing)
# Progress Bar
self.progress = QtWidgets.QProgressBar(self.MainWindow)
self.progress.setStyleSheet(self.stylesheet_progressbar_busy)
self.progress.setGeometry(0, 590, 800, 10)
self.retranslateUi()
QtCore.QMetaObject.connectSlotsByName(self.MainWindow)
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.MainWindow.setWindowTitle(_translate("MainWindow", "Genre Prediction"))
self.btn_select_video.clicked.connect(self.open_document)
self.btn_process.clicked.connect(self.Processing)
self.MainWindow.show()
def open_document(self):
self.video_path = QFileDialog.getOpenFileName(self.MainWindow, 'Open Document', filter = '*.mp4 *.mov *.avi')
self.video_path = self.video_path[0]
# print(self.video_path)
self.output_path = re.sub('mp4', 'avi', self.video_path)
if self.video_path == '':
self.sleep_btn_process()
self.unselect_btn_select()
return
self.selected_btn_select()
self.wake_process()
def Processing(self):
self.progress.setRange(0, 100)
self.progress.setStyleSheet(self.stylesheet_progressbar_busy)
self.input_video = self.video_path # Path for video
self.c = 0.7 # Confidence score for detecting the face of a person
background_labels, background_probabilities = background(self.video_path)
emotion_labels, emotion_probabilities = Emotion_Detection(self.emotion_model_path, self.prototext, self.model, self.video_path, self.c)
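# Both helpers return one probability vector per analysed segment of the video;
# matching segments are concatenated below into a single feature row for the classifier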
rows = []
if len(emotion_probabilities) == len(background_probabilities):
for i in range(0, len(emotion_probabilities)):
rows.append(emotion_probabilities[i] + background_probabilities[i]) # Concatenating the two lists.
if rows != []:
df = pd.DataFrame(rows)
predictions = list(self.loaded_model.predict(df.values))
Genres = {0 : 'Horror', 1 : 'Action' , 2 : 'Comedy', 3 : 'Romantic'}
predictions = list(map(Genres.get, predictions))
# print(predictions)
self.popup_success()
self.final_predictions = predictions
else:
self.popup_error()
return
cap = cv2.VideoCapture(self.input_video)
fps = cap.get(cv2.CAP_PROP_FPS)
seconds_interval = fps * 10
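# one predicted genre label is overlaid on each ~10-second block of frames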
limit = 0 # A variable used to wait until seconds_interval is reached
n = len(predictions)
k = 0
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total1 = 0
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)
result = cv2.VideoWriter(self.output_path, cv2.VideoWriter_fourcc(*'MJPG'), fps, size)
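# annotated frames are written as an MJPG-encoded AVI alongside the input video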
# print("Output video path is", self.output_path)
while True:
ret, frame = cap.read()
if not ret:
break
limit += 1
total1 += 1
if(limit != int(seconds_interval) and k < n):
font = cv2.FONT_HERSHEY_SIMPLEX
org = (100, 100)
fontScale = 3
color = (255, 0, 0)
thickness = 2
# Using cv2.putText() method
frame = cv2.putText(frame, predictions[k], org, font, fontScale, color, thickness, cv2.LINE_AA)
# print("Written")
if limit == int(seconds_interval):
k += 1
limit = 0
result.write(frame)
cap.release()
result.release()
def popup_error(self):
self.stop_progressbar()
error_popup = QtWidgets.QMessageBox(self.centralwidget)
error_popup.setIcon(QtWidgets.QMessageBox.Critical)
error_popup.setWindowTitle('Error: Unable to process video')
error_popup.setText('Unable to process video. Raise an issue on the official GDGVIT repo.')
error_popup.setStandardButtons(QtWidgets.QMessageBox.Ok)
error_popup.show()
def stop_progressbar(self):
self.sleep_btn_process()
self.progress.setRange(0, 1)
self.progress.setStyleSheet(self.stylesheet_progressbar_finshed)
self.progress.setTextVisible(False)
self.progress.setValue(1)
self.unselect_btn_select()
def popup_success(self):
self.stop_progressbar()
success_popup = QtWidgets.QMessageBox(self.centralwidget)
success_popup.setIcon(QtWidgets.QMessageBox.NoIcon)
success_popup.setWindowTitle('Success: File Written')
success_popup.setText('The Processed Video was successfully saved at ' + self.output_path)
btn_open_folder = QtWidgets.QPushButton('Play the Processed Video')
btn_open_folder.clicked.connect(self.showvideo)
success_popup.addButton(btn_open_folder, QtWidgets.QMessageBox.AcceptRole)
success_popup.setStandardButtons(QtWidgets.QMessageBox.Ok)
success_popup.show()
def open_containing_folder(self):
if platform.system() == 'Windows':
output_path = re.search(r'^(.+)\\([^\\]+)$', self.output_path).groups()[0]
os.startfile(output_path)
# print(output_path)
elif platform.system() == 'Darwin':
output_path = re.search('^(.+)/([^/]+)$', self.output_path).groups()[0]
# print(output_path)
subprocess.Popen(['open', output_path])
else:
output_path = re.search('^(.+)/([^/]+)$', self.output_path).groups()[0]
subprocess.Popen(['xdg-open', output_path])
def showvideo(self):
self.mydialog = QDialog()
self.mydialog.setModal(True)
self.mydialog.setWindowTitle("Output")
self.mydialog.setGeometry(350, 100, 700, 500)
self.mydialog.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
videowidget = QVideoWidget()
# openBtn = QPushButton('Open Video')
self.mydialog.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(self.output_path)))
self.mydialog.playBtn = QPushButton()
self.mydialog.playBtn.setIcon(self.mydialog.style().standardIcon(QStyle.SP_MediaPlay))
self.mydialog.playBtn.clicked.connect(self.play_video)
self.mydialog.slider = QSlider(Qt.Horizontal)
self.mydialog.slider.setRange(0,0)
self.mydialog.slider.sliderMoved.connect(self.set_position)
self.mydialog.label = QLabel()
self.mydialog.label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
hboxLayout = QHBoxLayout()
hboxLayout.setContentsMargins(0,0,0,0)
# hboxLayout.addWidget(openBtn)
hboxLayout.addWidget(self.mydialog.playBtn)
hboxLayout.addWidget(self.mydialog.slider)
vboxLayout = QVBoxLayout()
vboxLayout.addWidget(videowidget)
vboxLayout.addLayout(hboxLayout)
vboxLayout.addWidget(self.mydialog.label)
self.mydialog.setLayout(vboxLayout)
self.mydialog.mediaPlayer.setVideoOutput(videowidget)
self.mydialog.mediaPlayer.stateChanged.connect(self.mediastate_changed)
self.mydialog.mediaPlayer.positionChanged.connect(self.position_changed)
self.mydialog.mediaPlayer.durationChanged.connect(self.duration_changed)
self.mydialog.exec()
def play_video(self):
if self.mydialog.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.mydialog.mediaPlayer.pause()
else:
self.mydialog.mediaPlayer.play()
def set_position(self, position):
self.mydialog.mediaPlayer.setPosition(position)
def mediastate_changed(self, state):
if self.mydialog.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.mydialog.playBtn.setIcon(
self.mydialog.style().standardIcon(QStyle.SP_MediaPause)
)
else:
self.mydialog.playBtn.setIcon(
self.mydialog.style().standardIcon(QStyle.SP_MediaPlay)
)
def position_changed(self, position):
self.mydialog.slider.setValue(position)
def duration_changed(self, duration):
self.mydialog.slider.setRange(0, duration)
def set_position(self, position):
self.mydialog.mediaPlayer.setPosition(position)
def sleep_btn_process(self):
self.btn_process.setEnabled(False)
self.btn_process.setStyleSheet(self.stylesheet_process_inactive)
self.btn_process.setFont(self.font_asleep)
def unselect_btn_select(self):
self.btn_select_video.setStyleSheet(self.stylesheet_select_unselected)
self.btn_select_video.setText("Select Video")
def selected_btn_select(self):
self.btn_select_video.setStyleSheet(self.stylesheet_select_selected)
video_name = re.search('[^/]*$', self.video_path).group()
self.btn_select_video.setText(video_name)
def wake_process(self):
self.btn_process.setEnabled(True)
self.btn_process.setStyleSheet(self.stylesheet_process_active)
self.btn_process.setFont(self.font_awake)
def sleep_btn_process(self):
self.btn_process.setEnabled(False)
self.btn_process.setStyleSheet(self.stylesheet_process_inactive)
self.btn_process.setFont(self.font_asleep)
def main():
print("Entering fbs")
appctxt = ApplicationContext() # 1. Instantiate ApplicationContext
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow, appctxt)
MainWindow.show()
exit_code = appctxt.app.exec_() # 2. Invoke appctxt.app.exec_()
sys.exit(exit_code)
os.system('fbs run')
if __name__ == '__main__':
main() | [
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QMessageBox",
"sys.exit",
"PyQt5.QtWidgets.QVBoxLayout",
"os.startfile",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"re.search",
"subprocess.Popen",
"platform.system",
"PyQt5.QtWidgets.QLabel",
"cv2.VideoWriter_fourcc",
"pandas.DataFrame",
"PyQt5.QtWidgets.QPushButton",
"fbs_runtime.application_context.PyQt5.ApplicationContext",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtGui.QFont",
"PyQt5.QtMultimediaWidgets.QVideoWidget",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QHBoxLayout",
"cv2.putText",
"re.sub",
"PyQt5.QtWidgets.QSlider",
"PyQt5.QtMultimedia.QMediaPlayer",
"PyQt5.QtWidgets.QProgressBar",
"PyQt5.QtGui.QPixmap",
"cv2.VideoCapture",
"os.system"
] | [((14796, 14816), 'fbs_runtime.application_context.PyQt5.ApplicationContext', 'ApplicationContext', ([], {}), '()\n', (14814, 14816), False, 'from fbs_runtime.application_context.PyQt5 import ApplicationContext\n'), ((14876, 14899), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (14897, 14899), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15060, 15079), 'sys.exit', 'sys.exit', (['exit_code'], {}), '(exit_code)\n', (15068, 15079), False, 'import sys\n'), ((15084, 15104), 'os.system', 'os.system', (['"""fbs run"""'], {}), "('fbs run')\n", (15093, 15104), False, 'import os\n'), ((1759, 1788), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Metropolis"""', '(18)'], {}), "('Metropolis', 18)\n", (1770, 1788), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1841, 1870), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Metropolis"""', '(18)'], {}), "('Metropolis', 18)\n", (1852, 1870), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2461, 2490), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Metropolis"""', '(18)'], {}), "('Metropolis', 18)\n", (2472, 2490), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3104, 3138), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.MainWindow'], {}), '(self.MainWindow)\n', (3121, 3138), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3366, 3402), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3382, 3402), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3701, 3737), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3717, 3737), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4049, 4085), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4065, 4085), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4387, 4444), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Select Video"""', 'self.centralwidget'], {}), "('Select Video', self.centralwidget)\n", (4408, 4444), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4849, 4901), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Process"""', 'self.centralwidget'], {}), "('Process', self.centralwidget)\n", (4870, 4901), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5329, 5368), 'PyQt5.QtWidgets.QProgressBar', 'QtWidgets.QProgressBar', (['self.MainWindow'], {}), '(self.MainWindow)\n', (5351, 5368), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5528, 5582), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['self.MainWindow'], {}), '(self.MainWindow)\n', (5565, 5582), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5967, 6061), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self.MainWindow', '"""Open Document"""'], {'filter': '"""*.mp4 *.mov *.avi"""'}), "(self.MainWindow, 'Open Document', filter=\n '*.mp4 *.mov *.avi')\n", (5994, 6061), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((6164, 6201), 're.sub', 're.sub', (['"""mp4"""', '"""avi"""', 'self.video_path'], {}), "('mp4', 'avi', self.video_path)\n", (6170, 6201), False, 'import re\n'), ((7617, 7651), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.input_video'], {}), '(self.input_video)\n', (7633, 7651), False, 'import cv2\n'), ((9084, 9125), 
'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (9105, 9125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9822, 9863), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (9843, 9863), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10111, 10160), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Play the Processed Video"""'], {}), "('Play the Processed Video')\n", (10132, 10160), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11256, 11301), 'PyQt5.QtMultimedia.QMediaPlayer', 'QMediaPlayer', (['None', 'QMediaPlayer.VideoSurface'], {}), '(None, QMediaPlayer.VideoSurface)\n', (11268, 11301), False, 'from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent\n'), ((11325, 11339), 'PyQt5.QtMultimediaWidgets.QVideoWidget', 'QVideoWidget', ([], {}), '()\n', (11337, 11339), False, 'from PyQt5.QtMultimediaWidgets import QVideoWidget\n'), ((11525, 11538), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ([], {}), '()\n', (11536, 11538), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((11729, 11751), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (11736, 11751), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((11894, 11902), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (11900, 11902), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((12011, 12024), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (12022, 12024), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((12238, 12251), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (12249, 12251), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QLabel, QSlider, QStyle, QSizePolicy, QFileDialog\n'), ((3296, 3324), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['path_logo_small'], {}), '(path_logo_small)\n', (3307, 3324), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7208, 7226), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (7220, 7226), True, 'import pandas as pd\n'), ((8116, 8147), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (8138, 8147), False, 'import cv2\n'), ((10446, 10463), 'platform.system', 'platform.system', ([], {}), '()\n', (10461, 10463), False, 'import platform\n'), ((10573, 10598), 'os.startfile', 'os.startfile', (['output_path'], {}), '(output_path)\n', (10585, 10598), False, 'import os\n'), ((8702, 8793), 'cv2.putText', 'cv2.putText', (['frame', 'predictions[k]', 'org', 'font', 'fontScale', 'color', 'thickness', 'cv2.LINE_AA'], {}), '(frame, predictions[k], org, font, fontScale, color, thickness,\n cv2.LINE_AA)\n', (8713, 8793), False, 'import cv2\n'), ((10645, 10662), 'platform.system', 'platform.system', ([], {}), '()\n', (10660, 10662), False, 'import platform\n'), ((10805, 10844), 'subprocess.Popen', 'subprocess.Popen', (["['open', output_path]"], {}), "(['open', output_path])\n", (10821, 10844), False, 'import subprocess\n'), ((10969, 11012), 'subprocess.Popen', 
'subprocess.Popen', (["['xdg-open', output_path]"], {}), "(['xdg-open', output_path])\n", (10985, 11012), False, 'import subprocess\n'), ((14250, 14286), 're.search', 're.search', (['"""[^/]*$"""', 'self.video_path'], {}), "('[^/]*$', self.video_path)\n", (14259, 14286), False, 'import re\n'), ((3430, 3457), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['dsc_logo_img'], {}), '(dsc_logo_img)\n', (3443, 3457), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3770, 3800), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['path_logo_small'], {}), '(path_logo_small)\n', (3783, 3800), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4113, 4137), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['path_logo'], {}), '(path_logo)\n', (4126, 4137), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10503, 10550), 're.search', 're.search', (['"""^(.+)\\\\([^\\\\]+)$"""', 'self.output_path'], {}), "('^(.+)\\\\([^\\\\]+)$', self.output_path)\n", (10512, 10550), False, 'import re\n'), ((10702, 10747), 're.search', 're.search', (['"""^(.+)/([^/]+)$"""', 'self.output_path'], {}), "('^(.+)/([^/]+)$', self.output_path)\n", (10711, 10747), False, 'import re\n'), ((10898, 10943), 're.search', 're.search', (['"""^(.+)/([^/]+)$"""', 'self.output_path'], {}), "('^(.+)/([^/]+)$', self.output_path)\n", (10907, 10943), False, 'import re\n')] |
# Licensed as BSD by <NAME> of the ESRF on 2014-08-06
################################################################################
# Copyright (c) 2014, the European Synchrotron Radiation Facility #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of the European Synchrotron Radiation Facility nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
from numpy import *
from time import time
###############################################################################################################
def mp_corr(iproc,nfile,chn,plot,npix_per_q,index_in_q,quc,quce,quplot):
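    # Multi-tau correlator worker.
    # iproc      : index of this process (only process 0 pushes data to the plot queue)
    # nfile      : total number of frames to correlate
    # chn        : number of channels per correlator register level
    # plot       : 'no' disables streaming intermediate results to quplot
    # npix_per_q : number of pixels in each q-bin
    # index_in_q : pixel indices per q-bin (only its length, the number of q-bins, is used here)
    # quc, quce  : queues for incoming per-frame data and for the final [corf, sl, sr, tcalc] result
    # quplot     : queue feeding the live plot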
global datreg, nq, corf, n, nc, sl, sr, chn2, lind, srr, sll, oneq, rch, rcr, xnq, sst
########################################################################################
#function correlator
def correlator(reg,matrx):
global datreg, corf, sl, sr, sst
if nc<=chn:
if nc>=2:
for cindex in xnq:
sr[cindex,:n]=roll(sr[cindex,:n],1)
sr[cindex,0]=sr[cindex,1]+sst[reg][cindex]#pyfmean(datreg[reg][cindex][:,0],lind[cindex])#average intensity in q for normalization right
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
sl[cindex,:n]+=sst[reg][cindex]#pyfmean(matrx[cindex],lind[cindex])#average intensity in q for normalization left
else:
for cindex in xnq:
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
for cindex in xnq:
                corf[cindex,:n]+=dot(matrx[cindex],datreg[reg][cindex][:,:n])/lind[cindex] #calculate a product of input data and register corf(number of q's, number of channels)
datreg[reg][cindex]=concatenate((reshape(matrx[cindex],(lind[cindex],1)), datreg[reg][cindex][:,:chn-1]), axis=1) #shift register by 1
if nc/2==nc/2.:
for cindex in xnq:
matrx[cindex]=(datreg[reg][cindex][:,0]+datreg[reg][cindex][:,1])/2. #data for the next register from present
correlator2(1,matrx)
else:
for cindex in xnq:
sr[cindex,:chn]=roll(sr[cindex,:chn],1)
sr[cindex,0]=sr[cindex,1]+sst[reg][cindex]#pyfmean(datreg[reg][cindex][:,0],lind[cindex])#average intensity in q for normalization right
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
sl[cindex,:chn]+=sst[reg][cindex]#pyfmean(matrx[cindex],lind[cindex])#average intensity in q for normalization left
                corf[cindex,:chn]+=dot(matrx[cindex],datreg[reg][cindex])/lind[cindex] #calculate a product of input data and register corf(number of q's, number of channels)
datreg[reg][cindex]=concatenate((reshape(matrx[cindex],(lind[cindex],1)), datreg[reg][cindex][:,:chn-1]), axis=1) #shift register by 1
if nc/2==nc/2.:
for cindex in xnq:
matrx[cindex]=(datreg[reg][cindex][:,0]+datreg[reg][cindex][:,1])/2. #data for the next register from present
correlator2(1,matrx)
###############################################################
#correlator2(reg,matrx)
def correlator2(reg,matrx):
global datreg, corf, sl, sr, srr, sll, sst
condition=((reg+1)<rch and nc/2**(reg+1)==nc/2.**(reg+1))
corn=nc/2**reg
if 2<=corn<=chn:#corn<=chn and corn>=2:
for cindex in xnq:
srr[reg][cindex,:corn-1]=roll(srr[reg][cindex,:corn-1],1)
srr[reg][cindex,0]=srr[reg][cindex,1]+sst[reg][cindex]#pyfmean(datreg[reg][cindex][:,0],lind[cindex])#average intensity in q for normalization right
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
sll[reg][cindex,:corn-1]+=sst[reg][cindex]#pyfmean(matrx[cindex],lind[cindex])#average intensity in q for normalization left
if corn>chn:
for cindex in xnq:
srr[reg][cindex,:chn]=roll(srr[reg][cindex,:chn],1)
srr[reg][cindex,0]=srr[reg][cindex,1]+sst[reg][cindex]#pyfmean(datreg[reg][cindex][:,0],lind[cindex]) #average intensity in q for normalization right
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
sll[reg][cindex,:chn]+=sst[reg][cindex]#pyfmean(matrx[cindex],lind[cindex])#average intensity in q for normalization left
if chn2<corn<=chn:#corn<=chn and corn>chn/2:
inb=chn2*(reg+1)
ine=corn+chn2*reg
sl[:,inb:ine]=sll[reg][:,chn2:corn]
sr[:,inb:ine]=srr[reg][:,chn2:corn]
for cindex in xnq:
                corf[cindex,inb:ine]+=dot(matrx[cindex],datreg[reg][cindex][:,chn2:corn])/lind[cindex] #calculate a product of input data and register corf(number of q's, number of channels)
datreg[reg][cindex]=concatenate((reshape(matrx[cindex],(lind[cindex],1)), datreg[reg][cindex][:,:chn-1]), axis=1) #shift register by 1
if condition:#nc/2**(reg+1)==floor(nc/2**(reg+1)):
for cindex in xnq:
matrx[cindex]=(datreg[reg][cindex][:,0]+datreg[reg][cindex][:,1])/2. #data for the next register from present
reg+=1
correlator2(reg,matrx)
elif corn>chn:
inb=chn2*(reg+1)
ine=chn2*(reg+2)
sl[:,inb:ine]=sll[reg][:,chn2:chn]
sr[:,inb:ine]=srr[reg][:,chn2:chn]
for cindex in xnq:
                corf[cindex,inb:ine]+=dot(matrx[cindex], datreg[reg][cindex][:,chn2:chn])/lind[cindex] #calculate a product of input data and register corf(number of q's, number of channels)
datreg[reg][cindex]=concatenate((reshape(matrx[cindex],(lind[cindex],1)), datreg[reg][cindex][:,:chn-1]), axis=1) #shift register by 1
if condition:#nc/2**(reg+1)==floor(nc/2**(reg+1))
for cindex in xnq:
                    matrx[cindex]=(datreg[reg][cindex][:,0]+datreg[reg][cindex][:,1])/2. #data for the next register from present
reg+=1
correlator2(reg,matrx)
else:
for cindex in xnq:
sst[reg][cindex]=dot(oneq[cindex],matrx[cindex])/lind[cindex]
for cindex in xnq:
datreg[reg][cindex]=concatenate((reshape(matrx[cindex],(lind[cindex],1)), datreg[reg][cindex][:,:chn-1]), axis=1) #shift register by 1
if condition:#nc/2**(reg+1)==floor(nc/2**(reg+1)):
for cindex in xnq:
                    matrx[cindex]=(datreg[reg][cindex][:,0]+datreg[reg][cindex][:,1])/2. #data for the next register from present
reg+=1
correlator2(reg,matrx)
#####################################################################################################################
##FINISHED INITIALIZING PART OF THE CODE######
##START MAIN PART FOR CORRELATION#####
tcalc=time()
chn2=chn/2
datregt=[]
datreg=[]
nq=len(index_in_q)
xnq=xrange(nq)
rch=int(ceil(log(nfile/chn)/log(2))+1)
for ir in xrange(rch):
for iq in xnq:
datregt.append(zeros((npix_per_q[iq],chn),dtype=float32))
datreg.append(datregt)
datregt=[]
del datregt
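    # datreg[level][iq] is a (npix_per_q[iq], chn) register holding the last chn
    # (progressively averaged) frames of q-bin iq at multi-tau level `level`.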
oneq=[]
for iq in xnq:
oneq.append(ones((1,npix_per_q[iq])))
rcr=chn+chn2*ceil(log(nfile/chn)/log(2))
corf=zeros((nq,rcr),dtype=float32)
sl=zeros((nq,rcr),dtype=float32)
sr=zeros((nq,rcr),dtype=float32)
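    # corf accumulates the raw intensity products per q-bin and lag channel;
    # sl and sr accumulate the "left"/"right" mean intensities used later to normalise corf.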
sll=[]
srr=[]
sst=[]
for ir in xrange(rch):
sll.append(zeros((nq,chn),dtype=float32))
srr.append(zeros((nq,chn),dtype=float32))
sst.append(arange(nq)*0.0)
#END of declaring and initializing variables####
n=0
nc=0
lind=npix_per_q
nnfile=nfile-1
while n<nnfile:
nc=n+1
if (nc%chn==0 and iproc==0):
if plot!='no':
quplot.put([corf[[0,-1],:],sr[[0,-1],:],sl[[0,-1],:]])
correlator(0,quc.get())
n+=1
#END OF MAIN LOOP
quc.close()
tcalc=time()-tcalc
quce.put([corf,sl,sr,tcalc])
| [
"time.time"
] | [((9003, 9009), 'time.time', 'time', ([], {}), '()\n', (9007, 9009), False, 'from time import time\n'), ((10089, 10095), 'time.time', 'time', ([], {}), '()\n', (10093, 10095), False, 'from time import time\n')] |
#
# Project: Zenodote
# Filename: book_test.py
# by Ludorg.Net (Ludovic LIEVRE) 2019/05/13
# https://ludorg.net/
#
# This work is licensed under the MIT License.
# See the LICENSE file in the root directory of this source tree.
#
import unittest
from book import Book, ol_Book, gb_Book, BookType
class BookTestCase(unittest.TestCase):
def test_construct(self):
b = Book(9782847200065, "aaa", "bbb", "ccc")
self.assertEqual(b.isbn, 9782847200065)
self.assertEqual(b.author, "aaa")
self.assertEqual(b.title, "bbb")
self.assertEqual(b.cover, "ccc")
def test_construct_ol(self):
b = ol_Book(9782847200065, "aaa", "bbb", "ccc", "2019/11/09",
"{ISBN:\"9782847200065\"}", "https://openlibrary.org/books/OL12627293M/aaa")
self.assertEqual(b.isbn, 9782847200065)
self.assertEqual(b.author, "aaa")
self.assertEqual(b.title, "bbb")
self.assertEqual(b.cover, "ccc")
self.assertEqual(b.json_data, "{ISBN:\"9782847200065\"}")
self.assertEqual(
b.book_url, "https://openlibrary.org/books/OL12627293M/aaa")
self.assertEqual(b.type, BookType.OPEN_LIBRARY)
print(b)
def test_construct_gb(self):
b = gb_Book(9782847200065, "aaa", "bbb", "ccc", "2019/11/09",
"{ISBN:\"9782847200065\"}", "https://openlibrary.org/books/OL12627293M/aaa")
self.assertEqual(b.isbn, 9782847200065)
self.assertEqual(b.author, "aaa")
self.assertEqual(b.title, "bbb")
self.assertEqual(b.cover, "ccc")
self.assertEqual(b.json_data, "{ISBN:\"9782847200065\"}")
self.assertEqual(
b.book_url, "https://openlibrary.org/books/OL12627293M/aaa")
self.assertEqual(b.type, BookType.GOOGLE_BOOKS)
print(b)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"book.gb_Book",
"book.ol_Book",
"book.Book"
] | [((1853, 1868), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1866, 1868), False, 'import unittest\n'), ((381, 421), 'book.Book', 'Book', (['(9782847200065)', '"""aaa"""', '"""bbb"""', '"""ccc"""'], {}), "(9782847200065, 'aaa', 'bbb', 'ccc')\n", (385, 421), False, 'from book import Book, ol_Book, gb_Book, BookType\n'), ((640, 776), 'book.ol_Book', 'ol_Book', (['(9782847200065)', '"""aaa"""', '"""bbb"""', '"""ccc"""', '"""2019/11/09"""', '"""{ISBN:"9782847200065"}"""', '"""https://openlibrary.org/books/OL12627293M/aaa"""'], {}), '(9782847200065, \'aaa\', \'bbb\', \'ccc\', \'2019/11/09\',\n \'{ISBN:"9782847200065"}\', \'https://openlibrary.org/books/OL12627293M/aaa\')\n', (647, 776), False, 'from book import Book, ol_Book, gb_Book, BookType\n'), ((1253, 1389), 'book.gb_Book', 'gb_Book', (['(9782847200065)', '"""aaa"""', '"""bbb"""', '"""ccc"""', '"""2019/11/09"""', '"""{ISBN:"9782847200065"}"""', '"""https://openlibrary.org/books/OL12627293M/aaa"""'], {}), '(9782847200065, \'aaa\', \'bbb\', \'ccc\', \'2019/11/09\',\n \'{ISBN:"9782847200065"}\', \'https://openlibrary.org/books/OL12627293M/aaa\')\n', (1260, 1389), False, 'from book import Book, ol_Book, gb_Book, BookType\n')] |
import rumps
import AppKit
from .utility import get_data_from_base64_data_url
from .png import get_png_dimensions
def _nsimage_from_data_url(data_url, dimensions=None, template=None):
"""Take a "data:" URL of a PNG image file and return an NSImage object."""
data = get_data_from_base64_data_url(data_url)
image = AppKit.NSImage.alloc().initWithData_(data)
image.setScalesWhenResized_(True)
image.setSize_(get_png_dimensions(data) if dimensions is None else dimensions)
    if template is not None:
image.setTemplate_(template)
return image
class App(rumps.App):
@rumps.App.icon.setter
def icon(self, icon_data_url):
new_icon = _nsimage_from_data_url(icon_data_url, template=self._template) if icon_data_url is not None else None
self._icon = icon_data_url
self._icon_nsimage = new_icon
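        # _nsapp only exists once the rumps event loop is running; if the icon is set
        # before the app starts there is nothing to refresh yet, so the error is ignored.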
try:
self._nsapp.setStatusBarIcon()
except AttributeError as e:
pass
| [
"AppKit.NSImage.alloc"
] | [((329, 351), 'AppKit.NSImage.alloc', 'AppKit.NSImage.alloc', ([], {}), '()\n', (349, 351), False, 'import AppKit\n')] |
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Proxy for array of structures proxified as structure of arrays
See synchronization.md
"""
from __future__ import annotations
import logging
from typing import Dict, Iterable, Optional, Union, TYPE_CHECKING
import bpy.types as T # noqa
from mixer.blender_data import specifics
from mixer.blender_data.json_codec import serialize
from mixer.blender_data.aos_soa_proxy import SoaElement, AosElement
from mixer.blender_data.specifics import is_soable_property
from mixer.blender_data.attributes import diff_attribute, write_attribute
from mixer.blender_data.proxy import DeltaUpdate, Proxy
if TYPE_CHECKING:
from mixer.blender_data.proxy import Context, Delta
logger = logging.getLogger(__name__)
_unknown_type_attributes = {"__doc__", "__module__", "__slots__", "bl_rna", "rna_type"}
"""Attributes of bpy.types.UnknownType to not synchronize"""
@serialize
class AosProxy(Proxy):
"""
Proxy to a bpy_prop_collection of structure with at least a member that can be handled
by foreach_get()/foreach_set(), such as MeshVertices
"""
_serialize = ("_aos_length", "_data")
def __init__(self):
self._data: Dict[str, Union[AosElement, SoaElement, Delta]] = {}
self._aos_length = 0
def __len__(self):
return self._aos_length
@property
def length(self) -> int:
return self._aos_length
def load(self, bl_collection: T.bpy_prop_collection, bl_collection_property: T.Property, context: Context):
        # Must process the Soa elements, even if empty, because we may be called when a diff detects that
        # a replace is required (e.g. geometry vertex count change) and we must ensure that the soas are updated.
# This will unfortunately register and transfer empty arrays.
# TODO optimize and do not send empty arrays
self._aos_length = len(bl_collection)
item_bl_rna = bl_collection_property.fixed_type.bl_rna
if bl_collection_property.fixed_type.bl_rna is T.UnknownType.bl_rna:
            # UnknownType used in ShapeKey. Contents depend on the item that has the Key (Curve, Mesh, Lattice)
if len(self) != 0:
item = bl_collection[0]
names = set(dir(item)) - _unknown_type_attributes
for attr_name in names:
                    # Since this does not use read_attribute, push the current item by hand
context.visit_state.push(bl_collection, attr_name)
try:
self._data[attr_name] = SoaElement(attr_name).load(bl_collection, item_bl_rna, context)
finally:
context.visit_state.pop()
else:
for attr_name, bl_rna_property in context.synchronized_properties.properties(item_bl_rna):
                # Since this does not use read_attribute, push the current item by hand
context.visit_state.push(bl_collection, attr_name)
try:
if is_soable_property(bl_rna_property):
# element supported by foreach_get()/foreach_set(), e.g. MeshVertices.co
# The collection is loaded as an array.array and encoded as a binary buffer
self._data[attr_name] = SoaElement(attr_name).load(bl_collection, item_bl_rna, context)
else:
# element not supported by foreach_get()/foreach_set(), e.g. BezierSplinePoint.handle_left_type,
# which is an enum, loaded as string
# The collection is loaded as a dict, encoded as such
self._data[attr_name] = AosElement().load(bl_collection, attr_name, item_bl_rna, context)
finally:
context.visit_state.pop()
return self
def save(self, attribute: T.bpy_prop_collection, parent: T.bpy_struct, key: Union[int, str], context: Context):
"""
Save this proxy into attribute.
Args:
attribute: a collection of bpy_struct (e.g. a_Mesh_instance.vertices)
parent: the attribute that contains attribute (e.g. a Mesh instance)
key: the name of the bpy_collection in parent (e.g "vertices")
context: proxy and visit state
"""
specifics.fit_aos(attribute, self, context)
        # nothing more to save here. The buffers that contain vertices and co are serialized apart from the json
        # that contains the Mesh members. The children of this proxy are SoaElement and have no children.
        # They are updated directly by SoaElement.save_array()
for k, v in self._data.items():
write_attribute(attribute, k, v, context)
def apply(
self,
attribute: T.bpy_prop_collection,
parent: T.bpy_struct,
key: Union[int, str],
delta: Delta,
context: Context,
to_blender=True,
) -> AosProxy:
"""
Apply delta to this proxy and optionally to the Blender attribute its manages.
Args:
attribute: a collection of bpy_struct (e.g. a_Mesh_instance.vertices)
parent: the attribute that contains attribute (e.g. a Mesh instance)
key: the name of the bpy_collection in parent (e.g "vertices")
delta: the delta to apply
context: proxy and visit state
to_blender: update the managed Blender attribute in addition to this Proxy
"""
struct_update = delta.value
self._aos_length = struct_update._aos_length
specifics.fit_aos(attribute, self, context)
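        # fit_aos resizes the Blender collection so that the incoming per-member
        # arrays match the new element count before they are written back.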
for k, member_delta in struct_update._data.items():
current_value = self.data(k)
if current_value is not None:
self._data[k] = current_value.apply(None, attribute, k, member_delta, to_blender)
return self
def diff(
self, aos: T.bpy_prop_collection, key: Union[int, str], prop: T.Property, context: Context
) -> Optional[DeltaUpdate]:
""""""
# Create a proxy that will be populated with attributes differences, resulting in a hollow dict,
# as opposed as the dense self
diff = self.__class__()
diff.init(aos)
diff._aos_length = len(aos)
item_bl_rna = prop.fixed_type.bl_rna
member_names: Iterable[str] = []
if item_bl_rna is T.UnknownType.bl_rna:
            # UnknownType used in ShapeKey. Contents depend on the item that has the Key (Curve, Mesh, Lattice)
if len(self) != 0:
member_names = set(dir(aos[0])) - _unknown_type_attributes
else:
member_names = [item[0] for item in context.synchronized_properties.properties(item_bl_rna)]
for member_name in member_names:
# co, normals, ...
proxy_data = self._data.get(member_name, SoaElement(member_name))
delta = diff_attribute(aos, member_name, item_bl_rna, proxy_data, context)
if delta is not None:
diff._data[member_name] = delta
# if anything has changed, wrap the hollow proxy in a DeltaUpdate. This may be superfluous but
# it is homogenous with additions and deletions
if len(diff._data):
return DeltaUpdate(diff)
return None
| [
"logging.getLogger",
"mixer.blender_data.specifics.is_soable_property",
"mixer.blender_data.specifics.fit_aos",
"mixer.blender_data.proxy.DeltaUpdate",
"mixer.blender_data.attributes.write_attribute",
"mixer.blender_data.aos_soa_proxy.AosElement",
"mixer.blender_data.attributes.diff_attribute",
"mixer.blender_data.aos_soa_proxy.SoaElement"
] | [((1365, 1392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1382, 1392), False, 'import logging\n'), ((4998, 5041), 'mixer.blender_data.specifics.fit_aos', 'specifics.fit_aos', (['attribute', 'self', 'context'], {}), '(attribute, self, context)\n', (5015, 5041), False, 'from mixer.blender_data import specifics\n'), ((6265, 6308), 'mixer.blender_data.specifics.fit_aos', 'specifics.fit_aos', (['attribute', 'self', 'context'], {}), '(attribute, self, context)\n', (6282, 6308), False, 'from mixer.blender_data import specifics\n'), ((5368, 5409), 'mixer.blender_data.attributes.write_attribute', 'write_attribute', (['attribute', 'k', 'v', 'context'], {}), '(attribute, k, v, context)\n', (5383, 5409), False, 'from mixer.blender_data.attributes import diff_attribute, write_attribute\n'), ((7613, 7679), 'mixer.blender_data.attributes.diff_attribute', 'diff_attribute', (['aos', 'member_name', 'item_bl_rna', 'proxy_data', 'context'], {}), '(aos, member_name, item_bl_rna, proxy_data, context)\n', (7627, 7679), False, 'from mixer.blender_data.attributes import diff_attribute, write_attribute\n'), ((7969, 7986), 'mixer.blender_data.proxy.DeltaUpdate', 'DeltaUpdate', (['diff'], {}), '(diff)\n', (7980, 7986), False, 'from mixer.blender_data.proxy import DeltaUpdate, Proxy\n'), ((7568, 7591), 'mixer.blender_data.aos_soa_proxy.SoaElement', 'SoaElement', (['member_name'], {}), '(member_name)\n', (7578, 7591), False, 'from mixer.blender_data.aos_soa_proxy import SoaElement, AosElement\n'), ((3674, 3709), 'mixer.blender_data.specifics.is_soable_property', 'is_soable_property', (['bl_rna_property'], {}), '(bl_rna_property)\n', (3692, 3709), False, 'from mixer.blender_data.specifics import is_soable_property\n'), ((3216, 3237), 'mixer.blender_data.aos_soa_proxy.SoaElement', 'SoaElement', (['attr_name'], {}), '(attr_name)\n', (3226, 3237), False, 'from mixer.blender_data.aos_soa_proxy import SoaElement, AosElement\n'), ((3956, 3977), 'mixer.blender_data.aos_soa_proxy.SoaElement', 'SoaElement', (['attr_name'], {}), '(attr_name)\n', (3966, 3977), False, 'from mixer.blender_data.aos_soa_proxy import SoaElement, AosElement\n'), ((4354, 4366), 'mixer.blender_data.aos_soa_proxy.AosElement', 'AosElement', ([], {}), '()\n', (4364, 4366), False, 'from mixer.blender_data.aos_soa_proxy import SoaElement, AosElement\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import reader
import numpy as np
import pandas as pd
import tensorflow as tf
import dltk.core.modules as modules
from dltk.core import metrics as metrics
from dltk.core.io.sliding_window import SlidingWindow
import SimpleITK as sitk
from utils_dualstream import sliding_window_segmentation_inference
from dualstream_fcn_v2 import DualStreamFCN_v2
num_classes = 5
DSC_all = []
def infer(args):
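    # Sliding-window inference (64^3 patches, stride 64) with the dual-stream FCN:
    # MR cases from args.csv are fed with inputs[1] = 0 and CT cases from args.csv2
    # with inputs[1] = 1 (this flag appears to select the modality-specific stream).
    # Per-case segmentations (.npy and .nii.gz) and Dice scores go to args.output_path.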
s = tf.Session()
filenames = pd.read_csv(args.csv, dtype=str).as_matrix()
filenames2 = pd.read_csv(args.csv2, dtype=str).as_matrix()
    inputs, outputs = DualStreamFCN_v2.load(args.model_path, s)
r = reader.OrgansReader([tf.float32, tf.int32],[[None, None, None, 1], [None, None, None]]) #,name='val_queue')
for f in filenames:
x, y = r._read_sample([f], is_training=False)
sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])
# Allocate the prediction output and a counter for averaging probabilities
y_prob = np.zeros(y.shape + (num_classes,))
y_pred_count = np.zeros_like(y_prob)
for slicer in sw:
y_sw = s.run(outputs['y_prob'], feed_dict={inputs[0]: x[slicer], inputs[1]: 0}) # TODO fix inputs[1]: 0
y_prob[slicer] += y_sw
y_pred_count[slicer] += 1
y_prob /= y_pred_count
y_ = np.argmax(y_prob, axis=-1)
dscs = metrics.dice(y_, y, num_classes)
print(f[0] + '; mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:]))
+ ', '.join(['DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)]))
y_ = np.squeeze (y_, axis = 0)
pid = f[0].split('p/')[1][:2]
np.save(os.path.join(args.output_path,'Seg_MR_%s.npy' %pid), np.asanyarray(y_))
itk_prediction = sitk.GetImageFromArray(y_)
ds = np.transpose(dscs)
DSC_all.append(ds)
sitk.WriteImage(itk_prediction, os.path.join(args.output_path, '%s_segmentation.nii.gz' % (pid)))
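    # Repeat the same sliding-window inference for the CT cases, switching the
    # dual-stream flag (inputs[1]) to 1.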
for f in filenames2:
x, y = r._read_sample([f], is_training=False)
sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])
# Allocate the prediction output and a counter for averaging probabilities
y_prob = np.zeros(y.shape + (num_classes,))
y_pred_count = np.zeros_like(y_prob)
for slicer in sw:
y_sw = s.run(outputs['y_prob'], feed_dict={inputs[0]: x[slicer], inputs[1]: 1}) # TODO fix inputs[1]: 0
y_prob[slicer] += y_sw
y_pred_count[slicer] += 1
y_prob /= y_pred_count
y_ = np.argmax(y_prob, axis=-1)
dscs = metrics.dice(y_, y, num_classes)
print(f[0] + '; mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:]))
+ ', '.join(['DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)]))
y_ = np.squeeze (y_, axis = 0)
pid = f[0].split('p/')[1][:2]
np.save(os.path.join(args.output_path,'Seg_CT_%s.npy' %pid), np.asanyarray(y_))
itk_prediction = sitk.GetImageFromArray(y_)
ds = np.transpose(dscs)
DSC_all.append(ds)
sitk.WriteImage(itk_prediction, os.path.join(args.output_path, '%s_segmentation.nii.gz' % (pid)))
np.save('DSC_dualstream_v1_set1.npy', DSC_all)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Malibo inference script')
parser.add_argument('--verbose', default=False, action='store_true')
parser.add_argument('--cuda_devices', '-c', default='0')
parser.add_argument('--csv', default='val_MR_set1.csv')
parser.add_argument('--csv2', default='val_CT_set1.csv')
parser.add_argument('--model_path', '-p', default='dualstream_v1_set1/saves')
parser.add_argument('--output_path', '-o', default='dualstream_v1_set1')
args = parser.parse_args()
if args.verbose:
tf.logging.set_verbosity(tf.logging.INFO)
else:
tf.logging.set_verbosity(tf.logging.ERROR)
# GPU allocation options
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
infer(args) | [
"numpy.mean",
"numpy.transpose",
"dltk.core.metrics.dice",
"SimpleITK.GetImageFromArray",
"argparse.ArgumentParser",
"pandas.read_csv",
"tensorflow.Session",
"tensorflow.logging.set_verbosity",
"numpy.argmax",
"os.path.join",
"numpy.squeeze",
"numpy.asanyarray",
"numpy.zeros",
"dltk.core.io.sliding_window.SlidingWindow",
"reader.OrgansReader",
"numpy.zeros_like",
"numpy.save"
] | [((543, 555), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (553, 555), True, 'import tensorflow as tf\n'), ((759, 851), 'reader.OrgansReader', 'reader.OrgansReader', (['[tf.float32, tf.int32]', '[[None, None, None, 1], [None, None, None]]'], {}), '([tf.float32, tf.int32], [[None, None, None, 1], [None,\n None, None]])\n', (778, 851), False, 'import reader\n'), ((3438, 3484), 'numpy.save', 'np.save', (['"""DSC_dualstream_v1_set1.npy"""', 'DSC_all'], {}), "('DSC_dualstream_v1_set1.npy', DSC_all)\n", (3445, 3484), True, 'import numpy as np\n'), ((3527, 3589), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Malibo inference script"""'}), "(description='Malibo inference script')\n", (3550, 3589), False, 'import argparse\n'), ((973, 1037), 'dltk.core.io.sliding_window.SlidingWindow', 'SlidingWindow', (['x.shape[1:4]', '[64, 64, 64]'], {'striding': '[64, 64, 64]'}), '(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])\n', (986, 1037), False, 'from dltk.core.io.sliding_window import SlidingWindow\n'), ((1139, 1173), 'numpy.zeros', 'np.zeros', (['(y.shape + (num_classes,))'], {}), '(y.shape + (num_classes,))\n', (1147, 1173), True, 'import numpy as np\n'), ((1197, 1218), 'numpy.zeros_like', 'np.zeros_like', (['y_prob'], {}), '(y_prob)\n', (1210, 1218), True, 'import numpy as np\n'), ((1489, 1515), 'numpy.argmax', 'np.argmax', (['y_prob'], {'axis': '(-1)'}), '(y_prob, axis=-1)\n', (1498, 1515), True, 'import numpy as np\n'), ((1539, 1571), 'dltk.core.metrics.dice', 'metrics.dice', (['y_', 'y', 'num_classes'], {}), '(y_, y, num_classes)\n', (1551, 1571), True, 'from dltk.core import metrics as metrics\n'), ((1761, 1783), 'numpy.squeeze', 'np.squeeze', (['y_'], {'axis': '(0)'}), '(y_, axis=0)\n', (1771, 1783), True, 'import numpy as np\n'), ((1939, 1965), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['y_'], {}), '(y_)\n', (1961, 1965), True, 'import SimpleITK as sitk\n'), ((1980, 1998), 'numpy.transpose', 'np.transpose', (['dscs'], {}), '(dscs)\n', (1992, 1998), True, 'import numpy as np\n'), ((2246, 2310), 'dltk.core.io.sliding_window.SlidingWindow', 'SlidingWindow', (['x.shape[1:4]', '[64, 64, 64]'], {'striding': '[64, 64, 64]'}), '(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])\n', (2259, 2310), False, 'from dltk.core.io.sliding_window import SlidingWindow\n'), ((2412, 2446), 'numpy.zeros', 'np.zeros', (['(y.shape + (num_classes,))'], {}), '(y.shape + (num_classes,))\n', (2420, 2446), True, 'import numpy as np\n'), ((2470, 2491), 'numpy.zeros_like', 'np.zeros_like', (['y_prob'], {}), '(y_prob)\n', (2483, 2491), True, 'import numpy as np\n'), ((2762, 2788), 'numpy.argmax', 'np.argmax', (['y_prob'], {'axis': '(-1)'}), '(y_prob, axis=-1)\n', (2771, 2788), True, 'import numpy as np\n'), ((2812, 2844), 'dltk.core.metrics.dice', 'metrics.dice', (['y_', 'y', 'num_classes'], {}), '(y_, y, num_classes)\n', (2824, 2844), True, 'from dltk.core import metrics as metrics\n'), ((3034, 3056), 'numpy.squeeze', 'np.squeeze', (['y_'], {'axis': '(0)'}), '(y_, axis=0)\n', (3044, 3056), True, 'import numpy as np\n'), ((3212, 3238), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['y_'], {}), '(y_)\n', (3234, 3238), True, 'import SimpleITK as sitk\n'), ((3258, 3276), 'numpy.transpose', 'np.transpose', (['dscs'], {}), '(dscs)\n', (3270, 3276), True, 'import numpy as np\n'), ((4072, 4113), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4096, 4113), True, 'import tensorflow as tf\n'), 
((4132, 4174), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (4156, 4174), True, 'import tensorflow as tf\n'), ((573, 605), 'pandas.read_csv', 'pd.read_csv', (['args.csv'], {'dtype': 'str'}), '(args.csv, dtype=str)\n', (584, 605), True, 'import pandas as pd\n'), ((635, 668), 'pandas.read_csv', 'pd.read_csv', (['args.csv2'], {'dtype': 'str'}), '(args.csv2, dtype=str)\n', (646, 668), True, 'import pandas as pd\n'), ((1842, 1895), 'os.path.join', 'os.path.join', (['args.output_path', "('Seg_MR_%s.npy' % pid)"], {}), "(args.output_path, 'Seg_MR_%s.npy' % pid)\n", (1854, 1895), False, 'import os\n'), ((1895, 1912), 'numpy.asanyarray', 'np.asanyarray', (['y_'], {}), '(y_)\n', (1908, 1912), True, 'import numpy as np\n'), ((2068, 2130), 'os.path.join', 'os.path.join', (['args.output_path', "('%s_segmentation.nii.gz' % pid)"], {}), "(args.output_path, '%s_segmentation.nii.gz' % pid)\n", (2080, 2130), False, 'import os\n'), ((3115, 3168), 'os.path.join', 'os.path.join', (['args.output_path', "('Seg_CT_%s.npy' % pid)"], {}), "(args.output_path, 'Seg_CT_%s.npy' % pid)\n", (3127, 3168), False, 'import os\n'), ((3168, 3185), 'numpy.asanyarray', 'np.asanyarray', (['y_'], {}), '(y_)\n', (3181, 3185), True, 'import numpy as np\n'), ((3358, 3420), 'os.path.join', 'os.path.join', (['args.output_path', "('%s_segmentation.nii.gz' % pid)"], {}), "(args.output_path, '%s_segmentation.nii.gz' % pid)\n", (3370, 3420), False, 'import os\n'), ((1636, 1653), 'numpy.mean', 'np.mean', (['dscs[1:]'], {}), '(dscs[1:])\n', (1643, 1653), True, 'import numpy as np\n'), ((2909, 2926), 'numpy.mean', 'np.mean', (['dscs[1:]'], {}), '(dscs[1:])\n', (2916, 2926), True, 'import numpy as np\n')] |
"""
A file for creating unit tests
Run in spyder with
!python -m pytest test.py
"""
from djsetslow import ListSet, IDsSet
from unionfind import UFNaive, UFFast
import numpy as np
import matplotlib.pyplot as plt
import time
def site_example(DJSet):
"""
Test the example from the web site on all types of disjoint sets
Parameters
----------
DJSet: Class
A class type for a disjoint set. Assumed to have a constructor
with the number of elements, and the methods union(i, j) and
find(i, j)
"""
s = DJSet(10)
s.union(0, 2)
s.union(1, 8)
s.union(8, 7)
assert(not s.find(0, 3))
assert(s.find(1, 7))
s.union(1, 6)
s.union(0, 1)
assert(s.find(0, 7))
assert(not s.find(1, 9))
def test_site_example_ListSet():
site_example(ListSet)
def test_site_example_IDsSet():
site_example(IDsSet)
def test_site_example_UFNaive():
site_example(UFNaive)
def test_site_example_UFFast():
site_example(UFFast)
def do_stress_test(N, set_types):
np.random.seed(0)
# Create a random partition with at most 50 components
n_part = np.random.randint(min(N, 50))
bds = np.sort(np.random.permutation(N)[0:n_part])
n_ops = min(N*N, 40*int(np.log2(N))*N)
djsets = [s(N) for s in set_types]
times = np.zeros((n_ops, len(set_types)))
ops = np.zeros(n_ops)
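    # ops[op] records whether operation op was a union (0) or a find (1);
    # times[op, k] records the wall-clock time implementation k needed for it.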
for op in range(n_ops):
## Randomly choose two elements in the collection
i = np.random.randint(N)
j = np.random.randint(N)
ops[op] = np.random.randint(2)
if ops[op] == 0:
## Do a union on i and j for each type of disjoint set
for k, djset in enumerate(djsets):
tic = time.time()
djset.union(i, j)
times[op, k] = time.time()-tic
else:
# Do a find, and check to make sure all different data
# structures agree on the find at this point
find_res = []
for k, djset in enumerate(djsets):
tic = time.time()
find_res.append(djset.find(i, j))
times[op, k] = time.time()-tic
# Make sure they all came up with the same answer by
# forming a set and making sure it only has one element
assert(len(set(find_res)) == 1)
return times[ops == 0, :], times[ops == 1, :]
def test_stress100():
do_stress_test(100, [IDsSet, UFNaive, UFFast])
| [
"numpy.zeros",
"numpy.random.randint",
"numpy.random.seed",
"numpy.log2",
"time.time",
"numpy.random.permutation"
] | [((1039, 1056), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1053, 1056), True, 'import numpy as np\n'), ((1351, 1366), 'numpy.zeros', 'np.zeros', (['n_ops'], {}), '(n_ops)\n', (1359, 1366), True, 'import numpy as np\n'), ((1465, 1485), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (1482, 1485), True, 'import numpy as np\n'), ((1498, 1518), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (1515, 1518), True, 'import numpy as np\n'), ((1537, 1557), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1554, 1557), True, 'import numpy as np\n'), ((1177, 1201), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (1198, 1201), True, 'import numpy as np\n'), ((1719, 1730), 'time.time', 'time.time', ([], {}), '()\n', (1728, 1730), False, 'import time\n'), ((2046, 2057), 'time.time', 'time.time', ([], {}), '()\n', (2055, 2057), False, 'import time\n'), ((1241, 1251), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (1248, 1251), True, 'import numpy as np\n'), ((1796, 1807), 'time.time', 'time.time', ([], {}), '()\n', (1805, 1807), False, 'import time\n'), ((2139, 2150), 'time.time', 'time.time', ([], {}), '()\n', (2148, 2150), False, 'import time\n')] |
"""Module for the get_attempt_like_users_for_group tag."""
from django import template
register = template.Library()
@register.simple_tag
def get_attempt_like_users_for_group(attempt, group_pk):
"""
Call the get_like_users_for_group method for the attempt.
:param attempt: The attempt to get the like users for.
:param group_pk: The group pk to pass to the method call.
:return: The result of get_like_users_for_group (queryset of Users).
"""
return attempt.get_like_users_for_group(group_pk)
| [
"django.template.Library"
] | [((100, 118), 'django.template.Library', 'template.Library', ([], {}), '()\n', (116, 118), False, 'from django import template\n')] |
#!/usr/bin/env python3
from google.cloud import storage
import gzip
import html
import json
import math
import re
import requests
import sys
import time
###############################################################################
## ##
## Fetch posts from reddit and cache in a GCS bucket so f6oclock doesn't ##
## have to suffer the reddit APIs high latency on initial page-load. ##
## ##
## The refresh rate is a function of how busy r/politics is. This saves ##
## by not unnecesarily updating the GCS cache object. ##
## ##
###############################################################################
# We refresh somewhere between every REFRESH_MIN seconds and REFRESH_MAX
# seconds depending on how much is happening between refreshes. REFRESH_BASE
# controls how fast it (exponentially) moves between the min and max.
REFRESH_MIN = 30 # 30 seconds
REFRESH_MAX = 3600 # 1 hour
REFRESH_BASE = 2 # base for exponential increase/decay of refresh rate
REFRESH_UP = 1.5 # speed at which exponent moves when uping the refresh rate
REFRESH_DOWN = 1
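# REFRESH_DOWN: speed at which the exponent moves when lowering the refresh rate
# (i.e. backing off after an unproductive refresh)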
STREAK_MAX = math.ceil(math.log(REFRESH_MAX - REFRESH_MIN + 1, REFRESH_BASE))
# These numbers control how much weight is given to changes in score when
# computing the diff for two scoreboards. These values are chosen to match the
# ones in the front-end that control colouring. The goal is to approximate how
# much the color changed for each post between refreshes.
VOTES_MIN = 20
VOTES_MAX = 700
VOTES_STEP = 150 # each one of these counts for one "diff" point
# Clamp the pre-normalized delta scores between these two numbers
DELTA_MIN = 2
DELTA_MAX = 100
# The cutoff to consider a normalized delta significant
DELTA_CUTOFF = 0.5
# Get data from reddit.com
def fetch_data():
headers = {
'user-agent': 'f6oclock/1.0.0'
}
res = requests.get(
url = 'https://www.reddit.com/r/politics/rising.json',
headers = headers
)
return sorted(
res.json()['data']['children'],
key = lambda post: post['data']['ups'],
reverse = True
)
# Extracts the ids and vote numbers for the posts. This minus the links and
# titles is what is shown on f6oclock
def get_scoreboard(posts):
entries = [get_scoreboard_entry(post) for post in posts]
return {
"ids": [post[0] for post in entries],
"votes": [post[1] for post in entries]
}
def get_scoreboard_entry(post):
data = post['data']
return (data['id'], data['ups'])
# Exponentially move between REFRESH_MIN/REFRESH_MAX depending on recent
# history of refresh productivity
def get_next_refresh(streak):
refresh = REFRESH_MIN + REFRESH_BASE**abs(streak) - 1
return clamp(REFRESH_MIN, REFRESH_MAX, refresh)
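# e.g. streak=0 -> 30 s, streak=5 -> 30 + 2**5 - 1 = 61 s, and streak=STREAK_MAX is capped at REFRESH_MAX (3600 s)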
# Compute a delta between two scoreboards. This is intended to be a qualitative
# measure of the value of this cache refresh to the f6oclock user.
def compute_delta(prev, cur):
delta = 0
for idx, cur_id in enumerate(cur["ids"]):
cur_votes = cur["votes"][idx]
cur_votes_norm = normalize_votes(cur_votes)
try:
prev_idx = prev["ids"].index(cur_id) # O(n^2)
except ValueError:
prev_idx = None
        if prev_idx is None:
            dvotes_norm = cur_votes_norm
            # new post: positional change measured from the bottom of the previous list
            didx = max(0, len(prev["ids"]) - idx)
else:
prev_votes = prev["votes"][prev_idx]
prev_votes_norm = normalize_votes(prev_votes)
dvotes_norm = abs(prev_votes_norm - cur_votes_norm)
didx = abs(idx - prev_idx)
delta += didx
delta += dvotes_norm
print('delta = ' + str(delta))
delta_norm = normalize(DELTA_MIN, DELTA_MAX, delta)
print('delta_norm = ' + str(delta_norm))
delta_smooth = smoothstep(delta_norm)
print('delta_smooth = ' + str(delta_smooth))
return delta_smooth
# Maps [0, inf] to an integer in [0, floor((VOTES_MAX-VOTES_MIN)/VOTES_STEP)]
def normalize_votes(votes):
return int(normalize(VOTES_MIN, VOTES_MAX, float(votes))*VOTES_STEP)
# Map [-inf, inf] to [xmin, xmax] with a clamp and then to [0, 1] linearly
def normalize(xmin, xmax, x):
clamped = clamp(xmin, xmax, x)
return float(clamped - xmin)/(xmax - xmin)
# Map [-inf, inf] to [xmin, xmax] with thresholding
def clamp(xmin, xmax, x):
return max(min(x, xmax), xmin)
# Hermite polynomial/s-curve. Maps from [0, 1] to [0, 1] smoothly
def smoothstep(x):
if x <= 0:
return 0
if x >= 1:
return 1
return (3 -2*x)*x*x
def sign(x):
if x < 0:
return -1
return 1
# Store the raw reddit response into GCS
def set_cache(bucket, res):
blob = storage.Blob('index.html', bucket)
# TODO between REFRESH_MIN and something else cache lifetime?
blob.cache_control = 'public, max-age=' + str(REFRESH_MIN)
blob.content_encoding = 'gzip'
data = bytes(res, 'utf-8')
compressed = gzip.compress(data)
blob.upload_from_string(
data = compressed,
content_type = 'text/html',
)
print('cached')
def render(posts):
with open('../index.html', 'r') as f:
template = f.read()
# This knows about the Elm type Post
posts = [{
'id': post['data']['id'],
'upvotes': post['data']['ups'],
'createdUtc': post['data']['created_utc'],
'domain': post['data']['domain'],
'url': post['data']['url'],
# TODO: if elm did unescape @ render wouldn't need to do it here.
'title': html.unescape(post['data']['title']),
# TODO: elm should do this at render time
'permalink': 'https://www.reddit.com' + post['data']['permalink'],
} for post in posts]
data = json.dumps(posts, separators=(',', ':'))
return template.replace(
'const cache = { posts: [] };',
'const cache={posts:'+ data + '};'
)
###############################################################################
###############################################################################
###############################################################################
client = storage.Client(project='f6oclock')
bucket = client.get_bucket('www.f6oclock.com')
# We don't load what's actually in the cache on boot so we will always do one
# store.
prev = {
"votes": [],
"ids": [],
}
# start off with a sleep so that if this program restarts a bunch it doesn't
# refresh faster than the minimum
next_refresh = REFRESH_MIN
# the first iteration will have a big enough delta so streak will go to 0.
streak = -1
while True:
time.sleep(next_refresh)
posts = fetch_data()
cur = get_scoreboard(posts)
delta = compute_delta(prev, cur)
    # If the difference was negligible we don't store the result and we also
    # don't update prev (that way we are always diffing against what was cached,
    # unless this is on boot)
if delta > DELTA_CUTOFF:
streak -= REFRESH_UP
res = render(posts)
set_cache(bucket, res)
prev = cur
else:
streak += REFRESH_DOWN
streak = clamp(0, STREAK_MAX, streak)
next_refresh = get_next_refresh(streak)
print('streak = ' + str(streak))
print('next_refresh = ' + str(next_refresh))
print('')
| [
"google.cloud.storage.Client",
"json.dumps",
"html.unescape",
"requests.get",
"math.log",
"time.sleep",
"google.cloud.storage.Blob",
"gzip.compress"
] | [((6318, 6352), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': '"""f6oclock"""'}), "(project='f6oclock')\n", (6332, 6352), False, 'from google.cloud import storage\n'), ((1341, 1394), 'math.log', 'math.log', (['(REFRESH_MAX - REFRESH_MIN + 1)', 'REFRESH_BASE'], {}), '(REFRESH_MAX - REFRESH_MIN + 1, REFRESH_BASE)\n', (1349, 1394), False, 'import math\n'), ((2073, 2160), 'requests.get', 'requests.get', ([], {'url': '"""https://www.reddit.com/r/politics/rising.json"""', 'headers': 'headers'}), "(url='https://www.reddit.com/r/politics/rising.json', headers=\n headers)\n", (2085, 2160), False, 'import requests\n'), ((4873, 4907), 'google.cloud.storage.Blob', 'storage.Blob', (['"""index.html"""', 'bucket'], {}), "('index.html', bucket)\n", (4885, 4907), False, 'from google.cloud import storage\n'), ((5123, 5142), 'gzip.compress', 'gzip.compress', (['data'], {}), '(data)\n', (5136, 5142), False, 'import gzip\n'), ((5907, 5947), 'json.dumps', 'json.dumps', (['posts'], {'separators': "(',', ':')"}), "(posts, separators=(',', ':'))\n", (5917, 5947), False, 'import json\n'), ((6775, 6799), 'time.sleep', 'time.sleep', (['next_refresh'], {}), '(next_refresh)\n', (6785, 6799), False, 'import time\n'), ((5705, 5741), 'html.unescape', 'html.unescape', (["post['data']['title']"], {}), "(post['data']['title'])\n", (5718, 5741), False, 'import html\n')] |
"""
The MIT License (MIT)
Copyright (c) 2019 <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy as scp
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from mutils.image import normalize
if __name__ == '__main__':
logging.info("Hello World.")
| [
"logging.basicConfig",
"logging.info"
] | [((246, 357), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.INFO, stream=sys.stdout)\n", (265, 357), False, 'import logging\n'), ((462, 490), 'logging.info', 'logging.info', (['"""Hello World."""'], {}), "('Hello World.')\n", (474, 490), False, 'import logging\n')] |
from cumodoro.component.window import Window
from cumodoro.component.frame import Frame
from cumodoro.component.input import Input
from cumodoro.component.taskeditor import Taskeditor
from cumodoro.component.sloteditor import Sloteditor
import cumodoro.globals as globals
import cumodoro.config as config
import curses
import types
import logging
log = logging.getLogger('cumodoro')
class Configeditor(Window):
def __init__(self):
super().__init__()
self.stack = []
self.current_select = -1
self.position = [0,0]
self.prev_select = 0
def set_position(self,x,y):
self.position = [x,y]
def init(self):
x,y = self.position
# tasks
window = Window()
window.name = "tasks"
frame = Frame()
frame.name = "tasks_frame"
frame.set_size(10,1)
frame.set_position(x,y)
def update_patch(target):
target.erase()
options = None
if target.selected:
options = curses.A_BOLD
if target.selected_title:
options = options | curses.color_pair(config.COLOR_FOCUS)
target.addstr(0,0,"Tasks",options)
frame.update = types.MethodType(update_patch,frame)
window.add_frame(frame.name,frame)
frame = Taskeditor()
frame.name = "taskeditor_frame"
frame.set_position(x,y+2)
window.add_frame(frame.name,frame)
window.input_frame = frame
self.stack.append(window)
self.add_frame(window.name,window)
# pomodoro time
window = Window()
window.name = "pomodoro_time"
x,y = self.position
frame = Frame()
frame.name = "pomodoro_time_frame"
frame.set_size(10,1)
frame.set_position(40,y)
def update_patch(target):
target.erase()
options = None
if target.selected:
options = curses.A_BOLD
if target.selected_title:
options = options | curses.color_pair(config.COLOR_FOCUS)
target.addstr(0,0,"Pomodoro",options)
frame.update = types.MethodType(update_patch,frame)
window.add_frame("pomodoro_time_frame",frame)
frame = Input()
frame.name = "pomodoro_time_edit_frame"
frame.init("time")
frame.variable = "TIME_POMODORO"
frame.value = config.TIME_POMODORO
frame.set_position(40+14,y)
window.add_frame("pomodoro_time_edit_frame",frame)
window.input_frame = frame
self.stack.append(window)
self.add_frame(window.name,window)
# break time
window = Window()
window.name = "break_time"
frame = Frame()
frame.name = "break_time_frame"
frame.set_size(10,1)
frame.set_position(40,y+1)
def update_patch(target):
target.erase()
options = None
if target.selected:
options = curses.A_BOLD
if target.selected_title:
options = options | curses.color_pair(config.COLOR_FOCUS)
target.addstr(0,0,"Break",options)
frame.update = types.MethodType(update_patch,frame)
window.add_frame("break_time_frame",frame)
frame = Input()
frame.name = "break_time_edit_frame"
frame.init("time")
frame.variable = "TIME_BREAK"
frame.value = config.TIME_BREAK
frame.set_position(40+14,y+1)
window.add_frame("break_time_edit_frame",frame)
window.input_frame = frame
self.stack.append(window)
self.add_frame(window.name,window)
# slots
window = Window()
window.name = "slot_time"
frame = Frame()
frame.name = "slot_frame"
frame.variable = "TIME_SLOT"
frame.set_size(10,1)
frame.set_position(40,y+2)
def update_patch(target):
target.erase()
options = None
if target.selected:
options = curses.A_BOLD
if target.selected_title:
options = options | curses.color_pair(config.COLOR_FOCUS)
target.addstr(0,0,"Slots",options)
frame.update = types.MethodType(update_patch,frame)
window.add_frame("slot_frame",frame)
frame = Sloteditor()
frame.name = "slot_edit_frame"
frame.set_position(40+3,y+4)
window.add_frame(frame.name,frame)
window.input_frame = frame
self.stack.append(window)
self.add_frame(window.name,window)
# initial state
for w in self.stack:
w.set_variable("selected",False)
w.set_variable("selected_title",False)
self.select(0)
self.set_variable("selected_input",False)
def update(self):
super().update()
def refresh(self):
super().refresh()
def select(self,n = None):
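        # Move the highlight to window n (or back to the previously selected window
        # when called without an argument) and remember the choice for next time.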
if n == None:
n = self.prev_select
if n < len(self.stack):
pn = self.current_select
self.current_select = n
if pn >= 0:
self.stack[pn].set_variable("selected",False)
self.stack[pn].set_variable("selected_title",False)
if n >= 0:
self.stack[n].set_variable("selected",True)
self.stack[n].set_variable("selected_title",True)
self.prev_select = n
def create(self):
for w in self.stack:
w.create()
def handle_input(self):
mb = globals.messageboard
window = self.stack[self.current_select]
window.set_variable("selected_title",False)
window.set_variable("selected_input",True)
window.update()
window.refresh()
if window.name == "pomodoro_time":
value = window.input_frame.value
window.input_frame.handle_input()
nvalue = window.input_frame.value
if value != nvalue:
config.TIME_POMODORO = nvalue
globals.database.update_config("TIME_POMODORO",nvalue)
config.init()
elif window.name == "break_time":
value = window.input_frame.value
window.input_frame.handle_input()
nvalue = window.input_frame.value
if value != nvalue:
config.TIME_BREAK = nvalue
globals.database.update_config("TIME_BREAK",nvalue)
config.init()
else:
window.input_frame.handle_input()
window.set_variable("selected_title",True)
window.set_variable("selected_input",False)
window.update()
window.refresh()
def down(self):
s = self.current_select + 1
if s != 1 and s < 4:
self.select(s)
def up(self):
s = self.current_select - 1
        if s >= 1:
self.select(s)
def left(self):
if self.current_select > 0:
self.select(0)
def right(self):
if self.current_select == 0:
self.select(1)
| [
"logging.getLogger",
"curses.color_pair",
"cumodoro.component.frame.Frame",
"cumodoro.component.taskeditor.Taskeditor",
"cumodoro.component.input.Input",
"cumodoro.config.init",
"cumodoro.globals.database.update_config",
"cumodoro.component.sloteditor.Sloteditor",
"cumodoro.component.window.Window",
"types.MethodType"
] | [((354, 383), 'logging.getLogger', 'logging.getLogger', (['"""cumodoro"""'], {}), "('cumodoro')\n", (371, 383), False, 'import logging\n'), ((726, 734), 'cumodoro.component.window.Window', 'Window', ([], {}), '()\n', (732, 734), False, 'from cumodoro.component.window import Window\n'), ((782, 789), 'cumodoro.component.frame.Frame', 'Frame', ([], {}), '()\n', (787, 789), False, 'from cumodoro.component.frame import Frame\n'), ((1236, 1273), 'types.MethodType', 'types.MethodType', (['update_patch', 'frame'], {}), '(update_patch, frame)\n', (1252, 1273), False, 'import types\n'), ((1333, 1345), 'cumodoro.component.taskeditor.Taskeditor', 'Taskeditor', ([], {}), '()\n', (1343, 1345), False, 'from cumodoro.component.taskeditor import Taskeditor\n'), ((1618, 1626), 'cumodoro.component.window.Window', 'Window', ([], {}), '()\n', (1624, 1626), False, 'from cumodoro.component.window import Window\n'), ((1710, 1717), 'cumodoro.component.frame.Frame', 'Frame', ([], {}), '()\n', (1715, 1717), False, 'from cumodoro.component.frame import Frame\n'), ((2176, 2213), 'types.MethodType', 'types.MethodType', (['update_patch', 'frame'], {}), '(update_patch, frame)\n', (2192, 2213), False, 'import types\n'), ((2284, 2291), 'cumodoro.component.input.Input', 'Input', ([], {}), '()\n', (2289, 2291), False, 'from cumodoro.component.input import Input\n'), ((2698, 2706), 'cumodoro.component.window.Window', 'Window', ([], {}), '()\n', (2704, 2706), False, 'from cumodoro.component.window import Window\n'), ((2759, 2766), 'cumodoro.component.frame.Frame', 'Frame', ([], {}), '()\n', (2764, 2766), False, 'from cumodoro.component.frame import Frame\n'), ((3221, 3258), 'types.MethodType', 'types.MethodType', (['update_patch', 'frame'], {}), '(update_patch, frame)\n', (3237, 3258), False, 'import types\n'), ((3326, 3333), 'cumodoro.component.input.Input', 'Input', ([], {}), '()\n', (3331, 3333), False, 'from cumodoro.component.input import Input\n'), ((3725, 3733), 'cumodoro.component.window.Window', 'Window', ([], {}), '()\n', (3731, 3733), False, 'from cumodoro.component.window import Window\n'), ((3785, 3792), 'cumodoro.component.frame.Frame', 'Frame', ([], {}), '()\n', (3790, 3792), False, 'from cumodoro.component.frame import Frame\n'), ((4278, 4315), 'types.MethodType', 'types.MethodType', (['update_patch', 'frame'], {}), '(update_patch, frame)\n', (4294, 4315), False, 'import types\n'), ((4377, 4389), 'cumodoro.component.sloteditor.Sloteditor', 'Sloteditor', ([], {}), '()\n', (4387, 4389), False, 'from cumodoro.component.sloteditor import Sloteditor\n'), ((6092, 6147), 'cumodoro.globals.database.update_config', 'globals.database.update_config', (['"""TIME_POMODORO"""', 'nvalue'], {}), "('TIME_POMODORO', nvalue)\n", (6122, 6147), True, 'import cumodoro.globals as globals\n'), ((6163, 6176), 'cumodoro.config.init', 'config.init', ([], {}), '()\n', (6174, 6176), True, 'import cumodoro.config as config\n'), ((6447, 6499), 'cumodoro.globals.database.update_config', 'globals.database.update_config', (['"""TIME_BREAK"""', 'nvalue'], {}), "('TIME_BREAK', nvalue)\n", (6477, 6499), True, 'import cumodoro.globals as globals\n'), ((6515, 6528), 'cumodoro.config.init', 'config.init', ([], {}), '()\n', (6526, 6528), True, 'import cumodoro.config as config\n'), ((1128, 1165), 'curses.color_pair', 'curses.color_pair', (['config.COLOR_FOCUS'], {}), '(config.COLOR_FOCUS)\n', (1145, 1165), False, 'import curses\n'), ((2065, 2102), 'curses.color_pair', 'curses.color_pair', (['config.COLOR_FOCUS'], {}), '(config.COLOR_FOCUS)\n', 
(2082, 2102), False, 'import curses\n'), ((3113, 3150), 'curses.color_pair', 'curses.color_pair', (['config.COLOR_FOCUS'], {}), '(config.COLOR_FOCUS)\n', (3130, 3150), False, 'import curses\n'), ((4170, 4207), 'curses.color_pair', 'curses.color_pair', (['config.COLOR_FOCUS'], {}), '(config.COLOR_FOCUS)\n', (4187, 4207), False, 'import curses\n')] |
#!/usr/bin/env python3
# Example of the use of ROS parameters.
#
# parameters.py
#
# <NAME>
#
# This shows how to get and set parameters in ROS.
import rospy
import sys
if __name__ == '__main__':
# Initialize the node
rospy.init_node('parameters', argv=sys.argv)
# Set a parameter in a relative namespace
rospy.set_param('foo', 3)
# Setting a parameter with a dictionary value.
d = {'p': 2, 'i': 3, 'd': 4}
rospy.set_param('gains', d)
# Getting a parameter. If the parameter doesn't exist, ROS will raise a KeyError.
rospy.loginfo('foo: {0}'.format(rospy.get_param('foo')))
# Getting a parameter, with a default value.
rospy.loginfo('bar: {0}'.format(rospy.get_param('bar', 123)))
| [
"rospy.init_node",
"rospy.get_param",
"rospy.set_param"
] | [((225, 269), 'rospy.init_node', 'rospy.init_node', (['"""parameters"""'], {'argv': 'sys.argv'}), "('parameters', argv=sys.argv)\n", (240, 269), False, 'import rospy\n'), ((315, 340), 'rospy.set_param', 'rospy.set_param', (['"""foo"""', '(3)'], {}), "('foo', 3)\n", (330, 340), False, 'import rospy\n'), ((421, 448), 'rospy.set_param', 'rospy.set_param', (['"""gains"""', 'd'], {}), "('gains', d)\n", (436, 448), False, 'import rospy\n'), ((567, 589), 'rospy.get_param', 'rospy.get_param', (['"""foo"""'], {}), "('foo')\n", (582, 589), False, 'import rospy\n'), ((672, 699), 'rospy.get_param', 'rospy.get_param', (['"""bar"""', '(123)'], {}), "('bar', 123)\n", (687, 699), False, 'import rospy\n')] |
# Standard Initialization Stuff for PyTorch, Matplotlib, ...
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import torch.optim as optim
import numpy as np
import scipy
from sklearn import preprocessing
import torch
import matplotlib.pyplot as plt
import time
import torch.nn as nn
from torch.nn.functional import gumbel_softmax
import random
from torch.distributions.categorical import Categorical
from model_input_embedding import TextData, network_state, RNN
##### PARAMETERS
# Continue training with saved model
continue_training = False
# Training data
data_file = 'german_u.txt' #'/home/kai/bernstein/corpora/wikipedia/scripts/dewiki.txt'
save_name = 'HM_RNN_GERMAN'
# Length of unrolled strings for training
seq_len = 300
# Learning rate for ADAM optimiser
lr_start = 3e-4
lr_end = 1e-5
lr_tau = 1e-4
# Number of parallel training examples (i.e. size of the mini-batches)
n_proc = 100
# Dimensionality of each hidden layer
hidden_size = 300
#Number of optimization steps
n_steps = 5000000000
# Clip gradient?
clip_grad = True
clip_norm = 1.0
# Parameters of Gumbel-Softmax (i.e. differentiable approximation of Bernoulli-Distribution)
# True: Use actual samples from Bernoulli for the forward pass, use smooth approximation for the backward pass
# False: Use smooth approximation for the forward and the backward pass
gumbel_hard = True
# Smoothness parameter: Has to be positive, larger == smoother
theta_start = 1.0
theta_end = 0.01
theta_tau = 1e-4
# Which GPU to use
torch.cuda.set_device(0)
##### Manages the reading of a text file, the conversion between
##### characters, labels and one-hot encodings
##### and the ***sequential*** reading of sliced mini-batches
data = TextData(path=data_file)
##### Just some tests of the data-management class
# shape of the data
print(data.data.shape)
# get some example batches
example, example_l, resets = data.slices(200,10)
example2, example2_l, resets = data.slices(200,10)
example3, example3_l, resets = data.slices(200,10)
# check if the successive reading over batches works by printing the
# first sample of three successive training batches
print(example.shape)
data.print_str(example[:,0,:])
print(example2.shape)
data.print_str(example2[:,0,:])
print(example3.shape)
data.print_str(example3[:,0,:])
# initialize network state and connectivity
input_size = data.n_chars
output_size = input_size
state = network_state(hidden_size, n_proc)
rnn = RNN(hidden_size, data.n_chars, n_proc, gumbel_hard)
# initialize weights with Gaussian distribution with
# mean zero and std dev 0.05
rnn.weight_init(0.0, 0.05)
# if a network was saved before, load it and continue training it
if continue_training == True:
rnn.load_state_dict(torch.load(save_name + '.pkl'))
# functional form of objective function: Categorical Negative Log Likelihood
criterion = nn.NLLLoss().cuda()
# optimizer: Adam (https://arxiv.org/abs/1412.6980)
optimizer = optim.Adam(rnn.parameters(), lr=lr_start, betas=(0.5, 0.999))
# perform a training step on a training batch
def train(data, n_proc, state, theta, print_segmentation = True):
# set rnn to training mode (in case some previous code called rnn.eval() )
rnn.train()
# get a training batch
batch, batch_l, resets = data.slices(seq_len, n_proc)
# reset hidden states, if some of the reading processes encountered the end
# of the training data
state.reset_hidden(resets)
# reset the gradients
rnn.zero_grad()
# this will be the cost function, that we take the derivative of
cost = 0
# some code to output the activation of the gating units b1, b2, b3
if print_segmentation == True:
outputs1 = []
outputs2 = []
outputs3 = []
# create a prediction of the first characters from the current states of the
# hidden layers
output = rnn.first_step(state)
# calculate the negative log-likelihood (NLL) of the first characters under this prediction
l = criterion(output, batch_l[0])
# add this NLL to the cost
cost += l
# iterate over all but the last character in the training data
for i in range(seq_len - 1):
# propagate the rnn, given the current character and state
# and the current "temperature" parameter theta of the gumbel-softmax
# distribution (the lower, the closer it is to a true bernoulli distribution)
# returns a predictive probability distribution for the next character
output, state = rnn(batch[i], state, theta)
# add the nll of the next character under the predictive distribution
l = criterion(output, batch_l[i+1])
# add this to the cost
cost += l
# some output code to examine the activity of the gating units
if print_segmentation == True:
next_char = data.le.inverse_transform(np.argmax(batch[i,0].cpu().numpy())).squeeze()
outputs1.append(next_char)
outputs2.append(next_char)
outputs3.append(next_char)
if state.b1[0,0].detach().cpu().numpy() > 0.5:
outputs1.append('|')
if state.b1[0,0].detach().cpu().numpy() > 0.5 and state.b2[0,0].detach().cpu().numpy() > 0.5:
outputs2.append('|')
if state.b1[0,0].detach().cpu().numpy() > 0.5 and state.b2[0,0].detach().cpu().numpy() > 0.5 and state.b3[0,0].detach().cpu().numpy() > 0.5:
outputs3.append('|')
# Update hidden representations with the last letter in the sequence!!!!
# This is important, since the first letter of the next training batch
# will be predicted from these hidden states.
output, state = rnn(batch[seq_len - 1], state, theta)
# Calculate the derivative of the sum of all the NLLs w.r.t. the
# parameters of the RNN
cost.backward()
# In RNNs, sometimes gradients can grow exponentially. So restrict the
# norm of the gradient, just to be sure.
if clip_grad:
torch.nn.utils.clip_grad_norm_(rnn.parameters(), clip_norm)
# Change the parameters of the RNN, given their current values, gradients
# and some internal variables kept by the optimizer
optimizer.step()
# Detach the current state, so that the gradients of the next training batch
# will not be backpropagated through this state.
state.detach()
# Print output segmented by the activity of the gating units
if print_segmentation == True:
print('\nTRAINING DATA Segmented according to b1:\n')
print(''.join(outputs1))
print('\nTRAINING DATA Segmented according to b2:\n')
print(''.join(outputs2))
print('\nTRAINING DATA Segmented according to b3:\n')
print(''.join(outputs3))
# Return the numerical value of the cost function,
# and the current hidden state, to be used as initial state for the next batch
return cost.detach().cpu().numpy(), state
# very similar to the training function, only that instead of evaluating the
# negative log-likelihood of the next letter from the training data,
# the next letter is just sampled from the predictive density
# Note that the initial hidden state will be conditioned by a starting string,
# which by default is 'Hallo'. An empty string should be possible, but not
# tested yet.
def sample(start_letters = 'Hallo', theta = 0.1):
# We don't need PyTorch to keep track of relations required to compute
# gradients later on
with torch.no_grad():
# Also the rnn needs some less book-keeping if we tell it, that we
# don't want to optimize it
rnn.eval()
# Initialize a single hidden state
sampling_state = network_state(hidden_size, 1)
if len(start_letters) > 0:
start_list = data.le.transform(list(start_letters)).reshape(-1,1)
start_tensor = torch.FloatTensor(data.ohe.transform(start_list).toarray()).cuda()
for i in range(len(start_letters)):
output, sampling_state = rnn(start_tensor[i].unsqueeze(0), sampling_state, theta)
else:
output = rnn.first_step(sampling_state)
outputs1 = []
outputs2 = []
outputs3 = []
# Generate seq_len samples
for i in range(seq_len):
# Create a categorical distribution with probabilities given by the predictive distribution
dist = Categorical(logits = output)
# Sample a character from that distribution
new_input = dist.sample()
# Convert the one-hot encoding to a standard character, to be able to print it
next_char = data.le.inverse_transform(new_input.cpu().numpy()[0]).squeeze()
# Append the new character to all output strings
outputs1.append(next_char)
outputs2.append(next_char)
outputs3.append(next_char)
# Add a segmentation mark, when the fastest gating unit b1 is active
if sampling_state.b1.cpu().numpy()[0,0] > 0.5:
outputs1.append('|')
# Add a segmentation mark, when the gating units b1 and b2 are active
if sampling_state.b1.cpu().numpy()[0,0] > 0.5 and sampling_state.b2.cpu().numpy()[0,0] > 0.5:
outputs2.append('|')
# Add a segmentation mark, when all gating units b1, b2, b3 are active
if sampling_state.b1.cpu().numpy()[0,0] > 0.5 and sampling_state.b2.cpu().numpy()[0,0] > 0.5 and sampling_state.b3.cpu().numpy()[0,0] > 0.5:
outputs3.append('|')
# Send the new character also to the GPU to generate the next prediction
x = data.ohe.transform(new_input.cpu().numpy().reshape(-1,1))
new_input = torch.FloatTensor(x.todense()).cuda()
# Propagate the RNN with the new character
output, sampling_state = rnn(new_input, sampling_state, theta)
# Print output strings
print('\nSAMPLES Segmented according to b1:\n')
print(''.join(outputs1))
print('\nSAMPLES Segmented according to b2:\n')
print(''.join(outputs2))
print('\nSAMPLES Segmented according to b3:\n')
print(''.join(outputs3))
# Initialize the temperature of the gumbel-softmax
# higher == smoother approximation of true Bernoulli distribution
theta = theta_start
# Iterate over training steps
for i in range(n_steps):
# Anneal learning rate exponentially from lr_start to lr_end with
# time constant lr_tau
lr = (lr_start - lr_end)*np.exp(-i*lr_tau) + lr_end
# Set the current learning rate
optimizer.param_groups[0]['lr'] = lr
# Anneal the current temperature of the gumbel softmay exponentially
# from theta_start to theta_end with time constant theta_tau
theta = (theta_start - theta_end)*np.exp(-i*theta_tau) + theta_end
# Print segmented input every 100th iteration
if i % 100 == 0:
cost, state = train(data,n_proc,state,theta)
else:
cost, state = train(data,n_proc,state,theta,print_segmentation=False)
# Generate a sample every 100th iteration
if i % 100 == 0:
sample()
print("crit: %f" % (100.0*cost/seq_len) )
print('theta: %f' % theta)
# Save the parameters of the RNN every 1000th iteration
if i % 1000 == 0:
torch.save(rnn.state_dict(), save_name + '.pkl')
# Save the current cost at every iteration
if i == 0:
with open('./plots/log_' + save_name + '.txt', "w") as myfile:
myfile.write("%f\n" % (100.0*cost/seq_len) )
else:
with open('./plots/log_' + save_name + '.txt', "a") as myfile:
myfile.write("%f\n" % (100.0*cost/seq_len) )
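# Hedged sketch (not executed during training): reloading the checkpoint written by
# torch.save above and drawing samples without any further optimisation.
def _sample_from_checkpoint():
    rnn.load_state_dict(torch.load(save_name + '.pkl'))
    rnn.eval()
    sample(start_letters='Hallo', theta=theta_end)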
| [
"model_input_embedding.TextData",
"torch.load",
"io.open",
"numpy.exp",
"model_input_embedding.RNN",
"torch.nn.NLLLoss",
"torch.no_grad",
"model_input_embedding.network_state",
"torch.cuda.set_device",
"torch.distributions.categorical.Categorical"
] | [((1595, 1619), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (1616, 1619), False, 'import torch\n'), ((1803, 1827), 'model_input_embedding.TextData', 'TextData', ([], {'path': 'data_file'}), '(path=data_file)\n', (1811, 1827), False, 'from model_input_embedding import TextData, network_state, RNN\n'), ((2490, 2524), 'model_input_embedding.network_state', 'network_state', (['hidden_size', 'n_proc'], {}), '(hidden_size, n_proc)\n', (2503, 2524), False, 'from model_input_embedding import TextData, network_state, RNN\n'), ((2532, 2583), 'model_input_embedding.RNN', 'RNN', (['hidden_size', 'data.n_chars', 'n_proc', 'gumbel_hard'], {}), '(hidden_size, data.n_chars, n_proc, gumbel_hard)\n', (2535, 2583), False, 'from model_input_embedding import TextData, network_state, RNN\n'), ((2816, 2846), 'torch.load', 'torch.load', (["(save_name + '.pkl')"], {}), "(save_name + '.pkl')\n", (2826, 2846), False, 'import torch\n'), ((2938, 2950), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2948, 2950), True, 'import torch.nn as nn\n'), ((7680, 7695), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7693, 7695), False, 'import torch\n'), ((7895, 7924), 'model_input_embedding.network_state', 'network_state', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (7908, 7924), False, 'from model_input_embedding import TextData, network_state, RNN\n'), ((8627, 8653), 'torch.distributions.categorical.Categorical', 'Categorical', ([], {'logits': 'output'}), '(logits=output)\n', (8638, 8653), False, 'from torch.distributions.categorical import Categorical\n'), ((10880, 10899), 'numpy.exp', 'np.exp', (['(-i * lr_tau)'], {}), '(-i * lr_tau)\n', (10886, 10899), True, 'import numpy as np\n'), ((11170, 11192), 'numpy.exp', 'np.exp', (['(-i * theta_tau)'], {}), '(-i * theta_tau)\n', (11176, 11192), True, 'import numpy as np\n'), ((11830, 11876), 'io.open', 'open', (["('./plots/log_' + save_name + '.txt')", '"""w"""'], {}), "('./plots/log_' + save_name + '.txt', 'w')\n", (11834, 11876), False, 'from io import open\n'), ((11968, 12014), 'io.open', 'open', (["('./plots/log_' + save_name + '.txt')", '"""a"""'], {}), "('./plots/log_' + save_name + '.txt', 'a')\n", (11972, 12014), False, 'from io import open\n')] |
import api
api.pressed_right()
| [
"api.pressed_right"
] | [((12, 31), 'api.pressed_right', 'api.pressed_right', ([], {}), '()\n', (29, 31), False, 'import api\n')] |
#!/usr/bin/env python3
import sys
import time
from pymcumgr.mgmt.os_cmd import registerOSCommandArguments, Reset, Echo
from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload
from pymcumgr.mgmt.header import MgmtHeader, MgmtOp, MgmtGroup, MgmtErr
from pymcumgr.mgmt.mcuboot_image import MCUBootImage, print_hex as print_hex2
from pymcumgr.transport import Transport, TransportBLE
from argparse import ArgumentParser, ArgumentTypeError
_usage='''
%(prog)s [options]'''
def cmd_finished(transport, err, response):
print(transport)
if err:
print('err:', str(err))
else:
print(response)
transport.close()
def conntype(arg):
if not arg in Transport.transport_types():
raise ArgumentTypeError(f'Supported conntypes: {Transport.transport_types()}')
return arg
def main():
parser = ArgumentParser(
description='%(prog)s helps you manage remote devices',
usage=_usage,
epilog='Use "%(prog)s [command] --help" for more information about a command.'
)
# parser.add_argument('cmd', default=None)
parser.add_argument('--connstring', metavar='string', type=str, default=None,
help='Connection key-value pairs to use instead of using the profile\'s connstring'
)
#only ble for now, set as default
parser.add_argument('--conntype', metavar='string', type=conntype, default=TransportBLE.conntype(),
help='Connection type to use instead of using the profile\'s type'
)
parser.add_argument('-i', '--hci', metavar='int', type=int, default=0,
help='HCI index for the controller on Linux machine'
)
parser.add_argument('-t', '--timeout', metavar='float', type=float, default=10,
help='timeout in seconds (partial seconds allowed) (default 10)'
)
parser.add_argument('-l', '--log-level', metavar='level', type=str, default='info', help='enable debug printing by settting level to \'debug\'')
# sub command parser
subs = parser.add_subparsers(title='Available Commands',
description=None,
dest='command')
#
registerImageCommandArguments(subs)
registerOSCommandArguments(subs)
subs.add_parser('version', help='Display the %(prog)s version number')
args = parser.parse_args()
debug = True if args.log_level == 'debug' else False
#print(args)
# handle static commmands here
if args.command == 'version':
from pymcumgr import __version__
print(__version__)
sys.exit(0)
elif args.command == 'image':
if args.img_cmd == 'analyze':
with open(args.file, 'rb') as f:
contents = f.read()
img = MCUBootImage(contents)
print(img)
sys.exit(0)
try:
transport = Transport.fromCmdArgs(args)
except ValueError as ex:
print(str(ex))
sys.exit(2)
transport.set_timeout(args.timeout)
transport.debug = debug
if args.command == 'image':
if args.img_cmd == 'list':
rsp = transport.run(ImageList())
if debug:
print('list returned')
print(rsp)
if rsp:
if rsp.err:
print(str(rsp.err))
elif rsp.obj:
for idx, sl in enumerate(rsp.obj.slots):
print('image:{} {}'.format(idx, str(sl)))
elif args.img_cmd == 'confirm':
rsp = transport.run(ImageConfirm())
if debug:
print('confirm returned')
print(rsp)
if rsp:
if rsp.err:
print(str(rsp.err))
elif rsp.obj:
for idx, sl in enumerate(rsp.obj.slots):
print('image:{} {}'.format(idx, str(sl)))
elif args.img_cmd == 'test':
rsp = transport.run(ImageTest(args.hash))
if debug:
print('test returned')
print(rsp)
if rsp:
if rsp.err:
print(str(rsp.err))
elif rsp.obj:
for idx, sl in enumerate(rsp.obj.slots):
print('image:{} {}'.format(idx, str(sl)))
elif args.img_cmd == 'upload':
with open(args.file, 'rb') as f:
contents = f.read()
# TODO: don't know how to obtain MTU, set static for now
transport._connect()
try:
mtu = transport.gatt.dev.MTU
except:
mtu = 160
            # default timeout is always too low here (an erase happens first)
transport.set_timeout(args.timeout + 20)
rsp = transport.run(ImageUpload(MCUBootImage(contents), mtu=mtu, progress=True))
transport.set_timeout(args.timeout)
if debug:
print('upload returned')
print(rsp)
if rsp:
print('Done')
elif args.img_cmd == 'erase':
            # default timeout is always too low
transport.set_timeout(args.timeout + 20)
rsp = transport.run(ImageErase())
transport.set_timeout(args.timeout)
if debug:
print('erase returned')
if rsp:
print(rsp)
print('Done')
else:
raise NotImplementedError('Image command: {}'.format(args.img_cmd))
elif args.command == 'echo':
rsp = transport.run(Echo(args.text))
if debug:
print('echo returned')
if rsp:
print(rsp.obj)
else:
print('Done')
elif args.command == 'reset':
rsp = transport.run(Reset())
        print('reset returned')
if rsp:
print(rsp.obj)
else:
print('Done')
else:
raise NotImplementedError('Command: {}'.format(args.command))
if __name__ == "__main__":
main()
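# Hedged usage notes (the installed command name is assumed to be "pymcumgr"; the
# connstring syntax depends on the BLE transport and is not shown here). Only flags
# and sub-commands registered above are used:
#   pymcumgr --conntype ble --timeout 15 image list
#   pymcumgr --conntype ble image upload <firmware-file>
#   pymcumgr echo <text>
#   pymcumgr reset
#   pymcumgr version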
| [
"pymcumgr.mgmt.os_cmd.registerOSCommandArguments",
"argparse.ArgumentParser",
"pymcumgr.mgmt.mcuboot_image.MCUBootImage",
"pymcumgr.mgmt.os_cmd.Echo",
"pymcumgr.transport.TransportBLE.conntype",
"pymcumgr.mgmt.os_cmd.Reset",
"pymcumgr.mgmt.img_cmd.ImageTest",
"pymcumgr.mgmt.img_cmd.ImageList",
"pymcumgr.mgmt.img_cmd.ImageErase",
"sys.exit",
"pymcumgr.mgmt.img_cmd.ImageConfirm",
"pymcumgr.transport.Transport.fromCmdArgs",
"pymcumgr.transport.Transport.transport_types",
"pymcumgr.mgmt.img_cmd.registerImageCommandArguments"
] | [((928, 1101), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""%(prog)s helps you manage remote devices"""', 'usage': '_usage', 'epilog': '"""Use "%(prog)s [command] --help" for more information about a command."""'}), '(description=\'%(prog)s helps you manage remote devices\',\n usage=_usage, epilog=\n \'Use "%(prog)s [command] --help" for more information about a command.\')\n', (942, 1101), False, 'from argparse import ArgumentParser, ArgumentTypeError\n'), ((2288, 2323), 'pymcumgr.mgmt.img_cmd.registerImageCommandArguments', 'registerImageCommandArguments', (['subs'], {}), '(subs)\n', (2317, 2323), False, 'from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload\n'), ((2328, 2360), 'pymcumgr.mgmt.os_cmd.registerOSCommandArguments', 'registerOSCommandArguments', (['subs'], {}), '(subs)\n', (2354, 2360), False, 'from pymcumgr.mgmt.os_cmd import registerOSCommandArguments, Reset, Echo\n'), ((2688, 2699), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2696, 2699), False, 'import sys\n'), ((2972, 2999), 'pymcumgr.transport.Transport.fromCmdArgs', 'Transport.fromCmdArgs', (['args'], {}), '(args)\n', (2993, 2999), False, 'from pymcumgr.transport import Transport, TransportBLE\n'), ((768, 795), 'pymcumgr.transport.Transport.transport_types', 'Transport.transport_types', ([], {}), '()\n', (793, 795), False, 'from pymcumgr.transport import Transport, TransportBLE\n'), ((1484, 1507), 'pymcumgr.transport.TransportBLE.conntype', 'TransportBLE.conntype', ([], {}), '()\n', (1505, 1507), False, 'from pymcumgr.transport import Transport, TransportBLE\n'), ((3060, 3071), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3068, 3071), False, 'import sys\n'), ((2872, 2894), 'pymcumgr.mgmt.mcuboot_image.MCUBootImage', 'MCUBootImage', (['contents'], {}), '(contents)\n', (2884, 2894), False, 'from pymcumgr.mgmt.mcuboot_image import MCUBootImage, print_hex as print_hex2\n'), ((2930, 2941), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2938, 2941), False, 'import sys\n'), ((3242, 3253), 'pymcumgr.mgmt.img_cmd.ImageList', 'ImageList', ([], {}), '()\n', (3251, 3253), False, 'from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload\n'), ((5662, 5677), 'pymcumgr.mgmt.os_cmd.Echo', 'Echo', (['args.text'], {}), '(args.text)\n', (5666, 5677), False, 'from pymcumgr.mgmt.os_cmd import registerOSCommandArguments, Reset, Echo\n'), ((853, 880), 'pymcumgr.transport.Transport.transport_types', 'Transport.transport_types', ([], {}), '()\n', (878, 880), False, 'from pymcumgr.transport import Transport, TransportBLE\n'), ((3662, 3676), 'pymcumgr.mgmt.img_cmd.ImageConfirm', 'ImageConfirm', ([], {}), '()\n', (3674, 3676), False, 'from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload\n'), ((5878, 5885), 'pymcumgr.mgmt.os_cmd.Reset', 'Reset', ([], {}), '()\n', (5883, 5885), False, 'from pymcumgr.mgmt.os_cmd import registerOSCommandArguments, Reset, Echo\n'), ((4085, 4105), 'pymcumgr.mgmt.img_cmd.ImageTest', 'ImageTest', (['args.hash'], {}), '(args.hash)\n', (4094, 4105), False, 'from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload\n'), ((4909, 4931), 'pymcumgr.mgmt.mcuboot_image.MCUBootImage', 'MCUBootImage', (['contents'], {}), '(contents)\n', (4921, 4931), False, 
'from pymcumgr.mgmt.mcuboot_image import MCUBootImage, print_hex as print_hex2\n'), ((5299, 5311), 'pymcumgr.mgmt.img_cmd.ImageErase', 'ImageErase', ([], {}), '()\n', (5309, 5311), False, 'from pymcumgr.mgmt.img_cmd import ImgDescription, registerImageCommandArguments, ImageList, ImageConfirm, ImageTest, ImageErase, ImageUpload\n')] |
"""Build_manager test modules for views.py"""
import json
from unittest.mock import patch
from django.contrib.auth.models import User
from django.core.management import call_command
from django.shortcuts import reverse
from django.test import Client, TestCase
from build_manager.models import TravisInstance
from dataschema_manager.models import DataSchema
from experiments_manager.models import Experiment
from git_manager.models import GitRepository
from user_manager.models import WorkbenchUser
class BuildManagerViewsTestCases(TestCase):
"""Tests for views.py in build_manager app"""
def setUp(self):
"""Prepare for running tests:
Load data from fixtures
Create a new user
Create a second user
Create a git repo, data schema and experiment
Sign in the user"""
call_command('loaddata', 'fixtures/steps.json', verbosity=0)
call_command('loaddata', 'fixtures/package_categories_languages.json', verbosity=0)
call_command('loaddata', 'fixtures/cookiecutter.json', verbosity=0)
self.user = User.objects.create_user('test', '<EMAIL>', 'test')
self.workbench_user = WorkbenchUser.objects.get(user=self.user)
self.second_user = User.objects.create_user('test2', '<EMAIL>', 'test2')
self.git_repo = GitRepository.objects.create(name='Experiment', owner=self.workbench_user,
github_url='https://github')
schema = DataSchema(name='main')
schema.save()
self.experiment = Experiment.objects.create(title='Experiment',
description='test',
owner=self.workbench_user,
git_repo=self.git_repo,
language_id=1,
template_id=1,
schema=schema)
self.client = Client()
self.client.login(username='test', password='<PASSWORD>')
@patch('build_manager.views.enable_travis')
def test_enable_ci_builds(self, mock_enable_travis): # pylint: disable=unused-argument
"""Test enable CI build for experiment"""
data = {'object_id': self.experiment.id, 'object_type': self.experiment.get_object_type()}
response = self.client.post(reverse('enable_ci_builds'), data=data)
response_json = json.loads(str(response.content, encoding='utf8'))
travis = TravisInstance.objects.filter(experiment=self.experiment)
self.assertTrue(response_json['enabled'])
self.assertEqual(travis.count(), 1)
def test_enable_ci_builds_missing_id(self):
"""Test enable CI build for experiment without object_id and object_type"""
args = [reverse('enable_ci_builds')]
self.assertRaises(AssertionError, self.client.get, *args)
@patch('build_manager.views.get_github_helper')
@patch('build_manager.views.TravisCiHelper')
def test_disable_ci_builds(self, mock_get_github, mock_travis_ci): # pylint: disable=unused-argument
"""Test to disable CI builds, after having them enabled"""
self.test_enable_ci_builds()
data = {'object_id': self.experiment.id, 'object_type': self.experiment.get_object_type()}
response = self.client.post(reverse('disable_ci_builds'), data=data)
response_json = json.loads(str(response.content, encoding='utf8'))
travis = TravisInstance.objects.get(experiment=self.experiment)
self.assertTrue(response_json['disabled'])
self.assertFalse(travis.enabled)
@patch('build_manager.views.get_github_helper')
@patch('build_manager.views.TravisCiHelper')
def test_disable_ci_builds_never_enabled(self, mock_get_github, mock_travis_ci): # pylint: disable=unused-argument
"""Test to disable CI builds, when builds are never enabled"""
data = {'object_id': self.experiment.id, 'object_type': self.experiment.get_object_type()}
response = self.client.post(reverse('disable_ci_builds'), data=data)
response_json = json.loads(str(response.content, encoding='utf8'))
self.assertTrue(response_json['disabled'])
def test_disable_ci_builds_missing_id(self):
"""Test to disable CI builds when object_id and object_type are missing"""
args = [reverse('disable_ci_builds')]
self.assertRaises(AssertionError, self.client.get, *args)
@patch('build_manager.views.get_github_helper')
@patch('build_manager.views.TravisCiHelper')
def test_build_experiment_now(self, mock_get_github, mock_travis_ci): # pylint: disable=unused-argument
"""Test to force the start of a build now"""
self.test_enable_ci_builds()
data = {'object_id': self.experiment.id, 'object_type': self.experiment.get_object_type()}
response = self.client.post(reverse('build_experiment_now'), data=data)
response_json = json.loads(str(response.content, encoding='utf8'))
self.assertTrue(response_json['build_started'])
def test_build_experiment_now_missing_id(self):
"""Test to force the start of a build now, with missing object_id and object_type"""
args = [reverse('build_experiment_now')]
self.assertRaises(AssertionError, self.client.get, *args)
@patch('build_manager.views.get_github_helper')
def test_build_status(self, mock_get_github): # pylint: disable=unused-argument
"""Test to get the last status of a CI build"""
self.test_enable_ci_builds()
response = self.client.get(
reverse('build_status', kwargs={'object_id': 1,
'object_type': self.experiment.get_object_type()}))
travis = TravisInstance.objects.get(experiment=self.experiment)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['current_config'], travis)
@patch('build_manager.views.get_github_helper')
def test_build_status_disabled(self, mock_get_github): # pylint: disable=unused-argument
"""Test to get the last status of a CI build when CI builds are disabled"""
self.test_enable_ci_builds()
self.test_disable_ci_builds()
response = self.client.get(
reverse('build_status', kwargs={'object_id': 1,
'object_type': self.experiment.get_object_type()}))
self.assertEqual(response.status_code, 200)
travis = TravisInstance.objects.get(experiment=self.experiment)
self.assertEqual(response.context['current_config'], travis)
self.assertFalse(response.context['configured'])
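# Hedged note (not part of the original module): these are standard Django TestCase
# tests, so they would typically be run through the project's manage.py, e.g.
#   python manage.py test build_manager
# The fixture files loaded in setUp() must exist at the paths given there.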
| [
"dataschema_manager.models.DataSchema",
"django.core.management.call_command",
"git_manager.models.GitRepository.objects.create",
"user_manager.models.WorkbenchUser.objects.get",
"build_manager.models.TravisInstance.objects.filter",
"experiments_manager.models.Experiment.objects.create",
"build_manager.models.TravisInstance.objects.get",
"django.shortcuts.reverse",
"unittest.mock.patch",
"django.contrib.auth.models.User.objects.create_user",
"django.test.Client"
] | [((2134, 2176), 'unittest.mock.patch', 'patch', (['"""build_manager.views.enable_travis"""'], {}), "('build_manager.views.enable_travis')\n", (2139, 2176), False, 'from unittest.mock import patch\n'), ((2990, 3036), 'unittest.mock.patch', 'patch', (['"""build_manager.views.get_github_helper"""'], {}), "('build_manager.views.get_github_helper')\n", (2995, 3036), False, 'from unittest.mock import patch\n'), ((3042, 3085), 'unittest.mock.patch', 'patch', (['"""build_manager.views.TravisCiHelper"""'], {}), "('build_manager.views.TravisCiHelper')\n", (3047, 3085), False, 'from unittest.mock import patch\n'), ((3719, 3765), 'unittest.mock.patch', 'patch', (['"""build_manager.views.get_github_helper"""'], {}), "('build_manager.views.get_github_helper')\n", (3724, 3765), False, 'from unittest.mock import patch\n'), ((3771, 3814), 'unittest.mock.patch', 'patch', (['"""build_manager.views.TravisCiHelper"""'], {}), "('build_manager.views.TravisCiHelper')\n", (3776, 3814), False, 'from unittest.mock import patch\n'), ((4559, 4605), 'unittest.mock.patch', 'patch', (['"""build_manager.views.get_github_helper"""'], {}), "('build_manager.views.get_github_helper')\n", (4564, 4605), False, 'from unittest.mock import patch\n'), ((4611, 4654), 'unittest.mock.patch', 'patch', (['"""build_manager.views.TravisCiHelper"""'], {}), "('build_manager.views.TravisCiHelper')\n", (4616, 4654), False, 'from unittest.mock import patch\n'), ((5431, 5477), 'unittest.mock.patch', 'patch', (['"""build_manager.views.get_github_helper"""'], {}), "('build_manager.views.get_github_helper')\n", (5436, 5477), False, 'from unittest.mock import patch\n'), ((6047, 6093), 'unittest.mock.patch', 'patch', (['"""build_manager.views.get_github_helper"""'], {}), "('build_manager.views.get_github_helper')\n", (6052, 6093), False, 'from unittest.mock import patch\n'), ((832, 892), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""fixtures/steps.json"""'], {'verbosity': '(0)'}), "('loaddata', 'fixtures/steps.json', verbosity=0)\n", (844, 892), False, 'from django.core.management import call_command\n'), ((901, 988), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""fixtures/package_categories_languages.json"""'], {'verbosity': '(0)'}), "('loaddata', 'fixtures/package_categories_languages.json',\n verbosity=0)\n", (913, 988), False, 'from django.core.management import call_command\n'), ((993, 1060), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""fixtures/cookiecutter.json"""'], {'verbosity': '(0)'}), "('loaddata', 'fixtures/cookiecutter.json', verbosity=0)\n", (1005, 1060), False, 'from django.core.management import call_command\n'), ((1082, 1133), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['"""test"""', '"""<EMAIL>"""', '"""test"""'], {}), "('test', '<EMAIL>', 'test')\n", (1106, 1133), False, 'from django.contrib.auth.models import User\n'), ((1164, 1205), 'user_manager.models.WorkbenchUser.objects.get', 'WorkbenchUser.objects.get', ([], {'user': 'self.user'}), '(user=self.user)\n', (1189, 1205), False, 'from user_manager.models import WorkbenchUser\n'), ((1233, 1286), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (['"""test2"""', '"""<EMAIL>"""', '"""test2"""'], {}), "('test2', '<EMAIL>', 'test2')\n", (1257, 1286), False, 'from django.contrib.auth.models import User\n'), ((1311, 1418), 'git_manager.models.GitRepository.objects.create', 'GitRepository.objects.create', ([], 
{'name': '"""Experiment"""', 'owner': 'self.workbench_user', 'github_url': '"""https://github"""'}), "(name='Experiment', owner=self.workbench_user,\n github_url='https://github')\n", (1339, 1418), False, 'from git_manager.models import GitRepository\n'), ((1485, 1508), 'dataschema_manager.models.DataSchema', 'DataSchema', ([], {'name': '"""main"""'}), "(name='main')\n", (1495, 1508), False, 'from dataschema_manager.models import DataSchema\n'), ((1557, 1728), 'experiments_manager.models.Experiment.objects.create', 'Experiment.objects.create', ([], {'title': '"""Experiment"""', 'description': '"""test"""', 'owner': 'self.workbench_user', 'git_repo': 'self.git_repo', 'language_id': '(1)', 'template_id': '(1)', 'schema': 'schema'}), "(title='Experiment', description='test', owner=\n self.workbench_user, git_repo=self.git_repo, language_id=1, template_id\n =1, schema=schema)\n", (1582, 1728), False, 'from experiments_manager.models import Experiment\n'), ((2053, 2061), 'django.test.Client', 'Client', ([], {}), '()\n', (2059, 2061), False, 'from django.test import Client, TestCase\n'), ((2587, 2644), 'build_manager.models.TravisInstance.objects.filter', 'TravisInstance.objects.filter', ([], {'experiment': 'self.experiment'}), '(experiment=self.experiment)\n', (2616, 2644), False, 'from build_manager.models import TravisInstance\n'), ((3565, 3619), 'build_manager.models.TravisInstance.objects.get', 'TravisInstance.objects.get', ([], {'experiment': 'self.experiment'}), '(experiment=self.experiment)\n', (3591, 3619), False, 'from build_manager.models import TravisInstance\n'), ((5865, 5919), 'build_manager.models.TravisInstance.objects.get', 'TravisInstance.objects.get', ([], {'experiment': 'self.experiment'}), '(experiment=self.experiment)\n', (5891, 5919), False, 'from build_manager.models import TravisInstance\n'), ((6608, 6662), 'build_manager.models.TravisInstance.objects.get', 'TravisInstance.objects.get', ([], {'experiment': 'self.experiment'}), '(experiment=self.experiment)\n', (6634, 6662), False, 'from build_manager.models import TravisInstance\n'), ((2455, 2482), 'django.shortcuts.reverse', 'reverse', (['"""enable_ci_builds"""'], {}), "('enable_ci_builds')\n", (2462, 2482), False, 'from django.shortcuts import reverse\n'), ((2889, 2916), 'django.shortcuts.reverse', 'reverse', (['"""enable_ci_builds"""'], {}), "('enable_ci_builds')\n", (2896, 2916), False, 'from django.shortcuts import reverse\n'), ((3432, 3460), 'django.shortcuts.reverse', 'reverse', (['"""disable_ci_builds"""'], {}), "('disable_ci_builds')\n", (3439, 3460), False, 'from django.shortcuts import reverse\n'), ((4141, 4169), 'django.shortcuts.reverse', 'reverse', (['"""disable_ci_builds"""'], {}), "('disable_ci_builds')\n", (4148, 4169), False, 'from django.shortcuts import reverse\n'), ((4457, 4485), 'django.shortcuts.reverse', 'reverse', (['"""disable_ci_builds"""'], {}), "('disable_ci_builds')\n", (4464, 4485), False, 'from django.shortcuts import reverse\n'), ((4989, 5020), 'django.shortcuts.reverse', 'reverse', (['"""build_experiment_now"""'], {}), "('build_experiment_now')\n", (4996, 5020), False, 'from django.shortcuts import reverse\n'), ((5326, 5357), 'django.shortcuts.reverse', 'reverse', (['"""build_experiment_now"""'], {}), "('build_experiment_now')\n", (5333, 5357), False, 'from django.shortcuts import reverse\n')] |
from PIL import Image
import pytesseract
import os
from score_logic import FindScores
import google_handler
import csv_handler
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('img_dir', None, 'Image directory path')
flags.DEFINE_string('csv', None, 'The CSV file path')
flags.DEFINE_string('google_sheet', None, 'The ID of the Google sheet')
flags.DEFINE_string('service_account', None,
'Google service account file path')
flags.DEFINE_boolean('remove_all_screenshots', False,
'Delete all screenshots after processing')
flags.DEFINE_boolean('remove_captured', False,
'Delete only screenshots that were successfully processed')
def GetImages(path):
file_list = []
for filename in os.listdir(path):
if not filename.endswith('bw.png'):
file_path = os.path.join(path, filename)
file_list.append(file_path)
return file_list
def ProcessImages(files):
score_dict = {}
score_boards = []
for file_path in files:
img = Image.open(file_path)
matrix = ( 1, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 )
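    # Added explanation: the 12-element matrix keeps only the red channel in the RGB
    # conversion below; collapsing that to greyscale ('L') and thresholding at 25
    # yields a high-contrast black/white image, which tends to OCR more reliably
    # with pytesseract than the raw screenshot.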
rgb = img.convert("RGB")
converted = rgb.convert("RGB", matrix).convert('L')
bw = converted.point(lambda x: x > 25 and 255)
conv_path = '%s_bw.png' % file_path[:-4]
bw.save(conv_path)
score_board = pytesseract.image_to_string(bw)
score_dict[file_path] = [score_board]
return score_dict
def FindSongInfo(score_dict):
for file_path in score_dict.keys():
if file_path.endswith('.png'):
title = file_path.split('/')[-1][:-19][10:].replace('-', ' ').strip()
score = score_dict[file_path][1].strip('\'')
stars = score_dict[file_path][2].strip('\'')
accuracy = score_dict[file_path][3].strip('\'')
difficulty = score_dict[file_path][4].strip('\'')
score_year = file_path.split('/')[-1][-18:][:-4][:4]
score_month = file_path.split('/')[-1][-18:][:-4][4:][:2]
score_day = file_path.split('/')[-1][-18:][:-4][6:][:2]
score_date = '%s-%s-%s' % (score_year, score_month, score_day)
score_date = score_date.strip('\'')
score_dict[file_path] = [title, score, difficulty, stars, accuracy,
score_date]
return score_dict
def DeleteImages(path):
for filename in os.listdir(path):
if filename.endswith('.png'):
file_path = os.path.join(path, filename)
os.remove(file_path)
def main(argv):
del argv
file_list = GetImages(FLAGS.img_dir)
score_dict = ProcessImages(file_list)
updated_score_dict = FindScores(score_dict, FLAGS.remove_captured)
final_score_dict = FindSongInfo(updated_score_dict)
if FLAGS.csv:
csv_handler.HandleCsv(FLAGS.csv, final_score_dict)
if FLAGS.remove_all_screenshots:
DeleteImages(FLAGS.img_dir)
if FLAGS.google_sheet:
google_handler.GoogleSheetHandler(FLAGS.google_sheet, final_score_dict)
if __name__ == '__main__':
app.run(main)
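# Hedged usage sketch (the script filename below is hypothetical; the flags are the
# absl flags defined at the top of this file):
#   python scores_to_sheet.py --img_dir=/path/to/screenshots --csv=scores.csv
#   python scores_to_sheet.py --img_dir=/path/to/screenshots \
#       --google_sheet=<SHEET_ID> --service_account=key.json --remove_captured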
| [
"os.listdir",
"PIL.Image.open",
"score_logic.FindScores",
"google_handler.GoogleSheetHandler",
"os.path.join",
"absl.app.run",
"absl.flags.DEFINE_boolean",
"csv_handler.HandleCsv",
"pytesseract.image_to_string",
"absl.flags.DEFINE_string",
"os.remove"
] | [((192, 252), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""img_dir"""', 'None', '"""Image directory path"""'], {}), "('img_dir', None, 'Image directory path')\n", (211, 252), False, 'from absl import flags\n'), ((253, 306), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""csv"""', 'None', '"""The CSV file path"""'], {}), "('csv', None, 'The CSV file path')\n", (272, 306), False, 'from absl import flags\n'), ((307, 378), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""google_sheet"""', 'None', '"""The ID of the Google sheet"""'], {}), "('google_sheet', None, 'The ID of the Google sheet')\n", (326, 378), False, 'from absl import flags\n'), ((379, 464), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""service_account"""', 'None', '"""Google service account file path"""'], {}), "('service_account', None, 'Google service account file path'\n )\n", (398, 464), False, 'from absl import flags\n'), ((480, 580), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""remove_all_screenshots"""', '(False)', '"""Delete all screenshots after processing"""'], {}), "('remove_all_screenshots', False,\n 'Delete all screenshots after processing')\n", (500, 580), False, 'from absl import flags\n'), ((598, 708), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""remove_captured"""', '(False)', '"""Delete only screenshots that were successfully processed"""'], {}), "('remove_captured', False,\n 'Delete only screenshots that were successfully processed')\n", (618, 708), False, 'from absl import flags\n'), ((784, 800), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (794, 800), False, 'import os\n'), ((2338, 2354), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2348, 2354), False, 'import os\n'), ((2594, 2639), 'score_logic.FindScores', 'FindScores', (['score_dict', 'FLAGS.remove_captured'], {}), '(score_dict, FLAGS.remove_captured)\n', (2604, 2639), False, 'from score_logic import FindScores\n'), ((2967, 2980), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2974, 2980), False, 'from absl import app\n'), ((1046, 1067), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (1056, 1067), False, 'from PIL import Image\n'), ((1372, 1403), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['bw'], {}), '(bw)\n', (1399, 1403), False, 'import pytesseract\n'), ((2715, 2765), 'csv_handler.HandleCsv', 'csv_handler.HandleCsv', (['FLAGS.csv', 'final_score_dict'], {}), '(FLAGS.csv, final_score_dict)\n', (2736, 2765), False, 'import csv_handler\n'), ((2864, 2935), 'google_handler.GoogleSheetHandler', 'google_handler.GoogleSheetHandler', (['FLAGS.google_sheet', 'final_score_dict'], {}), '(FLAGS.google_sheet, final_score_dict)\n', (2897, 2935), False, 'import google_handler\n'), ((860, 888), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (872, 888), False, 'import os\n'), ((2407, 2435), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2419, 2435), False, 'import os\n'), ((2441, 2461), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (2450, 2461), False, 'import os\n')] |
from eth_account import (
Account,
)
from eth_utils import (
encode_hex,
)
def test_personal_importRawKey(rpc_client):
account_manager = Account()
private_key = account_manager.create().privateKey
new_account = rpc_client('personal_importRawKey', [encode_hex(private_key), 'a-password'])
assert rpc_client('personal_unlockAccount', [new_account, 'a-password']) is True
| [
"eth_account.Account",
"eth_utils.encode_hex"
] | [((151, 160), 'eth_account.Account', 'Account', ([], {}), '()\n', (158, 160), False, 'from eth_account import Account\n'), ((270, 293), 'eth_utils.encode_hex', 'encode_hex', (['private_key'], {}), '(private_key)\n', (280, 293), False, 'from eth_utils import encode_hex\n')] |
import typing as t
from logging import getLogger
from .line import Line
from .patterns import REGEX_PARAM_NAME, REGEX_TYPES
from .route import Route
logger = getLogger("sanic.root")
class Node:
def __init__(
self, part: str = "", root: bool = False, parent=None
) -> None:
self.root = root
self.part = part
self.parent = parent
self._children: t.Dict[str, "Node"] = {}
self.children: t.Dict[str, "Node"] = {}
self.level = 0
self.offset = 0
self.route: t.Optional[Route] = None
self.dynamic = False
self.first = False
self.last = False
self.children_basketed = False
def __repr__(self) -> str:
internals = ", ".join(
f"{prop}={getattr(self, prop)}"
for prop in ["part", "level", "route", "dynamic"]
if getattr(self, prop) or prop in ["level"]
)
return f"<Node: {internals}>"
def finalize_children(self):
self.children = {
k: v for k, v in sorted(self._children.items(), key=self._sorting)
}
if self.children:
keys = list(self.children.keys())
self.children[keys[0]].first = True
self.children[keys[-1]].last = True
for child in self.children.values():
child.finalize_children()
def display(self) -> None:
"""
Visual display of the tree of nodes
"""
logger.info(" " * 4 * self.level + str(self))
for child in self.children.values():
child.display()
def render(self) -> t.List[Line]:
if not self.root:
output, delayed = self.to_src()
else:
output = []
delayed = []
for child in self.children.values():
output += child.render()
output += delayed
return output
def apply_offset(self, amt, apply_self=True, apply_children=False):
if apply_self:
self.offset += amt
if apply_children:
for child in self.children.values():
child.apply_offset(amt, apply_children=True)
def to_src(self) -> t.Tuple[t.List[Line], t.List[Line]]:
indent = (self.level + 1) * 2 - 3 + self.offset
delayed: t.List[Line] = []
src: t.List[Line] = []
level = self.level - 1
equality_check = False
len_check = ""
return_bump = 1
if self.first or self.root:
src = []
operation = ">"
use_level = level
conditional = "if"
if (
self.last
and self.route
and not self.children
and not self.route.requirements
and not self.route.router.regex_routes
):
use_level = self.level
operation = "=="
equality_check = True
conditional = "elif"
src.extend(
[
Line(f"if num > {use_level}:", indent),
Line("raise NotFound", indent + 1),
]
)
src.append(
Line(f"{conditional} num {operation} {use_level}:", indent)
)
if self.dynamic:
if not self.parent.children_basketed:
self.parent.children_basketed = True
src.append(
Line(f"basket[{level}] = parts[{level}]", indent + 1)
)
self.parent.apply_offset(-1, False, True)
if not self.children:
return_bump -= 1
else:
if_stmt = "if" if self.first or self.root else "elif"
len_check = (
f" and num == {self.level}"
if not self.children and not equality_check
else ""
)
src.append(
Line(
f'{if_stmt} parts[{level}] == "{self.part}"{len_check}:',
indent + 1,
)
)
if self.children:
return_bump += 1
if self.route and not self.route.regex:
location = delayed if self.children else src
if self.route.requirements:
self._inject_requirements(
location, indent + return_bump + bool(not self.children)
)
if self.route.params:
if not self.last:
return_bump += 1
self._inject_params(
location, indent + return_bump + bool(not self.children)
)
param_offset = bool(self.route.params)
return_indent = (
indent + return_bump + bool(not self.children) + param_offset
)
location.extend(
[
Line(
(f"basket['__raw_path__'] = '{self.route.path}'"),
return_indent,
),
Line(
(
f"return router.dynamic_routes[{self.route.parts}]"
", basket"
),
return_indent,
),
# Line("...", return_indent - 1, render=True),
]
)
if self.route.requirements and self.last and len_check:
location.append(Line("raise NotFound", return_indent - 1))
if self.route.params:
location.append(Line("...", return_indent - 1, render=False))
if self.last:
location.append(
Line("...", return_indent - 2, render=False),
)
return src, delayed
def add_child(self, child: "Node") -> None:
self._children[child.part] = child
def _inject_requirements(self, location, indent):
for k, (idx, reqs) in enumerate(self.route.requirements.items()):
conditional = "if" if k == 0 else "elif"
location.extend(
[
Line((f"{conditional} extra == {reqs}:"), indent),
Line((f"basket['__handler_idx__'] = {idx}"), indent + 1),
]
)
location.extend(
[
Line(("else:"), indent),
Line(("raise NotFound"), indent + 1),
]
)
def _inject_params(self, location, indent):
if self.last:
lines = [
Line(f"if num > {self.level}:", indent),
Line("raise NotFound", indent + 1),
]
else:
lines = [
Line(f"if num == {self.level}:", indent - 1),
]
lines.append(Line("try:", indent))
for idx, param in self.route.params.items():
unquote_start = "unquote(" if self.route.unquote else ""
unquote_end = ")" if self.route.unquote else ""
lines.append(
Line(
f"basket['__params__']['{param.name}'] = "
f"{unquote_start}{param.cast.__name__}(basket[{idx}])"
f"{unquote_end}",
indent + 1,
)
)
location.extend(
lines
+ [
Line("except (ValueError, KeyError):", indent),
Line("pass", indent + 1),
Line("else:", indent),
]
)
@staticmethod
def _sorting(item) -> t.Tuple[bool, int, str, int]:
key, child = item
type_ = 0
if child.dynamic:
key = key[1:-1]
if ":" in key:
key, param_type = key.split(":")
try:
type_ = list(REGEX_TYPES.keys()).index(param_type)
except ValueError:
type_ = len(list(REGEX_TYPES.keys()))
return child.dynamic, len(child._children), key, type_ * -1
class Tree:
def __init__(self) -> None:
self.root = Node(root=True)
self.root.level = 0
def generate(self, routes: t.Dict[t.Tuple[str, ...], Route]) -> None:
for route in routes.values():
current = self.root
for level, part in enumerate(route.parts):
if part not in current._children:
current.add_child(Node(part=part, parent=current))
current = current._children[part]
current.level = level + 1
current.dynamic = part.startswith("<")
if current.dynamic and not REGEX_PARAM_NAME.match(part):
raise ValueError(f"Invalid declaration: {part}")
current.route = route
def display(self) -> None:
"""
Debug tool to output visual of the tree
"""
self.root.display()
def render(self) -> t.List[Line]:
return self.root.render()
def finalize(self):
self.root.finalize_children()
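# Rough summary of the flow above (the Route and Line classes come from sibling
# modules and are not shown here): generate() walks each route's parts, for example
# ("api", "v1", "<id:int>"), creating one Node per segment, marking <...> segments as
# dynamic, and attaching the Route to the final node; render() then emits the nested
# if/elif source Lines that the router compiles into its matching function.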
| [
"logging.getLogger"
] | [((160, 183), 'logging.getLogger', 'getLogger', (['"""sanic.root"""'], {}), "('sanic.root')\n", (169, 183), False, 'from logging import getLogger\n')] |
# random story generator
import random
when = [ 'A long time ago', 'Yesterday', 'Before you were born', 'In the future', 'Before Thanos arrived']
who = ['Shazam', '<NAME>', 'Batman', 'Superman', 'Captain America']
where = ['Arkham Asylum', 'Gotham City', 'Stark Tower', 'Bat Cave', 'Avengers HQ']
why = ['to eat a lot of cakes', 'to fight for justice', 'to steal ice cream', 'to dance']
print(random.choice(when) + ', ' + random.choice(who) + ' went to ' + random.choice(where) + ' ' + random.choice(why))
| [
"random.choice"
] | [((491, 509), 'random.choice', 'random.choice', (['why'], {}), '(why)\n', (504, 509), False, 'import random\n'), ((462, 482), 'random.choice', 'random.choice', (['where'], {}), '(where)\n', (475, 482), False, 'import random\n'), ((427, 445), 'random.choice', 'random.choice', (['who'], {}), '(who)\n', (440, 445), False, 'import random\n'), ((398, 417), 'random.choice', 'random.choice', (['when'], {}), '(when)\n', (411, 417), False, 'import random\n')] |
import math
from numpy.random import default_rng
import numpy as np
class ScaledTModel(object):
__slots__ = ['_data', '_data_size', '_nu', '_rng', '_extended_vars', '_tau2',
'_mu', '_alpha2', '_tmp_with_data_size', '_tmp_with_data_size2', '_results_mu', '_results_sigma2']
def __init__(self, data, nu):
print("making a model")
self._data = np.asarray(data)
self._data_size = len(data)
self._nu = nu
self._rng = default_rng()
self._extended_vars = np.zeros(self._data_size)
self._tau2 = 1
self._mu = sum(data) / self._data_size
self._alpha2 = 1
self._update_extended_vars()
self._tmp_with_data_size = np.zeros(self._data_size)
self._tmp_with_data_size2 = np.zeros(self._data_size)
def _sampleScaledInvChiSquare(self, ni, scale):
x = self._rng.chisquare(ni)
return ni * scale / x
def _sampleScaledInvChiSquareN(self, ni, scale):
x = self._rng.chisquare(ni, size=len(scale))
return ni * scale / x
def _update_mu(self):
np.reciprocal(self._extended_vars, out=self._tmp_with_data_size)
self._tmp_with_data_size2 = self._data * self._tmp_with_data_size
variance = self._tmp_with_data_size.sum()
expected_value = self._tmp_with_data_size2.sum()
variance /= self._alpha2
expected_value /= self._alpha2
variance = 1.0 / variance
expected_value = expected_value * variance
self._mu = self._rng.normal(expected_value, math.sqrt(variance))
def _update_tau2(self):
np.reciprocal(self._extended_vars, out=self._tmp_with_data_size)
x = self._tmp_with_data_size.sum()
self._tau2 = self._rng.gamma(self._data_size * self._nu / 2.0, 2.0 / (self._nu * x))
def _update_alpha2(self):
x = 0.0
self._tmp_with_data_size = self._data - self._mu
self._tmp_with_data_size = (self._tmp_with_data_size * self._tmp_with_data_size) / self._extended_vars
x = self._tmp_with_data_size.sum()
x /= self._data_size
self._alpha2 = self._sampleScaledInvChiSquare(self._data_size, x)
def _update_extended_vars(self):
x = (self._data - self._mu) * (self._data - self._mu) / self._alpha2
self._extended_vars = self._sampleScaledInvChiSquareN(self._nu + 1, (self._nu * self._tau2 + x) / (self._nu + 1))
def run(self, burn_in = 1000, sample_size = 2000):
self._results_mu = np.zeros(sample_size)
self._results_sigma2 = np.zeros(sample_size)
for _ in range(burn_in):
self._update_extended_vars()
self._update_alpha2()
self._update_mu()
            self._update_tau2()
for i in range(sample_size):
self._update_extended_vars()
self._update_alpha2()
self._update_mu()
            self._update_tau2()
self._results_mu[i] = self._mu
self._results_sigma2[i] = self._alpha2 * self._tau2
@property
def mu(self):
if hasattr(self, '_results_mu'):
return self._results_mu
@property
def sigma2(self):
if hasattr(self, '_results_sigma2'):
return self._results_sigma2
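# Hedged usage sketch (not part of the original module); the synthetic data below are
# made up purely for illustration.
if __name__ == '__main__':
    rng = default_rng(0)
    synthetic = rng.standard_t(df=4, size=200) + 5.0  # heavy-tailed data centred near 5
    model = ScaledTModel(synthetic, nu=4)
    model.run(burn_in=500, sample_size=1000)
    print('posterior mean of mu:', model.mu.mean())
    print('posterior mean of sigma^2:', model.sigma2.mean())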
| [
"numpy.random.default_rng",
"numpy.reciprocal",
"math.sqrt",
"numpy.asarray",
"numpy.zeros"
] | [((372, 388), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (382, 388), True, 'import numpy as np\n'), ((468, 481), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (479, 481), False, 'from numpy.random import default_rng\n'), ((513, 538), 'numpy.zeros', 'np.zeros', (['self._data_size'], {}), '(self._data_size)\n', (521, 538), True, 'import numpy as np\n'), ((708, 733), 'numpy.zeros', 'np.zeros', (['self._data_size'], {}), '(self._data_size)\n', (716, 733), True, 'import numpy as np\n'), ((770, 795), 'numpy.zeros', 'np.zeros', (['self._data_size'], {}), '(self._data_size)\n', (778, 795), True, 'import numpy as np\n'), ((1096, 1160), 'numpy.reciprocal', 'np.reciprocal', (['self._extended_vars'], {'out': 'self._tmp_with_data_size'}), '(self._extended_vars, out=self._tmp_with_data_size)\n', (1109, 1160), True, 'import numpy as np\n'), ((1615, 1679), 'numpy.reciprocal', 'np.reciprocal', (['self._extended_vars'], {'out': 'self._tmp_with_data_size'}), '(self._extended_vars, out=self._tmp_with_data_size)\n', (1628, 1679), True, 'import numpy as np\n'), ((2499, 2520), 'numpy.zeros', 'np.zeros', (['sample_size'], {}), '(sample_size)\n', (2507, 2520), True, 'import numpy as np\n'), ((2552, 2573), 'numpy.zeros', 'np.zeros', (['sample_size'], {}), '(sample_size)\n', (2560, 2573), True, 'import numpy as np\n'), ((1553, 1572), 'math.sqrt', 'math.sqrt', (['variance'], {}), '(variance)\n', (1562, 1572), False, 'import math\n')] |
# Generated by Django 3.2.7 on 2021-09-16 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('homepage', '0002_favorites_name'),
]
operations = [
migrations.RemoveField(
model_name='favorites',
name='name',
),
]
| [
"django.db.migrations.RemoveField"
] | [((224, 283), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""favorites"""', 'name': '"""name"""'}), "(model_name='favorites', name='name')\n", (246, 283), False, 'from django.db import migrations\n')] |
import logging
import librosa
import sounddevice as sd
import googleapiclient.discovery
from google.api_core.client_options import ClientOptions
from google.cloud import storage
import numpy as np
def segment_cough(x, fs, cough_padding=0.2, min_cough_len=0.1, th_l_multiplier=0.1, th_h_multiplier=0.5):
"""Preprocess the data by segmenting each file into individual coughs using a hysteresis comparator on the signal power
Inputs:
*x (np.array): cough signal
*fs (float): sampling frequency in Hz
*cough_padding (float): number of seconds added to the beginning and end of each detected cough to make sure coughs are not cut short
    *min_cough_len (float): length of the minimum possible segment that can be considered a cough
*th_l_multiplier (float): multiplier of the RMS energy used as a lower threshold of the hysteresis comparator
*th_h_multiplier (float): multiplier of the RMS energy used as a high threshold of the hysteresis comparator
Outputs:
*coughSegments (np.array of np.arrays): a list of cough signal arrays corresponding to each cough
cough_mask (np.array): an array of booleans that are True at the indices where a cough is in progress"""
cough_mask = np.array([False] * len(x))
# Define hysteresis thresholds
rms = np.sqrt(np.mean(np.square(x)))
seg_th_l = th_l_multiplier * rms
seg_th_h = th_h_multiplier * rms
# Segment coughs
coughSegments = []
padding = round(fs * cough_padding)
min_cough_samples = round(fs * min_cough_len)
cough_start = 0
cough_end = 0
cough_in_progress = False
tolerance = round(0.01 * fs)
below_th_counter = 0
for i, sample in enumerate(x ** 2):
if cough_in_progress:
if sample < seg_th_l:
below_th_counter += 1
if below_th_counter > tolerance:
cough_end = i + padding if (i + padding < len(x)) else len(x) - 1
cough_in_progress = False
if (cough_end + 1 - cough_start - 2 * padding > min_cough_samples):
coughSegments.append(x[cough_start:cough_end + 1])
cough_mask[cough_start:cough_end + 1] = True
elif i == (len(x) - 1):
cough_end = i
cough_in_progress = False
if (cough_end + 1 - cough_start - 2 * padding > min_cough_samples):
coughSegments.append(x[cough_start:cough_end + 1])
else:
below_th_counter = 0
else:
if sample > seg_th_h:
cough_start = i - padding if (i - padding >= 0) else 0
cough_in_progress = True
return coughSegments, cough_mask
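# Hedged usage sketch (the file path is hypothetical): load a recording with librosa,
# which is already imported above, and segment it into individual coughs.
def _example_segmentation(path='cough_recording.wav'):
    x, fs = librosa.load(path, sr=None)  # keep the native sample rate
    segments, mask = segment_cough(x, fs, cough_padding=0.2, min_cough_len=0.1)
    return segments, mask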
def normalize_audio(signal, fs, shouldTrim=True):
"""Normalizes and trims the audio.
Args:
signal (np.array): audio as a 1-D numpy array
fs (int): sample rate
Returns:
(np.array): normalized and trimmed audio
"""
frame_len = int(fs / 10) # 100 ms
    hop = int(frame_len / 2) # 50% overlap, i.e. a 50 ms hop length
# normalise the sound signal before processing
signal = signal / np.max(np.abs(signal))
# trim the signal to the appropriate length
if shouldTrim:
signal, _ = librosa.effects.trim(signal, frame_length=frame_len, hop_length=hop)
return signal
def upload_blob(bucket_name, source_object, destination_blob_name):
"""
Uploads a file object to the bucket.
Example Usage:
>>> Utils.upload_blob('cs329s-covid-user-coughs', recording, 'temp_data/user_cough.wav')
Args:
bucket_name (str): GCP storage bucket
source_object (any): object to be saved to GCP Storage
destination_blob_name (str): path and filename to save object to
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_string(source_object)
logging.info('File uploaded.', destination_blob_name)
| [
"google.cloud.storage.Client",
"numpy.abs",
"numpy.square",
"librosa.effects.trim",
"logging.info"
] | [((3826, 3842), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (3840, 3842), False, 'from google.cloud import storage\n'), ((3985, 4038), 'logging.info', 'logging.info', (['"""File uploaded."""', 'destination_blob_name'], {}), "('File uploaded.', destination_blob_name)\n", (3997, 4038), False, 'import logging\n'), ((3283, 3351), 'librosa.effects.trim', 'librosa.effects.trim', (['signal'], {'frame_length': 'frame_len', 'hop_length': 'hop'}), '(signal, frame_length=frame_len, hop_length=hop)\n', (3303, 3351), False, 'import librosa\n'), ((1313, 1325), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1322, 1325), True, 'import numpy as np\n'), ((3179, 3193), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (3185, 3193), True, 'import numpy as np\n')] |
import time
class FallEmergencyHandler:
def __init__(self, acceleration, notify_emergency):
self.acceleration = acceleration
self.notify = notify_emergency
def run_fall_emergency_handler(self):
while True:
status = self.acceleration.is_fallen()
time.sleep(0.05)
if status:
self.notify.notify_emergency_fallen()
time.sleep(4)
self.notify.notify_emergency_not_fallen()
for i in range(1000):
print("###############FALL DETECTED#######################")
| [
"time.sleep"
] | [((304, 320), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (314, 320), False, 'import time\n'), ((414, 427), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (424, 427), False, 'import time\n')] |
import cv2
import numpy as np
import traceback
from dataclasses import dataclass, field
from typing import List
# Set in the form (width, height)
default_roi = np.float32([
(550, 450), # Top-left corner
(160, 720), # Bottom-left corner
(1330, 720), # Bottom-right corner
(775, 450) # Top-right corner
])
@dataclass
class LaneLine:
type: str = ''
color: str = ''
is_solid: bool = False
curvature: float = 0.0
points: List[np.ndarray] = field(default_factory=list)
avg_fit: List[float] = field(default_factory=list)
@dataclass
class Lane:
left: LaneLine = field(default_factory=LaneLine)
right: LaneLine = field(default_factory=LaneLine)
is_host: bool = False
class LaneDetector:
def __init__(self):
width = 1280
height = 720
roi = default_roi
self.set_constants(width, height)
self.set_roi(roi)
# Each image step along the way
self.isolated_lane_lines = None
self.cropped_lane_lines = None
self.warped_frame = None
self.histogram = None
self.transformation_matrix = None
self.inv_transformation_matrix = None
self.lane_lines = []
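        # Fallback polynomial coefficients for when np.polyfit cannot fit a lane
        # line from the detected pixels (the right-hand fallback is applied in the
        # try/except blocks further below).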
self.fallback_right = np.array([0.0001018, -0.795, 40])
self.fallback_left = np.array([-0.000122, -0.15, 28])
# Best fit polynomial lines for left line and right line of the lane
self.left_fit = None
self.right_fit = None
self.left_lane_inds = None
self.right_lane_inds = None
self.ploty = None
self.left_fitx = None
self.right_fitx = None
self.leftx = None
self.rightx = None
self.lefty = None
self.righty = None
# Pixel parameters for x and y dimensions
self.YM_PER_PIX = 10.0 / 1000 # meters per pixel in y dimension
self.XM_PER_PIX = 3.7 / 781 # meters per pixel in x dimension
# Radii of curvature and offset
self.left_curvem = None
self.right_curvem = None
self.center_offset = None
def set_constants(self, width, height):
self.width = width
self.height = height
self.padding = int(0 * width) # padding from side of the image in pixels
self.desired_roi_points = np.float32([
[self.padding, 0], # Top-left corner
[self.padding, height], # Bottom-left corner
[width - self.padding, height], # Bottom-right corner
[width - self.padding, 0] # Top-right corner
])
# Sliding window parameters
self.no_of_windows = 10
self.margin = int((1/12) * width) # Window width is +/- margin
self.minpix = int((1/24) * width) # Min no. of pixels to recenter window
def set_roi(self, roi):
self.roi_points = roi
def set_roi_points(self, x, y):
"""
x: x-coordinates of the points [x1, x2, x3, x4]
y: y-coordinates of the points [y1, y2, y3, y4]
"""
self.roi_points = np.float32([(x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3])])
@staticmethod
def binary_array(array, thresh, value=0):
"""
Return a 2D binary array (mask) in which all pixels are either 0 or 1
:param array: NumPy 2D array that we want to convert to binary values
:param thresh: Values used for thresholding (inclusive)
:param value: Output value when between the supplied threshold
:return: Binary 2D array
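        Example (value=0): array=[[1, 5], [9, 2]], thresh=(3, 8) -> [[1, 0], [1, 1]]
        (only the value 5 falls inside the inclusive threshold range, so only that cell is zeroed)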
"""
if value == 0:
# Create an array of ones with the same shape and type as
# the input 2D array.
binary = np.ones_like(array)
else:
# Creates an array of zeros with the same shape and type as
# the input 2D array.
binary = np.zeros_like(array)
value = 1
# If value == 0, make all values in binary equal to 0 if the
# corresponding value in the input array is between the threshold
# (inclusive). Otherwise, the value remains as 1. Therefore, the pixels
# with the high Sobel derivative values (i.e. sharp pixel intensity
# discontinuities) will have 0 in the corresponding cell of binary.
binary[(array >= thresh[0]) & (array <= thresh[1])] = value
return binary
@staticmethod
def blur_gaussian(channel, ksize=3):
"""
Implementation for Gaussian blur to reduce noise and detail in the image
        :param channel: 2D or 3D channel data to be blurred
:param ksize: Size of the small matrix (i.e. kernel) used to blur
i.e. number of rows and number of columns
:return: Blurred 2D image
"""
return cv2.GaussianBlur(channel, (ksize, ksize), 0)
@staticmethod
def sobel(img_channel, orient='x', sobel_kernel=3):
"""
Find edges that are aligned vertically and horizontally on the image
:param img_channel: Channel from an image
:param orient: Across which axis of the image are we detecting edges?
        :param sobel_kernel: No. of rows and columns of the kernel (i.e. a 3x3 small matrix)
:return: Image with Sobel edge detection applied
"""
        # cv2.Sobel(input image, data type, order of the derivative x, order of the
# derivative y, small matrix used to calculate the derivative)
if orient == 'x':
# Will detect differences in pixel intensities going from
# left to right on the image (i.e. edges that are vertically aligned)
sobel = cv2.Sobel(img_channel, cv2.CV_64F, 1, 0, sobel_kernel)
if orient == 'y':
# Will detect differences in pixel intensities going from
# top to bottom on the image (i.e. edges that are horizontally aligned)
sobel = cv2.Sobel(img_channel, cv2.CV_64F, 0, 1, sobel_kernel)
return sobel
@staticmethod
def threshold(channel, thresh=(128,255), thresh_type=cv2.THRESH_BINARY):
"""
Apply a threshold to the input channel
:param channel: 2D array of the channel data of an image/video frame
:param thresh: 2D tuple of min and max threshold values
:param thresh_type: The technique of the threshold to apply
:return: Two outputs are returned:
ret: Threshold that was used
thresholded_image: 2D thresholded data.
"""
# If pixel intensity is greater than thresh[0], make that value
# white (255), else set it to black (0)
return cv2.threshold(channel, thresh[0], thresh[1], thresh_type)
def mag_thresh(self, image, sobel_kernel=3, thresh=(0, 255)):
"""
Implementation of Sobel edge detection
        :param image: 2D or 3D image array to run edge detection on
:param sobel_kernel: Size of the small matrix (i.e. kernel)
i.e. number of rows and columns
:return: Binary (black and white) 2D mask image
"""
# Get the magnitude of the edges that are vertically aligned on the image
sobelx = np.absolute(self.sobel(image, orient='x', sobel_kernel=sobel_kernel))
# Get the magnitude of the edges that are horizontally aligned on the image
sobely = np.absolute(self.sobel(image, orient='y', sobel_kernel=sobel_kernel))
# Find areas of the image that have the strongest pixel intensity changes
# in both the x and y directions. These have the strongest gradients and
# represent the strongest edges in the image (i.e. potential lane lines)
# mag is a 2D array .. number of rows x number of columns = number of pixels
# from top to bottom x number of pixels from left to right
mag = np.sqrt(sobelx ** 2 + sobely ** 2)
# Return a 2D array that contains 0s and 1s
return self.binary_array(mag, thresh)
def isolate_lanes(self, image):
""" Isolates the lane lines of the input image.
:param image: The raw image input to the pipeline.
:return: A binary image with the lane lines isolated.
"""
# White Color Mask
lower = np.uint8([200, 200, 200])
upper = np.uint8([255, 255, 255])
white_mask = cv2.inRange(image, lower, upper)
# Yellow Color Mask
lower = np.uint8([80, 150, 0])
upper = np.uint8([255, 255, 255])
yellow_mask = cv2.inRange(image, lower, upper)
# Combine Masks
mask = cv2.bitwise_or(white_mask, yellow_mask)
masked = cv2.bitwise_and(image, image, mask = mask)
_, thresh_img = cv2.threshold(masked, 10, 255, cv2.THRESH_BINARY)
return cv2.cvtColor(thresh_img, cv2.COLOR_BGR2GRAY)
def region_selection(self, image):
"""
Determine and cut the region of interest in the input image.
:param image: The input image from the pipeline.
"""
mask = np.zeros_like(image)
# Defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(image.shape) > 2:
channel_count = image.shape[2]
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
bottom_left = self.roi_points[1]
top_left = self.roi_points[0]
bottom_right = self.roi_points[2]
top_right = self.roi_points[3]
vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_image = cv2.bitwise_and(image, mask)
return masked_image
def calculate_car_position(self, print_to_terminal=False):
"""
Calculate the position of the car relative to the center
:param: print_to_terminal Display data to console if True
:return: Offset from the center of the lane
"""
# Assume the camera is centered in the image.
# Get position of car in centimeters
car_location = self.width / 2
        # Find the x coordinates of the lane line bottoms
height = self.height
        bottom_left = self.left_fit[0]*height**2 + self.left_fit[1]*height + self.left_fit[2]
        bottom_right = self.right_fit[0]*height**2 + self.right_fit[1]*height + self.right_fit[2]
center_lane = (bottom_right - bottom_left)/2 + bottom_left
        center_offset = (np.abs(car_location) - np.abs(center_lane)) * self.XM_PER_PIX * 100
# Display on terminal window and log
if print_to_terminal == True:
print(str(center_offset) + 'cm')
self.center_offset = center_offset
return center_offset
def calculate_curvature(self, print_to_terminal=False):
"""
Calculate the road curvature in meters.
:param: print_to_terminal Display data to console if True
:return: Radii of curvature
"""
# Set the y-value where we want to calculate the road curvature.
# Select the maximum y-value, which is the bottom of the frame.
y_eval = np.max(self.ploty)
# Fit polynomial curves to the real world environment
        left_fit_cr = np.polyfit(self.lefty * self.YM_PER_PIX, self.leftx * self.XM_PER_PIX, 2)
        right_fit_cr = np.polyfit(self.righty * self.YM_PER_PIX, self.rightx * self.XM_PER_PIX, 2)
# Calculate the radii of curvature
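        # For a lane line modelled as x = A*y**2 + B*y + C, the radius of curvature
        # is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated here at y = y_eval.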
        left_curvem = ((1 + (2*left_fit_cr[0]*y_eval*self.YM_PER_PIX + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
        right_curvem = ((1 + (2*right_fit_cr[0]*y_eval*self.YM_PER_PIX + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Display on terminal window and log
if print_to_terminal == True:
print(left_curvem, 'm', right_curvem, 'm')
self.left_curvem = left_curvem
self.right_curvem = right_curvem
return left_curvem, right_curvem
def calculate_histogram(self, frame):
"""
Calculate the image histogram to find peaks in white pixel count
:param frame: The warped image
        :return: Histogram of white-pixel counts per column (also stored on self.histogram)
"""
# Generate the histogram
        self.histogram = np.sum(frame[int(frame.shape[0]/2):, :], axis=0)
return self.histogram
def display_curvature_offset(self, frame):
"""
Display curvature and offset statistics on the image
:param: plot Display the plot if True
:return: Image with lane lines and curvature
"""
image_copy = frame.copy()
# cv2.putText(image_copy,'Curve Radius: '+str((
# self.left_curvem+self.right_curvem)/2)[:7]+' m', (int((
# 5/600)*self.width), int((
# 20/338)*self.height)), cv2.FONT_HERSHEY_SIMPLEX, (float((
# 0.5/600)*self.width)),(
# 255,255,255),2,cv2.LINE_AA)
# cv2.putText(image_copy,'Center Offset: '+str(
# self.center_offset)[:7]+' cm', (int((
# 5/600)*self.width), int((
# 40/338)*self.height)), cv2.FONT_HERSHEY_SIMPLEX, (float((
# 0.5/600)*self.width)),(
# 255,255,255),2,cv2.LINE_AA)
return image_copy
def get_lane_line_previous_window(self, left_fit, right_fit, plot=False):
"""
Use the lane line from the previous sliding window to get the parameters
for the polynomial line for filling in the lane line
:param: left_fit Polynomial function of the left lane line
:param: right_fit Polynomial function of the right lane line
:param: plot To display an image or not
"""
# margin is a sliding window parameter
margin = self.margin
# Find the x and y coordinates of all the nonzero
# (i.e. white) pixels in the frame.
nonzero = self.warped_frame.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Store left and right lane pixel indices
        left_lane_inds = (
            (nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) &
            (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
        right_lane_inds = (
            (nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) &
            (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
self.left_lane_inds = left_lane_inds
self.right_lane_inds = right_lane_inds
# Get the left and right lane line pixel locations
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
self.leftx = leftx
self.rightx = rightx
self.lefty = lefty
self.righty = righty
# Fit a second order polynomial curve to each lane line
try:
left_fit = np.polyfit(lefty, leftx, 2)
except TypeError:
left_fit = np.array([0,0,0])
try:
right_fit = np.polyfit(righty, rightx, 2)
except TypeError:
right_fit = self.fallback_right
self.left_fit = left_fit
self.right_fit = right_fit
# Create the x and y values to plot on the image
ploty = np.linspace(
0, self.warped_frame.shape[0]-1, self.warped_frame.shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
self.ploty = ploty
self.left_fitx = left_fitx
self.right_fitx = right_fitx
if plot==True:
# Generate images to draw on
            out_img = np.dstack((self.warped_frame, self.warped_frame, self.warped_frame)) * 255
window_img = np.zeros_like(out_img)
# Add color to the left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
            out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Create a polygon to show the search window area, and recast
# the x and y points into a usable format for cv2.fillPoly()
margin = self.margin
left_line_window1 = np.array([np.transpose(np.vstack([
left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([
left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([
right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([
right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
def get_lane_line_indices_sliding_windows(self, frame):
"""
Get the indices of the lane line pixels using the
sliding windows technique.
        :param frame: The binary warped frame to search for lane-line pixels
:return: Best fit lines for the left and right lines of the current lane
"""
# Sliding window width is +/- margin
margin = self.margin
frame_sliding_window = frame.copy()
# Set the height of the sliding windows
window_height = int(self.warped_frame.shape[0]/self.no_of_windows)
# Find the x and y coordinates of all the nonzero
# (i.e. white) pixels in the frame.
nonzero = frame.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Store the pixel indices for the left and right lane lines
left_lane_inds = []
right_lane_inds = []
# Current positions for pixel indices for each window,
# which we will continue to update
leftx_base, rightx_base = self.histogram_peak()
leftx_current = leftx_base
rightx_current = rightx_base
# Go through one window at a time
no_of_windows = self.no_of_windows
for window in range(no_of_windows):
# Identify window boundaries in x and y (and right and left)
win_y_low = self.warped_frame.shape[0] - (window + 1) * window_height
win_y_high = self.warped_frame.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(frame_sliding_window,(win_xleft_low,win_y_low),(
win_xleft_high,win_y_high), (255,255,255), 2)
cv2.rectangle(frame_sliding_window,(win_xright_low,win_y_low),(
win_xright_high,win_y_high), (255,255,255), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (
nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (
nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on mean position
minpix = self.minpix
if len(good_left_inds) > minpix:
leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract the pixel coordinates for the left and right lane lines
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial curve to the pixel coordinates for
# the left and right lane lines
try:
left_fit = np.polyfit(lefty, leftx, 2)
except TypeError:
left_fit = np.array([0,0,0])
try:
right_fit = np.polyfit(righty, rightx, 2)
except TypeError:
right_fit = self.fallback_right
self.left_fit = left_fit
self.right_fit = right_fit
return self.left_fit, self.right_fit
def get_line_markings(self, frame):
"""
Isolates lane lines.
:param frame: The camera frame that contains the lanes we want to detect
:return: Binary (i.e. black and white) image containing the lane lines.
"""
# Convert the video frame from BGR (blue, green, red)
# color space to HLS (hue, saturation, lightness).
hls = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
################### Isolate possible lane line edges ######################
# Perform Sobel edge detection on the L (lightness) channel of
# the image to detect sharp discontinuities in the pixel intensities
# along the x and y axis of the video frame.
# sxbinary is a matrix full of 0s (black) and 255 (white) intensity values
# Relatively light pixels get made white. Dark pixels get made black.
_, sxbinary = self.threshold(hls[:, :, 1], thresh=(80, 255))
sxbinary = self.blur_gaussian(sxbinary, ksize=3) # Reduce noise
# 1s will be in the cells with the highest Sobel derivative values
# (i.e. strongest lane line edges)
sxbinary = self.mag_thresh(sxbinary, sobel_kernel=3, thresh=(120, 255))
######################## Isolate possible lane lines ######################
# Perform binary thresholding on the S (saturation) channel
# of the video frame. A high saturation value means the hue color is pure.
# We expect lane lines to be nice, pure colors (i.e. solid white, yellow)
# and have high saturation channel values.
        # s_binary is a matrix full of 0s (black) and 255 (white) intensity values
# White in the regions with the purest hue colors (e.g. >80...play with
# this value for best results).
s_channel = hls[:, :, 2] # use only the saturation channel data
_, s_binary = self.threshold(s_channel, (75, 255))
# Perform binary thresholding on the R (red) channel of the
# original BGR video frame.
# r_thresh is a matrix full of 0s (black) and 255 (white) intensity values
# White in the regions with the richest red channel values (e.g. >120).
# Remember, pure white is bgr(255, 255, 255).
# Pure yellow is bgr(0, 255, 255). Both have high red channel values.
_, r_thresh = self.threshold(frame[:, :, 2], thresh=(110, 255))
# Lane lines should be pure in color and have high red channel values
# Bitwise AND operation to reduce noise and black-out any pixels that
# don't appear to be nice, pure, solid colors (like white or yellow lane
# lines.)
rs_binary = cv2.bitwise_and(s_binary, r_thresh)
### Combine the possible lane lines with the possible lane line edges #####
# If you show rs_binary visually, you'll see that it is not that different
# from this return value. The edges of lane lines are thin lines of pixels.
        self.lane_line_markings = cv2.bitwise_or(rs_binary, sxbinary.astype(np.uint8))
return self.lane_line_markings
def histogram_peak(self):
"""
Get the left and right peak of the histogram
Return the x coordinate of the left histogram peak and the right histogram
peak.
"""
midpoint = int(self.histogram.shape[0]/2)
leftx_base = np.argmax(self.histogram[:midpoint])
rightx_base = np.argmax(self.histogram[midpoint:]) + midpoint
# (x coordinate of left peak, x coordinate of right peak)
return leftx_base, rightx_base
def perspective_transform(self, frame):
"""
Perform the perspective transform.
        :param frame: Current frame
:return: Bird's eye view of the current lane
"""
# Calculate the transformation matrix
self.transformation_matrix = cv2.getPerspectiveTransform(
self.roi_points, self.desired_roi_points)
# Calculate the inverse transformation matrix
self.inv_transformation_matrix = cv2.getPerspectiveTransform(
self.desired_roi_points, self.roi_points)
# Perform the transform using the transformation matrix
        self.warped_frame = cv2.warpPerspective(
            frame, self.transformation_matrix, (self.width, self.height),
            flags=cv2.INTER_LINEAR)
# Convert image to binary
(thresh, binary_warped) = cv2.threshold(
self.warped_frame, 180, 255, cv2.THRESH_BINARY)
self.warped_frame = binary_warped
return self.warped_frame
def plot_roi(self, frame):
"""
Plot the region of interest on an image.
        :param frame: The current image frame
"""
overlay = frame.copy()
# Overlay trapezoid on the frame
roi_image = cv2.polylines(overlay, np.int32([
self.roi_points]), True, (147, 20, 255), 3)
desired_roi_image = cv2.polylines(overlay, np.int32([
self.desired_roi_points]), True, (147, 20, 255), 3)
# Display the image
cv2.imshow('ROI Image', roi_image)
def get_host_lane(self, frame):
return self.host_lane
def detect_lanes(self, frame):
""" Detect Lane Lines in an image. """
try:
isolated1 = self.get_line_markings(frame)
isolated2 = self.isolate_lanes(frame)
self.isolated_lane_lines = cv2.addWeighted(isolated1, 1, isolated2, 1, 0)
self.cropped_lane_lines = self.region_selection(self.isolated_lane_lines)
self.warped_frame = self.perspective_transform(self.cropped_lane_lines)
self.histogram = self.calculate_histogram(self.warped_frame)
self.left_fit, self.right_fit = self.get_lane_line_indices_sliding_windows(self.warped_frame)
self.calculate_car_position(False)
self.get_lane_line_previous_window(self.left_fit, self.right_fit)
except Exception as e:
raise e
def overlay_detections(self, frame):
"""
Overlay lane lines on the original frame
        :param frame: The original camera frame to draw the detected lane on
:return: Lane with overlay
"""
overlay = frame.copy()
# Generate an image to draw the lane lines on
warp_zero = np.zeros(self.warped_frame.shape).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# print("Left", end="")
# print(self.left_fit)
# print("Right", end="")
# print(self.right_fit)
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([
self.left_fitx, self.ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([
self.right_fitx, self.ploty])))])
pts = np.hstack((pts_left, pts_right))
midpoints = [] # this is a tuple
waypoints = [] # this is just the point
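        # Pair each left-lane point with the right-lane point on the same row
        # (pts_right was flipped above, hence the reversed index) and use the mean
        # x value as the lane centre in the warped, top-down view.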
for row in range(len(pts_left[0])):
left_pt = pts_left[0][row][0]
right_pt = pts_right[0][len(pts_left[0]) - row - 1][0]
midpoint = (right_pt + left_pt) / 2
midpoints.append((midpoint, row))
waypoints.append(midpoint)
# Find the values that define the line of best fit
# TODO: This can be improved with something like RANSAC
# Take the points that define the line.
# left_p1_x = int(self.left_fitx[0])
# left_p1_y = 0
# left_p2_x = int(self.left_fitx[-1])
# left_p2_y = 480
# right_p1_x = int(self.right_fitx[0])
# right_p1_y = 0
# right_p2_x = int(self.right_fitx[-1])
# right_p2_y = 480
# # Determine what points are the top and bottom (for midpoint calculation)
# if (left_p1_y > left_p2_y):
# left_top_x = left_p2_x
# left_top_y = left_p2_y
# left_bottom_x = left_p1_x
# left_bottom_y = left_p1_y
# else:
# left_top_x = left_p1_x
# left_top_y = left_p1_y
# left_bottom_x = left_p2_x
# left_bottom_y = left_p2_y
# if (right_p1_y > right_p2_y):
# right_top_x = right_p2_x
# right_top_y = right_p2_y
# right_bottom_x = right_p1_x
# right_bottom_y = right_p1_y
# else:
# right_top_x = right_p1_x
# right_top_y = right_p1_y
# right_bottom_x = right_p2_x
# right_bottom_y = right_p2_y
# midpoint_top_x = int((right_top_x + left_top_x) / 2)
# midpoint_top_y = left_top_y if left_top_y < right_top_y else right_top_y
# midpoint_bottom_x = int(color_warp.shape[1] / 2)
# midpoint_bottom_y = int(color_warp.shape[0])
# Draw lane on the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
cv2.polylines(color_warp, np.int_([pts]), True, (0, 0, 255), 3)
cv2.polylines(color_warp, np.int_([midpoints]), False, (0, 255, 255), 3)
# cv2.line(color_warp, (left_bottom_x, left_bottom_y), (left_top_x, left_top_y), (255, 0, 0), 10)
# cv2.line(color_warp, (right_bottom_x, right_bottom_y), (right_top_x, right_top_y), (255, 0, 0), 10)
# cv2.line(color_warp, (midpoint_bottom_x, midpoint_bottom_y), (midpoint_top_x, midpoint_top_y), (0, 0, 255), 10)
# Warp the blank back to original image space using inverse perspective matrix
newwarp = cv2.warpPerspective(color_warp, self.inv_transformation_matrix, (self.width, self.height))
# TODO: Remap the line points to the camera coordinate
# Combine the result with the original image
result = cv2.addWeighted(overlay, 1, newwarp, 0.6, 0)
# cv2.imshow("Test", color_warp)
# cv2.waitKey(1)
self.lanes_top_view = color_warp
self.lane_pts_top_view = pts
self.lanes_camera_view = result
self.waypoints = np.flip(waypoints)
return result
def print_detections(self):
for line in self.lane_lines:
print(f'Lane: {line.type}\tColor: {line.color}\tCurvature: {line.curvature}')
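# Minimal usage sketch (assumptions: a BGR frame of roughly 1280x720 read with
# cv2.imread or taken from cv2.VideoCapture; 'road.jpg' is a placeholder path):
#   detector = LaneDetector()
#   frame = cv2.imread('road.jpg')
#   detector.detect_lanes(frame)
#   annotated = detector.overlay_detections(frame)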
| [
"numpy.uint8",
"cv2.rectangle",
"numpy.sqrt",
"numpy.polyfit",
"numpy.hstack",
"numpy.int32",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.bitwise_or",
"numpy.flip",
"numpy.mean",
"cv2.threshold",
"numpy.max",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"dataclasses.field",
"cv2.fillPoly",
"numpy.abs",
"cv2.getPerspectiveTransform",
"numpy.argmax",
"cv2.cvtColor",
"numpy.int_",
"cv2.GaussianBlur",
"numpy.dstack",
"numpy.ones_like",
"cv2.inRange",
"numpy.absolute",
"cv2.bitwise_and",
"numpy.zeros",
"numpy.zeros_like",
"numpy.float32",
"cv2.Sobel"
] | [((161, 222), 'numpy.float32', 'np.float32', (['[(550, 450), (160, 720), (1330, 720), (775, 450)]'], {}), '([(550, 450), (160, 720), (1330, 720), (775, 450)])\n', (171, 222), True, 'import numpy as np\n'), ((468, 495), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (473, 495), False, 'from dataclasses import dataclass, field\n'), ((520, 547), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (525, 547), False, 'from dataclasses import dataclass, field\n'), ((591, 622), 'dataclasses.field', 'field', ([], {'default_factory': 'LaneLine'}), '(default_factory=LaneLine)\n', (596, 622), False, 'from dataclasses import dataclass, field\n'), ((642, 673), 'dataclasses.field', 'field', ([], {'default_factory': 'LaneLine'}), '(default_factory=LaneLine)\n', (647, 673), False, 'from dataclasses import dataclass, field\n'), ((1129, 1162), 'numpy.array', 'np.array', (['[0.0001018, -0.795, 40]'], {}), '([0.0001018, -0.795, 40])\n', (1137, 1162), True, 'import numpy as np\n'), ((1186, 1218), 'numpy.array', 'np.array', (['[-0.000122, -0.15, 28]'], {}), '([-0.000122, -0.15, 28])\n', (1194, 1218), True, 'import numpy as np\n'), ((2036, 2155), 'numpy.float32', 'np.float32', (['[[self.padding, 0], [self.padding, height], [width - self.padding, height],\n [width - self.padding, 0]]'], {}), '([[self.padding, 0], [self.padding, height], [width - self.\n padding, height], [width - self.padding, 0]])\n', (2046, 2155), True, 'import numpy as np\n'), ((2697, 2765), 'numpy.float32', 'np.float32', (['[(x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3])]'], {}), '([(x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3])])\n', (2707, 2765), True, 'import numpy as np\n'), ((4201, 4245), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['channel', '(ksize, ksize)', '(0)'], {}), '(channel, (ksize, ksize), 0)\n', (4217, 4245), False, 'import cv2\n'), ((5826, 5883), 'cv2.threshold', 'cv2.threshold', (['channel', 'thresh[0]', 'thresh[1]', 'thresh_type'], {}), '(channel, thresh[0], thresh[1], thresh_type)\n', (5839, 5883), False, 'import cv2\n'), ((6903, 6937), 'numpy.sqrt', 'np.sqrt', (['(sobelx ** 2 + sobely ** 2)'], {}), '(sobelx ** 2 + sobely ** 2)\n', (6910, 6937), True, 'import numpy as np\n'), ((7264, 7289), 'numpy.uint8', 'np.uint8', (['[200, 200, 200]'], {}), '([200, 200, 200])\n', (7272, 7289), True, 'import numpy as np\n'), ((7300, 7325), 'numpy.uint8', 'np.uint8', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (7308, 7325), True, 'import numpy as np\n'), ((7341, 7373), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (7352, 7373), False, 'import cv2\n'), ((7407, 7429), 'numpy.uint8', 'np.uint8', (['[80, 150, 0]'], {}), '([80, 150, 0])\n', (7415, 7429), True, 'import numpy as np\n'), ((7441, 7466), 'numpy.uint8', 'np.uint8', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (7449, 7466), True, 'import numpy as np\n'), ((7483, 7515), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (7494, 7515), False, 'import cv2\n'), ((7544, 7583), 'cv2.bitwise_or', 'cv2.bitwise_or', (['white_mask', 'yellow_mask'], {}), '(white_mask, yellow_mask)\n', (7558, 7583), False, 'import cv2\n'), ((7595, 7635), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (7610, 7635), False, 'import cv2\n'), ((7657, 7706), 'cv2.threshold', 'cv2.threshold', (['masked', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(masked, 
10, 255, cv2.THRESH_BINARY)\n', (7670, 7706), False, 'import cv2\n'), ((7717, 7761), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_img', 'cv2.COLOR_BGR2GRAY'], {}), '(thresh_img, cv2.COLOR_BGR2GRAY)\n', (7729, 7761), False, 'import cv2\n'), ((7935, 7955), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (7948, 7955), True, 'import numpy as np\n'), ((8355, 8431), 'numpy.array', 'np.array', (['[[bottom_left, top_left, top_right, bottom_right]]'], {'dtype': 'np.int32'}), '([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)\n', (8363, 8431), True, 'import numpy as np\n'), ((8434, 8481), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'vertices', 'ignore_mask_color'], {}), '(mask, vertices, ignore_mask_color)\n', (8446, 8481), False, 'import cv2\n'), ((8499, 8527), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (8514, 8527), False, 'import cv2\n'), ((9861, 9879), 'numpy.max', 'np.max', (['self.ploty'], {}), '(self.ploty)\n', (9867, 9879), True, 'import numpy as np\n'), ((9958, 10031), 'numpy.polyfit', 'np.polyfit', (['(self.lefty * self.YM_PER_PIX)', '(self.leftx * self.XM_PER_PIX)', '(2)'], {}), '(self.lefty * self.YM_PER_PIX, self.leftx * self.XM_PER_PIX, 2)\n', (9968, 10031), True, 'import numpy as np\n'), ((10055, 10130), 'numpy.polyfit', 'np.polyfit', (['(self.righty * self.YM_PER_PIX)', '(self.rightx * self.XM_PER_PIX)', '(2)'], {}), '(self.righty * self.YM_PER_PIX, self.rightx * self.XM_PER_PIX, 2)\n', (10065, 10130), True, 'import numpy as np\n'), ((12422, 12442), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (12430, 12442), True, 'import numpy as np\n'), ((12456, 12476), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (12464, 12476), True, 'import numpy as np\n'), ((13750, 13824), 'numpy.linspace', 'np.linspace', (['(0)', '(self.warped_frame.shape[0] - 1)', 'self.warped_frame.shape[0]'], {}), '(0, self.warped_frame.shape[0] - 1, self.warped_frame.shape[0])\n', (13761, 13824), True, 'import numpy as np\n'), ((16124, 16144), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (16132, 16144), True, 'import numpy as np\n'), ((16158, 16178), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (16166, 16178), True, 'import numpy as np\n'), ((18182, 18212), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (18196, 18212), True, 'import numpy as np\n'), ((18233, 18264), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (18247, 18264), True, 'import numpy as np\n'), ((19253, 19291), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HLS'], {}), '(frame, cv2.COLOR_BGR2HLS)\n', (19265, 19291), False, 'import cv2\n'), ((21378, 21413), 'cv2.bitwise_and', 'cv2.bitwise_and', (['s_binary', 'r_thresh'], {}), '(s_binary, r_thresh)\n', (21393, 21413), False, 'import cv2\n'), ((22023, 22059), 'numpy.argmax', 'np.argmax', (['self.histogram[:midpoint]'], {}), '(self.histogram[:midpoint])\n', (22032, 22059), True, 'import numpy as np\n'), ((22509, 22578), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['self.roi_points', 'self.desired_roi_points'], {}), '(self.roi_points, self.desired_roi_points)\n', (22536, 22578), False, 'import cv2\n'), ((22679, 22748), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['self.desired_roi_points', 'self.roi_points'], {}), '(self.desired_roi_points, self.roi_points)\n', (22706, 22748), False, 'import cv2\n'), ((22835, 22945), 
'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'self.transformation_matrix', '(self.width, self.height)'], {'flags': 'cv2.INTER_LINEAR'}), '(frame, self.transformation_matrix, (self.width, self.\n height), flags=cv2.INTER_LINEAR)\n', (22854, 22945), False, 'import cv2\n'), ((23010, 23071), 'cv2.threshold', 'cv2.threshold', (['self.warped_frame', '(180)', '(255)', 'cv2.THRESH_BINARY'], {}), '(self.warped_frame, 180, 255, cv2.THRESH_BINARY)\n', (23023, 23071), False, 'import cv2\n'), ((23627, 23661), 'cv2.imshow', 'cv2.imshow', (['"""ROI Image"""', 'roi_image'], {}), "('ROI Image', roi_image)\n", (23637, 23661), False, 'import cv2\n'), ((24745, 24789), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (24754, 24789), True, 'import numpy as np\n'), ((25183, 25215), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (25192, 25215), True, 'import numpy as np\n'), ((27474, 27568), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'self.inv_transformation_matrix', '(self.width, self.height)'], {}), '(color_warp, self.inv_transformation_matrix, (self.width,\n self.height))\n', (27493, 27568), False, 'import cv2\n'), ((27683, 27727), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', '(1)', 'newwarp', '(0.6)', '(0)'], {}), '(overlay, 1, newwarp, 0.6, 0)\n', (27698, 27727), False, 'import cv2\n'), ((27901, 27919), 'numpy.flip', 'np.flip', (['waypoints'], {}), '(waypoints)\n', (27908, 27919), True, 'import numpy as np\n'), ((3252, 3271), 'numpy.ones_like', 'np.ones_like', (['array'], {}), '(array)\n', (3264, 3271), True, 'import numpy as np\n'), ((3387, 3407), 'numpy.zeros_like', 'np.zeros_like', (['array'], {}), '(array)\n', (3400, 3407), True, 'import numpy as np\n'), ((4958, 5012), 'cv2.Sobel', 'cv2.Sobel', (['img_channel', 'cv2.CV_64F', '(1)', '(0)', 'sobel_kernel'], {}), '(img_channel, cv2.CV_64F, 1, 0, sobel_kernel)\n', (4967, 5012), False, 'import cv2\n'), ((5181, 5235), 'cv2.Sobel', 'cv2.Sobel', (['img_channel', 'cv2.CV_64F', '(0)', '(1)', 'sobel_kernel'], {}), '(img_channel, cv2.CV_64F, 0, 1, sobel_kernel)\n', (5190, 5235), False, 'import cv2\n'), ((10286, 10317), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (10297, 10317), True, 'import numpy as np\n'), ((10434, 10466), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (10445, 10466), True, 'import numpy as np\n'), ((13429, 13456), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (13439, 13456), True, 'import numpy as np\n'), ((13536, 13565), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (13546, 13565), True, 'import numpy as np\n'), ((14231, 14253), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (14244, 14253), True, 'import numpy as np\n'), ((14897, 14946), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (14906, 14946), True, 'import numpy as np\n'), ((15194, 15245), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (15203, 15245), True, 'import numpy as np\n'), ((15446, 15493), 'cv2.addWeighted', 'cv2.addWeighted', (['out_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(out_img, 1, window_img, 0.3, 0)\n', (15461, 15493), False, 'import cv2\n'), ((17009, 17127), 
'cv2.rectangle', 'cv2.rectangle', (['frame_sliding_window', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(255, 255, 255)', '(2)'], {}), '(frame_sliding_window, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (255, 255, 255), 2)\n', (17022, 17127), False, 'import cv2\n'), ((17125, 17245), 'cv2.rectangle', 'cv2.rectangle', (['frame_sliding_window', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(255, 255, 255)', '(2)'], {}), '(frame_sliding_window, (win_xright_low, win_y_low), (\n win_xright_high, win_y_high), (255, 255, 255), 2)\n', (17138, 17245), False, 'import cv2\n'), ((18610, 18637), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (18620, 18637), True, 'import numpy as np\n'), ((18717, 18746), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (18727, 18746), True, 'import numpy as np\n'), ((22076, 22112), 'numpy.argmax', 'np.argmax', (['self.histogram[midpoint:]'], {}), '(self.histogram[midpoint:])\n', (22085, 22112), True, 'import numpy as np\n'), ((23429, 23456), 'numpy.int32', 'np.int32', (['[self.roi_points]'], {}), '([self.roi_points])\n', (23437, 23456), True, 'import numpy as np\n'), ((23536, 23571), 'numpy.int32', 'np.int32', (['[self.desired_roi_points]'], {}), '([self.desired_roi_points])\n', (23544, 23571), True, 'import numpy as np\n'), ((23922, 23968), 'cv2.addWeighted', 'cv2.addWeighted', (['isolated1', '(1)', 'isolated2', '(1)', '(0)'], {}), '(isolated1, 1, isolated2, 1, 0)\n', (23937, 23968), False, 'import cv2\n'), ((26889, 26903), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (26896, 26903), True, 'import numpy as np\n'), ((26946, 26960), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (26953, 26960), True, 'import numpy as np\n'), ((27012, 27032), 'numpy.int_', 'np.int_', (['[midpoints]'], {}), '([midpoints])\n', (27019, 27032), True, 'import numpy as np\n'), ((13491, 13510), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (13499, 13510), True, 'import numpy as np\n'), ((14125, 14193), 'numpy.dstack', 'np.dstack', (['(self.warped_frame, self.warped_frame, self.warped_frame)'], {}), '((self.warped_frame, self.warped_frame, self.warped_frame))\n', (14134, 14193), True, 'import numpy as np\n'), ((15329, 15353), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (15336, 15353), True, 'import numpy as np\n'), ((15395, 15420), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (15402, 15420), True, 'import numpy as np\n'), ((18672, 18691), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18680, 18691), True, 'import numpy as np\n'), ((24679, 24712), 'numpy.zeros', 'np.zeros', (['self.warped_frame.shape'], {}), '(self.warped_frame.shape)\n', (24687, 24712), True, 'import numpy as np\n'), ((9266, 9286), 'numpy.abs', 'np.abs', (['car_location'], {}), '(car_location)\n', (9272, 9286), True, 'import numpy as np\n'), ((9289, 9308), 'numpy.abs', 'np.abs', (['center_lane'], {}), '(center_lane)\n', (9295, 9308), True, 'import numpy as np\n'), ((17972, 18005), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (17979, 18005), True, 'import numpy as np\n'), ((18077, 18111), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (18084, 18111), True, 'import numpy as np\n'), ((25012, 25051), 'numpy.vstack', 'np.vstack', (['[self.left_fitx, self.ploty]'], {}), '([self.left_fitx, 
self.ploty])\n', (25021, 25051), True, 'import numpy as np\n'), ((14703, 14741), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (14712, 14741), True, 'import numpy as np\n'), ((14994, 15033), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (15003, 15033), True, 'import numpy as np\n'), ((25116, 25156), 'numpy.vstack', 'np.vstack', (['[self.right_fitx, self.ploty]'], {}), '([self.right_fitx, self.ploty])\n', (25125, 25156), True, 'import numpy as np\n'), ((14818, 14856), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (14827, 14856), True, 'import numpy as np\n'), ((15112, 15151), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (15121, 15151), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
from typing import (
Any,
Callable,
Collection,
Iterable,
Generic,
Tuple,
TypeVar,
)
from .store import Store
T = TypeVar('T')
class MulSetView:
"""Base class for MulSet views."""
__metaclass__ = ABCMeta
__slots__ = ('mulset', )
def __init__(self, mulset: 'MulSet'):
self.mulset = mulset
def __repr__(self):
return '{0.__class__.__name__}({0.mulset!r})'.format(self)
def __len__(self):
return len(self.mulset)
@abstractmethod
def __iter__(self):
raise NotImplementedError
@abstractmethod
def __contains__(self, elem):
raise NotImplementedError
def mulset_factory(
mutable: bool = True,
orderable: bool = False,
unique: bool = False,
):
raise NotImplementedError
def mulset(iterable: Iterable[T] = None, **kwargs):
cls = mulset_factory(**kwargs)
return cls(iterable)
# class MulSet(Generic[T], Collection):
class MulSet(Collection[T]):
"""
"""
def __init__(self, iterable: Iterable[T]):
self.store = Store(iterable)
@abstractmethod
def copy(self) -> 'MulSet[T]':
raise NotImplementedError
class MutableMulSet(MulSet):
pass
| [
"typing.TypeVar"
] | [((163, 175), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (170, 175), False, 'from typing import Any, Callable, Collection, Iterable, Generic, Tuple, TypeVar\n')] |
from mock import patch, Mock
from django import test
from django.core import exceptions
from django_google_maps import fields
class GeoPtFieldTests(test.TestCase):
def test_sets_lat_lon_on_initialization(self):
geo_pt = fields.GeoPt("15.001,32.001")
self.assertEqual(15.001, geo_pt.lat)
self.assertEqual(32.001, geo_pt.lon)
def test_uses_lat_comma_lon_as_unicode_representation(self):
lat_lon_string = "15.001,32.001"
geo_pt = fields.GeoPt(lat_lon_string)
self.assertEqual(lat_lon_string, unicode(geo_pt))
def test_two_GeoPts_with_same_lat_lon_should_be_equal(self):
geo_pt_1 = fields.GeoPt("15.001,32.001")
geo_pt_2 = fields.GeoPt("15.001,32.001")
self.assertEqual(geo_pt_1, geo_pt_2)
def test_two_GeoPts_with_different_lat_should_not_be_equal(self):
geo_pt_1 = fields.GeoPt("15.001,32.001")
geo_pt_2 = fields.GeoPt("20.001,32.001")
self.assertNotEqual(geo_pt_1, geo_pt_2)
def test_two_GeoPts_with_different_lon_should_not_be_equal(self):
geo_pt_1 = fields.GeoPt("15.001,32.001")
geo_pt_2 = fields.GeoPt("15.001,62.001")
self.assertNotEqual(geo_pt_1, geo_pt_2)
def test_is_not_equal_when_comparison_is_not_GeoPt_object(self):
geo_pt_1 = fields.GeoPt("15.001,32.001")
geo_pt_2 = "15.001,32.001"
self.assertNotEqual(geo_pt_1, geo_pt_2)
def test_allows_GeoPt_instantiated_with_empty_string(self):
geo_pt = fields.GeoPt('')
self.assertEqual(None, geo_pt.lat)
self.assertEqual(None, geo_pt.lon)
def test_uses_empty_string_as_unicode_representation_for_empty_GeoPt(self):
geo_pt = fields.GeoPt('')
self.assertEqual('', unicode(geo_pt))
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_splits_geo_point_on_comma(self):
lat, lon = fields.GeoPt(Mock())._split_geo_point("15.001,32.001")
self.assertEqual('15.001', lat)
self.assertEqual('32.001', lon)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_error_when_attribute_error_on_split(self):
geo_point = Mock()
geo_point.split.side_effect = AttributeError
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._split_geo_point, geo_point)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_error_when_type_error_on_split(self):
geo_point = Mock()
geo_point.split.side_effect = ValueError
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._split_geo_point, geo_point)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_returns_float_value_when_valid_value(self):
geo_pt = fields.GeoPt(Mock())
val = geo_pt._validate_geo_range('45.005', 90)
self.assertEqual(45.005, val)
self.assertIsInstance(val, float)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_exception_when_type_error(self):
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._validate_geo_range, object, 90)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_exception_when_value_error(self):
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._validate_geo_range, 'a', 90)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_exception_when_value_is_out_of_upper_range(self):
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._validate_geo_range, '90.01', 90)
@patch("django_google_maps.fields.GeoPt.__init__", Mock(return_value=None))
def test_raises_exception_when_value_is_out_of_lower_range(self):
geo_pt = fields.GeoPt(Mock())
self.assertRaises(exceptions.ValidationError, geo_pt._validate_geo_range, '-90.01', 90)
| [
"mock.Mock",
"django_google_maps.fields.GeoPt"
] | [((236, 265), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (248, 265), False, 'from django_google_maps import fields\n'), ((480, 508), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['lat_lon_string'], {}), '(lat_lon_string)\n', (492, 508), False, 'from django_google_maps import fields\n'), ((652, 681), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (664, 681), False, 'from django_google_maps import fields\n'), ((701, 730), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (713, 730), False, 'from django_google_maps import fields\n'), ((866, 895), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (878, 895), False, 'from django_google_maps import fields\n'), ((915, 944), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""20.001,32.001"""'], {}), "('20.001,32.001')\n", (927, 944), False, 'from django_google_maps import fields\n'), ((1083, 1112), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (1095, 1112), False, 'from django_google_maps import fields\n'), ((1132, 1161), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,62.001"""'], {}), "('15.001,62.001')\n", (1144, 1161), False, 'from django_google_maps import fields\n'), ((1299, 1328), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['"""15.001,32.001"""'], {}), "('15.001,32.001')\n", (1311, 1328), False, 'from django_google_maps import fields\n'), ((1494, 1510), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['""""""'], {}), "('')\n", (1506, 1510), False, 'from django_google_maps import fields\n'), ((1695, 1711), 'django_google_maps.fields.GeoPt', 'fields.GeoPt', (['""""""'], {}), "('')\n", (1707, 1711), False, 'from django_google_maps import fields\n'), ((1814, 1837), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (1818, 1837), False, 'from mock import patch, Mock\n'), ((2203, 2209), 'mock.Mock', 'Mock', ([], {}), '()\n', (2207, 2209), False, 'from mock import patch, Mock\n'), ((2095, 2118), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (2099, 2118), False, 'from mock import patch, Mock\n'), ((2551, 2557), 'mock.Mock', 'Mock', ([], {}), '()\n', (2555, 2557), False, 'from mock import patch, Mock\n'), ((2448, 2471), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (2452, 2471), False, 'from mock import patch, Mock\n'), ((2792, 2815), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (2796, 2815), False, 'from mock import patch, Mock\n'), ((3103, 3126), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3107, 3126), False, 'from mock import patch, Mock\n'), ((3369, 3392), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3373, 3392), False, 'from mock import patch, Mock\n'), ((3633, 3656), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3637, 3656), False, 'from mock import patch, Mock\n'), ((3917, 3940), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3921, 3940), False, 'from mock import patch, Mock\n'), ((2294, 2300), 'mock.Mock', 'Mock', ([], {}), '()\n', (2298, 2300), False, 'from mock import patch, Mock\n'), ((2638, 2644), 'mock.Mock', 'Mock', ([], {}), '()\n', (2642, 2644), False, 
'from mock import patch, Mock\n'), ((2904, 2910), 'mock.Mock', 'Mock', ([], {}), '()\n', (2908, 2910), False, 'from mock import patch, Mock\n'), ((3211, 3217), 'mock.Mock', 'Mock', ([], {}), '()\n', (3215, 3217), False, 'from mock import patch, Mock\n'), ((3478, 3484), 'mock.Mock', 'Mock', ([], {}), '()\n', (3482, 3484), False, 'from mock import patch, Mock\n'), ((3758, 3764), 'mock.Mock', 'Mock', ([], {}), '()\n', (3762, 3764), False, 'from mock import patch, Mock\n'), ((4042, 4048), 'mock.Mock', 'Mock', ([], {}), '()\n', (4046, 4048), False, 'from mock import patch, Mock\n'), ((1917, 1923), 'mock.Mock', 'Mock', ([], {}), '()\n', (1921, 1923), False, 'from mock import patch, Mock\n')] |
# -*- coding: utf-8 -*-
"""
Kay authentication urls.
:Copyright: (c) 2009 Accense Technology, Inc.
<NAME> <<EMAIL>>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from kay.routing import (
ViewGroup, Rule
)
view_groups = [
ViewGroup(
Rule('/login', endpoint='login', view='kay.auth.views.login'),
Rule('/login_box', endpoint='login_box', view='kay.auth.views.login_box'),
Rule('/post_session', endpoint='post_session',
view='kay.auth.views.post_session'),
Rule('/logout', endpoint='logout', view='kay.auth.views.logout'),
Rule('/change_password', endpoint='change_password',
view=('kay.auth.views.ChangePasswordHandler',(), {})),
Rule('/request_reset_password', endpoint='request_reset_password',
view='kay.auth.views.request_reset_password'),
Rule('/reset_password/<session_key>', endpoint='reset_password',
view='kay.auth.views.reset_password'),
)
]
| [
"kay.routing.Rule"
] | [((313, 374), 'kay.routing.Rule', 'Rule', (['"""/login"""'], {'endpoint': '"""login"""', 'view': '"""kay.auth.views.login"""'}), "('/login', endpoint='login', view='kay.auth.views.login')\n", (317, 374), False, 'from kay.routing import ViewGroup, Rule\n'), ((380, 453), 'kay.routing.Rule', 'Rule', (['"""/login_box"""'], {'endpoint': '"""login_box"""', 'view': '"""kay.auth.views.login_box"""'}), "('/login_box', endpoint='login_box', view='kay.auth.views.login_box')\n", (384, 453), False, 'from kay.routing import ViewGroup, Rule\n'), ((459, 546), 'kay.routing.Rule', 'Rule', (['"""/post_session"""'], {'endpoint': '"""post_session"""', 'view': '"""kay.auth.views.post_session"""'}), "('/post_session', endpoint='post_session', view=\n 'kay.auth.views.post_session')\n", (463, 546), False, 'from kay.routing import ViewGroup, Rule\n'), ((556, 620), 'kay.routing.Rule', 'Rule', (['"""/logout"""'], {'endpoint': '"""logout"""', 'view': '"""kay.auth.views.logout"""'}), "('/logout', endpoint='logout', view='kay.auth.views.logout')\n", (560, 620), False, 'from kay.routing import ViewGroup, Rule\n'), ((626, 738), 'kay.routing.Rule', 'Rule', (['"""/change_password"""'], {'endpoint': '"""change_password"""', 'view': "('kay.auth.views.ChangePasswordHandler', (), {})"}), "('/change_password', endpoint='change_password', view=(\n 'kay.auth.views.ChangePasswordHandler', (), {}))\n", (630, 738), False, 'from kay.routing import ViewGroup, Rule\n'), ((747, 864), 'kay.routing.Rule', 'Rule', (['"""/request_reset_password"""'], {'endpoint': '"""request_reset_password"""', 'view': '"""kay.auth.views.request_reset_password"""'}), "('/request_reset_password', endpoint='request_reset_password', view=\n 'kay.auth.views.request_reset_password')\n", (751, 864), False, 'from kay.routing import ViewGroup, Rule\n'), ((874, 981), 'kay.routing.Rule', 'Rule', (['"""/reset_password/<session_key>"""'], {'endpoint': '"""reset_password"""', 'view': '"""kay.auth.views.reset_password"""'}), "('/reset_password/<session_key>', endpoint='reset_password', view=\n 'kay.auth.views.reset_password')\n", (878, 981), False, 'from kay.routing import ViewGroup, Rule\n')] |
#!/usr/bin/env python3
from subprocess import Popen, PIPE
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def strip_comments(text):
lines = text.splitlines()
code = []
for l in lines:
if not l.strip().startswith('//'):
code.append(l)
return "\n".join(code)
def extract_expected_out(text):
beg = "//@"
lines = text.splitlines()
comments = []
for l in lines:
if l.startswith(beg):
comments.append((l[len(beg):]).strip())
    return comments
def check_test(input, exit_code, output, expected_output):
output = output.strip()
if len(output) == 0 or output.isspace():
output = None
if exit_code < 0 or exit_code >= 128:
eprint("\n\n")
eprint(f"(exit_code = {exit_code}) :: Program aborted !!!!! ")
eprint("-------- INPUT:")
eprint(input)
eprint("-------- EXPECTED OUTPUT:")
eprint(expected_output)
eprint("--------")
eprint("\n\n")
exit(1)
elif (exit_code == 0 and expected_output is None):
eprint("\n\n")
eprint(f"(exit_code = {exit_code}) :: Expected this test to fail but successed instead")
eprint("-------- INPUT:")
eprint(input)
eprint("-------- OUTPUT:")
eprint(output)
eprint("-------- EXPECTED OUTPUT:")
eprint(expected_output)
eprint("--------")
eprint("\n\n")
exit(1)
elif (exit_code == 0 and output != expected_output):
eprint("\n\n")
eprint(f"(exit_code = {exit_code}) :: Wrong output obtained from the following test")
eprint("-------- INPUT:")
eprint(input)
eprint("-------- OUTPUT:")
eprint(output)
eprint("-------- EXPECTED OUTPUT:")
eprint(expected_output)
eprint("--------")
eprint("\n\n")
exit(1)
elif (exit_code != 0 and expected_output is not None):
eprint("\n\n")
eprint(f"(exit_code = {exit_code}) :: Exepcted this test to successed but failed instead")
eprint("-------- INPUT:")
eprint(input)
eprint("-------- EXPECTED OUTPUT:")
eprint(expected_output)
eprint("--------")
eprint("\n\n")
exit(1)
def test_chunk(input, expected_output):
if expected_output is not None and (len(expected_output) == 0 or expected_output == "\n"):
expected_output = None
out_filepath = "tests/tmp/code.txt"
out_fh = open(out_filepath, "w")
out_fh.write(input)
out_fh.close()
process = Popen(["run_tree/bin/dpcc", "run", out_filepath], stdout=PIPE)
(output, _) = process.communicate()
exit_code = process.wait()
output = output.decode("utf-8")
output = output.strip()
check_test(input, exit_code, output, expected_output)
import re
def test_file(filepath, is_valid=True):
with open(filepath, "r") as progs:
contents = progs.read()
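        # Individual test cases are separated by lines made up of eight or more '/' characters.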
chunks = re.split(r'^/{8}/*$', contents, flags=re.MULTILINE)
chunks = list(map(lambda c: c.strip(), chunks))
for c in chunks:
input = strip_comments(c).strip()
expected_output = "\n".join(extract_expected_out(c))
if is_valid == False:
expected_output = None
if len(input) > 0 and not input.isspace():
test_chunk(input, expected_output)
test_file('tests/valid.dpl', True)
test_file('tests/invalid.dpl', False)
| [
"subprocess.Popen",
"re.split"
] | [((2591, 2653), 'subprocess.Popen', 'Popen', (["['run_tree/bin/dpcc', 'run', out_filepath]"], {'stdout': 'PIPE'}), "(['run_tree/bin/dpcc', 'run', out_filepath], stdout=PIPE)\n", (2596, 2653), False, 'from subprocess import Popen, PIPE\n'), ((2993, 3043), 're.split', 're.split', (['"""^/{8}/*$"""', 'contents'], {'flags': 're.MULTILINE'}), "('^/{8}/*$', contents, flags=re.MULTILINE)\n", (3001, 3043), False, 'import re\n')] |
from django.conf.urls import url
from django.urls import path, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from mysite import views as core_views
from django.views.generic.base import TemplateView
from django.views.generic import UpdateView
from .models import *
from .forms import *
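# URL routes for the ticket-tracking app: authentication, ticket creation and
# editing, and user administration.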
urlpatterns = [
url(r'^newticket/$', core_views.newticket, name='newticket'),
url(r'^newdepartement/$', core_views.newdepartement, name='newdepartement'),
url(r'^newmateriel/$', core_views.newmateriel, name='newmateriel'),
url(r'^signup/$', core_views.signup, name='signup'),
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
url(r'^password/$', core_views.change_password, name='change_password'),
url(r'^email/$', core_views.change_email, name='change_email'),
url(r'^mytickets/$', core_views.my_tickets, name='my_tickets'),
url(r'^fortickets/$', core_views.for_tickets, name='for_tickets'),
url(r'^alltickets/$', core_views.all_tickets, name='all_tickets'),
url(r'^allusers/$', core_views.all_users, name='all_users'),
url(r'^deletedusers/$', core_views.deleted_users, name='deleted_users'),
path('home/', core_views.home, name='home'),
path('resolved/', core_views.resolved, name='resolved'),
path('', core_views.index, name='index'),
path('changeStatus/<int:id_ticket>/<str:redi>', core_views.changeStatus),
path('ticket/edit/<int:id_ticket>', core_views.editTicket),
path('deleteTicket/<int:id_ticket>/<str:redi>', core_views.deleteTicket),
path('deleteUser/<int:idu>/<str:redi>', core_views.deleteUser),
path('user/edit/<int:idu>', core_views.EditUser),
#path('', TemplateView.as_view(template_name='index.html'), name='index'),
]
| [
"django.conf.urls.url",
"django.urls.path"
] | [((354, 413), 'django.conf.urls.url', 'url', (['"""^newticket/$"""', 'core_views.newticket'], {'name': '"""newticket"""'}), "('^newticket/$', core_views.newticket, name='newticket')\n", (357, 413), False, 'from django.conf.urls import url\n'), ((420, 494), 'django.conf.urls.url', 'url', (['"""^newdepartement/$"""', 'core_views.newdepartement'], {'name': '"""newdepartement"""'}), "('^newdepartement/$', core_views.newdepartement, name='newdepartement')\n", (423, 494), False, 'from django.conf.urls import url\n'), ((501, 566), 'django.conf.urls.url', 'url', (['"""^newmateriel/$"""', 'core_views.newmateriel'], {'name': '"""newmateriel"""'}), "('^newmateriel/$', core_views.newmateriel, name='newmateriel')\n", (504, 566), False, 'from django.conf.urls import url\n'), ((573, 623), 'django.conf.urls.url', 'url', (['"""^signup/$"""', 'core_views.signup'], {'name': '"""signup"""'}), "('^signup/$', core_views.signup, name='signup')\n", (576, 623), False, 'from django.conf.urls import url\n'), ((630, 677), 'django.conf.urls.url', 'url', (['"""^login/$"""', 'auth_views.login'], {'name': '"""login"""'}), "('^login/$', auth_views.login, name='login')\n", (633, 677), False, 'from django.conf.urls import url\n'), ((684, 754), 'django.conf.urls.url', 'url', (['"""^logout/$"""', 'auth_views.logout', "{'next_page': '/'}"], {'name': '"""logout"""'}), "('^logout/$', auth_views.logout, {'next_page': '/'}, name='logout')\n", (687, 754), False, 'from django.conf.urls import url\n'), ((761, 831), 'django.conf.urls.url', 'url', (['"""^password/$"""', 'core_views.change_password'], {'name': '"""change_password"""'}), "('^password/$', core_views.change_password, name='change_password')\n", (764, 831), False, 'from django.conf.urls import url\n'), ((838, 899), 'django.conf.urls.url', 'url', (['"""^email/$"""', 'core_views.change_email'], {'name': '"""change_email"""'}), "('^email/$', core_views.change_email, name='change_email')\n", (841, 899), False, 'from django.conf.urls import url\n'), ((906, 967), 'django.conf.urls.url', 'url', (['"""^mytickets/$"""', 'core_views.my_tickets'], {'name': '"""my_tickets"""'}), "('^mytickets/$', core_views.my_tickets, name='my_tickets')\n", (909, 967), False, 'from django.conf.urls import url\n'), ((974, 1038), 'django.conf.urls.url', 'url', (['"""^fortickets/$"""', 'core_views.for_tickets'], {'name': '"""for_tickets"""'}), "('^fortickets/$', core_views.for_tickets, name='for_tickets')\n", (977, 1038), False, 'from django.conf.urls import url\n'), ((1045, 1109), 'django.conf.urls.url', 'url', (['"""^alltickets/$"""', 'core_views.all_tickets'], {'name': '"""all_tickets"""'}), "('^alltickets/$', core_views.all_tickets, name='all_tickets')\n", (1048, 1109), False, 'from django.conf.urls import url\n'), ((1116, 1174), 'django.conf.urls.url', 'url', (['"""^allusers/$"""', 'core_views.all_users'], {'name': '"""all_users"""'}), "('^allusers/$', core_views.all_users, name='all_users')\n", (1119, 1174), False, 'from django.conf.urls import url\n'), ((1181, 1251), 'django.conf.urls.url', 'url', (['"""^deletedusers/$"""', 'core_views.deleted_users'], {'name': '"""deleted_users"""'}), "('^deletedusers/$', core_views.deleted_users, name='deleted_users')\n", (1184, 1251), False, 'from django.conf.urls import url\n'), ((1258, 1301), 'django.urls.path', 'path', (['"""home/"""', 'core_views.home'], {'name': '"""home"""'}), "('home/', core_views.home, name='home')\n", (1262, 1301), False, 'from django.urls import path, include\n'), ((1307, 1362), 'django.urls.path', 'path', (['"""resolved/"""', 
'core_views.resolved'], {'name': '"""resolved"""'}), "('resolved/', core_views.resolved, name='resolved')\n", (1311, 1362), False, 'from django.urls import path, include\n'), ((1368, 1408), 'django.urls.path', 'path', (['""""""', 'core_views.index'], {'name': '"""index"""'}), "('', core_views.index, name='index')\n", (1372, 1408), False, 'from django.urls import path, include\n'), ((1414, 1486), 'django.urls.path', 'path', (['"""changeStatus/<int:id_ticket>/<str:redi>"""', 'core_views.changeStatus'], {}), "('changeStatus/<int:id_ticket>/<str:redi>', core_views.changeStatus)\n", (1418, 1486), False, 'from django.urls import path, include\n'), ((1492, 1550), 'django.urls.path', 'path', (['"""ticket/edit/<int:id_ticket>"""', 'core_views.editTicket'], {}), "('ticket/edit/<int:id_ticket>', core_views.editTicket)\n", (1496, 1550), False, 'from django.urls import path, include\n'), ((1556, 1628), 'django.urls.path', 'path', (['"""deleteTicket/<int:id_ticket>/<str:redi>"""', 'core_views.deleteTicket'], {}), "('deleteTicket/<int:id_ticket>/<str:redi>', core_views.deleteTicket)\n", (1560, 1628), False, 'from django.urls import path, include\n'), ((1634, 1696), 'django.urls.path', 'path', (['"""deleteUser/<int:idu>/<str:redi>"""', 'core_views.deleteUser'], {}), "('deleteUser/<int:idu>/<str:redi>', core_views.deleteUser)\n", (1638, 1696), False, 'from django.urls import path, include\n'), ((1702, 1750), 'django.urls.path', 'path', (['"""user/edit/<int:idu>"""', 'core_views.EditUser'], {}), "('user/edit/<int:idu>', core_views.EditUser)\n", (1706, 1750), False, 'from django.urls import path, include\n')] |
# -*- coding: utf-8 -*-
# from sympy import *
import sympy as sym ## import the Sympy module
import os
d1="<NAME>"
print(d1)
d2=1
d3=4
d4=(1,2,3,4,['a','b'],5,'678')
print(d4)
d4[4][0]='cd'
d4[4][1]='ef'
print(d4[0])
print(len(d4))
print(d4[4])
for aa in d4: # iterate over the elements of the tuple
print (aa)
for i in range(len(d4)):
print('%s---d4[%s]=%s' % (type(d4[i]),i,d4[i]))
# ds=type(d4[i])
# print(ds)
# use sympy to solve a system of equations
x=sym.Symbol('x')
y=sym.Symbol('y')
print (sym.solve([y+x-1,3*x+2*y-5],[x,y]))
diss=(sym.solve([y+x-1,3*x+2*y-5],[x,y]))
# print (limit(1/x**2, x, 0))
print (diss)
print(type (diss))
print(diss[x],diss[y]) # the results are stored in a dict
# build a character mapping from one string and use it to translate another string
str1='abcd' # original letters
taDB='人民公社' # corresponding translated characters
# trantab=str.maketrans(str1,taDB)
st='abbdaaacd'
print(st.translate(str.maketrans(str1,taDB)))
# dictionary usage
student={'小萌':'1001','小智':'1002','小强':'1003'}
print('小强的学号是:%(小强)s' % student)
x={}
y=x
x['val']=12 #
print(x, y)
st=student.copy()
print(st)
st['小强']='4445'
print(student)
print(st)
# dictionaries inside a list
dirdd=[{'id':1001,'imm':'sttr'},{'idd':'p01','dis':123}]
print (len(dirdd))
print (dirdd[0],dirdd[1])
print(dirdd[0]['id'])
dirdd.append({'dsd':90909})
print(dirdd)
d2=[1,2,3,4,5]
print(d2)
d2[2:]=list('7')
print(d2.count(1))
a1=[1,2,3,4]
b1=['a','b','c','d']
c1=a1.copy()
a1[len(a1):]=b1
print (a1)
c1.extend(b1) ## c1.extend(b1) and a1[len(a1):]=b1 give the same result: both append every value of another sequence to the end of the list in one step
print(a1.index('a'))
print(a1.index(1))
xxx=111
if xxx in a1:
print('X属于A1')
else:
print('X不属于A1')
for iii in a1:
print(iii)
print(len(a1))
isis=0
# use a while loop to read the values of the list
while (isis<len(a1)):
print('a[%s]=%s'%(isis,a1[isis]))
isis=isis+1
'''
multi-line comment
multi-line comment
multi-line comment
'''
dic1={'a':(1,88),'b':2,'c':3,'d':4} # dict: one key holding multiple values via a tuple
print(dic1['a'][0])
print(dic1['a'][1])
dic2={('a','b'):100}
print(dic2['a','b'])
# de-duplicating values in a dict
d={'d':0,'b':0,'c':1,'a':0,'e':1,'f':0,'h':2}
func = lambda z: dict([(x, y) for y, x in z.items()])
print(d)
print(func(d))
print(func(func(d)))
d2=(func(func(d)))
print(d2)
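# applying the key/value swap twice drops entries that share a value,
# keeping only one key per distinct value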
''' De-duplicating lists and dicts in Python
# 1. Clear and simple version (preserves the original order):
ids = [1,2,3,3,4,2,3,4,5,6,1]
news_ids = []
for id in ids:
if id not in news_ids:
news_ids.append(id)
print (news_ids)
# 2. Concise and fast version
# rely on set's automatic de-duplication:
li=[1,2,3,4,5,1,2,3]
li=list(set(li))
print(li)
# this changes the original order of the list; to keep the order unchanged, do the following:
li=[1,2,3,4,5,1,2,3]
new_li=list(set(li))
new_li.sort(key=li.index)
print(new_li)
# 3. Anonymous function (lambda) version
ids = [1,4,3,3,4,2,3,4,5,6,1]
func = lambda x,y:x if y in x else x + [y]
reduce(func, [[], ] + ids)
# 4. Advanced module version
import itertools
ids = [1,4,3,3,4,2,3,4,5,6,1]
ids.sort()
it = itertools.groupby(ids)
for k, g in it:
print (k)
# 5. Fast de-duplication for text files on the order of GB
#coding=utf-8
import sys, re, os
def quchong(infile, outfile):
inopen = open(infile, 'r', encoding='utf-8')
outopen = open(outfile, 'w', encoding='utf-8')
data = inopen.read()
list_1 = list(set(data.split('\n')))
print(list_1)
for line in list_1:
if line != '':
outopen.write(line + '\n')
inopen.close()
outopen.close()
# 6. De-duplicate a dict by value:
# dict keys must be unique, so swap keys and values to de-duplicate, then swap them back.
func = lambda z: dict([(x, y) for y, x in z.items()]) # swap key and value positions
result = func(func(tw))
'''
'''
333333333333333333333333333
'''
| [
"sympy.Symbol",
"sympy.solve"
] | [((408, 423), 'sympy.Symbol', 'sym.Symbol', (['"""x"""'], {}), "('x')\n", (418, 423), True, 'import sympy as sym\n'), ((426, 441), 'sympy.Symbol', 'sym.Symbol', (['"""y"""'], {}), "('y')\n", (436, 441), True, 'import sympy as sym\n'), ((491, 540), 'sympy.solve', 'sym.solve', (['[y + x - 1, 3 * x + 2 * y - 5]', '[x, y]'], {}), '([y + x - 1, 3 * x + 2 * y - 5], [x, y])\n', (500, 540), True, 'import sympy as sym\n'), ((449, 498), 'sympy.solve', 'sym.solve', (['[y + x - 1, 3 * x + 2 * y - 5]', '[x, y]'], {}), '([y + x - 1, 3 * x + 2 * y - 5], [x, y])\n', (458, 498), True, 'import sympy as sym\n')] |
# -*- coding: utf-8 -*-
from sqlalch import db
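# SQLAlchemy models for restaurants and their menu items; menu prices are
# stored as integer cents.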
class Restaurant(db.Model):
__tablename__ = 'restaurants_restaurant'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Restaurant ({})>'.format(self.name)
class MenuItem(db.Model):
__tablename__ = 'restaurants_menuitem'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
description = db.Column(db.Text)
# store number of cents
price = db.Column(db.Integer, nullable=False)
course = db.Column(db.String(250))
restaurant_id = db.Column(db.Integer,
db.ForeignKey('restaurants_restaurant.id'))
restaurant = db.relationship(
'Restaurant',
backref=db.backref('menuitems', lazy='dynamic')
)
def __init__(self, name, price, restaurant, course="", description=""):
self.name = name
self.price = price
self.course = course
self.description = description
self.restaurant = restaurant
def __repr__(self):
return '<MenuItem ({})>'.format(self.name)
| [
"sqlalch.db.Column",
"sqlalch.db.ForeignKey",
"sqlalch.db.String",
"sqlalch.db.backref"
] | [((133, 172), 'sqlalch.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (142, 172), False, 'from sqlalch import db\n'), ((441, 480), 'sqlalch.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (450, 480), False, 'from sqlalch import db\n'), ((551, 569), 'sqlalch.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (560, 569), False, 'from sqlalch import db\n'), ((610, 647), 'sqlalch.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (619, 647), False, 'from sqlalch import db\n'), ((194, 208), 'sqlalch.db.String', 'db.String', (['(250)'], {}), '(250)\n', (203, 208), False, 'from sqlalch import db\n'), ((502, 515), 'sqlalch.db.String', 'db.String', (['(80)'], {}), '(80)\n', (511, 515), False, 'from sqlalch import db\n'), ((671, 685), 'sqlalch.db.String', 'db.String', (['(250)'], {}), '(250)\n', (680, 685), False, 'from sqlalch import db\n'), ((759, 801), 'sqlalch.db.ForeignKey', 'db.ForeignKey', (['"""restaurants_restaurant.id"""'], {}), "('restaurants_restaurant.id')\n", (772, 801), False, 'from sqlalch import db\n'), ((876, 915), 'sqlalch.db.backref', 'db.backref', (['"""menuitems"""'], {'lazy': '"""dynamic"""'}), "('menuitems', lazy='dynamic')\n", (886, 915), False, 'from sqlalch import db\n')] |
#-*- coding: utf-8 -*-
from .utils.information_handler import crawl,parser
from .utils.request_handler import request_handler as request
from .utils.ngelog import logger,merah,kuning,hijau,biru,no_skema
from bs4 import BeautifulSoup
log = logger(__name__)
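# Inspects a crawled response and logs any HTML forms, DOM parameters,
# internal/external dynamic parameters and internal/external links it finds.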
class extractor(object):
def __init__(self,target,respon):
self.target = target
self.respon = respon
self.p = crawl
self.links = self.link()
def form(self):
bs = BeautifulSoup(self.respon.text,'lxml')
log.log(10,'Searching Html Form !')
html_form = self.p.html_form(bs)
if any(html_form.values()) == True:
log.log(50,'Html Form Discovered')
for a,b in html_form.items():
log.log(10,f'{a}: {hijau(b)}')
else:
log.log(30,'No Html Form Found!?')
def link(self):
all_link = self.p.extract_link(self.respon)
insert_link = []
if all_link != []:
for i in all_link:
if not i.startswith("http"):
insert_link.append(f'{self.target}/{i}')
else:
insert_link.append(i)
return insert_link
else:
return []
def dom(self):
dom = self.p.dom(self.links)
if dom != []:
log.log(20,f'Found {hijau(len(dom))} dom parameter')
for i in dom:
log.log(10,f'{i}')
else:
log.log(30,f'No DOM Paramter Found!?')
def in_dynamic(self):
in_dynamic = self.p.internal_dynamic(self.links,no_skema(self.target))
if in_dynamic != []:
log.log(20,f'{hijau(len(in_dynamic))} Internal Dynamic Parameter Discovered')
for i in in_dynamic:
log.log(50,f'{i}')
else:
log.log(30,f'No internal Dynamic Parameter Found!?')
def ex_dynamic(self):
ex_dynamic = self.p.external_dynamic(self.links,no_skema(self.target))
if ex_dynamic != []:
log.log(20,f'{hijau(len(ex_dynamic))} External Dynamic Parameter Discovered')
for i in ex_dynamic:
log.log(10,f'{i}')
else:
log.log(30,f'No external Dynamic Paramter Found!?')
def in_link(self):
in_link = self.p.internal_link(self.links,no_skema(self.target))
if in_link != []:
log.log(20,f'{hijau(len(in_link))} Internal links Discovered')
for i in in_link:
log.log(50,f'{i}')
else:
log.log(30,f'No Internal Link Found!?')
def ex_link(self):
ex_link = self.p.external_link(self.links,no_skema(self.target))
if ex_link != []:
log.log(20,f'{hijau(len(ex_link))} External links Discovered')
for i in ex_link:
log.log(10,f'{i}')
else:
log.log(30,f'No External Link Found!?')
| [
"bs4.BeautifulSoup"
] | [((495, 534), 'bs4.BeautifulSoup', 'BeautifulSoup', (['self.respon.text', '"""lxml"""'], {}), "(self.respon.text, 'lxml')\n", (508, 534), False, 'from bs4 import BeautifulSoup\n')] |
import pyautogui
import os
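# Small desktop helpers: monitor-size queries and file-path utilities.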
def get_monitor_size():
width, height = pyautogui.size()
return (width, height)
def get_percentage_of_monitor_size(
percentage_width, percentage_height):
width, height = pyautogui.size()
return (width * percentage_width, height * percentage_height)
def get_file_extension(file_path:str):
file_name, file_extension = os.path.splitext(file_path)
return file_extension
def open_file(file_path:str):
os.startfile(file_path)
def get_file_name(file_path:str):
return os.path.basename(file_path)
# def on_search_key_up(self, window, keycode):
# if keycode is not None and len(keycode) == 2 and keycode[1] == "tab":
# self.scroll.focus = True | [
"os.startfile",
"os.path.splitext",
"os.path.basename",
"pyautogui.size"
] | [((72, 88), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (86, 88), False, 'import pyautogui\n'), ((216, 232), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (230, 232), False, 'import pyautogui\n'), ((371, 398), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (387, 398), False, 'import os\n'), ((460, 483), 'os.startfile', 'os.startfile', (['file_path'], {}), '(file_path)\n', (472, 483), False, 'import os\n'), ((530, 557), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (546, 557), False, 'import os\n')] |
from flask import Flask, request, url_for, render_template, flash, redirect, abort
from jinja2 import evalcontextfilter, Markup, escape
from flask_mail import Mail, Message
from raven.contrib.flask import Sentry, Client
from projects_controller import ProjectsController
from redirects_controller import RedirectsController
import config
import re
import strings
import atexit
app = Flask(__name__)
app.secret_key = config.SECRET_KEY
app.url_map.strict_slashes = False
app.config.update(config.APP_CONFIG)
app.config.update(config.MAIL_SETTINGS)
mail = Mail(app)
app.config.update(config.SENTRY_SETTINGS)
sentry = Sentry(app)
projects_controller = ProjectsController()
redirects_controller = RedirectsController()
def close_db_conn():
projects_controller.close()
atexit.register(close_db_conn)
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') \
for p in _paragraph_re.split(escape(value)))
if eval_ctx.autoescape:
result = Markup(result)
return result
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html', mixpanel_token=mixpanel_token()), 404
@app.route('/')
def index():
current_projects = projects_controller.get_current_projects()
past_projects = projects_controller.get_past_projects()
return render_template('index.html', current_projects=current_projects, past_projects=past_projects, mixpanel_token=mixpanel_token())
@app.route('/start', methods=['GET', 'POST'])
def start_project():
if request.method == 'GET':
return render_template('start.html', form={}, errors={}, mixpanel_token=mixpanel_token())
form = request.form
errors = {}
if not form['name']:
errors['name'] = strings.ERROR_NO_NAME
if not form['email']:
errors['email'] = strings.ERROR_NO_EMAIL_TO_GET_AHOLD
if not form['ptitle']:
errors['ptitle'] = strings.ERROR_NO_PROJ_TITLE
if not form['desc']:
errors['desc'] = strings.ERROR_NO_PROJ_DESC
if not errors:
subject = strings.SUBJ_PROJ_NEW % form.get('ptitle')
msg = Message(subject)
msg.add_recipient(email_address(config.CONTACT_EMAIL))
msg.html = render_template('mail/start.html', form=form)
msg.body = render_template('mail/start.txt', form=form)
mail.send(msg)
flash(strings.SUCCESS_APP_SUBMITTED, 'success')
return redirect(url_for('index'))
flash(strings.ERROR_NOT_SUBMITTED, 'danger')
return render_template('start.html', form=form, errors=errors, mixpanel_token=mixpanel_token())
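# Catch-all slug route: show a project page when the slug matches a project
# (or redirect to its wrap-up post once the project is over), follow any
# configured redirect otherwise, and fall back to a 404.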
@app.route('/<dynamic>', methods=['GET', 'POST'])
def dynamic(dynamic):
# First, test if if it's a project
projects = projects_controller.get_all_projects()
if dynamic in projects:
project_data = projects[dynamic]
past_project_url = project_data.get('past_project_url')
if past_project_url:
# The project is over, we should redirect to the post
return redirect(past_project_url)
else:
return render_project(dynamic, project_data)
redirects = redirects_controller.get_redirects()
if dynamic in redirects:
return redirect(redirects[dynamic])
abort(404)
def render_project(project_name, project_data):
if request.method == 'GET':
return render_template('project.html', project_data=project_data, form={}, errors={}, mixpanel_token=mixpanel_token())
form = request.form
errors = {}
if 'join_email' in form:
if not form['join_email']:
errors['join_email'] = strings.ERROR_NO_EMAIL_TO_GET_AHOLD
if not errors:
subject = strings.SUBJ_PROJ_JOIN_REQUESTED % project_data['name']
msg = Message(subject)
msg.add_recipient(email_address(project_data['leaders'][0]['email']))
msg.html = render_template('mail/join_project.html', form=form)
msg.body = render_template('mail/join_project.txt', form=form)
mail.send(msg)
flash_msg = strings.SUCCESS_PROJ_JOINED % project_data['name']
flash(flash_msg, 'success')
return redirect('/' + project_name)
if 'ask_msg' in form:
if not form['ask_msg']:
errors['ask_msg'] = strings.ERROR_DONT_FORGET_MSG
if not form['ask_email']:
errors['ask_email'] = strings.ERROR_NO_EMAIL_TO_ANSWER
if not errors:
subject = strings.SUBJ_PROJ_QUESTION % project_data['name']
msg = Message(subject, reply_to=form.get('ask_email'))
msg.add_recipient(email_address(project_data['leaders'][0]['email']))
msg.html = render_template('mail/project_question.html', form=form)
msg.body = render_template('mail/project_question.txt', form=form)
mail.send(msg)
flash_msg = strings.SUCCESS_MESSAGE_SUBMITTED
flash(flash_msg, 'success')
return redirect('/' + project_name)
flash(strings.ERROR_NOT_SUBMITTED, 'danger')
return render_template('project.html', project_data=project_data, form=form, errors=errors, mixpanel_token=mixpanel_token())
@app.route('/dev_sync')
def dev_save_and_reload_all_data():
save_all_data()
reload_all_data()
return redirect(redirect_url())
@app.route('/dev_reload')
def dev_reload_all_data():
reload_all_data()
return redirect(redirect_url())
def mixpanel_token():
if config.MIXPANEL_SUPPRESS_SEND:
return None
return config.MIXPANEL_TOKEN
def save_all_data():
projects_controller.write_projects()
redirects_controller.load_redirects()
def reload_all_data():
projects_controller.load_projects()
redirects_controller.load_redirects()
def redirect_url():
return request.args.get('next') or request.referrer or url_for('index')
def email_address(email):
if app.debug or app.testing:
return config.DEBUG_EMAIL
return email
if __name__ == '__main__':
app.run()
| [
"flask_mail.Mail",
"redirects_controller.RedirectsController",
"flask.render_template",
"flask.request.args.get",
"flask.flash",
"flask.Flask",
"re.compile",
"jinja2.escape",
"flask_mail.Message",
"flask.url_for",
"flask.redirect",
"raven.contrib.flask.Sentry",
"projects_controller.ProjectsController",
"flask.abort",
"jinja2.Markup",
"atexit.register"
] | [((384, 399), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((555, 564), 'flask_mail.Mail', 'Mail', (['app'], {}), '(app)\n', (559, 564), False, 'from flask_mail import Mail, Message\n'), ((617, 628), 'raven.contrib.flask.Sentry', 'Sentry', (['app'], {}), '(app)\n', (623, 628), False, 'from raven.contrib.flask import Sentry, Client\n'), ((652, 672), 'projects_controller.ProjectsController', 'ProjectsController', ([], {}), '()\n', (670, 672), False, 'from projects_controller import ProjectsController\n'), ((696, 717), 'redirects_controller.RedirectsController', 'RedirectsController', ([], {}), '()\n', (715, 717), False, 'from redirects_controller import RedirectsController\n'), ((773, 803), 'atexit.register', 'atexit.register', (['close_db_conn'], {}), '(close_db_conn)\n', (788, 803), False, 'import atexit\n'), ((821, 857), 're.compile', 're.compile', (['"""(?:\\\\r\\\\n|\\\\r|\\\\n){2,}"""'], {}), "('(?:\\\\r\\\\n|\\\\r|\\\\n){2,}')\n", (831, 857), False, 'import re\n'), ((2550, 2594), 'flask.flash', 'flash', (['strings.ERROR_NOT_SUBMITTED', '"""danger"""'], {}), "(strings.ERROR_NOT_SUBMITTED, 'danger')\n", (2555, 2594), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((3337, 3347), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3342, 3347), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((5118, 5162), 'flask.flash', 'flash', (['strings.ERROR_NOT_SUBMITTED', '"""danger"""'], {}), "(strings.ERROR_NOT_SUBMITTED, 'danger')\n", (5123, 5162), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((1093, 1107), 'jinja2.Markup', 'Markup', (['result'], {}), '(result)\n', (1099, 1107), False, 'from jinja2 import evalcontextfilter, Markup, escape\n'), ((2205, 2221), 'flask_mail.Message', 'Message', (['subject'], {}), '(subject)\n', (2212, 2221), False, 'from flask_mail import Mail, Message\n'), ((2304, 2349), 'flask.render_template', 'render_template', (['"""mail/start.html"""'], {'form': 'form'}), "('mail/start.html', form=form)\n", (2319, 2349), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((2369, 2413), 'flask.render_template', 'render_template', (['"""mail/start.txt"""'], {'form': 'form'}), "('mail/start.txt', form=form)\n", (2384, 2413), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((2455, 2502), 'flask.flash', 'flash', (['strings.SUCCESS_APP_SUBMITTED', '"""success"""'], {}), "(strings.SUCCESS_APP_SUBMITTED, 'success')\n", (2460, 2502), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((3303, 3331), 'flask.redirect', 'redirect', (['redirects[dynamic]'], {}), '(redirects[dynamic])\n', (3311, 3331), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((5901, 5925), 'flask.request.args.get', 'request.args.get', (['"""next"""'], {}), "('next')\n", (5917, 5925), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((5949, 5965), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (5956, 5965), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((2527, 2543), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (2534, 2543), False, 
'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((3109, 3135), 'flask.redirect', 'redirect', (['past_project_url'], {}), '(past_project_url)\n', (3117, 3135), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((3861, 3877), 'flask_mail.Message', 'Message', (['subject'], {}), '(subject)\n', (3868, 3877), False, 'from flask_mail import Mail, Message\n'), ((3983, 4035), 'flask.render_template', 'render_template', (['"""mail/join_project.html"""'], {'form': 'form'}), "('mail/join_project.html', form=form)\n", (3998, 4035), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((4059, 4110), 'flask.render_template', 'render_template', (['"""mail/join_project.txt"""'], {'form': 'form'}), "('mail/join_project.txt', form=form)\n", (4074, 4110), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((4235, 4262), 'flask.flash', 'flash', (['flash_msg', '"""success"""'], {}), "(flash_msg, 'success')\n", (4240, 4262), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((4282, 4310), 'flask.redirect', 'redirect', (["('/' + project_name)"], {}), "('/' + project_name)\n", (4290, 4310), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((4802, 4858), 'flask.render_template', 'render_template', (['"""mail/project_question.html"""'], {'form': 'form'}), "('mail/project_question.html', form=form)\n", (4817, 4858), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((4882, 4937), 'flask.render_template', 'render_template', (['"""mail/project_question.txt"""'], {'form': 'form'}), "('mail/project_question.txt', form=form)\n", (4897, 4937), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((5037, 5064), 'flask.flash', 'flash', (['flash_msg', '"""success"""'], {}), "(flash_msg, 'success')\n", (5042, 5064), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((5084, 5112), 'flask.redirect', 'redirect', (["('/' + project_name)"], {}), "('/' + project_name)\n", (5092, 5112), False, 'from flask import Flask, request, url_for, render_template, flash, redirect, abort\n'), ((1032, 1045), 'jinja2.escape', 'escape', (['value'], {}), '(value)\n', (1038, 1045), False, 'from jinja2 import evalcontextfilter, Markup, escape\n')] |
# coding=utf-8
"""
"""
__author__ = 'Alisue <<EMAIL>>'
import re
import argparse
import txt2contincd
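# Command-line interface for txt2contincd: reading options, experimental
# properties (residues, molecular weight, concentration, path length) and
# the input file pattern.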
USING_FORMAT_PATTERN = re.compile(r"^(\d+:)*\d+$")
def parse_using(value):
m = USING_FORMAT_PATTERN.match(value)
if m is None:
raise argparse.ArgumentTypeError('Value has to be a colon (:) '
'separated column indexes (e.g. '
'"0:1" or "0:1:2").')
indexes = value.split(":")
return tuple(map(int, indexes))
def parse_args(args=None):
usage = None
description = None
parser = argparse.ArgumentParser(prog='txt2contincd',
usage=usage,
description=description,
version=txt2contincd.__version__)
group1 = parser.add_argument_group('Reading options')
group1.add_argument('-p', '--parser', default=None,
help=('A maidenhair parser name which will be used to '
'parse the raw text data.'))
group1.add_argument('-l', '--loader', default=None,
help=('A maidenhair loader name which will be used to '
'load the raw text data.'))
group1.add_argument('-u', '--using', default=None, type=parse_using,
                        help=('Colon (:) separated column indexes, '
                              'used to limit which columns are read.'))
group1.add_argument('-a', '--average', action='store_true', default=None,
help=('Calculate the average value of the specified '
'data.'))
group1.add_argument('-s', '--no-strict', action='store_false', default=None,
dest='strict',
                        help=('Do not restrict the wavelength range to '
                              '190-240.'))
group1.add_argument('-o', '--output', default=None,
help=('A output filename. '
'The default is "contin-cd.in".'))
# Experimental properties
group2 = parser.add_argument_group('Experimental properties')
group2.add_argument('-n', '--number', default=None, type=int,
help=('The number of residues (amino acids) in the '
'sample.'))
group2.add_argument('-m', '--molecular-weight', default=None, type=float,
help=('A molecular weight of the sample '
'in kDa (=kg/mol). '))
group2.add_argument('-c', '--concentration', default=None, type=float,
help=('A concentration of the sample in g/L. '
'See --molar-concentration as an alternative.'))
group2.add_argument('--molar-concentration', default=None, type=float,
help=('A molar concentration of the sample in '
'mol/L. '
'It is used as an alternative option of '
'--concentration.'))
group2.add_argument('-L', '--length', default=None, type=float,
help=('A light pathway length (cuvette length) '
'in centimeter'))
# Required
parser.add_argument('pathname',
help=('An unix grob style filename pattern for the '
'data files'))
args = parser.parse_args(args)
return args
| [
"argparse.ArgumentTypeError",
"argparse.ArgumentParser",
"re.compile"
] | [((125, 153), 're.compile', 're.compile', (['"""^(\\\\d+:)*\\\\d+$"""'], {}), "('^(\\\\d+:)*\\\\d+$')\n", (135, 153), False, 'import re\n'), ((598, 719), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""txt2contincd"""', 'usage': 'usage', 'description': 'description', 'version': 'txt2contincd.__version__'}), "(prog='txt2contincd', usage=usage, description=\n description, version=txt2contincd.__version__)\n", (621, 719), False, 'import argparse\n'), ((252, 369), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Value has to be a colon (:) separated column indexes (e.g. "0:1" or "0:1:2")."""'], {}), '(\n \'Value has to be a colon (:) separated column indexes (e.g. "0:1" or "0:1:2").\'\n )\n', (278, 369), False, 'import argparse\n')] |
import os
import pandas as pd
from tensorflow.keras.models import load_model
from sklearn.preprocessing import StandardScaler
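# Wraps a stored per-ticker Keras model: validates the inputs, scales the
# features, and appends a 'prediction' column with the predicted return.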
class Predictions(object):
def __init__(self, df=None, ticker=None, period=None):
self.ticker = ticker
self.period = period
self.df = df
self.model = None
self.df_pred = None
self.valid = False
self.all_companies_path = './services/ModelPredictions/Storage/Models/'
def validate(self):
valid_period = type(self.period) == int and self.period in [2, 3, 5, 7, 9]
valid_ticker = type(self.ticker) == str and self.ticker[:-3] in os.listdir(self.all_companies_path)
valid_df = (self.df is not None) and (type(self.df) == pd.DataFrame) and (not self.df.empty)
if valid_ticker and valid_period and valid_df:
self.valid = True
def generate_prediction(self):
# Load model
for m in os.listdir(f'{self.all_companies_path}{self.ticker[:-3]}/'):
if int(m[12:13]) == self.period:
self.model = load_model(f'{self.all_companies_path}{self.ticker[:-3]}/{m}')
# Print summary
# self.model.summary()
# Scale data
scaler = StandardScaler()
self.df_pred = self.df.copy()
# print(self.df_pred.shape)
self.df_pred.drop(['time_stamp'], axis=1, inplace=True)
self.df_pred = self.df_pred.values
self.df_pred = scaler.fit_transform(self.df_pred)
self.df_pred = self.df_pred.reshape(self.df_pred.shape[0], self.df_pred.shape[1], 1)
# Get predictions
predictions = self.model.predict(self.df_pred)
scaler.fit(predictions)
predicted_percent_return = scaler.inverse_transform(predictions)
self.df['prediction'] = predicted_percent_return
return self.df
def get_prediction(self):
self.validate()
if self.valid:
return self.generate_prediction()
else:
raise ValueError("Invalid inputs!")
| [
"sklearn.preprocessing.StandardScaler",
"os.listdir",
"tensorflow.keras.models.load_model"
] | [((933, 992), 'os.listdir', 'os.listdir', (['f"""{self.all_companies_path}{self.ticker[:-3]}/"""'], {}), "(f'{self.all_companies_path}{self.ticker[:-3]}/')\n", (943, 992), False, 'import os\n'), ((1226, 1242), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1240, 1242), False, 'from sklearn.preprocessing import StandardScaler\n'), ((635, 670), 'os.listdir', 'os.listdir', (['self.all_companies_path'], {}), '(self.all_companies_path)\n', (645, 670), False, 'import os\n'), ((1068, 1130), 'tensorflow.keras.models.load_model', 'load_model', (['f"""{self.all_companies_path}{self.ticker[:-3]}/{m}"""'], {}), "(f'{self.all_companies_path}{self.ticker[:-3]}/{m}')\n", (1078, 1130), False, 'from tensorflow.keras.models import load_model\n')] |
import warnings
from typing import List, Optional, Union, Dict
from mkdocs.structure.nav import Navigation as MkDocsNavigation, Section, Link, \
_get_by_type, _add_parent_links, _add_previous_and_next_links
from mkdocs.structure.pages import Page
from .arrange import arrange, InvalidArrangeEntry
from .meta import Meta
from .options import Options
from .utils import dirname, basename, join_paths
NavigationItem = Union[Page, Section, Link]
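# Post-processes the MkDocs navigation using per-directory meta files:
# arranging, hiding, re-titling and collapsing sections.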
class ArrangeEntryNotFound(Warning):
def __init__(self, entry: str, context: str):
super().__init__('Arrange entry "{entry}" not found. [{context}]'.format(entry=entry, context=context))
class TitleInRootHasNoEffect(Warning):
def __init__(self, filename: str):
super().__init__(
'Using the "title" attribute in the {filename} file of the doc root has no effect'
.format(filename=filename)
)
class HideInRootHasNoEffect(Warning):
def __init__(self, filename: str):
super().__init__(
'Using the "hide" attribute in the {filename} file of the doc root has no effect'
.format(filename=filename)
)
class AwesomeNavigation:
def __init__(self, navigation: MkDocsNavigation, options: Options):
self.options = options
self.meta = NavigationMeta(navigation.items, options)
if self.meta.root.title is not None:
warnings.warn(TitleInRootHasNoEffect(self.options.filename))
if self.meta.root.hide is not None:
warnings.warn(HideInRootHasNoEffect(self.options.filename))
self.items = self._process_children(
navigation.items,
self.options.collapse_single_pages,
self.meta.root
)
def _process_children(self, children: List[NavigationItem], collapse: bool, meta: Meta) -> List[NavigationItem]:
children = self._arrange_items(children, meta)
result = []
for item in children:
if isinstance(item, Section):
item = self._process_section(item, collapse)
if item is None:
continue
result.append(item)
return result
def _arrange_items(self, items: List[NavigationItem], meta: Meta) -> List[NavigationItem]:
if meta.arrange is not None:
try:
return arrange(items, meta.arrange, lambda item: basename(self._get_item_path(item)))
except InvalidArrangeEntry as e:
warning = ArrangeEntryNotFound(e.value, meta.path)
if self.options.strict:
raise warning
else:
warnings.warn(warning)
return items
def _process_section(self, section: Section, collapse_recursive: bool) -> Optional[NavigationItem]:
meta = self.meta.sections[section]
if meta.hide is True:
return None
if meta.collapse_single_pages is not None:
collapse_recursive = meta.collapse_single_pages
self._set_title(section, meta)
section.children = self._process_children(section.children, collapse_recursive, meta)
if not section.children:
return None
return self._collapse(section, meta.collapse, collapse_recursive)
def _get_item_path(self, item: NavigationItem) -> Optional[str]:
if isinstance(item, Section):
return dirname(self.meta.sections[item].path)
elif isinstance(item, Page):
return item.file.abs_src_path
@staticmethod
def _set_title(section: Section, meta: Meta):
if meta.title is not None:
section.title = meta.title
@staticmethod
def _collapse(section: Section, collapse: Optional[bool], collapse_recursive: bool) -> NavigationItem:
if collapse is None:
collapse = collapse_recursive
if collapse and len(section.children) == 1:
return section.children[0]
return section
def to_mkdocs(self) -> MkDocsNavigation:
pages = _get_by_type(self.items, Page)
_add_previous_and_next_links(pages)
_add_parent_links(self.items)
return MkDocsNavigation(self.items, pages)
class NavigationMeta:
def __init__(self, items: List[NavigationItem], options: Options):
self.options = options
self.sections = {}
root_path = self._gather_metadata(items)
self.root = Meta.try_load_from(join_paths(root_path, self.options.filename))
def _gather_metadata(self, items: List[NavigationItem]) -> Optional[str]:
paths = []
for item in items:
if isinstance(item, Page):
paths.append(item.file.abs_src_path)
elif isinstance(item, Section):
section_dir = self._gather_metadata(item.children)
paths.append(section_dir)
self.sections[item] = Meta.try_load_from(join_paths(section_dir, self.options.filename))
return self._common_dirname(paths)
@staticmethod
def _common_dirname(paths: List[Optional[str]]) -> Optional[str]:
if paths:
dirnames = [dirname(path) for path in paths]
if len(set(dirnames)) == 1:
return dirnames[0]
| [
"mkdocs.structure.nav._add_previous_and_next_links",
"mkdocs.structure.nav.Navigation",
"mkdocs.structure.nav._get_by_type",
"warnings.warn",
"mkdocs.structure.nav._add_parent_links"
] | [((4052, 4082), 'mkdocs.structure.nav._get_by_type', '_get_by_type', (['self.items', 'Page'], {}), '(self.items, Page)\n', (4064, 4082), False, 'from mkdocs.structure.nav import Navigation as MkDocsNavigation, Section, Link, _get_by_type, _add_parent_links, _add_previous_and_next_links\n'), ((4091, 4126), 'mkdocs.structure.nav._add_previous_and_next_links', '_add_previous_and_next_links', (['pages'], {}), '(pages)\n', (4119, 4126), False, 'from mkdocs.structure.nav import Navigation as MkDocsNavigation, Section, Link, _get_by_type, _add_parent_links, _add_previous_and_next_links\n'), ((4135, 4164), 'mkdocs.structure.nav._add_parent_links', '_add_parent_links', (['self.items'], {}), '(self.items)\n', (4152, 4164), False, 'from mkdocs.structure.nav import Navigation as MkDocsNavigation, Section, Link, _get_by_type, _add_parent_links, _add_previous_and_next_links\n'), ((4180, 4215), 'mkdocs.structure.nav.Navigation', 'MkDocsNavigation', (['self.items', 'pages'], {}), '(self.items, pages)\n', (4196, 4215), True, 'from mkdocs.structure.nav import Navigation as MkDocsNavigation, Section, Link, _get_by_type, _add_parent_links, _add_previous_and_next_links\n'), ((2663, 2685), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (2676, 2685), False, 'import warnings\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
try:
import pyDOE
except ImportError:
raise ImportError('pyDOE needs to be installed in order to use latin design')
from .base import ModelFreeDesignBase
class LatinDesign(ModelFreeDesignBase):
"""
Latin hypercube experiment design.
"""
def __init__(self, parameter_space):
super(LatinDesign, self).__init__(parameter_space)
def get_samples(self, point_count):
bounds = self.parameter_space.get_bounds()
X_design_aux = pyDOE.lhs(len(bounds), point_count, criterion='center')
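        # pyDOE returns samples in the unit hypercube; the lines below scale
        # them into the parameter-space bounds.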
ones = np.ones((X_design_aux.shape[0], 1))
lower_bound = np.asarray(bounds)[:, 0].reshape(1, len(bounds))
upper_bound = np.asarray(bounds)[:, 1].reshape(1, len(bounds))
diff = upper_bound - lower_bound
X_design = np.dot(ones, lower_bound) + X_design_aux * np.dot(ones, diff)
samples = self.parameter_space.round(X_design)
return samples
| [
"numpy.dot",
"numpy.asarray",
"numpy.ones"
] | [((679, 714), 'numpy.ones', 'np.ones', (['(X_design_aux.shape[0], 1)'], {}), '((X_design_aux.shape[0], 1))\n', (686, 714), True, 'import numpy as np\n'), ((919, 944), 'numpy.dot', 'np.dot', (['ones', 'lower_bound'], {}), '(ones, lower_bound)\n', (925, 944), True, 'import numpy as np\n'), ((962, 980), 'numpy.dot', 'np.dot', (['ones', 'diff'], {}), '(ones, diff)\n', (968, 980), True, 'import numpy as np\n'), ((738, 756), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (748, 756), True, 'import numpy as np\n'), ((809, 827), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (819, 827), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod, abstractstaticmethod
from pydantic import BaseModel
from typing import Any, Callable, Dict, List
import inspect
import asyncio
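# Event/adapter framework: events gather dependencies by type (from the
# caller's frame, their own fields and the coupler) and inject them into
# handler functions based on parameter annotations.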
def abcmodel(self):
...
class Result(list):
...
# Abstract base class for adapters
class Adapter(ABC):
@abstractmethod
def __eventspace__(self) -> Dict[str, Any]:
        '''Event space'''
...
@abstractmethod
def match(self) -> bool:
        '''Trigger matching'''
...
@abstractmethod
def __call__(self) -> Any:
        '''Invocation'''
...
@abstractstaticmethod
def funcevents(*args, **kwargs) -> List[Callable]:
        '''Executor functions'''
...
@abstractmethod
def coupler(self) -> Dict:
        '''Coupler'''
...
@abstractmethod
def callback(self) -> Any:
        '''Callback'''
...
_dependent: Dict[type, Any] = {}
_coupler: Dict[str, Any] = {}
_funcevents: List[Callable] = []
# Base class for event functions
class AdapterEvent(Adapter, BaseModel):
def match(self) -> bool:
return True
def coupler(self) -> dict:
return self._coupler
def __eventspace__(self) -> Dict[str, Any]:
space = {}
frame = inspect.currentframe()
space.update(frame.f_back.f_back.f_locals)
self._dependent.update(
{type(j): j for _, j in space.items()}
)
self._dependent.update(
{type(j): j for _, j in self.dict().items()}
)
self._dependent.update(
{type(j): j for _, j in self._callmethod(
self.coupler
)
.items()
}
)
return space
def __call__(self, *args, **kwargs) -> Any:
self.__eventspace__()
if not self._callmethod(self.match):
return None
result = Result()
for func in self.funcevents():
result.append(self._callmethod(func))
self._dependent.update({type(result):result})
return self._callmethod(self.callback)
def callback(self, result : Result) -> Any:
pass
def _injection(self, func: Callable) -> Dict[type, Any]:
        '''Dependency injection'''
f = inspect.signature(func)
return {j.name:self._dependent[j.annotation] for _,j in f.parameters.items()}
def _callmethod(self, func: Callable) -> Any:
        '''Call a method by injecting arguments matched on type annotations'''
return func(**self._injection(func))
class Config:
arbitrary_types_allowed = True
# Base class for asynchronous event functions
class AsyncAdapterEvent(Adapter, BaseModel):
async def match(self) -> bool:
return True
async def coupler(self) -> dict:
return self._coupler
async def __eventspace__(self) -> Dict[str, Any]:
space = {}
frame = inspect.currentframe()
space.update(frame.f_back.f_back.f_locals)
self._dependent.update(
{type(j):j for _,j in space.items()}
)
self._dependent.update(
{type(j):j for _,j in self.dict().items()}
)
T_coupler = await self._callmethod(self.coupler)
self._dependent.update(
{type(j):j for _,j in T_coupler.items()}
)
return space
async def __call__(self) -> Any:
print(self.__dict__)
await self.__eventspace__()
if not await self._callmethod(self.match):
return None
result = Result()
for func in await self.funcevents():
result.append(await self._callmethod(func))
self._dependent.update({type(result):result})
return await self._callmethod(self.callback)
async def callback(self, result: Result) -> Any:
pass
async def _injection(self, func: Callable) -> Dict[type, Any]:
f = inspect.signature(func)
return {j.name:self._dependent[j.annotation] for _,j in f.parameters.items()}
async def _callmethod(self, func: Callable) -> Any:
return await func(**await self._injection(func))
class Config:
arbitrary_types_allowed = True
# Frame-penetrating dependency injection
class FramePenetration:
def __enter__(self) -> "FramePenetration":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __init__(self, *args, introduce: Dict[str, Any]):
space, introduce, self._dependent = {}, {}, {}
frame = inspect.currentframe()
space.update(frame.f_back.f_locals)
self._dependent.update(
{type(j):j for _,j in space.items()}
)
self._dependent.update(
{type(i):i for i in args}
)
self._dependent.update(
{type(j):j for _,j in introduce.items()}
)
def __call__(self, *funcs: Callable) -> Any:
return [self._callmethod(func) for func in funcs]
def _injection(self, func: Callable) -> Dict[type, Any]:
        '''Dependency injection'''
f = inspect.signature(func)
return {j.name:self._dependent[j.annotation] for _,j in f.parameters.items()}
def _callmethod(self, func: Callable) -> Any:
        '''Call a method by injecting arguments matched on type annotations'''
return func(**self._injection(func))
class AsyncFramePenetration:
async def __aenter__(self) -> "AsyncFramePenetration":
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
def __init__(self, *args, introduce: Dict[str, Any]):
space, introduce, self._dependent = {}, {}, {}
frame = inspect.currentframe()
space.update(frame.f_back.f_locals)
self._dependent.update(
{type(j):j for _,j in space.items()}
)
self._dependent.update(
{type(i):i for i in args}
)
self._dependent.update(
{type(j):j for _,j in introduce.items()}
)
async def __call__(self, *funcs: Callable, concurrent: bool = False) -> Any:
if concurrent:
return [await task for task in [asyncio.create_task(self._callmethod(func)) for func in funcs]]
return [await self._callmethod(func) for func in funcs]
async def _injection(self, func: Callable) -> Dict[type, Any]:
        '''Dependency injection'''
f = inspect.signature(func)
return {j.name:self._dependent[j.annotation] for _,j in f.parameters.items()}
async def _callmethod(self, func: Callable) -> Any:
        '''Call a method by injecting arguments matched on type annotations'''
return await func(**await self._injection(func))
| [
"inspect.currentframe",
"inspect.signature"
] | [((1159, 1181), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1179, 1181), False, 'import inspect\n'), ((2151, 2174), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (2168, 2174), False, 'import inspect\n'), ((2716, 2738), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (2736, 2738), False, 'import inspect\n'), ((3723, 3746), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (3740, 3746), False, 'import inspect\n'), ((4316, 4338), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (4336, 4338), False, 'import inspect\n'), ((4862, 4885), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (4879, 4885), False, 'import inspect\n'), ((5403, 5425), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (5423, 5425), False, 'import inspect\n'), ((6124, 6147), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (6141, 6147), False, 'import inspect\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME>, Royal HaskoningDHV
import rasterio
import logging
import os
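# Thin rasterio wrappers for reading and writing single-band rasters.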
log = logging.getLogger(os.path.basename(__file__))
def read_raster(rasterfile, masked=True, band=1):
log.debug('reading {f.name:}'.format(f=rasterfile))
with rasterio.open(rasterfile) as src:
return src.read(band, masked=masked)
def write_raster(rasterfile, values, profile):
log.debug('writing {f.name:}'.format(f=rasterfile))
with rasterio.open(rasterfile, 'w', **profile) as dst:
return dst.write(values, 1) | [
"rasterio.open",
"os.path.basename"
] | [((145, 171), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (161, 171), False, 'import os\n'), ((291, 316), 'rasterio.open', 'rasterio.open', (['rasterfile'], {}), '(rasterfile)\n', (304, 316), False, 'import rasterio\n'), ((484, 525), 'rasterio.open', 'rasterio.open', (['rasterfile', '"""w"""'], {}), "(rasterfile, 'w', **profile)\n", (497, 525), False, 'import rasterio\n')] |
# The purpose of this algorithm is to reconstruct a different sample of the same
# class, borrowing the principle of an autoencoder.
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets,transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import random
from PIL import Image
class Conv_DeConv(nn.Module):
def __init__(self):
super(Conv_DeConv,self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5)
self.Re = nn.ReLU()
self.pool = nn.MaxPool2d(2,stride=2, return_indices=True)
self.unpool = nn.MaxUnpool2d(2, stride=2)
self.common_1 = nn.Linear(512,8)
self.fc1 = nn.Linear(8,4)
self.fc2 = nn.Linear(8,4)
self.fc3 = nn.Linear(8,4)
self.common_2 = nn.Linear(8, 512)
self.deconv1 = nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=5)
self.deconv2 = nn.ConvTranspose2d(in_channels=16, out_channels=1, kernel_size=5)
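    # forward(): flag 0 reconstructs the same sample (classic autoencoder),
    # flag 1 reconstructs a different sample of the same class, and flag 2
    # returns only the shared 4-dimensional representation.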
def forward(self, input_ ,flag=0):
input_, indices_1 = self.pool(self.Re(self.conv1(input_)))
input_, indices_2 = self.pool(self.Re(self.conv2(input_)))
tmp = input_.size()
input_ = input_.view(-1)
input_ = self.common_1(input_)
        if flag==0: # synchronous output: the traditional autoencoder path
input_1 = self.fc1(input_)
input_2 = self.fc2(input_)
        if flag==1: # asynchronous output: the new idea being tried here
input_1 = self.fc2(input_)
input_2 = self.fc3(input_)
        if flag==2: # representation only: can be fed to a clustering algorithm
output = self.fc2(input_)
return output
input_ = torch.cat([input_1, input_2])
input_ = self.common_2(input_).view(tmp)
input_ = self.unpool(input_, indices_2)
input_ = self.deconv1(input_)
input_ = self.unpool(input_, indices_1)
output = self.deconv2(input_)
return output
data_path = "CNN dataset"
save_dir = "CNN saved"
use_gpu = True
iteration = 1000
epochs = 10
train_set = datasets.MNIST(root=data_path, train=True, download=True, transform=transforms.ToTensor())
test_set = datasets.MNIST(root=data_path, train=False, download=False, transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_set,batch_size=10000)
test_loader = DataLoader(dataset=test_set,batch_size=500)
def show(input_, output_, name1=1,name2=1): # display and save the input and reconstructed images
input = input_.view(28,28)
output = output_.view(28,28)
    input_img = transforms.ToPILImage()(input)
    output_img = transforms.ToPILImage()(output)
input_img.show()
input_img.save("raw_epoch_{}_iter_{}.png".format(name1,name2))
output_img.show()
output_img.save("output_epoch_{}_iter_{}.png".format(name1,name2))
def wash_data1(data,label): # split the current batch by label (0-9) and store each class in its own dictionary entry
Origin={}
for i in range(10):
location = np.where(label==i)
out = data[location]
Origin["data_{}".format(i)]=out
return Origin
def wash_data3(Current):
tongbu = []
yibu = []
for i in range(len(Current)):
tmp = len(Current)
tongbu.append([Current[i],Current[i]])
for j in range(i+1,len(Current)):
yibu.append([Current[i],Current[j]])
yibu.append([Current[j],Current[i]])
return {"tongbu":tongbu,"yibu":yibu}
def wash_data2(Origin): # pack the washed data into a ten-element list; each element holds the synchronous pairs and the asynchronous pairs for one class
shuzu = []
for i in range(10):
Current = Origin["data_{}".format(i)]
result = wash_data3(Current)
shuzu.append(result)
return shuzu
def cuorong(input_,iteration):
if len(input_)>=iteration:
tmp = input_[:iteration]
        random.shuffle(tmp)
        return tmp
else:
cishu = int(iteration / len(input_))
yushu = iteration % len(input_)
tmp = []
for i in range(cishu):
tmp+=input_
tmp+=input_[:yushu]
        random.shuffle(tmp)
        return tmp
model = Conv_DeConv()
optimizer = optim.SGD(model.parameters(),lr=1e-3,momentum=0.3)
loss = nn.MSELoss()
for data_batch,data_label in train_loader:
Origin=wash_data1(data_batch,data_label)
shuzu = wash_data2(Origin)
tongbu = []
yibu = []
# tongbu_test = []
for i in range(10):
for j in range(len(shuzu[i]["tongbu"])):
tongbu.append(shuzu[i]["tongbu"][j])
for k in range(len(shuzu[i]["yibu"])):
yibu.append(shuzu[i]["yibu"][k])
# for l in range(int(0.9*len(shuzu[i]["tongbu"])),len(shuzu[i]["tongbu"])):
# tongbu_test[i].append(shuzu[i]["tongbu"][l])
tongbu_Loss=[]
yibu_Loss=[]
for epoch in range(epochs):
for i in range(iteration):
test_1 = tongbu[i][0].view(1,1,28,28)
test_1_result = tongbu[i][0].view(1,1,28,28)
out_1 = model(input_ = test_1, flag = 0)
output_1 = loss(out_1, test_1)
tongbu_Loss.append(output_1)
optimizer.zero_grad()
output_1.backward()
optimizer.step()
test_2 = yibu[i][0].view(1,1,28,28)
test_2_result = yibu[i][1].view(1,1,28,28)
out_2 = model(input_ = test_2, flag = 1)
output_2 = loss(out_2, test_2)
yibu_Loss.append(output_2)
optimizer.zero_grad()
output_2.backward()
optimizer.step()
if i%10==0:
print("epoch {}, iteration {}".format(epoch,i),"tongbu loss is {}".format(output_1)," yibu loss is {}".format(output_2))
            if (i+1) % int(0.2*iteration) == 0:
torch.save(model,"epoch_{}_iteration_{}.pkl".format(epoch,i))
plt.plot(tongbu_Loss)
plt.title("Loss on train set")
plt.xlabel("every 10 iterations")
plt.ylabel("Loss Value")
plt.show()
plt.plot(yibu_Loss)
plt.title("Loss on train set")
plt.xlabel("every 10 iterations")
plt.ylabel("Loss Value")
plt.show()
break
yibu_test = []
for test_batch,test_label in test_loader:
Origin_test = wash_data1(test_batch,test_label)
shuzu_test = wash_data2(Origin_test)
| [
"torch.nn.ReLU",
"torch.nn.MaxUnpool2d",
"torchvision.transforms.ToPILImage",
"random.shuffle",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.title",
"torchvision.transforms.ToTensor",
"torch.nn.ConvTranspose2d",
"torch.cat",
"matplotlib.pyplot.show"
] | [((2409, 2456), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': '(10000)'}), '(dataset=train_set, batch_size=10000)\n', (2419, 2456), False, 'from torch.utils.data import DataLoader\n'), ((2470, 2514), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': '(500)'}), '(dataset=test_set, batch_size=500)\n', (2480, 2514), False, 'from torch.utils.data import DataLoader\n'), ((4197, 4209), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4207, 4209), False, 'from torch import nn\n'), ((2652, 2680), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', (['input'], {}), '(input)\n', (2673, 2680), False, 'from torchvision import datasets, transforms\n'), ((2698, 2727), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', (['output'], {}), '(output)\n', (2719, 2727), False, 'from torchvision import datasets, transforms\n'), ((5810, 5831), 'matplotlib.pyplot.plot', 'plt.plot', (['tongbu_Loss'], {}), '(tongbu_Loss)\n', (5818, 5831), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5866), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss on train set"""'], {}), "('Loss on train set')\n", (5845, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5871, 5904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""every 10 iterations"""'], {}), "('every 10 iterations')\n", (5881, 5904), True, 'import matplotlib.pyplot as plt\n'), ((5909, 5933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss Value"""'], {}), "('Loss Value')\n", (5919, 5933), True, 'import matplotlib.pyplot as plt\n'), ((5938, 5948), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5946, 5948), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5973), 'matplotlib.pyplot.plot', 'plt.plot', (['yibu_Loss'], {}), '(yibu_Loss)\n', (5962, 5973), True, 'import matplotlib.pyplot as plt\n'), ((5978, 6008), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss on train set"""'], {}), "('Loss on train set')\n", (5987, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6013, 6046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""every 10 iterations"""'], {}), "('every 10 iterations')\n", (6023, 6046), True, 'import matplotlib.pyplot as plt\n'), ((6051, 6075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss Value"""'], {}), "('Loss Value')\n", (6061, 6075), True, 'import matplotlib.pyplot as plt\n'), ((6080, 6090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6088, 6090), True, 'import matplotlib.pyplot as plt\n'), ((542, 598), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(16)', 'kernel_size': '(5)'}), '(in_channels=1, out_channels=16, kernel_size=5)\n', (551, 598), False, 'from torch import nn\n'), ((620, 677), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(16)', 'out_channels': '(32)', 'kernel_size': '(5)'}), '(in_channels=16, out_channels=32, kernel_size=5)\n', (629, 677), False, 'from torch import nn\n'), ((696, 705), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (703, 705), False, 'from torch import nn\n'), ((726, 772), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'return_indices': '(True)'}), '(2, stride=2, return_indices=True)\n', (738, 772), False, 'from torch import nn\n'), ((794, 821), 'torch.nn.MaxUnpool2d', 'nn.MaxUnpool2d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (808, 821), False, 'from torch import nn\n'), ((846, 863), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(8)'], {}), '(512, 8)\n', (855, 863), False, 'from torch import nn\n'), ((882, 
897), 'torch.nn.Linear', 'nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (891, 897), False, 'from torch import nn\n'), ((916, 931), 'torch.nn.Linear', 'nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (925, 931), False, 'from torch import nn\n'), ((950, 965), 'torch.nn.Linear', 'nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (959, 965), False, 'from torch import nn\n'), ((989, 1006), 'torch.nn.Linear', 'nn.Linear', (['(8)', '(512)'], {}), '(8, 512)\n', (998, 1006), False, 'from torch import nn\n'), ((1030, 1096), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(16)', 'kernel_size': '(5)'}), '(in_channels=32, out_channels=16, kernel_size=5)\n', (1048, 1096), False, 'from torch import nn\n'), ((1120, 1185), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(16)', 'out_channels': '(1)', 'kernel_size': '(5)'}), '(in_channels=16, out_channels=1, kernel_size=5)\n', (1138, 1185), False, 'from torch import nn\n'), ((1818, 1847), 'torch.cat', 'torch.cat', (['[input_1, input_2]'], {}), '([input_1, input_2])\n', (1827, 1847), False, 'import torch\n'), ((2266, 2287), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2285, 2287), False, 'from torchvision import datasets, transforms\n'), ((2370, 2391), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2389, 2391), False, 'from torchvision import datasets, transforms\n'), ((3037, 3057), 'numpy.where', 'np.where', (['(label == i)'], {}), '(label == i)\n', (3045, 3057), True, 'import numpy as np\n'), ((3854, 3873), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (3868, 3873), False, 'import random\n'), ((4084, 4103), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (4098, 4103), False, 'import random\n')] |
import os
import numpy as np
from onnx import onnx_ml_pb2 as xpb2
from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input
def test_empty_graph():
g = empty_graph()
assert type(g) is xpb2.GraphProto, "Failed to create empty graph."
def test_graph_from_file():
g = graph_from_file("files/non-existing-file.onnx")
    assert not g, "Graph from file failed to check empty file."
g = graph_from_file("files/example01.onnx")
assert type(g) is xpb2.GraphProto, "Graph from file failed to open file."
def test_graph_to_file():
g = empty_graph()
check1 = graph_to_file(g, "")
assert not check1, "Graph to file failed should have failed."
check2 = graph_to_file(g, "files/test_graph_to_file.onnx")
assert check2, "Graph to file failed to write file."
os.remove("files/test_graph_to_file.onnx")
def test_run():
g = graph_from_file("files/add.onnx")
example = {"x1": np.array([2]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
result = run(g,
inputs=example,
outputs=["sum"]
)
assert result[0] == 7, "Add output not correct."
result = run(g, inputs="", outputs="sum")
assert not result, "Model with this input should not run."
def test_display():
from onnx import TensorProto
print(TensorProto.DOUBLE)
return True # No test for display
def test_scblbl_input():
example = {"in": np.array([1,2,3,4]).astype(np.int32)}
result = sclbl_input(example, _verbose=False)
assert result == '{"input": "CAQQBkoQAQAAAAIAAAADAAAABAAAAA==", "type":"pb"}', "PB output not correct."
example = {"x1": np.array([1,2,3,4]).astype(np.int32), "x2": np.array([1,2,3,4]).astype(np.int32)}
result = sclbl_input(example, _verbose=False)
assert result == '{"input": ["CAQQBkoQAQAAAAIAAAADAAAABAAAAA==","CAQQBkoQAQAAAAIAAAADAAAABAAAAA=="], "type":"pb"}',\
"PB output 2 not correct. "
example = {"in": np.array([1,2,3,4]).astype(np.int32)}
result = sclbl_input(example, "raw", _verbose=False)
assert result == '{"input": "AQAAAAIAAAADAAAABAAAAA==", "type":"raw"}', "Raw output not correct."
example = {"x1": np.array([1,2,3,4]).astype(np.int32), "x2": np.array([1,2,3,4]).astype(np.int32)}
result = sclbl_input(example, "raw", _verbose=False)
assert result == '{"input": ["AQAAAAIAAAADAAAABAAAAA==","AQAAAAIAAAADAAAABAAAAA=="], "type":"raw"}',\
"Raw output 2 not correct. "
example = {"x1": np.array([1.2]).astype(np.float32), "x2": np.array([2.5]).astype(np.float32)}
input = sclbl_input(example, _verbose=False)
print(input)
def test_list_data_types():
test = list_data_types()
assert test, "Data types should be listed."
def test_list_operators():
test = list_operators()
assert test, "Operators should be listed." | [
"sclblonnx.list_operators",
"sclblonnx.graph_from_file",
"sclblonnx.sclbl_input",
"sclblonnx.run",
"sclblonnx.graph_to_file",
"numpy.array",
"sclblonnx.empty_graph",
"sclblonnx.list_data_types",
"os.remove"
] | [((217, 230), 'sclblonnx.empty_graph', 'empty_graph', ([], {}), '()\n', (228, 230), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((340, 387), 'sclblonnx.graph_from_file', 'graph_from_file', (['"""files/non-existing-file.onnx"""'], {}), "('files/non-existing-file.onnx')\n", (355, 387), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((460, 499), 'sclblonnx.graph_from_file', 'graph_from_file', (['"""files/example01.onnx"""'], {}), "('files/example01.onnx')\n", (475, 499), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((614, 627), 'sclblonnx.empty_graph', 'empty_graph', ([], {}), '()\n', (625, 627), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((641, 661), 'sclblonnx.graph_to_file', 'graph_to_file', (['g', '""""""'], {}), "(g, '')\n", (654, 661), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((741, 790), 'sclblonnx.graph_to_file', 'graph_to_file', (['g', '"""files/test_graph_to_file.onnx"""'], {}), "(g, 'files/test_graph_to_file.onnx')\n", (754, 790), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((852, 894), 'os.remove', 'os.remove', (['"""files/test_graph_to_file.onnx"""'], {}), "('files/test_graph_to_file.onnx')\n", (861, 894), False, 'import os\n'), ((921, 954), 'sclblonnx.graph_from_file', 'graph_from_file', (['"""files/add.onnx"""'], {}), "('files/add.onnx')\n", (936, 954), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((1063, 1102), 'sclblonnx.run', 'run', (['g'], {'inputs': 'example', 'outputs': "['sum']"}), "(g, inputs=example, outputs=['sum'])\n", (1066, 1102), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((1230, 1262), 'sclblonnx.run', 'run', (['g'], {'inputs': '""""""', 'outputs': '"""sum"""'}), "(g, inputs='', outputs='sum')\n", (1233, 1262), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((1550, 1586), 'sclblonnx.sclbl_input', 'sclbl_input', (['example'], {'_verbose': '(False)'}), '(example, _verbose=False)\n', (1561, 1586), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((1812, 1848), 'sclblonnx.sclbl_input', 'sclbl_input', (['example'], {'_verbose': '(False)'}), '(example, _verbose=False)\n', (1823, 1848), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((2079, 2122), 'sclblonnx.sclbl_input', 'sclbl_input', (['example', '"""raw"""'], {'_verbose': '(False)'}), "(example, 'raw', _verbose=False)\n", (2090, 2122), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((2342, 2385), 'sclblonnx.sclbl_input', 'sclbl_input', (['example', '"""raw"""'], {'_verbose': '(False)'}), "(example, 'raw', _verbose=False)\n", (2353, 2385), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, 
run, list_data_types, list_operators, sclbl_input\n'), ((2641, 2677), 'sclblonnx.sclbl_input', 'sclbl_input', (['example'], {'_verbose': '(False)'}), '(example, _verbose=False)\n', (2652, 2677), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((2736, 2753), 'sclblonnx.list_data_types', 'list_data_types', ([], {}), '()\n', (2751, 2753), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((2842, 2858), 'sclblonnx.list_operators', 'list_operators', ([], {}), '()\n', (2856, 2858), False, 'from sclblonnx import empty_graph, graph_from_file, graph_to_file, run, list_data_types, list_operators, sclbl_input\n'), ((976, 989), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (984, 989), True, 'import numpy as np\n'), ((1016, 1029), 'numpy.array', 'np.array', (['[5]'], {}), '([5])\n', (1024, 1029), True, 'import numpy as np\n'), ((1499, 1521), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1507, 1521), True, 'import numpy as np\n'), ((1717, 1739), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1725, 1739), True, 'import numpy as np\n'), ((1761, 1783), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1769, 1783), True, 'import numpy as np\n'), ((2028, 2050), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2036, 2050), True, 'import numpy as np\n'), ((2247, 2269), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2255, 2269), True, 'import numpy as np\n'), ((2291, 2313), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2299, 2313), True, 'import numpy as np\n'), ((2551, 2566), 'numpy.array', 'np.array', (['[1.2]'], {}), '([1.2])\n', (2559, 2566), True, 'import numpy as np\n'), ((2593, 2608), 'numpy.array', 'np.array', (['[2.5]'], {}), '([2.5])\n', (2601, 2608), True, 'import numpy as np\n')] |
# Generated by Django 3.0.4 on 2020-04-20 07:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0008_auto_20200420_1014'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='datein',
),
migrations.RemoveField(
model_name='comment',
name='dateout',
),
]
| [
"django.db.migrations.RemoveField"
] | [((237, 296), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""comment"""', 'name': '"""datein"""'}), "(model_name='comment', name='datein')\n", (259, 296), False, 'from django.db import migrations\n'), ((341, 401), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""comment"""', 'name': '"""dateout"""'}), "(model_name='comment', name='dateout')\n", (363, 401), False, 'from django.db import migrations\n')] |
import numpy as np
from tqdm import tqdm
LOOP_SIZE = 10000000
SEED = [0, 1, 0]
def monty_hall(typed):
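    # typed == 0: the contestant keeps the first pick; typed == 1: the contestant
    # switches after the host reveals a goat door. Returns 1 if the final pick is
    # the car (the single 1 in SEED), else 0.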
doors = SEED
np.random.shuffle(doors)
opted_3 = np.random.randint(0, 3)
if not typed:
result = doors[opted_3]
return result
else:
for i in range(3):
if i != opted_3 and not doors[i]:
excluded_3 = i
for j in range(3):
if j != excluded_3 and j != opted_3:
result = doors[j]
return result
sum_0 = 0.
sum_1 = 0.
for _ in tqdm(range(LOOP_SIZE)):
sum_0 += monty_hall(typed=0)
sum_1 += monty_hall(typed=1)
print('For those who kept their first choice in the second round, the hit percentage was: ', (sum_0/LOOP_SIZE)*100)
print('For those who switched doors in the second round, the hit percentage was: ', (sum_1/LOOP_SIZE)*100)
| [
"numpy.random.randint",
"numpy.random.shuffle"
] | [((126, 150), 'numpy.random.shuffle', 'np.random.shuffle', (['doors'], {}), '(doors)\n', (143, 150), True, 'import numpy as np\n'), ((165, 188), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (182, 188), True, 'import numpy as np\n')] |
# kallsyms
no_such_symbol
printk
printk("buf: %*pE\n", 5, "aaa\x15a")
printk("IPv4: %pI4\n", "\x7f\x00\x00\x01")
from kernel_ffi import kmalloc
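# Allocate an 8-byte kernel buffer and copy in 0xC0 0xA8 0x01 0x01 (192.168.1.1),
# so the %pI4 specifier is given a kernel pointer to dereference.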
x = kmalloc(8)
memcpy(x, "\xc0\xa8\x01\x01", 4)
printk("IPv4: %pI4\n", x)
printk("IPv4: %pi4\n", "\x7f\x00\x00\x01")
| [
"kernel_ffi.kmalloc"
] | [((150, 160), 'kernel_ffi.kmalloc', 'kmalloc', (['(8)'], {}), '(8)\n', (157, 160), False, 'from kernel_ffi import kmalloc\n')] |
from random import choice
class cell(object) :
def __init__(self, pos_x, pos_y, cell_size, color_palette) :
self.pos_x = pos_x
self.pos_y = pos_y
self.cell_size = cell_size
self.free_space = []
self.color_palette = color_palette
self.draw_big_triangle(self.pos_x, self.pos_y)
for emplacement in self.free_space :
if emplacement == 1 :
self.draw_small_triangles(self.pos_x, self.pos_y)
elif emplacement == 2 :
self.draw_small_triangles(self.pos_x + self.cell_size / 2 , self.pos_y)
elif emplacement == 3 :
self.draw_small_triangles(self.pos_x, self.pos_y + self.cell_size / 2)
elif emplacement == 4 :
self.draw_small_triangles(self.pos_x + self.cell_size / 2, self.pos_y + self.cell_size / 2)
else :
raise ValueError
def draw_small_triangles(self, pos_x, pos_y, random_axis = True, axis = "U") :
if random_axis :
axis = choice(["U", "L", "R", "D"])
else :
axis = axis
r, g, b = choice(self.color_palette)
fill(r, g, b)
if axis == "U" :
triangle(pos_x, pos_y + self.cell_size / 2, pos_x + self.cell_size / 2, pos_y + self.cell_size / 2, pos_x + self.cell_size / 4, pos_y)
elif axis == "L" :
triangle(pos_x, pos_y, pos_x, pos_y + self.cell_size / 2, pos_x + self.cell_size / 2, pos_y + self.cell_size / 4)
elif axis == "R" :
triangle(pos_x + self.cell_size / 2, pos_y, pos_x + self.cell_size / 2, pos_y + self.cell_size / 2, pos_x, pos_y + self.cell_size / 4)
elif axis == "D" :
triangle(pos_x, pos_y, pos_x + self.cell_size / 2, pos_y, pos_x + self.cell_size / 4, pos_y + self.cell_size / 2)
else :
raise ValueError
def draw_big_triangle(self,
pos_x,
pos_y,
random_axis = True,
random_half = True,
axis = "U", half = 1):
"""TODO Doc"""
# checking for the random axis mode ( if True give back 1 axis between the 4 possibles )
if random_axis:
axis = choice(["U", "L", "R", "D"])
else:
axis = axis
# checking random half mode ( if True give back 1 half between the 2 possibles )
if random_half:
half = choice([1, 2])
else:
half = half
if half == 2 and (axis == "R" or axis == "L") :
pos_x += self.cell_size / 2
self.free_space.append(1)
self.free_space.append(3)
elif half == 2 and (axis == "D" or axis == "U") :
pos_y += self.cell_size / 2
self.free_space.append(1)
self.free_space.append(2)
elif half == 1 and (axis == "R" or axis == "L") :
self.free_space.append(2)
self.free_space.append(4)
elif half == 1 and (axis == "D" or axis == "U") :
self.free_space.append(3)
self.free_space.append(4)
r, g, b = choice(self.color_palette)
fill(r, g, b)
if axis == "U":
triangle(pos_x, pos_y, pos_x + self.cell_size, pos_y, pos_x + self.cell_size / 2, pos_y + self.cell_size / 2)
elif axis == "D":
triangle(pos_x, pos_y + self.cell_size / 2, pos_x + self.cell_size, pos_y + self.cell_size / 2, pos_x + self.cell_size / 2,
pos_y)
elif axis == "L":
triangle(pos_x, pos_y, pos_x + self.cell_size / 2, pos_y + self.cell_size / 2, pos_x, pos_y + self.cell_size)
elif axis == "R":
triangle(pos_x + self.cell_size / 2, pos_y, pos_x + self.cell_size / 2, pos_y + self.cell_size, pos_x,
pos_y + self.cell_size / 2)
| [
"random.choice"
] | [((1141, 1167), 'random.choice', 'choice', (['self.color_palette'], {}), '(self.color_palette)\n', (1147, 1167), False, 'from random import choice\n'), ((3179, 3205), 'random.choice', 'choice', (['self.color_palette'], {}), '(self.color_palette)\n', (3185, 3205), False, 'from random import choice\n'), ((1046, 1074), 'random.choice', 'choice', (["['U', 'L', 'R', 'D']"], {}), "(['U', 'L', 'R', 'D'])\n", (1052, 1074), False, 'from random import choice\n'), ((2291, 2319), 'random.choice', 'choice', (["['U', 'L', 'R', 'D']"], {}), "(['U', 'L', 'R', 'D'])\n", (2297, 2319), False, 'from random import choice\n'), ((2491, 2505), 'random.choice', 'choice', (['[1, 2]'], {}), '([1, 2])\n', (2497, 2505), False, 'from random import choice\n')] |
import json
import numpy as np
from scipy.spatial.transform import Rotation as R
# from MH_04_difficult/cam0/sensor.yaml
tf = np.matrix([[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
[0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
[-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
[0.0, 0.0, 0.0, 1.0]])
r = R.from_matrix(tf[:3, :3])
q = r.as_quat()
tf_dict = {"x": tf[0, 3], "y": tf[1, 3], "z": tf[2, 3], "qx": q[0], "qy": q[1], "qz": q[2], "qw": q[3]}
print(json.dumps(tf_dict))
| [
"numpy.matrix",
"json.dumps",
"scipy.spatial.transform.Rotation.from_matrix"
] | [((126, 386), 'numpy.matrix', 'np.matrix', (['[[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975], [\n 0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768], [-\n 0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949], [\n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.0148655429818, -0.999880929698, 0.00414029679422, -\n 0.0216401454975], [0.999557249008, 0.0149672133247, 0.025715529948, -\n 0.064676986768], [-0.0257744366974, 0.00375618835797, 0.999660727178, \n 0.00981073058949], [0.0, 0.0, 0.0, 1.0]])\n', (135, 386), True, 'import numpy as np\n'), ((424, 449), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['tf[:3, :3]'], {}), '(tf[:3, :3])\n', (437, 449), True, 'from scipy.spatial.transform import Rotation as R\n'), ((576, 595), 'json.dumps', 'json.dumps', (['tf_dict'], {}), '(tf_dict)\n', (586, 595), False, 'import json\n')] |
'''This is like pexpect, but it works with a serial port that you
pass it. You are responsible for opening and closing the serial port.
This allows you to use Pexpect with the serial ports that pyserial supports.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, <NAME> <<EMAIL>>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from pexpect.spawnbase import SpawnBase
from pexpect.exceptions import ExceptionPexpect
__all__ = ['SerialSpawn']
class SerialSpawn(SpawnBase):
'''This is like pexpect.spawn but allows you to supply a serial created by
pyserial.'''
def __init__ (self, ser, args=None, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, encoding=None, codec_errors='strict'):
        '''This takes a pyserial Serial object as input. Please make sure the serial port is open
before creating SerialSpawn.'''
self.ser = ser
if not ser.isOpen():
raise ExceptionPexpect('serial port is not ready')
self.args = None
self.command = None
SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
encoding=encoding, codec_errors=codec_errors)
self.own_fd = False
self.closed = False
self.name = '<serial port %s>' % ser.port
def close (self):
"""Close the serial port.
Calling this method a second time does nothing.
"""
if not self.ser.isOpen():
return
self.flush()
self.ser.close()
self.closed = True
def isalive (self):
'''This checks if the serial port is still valid.'''
return self.ser.isOpen()
def read_nonblocking(self, size=1, timeout=None):
s = self.ser.read(size)
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s
def send(self, s):
"Write to serial, return number of bytes written"
s = self._coerce_send_string(s)
self._log(s, 'send')
b = self._encoder.encode(s, final=False)
return self.ser.write(b)
def sendline(self, s):
"Write to fd with trailing newline, return number of bytes written"
s = self._coerce_send_string(s)
return self.send(s + self.linesep)
def write(self, s):
"Write to serial, return None"
self.send(s)
def writelines(self, sequence):
"Call self.write() for each item in sequence"
for s in sequence:
self.write(s)
| [
"pexpect.exceptions.ExceptionPexpect",
"pexpect.spawnbase.SpawnBase.__init__"
] | [((1842, 1961), 'pexpect.spawnbase.SpawnBase.__init__', 'SpawnBase.__init__', (['self', 'timeout', 'maxread', 'searchwindowsize', 'logfile'], {'encoding': 'encoding', 'codec_errors': 'codec_errors'}), '(self, timeout, maxread, searchwindowsize, logfile,\n encoding=encoding, codec_errors=codec_errors)\n', (1860, 1961), False, 'from pexpect.spawnbase import SpawnBase\n'), ((1735, 1779), 'pexpect.exceptions.ExceptionPexpect', 'ExceptionPexpect', (['"""serial port is not ready"""'], {}), "('serial port is not ready')\n", (1751, 1779), False, 'from pexpect.exceptions import ExceptionPexpect\n')] |
#! /usr/bin/env python3
# coding: utf-8
import logging
import os
import pickle
class GenerationStepManager():
"""Class that manage the loading and saving of the different generation steps. It's used to skip long process.
Parameters
==========
enabled: bool
Indicate if the steps must be loaded or not
    path: str
        Path of the steps.bin file used to store the steps
step: int
Id of the step, must be greater or equal to zero
data_type: type
Any type that has a to_array() and clone() method and a from_array(arr) static method"""
def __init__(self, enabled: bool, path: str, step: int, data_type: type):
self._enabled = enabled
self._path = path
self._data_type = data_type
self._data = None
self._step = step
self._steps = {}
self._loaded = False
# Check if the given type has the wanted interface
if not (hasattr(data_type, 'to_array') and hasattr(data_type, 'from_array') and hasattr(data_type, 'clone')):
raise NotImplementedError(
"Data type '{dt}' must implement the method to_array() and the static method from_array(a)".format(dt=str(data_type)))
@property
def path(self) -> str:
"""Access the file path property
Returns
=======
str
The path to the file"""
return self._path
@property
def data(self):
"""Access the data property"""
return self._data
@property
def loaded(self):
"""Access the loaded property"""
return self._loaded
def load(self):
"""Load the steps.bin file"""
if os.path.exists(self._path):
try:
# Loading the steps
with open(self._path, 'rb') as file:
self._steps = pickle.load(file)
except Exception as e:
logging.warning("Fail to load '{p}'. Initializing with the parameters.\n{err}".format(
p=self._path, err=e))
self._steps = {}
self._step = -1
else:
# Init empty generation steps
self._steps = {}
self._step = -1
self._loaded = True
def save(self):
"""Save the steps into steps.bin file"""
# Execute this action only if the debug is enabled
if not self._enabled:
return
# Delete existing file
if os.path.exists(self._path):
os.remove(self._path)
# Create the new file
try:
with open(self._path, 'wb') as file:
pickle.dump(self._steps, file)
except Exception as e:
logging.warning(
"Fail to save steps in '{p}': {err}".format(p=self._path, err=e))
def init_data(self, *args, **kwargs):
"""Initialize the data or retrieve it from the bin file
Parameters
==========
Whatever the data type init takes as parameters
Returns
=======
data_type
Object of given data type"""
self._data = None
if self._enabled and self._step in self._steps:
self._data = self._data_type.from_array(self._steps[self._step]).clone()
if self._data is None:
self._data = self._data_type(*args, **kwargs)
self._step = -1
return self._data
def make_step(self, step: int) -> object:
"""If the step is loaded and before the current generation step, it loads it elses it run it.
Use
===
This method is a decorator
```
@a_generation_step_manager_object.load_step(2)
def do_something():
#Do something
return result_of_data_type
do_something()
```
Parameters
==========
step: int
The step of the function.
Returns
=======
data_type
            The object of data type or whatever the function returns.
Raises
======
TypeError
            If the decorated function returns a different type than the data type."""
def _decorator(func):
if self._enabled:
def g(*args, **kwargs):
# Run the function if the wanted step is higher than
# the current step
if step > self._step:
result = func(*args, **kwargs)
if type(result) != self._data_type:
raise TypeError("The function return the wrong type of data. Expected: {dt}, Actual: {tt}".format(
dt=self._data_type, tt=type(result)))
self._add_step(step, result)
return result
                g.__name__ = func.__name__  # preserve the wrapped function's name
return g
else:
return func
return _decorator
def _add_step(self, step: int, data: object):
"""Add the step/data couple to the steps data
Parameters
==========
step: int
Step id
data: object of data_type
Data to associate to the step"""
# Execute this action only if the debug is enabled
if not self._enabled:
return
if step in self._steps:
logging.warning(
"Step {s} already exists, it will be replaced.".format(s=step))
self._steps[step] = data.clone().to_array()
| [
"os.path.exists",
"pickle.load",
"pickle.dump",
"os.remove"
] | [((1682, 1708), 'os.path.exists', 'os.path.exists', (['self._path'], {}), '(self._path)\n', (1696, 1708), False, 'import os\n'), ((2476, 2502), 'os.path.exists', 'os.path.exists', (['self._path'], {}), '(self._path)\n', (2490, 2502), False, 'import os\n'), ((2516, 2537), 'os.remove', 'os.remove', (['self._path'], {}), '(self._path)\n', (2525, 2537), False, 'import os\n'), ((2647, 2677), 'pickle.dump', 'pickle.dump', (['self._steps', 'file'], {}), '(self._steps, file)\n', (2658, 2677), False, 'import pickle\n'), ((1850, 1867), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1861, 1867), False, 'import pickle\n')] |
from pathlib import Path
import numpy
from acoustic_feature_extractor.data.f0 import F0, F0Type
from extractor.extract_f0 import extract_f0
from tests.utility import true_data_base_dir
def test_extract_f0(
data_dir: Path,
with_vuv: bool,
f0_type: F0Type,
):
output_dir = data_dir / f"output_extract_f0-with_vuv={with_vuv}-f0_type={f0_type}"
extract_f0(
input_glob=data_dir / "music*.wav",
output_directory=output_dir,
sampling_rate=24000,
frame_period=5.0,
f0_floor=71.0,
f0_ceil=800.0,
with_vuv=with_vuv,
f0_type=f0_type,
)
true_data_dir = true_data_base_dir.joinpath(
f"output_extract_f0-with_vuv={with_vuv}-f0_type={f0_type}"
)
output_paths = sorted(output_dir.glob("*.npy"))
true_paths = sorted(true_data_dir.glob("*.npy"))
# # overwrite true data
# for output_path in output_paths:
# output_data = F0.load(output_path)
# true_data_dir.mkdir(parents=True, exist_ok=True)
# true_path = true_data_dir.joinpath(output_path.name)
# output_data.save(true_path)
assert len(output_paths) == len(true_paths)
for output_path, true_path in zip(output_paths, true_paths):
output_data = F0.load(output_path)
true_data = F0.load(true_path)
numpy.testing.assert_allclose(
output_data.array, true_data.array, rtol=0, atol=1e-6
)
assert output_data.rate == true_data.rate
| [
"acoustic_feature_extractor.data.f0.F0.load",
"extractor.extract_f0.extract_f0",
"numpy.testing.assert_allclose",
"tests.utility.true_data_base_dir.joinpath"
] | [((364, 552), 'extractor.extract_f0.extract_f0', 'extract_f0', ([], {'input_glob': "(data_dir / 'music*.wav')", 'output_directory': 'output_dir', 'sampling_rate': '(24000)', 'frame_period': '(5.0)', 'f0_floor': '(71.0)', 'f0_ceil': '(800.0)', 'with_vuv': 'with_vuv', 'f0_type': 'f0_type'}), "(input_glob=data_dir / 'music*.wav', output_directory=output_dir,\n sampling_rate=24000, frame_period=5.0, f0_floor=71.0, f0_ceil=800.0,\n with_vuv=with_vuv, f0_type=f0_type)\n", (374, 552), False, 'from extractor.extract_f0 import extract_f0\n'), ((637, 729), 'tests.utility.true_data_base_dir.joinpath', 'true_data_base_dir.joinpath', (['f"""output_extract_f0-with_vuv={with_vuv}-f0_type={f0_type}"""'], {}), "(\n f'output_extract_f0-with_vuv={with_vuv}-f0_type={f0_type}')\n", (664, 729), False, 'from tests.utility import true_data_base_dir\n'), ((1255, 1275), 'acoustic_feature_extractor.data.f0.F0.load', 'F0.load', (['output_path'], {}), '(output_path)\n', (1262, 1275), False, 'from acoustic_feature_extractor.data.f0 import F0, F0Type\n'), ((1296, 1314), 'acoustic_feature_extractor.data.f0.F0.load', 'F0.load', (['true_path'], {}), '(true_path)\n', (1303, 1314), False, 'from acoustic_feature_extractor.data.f0 import F0, F0Type\n'), ((1324, 1413), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['output_data.array', 'true_data.array'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(output_data.array, true_data.array, rtol=0,\n atol=1e-06)\n', (1353, 1413), False, 'import numpy\n')] |
import datetime as dt
import lxml.html
import re
from .utils import get_short_codes
from urlparse import urlparse
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
HI_URL_BASE = "http://capitol.hawaii.gov"
SHORT_CODES = "%s/committees/committees.aspx?chamber=all" % (HI_URL_BASE)
def create_bill_report_url( chamber, year, bill_type ):
cname = { "upper" : "s", "lower" : "h" }[chamber]
bill_slug = {
"bill" : "intro%sb" % ( cname ),
"cr" : "%sCR" % ( cname.upper() ),
"r" : "%sR" % ( cname.upper() )
}
return HI_URL_BASE + "/report.aspx?type=" + bill_slug[bill_type] + \
"&year=" + year
def categorize_action(action):
classifiers = (
('Pass(ed)? First Reading', 'bill:reading:1'),
('Introduced and Pass(ed)? First Reading',
['bill:introduced', 'bill:reading:1']),
('Introduced', 'bill:introduced'),
#('The committee\(s\) recommends that the measure be deferred', ?
('Re(-re)?ferred to ', 'committee:referred'),
('Passed Second Reading .* referred to the committee',
['bill:reading:2', 'committee:referred']),
('.* that the measure be PASSED', 'committee:passed:favorable'),
('Received from (House|Senate)', 'bill:introduced'),
('Floor amendment .* offered', 'amendment:introduced'),
('Floor amendment adopted', 'amendment:passed'),
('Floor amendment failed', 'amendment:failed'),
('.*Passed Third Reading', 'bill:passed'),
('Enrolled to Governor', 'governor:received'),
('Act ', 'governor:signed'),
# these are for resolutions
('Offered', 'bill:introduced'),
('Adopted', 'bill:passed'),
)
ctty = None
for pattern, types in classifiers:
if re.match(pattern, action):
if "committee:referred" in types:
ctty = re.findall(r'\w+', re.sub(pattern, "", action))
return (types, ctty)
# return other by default
return ('other', ctty)
def split_specific_votes(voters):
if voters is None or voters.startswith('none'):
return []
elif voters.startswith('Senator(s)'):
voters = voters.replace('Senator(s) ', '')
elif voters.startswith('Representative(s)'):
voters = voters.replace('Representative(s)', '')
return voters.split(', ')
class HIBillScraper(BillScraper):
jurisdiction = 'hi'
def parse_bill_metainf_table( self, metainf_table ):
def _sponsor_interceptor(line):
return [ guy.strip() for guy in line.split(",") ]
interceptors = {
"Introducer(s)" : _sponsor_interceptor
}
ret = {}
for tr in metainf_table:
row = tr.xpath( "td" )
key = row[0].text_content().strip()
value = row[1].text_content().strip()
if key[-1:] == ":":
key = key[:-1]
if key in interceptors:
value = interceptors[key](value)
ret[key] = value
return ret
def parse_bill_actions_table(self, bill, action_table):
for action in action_table.xpath('*')[1:]:
date = action[0].text_content()
date = dt.datetime.strptime(date, "%m/%d/%Y")
actor = action[1].text_content()
string = action[2].text_content()
actor = {
"S" : "upper",
"H" : "lower",
"D" : "Data Systems",
"$" : "Appropriation measure",
"ConAm" : "Constitutional Amendment"
}[actor]
act_type, committees = categorize_action(string)
# XXX: Translate short-code to full committee name for the
# matcher.
real_committees = []
if committees:
for committee in committees:
try:
committee = self.short_ids[committee]['name']
real_committees.append(committee)
except KeyError:
pass
bill.add_action(actor, string, date,
type=act_type, committees=real_committees)
vote = self.parse_vote(string)
if vote:
v, motion = vote
vote = Vote(actor, date, motion, 'passed' in string.lower(),
int( v['n_yes'] or 0 ),
int( v['n_no'] or 0 ),
int( v['n_excused'] or 0))
def _add_votes( attrib, v, vote ):
for voter in split_specific_votes(v):
getattr(vote, attrib)(voter)
_add_votes('yes', v['yes'], vote)
_add_votes('yes', v['yes_resv'], vote)
_add_votes('no', v['no'], vote)
_add_votes('other', v['excused'], vote)
bill.add_vote(vote)
def parse_bill_versions_table(self, bill, versions):
versions = versions.xpath("./*")
if len(versions) > 1:
versions = versions[1:]
if versions == []:
raise Exception("Missing bill versions.")
for version in versions:
tds = version.xpath("./*")
if 'No other versions' in tds[0].text_content():
return
http_href = tds[0].xpath("./a")
name = http_href[0].text_content().strip()
# category = tds[1].text_content().strip()
pdf_href = tds[1].xpath("./a")
http_link = http_href[0].attrib['href']
pdf_link = pdf_href[0].attrib['href']
bill.add_version(name, http_link, mimetype="text/html")
bill.add_version(name, pdf_link, mimetype="application/pdf")
def scrape_bill(self, session, chamber, bill_type, url):
bill_html = self.get(url).text
bill_page = lxml.html.fromstring(bill_html)
scraped_bill_id = bill_page.xpath(
"//a[contains(@id, 'LinkButtonMeasure')]")[0].text_content()
bill_id = scraped_bill_id.split(' ')[0]
versions = bill_page.xpath( "//table[contains(@id, 'GridViewVersions')]" )[0]
tables = bill_page.xpath("//table")
metainf_table = bill_page.xpath('//div[contains(@id, "itemPlaceholder")]//table[1]')[0]
action_table = bill_page.xpath('//div[contains(@id, "UpdatePanel1")]//table[1]')[0]
meta = self.parse_bill_metainf_table(metainf_table)
subs = [ s.strip() for s in meta['Report Title'].split(";") ]
if "" in subs:
subs.remove("")
b = Bill(session, chamber, bill_id, title=meta['Measure Title'],
summary=meta['Description'],
referral=meta['Current Referral'],
subjects=subs,
type=bill_type)
b.add_source(url)
companion = meta['Companion'].strip()
if companion:
b['companion'] = companion
for sponsor in meta['Introducer(s)']:
b.add_sponsor(type='primary', name=sponsor)
actions = self.parse_bill_actions_table(b, action_table)
versions = self.parse_bill_versions_table(b, versions)
self.save_bill(b)
def parse_vote(self, action):
vote_re = (r'''
(?P<n_yes>\d+)\sAye\(?s\)? # Yes vote count
(:\s+(?P<yes>.*?))?;\s+ # Yes members
Aye\(?s\)?\swith\sreservations:\s+(?P<yes_resv>.*?);? # Yes with reservations members
(?P<n_no>\d*)\sNo\(?es\)?:\s+(?P<no>.*?);?
(\s+and\s+)?
(?P<n_excused>\d*)\sExcused:\s(?P<excused>.*)\.?
''')
result = re.search(vote_re, action, re.VERBOSE)
if result is None:
return None
result = result.groupdict()
motion = action.split('.')[0] + '.'
return result, motion
def scrape_type(self, chamber, session, billtype):
session_urlslug = \
self.metadata['session_details'][session]['_scraped_name']
report_page_url = create_bill_report_url(chamber, session_urlslug,
billtype)
billy_billtype = {
"bill" : "bill",
"cr" : "concurrent resolution",
"r" : "resolution"
}[billtype]
list_html = self.get(report_page_url).text
list_page = lxml.html.fromstring(list_html)
for bill_url in list_page.xpath("//a[@class='report']"):
bill_url = HI_URL_BASE + bill_url.attrib['href']
self.scrape_bill(session, chamber, billy_billtype, bill_url)
def scrape(self, session, chamber):
get_short_codes(self)
bill_types = ["bill", "cr", "r"]
for typ in bill_types:
self.scrape_type(session, chamber, typ)
| [
"datetime.datetime.strptime",
"re.match",
"billy.scrape.bills.Bill",
"re.sub",
"re.search"
] | [((1816, 1841), 're.match', 're.match', (['pattern', 'action'], {}), '(pattern, action)\n', (1824, 1841), False, 'import re\n'), ((6655, 6820), 'billy.scrape.bills.Bill', 'Bill', (['session', 'chamber', 'bill_id'], {'title': "meta['Measure Title']", 'summary': "meta['Description']", 'referral': "meta['Current Referral']", 'subjects': 'subs', 'type': 'bill_type'}), "(session, chamber, bill_id, title=meta['Measure Title'], summary=meta[\n 'Description'], referral=meta['Current Referral'], subjects=subs, type=\n bill_type)\n", (6659, 6820), False, 'from billy.scrape.bills import BillScraper, Bill\n'), ((7742, 7780), 're.search', 're.search', (['vote_re', 'action', 're.VERBOSE'], {}), '(vote_re, action, re.VERBOSE)\n', (7751, 7780), False, 'import re\n'), ((3252, 3290), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['date', '"""%m/%d/%Y"""'], {}), "(date, '%m/%d/%Y')\n", (3272, 3290), True, 'import datetime as dt\n'), ((1931, 1958), 're.sub', 're.sub', (['pattern', '""""""', 'action'], {}), "(pattern, '', action)\n", (1937, 1958), False, 'import re\n')] |
# coding=utf-8
import logging
from scrapy import Request
from scrapy import Spider
__author__ = "zephor"
logger = logging.getLogger(__name__)
class BaseSpider(Spider):
def __init__(self, parser, task_id, task_name, *args, **kwargs):
super(BaseSpider, self).__init__(*args, **kwargs)
self.task_id = task_id
from zspider.parsers import get_parser
self.parser = get_parser(parser, task_id, task_name, **kwargs)
self._extra = {"task_id": task_id, "task_name": task_name}
logger.info(
u"task {0} start with parser:{1}".format(task_name, parser),
extra=self._extra,
)
def _parse_index(self, response, callback=None):
callback = callback if callable(callback) else self._parse_article
_extra_url = self._extra_url = dict(self._extra)
for url in self.parser.parse_index(response):
if isinstance(url, tuple):
url, _meta = url
else:
_meta = {}
url = response.urljoin(url)
if self.task_id == "test_index":
yield {"url": url}
continue
_extra_url["url"] = url
logger.info("begin to crawl", extra=_extra_url)
request = Request(url, callback)
request.meta.update(_meta)
if self.task_id == "test_article":
                request.dont_filter = True  # mark: skip the dedup filter
yield request
if self.task_id == "test_article":
break
def _parse_article(self, response):
self._extra_url["url"] = response.url
logger.info("begin to parse", extra=self._extra_url)
item = self.parser.parse_article(response)
logger.info("parser ok", extra=self._extra_url)
return item
@property
def logger(self):
        # Prefer the module-level logger defined at the top of the file
return logging.getLogger("spider.{0}".format(self.name))
def parse(self, response):
raise NotImplementedError("parse")
| [
"logging.getLogger",
"scrapy.Request",
"zspider.parsers.get_parser"
] | [((117, 144), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (134, 144), False, 'import logging\n'), ((401, 449), 'zspider.parsers.get_parser', 'get_parser', (['parser', 'task_id', 'task_name'], {}), '(parser, task_id, task_name, **kwargs)\n', (411, 449), False, 'from zspider.parsers import get_parser\n'), ((1272, 1294), 'scrapy.Request', 'Request', (['url', 'callback'], {}), '(url, callback)\n', (1279, 1294), False, 'from scrapy import Request\n')] |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimates persistence.
By default, bitcoind will dump fee estimates on shutdown and
then reload it on startup.
Test is as follows:
- start node0
- call the savefeeestimates RPC and verify the RPC succeeds and
that the file exists
- make the file read only and attempt to call the savefeeestimates RPC
  with the expectation that it will fail
- move the read only file and shut down the node, verify the node writes
on shutdown a file that is identical to the one we saved via the RPC
"""
import filecmp
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
class FeeEstimatesPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
fee_estimatesdat = os.path.join(self.nodes[0].datadir, self.chain, 'fee_estimates.dat')
        self.log.debug('Verify the fee_estimates.dat file does not exist on startup')
assert not os.path.isfile(fee_estimatesdat)
self.nodes[0].savefeeestimates()
self.log.debug('Verify the fee_estimates.dat file exists after calling savefeeestimates RPC')
assert os.path.isfile(fee_estimatesdat)
self.log.debug("Prevent bitcoind from writing fee_estimates.dat to disk. Verify that `savefeeestimates` fails")
fee_estimatesdatold = fee_estimatesdat + '.old'
os.rename(fee_estimatesdat, fee_estimatesdatold)
os.mkdir(fee_estimatesdat)
assert_raises_rpc_error(-1, "Unable to dump fee estimates to disk", self.nodes[0].savefeeestimates)
os.rmdir(fee_estimatesdat)
self.stop_nodes()
self.log.debug("Verify that fee_estimates are written on shutdown")
assert os.path.isfile(fee_estimatesdat)
self.log.debug("Verify that the fee estimates from a shutdown are identical from the ones from savefeeestimates")
assert filecmp.cmp(fee_estimatesdat, fee_estimatesdatold)
if __name__ == "__main__":
FeeEstimatesPersistTest().main()
| [
"os.rename",
"os.path.join",
"os.path.isfile",
"os.rmdir",
"os.mkdir",
"filecmp.cmp",
"test_framework.util.assert_raises_rpc_error"
] | [((1046, 1114), 'os.path.join', 'os.path.join', (['self.nodes[0].datadir', 'self.chain', '"""fee_estimates.dat"""'], {}), "(self.nodes[0].datadir, self.chain, 'fee_estimates.dat')\n", (1058, 1114), False, 'import os\n'), ((1413, 1445), 'os.path.isfile', 'os.path.isfile', (['fee_estimatesdat'], {}), '(fee_estimatesdat)\n', (1427, 1445), False, 'import os\n'), ((1630, 1678), 'os.rename', 'os.rename', (['fee_estimatesdat', 'fee_estimatesdatold'], {}), '(fee_estimatesdat, fee_estimatesdatold)\n', (1639, 1678), False, 'import os\n'), ((1687, 1713), 'os.mkdir', 'os.mkdir', (['fee_estimatesdat'], {}), '(fee_estimatesdat)\n', (1695, 1713), False, 'import os\n'), ((1722, 1826), 'test_framework.util.assert_raises_rpc_error', 'assert_raises_rpc_error', (['(-1)', '"""Unable to dump fee estimates to disk"""', 'self.nodes[0].savefeeestimates'], {}), "(-1, 'Unable to dump fee estimates to disk', self.\n nodes[0].savefeeestimates)\n", (1745, 1826), False, 'from test_framework.util import assert_raises_rpc_error\n'), ((1830, 1856), 'os.rmdir', 'os.rmdir', (['fee_estimatesdat'], {}), '(fee_estimatesdat)\n', (1838, 1856), False, 'import os\n'), ((1974, 2006), 'os.path.isfile', 'os.path.isfile', (['fee_estimatesdat'], {}), '(fee_estimatesdat)\n', (1988, 2006), False, 'import os\n'), ((2144, 2194), 'filecmp.cmp', 'filecmp.cmp', (['fee_estimatesdat', 'fee_estimatesdatold'], {}), '(fee_estimatesdat, fee_estimatesdatold)\n', (2155, 2194), False, 'import filecmp\n'), ((1222, 1254), 'os.path.isfile', 'os.path.isfile', (['fee_estimatesdat'], {}), '(fee_estimatesdat)\n', (1236, 1254), False, 'import os\n')] |
import errno
import gc
import socket
import sys
import pytest
from guv import spawn
from guv.event import Event
from guv.greenio import socket as green_socket
from guv.green import socket as socket_patched
from guv.support import get_errno
pyversion = sys.version_info[:2]
TIMEOUT_SMALL = 0.01
BACKLOG = 10
def resize_buffer(sock, size):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size)
class TestGreenSocket:
def test_socket_init(self):
sock = socket_patched.socket()
assert isinstance(sock, green_socket)
def test_socket_close(self, gsock):
gsock.close()
def test_connect(self, gsock, pub_addr):
gsock.connect(pub_addr)
print(gsock.getpeername())
assert gsock.getpeername()
def test_connect_timeout(self, gsock, fail_addr):
gsock.settimeout(TIMEOUT_SMALL)
with pytest.raises(socket.timeout):
gsock.connect(fail_addr)
def test_connect_ex_timeout(self, gsock, fail_addr):
gsock.settimeout(TIMEOUT_SMALL)
e = gsock.connect_ex(fail_addr)
if e not in {errno.EHOSTUNREACH, errno.ENETUNREACH}:
assert e == errno.EAGAIN
def test_accept_timeout(self, gsock):
gsock.settimeout(TIMEOUT_SMALL)
gsock.bind(('', 0))
gsock.listen(BACKLOG)
with pytest.raises(socket.timeout):
gsock.accept()
def test_recv_timeout(self, gsock, pub_addr):
gsock.connect(pub_addr)
gsock.settimeout(TIMEOUT_SMALL)
with pytest.raises(socket.timeout) as exc_info:
gsock.recv(8192)
assert exc_info.value.args[0] == 'timed out'
def test_send_timeout(self, gsock, server_sock):
resize_buffer(server_sock, 1)
evt = Event()
def server():
client_sock, addr = server_sock.accept()
resize_buffer(client_sock, 1)
evt.wait()
g = spawn(server)
server_addr = server_sock.getsockname()
resize_buffer(gsock, 1)
gsock.connect(server_addr)
gsock.settimeout(TIMEOUT_SMALL)
with pytest.raises(socket.timeout):
# large enough data to overwhelm most buffers
msg_len = 10 ** 6
sent = 0
while sent < msg_len:
sent += gsock.send(bytes(msg_len))
evt.send()
g.wait()
def test_send_to_closed_sock_raises(self, gsock):
try:
gsock.send(b'hello')
except socket.error as e:
assert get_errno(e) == errno.EPIPE
if pyversion >= (3, 3):
# on python 3.3+, the exception can be caught like this as well
with pytest.raises(BrokenPipeError):
gsock.send(b'hello')
def test_del_closes_socket(self, gsock, server_sock):
def accept_once(sock):
# delete/overwrite the original conn object, only keeping the file object around
# closing the file object should close everything
try:
client_sock, addr = sock.accept()
file = client_sock.makefile('wb')
del client_sock
file.write(b'hello\n')
file.close()
gc.collect()
with pytest.raises(ValueError):
file.write(b'a')
finally:
sock.close()
killer = spawn(accept_once, server_sock)
gsock.connect(('127.0.0.1', server_sock.getsockname()[1]))
f = gsock.makefile('rb')
gsock.close()
assert f.read() == b'hello\n'
assert f.read() == b''
killer.wait()
class TestGreenSocketModule:
def test_create_connection(self, pub_addr):
sock = socket_patched.create_connection(pub_addr)
assert sock
def test_create_connection_timeout_error(self, fail_addr):
# Inspired by eventlet Greenio_test
try:
socket_patched.create_connection(fail_addr, timeout=0.01)
pytest.fail('Timeout not raised')
except socket.timeout as e:
assert str(e) == 'timed out'
except socket.error as e:
# unreachable is also a valid outcome
if not get_errno(e) in (errno.EHOSTUNREACH, errno.ENETUNREACH):
raise
| [
"guv.green.socket.socket",
"guv.support.get_errno",
"guv.green.socket.create_connection",
"guv.event.Event",
"pytest.fail",
"pytest.raises",
"gc.collect",
"guv.spawn"
] | [((542, 565), 'guv.green.socket.socket', 'socket_patched.socket', ([], {}), '()\n', (563, 565), True, 'from guv.green import socket as socket_patched\n'), ((1820, 1827), 'guv.event.Event', 'Event', ([], {}), '()\n', (1825, 1827), False, 'from guv.event import Event\n'), ((1982, 1995), 'guv.spawn', 'spawn', (['server'], {}), '(server)\n', (1987, 1995), False, 'from guv import spawn\n'), ((3450, 3481), 'guv.spawn', 'spawn', (['accept_once', 'server_sock'], {}), '(accept_once, server_sock)\n', (3455, 3481), False, 'from guv import spawn\n'), ((3789, 3831), 'guv.green.socket.create_connection', 'socket_patched.create_connection', (['pub_addr'], {}), '(pub_addr)\n', (3821, 3831), True, 'from guv.green import socket as socket_patched\n'), ((932, 961), 'pytest.raises', 'pytest.raises', (['socket.timeout'], {}), '(socket.timeout)\n', (945, 961), False, 'import pytest\n'), ((1393, 1422), 'pytest.raises', 'pytest.raises', (['socket.timeout'], {}), '(socket.timeout)\n', (1406, 1422), False, 'import pytest\n'), ((1588, 1617), 'pytest.raises', 'pytest.raises', (['socket.timeout'], {}), '(socket.timeout)\n', (1601, 1617), False, 'import pytest\n'), ((2166, 2195), 'pytest.raises', 'pytest.raises', (['socket.timeout'], {}), '(socket.timeout)\n', (2179, 2195), False, 'import pytest\n'), ((3985, 4042), 'guv.green.socket.create_connection', 'socket_patched.create_connection', (['fail_addr'], {'timeout': '(0.01)'}), '(fail_addr, timeout=0.01)\n', (4017, 4042), True, 'from guv.green import socket as socket_patched\n'), ((4055, 4088), 'pytest.fail', 'pytest.fail', (['"""Timeout not raised"""'], {}), "('Timeout not raised')\n", (4066, 4088), False, 'import pytest\n'), ((2737, 2767), 'pytest.raises', 'pytest.raises', (['BrokenPipeError'], {}), '(BrokenPipeError)\n', (2750, 2767), False, 'import pytest\n'), ((3284, 3296), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3294, 3296), False, 'import gc\n'), ((2583, 2595), 'guv.support.get_errno', 'get_errno', (['e'], {}), '(e)\n', (2592, 2595), False, 'from guv.support import get_errno\n'), ((3318, 3343), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3331, 3343), False, 'import pytest\n'), ((4269, 4281), 'guv.support.get_errno', 'get_errno', (['e'], {}), '(e)\n', (4278, 4281), False, 'from guv.support import get_errno\n')] |
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import argparse
import sys
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("command")
args = parser.parse_args(args=argv)
if args.command == "selfcheck":
sys.argv = []
print("Trying to connect to a Metview installation...")
try:
from . import bindings as _bindings
except Exception as exp:
print("Could not find a valid Metview installation")
raise (exp)
mv = dict()
_bindings.bind_functions(mv, module_name="mv")
del _bindings
try:
mv["print"]("Hello world - printed from Metview!")
except Exception as exp:
print("Could not print a greeting from Metview")
raise (exp)
mv_version_f = mv["version_info"]
mv_version = mv_version_f()
mv_maj = str(int(mv_version["metview_major"]))
mv_min = str(int(mv_version["metview_minor"]))
mv_rev = str(int(mv_version["metview_revision"]))
mv_version_string = mv_maj + "." + mv_min + "." + mv_rev
print("Metview version", mv_version_string, "found")
print("Your system is ready.")
else:
raise RuntimeError(
"Command not recognised %r. See usage with --help." % args.command
)
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser"
] | [((444, 469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (467, 469), False, 'import argparse\n')] |
# run in wt/visual-cluster
from statlib import stats
OFILE = open("stats.txt",'w')
for i in range(1,8):
BEs = []
IFILE = open("visual_cluster-%s.pdb" % i, 'r')
for l in IFILE:
values = l.split()
BEs.append(float(values[9]) / 0.7) # if given BEs are weighted
OFILE.write("visual_cluster-%s: %s, stddev %s, lower %s, upper %s, min %s, max %s, median %s \n" % (i,stats.mean(BEs),
stats.stdev(BEs),
stats.scoreatpercentile(BEs,25),
stats.scoreatpercentile(BEs,75),
min(BEs), max(BEs),
stats.median(BEs) ))
OFILE.close()
| [
"statlib.stats.stdev",
"statlib.stats.scoreatpercentile",
"statlib.stats.mean",
"statlib.stats.median"
] | [((392, 407), 'statlib.stats.mean', 'stats.mean', (['BEs'], {}), '(BEs)\n', (402, 407), False, 'from statlib import stats\n'), ((410, 426), 'statlib.stats.stdev', 'stats.stdev', (['BEs'], {}), '(BEs)\n', (421, 426), False, 'from statlib import stats\n'), ((429, 461), 'statlib.stats.scoreatpercentile', 'stats.scoreatpercentile', (['BEs', '(25)'], {}), '(BEs, 25)\n', (452, 461), False, 'from statlib import stats\n'), ((463, 495), 'statlib.stats.scoreatpercentile', 'stats.scoreatpercentile', (['BEs', '(75)'], {}), '(BEs, 75)\n', (486, 495), False, 'from statlib import stats\n'), ((516, 533), 'statlib.stats.median', 'stats.median', (['BEs'], {}), '(BEs)\n', (528, 533), False, 'from statlib import stats\n')] |
import time
import math
N = 28124 # We know a priori that any number N or larger can be written as the sum of 2 abundant integers
def proper_divisors(n):
"""
Compute all proper divisors of a positive integer n and return them as a list (not sorted)
"""
a, r = 1, [1]
while a * a < n:
a += 1
if n % a: continue
b, f = 1, []
while n % a == 0:
n //= a
b *= a
f += [i * b for i in r]
r += f
if n > 1: r += [i * n for i in r]
return r[:-1]
def find_abundant(n):
"""
return the set of all positive integers that are abundant and < n
"""
s = set([])
for j in range(12,n,1):
if sum(proper_divisors(j)) > j:
s = s.union([j])
return s
def abundant_sums(n):
a = find_abundant(n)
s = set([i + j for i in a for j in a if i + j < n])
return s
def non_abundant_sums():
"""
Computes the pairwise sums of all pairs of distinct abundant numbers less than N
"""
s = abundant_sums(N)
#return (N-1)*N/2 - sum(s)
t = set([i for i in range(1,N,1) if i not in s]) #doesn't add much extra compute time to return the set of all such non abundant sums instead of the sum of them
return t
def test():
print(len(find_abundant(30000)))
print(len(abundant_sums(30000)))
def main():
start = time.time()
t = non_abundant_sums()
print(sum(t))
print(len(t))
end = time.time()
print(end-start)
if __name__ == "__main__":
#test()
main()
| [
"time.time"
] | [((1222, 1233), 'time.time', 'time.time', ([], {}), '()\n', (1231, 1233), False, 'import time\n'), ((1296, 1307), 'time.time', 'time.time', ([], {}), '()\n', (1305, 1307), False, 'import time\n')] |
"""
Usage: python -u make_img_proc_stages_movies.py &> logs/s6_movies.log &
"""
import os
import imageutils as iu
def get_identifiers_slicebounds(sector):
if sector == 6:
projid = 1500 # initial project id for this sector
elif sector == 7:
projid = 1516
elif sector == 8:
projid = 1532
elif sector == 9:
projid = 1548
elif sector == 10:
projid = 1564
identifiers = []
slicebounds = []
for cam in range(1,5):
for ccd in range(1,5):
identifiers.append(
(sector, cam, ccd, projid)
)
if projid == 1500: # orion b here
slicebounds.append(
[slice(1,513), slice(300,812)]
)
else:
slicebounds.append(
[slice(300,812), slice(300,812)]
)
projid += 1
return identifiers, slicebounds
def make_img_proc_stages_movies(sector=None, overwrite=None):
assert isinstance(sector, int)
assert isinstance(overwrite, int)
identifiers, slicebounds = get_identifiers_slicebounds(sector)
basedir = '/nfs/phtess2/ar0/TESS/FFI/PROJ/IMG_PROC_STAGES'
moviedir = '/nfs/phtess2/ar0/TESS/FFI/MOVIES'
for i,s in zip(identifiers, slicebounds):
outmp4path = os.path.join(
moviedir, 'img_proc_stages_sector{}_cam{}_ccd{}_projid{}.mp4'.format(
i[0], i[1], i[2], i[3]
)
)
if os.path.exists(outmp4path):
print('found {}; skip this sector'.format(outmp4path))
continue
outdir = os.path.join(
basedir, 'sector{}_cam{}_ccd{}_projid{}'.format(
i[0], i[1], i[2], i[3]
)
)
if not os.path.exists(outdir):
print('made {}'.format(outdir))
os.mkdir(outdir)
iu.plot_stages_of_img_proc_sector_cam_ccd(sector=i[0], cam=i[1],
ccd=i[2], projid=i[3],
overwrite=overwrite,
outdir=outdir, slicebounds=s)
imgglob = os.path.join(outdir, 'tess2*_img_proc_stages.png')
iu.make_mp4_from_jpegs(imgglob, outmp4path,
ffmpegpath='/home/lbouma/bin/ffmpeg',
verbose=True)
if __name__ == "__main__":
sector = 10
overwrite = 0
make_img_proc_stages_movies(sector=sector, overwrite=overwrite)
| [
"os.path.exists",
"imageutils.make_mp4_from_jpegs",
"os.path.join",
"imageutils.plot_stages_of_img_proc_sector_cam_ccd",
"os.mkdir"
] | [((1503, 1529), 'os.path.exists', 'os.path.exists', (['outmp4path'], {}), '(outmp4path)\n', (1517, 1529), False, 'import os\n'), ((1896, 2038), 'imageutils.plot_stages_of_img_proc_sector_cam_ccd', 'iu.plot_stages_of_img_proc_sector_cam_ccd', ([], {'sector': 'i[0]', 'cam': 'i[1]', 'ccd': 'i[2]', 'projid': 'i[3]', 'overwrite': 'overwrite', 'outdir': 'outdir', 'slicebounds': 's'}), '(sector=i[0], cam=i[1], ccd=i[2],\n projid=i[3], overwrite=overwrite, outdir=outdir, slicebounds=s)\n', (1937, 2038), True, 'import imageutils as iu\n'), ((2204, 2254), 'os.path.join', 'os.path.join', (['outdir', '"""tess2*_img_proc_stages.png"""'], {}), "(outdir, 'tess2*_img_proc_stages.png')\n", (2216, 2254), False, 'import os\n'), ((2264, 2364), 'imageutils.make_mp4_from_jpegs', 'iu.make_mp4_from_jpegs', (['imgglob', 'outmp4path'], {'ffmpegpath': '"""/home/lbouma/bin/ffmpeg"""', 'verbose': '(True)'}), "(imgglob, outmp4path, ffmpegpath=\n '/home/lbouma/bin/ffmpeg', verbose=True)\n", (2286, 2364), True, 'import imageutils as iu\n'), ((1790, 1812), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (1804, 1812), False, 'import os\n'), ((1870, 1886), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (1878, 1886), False, 'import os\n')] |
import os, sys
from .PetriNet import PetriNet
from .Place import Place
from .Transition import Transition
from .Arc import Arc
import xml.etree.ElementTree as elemTree # XML parser
def parse_pnml_file(file):
tree = elemTree.parse(file) # parse XML with ElementTree
root = tree.getroot()
nets = [] # list for parsed PetriNet objects
xmlns = ""
for net_node in root.iter(xmlns+'net'):
# create PetriNet object
net = PetriNet()
        net.reset_len() # Reset length of places and transitions
net.id = net_node.get('id')
netnmnode = net_node.find('./'+xmlns+'name/'+xmlns+'text')
if netnmnode is not None:
net.name = netnmnode.text
else:
net.name = net.id
# and parse transitions
for transition_node in net_node.iter(xmlns+'transition'):
transition = Transition()
net.set_len_transition()
transition.id = transition_node.get('id')
transition.label = transition.id if transition_node.find('./name/text')== None else transition_node.find('./name/text').text
position_node = transition_node.find('./graphics/position')
transition.position = [int(float(position_node.get('x'))), int(float(position_node.get('y')))]
off_node = transition_node.find('./'+xmlns+'name/'+xmlns+'graphics/'+xmlns+'offset')
if off_node == None :
transition.offset = [0,0]
else :
transition.offset = [int(off_node.get('x')), int(off_node.get('y'))]
net.transitions[transition.id] = transition
# and parse places
for place_node in net_node.iter(xmlns+'place'):
place = Place()
net.set_len_place()
place.id = place_node.get('id')
place.label = place.id if place_node.find('./'+xmlns+'name/'+xmlns+'text')== None else place_node.find('./'+xmlns+'name/'+xmlns+'text').text
position_node = place_node.find('./'+xmlns+'graphics/'+xmlns+'position')
place.position = [int(float(position_node.get('x'))), int(float(position_node.get('y')))]
off_node = place_node.find('./'+xmlns+'name/'+xmlns+'graphics/'+xmlns+'offset')
if off_node == None :
place.offset = [0,0]
else :
place.offset = [int(off_node.get('x')), int(off_node.get('y'))]
place.marking = 0 if place_node.find('./initialMarking/text')== None else int(place_node.find('./initialMarking/text').text)
net.places[place.id] = place
net.marking.append({place.id:place.marking})
# and arcs
for arc_node in net_node.iter(xmlns+'arc'):
arc = Arc()
arc.id = arc_node.get('id')
arc.source = arc_node.get('source')
arc.target = arc_node.get('target')
arc.type = arc_node.get('type')
if arc.type is None:
etp = arc_node.find('./'+xmlns+'type')
if etp is not None:
arc.type = etp.get('value')
if arc.type is None:
arc.type = 'normal'
inscr_txt = arc_node.find('./'+xmlns+'inscription/'+xmlns+'text')
if inscr_txt is not None:
arc.inscription = inscr_txt.text
else:
arc.inscription = "1"
net.arcs.append(arc)
nets.append(net)
return nets
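# Minimal round-trip sketch (the file name is hypothetical, not part of the
# package): parse a PNML file and write every net it contains back to disk
# with write_pnml_file() defined below.
def _example_roundtrip(infile='example.pnml'):
    nets = parse_pnml_file(infile)
    for i, net in enumerate(nets):
        write_pnml_file(net, '{}.copy{}.pnml'.format(net.id, i))
    return nets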
def write_pnml_file(n, filename, relative_offset=True):
pnml = elemTree.Element('pnml')
net = elemTree.SubElement(pnml, 'net', id=n.id)
net_name = elemTree.SubElement(net, 'name')
net_name_text = elemTree.SubElement(net_name, 'text')
net_name_text.text = n.name
page = elemTree.SubElement(net, 'page', id='1')
for _id, t in n.transitions.items():
transition = elemTree.SubElement(page, 'transition', id=t.id)
transition_name = elemTree.SubElement(transition, 'name')
transition_name_text = elemTree.SubElement(transition_name, 'text')
transition_name_text.text = t.label
transition_name_graphics = elemTree.SubElement(transition_name, 'graphics')
transition_name_graphics_offset = elemTree.SubElement(transition_name_graphics, 'offset')
transition_name_graphics_offset.attrib['x'] = str(t.offset[0])
transition_name_graphics_offset.attrib['y'] = str(t.offset[1])
transition_graphics = elemTree.SubElement(transition, 'graphics')
transition_graphics_position = elemTree.SubElement(transition_graphics, 'position')
transition_graphics_position.attrib['x'] = str(t.position[0] if t.position is not None else 0)
transition_graphics_position.attrib['y'] = str(t.position[1] if t.position is not None else 0)
for _id, p in n.places.items():
place = elemTree.SubElement(page, 'place', id=p.id)
place_name = elemTree.SubElement(place, 'name')
place_name_text = elemTree.SubElement(place_name, 'text')
place_name_text.text = p.label
place_name_graphics = elemTree.SubElement(place_name, 'graphics')
place_name_graphics_offset = elemTree.SubElement(place_name_graphics, 'offset')
place_name_graphics_offset.attrib['x'] = str(p.offset[0] if p.offset is not None else 0)
place_name_graphics_offset.attrib['y'] = str(p.offset[1] if p.offset is not None else 0)
place_graphics = elemTree.SubElement(place, 'graphics')
place_graphics_position = elemTree.SubElement(place_graphics, 'position')
place_graphics_position.attrib['x'] = str(p.position[0] if p.position is not None else 0)
place_graphics_position.attrib['y'] = str(p.position[1] if p.position is not None else 0)
place_initialMarking = elemTree.SubElement(place, 'initialMarking')
place_initialMarking_text = elemTree.SubElement(place_initialMarking, 'text')
place_initialMarking_text.text = str(p.marking)
for e in n.arcs:
arc = elemTree.SubElement(page, 'arc', id=e.id, source=e.source, target=e.target, type=e.type)
arc_inscription = elemTree.SubElement(arc, 'inscription')
arc_inscription_text = elemTree.SubElement(arc_inscription, 'text')
arc_inscription_text.text = str(e.inscription)
tree = elemTree.ElementTree(element=pnml)
tree.write(filename, encoding="utf-8", xml_declaration=True, method="xml")
if __name__ == "__main__":
if len(sys.argv) > 1:
nets = parse_pnml_file(sys.argv[1])
for net in nets:
print(net)
| [
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement",
"xml.etree.ElementTree.parse",
"xml.etree.ElementTree.ElementTree"
] | [((222, 242), 'xml.etree.ElementTree.parse', 'elemTree.parse', (['file'], {}), '(file)\n', (236, 242), True, 'import xml.etree.ElementTree as elemTree\n'), ((3574, 3598), 'xml.etree.ElementTree.Element', 'elemTree.Element', (['"""pnml"""'], {}), "('pnml')\n", (3590, 3598), True, 'import xml.etree.ElementTree as elemTree\n'), ((3609, 3650), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['pnml', '"""net"""'], {'id': 'n.id'}), "(pnml, 'net', id=n.id)\n", (3628, 3650), True, 'import xml.etree.ElementTree as elemTree\n'), ((3666, 3698), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['net', '"""name"""'], {}), "(net, 'name')\n", (3685, 3698), True, 'import xml.etree.ElementTree as elemTree\n'), ((3719, 3756), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['net_name', '"""text"""'], {}), "(net_name, 'text')\n", (3738, 3756), True, 'import xml.etree.ElementTree as elemTree\n'), ((3801, 3841), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['net', '"""page"""'], {'id': '"""1"""'}), "(net, 'page', id='1')\n", (3820, 3841), True, 'import xml.etree.ElementTree as elemTree\n'), ((6538, 6572), 'xml.etree.ElementTree.ElementTree', 'elemTree.ElementTree', ([], {'element': 'pnml'}), '(element=pnml)\n', (6558, 6572), True, 'import xml.etree.ElementTree as elemTree\n'), ((3905, 3953), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['page', '"""transition"""'], {'id': 't.id'}), "(page, 'transition', id=t.id)\n", (3924, 3953), True, 'import xml.etree.ElementTree as elemTree\n'), ((3980, 4019), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition', '"""name"""'], {}), "(transition, 'name')\n", (3999, 4019), True, 'import xml.etree.ElementTree as elemTree\n'), ((4051, 4095), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition_name', '"""text"""'], {}), "(transition_name, 'text')\n", (4070, 4095), True, 'import xml.etree.ElementTree as elemTree\n'), ((4175, 4223), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition_name', '"""graphics"""'], {}), "(transition_name, 'graphics')\n", (4194, 4223), True, 'import xml.etree.ElementTree as elemTree\n'), ((4266, 4321), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition_name_graphics', '"""offset"""'], {}), "(transition_name_graphics, 'offset')\n", (4285, 4321), True, 'import xml.etree.ElementTree as elemTree\n'), ((4494, 4537), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition', '"""graphics"""'], {}), "(transition, 'graphics')\n", (4513, 4537), True, 'import xml.etree.ElementTree as elemTree\n'), ((4577, 4629), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['transition_graphics', '"""position"""'], {}), "(transition_graphics, 'position')\n", (4596, 4629), True, 'import xml.etree.ElementTree as elemTree\n'), ((4889, 4932), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['page', '"""place"""'], {'id': 'p.id'}), "(page, 'place', id=p.id)\n", (4908, 4932), True, 'import xml.etree.ElementTree as elemTree\n'), ((4954, 4988), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place', '"""name"""'], {}), "(place, 'name')\n", (4973, 4988), True, 'import xml.etree.ElementTree as elemTree\n'), ((5015, 5054), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place_name', '"""text"""'], {}), "(place_name, 'text')\n", (5034, 5054), True, 'import xml.etree.ElementTree as elemTree\n'), ((5124, 5167), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', 
(['place_name', '"""graphics"""'], {}), "(place_name, 'graphics')\n", (5143, 5167), True, 'import xml.etree.ElementTree as elemTree\n'), ((5205, 5255), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place_name_graphics', '"""offset"""'], {}), "(place_name_graphics, 'offset')\n", (5224, 5255), True, 'import xml.etree.ElementTree as elemTree\n'), ((5669, 5707), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place', '"""graphics"""'], {}), "(place, 'graphics')\n", (5688, 5707), True, 'import xml.etree.ElementTree as elemTree\n'), ((5742, 5789), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place_graphics', '"""position"""'], {}), "(place_graphics, 'position')\n", (5761, 5789), True, 'import xml.etree.ElementTree as elemTree\n'), ((6017, 6061), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place', '"""initialMarking"""'], {}), "(place, 'initialMarking')\n", (6036, 6061), True, 'import xml.etree.ElementTree as elemTree\n'), ((6098, 6147), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['place_initialMarking', '"""text"""'], {}), "(place_initialMarking, 'text')\n", (6117, 6147), True, 'import xml.etree.ElementTree as elemTree\n'), ((6240, 6332), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['page', '"""arc"""'], {'id': 'e.id', 'source': 'e.source', 'target': 'e.target', 'type': 'e.type'}), "(page, 'arc', id=e.id, source=e.source, target=e.target,\n type=e.type)\n", (6259, 6332), True, 'import xml.etree.ElementTree as elemTree\n'), ((6355, 6394), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['arc', '"""inscription"""'], {}), "(arc, 'inscription')\n", (6374, 6394), True, 'import xml.etree.ElementTree as elemTree\n'), ((6426, 6470), 'xml.etree.ElementTree.SubElement', 'elemTree.SubElement', (['arc_inscription', '"""text"""'], {}), "(arc_inscription, 'text')\n", (6445, 6470), True, 'import xml.etree.ElementTree as elemTree\n')] |
#!/usr/bin/env python3
from __future__ import annotations # [PEP 563 -- Postponed Evaluation of Annotations](https://www.python.org/dev/peps/pep-0563/)
from typing import Any, Callable, Dict, List, Union
import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit
class Cred:
'''Fetch user data from json file.'''
filePath:str = f"{pathlib.Path.cwd().joinpath('user.json')}"
try:
with open(file=filePath, mode='r') as jsonFile: credData = json.load(jsonFile)
user = credData.get('username')
apiKey = credData.get('apikey')
if user == 'your_username' or apiKey == 'your_api_key': raise Exception
except (FileNotFoundError, Exception):
with open(file=filePath, mode='w') as jsonFile: jsonFile.write('{\n"username": "your_username",\n"apikey": "your_api_key"\n}')
logging.error(f"Please specify 'username' and 'apikey' in '{filePath}'")
logging.error(f"A LastFM API key can be obtained from [https://www.last.fm/api/authentication]")
sys.exit()
def timer(func:Callable) -> Callable:
'''Timer decorator. Logs execution time for functions.'''
# [Primer on Python Decorators](https://realpython.com/primer-on-python-decorators/)
@functools.wraps(func)
def wrapper(*args, **kwargs):
t0 = timeit.default_timer()
value = func(*args, **kwargs)
t1 = timeit.default_timer()
logging.debug(f'{func.__name__}(): {t1-t0:.6f} s')
return value
return wrapper
def toUnixTime(dt:Union[str,int,datetime.datetime], log:bool=False, **kwargs) -> int:
'''Convert {dt} (assumes UTC) to unix time.'''
if isinstance(dt, str): dt = int(dateutil.parser.parse(str(dt)).replace(tzinfo=dateutil.tz.UTC).timestamp())
if isinstance(dt, datetime.datetime): dt = int(dt.replace(tzinfo=dateutil.tz.UTC).timestamp())
if log: logging.debug(fromUnixTime(dt))
return dt
def fromUnixTime(ts:int) -> str:
'''Convert {ts} unix timestamp (assumes UTC) to datetime string.'''
return datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
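# Illustrative check of the two converters above (not used elsewhere): they invert each other at the UTC epoch.
def _epochRoundTrip() -> bool:
    '''Return True when toUnixTime() and fromUnixTime() agree at the UTC epoch.'''
    return toUnixTime('1970-01-01 00:00:00') == 0 and fromUnixTime(0) == '1970-01-01 00:00:00'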
def dateRange(weeksAgo:int, nWeeks:int, **kwargs) -> int:
'''Handle date range timestamps. Prioritizes {to} and {fr} if present in {kwargs}.'''
now = datetime.datetime.now()
if 'fr' in kwargs: fr = toUnixTime(dt=kwargs.get('fr'), **kwargs)
else: fr = toUnixTime(dt=(now - datetime.timedelta(weeks=weeksAgo)), **kwargs)
if 'to' in kwargs:
if kwargs.get('to') == -1: to = toUnixTime(dt=now, **kwargs)
else: to = toUnixTime(dt=kwargs.get('to'), **kwargs)
else:
if nWeeks == -1: to = toUnixTime(dt=now, **kwargs)
else: to = toUnixTime(dt=(datetime.datetime.utcfromtimestamp(fr) + datetime.timedelta(weeks=nWeeks)), **kwargs)
return (fr,to)
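# Example (illustrative values): dateRange(weeksAgo=4, nWeeks=2) returns unix
# timestamps spanning from four weeks before "now" to two weeks after that start.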
def collapseResp(resp:Dict[str,Any], ignoreKey:str='@attr', returnDF:bool=False, **kwargs) -> List[Dict[str,Any]]:
'''Traverse single keys in nested dictionary (ignoring {ignoreKey}) until reaching a list or muti-key dict.'''
def collapseOnlyKey(resp:Dict[str,Any]) -> Dict[str,Any]:
'''Return the contents of a dict if it has only one key.'''
return resp.get(list(resp.keys())[0]) if len(resp.keys()) == 1 else resp
while isinstance(resp, dict):
if ignoreKey in resp.keys(): attr = resp.pop(ignoreKey) # toss resp['@attr'] into the trash (contains user and pagination info)
resp = collapseOnlyKey(resp)
if isinstance(resp, list):
if returnDF: resp = flattenDF(param=kwargs.get('param'), DF=pandas.DataFrame(resp), writeToDisk=False)
break
if len(resp.keys()) > 1 and ignoreKey not in resp: break
return resp
@timer
def loadJSON(param) -> pandas.DataFrame:
'''Append json data from files into a list of lists, then flatten the list, and return as a pandas.DataFrame.'''
# [Intermission: Flattening A List of Lists](https://realpython.com/python-itertools/#intermission-flattening-a-list-of-lists)
# if sys.version_info < (3,8): jsonFiles = param.filePath(glob='*json'); if jsonFiles: jsonListData = [json.load(open(file)) for file in jsonFiles]
if (jsonFiles := param.filePath(glob='*json')): jsonListData = [json.load(open(file)) for file in jsonFiles]
else: sys.exit(logging.error(f"No files found matching {param.filePath(ext='*json')}"))
flatListData = itertools.chain.from_iterable(collapseResp(data) for data in jsonListData)
return pandas.DataFrame(flatListData)
def flattenCol(dfCol:pandas.Series, prefix:str=None) -> pandas.DataFrame:
'''Flatten {dfCol} (pandas.Series) as needed and prepend {prefix} to {dfCol.name} (series/column name). Convert elements to integer if possible.'''
def fillNan(dfCol:pandas.Series, obj:Union[list,dict]): return dfCol.fillna({i: obj for i in dfCol.index}) # [How to fill dataframe Nan values with empty list [] in pandas?](https://stackoverflow.com/a/62689667/13019084)
def concatFlatCols(df:pandas.DataFrame): return pandas.concat(objs=[flattenCol(df[col]) for col in df], axis=1)
if any(isinstance(row, list) for row in dfCol):
# if dfCol contains list entries, fill any None/Nan values with empty list, flatten via dfCol.values.tolist(), and prepend {dfCol.name} to each column name
dfCol = fillNan(dfCol=dfCol, obj=[])
listDF = pandas.concat(objs=[pandas.DataFrame(dfCol.values.tolist()).add_prefix(f'{dfCol.name}_')], axis=1) # [DataFrame Pandas - Flatten column of lists to multiple columns](https://stackoverflow.com/a/44821357/13019084)
return concatFlatCols(listDF)
elif any(isinstance(row, dict) for row in dfCol):
# if dfCol contains dict entries, fill any None/Nan values with empty dict, flatten via pandas.json_normalize(), and prepend {dfCol.name} to each column name
dfCol = fillNan(dfCol=dfCol, obj={})
dictDF = pandas.json_normalize(dfCol).add_prefix(f'{dfCol.name}_')
return concatFlatCols(dictDF)
else:
dfCol = pandas.to_numeric(arg=dfCol, errors='ignore')
return dfCol.rename(f'{prefix}_{dfCol.name}') if prefix else dfCol
@timer
def flattenDF(param, DF:pandas.DataFrame, writeToDisk:bool=True) -> pandas.DataFrame:
'''Flatten all columns in {DF}, apply pandas.Timestamp dtype if applicable, and write to disk.'''
DF = DF[DF['@attr'] != {'nowplaying': 'true'}].reset_index(drop=True).drop(columns=['@attr']) if '@attr' in DF else DF # drop 'nowplaying' rows and '@attr' column (first entry in every 'RecentTracks' page response)
flatDF = pandas.concat(objs=[flattenCol(DF[col], prefix=param.splitMethod(plural=False, strip=True)) for col in DF.columns], axis=1) # .fillna('')
flatDF.columns = [col.replace('_#text','').replace('@attr_','') for col in flatDF.columns]
if 'date' in flatDF: flatDF['date'] = pandas.to_datetime(arg=flatDF['date'], format='%d %b %Y, %H:%M', utc=True)
if writeToDisk: pandasWrite(param=param, df=flatDF, outFile=param.filePath(ext=f'.{param.outFmt}'))
return flatDF
def shiftCols(df:pandas.DataFrame, firstCols:List[str]) -> pandas.DataFrame:
'''Shift {firstCols} to be left-most in {df}'''
return df[firstCols + [col for col in df.columns if col not in firstCols]]
@timer
def mergeRecentTracks(param):
'''Merge individual-year {RecentTracks} serialized files into a single "overall" file.'''
# if sys.version_info < (3,8): outFile = param.filePath(ext=f'.{param.outFmt}'); outFile.unlink(missing_ok=True)
(outFile := param.filePath(ext=f'.{param.outFmt}')).unlink(missing_ok=True)
inFiles = param.filePath(period='', glob=f'*.{param.outFmt}', reverse=True)
df = pandas.concat(objs=[pandasRead(param, f) for f in inFiles], ignore_index=True)
pandasWrite(param=param, df=df, outFile=outFile)
def pandasRead(param, inFile:str) -> pandas.DataFrame:
'''Read {inFile} and return as a pandas.DataFrame.'''
return getattr(pandas, f'read_{param.outFmt}')(inFile) # [Calling a function of a module by using its name (a string)](https://stackoverflow.com/a/3071/13019084)
def pandasWrite(param, df:pandas.DataFrame, outFile:str):
'''Write {df} to disk in {fmt} format.'''
outFile.unlink(missing_ok=True)
getattr(df, f'to_{param.outFmt}')(outFile) # [Calling a function of a module by using its name (a string)](https://stackoverflow.com/a/3071/13019084)
@timer
def writeCSV(param, df:pandas.DataFrame):
'''Write subset of dataframe columns to csv.'''
subMethod = param.splitMethod()
# if sys.version_info < (3,8): outFile = param.filePath(ext='.csv')); outFile.unlink(missing_ok=True)
(outFile := param.filePath(ext='.csv')).unlink(missing_ok=True)
if (subMethod == 'TopArtists'):
df.to_csv(path_or_buf=outFile, columns=['artist_playcount','artist_mbid','artist_name'], sep='|', header=True, index=False)
elif (subMethod == 'TopAlbums'):
df.to_csv(path_or_buf=outFile, columns=['album_playcount','artist_mbid','album_mbid','artist_name','album_name'], sep='|', header=True, index=False)
elif (subMethod == 'TopTracks'):
df.to_csv(path_or_buf=outFile, columns=['track_playcount','artist_mbid','track_mbid','artist_name','track_name'], sep='|', header=True, index=False)
| [
"datetime.datetime.utcfromtimestamp",
"pandas.json_normalize",
"logging.debug",
"pathlib.Path.cwd",
"timeit.default_timer",
"functools.wraps",
"json.load",
"datetime.datetime.now",
"pandas.to_numeric",
"sys.exit",
"pandas.DataFrame",
"datetime.timedelta",
"logging.error",
"pandas.to_datetime"
] | [((1254, 1275), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1269, 1275), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((2266, 2289), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2287, 2289), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((4464, 4494), 'pandas.DataFrame', 'pandas.DataFrame', (['flatListData'], {}), '(flatListData)\n', (4480, 4494), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((1328, 1350), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1348, 1350), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((1409, 1431), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1429, 1431), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((1440, 1492), 'logging.debug', 'logging.debug', (['f"""{func.__name__}(): {t1 - t0:.6f} s"""'], {}), "(f'{func.__name__}(): {t1 - t0:.6f} s')\n", (1453, 1492), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((6820, 6894), 'pandas.to_datetime', 'pandas.to_datetime', ([], {'arg': "flatDF['date']", 'format': '"""%d %b %Y, %H:%M"""', 'utc': '(True)'}), "(arg=flatDF['date'], format='%d %b %Y, %H:%M', utc=True)\n", (6838, 6894), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((495, 514), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (504, 514), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((861, 933), 'logging.error', 'logging.error', (['f"""Please specify \'username\' and \'apikey\' in \'{filePath}\'"""'], {}), '(f"Please specify \'username\' and \'apikey\' in \'{filePath}\'")\n', (874, 933), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((942, 1048), 'logging.error', 'logging.error', (['f"""A LastFM API key can be obtained from [https://www.last.fm/api/authentication]"""'], {}), "(\n f'A LastFM API key can be obtained from [https://www.last.fm/api/authentication]'\n )\n", (955, 1048), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((1047, 1057), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1055, 1057), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((2038, 2076), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (2072, 2076), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((5996, 6041), 'pandas.to_numeric', 'pandas.to_numeric', ([], {'arg': 'dfCol', 'errors': '"""ignore"""'}), "(arg=dfCol, errors='ignore')\n", (6013, 6041), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((376, 394), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (392, 394), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((2396, 2430), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': 'weeksAgo'}), '(weeks=weeksAgo)\n', (2414, 2430), False, 'import datetime, dateutil, functools, itertools, json, logging, 
pandas, pathlib, sys, timeit\n'), ((5874, 5902), 'pandas.json_normalize', 'pandas.json_normalize', (['dfCol'], {}), '(dfCol)\n', (5895, 5902), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((2699, 2737), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['fr'], {}), '(fr)\n', (2733, 2737), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((2740, 2772), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': 'nWeeks'}), '(weeks=nWeeks)\n', (2758, 2772), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n'), ((3563, 3585), 'pandas.DataFrame', 'pandas.DataFrame', (['resp'], {}), '(resp)\n', (3579, 3585), False, 'import datetime, dateutil, functools, itertools, json, logging, pandas, pathlib, sys, timeit\n')] |
# Copyright © 2018 CNRS
# All rights reserved.
# @author <NAME>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, division, absolute_import
from copy import copy
from .message_queue import MessageQueue
from .binary_heap import BinaryHeap
from .time import Time
class HQValue:
def __init__(self, flow, queue):
self.flow = flow
self.queue = queue
self.handle = None
class MessageFlowMultiplexer:
def __init__(self, flows, time):
self.time = copy(time)
self.tmin_next = copy(time)
mq_greater = lambda a, b: a.queue.nextTime() > b.queue.nextTime()
self.qheap = BinaryHeap(mq_greater)
self.queues = []
for flow in flows:
            self.queues.append(MessageQueue(flow.name, flow.time, flow.dt))
            h = self.qheap.push(HQValue(flow, self.queues[-1]))
            h.value.handle = h
def push(self, flow_index, m, time):
self.queues[flow_index].push(m, time)
def next(self):
q = self.qheap.top()
m = q.queue.pop()
self.qheap.siftdown(q.handle)
#assert self.flows[idx].time_callback is not None,\
# "No time-callback for flow {}".format(self.time, self.flows[idx].name)
q.flow.time_callback(m[1])
self.tmin_next = self.qheap.top().queue.nextTime()
return q.flow.callback(q.flow.name, m, self.tmin_next)
def next_time(self):
return self.tmin_next
def setup_inflow(self, flow):
self.queues.append(MessageQueue(flow.name, flow.time, flow.dt))
h = self.qheap.push(HQValue(flow, self.queues[-1]))
h.value.handle = h
def __len__(self):
return len(self.queues)
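# Usage sketch (the flow objects are hypothetical; they only need the
# name/time/dt attributes and the callback/time_callback hooks used above):
#
#   mux = MessageFlowMultiplexer([flow_a, flow_b], start_time)
#   mux.push(0, message, timestamp)   # enqueue a message on the first flow
#   while mux.next_time() <= end_time:
#       mux.next()                    # deliver the earliest pending message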
| [
"copy.copy"
] | [((1809, 1819), 'copy.copy', 'copy', (['time'], {}), '(time)\n', (1813, 1819), False, 'from copy import copy\n'), ((1845, 1855), 'copy.copy', 'copy', (['time'], {}), '(time)\n', (1849, 1855), False, 'from copy import copy\n')] |
"""
Created by <NAME>. 2018-2019
Application is searching for the nearest measuremenet point (MPoint) of the pollution.
It is searching a place from the arguments or from user input
Provides an information about place (latitude and longitude) and measures from the nearest point.
running script without arguments will lead to menu which user can choose search for MPoints
First arg - place, city where it should search for MPoint. Argument should be by one word or with quotation marks
Example 1: main.py Gdansk
Example 2: main.py "Gdansk, dluga"
Example 3: main.py "Sopot, Haffnera 20"
Second arg - interval to repeat measurement request (in seconds)
Example 1: main.py Gdansk, 10
Example 2: main.py "Gdansk, dluga", 60
Example 3: main.py "Sopot, Haffnera 20", 900
"""
import sys
import advanced
import settings
import simple
import variables
z = 0
if len(sys.argv) == 1:
print(variables.menuIntro)
print(variables.menuOptions)
while z != 4:
try:
z = int(input(variables.yourChoice))
except ValueError:
print("Podana wartość nie jest liczbą")
if z == 1:
print(simple.simpleChose)
simple.GetData()
elif z == 2:
print(advanced.advancedChose)
elif z == 3:
print(settings.settingChose)
elif z == 4:
print(variables.menuEnd)
break
else:
print("nie wybrano poprawnej cyfry, wybierz jedną z opcji")
else:
arguments = sys.argv[1:]
try:
simple.GetData(*arguments)
except Exception:
print("niepoprawne parametry")
| [
"simple.GetData"
] | [((1526, 1552), 'simple.GetData', 'simple.GetData', (['*arguments'], {}), '(*arguments)\n', (1540, 1552), False, 'import simple\n'), ((1169, 1185), 'simple.GetData', 'simple.GetData', ([], {}), '()\n', (1183, 1185), False, 'import simple\n')] |
# Generated by Django 2.2.9 on 2020-01-21 20:34
import json
import pkgutil
import uuid
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations
from django.db import models
# Functions from the following migrations need manual copying.
# Move them and any dependencies into this file, then update the
# RunPython operations to refer to the local versions:
# api.migrations.0029_cloud_account_seeder
def seed_cost_management_aws_account_id(apps, schema_editor):
"""Create a cloud account, using the historical CloudAccount model."""
CloudAccount = apps.get_model("api", "CloudAccount")
cloud_account = CloudAccount.objects.create(
name="AWS", value="589173575009", description="Cost Management's AWS account ID"
)
cloud_account.save()
class Migration(migrations.Migration):
replaces = []
dependencies = []
operations = [
migrations.CreateModel(
name="ProviderStatus",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"status",
models.IntegerField(
choices=[
(0, "New"),
(1, "Ready"),
(33, "Warning"),
(98, "Disabled: Error"),
(99, "Disabled: Admin"),
],
default=0,
),
),
("last_message", models.CharField(max_length=256)),
("timestamp", models.DateTimeField()),
("retries", models.IntegerField(default=0)),
("provider", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.Provider")),
],
),
migrations.CreateModel(
name="CostModelMetricsMap",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"source_type",
models.CharField(
choices=[
("AWS", "AWS"),
("OCP", "OCP"),
("Azure", "Azure"),
("GCP", "GCP"),
("AWS-local", "AWS-local"),
("Azure-local", "Azure-local"),
("GCP-local", "GCP-local"),
],
max_length=50,
),
),
(
"metric",
models.CharField(
choices=[
("cpu_core_usage_per_hour", "cpu_core_usage_per_hour"),
("cpu_core_request_per_hour", "cpu_core_request_per_hour"),
("memory_gb_usage_per_hour", "memory_gb_usage_per_hour"),
("memory_gb_request_per_hour", "memory_gb_request_per_hour"),
("storage_gb_usage_per_month", "storage_gb_usage_per_month"),
("storage_gb_request_per_month", "storage_gb_request_per_month"),
("node_cost_per_month", "node_cost_per_month"),
],
max_length=256,
),
),
("label_metric", models.CharField(max_length=256)),
("label_measurement", models.CharField(max_length=256)),
("label_measurement_unit", models.CharField(max_length=64)),
],
options={"db_table": "cost_models_metrics_map", "unique_together": {("source_type", "metric")}},
),
migrations.CreateModel(
name="DataExportRequest",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("uuid", models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
("created_timestamp", models.DateTimeField(auto_now_add=True)),
("updated_timestamp", models.DateTimeField(auto_now=True)),
(
"status",
models.CharField(
choices=[
("pending", "Pending"),
("processing", "Processing"),
("waiting", "Waiting"),
("complete", "Complete"),
("error", "Error"),
],
default="pending",
max_length=32,
),
),
("start_date", models.DateField()),
("end_date", models.DateField()),
("created_by", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.User")),
],
options={"ordering": ("created_timestamp",)},
),
migrations.AddField(
model_name="dataexportrequest",
name="bucket_name",
field=models.CharField(default="", max_length=63),
preserve_default=False,
),
migrations.CreateModel(
name="Sources",
fields=[
("source_id", models.IntegerField(primary_key=True, serialize=False)),
("name", models.CharField(max_length=256, null=True)),
("source_type", models.CharField(max_length=50)),
("authentication", django.db.models.JSONField(default=dict)),
("billing_source", django.db.models.JSONField(default=dict, null=True)),
("koku_uuid", models.CharField(max_length=512, null=True, unique=True)),
("auth_header", models.TextField(null=True)),
("pending_delete", models.BooleanField(default=False)),
("offset", models.IntegerField()),
("endpoint_id", models.IntegerField(null=True)),
("pending_update", models.BooleanField(default=False)),
("source_uuid", models.UUIDField(null=True, unique=True)),
("account_id", models.CharField(max_length=150, null=True)),
],
options={"db_table": "api_sources"},
),
migrations.CreateModel(
name="CloudAccount",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("name", models.CharField(help_text="The name of the attribute", max_length=255)),
("value", models.TextField(null=True)),
("description", models.TextField(null=True)),
("updated_timestamp", models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.CreateModel(
name="ProviderInfrastructureMap",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"infrastructure_type",
models.CharField(
choices=[
("AWS", "AWS"),
("Azure", "Azure"),
("GCP", "GCP"),
("AWS-local", "AWS-local"),
("Azure-local", "Azure-local"),
("GCP-local", "GCP-local"),
],
max_length=50,
),
),
(
"infrastructure_provider",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.Provider"),
),
],
),
# ==============================
# CIRCULAR FK HERE!
# ==============================
migrations.AddField(
model_name="provider",
name="infrastructure",
field=models.ForeignKey(
null=True, on_delete=django.db.models.deletion.SET_NULL, to="api.ProviderInfrastructureMap"
),
),
migrations.RunPython(code=seed_cost_management_aws_account_id),
]
| [
"django.db.models.UUIDField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.migrations.RunPython",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((8398, 8460), 'django.db.migrations.RunPython', 'migrations.RunPython', ([], {'code': 'seed_cost_management_aws_account_id'}), '(code=seed_cost_management_aws_account_id)\n', (8418, 8460), False, 'from django.db import migrations\n'), ((5301, 5344), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(63)'}), "(default='', max_length=63)\n", (5317, 5344), False, 'from django.db import models\n'), ((8237, 8351), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""api.ProviderInfrastructureMap"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='api.ProviderInfrastructureMap')\n", (8254, 8351), False, 'from django.db import models\n'), ((1033, 1126), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1049, 1126), False, 'from django.db import models\n'), ((1193, 1330), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'New'), (1, 'Ready'), (33, 'Warning'), (98, 'Disabled: Error'), (99,\n 'Disabled: Admin')]", 'default': '(0)'}), "(choices=[(0, 'New'), (1, 'Ready'), (33, 'Warning'), (98,\n 'Disabled: Error'), (99, 'Disabled: Admin')], default=0)\n", (1212, 1330), False, 'from django.db import models\n'), ((1618, 1650), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1634, 1650), False, 'from django.db import models\n'), ((1683, 1705), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1703, 1705), False, 'from django.db import models\n'), ((1736, 1766), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1755, 1766), False, 'from django.db import models\n'), ((1798, 1884), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Provider"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.Provider')\n", (1815, 1884), False, 'from django.db import models\n'), ((2024, 2117), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2040, 2117), False, 'from django.db import models\n'), ((2189, 2395), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('AWS', 'AWS'), ('OCP', 'OCP'), ('Azure', 'Azure'), ('GCP', 'GCP'), (\n 'AWS-local', 'AWS-local'), ('Azure-local', 'Azure-local'), ('GCP-local',\n 'GCP-local')]", 'max_length': '(50)'}), "(choices=[('AWS', 'AWS'), ('OCP', 'OCP'), ('Azure', 'Azure'\n ), ('GCP', 'GCP'), ('AWS-local', 'AWS-local'), ('Azure-local',\n 'Azure-local'), ('GCP-local', 'GCP-local')], max_length=50)\n", (2205, 2395), False, 'from django.db import models\n'), ((2769, 3251), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('cpu_core_usage_per_hour', 'cpu_core_usage_per_hour'), (\n 'cpu_core_request_per_hour', 'cpu_core_request_per_hour'), (\n 'memory_gb_usage_per_hour', 'memory_gb_usage_per_hour'), (\n 'memory_gb_request_per_hour', 'memory_gb_request_per_hour'), (\n 'storage_gb_usage_per_month', 'storage_gb_usage_per_month'), (\n 'storage_gb_request_per_month', 
'storage_gb_request_per_month'), (\n 'node_cost_per_month', 'node_cost_per_month')]", 'max_length': '(256)'}), "(choices=[('cpu_core_usage_per_hour',\n 'cpu_core_usage_per_hour'), ('cpu_core_request_per_hour',\n 'cpu_core_request_per_hour'), ('memory_gb_usage_per_hour',\n 'memory_gb_usage_per_hour'), ('memory_gb_request_per_hour',\n 'memory_gb_request_per_hour'), ('storage_gb_usage_per_month',\n 'storage_gb_usage_per_month'), ('storage_gb_request_per_month',\n 'storage_gb_request_per_month'), ('node_cost_per_month',\n 'node_cost_per_month')], max_length=256)\n", (2785, 3251), False, 'from django.db import models\n'), ((3571, 3603), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (3587, 3603), False, 'from django.db import models\n'), ((3644, 3676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (3660, 3676), False, 'from django.db import models\n'), ((3722, 3753), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (3738, 3753), False, 'from django.db import models\n'), ((4005, 4098), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4021, 4098), False, 'from django.db import models\n'), ((4122, 4187), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'unique': '(True)'}), '(default=uuid.uuid4, editable=False, unique=True)\n', (4138, 4187), False, 'from django.db import models\n'), ((4228, 4267), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4248, 4267), False, 'from django.db import models\n'), ((4308, 4343), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4328, 4343), False, 'from django.db import models\n'), ((4414, 4607), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('pending', 'Pending'), ('processing', 'Processing'), ('waiting',\n 'Waiting'), ('complete', 'Complete'), ('error', 'Error')]", 'default': '"""pending"""', 'max_length': '(32)'}), "(choices=[('pending', 'Pending'), ('processing',\n 'Processing'), ('waiting', 'Waiting'), ('complete', 'Complete'), (\n 'error', 'Error')], default='pending', max_length=32)\n", (4430, 4607), False, 'from django.db import models\n'), ((4912, 4930), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (4928, 4930), False, 'from django.db import models\n'), ((4962, 4980), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (4978, 4980), False, 'from django.db import models\n'), ((5014, 5091), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.User"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.User')\n", (5031, 5091), False, 'from django.db import models\n'), ((5504, 5558), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (5523, 5558), False, 'from django.db import models\n'), ((5586, 5629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'null': '(True)'}), '(max_length=256, null=True)\n', (5602, 5629), False, 'from django.db import models\n'), 
((5664, 5695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (5680, 5695), False, 'from django.db import models\n'), ((5895, 5951), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'null': '(True)', 'unique': '(True)'}), '(max_length=512, null=True, unique=True)\n', (5911, 5951), False, 'from django.db import models\n'), ((5986, 6013), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (6002, 6013), False, 'from django.db import models\n'), ((6051, 6085), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6070, 6085), False, 'from django.db import models\n'), ((6115, 6136), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6134, 6136), False, 'from django.db import models\n'), ((6171, 6201), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6190, 6201), False, 'from django.db import models\n'), ((6239, 6273), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6258, 6273), False, 'from django.db import models\n'), ((6308, 6348), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'null': '(True)', 'unique': '(True)'}), '(null=True, unique=True)\n', (6324, 6348), False, 'from django.db import models\n'), ((6382, 6425), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'null': '(True)'}), '(max_length=150, null=True)\n', (6398, 6425), False, 'from django.db import models\n'), ((6612, 6705), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6628, 6705), False, 'from django.db import models\n'), ((6729, 6800), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""The name of the attribute"""', 'max_length': '(255)'}), "(help_text='The name of the attribute', max_length=255)\n", (6745, 6800), False, 'from django.db import models\n'), ((6829, 6856), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (6845, 6856), False, 'from django.db import models\n'), ((6891, 6918), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (6907, 6918), False, 'from django.db import models\n'), ((6959, 7005), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (6979, 7005), False, 'from django.db import models\n'), ((7156, 7249), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (7172, 7249), False, 'from django.db import models\n'), ((7329, 7520), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('AWS', 'AWS'), ('Azure', 'Azure'), ('GCP', 'GCP'), ('AWS-local',\n 'AWS-local'), ('Azure-local', 'Azure-local'), ('GCP-local', 'GCP-local')]", 'max_length': '(50)'}), "(choices=[('AWS', 'AWS'), ('Azure', 'Azure'), ('GCP', 'GCP'\n ), ('AWS-local', 'AWS-local'), ('Azure-local', 'Azure-local'), (\n 'GCP-local', 'GCP-local')], max_length=50)\n", (7345, 7520), False, 'from django.db import models\n'), 
((7882, 7968), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.Provider"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'api.Provider')\n", (7899, 7968), False, 'from django.db import models\n')] |
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.db import transaction
from .models import EveCharacter, EveCorporation, EveAlliance
from .tasks import update_character, update_corporation, update_alliance
@receiver(post_save, sender=EveCharacter)
def update_character_information(sender, **kwargs):
def call():
entity = kwargs.get('instance')
created = kwargs.get('created')
if created:
update_character.apply_async(
args=[entity.external_id])
transaction.on_commit(call)
@receiver(post_save, sender=EveCorporation)
def update_corporation_information(sender, **kwargs):
def call():
entity = kwargs.get('instance')
created = kwargs.get('created')
if created:
update_corporation.apply_async(
args=[entity.external_id])
transaction.on_commit(call)
@receiver(post_save, sender=EveAlliance)
def update_alliance_information(sender, **kwargs):
def call():
entity = kwargs.get('instance')
created = kwargs.get('created')
if created:
update_alliance.apply_async(
args=[entity.external_id])
transaction.on_commit(call)
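# The transaction.on_commit() wrapper used above defers each queued task until
# the surrounding database transaction commits, so a worker never looks up a
# row that is not yet visible. A new entity type would follow the same shape
# (the sender and task below are hypothetical):
#
#   @receiver(post_save, sender=SomeNewEntity)
#   def update_new_entity_information(sender, **kwargs):
#       def call():
#           if kwargs.get('created'):
#               update_new_entity.apply_async(args=[kwargs['instance'].external_id])
#       transaction.on_commit(call)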
| [
"django.dispatch.receiver",
"django.db.transaction.on_commit"
] | [((300, 340), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'EveCharacter'}), '(post_save, sender=EveCharacter)\n', (308, 340), False, 'from django.dispatch import receiver\n'), ((630, 672), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'EveCorporation'}), '(post_save, sender=EveCorporation)\n', (638, 672), False, 'from django.dispatch import receiver\n'), ((966, 1005), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'EveAlliance'}), '(post_save, sender=EveAlliance)\n', (974, 1005), False, 'from django.dispatch import receiver\n'), ((599, 626), 'django.db.transaction.on_commit', 'transaction.on_commit', (['call'], {}), '(call)\n', (620, 626), False, 'from django.db import transaction\n'), ((935, 962), 'django.db.transaction.on_commit', 'transaction.on_commit', (['call'], {}), '(call)\n', (956, 962), False, 'from django.db import transaction\n'), ((1262, 1289), 'django.db.transaction.on_commit', 'transaction.on_commit', (['call'], {}), '(call)\n', (1283, 1289), False, 'from django.db import transaction\n')] |
"""A package of tools for simulating the US Electoral College.
...
"""
__version__ = "0.5.0"
__all__ = ["Election", "OutcomeList"]
import logging
from typing import Optional, Union
from pandas import DataFrame, Series
from numpy.random import default_rng
from ecsim.loaders import *
from ecsim.apportionment import *
from ecsim.outcomes import Outcome, OutcomeList
# TODO: improve logging
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(level=logging.WARNING)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
_RANDOM_SEED = 713
SENATORS_PER_STATE: int = 2
"""The number of senators each state receives as specified in the US Constitution."""
DC_STATEHOOD: bool = False
"""Whether or not the District of Columbia is a state.
The seat of the US Government, the District of Columbia (DC), is not a state, nor
is it a part of any state. Until the ratification of the 23rd amendment, this meant
that it did not receive any electors in the vote for the president. With the adoption
of the 23rd Amendment in 1961, DC is granted as many electors it would have if it
were a state, but no more than the least populous state.
"""
census_data: DataFrame = load_census_data()
"""The (historical) US population data gathered with each decennial census."""
class Election:
"""Contains the data and method for simulating historical US elections.
Attributes
----------
year : int
The year of the presidential election to simulate.
union : DataFrame
The historical election data used for the simulation.
"""
__slots__ = [
"year",
"representatives",
"apportionment_method",
"union",
"_logger",
"_rng",
]
def __init__(
self,
year: int,
representatives: int = 435,
apportionment_method: ApportionmentMethod = huntington_hill,
) -> None:
"""Initializes an election by loading Census data and election data.
Parameters
----------
year
A presidential election year, must be a multiple of 4.
representatives
The total number of representatives in the House, by default 435,
and must be at least equal to the number of states.
apportionment_method
The specific apportionment method to be used when apportioning
representatives.
"""
self._logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self._rng = default_rng(_RANDOM_SEED)
self.apportionment_method = apportionment_method
if year % 4 != 0:
raise ValueError(f"{year} is not an election year!")
self.year = year
self._logger.debug(f"Loading election data for {self.year}")
self.union = load_election_data(self.year)
number_states = len(self.union) - (
1 if "District of Columbia" in self.union.index and not DC_STATEHOOD else 0
)
if representatives < number_states:
raise ValueError("There are not enough representatives!")
self.representatives = self.apportion_representatives(representatives)
def get_electors(self, state: Optional[str] = None) -> Union[int, Series]:
"""Returns the number of electors for each state.
Parameters
----------
state : str, optional
If present, computes the states electors from its number of representatives
and the number of senators each state receives. Implements special logic
for the District of Columbia, which is not a state but does receive electors.
Returns
-------
Union[int, Series]
If no state is given, then a pandas.Series is returned containing the number
of electors each state receives.
"""
if state is not None:
if state == "District of Columbia" and not DC_STATEHOOD:
# TODO: move this comment to a Note in the docstring
                # this is not technically correct, but it is functionally so,
                # i.e. if the population of DC were so much smaller than that of the least populous state
# that it would be awarded 1 representative where the least populous
# state would receive 2, then this equation will give too many electors
# to DC, but this scenario seems implausible
return (
self.representatives["Representatives"].min() + SENATORS_PER_STATE
)
else:
return (
self.representatives["Representatives"][state] + SENATORS_PER_STATE
)
electors = Series(0, index=self.union.index, name="Electors", dtype=int)
for state in electors.index:
electors[state] = self.get_electors(state)
return electors
def apportion_representatives(self, number: int = 435) -> DataFrame:
"""Apportions some number of representatives among the states.
Parameters
----------
number : int, optional
The number of representatives to be apportioned, by default 435
Returns
-------
DataFrame
            There is a row for every state, and the number of representatives is stored
            in a column called 'Representatives'; the frame also contains the Census
            data used to compute the apportionment.
"""
census_year = self.year // 10 * 10
self._logger.debug(
f"Initializing list of states with census data from {census_year}"
)
states = DataFrame(data=census_data[f"{census_year}"], dtype=int)
states["Representatives"] = 0
if "District of Columbia" in states.index and not DC_STATEHOOD:
self._logger.debug(
"Removing the District of Columbia because it is not a state"
)
states.drop("District of Columbia", inplace=True)
self._logger.info(
f"Apportioning {number} representatives among {len(states)} states"
)
self.apportionment_method(number, states)
self._logger.debug("Finished apportioning representatives")
return states
def simulate(self, percent_uncertainty: float = 0) -> Outcome:
"""Simulates a US presidential election.
Parameters
----------
percent_uncertainty : float, optional
The percent magnitude of the random change in the results of the election,
by default 0, and measured in percentage points (eg. an input of 2 means
a 2% margin of change in the results)
Returns
-------
Outcome
The results of an election stored in an object
"""
# self._logger.info("Simulating a US presidential election")
self._logger.debug("Initializing election results from historical data")
# TODO: fix the scrapers to remove the final two columns there
results = self.union.iloc[:, :-2].copy()
if percent_uncertainty:
self._logger.info(
f"Simulating an election with {percent_uncertainty:.1f}% uncertainty"
)
for candidate in results:
for state, votes in results[candidate].iteritems():
bound = int(votes // 100 * percent_uncertainty)
results.loc[state, candidate] += self._rng.integers(
-bound, bound, endpoint=True
)
else:
self._logger.info("Simulating an election with no uncertainty")
self._logger.debug("Getting electors for each state")
results["Electors"] = self.get_electors()
return Outcome(results)
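# Illustrative sketch only (the year and uncertainty below are arbitrary
# choices, and it assumes the bundled election data includes that year):
# build an Election and run a single simulation.
def _example_run(year: int = 2000) -> Outcome:
    """Run one simulation with a 2% uncertainty margin."""
    election = Election(year)
    return election.simulate(percent_uncertainty=2)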
| [
"logging.getLogger",
"pandas.Series",
"logging.StreamHandler",
"numpy.random.default_rng",
"logging.Formatter",
"ecsim.outcomes.Outcome",
"pandas.DataFrame"
] | [((408, 435), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (425, 435), False, 'import logging\n'), ((479, 502), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (500, 502), False, 'import logging\n'), ((551, 624), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (568, 624), False, 'import logging\n'), ((2561, 2619), 'logging.getLogger', 'logging.getLogger', (['f"""{__name__}.{self.__class__.__name__}"""'], {}), "(f'{__name__}.{self.__class__.__name__}')\n", (2578, 2619), False, 'import logging\n'), ((2640, 2665), 'numpy.random.default_rng', 'default_rng', (['_RANDOM_SEED'], {}), '(_RANDOM_SEED)\n', (2651, 2665), False, 'from numpy.random import default_rng\n'), ((4843, 4904), 'pandas.Series', 'Series', (['(0)'], {'index': 'self.union.index', 'name': '"""Electors"""', 'dtype': 'int'}), "(0, index=self.union.index, name='Electors', dtype=int)\n", (4849, 4904), False, 'from pandas import DataFrame, Series\n'), ((5779, 5835), 'pandas.DataFrame', 'DataFrame', ([], {'data': "census_data[f'{census_year}']", 'dtype': 'int'}), "(data=census_data[f'{census_year}'], dtype=int)\n", (5788, 5835), False, 'from pandas import DataFrame, Series\n'), ((7948, 7964), 'ecsim.outcomes.Outcome', 'Outcome', (['results'], {}), '(results)\n', (7955, 7964), False, 'from ecsim.outcomes import Outcome, OutcomeList\n')] |
import html2text
import requests
r = requests.get("https://pypi.org/project/html2text/")
h = html2text.HTML2Text()
text = h.handle(r.text)
print(text)
| [
"html2text.HTML2Text",
"requests.get"
] | [((38, 89), 'requests.get', 'requests.get', (['"""https://pypi.org/project/html2text/"""'], {}), "('https://pypi.org/project/html2text/')\n", (50, 89), False, 'import requests\n'), ((94, 115), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (113, 115), False, 'import html2text\n')] |
# -*- coding: utf-8 -*-
"""
Core utils for managing the face recognition process
"""
import json
import logging
import os
import pickle
import time
from pprint import pformat
import face_recognition
from sklearn.metrics import accuracy_score, balanced_accuracy_score, classification_report, \
precision_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neural_network import MLPClassifier
from tqdm import tqdm
from math import pow
from datastructure.Person import Person
from utils.util import dump_dataset, load_image_file
log = logging.getLogger()
class Classifier(object):
"""
    Store the knowledge related to people's faces
"""
def __init__(self):
self.training_dir = None
self.model_path = None
self.peoples_list = []
self.classifier = None
self.parameters = {}
def init_classifier(self):
"""
        Initialize a new classifier after making sure that the necessary data are initialized
"""
if self.classifier is None:
log.debug("init_classifier | START!")
if len(self.parameters) > 0:
log.debug("init_classifier | Initializing a new classifier ... | {0}".format(
pformat(self.__dict__)))
self.classifier = MLPClassifier(**self.parameters)
else:
log.error(
"init_classifier | Mandatory parameter not provided | Init a new KNN Classifier")
self.classifier = MLPClassifier()
def load_classifier_from_file(self, timestamp):
"""
        Initialize the classifier from file.
        The given timestamp is the name of the directory that contains the classifier we want to load.
        The tree structure of the model folder will be something like this
Structure:
model/
├── <20190520_095119>/ --> Timestamp in which the model was created
        │   ├── model.dat --> Dataset generated by encoding the faces and pickling them
│ ├── model.clf --> Classifier delegated to recognize a given face
│ ├── model.json --> Hyperparameters related to the current classifier
├── <20190519_210950>/
│ ├── model.dat
│ ├── model.clf
│ ├── model.json
└── ...
:param timestamp:
:return:
"""
log.debug(
"load_classifier_from_file | Loading classifier from file ... | File: {}".format(timestamp))
        # Load a trained model (if one was passed in)
err = None
if self.classifier is None:
if self.model_path is None or not os.path.isdir(self.model_path):
raise Exception("Model folder not provided!")
# Adding the conventional name used for the classifier -> 'model.clf'
filename = os.path.join(self.model_path, timestamp, "model.clf")
log.debug(
"load_classifier_from_file | Loading classifier from file: {}".format(filename))
if os.path.isfile(filename):
log.debug(
"load_classifier_from_file | File {} exist!".format(filename))
with open(filename, 'rb') as f:
self.classifier = pickle.load(f)
log.debug("load_classifier_from_file | Classifier loaded!")
else:
err = "load_classifier_from_file | FATAL | File {} DOES NOT EXIST ...".format(
filename)
else:
err = "load_classifier_from_file | FATAL | Path {} DOES NOT EXIST ...".format(
self.model_path)
if err is not None:
log.error(err)
log.error("load_classifier_from_file | Seems that the model is gone :/ | Loading an empty classifier for "
"training purpouse ...")
self.classifier = None
return
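    # Hedged usage sketch for the loader above (the folder name "model" and the
    # timestamp are placeholders, not defaults of this class):
    #   clf = Classifier()
    #   clf.model_path = "model"
    #   clf.load_classifier_from_file("20190520_095119")
    #   # clf.classifier is now an MLPClassifier, or None if loading failed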
def train(self, X, Y, timestamp):
"""
        Train a new model on the given data [X] and the given target [Y]
:param X:
:param Y:
:param timestamp:
"""
log.debug("train | START")
if self.classifier is None:
self.init_classifier()
dump_dataset(X, Y, os.path.join(self.model_path, timestamp))
start_time = time.time()
X_train, x_test, Y_train, y_test = train_test_split(
X, Y, test_size=0.25)
log.debug("train | Training ...")
self.classifier.fit(X_train, Y_train)
log.debug("train | Model Trained!")
log.debug("train | Checking performance ...")
y_pred = self.classifier.predict(x_test)
# Static method
self.verify_performance(y_test, y_pred)
return self.dump_model(timestamp=timestamp, classifier=self.classifier), time.time() - start_time
def tuning(self, X, Y, timestamp):
"""
        Tune the hyperparameters of a new model on the given data [X] and the given target [Y]
:param X:
:param Y:
:param timestamp:
:return:
"""
start_time = time.time()
dump_dataset(X, Y, os.path.join(self.model_path, timestamp))
X_train, x_test, Y_train, y_test = train_test_split(
X, Y, test_size=0.25)
self.classifier = MLPClassifier(max_iter=250)
# Hyperparameter of the neural network (MLP) to tune
# Faces are encoded using 128 points
parameter_space = {
'hidden_layer_sizes': [(128,), (200,), (200, 128,), ],
'activation': ['identity', 'tanh', 'relu'],
'solver': ['adam'],
'learning_rate': ['constant', 'adaptive'],
}
log.debug("tuning | Parameter -> {}".format(pformat(parameter_space)))
grid = GridSearchCV(self.classifier, parameter_space,
cv=2, scoring='accuracy', verbose=20, n_jobs=8)
grid.fit(X_train, Y_train)
log.info("TUNING COMPLETE | DUMPING DATA!")
# log.info("tuning | Grid Scores: {}".format(pformat(grid.grid_scores_)))
log.info('Best parameters found: {}'.format(grid.best_params_))
y_pred = grid.predict(x_test)
log.info('Results on the test set: {}'.format(
pformat(grid.score(x_test, y_test))))
self.verify_performance(y_test, y_pred)
return self.dump_model(timestamp=timestamp, params=grid.best_params_,
classifier=grid.best_estimator_), time.time() - start_time
@staticmethod
def verify_performance(y_test, y_pred):
"""
        Verify the performance of the model by comparing the known and the predicted results
:param y_test:
:param y_pred:
:return:
"""
log.debug("verify_performance | Analyzing performance ...")
log.info("\nClassification Report: {}".format(
pformat(classification_report(y_test, y_pred))))
log.info("balanced_accuracy_score: {}".format(
pformat(balanced_accuracy_score(y_test, y_pred))))
log.info("accuracy_score: {}".format(
pformat(accuracy_score(y_test, y_pred))))
log.info("precision_score: {}".format(
pformat(precision_score(y_test, y_pred, average='weighted'))))
def dump_model(self, timestamp, classifier, params=None, path=None):
"""
Dump the model to the given path, file
:param params:
:param timestamp:
:param classifier:
:param path:
"""
log.debug("dump_model | Dumping model ...")
if path is None:
if self.model_path is not None:
if os.path.exists(self.model_path) and os.path.isdir(self.model_path):
path = self.model_path
config = {'classifier_file': os.path.join(timestamp, "model.clf"),
'params': params
}
if not os.path.isdir(path):
os.makedirs(timestamp)
classifier_folder = os.path.join(path, timestamp)
classifier_file = os.path.join(classifier_folder, "model")
log.debug("dump_model | Dumping model ... | Path: {} | Model folder: {}".format(
path, timestamp))
if not os.path.exists(classifier_folder):
os.makedirs(classifier_folder)
with open(classifier_file + ".clf", 'wb') as f:
pickle.dump(classifier, f)
log.info('dump_model | Model saved to {0}.clf'.format(
classifier_file))
with open(classifier_file + ".json", 'w') as f:
json.dump(config, f)
log.info('dump_model | Configuration saved to {0}.json'.format(
classifier_file))
return config
def init_peoples_list(self, peoples_path=None):
"""
        This method iterates over the folder that contains the people's faces in order to
        initialize the array of people
:return:
"""
log.debug("init_peoples_list | Initalizing people ...")
if peoples_path is not None and os.path.isdir(peoples_path):
self.training_dir = peoples_path
else:
raise Exception("Dataset (peoples faces) path not provided :/")
        # The init process could be multithreaded, but the BATCH method will perform better
# pool = ThreadPool(3)
# self.peoples_list = pool.map(self.init_peoples_list_core, os.listdir(self.training_dir))
for people_name in tqdm(os.listdir(self.training_dir),
total=len(os.listdir(self.training_dir)), desc="Init people list ..."):
self.peoples_list.append(self.init_peoples_list_core(people_name))
self.peoples_list = list(
filter(None.__ne__, self.peoples_list)) # Remove None
def init_peoples_list_core(self, people_name):
"""
        Delegated core method for parallelized operation
:param people_name:
:return:
"""
if os.path.isdir(os.path.join(self.training_dir, people_name)):
log.debug("Initalizing people {0}".format(
os.path.join(self.training_dir, people_name)))
person = Person()
person.name = people_name
person.path = os.path.join(self.training_dir, people_name)
person.init_dataset()
return person
else:
log.debug("People {0} invalid folder!".format(
os.path.join(self.training_dir, people_name)))
return None
def init_dataset(self):
"""
Initialize a new dataset joining all the data related to the peoples list
:return:
"""
DATASET = {
# Image data (numpy array)
"X": [],
# Person name
"Y": []
}
for people in self.peoples_list:
log.debug(people.name)
for item in people.dataset["X"]:
DATASET["X"].append(item)
for item in people.dataset["Y"]:
DATASET["Y"].append(item)
return DATASET
# The method is delegated to try to retrieve the face from the given image.
# In case of cuda_malloc error (out of memory), the image will be resized
@staticmethod
def extract_face_from_image(X_img_path):
# Load image data in a numpy array
try:
log.debug("predict | Loading image {}".format(X_img_path))
X_img, ratio = load_image_file(X_img_path)
except OSError:
log.error("predict | What have you uploaded ???")
return -2, -2, -1
log.debug("predict | Extracting faces locations ...")
try:
# TODO: Reduce size of the image at every iteration
X_face_locations = face_recognition.face_locations(
X_img, model="hog") # model="cnn")
except RuntimeError:
log.error(
"predict | GPU does not have enough memory: FIXME unload data and retry")
return None, None, ratio
log.debug("predict | Found {} face(s) for the given image".format(
len(X_face_locations)))
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
log.warning("predict | Seems that no faces was found :( ")
return -3, -3, ratio
        # Find encodings for faces in the test image
log.debug("predict | Encoding faces ...")
# num_jitters increase the distortion check
faces_encodings = face_recognition.face_encodings(
X_img, known_face_locations=X_face_locations, num_jitters=1)
log.debug("predict | Face encoded! | Let's ask to the neural network ...")
return faces_encodings, X_face_locations, ratio
def predict(self, X_img_path, distance_threshold=0.45):
"""
        Recognizes faces in the given image using the trained classifier
:param X_img_path: path to image to be recognized
:param distance_threshold: (optional) distance threshold for face classification. the larger it is,
the more chance of mis-classifying an unknown person as a known one.
        :return: a dict {"predictions": [(name, bounding box), ...], "scores": [...]} for the recognized faces,
            or a negative error code (-1 when no face passes the threshold).
"""
if self.classifier is None:
log.error(
"predict | Be sure that you have loaded/trained the nerual network model")
return None
faces_encodings, X_face_locations = None, None
# Resize image if necessary for avoid cuda-malloc error (important for low gpu)
# In case of error, will be returned back an integer.
# FIXME: manage gpu memory unload in case of None
ratio = 2
while faces_encodings is None or X_face_locations is None:
faces_encodings, X_face_locations, ratio = Classifier.extract_face_from_image(
X_img_path)
# In this case return back the error to the caller
if isinstance(faces_encodings, int):
return faces_encodings
# Use the MLP model to find the best matches for the face(s)
log.debug("predict | Understanding peoples recognized from NN ...")
closest_distances = self.classifier.predict(faces_encodings)
log.debug("predict | Persons recognized: [{}]".format(
closest_distances))
log.debug("predict | Asking to the neural network for probability ...")
predictions = self.classifier.predict_proba(faces_encodings)
pred = []
for prediction in predictions:
pred.append(dict([v for v in sorted(zip(self.classifier.classes_, prediction),
key=lambda c: c[1], reverse=True)[:len(closest_distances)]]))
log.debug("predict | Predict proba -> {}".format(pred))
face_prediction = []
for i in range(len(pred)):
element = list(pred[i].items())[0]
log.debug("pred in cycle: {}".format(element))
face_prediction.append(element)
#log.debug("predict | *****MIN****| {}".format(min(closest_distances[0][i])))
log.debug("Scores -> {}".format(face_prediction))
_predictions = []
scores = []
if len(face_prediction) > 0:
for person_score, loc in zip(face_prediction, X_face_locations):
if person_score[1] < distance_threshold:
log.warning("predict | Person {} does not outbounds treshold {}<{}".format(
pred, person_score[1], distance_threshold))
else:
log.debug("predict | Pred: {} | Loc: {} | Score: {}".format(
person_score[0], loc, person_score[1]))
if ratio > 0:
log.debug(
"predict | Fixing face location using ratio: {}".format(ratio))
x1, y1, x2, y2 = loc
# 1200 < size < 1600
if ratio < 1:
ratio = pow(ratio, -1)
x1 *= ratio
x2 *= ratio
y1 *= ratio
y2 *= ratio
loc = x1, y1, x2, y2
_predictions.append((person_score[0], loc))
scores.append(person_score[1])
log.debug("predict | Prediction: {}".format(_predictions))
log.debug("predict | Score: {}".format(scores))
if len(_predictions) == 0 or len(face_prediction) == 0:
log.debug("predict | Face not recognized :/")
return -1
return {"predictions": _predictions, "scores": scores}
| [
"logging.getLogger",
"sklearn.model_selection.GridSearchCV",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.metrics.precision_score",
"os.path.exists",
"os.listdir",
"utils.util.load_image_file",
"os.path.isdir",
"face_recognition.face_locations",
"sklearn.model_selection.train_test_split",
"pickle.load",
"pprint.pformat",
"os.path.isfile",
"time.time",
"sklearn.metrics.accuracy_score",
"sklearn.neural_network.MLPClassifier",
"pickle.dump",
"os.makedirs",
"datastructure.Person.Person",
"math.pow",
"os.path.join",
"face_recognition.face_encodings",
"json.dump"
] | [((568, 587), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (585, 587), False, 'import logging\n'), ((4341, 4352), 'time.time', 'time.time', ([], {}), '()\n', (4350, 4352), False, 'import time\n'), ((4397, 4435), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.25)'}), '(X, Y, test_size=0.25)\n', (4413, 4435), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((5130, 5141), 'time.time', 'time.time', ([], {}), '()\n', (5139, 5141), False, 'import time\n'), ((5255, 5293), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.25)'}), '(X, Y, test_size=0.25)\n', (5271, 5293), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((5333, 5360), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'max_iter': '(250)'}), '(max_iter=250)\n', (5346, 5360), False, 'from sklearn.neural_network import MLPClassifier\n'), ((5809, 5907), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['self.classifier', 'parameter_space'], {'cv': '(2)', 'scoring': '"""accuracy"""', 'verbose': '(20)', 'n_jobs': '(8)'}), "(self.classifier, parameter_space, cv=2, scoring='accuracy',\n verbose=20, n_jobs=8)\n", (5821, 5907), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((8014, 8043), 'os.path.join', 'os.path.join', (['path', 'timestamp'], {}), '(path, timestamp)\n', (8026, 8043), False, 'import os\n'), ((8070, 8110), 'os.path.join', 'os.path.join', (['classifier_folder', '"""model"""'], {}), "(classifier_folder, 'model')\n", (8082, 8110), False, 'import os\n'), ((12563, 12660), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['X_img'], {'known_face_locations': 'X_face_locations', 'num_jitters': '(1)'}), '(X_img, known_face_locations=\n X_face_locations, num_jitters=1)\n', (12594, 12660), False, 'import face_recognition\n'), ((2884, 2937), 'os.path.join', 'os.path.join', (['self.model_path', 'timestamp', '"""model.clf"""'], {}), "(self.model_path, timestamp, 'model.clf')\n", (2896, 2937), False, 'import os\n'), ((3073, 3097), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3087, 3097), False, 'import os\n'), ((4277, 4317), 'os.path.join', 'os.path.join', (['self.model_path', 'timestamp'], {}), '(self.model_path, timestamp)\n', (4289, 4317), False, 'import os\n'), ((5169, 5209), 'os.path.join', 'os.path.join', (['self.model_path', 'timestamp'], {}), '(self.model_path, timestamp)\n', (5181, 5209), False, 'import os\n'), ((7822, 7858), 'os.path.join', 'os.path.join', (['timestamp', '"""model.clf"""'], {}), "(timestamp, 'model.clf')\n", (7834, 7858), False, 'import os\n'), ((7930, 7949), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (7943, 7949), False, 'import os\n'), ((7963, 7985), 'os.makedirs', 'os.makedirs', (['timestamp'], {}), '(timestamp)\n', (7974, 7985), False, 'import os\n'), ((8246, 8279), 'os.path.exists', 'os.path.exists', (['classifier_folder'], {}), '(classifier_folder)\n', (8260, 8279), False, 'import os\n'), ((8293, 8323), 'os.makedirs', 'os.makedirs', (['classifier_folder'], {}), '(classifier_folder)\n', (8304, 8323), False, 'import os\n'), ((8393, 8419), 'pickle.dump', 'pickle.dump', (['classifier', 'f'], {}), '(classifier, f)\n', (8404, 8419), False, 'import pickle\n'), ((8590, 8610), 'json.dump', 'json.dump', (['config', 'f'], {}), '(config, f)\n', (8599, 8610), False, 'import json\n'), ((9088, 9115), 'os.path.isdir', 
'os.path.isdir', (['peoples_path'], {}), '(peoples_path)\n', (9101, 9115), False, 'import os\n'), ((9498, 9527), 'os.listdir', 'os.listdir', (['self.training_dir'], {}), '(self.training_dir)\n', (9508, 9527), False, 'import os\n'), ((10016, 10060), 'os.path.join', 'os.path.join', (['self.training_dir', 'people_name'], {}), '(self.training_dir, people_name)\n', (10028, 10060), False, 'import os\n'), ((10202, 10210), 'datastructure.Person.Person', 'Person', ([], {}), '()\n', (10208, 10210), False, 'from datastructure.Person import Person\n'), ((10275, 10319), 'os.path.join', 'os.path.join', (['self.training_dir', 'people_name'], {}), '(self.training_dir, people_name)\n', (10287, 10319), False, 'import os\n'), ((11478, 11505), 'utils.util.load_image_file', 'load_image_file', (['X_img_path'], {}), '(X_img_path)\n', (11493, 11505), False, 'from utils.util import dump_dataset, load_image_file\n'), ((11792, 11843), 'face_recognition.face_locations', 'face_recognition.face_locations', (['X_img'], {'model': '"""hog"""'}), "(X_img, model='hog')\n", (11823, 11843), False, 'import face_recognition\n'), ((1305, 1337), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '(**self.parameters)\n', (1318, 1337), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1519, 1534), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (1532, 1534), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4839, 4850), 'time.time', 'time.time', ([], {}), '()\n', (4848, 4850), False, 'import time\n'), ((5767, 5791), 'pprint.pformat', 'pformat', (['parameter_space'], {}), '(parameter_space)\n', (5774, 5791), False, 'from pprint import pformat\n'), ((6511, 6522), 'time.time', 'time.time', ([], {}), '()\n', (6520, 6522), False, 'import time\n'), ((2685, 2715), 'os.path.isdir', 'os.path.isdir', (['self.model_path'], {}), '(self.model_path)\n', (2698, 2715), False, 'import os\n'), ((3295, 3309), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3306, 3309), False, 'import pickle\n'), ((6910, 6947), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6931, 6947), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, classification_report, precision_score\n'), ((7026, 7065), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7049, 7065), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, classification_report, precision_score\n'), ((7135, 7165), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7149, 7165), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, classification_report, precision_score\n'), ((7236, 7287), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (7251, 7287), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, classification_report, precision_score\n'), ((7674, 7705), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (7688, 7705), False, 'import os\n'), ((7710, 7740), 'os.path.isdir', 'os.path.isdir', (['self.model_path'], {}), '(self.model_path)\n', (7723, 7740), False, 'import os\n'), ((9571, 9600), 'os.listdir', 'os.listdir', (['self.training_dir'], {}), '(self.training_dir)\n', (9581, 9600), False, 'import os\n'), ((10134, 
10178), 'os.path.join', 'os.path.join', (['self.training_dir', 'people_name'], {}), '(self.training_dir, people_name)\n', (10146, 10178), False, 'import os\n'), ((10469, 10513), 'os.path.join', 'os.path.join', (['self.training_dir', 'people_name'], {}), '(self.training_dir, people_name)\n', (10481, 10513), False, 'import os\n'), ((1246, 1268), 'pprint.pformat', 'pformat', (['self.__dict__'], {}), '(self.__dict__)\n', (1253, 1268), False, 'from pprint import pformat\n'), ((16253, 16267), 'math.pow', 'pow', (['ratio', '(-1)'], {}), '(ratio, -1)\n', (16256, 16267), False, 'from math import pow\n')] |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
# place form definition here
class SaveHelper(FormHelper):
def __init__(self, form=None):
super().__init__(form)
self.layout.append(Submit(name='save', value='Salvar'))
self.form_show_errors = True
self.render_required_fields = True
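# Hedged example of the "# place form definition here" hook above: any Django
# form can attach the helper; the form and field names are illustrative only.
class ExampleForm(forms.Form):
    nome = forms.CharField(label='Nome', max_length=100)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = SaveHelper(self)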
| [
"crispy_forms.layout.Submit"
] | [((260, 295), 'crispy_forms.layout.Submit', 'Submit', ([], {'name': '"""save"""', 'value': '"""Salvar"""'}), "(name='save', value='Salvar')\n", (266, 295), False, 'from crispy_forms.layout import Submit\n')] |
import Levenshtein as levd
import texterrors
def test_levd():
pairs = ['a', '', '', 'a', 'MOZILLA', 'MUSIAL', 'ARE', 'MOZILLA', 'TURNIPS', 'TENTH', 'POSTERS', 'POSTURE']
for a, b in zip(pairs[:-1:2], pairs[1::2]):
d1 = texterrors.lev_distance(a, b)
d2 = levd.distance(a, b)
if d1 != d2:
print(a, b, d1, d2)
raise RuntimeError('Assert failed!')
def calc_wer(ref, b):
cnt = 0
err = 0
for w1, w2 in zip(ref, b):
if w1 != '<eps>':
cnt += 1
if w1 != w2:
err += 1
return 100. * (err / cnt)
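# Worked example for calc_wer: the '<eps>' slot in the reference marks an
# insertion and is not counted, so cnt = 3 and only 'c' vs 'd' is an error,
# giving 100 * 1 / 3, i.e. about 33.33.
assert round(calc_wer(['a', '<eps>', 'b', 'c'], ['a', 'x', 'b', 'd']), 2) == 33.33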
def test_wer():
ref = 'IN THE DISCOTHEQUE THE DJ PLAYED PROGRESSIVE HOUSE MUSIC AND TRANCE'.split()
hyp = 'IN THE DISCO TAK THE D J PLAYED PROGRESSIVE HOUSE MUSIC AND TRANCE'.split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 36.36, round(wer, 2)
ref = 'IT FORMS PART OF THE SOUTH EAST DORSET CONURBATION ALONG THE ENGLISH CHANNEL COAST'.split()
hyp = "IT FOLLOWS PARDOFELIS LOUSES DORJE THAT COMORE H O LONELY ENGLISH GENOME COTA'S".split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 85.71, round(wer, 2)
ref = 'THE FILM WAS LOADED INTO CASSETTES IN A DARKROOM OR CHANGING BAG'.split()
hyp = "THE FILM WAS LOADED INTO CASSETTES IN A DARK ROOM OR CHANGING BAG".split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 16.67, round(wer, 2)
ref = 'GEPHYRIN HAS BEEN SHOWN TO BE NECESSARY FOR GLYR CLUSTERING AT INHIBITORY SYNAPSES'.split()
hyp = "THE VIDEOS RISHIRI TUX BINOY CYSTIDIA PHU LIAM CHOLESTEROL ET INNIT PATRESE SYNAPSES".split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False, use_chardiff=True)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 100.0, round(wer, 2) # kaldi gets 92.31 ! but has worse alignment
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False, use_chardiff=False)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 92.31, round(wer, 2)
ref = 'test sentence okay words ending now'.split()
hyp = "test a sentenc ok endin now".split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False, use_chardiff=True)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 83.33, round(wer, 2) # kaldi gets 66.67 ! but has worse alignment
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False, use_chardiff=False)
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 66.67, round(wer, 2)
ref = 'speedbird eight six two'.split()
hyp = 'hello speedbird six two'.split()
ref_aligned, hyp_aligned, _ = texterrors.align_texts(ref, hyp, False, use_chardiff=True)
assert ref_aligned[0] == '<eps>'
wer = calc_wer(ref_aligned, hyp_aligned)
assert round(wer, 2) == 50.0, round(wer, 2) # kaldi gets 66.67 ! but has worse alignment
def test_oov_cer():
oov_set = {'airport'}
ref_aligned = 'the missing word is <eps> airport okay'.split()
hyp_aligned = 'the missing word is air port okay'.split()
err, cnt = texterrors.get_oov_cer(ref_aligned, hyp_aligned, oov_set)
assert round(err / cnt, 2) == 0.14, round(err / cnt, 2)
ref_aligned = 'the missing word is airport okay'.split()
hyp_aligned = 'the missing word is airport okay'.split()
err, cnt = texterrors.get_oov_cer(ref_aligned, hyp_aligned, oov_set)
assert err / cnt == 0., err / cnt
print('Reminder: texterrors needs to be installed')
test_levd()
test_wer()
test_oov_cer()
print('Passed!')
| [
"texterrors.lev_distance",
"texterrors.align_texts",
"texterrors.get_oov_cer",
"Levenshtein.distance"
] | [((827, 866), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {}), '(ref, hyp, False)\n', (849, 866), False, 'import texterrors\n'), ((1199, 1238), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {}), '(ref, hyp, False)\n', (1221, 1238), False, 'import texterrors\n'), ((1539, 1578), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {}), '(ref, hyp, False)\n', (1561, 1578), False, 'import texterrors\n'), ((1916, 1974), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {'use_chardiff': '(True)'}), '(ref, hyp, False, use_chardiff=True)\n', (1938, 1974), False, 'import texterrors\n'), ((2149, 2208), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {'use_chardiff': '(False)'}), '(ref, hyp, False, use_chardiff=False)\n', (2171, 2208), False, 'import texterrors\n'), ((2442, 2500), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {'use_chardiff': '(True)'}), '(ref, hyp, False, use_chardiff=True)\n', (2464, 2500), False, 'import texterrors\n'), ((2675, 2734), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {'use_chardiff': '(False)'}), '(ref, hyp, False, use_chardiff=False)\n', (2697, 2734), False, 'import texterrors\n'), ((2952, 3010), 'texterrors.align_texts', 'texterrors.align_texts', (['ref', 'hyp', '(False)'], {'use_chardiff': '(True)'}), '(ref, hyp, False, use_chardiff=True)\n', (2974, 3010), False, 'import texterrors\n'), ((3378, 3435), 'texterrors.get_oov_cer', 'texterrors.get_oov_cer', (['ref_aligned', 'hyp_aligned', 'oov_set'], {}), '(ref_aligned, hyp_aligned, oov_set)\n', (3400, 3435), False, 'import texterrors\n'), ((3634, 3691), 'texterrors.get_oov_cer', 'texterrors.get_oov_cer', (['ref_aligned', 'hyp_aligned', 'oov_set'], {}), '(ref_aligned, hyp_aligned, oov_set)\n', (3656, 3691), False, 'import texterrors\n'), ((237, 266), 'texterrors.lev_distance', 'texterrors.lev_distance', (['a', 'b'], {}), '(a, b)\n', (260, 266), False, 'import texterrors\n'), ((280, 299), 'Levenshtein.distance', 'levd.distance', (['a', 'b'], {}), '(a, b)\n', (293, 299), True, 'import Levenshtein as levd\n')] |
# -*- coding: utf-8 -*-
# Visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020-2021 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from visigoth.map_layers import MapLayer
from visigoth.internal.utils.data.dataarray import DataArray
from visigoth.utils import ContinuousHueManager
class DataGrid(MapLayer):
"""
Create a Data grid plot.
Arguments:
data (list): data values, organised as a list of rows, where each row is an equal size list of column values
Keyword Arguments:
lats (list): a list of the lat values providing the center of each row
lons (list): a list of the lon values providing the center of each column
data_projection(function): a function mapping from WGS84 (lon,lat) to the data projection (lon,lat)
hue_manager(object): a ContinuousHueManager or DiscreteHueManager
sharpen(bool): produce a sharper image at 2x resolution, slower to run
Note:
        If lons/lats are None, this just creates a rectangular image from the data; it assumes that the data
reflects the map projection and boundaries. Don't set these to None unless you know what you are doing.
"""
def __init__(self, data, lats=None, lons=None, data_projection=lambda x:x, hue_manager=None, sharpen=False):
super().__init__()
self.data = DataArray(data)
self.data.check()
self.min_val = self.data.getMinValue()
self.max_val = self.data.getMaxValue()
if hue_manager is None:
hue_manager = ContinuousHueManager(withIntervals=True)
self.setHueManager(hue_manager)
if not self.getHueManager().isDiscrete():
self.getHueManager().allocateHue(self.min_val)
self.getHueManager().allocateHue(self.max_val)
# set up an ImageGrid to handle the real work, with empty data (we'll bind it to the real data at drawing time)
from visigoth.map_layers import ImageGrid
self.imagegrid = ImageGrid(r=[],g=[],b=[],a=[],
lats=lats,lons=lons,data_projection=data_projection,sharpen=sharpen)
def getBoundaries(self):
return self.imagegrid.getBoundaries()
def configureLayer(self, ownermap, width, height, boundaries, projection, zoom_to, fmt):
self.imagegrid.configureLayer(ownermap, width, height, boundaries, projection, zoom_to, fmt)
def getHeight(self):
return self.imagegrid.getHeight()
def getWidth(self):
return self.imagegrid.getWidth()
def build(self,fmt):
super().build(fmt)
if fmt == "html" and self.hue_manager.getAdjustable():
self.imagegrid.setData(self.data)
self.hue_manager.addEventConsumer(self.imagegrid, "hue_scale")
self.imagegrid.build(fmt)
def draw(self, doc, cx, cy):
r_data = []
g_data = []
b_data = []
a_data = []
rows = self.data.getRowCount()
for row_idx in range(rows):
row = self.data.getRowAt(row_idx)
r_row = []
g_row = []
b_row = []
a_row = []
for col_idx in range(len(row)):
value = row[col_idx]
col = self.hue_manager.getHue(value)
# col should be a hex encoded string, either #RRGGBBAA or #RRGGBB
r = int(col[1:3], 16)
g = int(col[3:5], 16)
b = int(col[5:7], 16)
a = 255 if len(col) == 7 else int(col[7:9], 16)
r_row.append(r)
g_row.append(g)
b_row.append(b)
a_row.append(a)
r_data.append(r_row)
g_data.append(g_row)
b_data.append(b_row)
a_data.append(a_row)
self.imagegrid.updateRGBA(r=r_data,g=g_data,b=b_data,a=a_data)
return self.imagegrid.draw(doc,cx,cy)
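# Hedged usage sketch: build a small 2x2 DataGrid layer (the values, lats and
# lons below are invented). Adding the layer to a visigoth Map/Diagram, which
# is not shown here, is what eventually drives configureLayer/build/draw.
if __name__ == "__main__":
    layer = DataGrid(
        data=[[0.1, 0.4], [0.7, 1.0]],
        lats=[51.0, 52.0],
        lons=[-1.0, 0.0])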
| [
"visigoth.map_layers.ImageGrid",
"visigoth.utils.ContinuousHueManager",
"visigoth.internal.utils.data.dataarray.DataArray"
] | [((2432, 2447), 'visigoth.internal.utils.data.dataarray.DataArray', 'DataArray', (['data'], {}), '(data)\n', (2441, 2447), False, 'from visigoth.internal.utils.data.dataarray import DataArray\n'), ((3073, 3183), 'visigoth.map_layers.ImageGrid', 'ImageGrid', ([], {'r': '[]', 'g': '[]', 'b': '[]', 'a': '[]', 'lats': 'lats', 'lons': 'lons', 'data_projection': 'data_projection', 'sharpen': 'sharpen'}), '(r=[], g=[], b=[], a=[], lats=lats, lons=lons, data_projection=\n data_projection, sharpen=sharpen)\n', (3082, 3183), False, 'from visigoth.map_layers import ImageGrid\n'), ((2628, 2668), 'visigoth.utils.ContinuousHueManager', 'ContinuousHueManager', ([], {'withIntervals': '(True)'}), '(withIntervals=True)\n', (2648, 2668), False, 'from visigoth.utils import ContinuousHueManager\n')] |
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import scipy.misc
import numpy as np
from model import IMAE
from modelz import ZGAN
import tensorflow as tf
import h5py
flags = tf.app.flags
flags.DEFINE_integer("epoch", 10000, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.00005, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_string("dataset", "all_vox256_img", "The name of dataset")
flags.DEFINE_integer("real_size", 64, "output point-value voxel grid size in training [64]")
flags.DEFINE_integer("batch_size_input", 16384, "training batch size (virtual, batch_size is the real batch_size) [16384]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("data_dir", "./data", "Root directory of dataset [data]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("ae", False, "True for AE, False for zGAN [False]")
FLAGS = flags.FLAGS
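# Hedged invocation examples (the script name "main.py" is assumed, it is not
# given by this file):
#   python main.py --ae True --train True   # train the IM-AE autoencoder
#   python main.py --ae True                # run IMAE.get_z on the trained AE
#   python main.py --train True             # train the latent ZGAN
#   python main.py                          # sample z from ZGAN, decode with IMAE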
def main(_):
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
#run_config = tf.ConfigProto(gpu_options=gpu_options)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
if FLAGS.ae:
with tf.Session(config=run_config) as sess:
imae = IMAE(
sess,
FLAGS.real_size,
FLAGS.batch_size_input,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
if FLAGS.train:
imae.train(FLAGS)
else:
imae.get_z(FLAGS)
#imae.test_interp(FLAGS)
#imae.test(FLAGS)
else:
if FLAGS.train:
with tf.Session(config=run_config) as sess_z:
zgan = ZGAN(
sess_z,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
zgan.train(FLAGS)
else:
#option 1 generate z
with tf.Session(config=run_config) as sess_z:
zgan = ZGAN(
sess_z,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
generated_z = zgan.get_z(FLAGS, 16)
tf.reset_default_graph()
'''
hdf5_file = h5py.File("temp_z.hdf5", mode='w')
hdf5_file.create_dataset("zs", generated_z.shape, np.float32)
hdf5_file["zs"][...] = generated_z
hdf5_file.close()
'''
with tf.Session(config=run_config) as sess:
imae = IMAE(
sess,
FLAGS.real_size,
FLAGS.batch_size_input,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
imae.test_z(FLAGS, generated_z, 128)
'''
#option 2 use filtered z
hdf5_file = h5py.File("temp_z.hdf5", mode='r')
generated_z = hdf5_file["zs"][:]
hdf5_file.close()
z_num = generated_z.shape[0]
filtered_z = np.copy(generated_z)
t = 0
for tt in range(z_num):
if (os.path.exists(FLAGS.sample_dir+'/'+str(tt)+'_1t.png')):
filtered_z[t] = generated_z[tt]
t += 1
filtered_z = filtered_z[:t]
print('filtered',t)
with tf.Session(config=run_config) as sess:
imae = IMAE(
sess,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
imae.test_z(FLAGS, filtered_z, 256)
'''
if __name__ == '__main__':
tf.app.run()
| [
"os.path.exists",
"tensorflow.reset_default_graph",
"os.makedirs",
"tensorflow.Session",
"model.IMAE",
"modelz.ZGAN",
"tensorflow.ConfigProto",
"tensorflow.app.run"
] | [((1503, 1519), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1517, 1519), True, 'import tensorflow as tf\n'), ((3944, 3956), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3954, 3956), True, 'import tensorflow as tf\n'), ((1218, 1254), 'os.path.exists', 'os.path.exists', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (1232, 1254), False, 'import os\n'), ((1258, 1291), 'os.makedirs', 'os.makedirs', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (1269, 1291), False, 'import os\n'), ((1300, 1332), 'os.path.exists', 'os.path.exists', (['FLAGS.sample_dir'], {}), '(FLAGS.sample_dir)\n', (1314, 1332), False, 'import os\n'), ((1336, 1365), 'os.makedirs', 'os.makedirs', (['FLAGS.sample_dir'], {}), '(FLAGS.sample_dir)\n', (1347, 1365), False, 'import os\n'), ((1584, 1613), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (1594, 1613), True, 'import tensorflow as tf\n'), ((1633, 1836), 'model.IMAE', 'IMAE', (['sess', 'FLAGS.real_size', 'FLAGS.batch_size_input'], {'is_training': 'FLAGS.train', 'dataset_name': 'FLAGS.dataset', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'sample_dir': 'FLAGS.sample_dir', 'data_dir': 'FLAGS.data_dir'}), '(sess, FLAGS.real_size, FLAGS.batch_size_input, is_training=FLAGS.train,\n dataset_name=FLAGS.dataset, checkpoint_dir=FLAGS.checkpoint_dir,\n sample_dir=FLAGS.sample_dir, data_dir=FLAGS.data_dir)\n', (1637, 1836), False, 'from model import IMAE\n'), ((2634, 2658), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2656, 2658), True, 'import tensorflow as tf\n'), ((2029, 2058), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (2039, 2058), True, 'import tensorflow as tf\n'), ((2081, 2245), 'modelz.ZGAN', 'ZGAN', (['sess_z'], {'is_training': 'FLAGS.train', 'dataset_name': 'FLAGS.dataset', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'sample_dir': 'FLAGS.sample_dir', 'data_dir': 'FLAGS.data_dir'}), '(sess_z, is_training=FLAGS.train, dataset_name=FLAGS.dataset,\n checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir,\n data_dir=FLAGS.data_dir)\n', (2085, 2245), False, 'from modelz import ZGAN\n'), ((2343, 2372), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (2353, 2372), True, 'import tensorflow as tf\n'), ((2395, 2559), 'modelz.ZGAN', 'ZGAN', (['sess_z'], {'is_training': 'FLAGS.train', 'dataset_name': 'FLAGS.dataset', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'sample_dir': 'FLAGS.sample_dir', 'data_dir': 'FLAGS.data_dir'}), '(sess_z, is_training=FLAGS.train, dataset_name=FLAGS.dataset,\n checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir,\n data_dir=FLAGS.data_dir)\n', (2399, 2559), False, 'from modelz import ZGAN\n'), ((2855, 2884), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (2865, 2884), True, 'import tensorflow as tf\n'), ((2905, 3108), 'model.IMAE', 'IMAE', (['sess', 'FLAGS.real_size', 'FLAGS.batch_size_input'], {'is_training': 'FLAGS.train', 'dataset_name': 'FLAGS.dataset', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'sample_dir': 'FLAGS.sample_dir', 'data_dir': 'FLAGS.data_dir'}), '(sess, FLAGS.real_size, FLAGS.batch_size_input, is_training=FLAGS.train,\n dataset_name=FLAGS.dataset, checkpoint_dir=FLAGS.checkpoint_dir,\n sample_dir=FLAGS.sample_dir, data_dir=FLAGS.data_dir)\n', (2909, 3108), False, 'from model import IMAE\n')] |
import json
import jsonlines
import os
from config import APP_PATH
from hashlib import md5
class User:
def __init__(self,username = None,password = None):
# initialize fields
self.username = username
self.password = password
def addUser(self):
filepath = APP_PATH+"/appData/Users.jsonl"
        # check if the appData folder exists
if(os.path.exists(APP_PATH+"/appData") == False):
os.makedirs(APP_PATH+"/appData")
        # check if username already exists, make sure username is the primary key
haveuser = self.getUser(self.username)
if(haveuser == True):
return "User Already Exist"
# encode password
enc = md5()
enc.update(self.password.encode("utf8"))
self.password = enc.hexdigest()
        # check if there are already 10 users.
if(os.path.exists(filepath) == True):
with open(filepath, 'r') as fp:
num_lines = sum(1 for line in fp)
if (num_lines>9):
fp.close()
return "There are 10 users in system. Cannot register new user"
fp.close()
# open file
with jsonlines.open(filepath,mode="a") as f:
f.write(json.dumps(self.__dict__,indent = 4)) # insert into file
f.close()
return True
# get user by username
def getUser(self,username):
filepath = APP_PATH+"/appData/Users.jsonl"
if(os.path.exists(filepath)):
haveuser = False
with open(filepath, 'rb') as f:
# search is any match
for row in jsonlines.Reader(f):
user = json.loads(row)
if(user["username"] == username):
self.username = user["username"]
self.password = user["password"]
haveuser = True
break
# if user does not exist
f.close()
return haveuser
else:
# if file not exist
return False
def checkPW(self,tempPassword):
# encode password from parameter
enc = md5()
enc.update(tempPassword.encode("utf8"))
tempPassword = enc.hexdigest()
if(self.password == tempPassword):
return True
else:
return False
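# Hedged usage sketch (the credentials are placeholders): register a user,
# then re-load it by name and verify a password attempt against the stored hash.
if __name__ == "__main__":
    user = User("alice", "s3cret")
    print(user.addUser())               # True, or an explanatory error string
    login = User()
    if login.getUser("alice"):
        print(login.checkPW("s3cret"))  # True only if the MD5 hashes match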
| [
"os.path.exists",
"json.loads",
"hashlib.md5",
"os.makedirs",
"jsonlines.Reader",
"json.dumps",
"jsonlines.open"
] | [((710, 715), 'hashlib.md5', 'md5', ([], {}), '()\n', (713, 715), False, 'from hashlib import md5\n'), ((1476, 1500), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1490, 1500), False, 'import os\n'), ((2197, 2202), 'hashlib.md5', 'md5', ([], {}), '()\n', (2200, 2202), False, 'from hashlib import md5\n'), ((379, 416), 'os.path.exists', 'os.path.exists', (["(APP_PATH + '/appData')"], {}), "(APP_PATH + '/appData')\n", (393, 416), False, 'import os\n'), ((438, 472), 'os.makedirs', 'os.makedirs', (["(APP_PATH + '/appData')"], {}), "(APP_PATH + '/appData')\n", (449, 472), False, 'import os\n'), ((862, 886), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (876, 886), False, 'import os\n'), ((1196, 1230), 'jsonlines.open', 'jsonlines.open', (['filepath'], {'mode': '"""a"""'}), "(filepath, mode='a')\n", (1210, 1230), False, 'import jsonlines\n'), ((1256, 1291), 'json.dumps', 'json.dumps', (['self.__dict__'], {'indent': '(4)'}), '(self.__dict__, indent=4)\n', (1266, 1291), False, 'import json\n'), ((1641, 1660), 'jsonlines.Reader', 'jsonlines.Reader', (['f'], {}), '(f)\n', (1657, 1660), False, 'import jsonlines\n'), ((1689, 1704), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (1699, 1704), False, 'import json\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin
"""Relay to ONNX serialization """
import numpy
import onnx
import onnx.utils
from onnx import numpy_helper, OperatorSetIdProto, defs
import tvm
from tvm.autotvm.graph_tuner.utils.traverse_graph import _expr2graph_impl
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
ONNX_OPSET_VERSONS_SUPPORTED = [11]
def tvm_array_to_list(arr):
return tuple(x.value for x in arr)
def get_onnx_version():
return onnx.__version__
def add_input(data, name, model_container):
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype]
tensor_value_info = onnx.helper.make_tensor_value_info(name, dtype, shape=data.shape)
model_container.add_inputs([tensor_value_info])
data_tensor = numpy_helper.from_array(data, name)
model_container.add_initializers([data_tensor])
class OpConverter(object):
""" Operator converter Base Class.
"""
@classmethod
def convert_attributes(cls, attrs):
"""convert Relay attributes to ONNX attributes.
The derived classes should implement this method
if attributes are required by the operator
otherwise by default no attributes are passed
"""
return {}
@classmethod
def convert(cls, node, model_container, node_list):
attrs = cls.convert_attributes(node['node'].attrs)
node = onnx.helper.make_node(cls.__name__,
node['input_names'],
node['output_names'],
**attrs)
model_container.add_nodes([node])
def rename(op_name):
""" This method creates dynamic operator of name op_name with empty attributes
"""
return type(op_name, (OpConverter,), {})
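# Illustration of the helper above: rename('Relu') returns a fresh OpConverter
# subclass whose class name ('Relu') is exactly what OpConverter.convert passes
# to onnx.helper.make_node as the op type, e.g.
#   Relu = rename('Relu')
#   assert issubclass(Relu, OpConverter) and Relu.__name__ == 'Relu'
# This is how the attribute-free entries of relay_to_onnx_op_mapping below are built.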
class Reshape(object):
""" Operator converter for Reshape.
"""
@classmethod
def convert(cls, node, model_container, node_list):
"""Converts Relay operator Reshape to ONNX operator.
Relay operator accepts shape as attribute but ONNX operator
        accepts it as an input.
"""
shape = numpy.asarray([a.value for a in node['node'].attrs.newshape],
dtype=numpy.int64)
input_name = 'shape{}'.format(node['output_names'][0])
node = onnx.helper.make_node(cls.__name__, [node['input_names'][0], input_name],
node['output_names'])
model_container.add_nodes([node])
add_input(shape, input_name, model_container)
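        # For example (the output name is illustrative): converting a Relay
        # reshape with newshape=(1, -1) for output "node_5" adds an int64
        # initializer named "shapenode_5" holding [1, -1] and emits
        # Reshape(inputs=[<data>, "shapenode_5"], outputs=["node_5"]).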
class Conv(OpConverter):
""" Operator converter for Conv.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'group': attrs.get_int("groups"),
'pads': attrs.get_int_tuple("padding"),
'strides': attrs.get_int_tuple("strides"),
'dilations': attrs.get_int_tuple("dilation"),
'kernel_shape': attrs.get_int_tuple("kernel_size"),
}
class MaxPool(OpConverter):
""" Operator converter for MaxPool.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'pads': attrs.get_int_tuple("padding") + attrs.get_int_tuple("padding"),
'strides': attrs.get_int_tuple("strides"),
'kernel_shape': attrs.get_int_tuple("pool_size"),
}
class Transpose(OpConverter):
""" Operator converter for Transpose.
"""
@classmethod
def convert_attributes(cls, attrs):
return {'perm': attrs.get_int_tuple("axes")} if attrs["axes"] else {}
class MatMul(OpConverter):
""" Operator converter for MatMul.
"""
@classmethod
def convert(cls, node, model_container, node_list):
output_name = 'inter{}'.format(node['output_names'][0])
transpose_node = onnx.helper.make_node(Transpose.__name__,
[node['input_names'][1]],
[output_name],
**{'perm': (1, 0)})
model_container.add_nodes([transpose_node])
inputs = [node['input_names'][0], output_name]
matmul_node = onnx.helper.make_node(cls.__name__, inputs, node['output_names'])
model_container.add_nodes([matmul_node])
class Flatten(OpConverter):
""" Operator converter for Flatten.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'axis': 1,
}
class BatchNormalization(OpConverter):
""" Operator converter for BatchNormalization.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'epsilon': float(attrs.get_str('epsilon')),
'axis': float(attrs.get_int('axis')),
}
@classmethod
def convert(cls, node, model_container, node_list):
"""Converts Relay operator batch_norm to ONNX operator.
Relay operator has property axis to handle data in NHWC format.
"""
attrs = cls.convert_attributes(node['node'].attrs)
transpose_out_name = node['input_names'][0]
output_names = node['output_names']
# axis==3 means channel is specified along the 3rd axis
if attrs['axis'] == 3:
transpose_out_name = 'transpose_{}'.format(node['output_names'][0])
node_transposed = onnx.helper.make_node(Transpose.__name__,
[node['input_names'][0]],
[transpose_out_name],
**{'perm': [0, 3, 1, 2]})
model_container.add_nodes([node_transposed])
output_names = ['batch_norm_{}'.format(node['output_names'][0])]
batch_norm_node = onnx.helper.make_node(cls.__name__,
[transpose_out_name] + node['input_names'][1:],
output_names,
**{'epsilon': attrs['epsilon']})
model_container.add_nodes([batch_norm_node])
if attrs['axis'] == 3:
node_transposed = onnx.helper.make_node(Transpose.__name__,
output_names,
node['output_names'],
**{'perm': [0, 2, 3, 1]})
model_container.add_nodes([node_transposed])
class Dropout(OpConverter):
""" Operator converter for Dropout.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'ratio': float(attrs.get_str('rate')),
}
class AveragePool(MaxPool):
""" Operator converter for AveragePool.
"""
class Concat(OpConverter):
""" Operator converter for Concat.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'axis': attrs.get_int("axis"),
}
class BiasAdd(OpConverter):
""" Operator converter for BiasAdd.
"""
@classmethod
def convert(cls, node, model_container, node_list):
input_node = node_list[node['inputs'][0][0]]
data_ndim = len(input_node['types'][0].shape)
axis = node['node'].attrs.get_int("axis")
if axis < 0:
axis = axis + data_ndim
new_axes = data_ndim - axis - 1
if new_axes:
output_name = 'inter{}'.format(node['output_names'][0])
unsqueeze_node = onnx.helper.make_node('Unsqueeze',
[node['input_names'][1]],
[output_name],
**{'axes': tuple(range(1, new_axes + 1))})
model_container.add_nodes([unsqueeze_node])
else:
output_name = node['input_names'][1]
inputs = [node['input_names'][0], output_name]
matmul_node = onnx.helper.make_node('Add', inputs, node['output_names'])
model_container.add_nodes([matmul_node])
class ReduceMean(OpConverter):
""" Operator converter for ReduceMean.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'axes': attrs.axis,
'keepdims': 0 if bool(attrs.get_int("keepdims", 0)) is False else 1
}
@classmethod
def convert(cls, node, model_container, node_list):
input_node = node_list[node['inputs'][0][0]]
shape = input_node['types'][0].shape
axis = node['node'].attrs.axis
axis = list(range(shape.size())) if not axis else tvm_array_to_list(axis)
exclude = 0 if not bool(node['node'].attrs.exclude) else 1
keepdims = 0 if not bool(node['node'].attrs.keepdims) else 1
if exclude:
all_axis = list(range(len(shape)))
axis = set(all_axis) - set(axis)
node = onnx.helper.make_node(cls.__name__,
node['input_names'],
node['output_names'],
**{"axes": axis,
"keepdims": keepdims})
model_container.add_nodes([node])
class Pad(OpConverter):
""" Operator converter for Pad.
"""
@classmethod
def convert_attributes(cls, attrs):
before = []
after = []
for axis_pads in attrs.pad_width:
before.append(axis_pads[0])
after.append(axis_pads[1])
pads = before + after
pads = numpy.asarray(pads, dtype=pads[0].dtype)
return {
'pads': pads,
'mode': attrs.get_str('pad_mode'),
'constant_value': attrs.pad_value
}
@classmethod
def convert(cls, node, model_container, node_list):
"""Converts Relay operator Pad to ONNX operator.
Relay operator accepts pads as attribute but ONNX operator
        accepts it as an input.
"""
attrs = cls.convert_attributes(node['node'].attrs)
data = numpy.asarray(attrs['pads'], dtype=attrs['pads'][0].dtype).astype(numpy.int64)
input_name = 'pads_{}'.format(node['output_names'][0])
value = numpy.dtype(node['types'][0].dtype).type(attrs['constant_value'])
input_value_name = 'value_{}'.format(node['output_names'][0])
add_input(data, input_name, model_container)
add_input(value, input_value_name, model_container)
input_names = [node['input_names'][0], input_name, input_value_name]
node = onnx.helper.make_node(cls.__name__, input_names, node['output_names'])
model_container.add_nodes([node])
class Softmax(OpConverter):
""" Operator converter for SoftMax.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'axis': attrs.axis,
}
class Squeeze(OpConverter):
""" Operator converter for Squeeze.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'axes': attrs.axis,
}
@classmethod
def convert(cls, node, model_container, node_list):
input_node = node_list[node['inputs'][0][0]]
shape = input_node['types'][0].shape
axis = node['node'].attrs.get_int("axis")
if not axis:
axis = []
for axis_idx, val in enumerate(shape):
if val.value == 1:
axis.append(axis_idx)
else:
axis = node['node'].attrs.get_int_tuple("axis")
node = onnx.helper.make_node(cls.__name__,
node['input_names'],
node['output_names'],
**{"axes": axis})
model_container.add_nodes([node])
class Slice(OpConverter):
""" Operator converter for Slice.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'starts': attrs.get_int_tuple('begin'),
'ends': attrs.get_int_tuple('end'),
'steps': attrs.get_int_tuple('strides')
}
@classmethod
def convert(cls, node, model_container, node_list):
attrs = cls.convert_attributes(node['node'].attrs)
input_node = node_list[node['inputs'][0][0]]
shape = input_node['types'][0].shape
starts = list(attrs['starts'])
ends = list(attrs['ends'])
for i in range(len(starts), len(shape)):
starts.append(0)
for i in range(len(ends), len(shape)):
ends.append(shape[i] + 1)
starts = numpy.asarray(starts).astype(numpy.int64)
starts_name = 'starts_{}'.format(node['output_names'][0])
add_input(starts, starts_name, model_container)
ends = numpy.asarray(ends).astype(numpy.int64)
ends_name = 'ends_{}'.format(node['output_names'][0])
add_input(ends, ends_name, model_container)
input_names = node['input_names'] + [starts_name, ends_name]
if attrs['steps']:
axes = list(range(len(shape)))
attrs['axes'] = axes
assert len(axes) == len(attrs['steps']), "axes and steps should be of same size"
steps = numpy.asarray(attrs['steps']).astype(numpy.int64)
steps_name = 'steps_{}'.format(node['output_names'][0])
add_input(steps, steps_name, model_container)
axes = numpy.asarray(attrs['axes']).astype(numpy.int64)
axes_name = 'axes_{}'.format(node['output_names'][0])
add_input(axes, axes_name, model_container)
input_names = input_names + [axes_name, steps_name]
slice_node = onnx.helper.make_node(cls.__name__,
input_names,
node['output_names'])
model_container.add_nodes([slice_node])
class ConstantOfShapeZeros(OpConverter):
""" Operator converter for ConstantOfShape.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'value': 0
}
@classmethod
def convert(cls, node, model_container, node_list):
attrs = cls.convert_attributes(node['node'].attrs)
input_node = node_list[node['inputs'][0][0]]
shape = input_node['types'][0].shape
dtype = input_node['types'][0].dtype
input_shape_name = 'shape_{}'.format(node['output_names'][0])
shape = numpy.asarray(shape).astype(numpy.int64)
add_input(shape, input_shape_name, model_container)
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)]
tensor_value = onnx.helper.make_tensor("value", dtype,
[1], [attrs['value']])
node = onnx.helper.make_node('ConstantOfShape',
[input_shape_name],
node['output_names'],
**{'value': tensor_value})
model_container.add_nodes([node])
class ConstantOfShapeOnes(ConstantOfShapeZeros):
""" Operator converter for ConstantOfShape.
"""
@classmethod
def convert_attributes(cls, attrs):
return {
'value': 1
}
relay_to_onnx_op_mapping = {
'reshape': Reshape,
'nn.conv2d': Conv,
'add': rename('Add'),
'nn.relu': rename('Relu'),
'transpose': Transpose,
'nn.dense': MatMul,
'nn.max_pool2d': MaxPool,
'nn.batch_flatten': Flatten,
'multiply': rename('Mul'),
'nn.bias_add': BiasAdd,
'nn.batch_norm': BatchNormalization,
'nn.global_avg_pool2d': rename('GlobalAveragePool'),
'concatenate': Concat,
'nn.dropout': Dropout,
'nn.avg_pool2d': AveragePool,
'divide': rename('Div'),
'mean': ReduceMean,
'nn.pad': Pad,
'nn.softmax': Softmax,
'squeeze': Squeeze,
'strided_slice': Slice,
'greater': rename('Greater'),
'less': rename('Less'),
'equal': rename('Equal'),
'zeros_like': ConstantOfShapeZeros,
'ones_like': ConstantOfShapeOnes,
'subtract': rename('Sub')
}
class ModelContainer(object):
""" A container class to hold different attributes of ONNX model graph
"""
def __init__(self, name, opset_version):
self._name = name
self._opset_version = opset_version
self._inputs = []
self._outputs = []
self._nodes = []
self._initializers = []
def add_inputs(self, inputs):
self._inputs.extend(inputs)
def add_outputs(self, outputs):
self._outputs.extend(outputs)
def add_nodes(self, nodes):
self._nodes.extend(nodes)
def add_initializers(self, initializers):
self._initializers.extend(initializers)
def _get_opsets(self):
opsets = []
imp = OperatorSetIdProto()
imp.version = self._opset_version
opsets.append(imp)
return opsets
def make_model(self):
""" Creates the onnx model from the graph """
onnx_graph = onnx.helper.make_graph(
self._nodes,
self._name,
self._inputs,
self._outputs,
self._initializers
)
kwargs = {}
kwargs["opset_imports"] = self._get_opsets()
kwargs["producer_name"] = 'TVM Relay'
kwargs["producer_name"] = tvm.__version__
return onnx.helper.make_model(onnx_graph, **kwargs)
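# Illustrative sketch of how ModelContainer is meant to be driven (the names below
# are placeholders; the converter class that follows is what actually uses it):
#
#   mc = ModelContainer("demo", opset_version=11)
#   mc.add_inputs([...])    # onnx ValueInfoProto objects for graph inputs
#   mc.add_nodes([...])     # onnx NodeProto operators
#   mc.add_outputs([...])   # onnx ValueInfoProto objects for graph outputs
#   onnx_model = mc.make_model()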
class RelayToONNXConverter(object):
"""A helper class converting topologically sorted Relay nodes to ONNX model
Parameters
----------
name : str
name of the model
node_list : list
topologically sorted Relay Node entry list
"""
def __init__(self, name, node_list, params, opset_version):
        self._name = name
self._mc = ModelContainer(name, opset_version)
self._node_list = node_list
self._params = params
def convert_to_onnx(self):
""" Loop through topologically sorted list of Relay nodes and generate a ONNX model"""
for idx, node_entry in enumerate(self._node_list):
out_idx = idx
node = node_entry['node']
if isinstance(node, Call):
self._add_node(node_entry, idx)
elif isinstance(node, Var):
self._add_input(node_entry, idx)
elif isinstance(node, Constant):
self._add_constant_input(node_entry, idx)
elif isinstance(node, (TupleGetItem, Tuple)):
out_idx = idx - 1 # TODO: Need to work on this.
# No equivalent ONNX operator found yet
else:
raise NotImplementedError("Relay Node of type {0} is not "
"implemented yet".format(type(node)))
if idx == len(self._node_list) - 1:
self._add_output(self._node_list[out_idx], out_idx)
model = self._mc.make_model()
polished_model = onnx.utils.polish_model(model)
return polished_model
def _tuple_to_name(self, input):
"""convert tuple of node indexes to string"""
return 'node_{0}'.format(input[0])
def _add_node(self, node_entry, idx):
"""Convert Relay operator node to ONNX operator and add it to container nodes list"""
if node_entry['op'].name not in relay_to_onnx_op_mapping:
raise NotImplementedError("Currently the operator '{0}' is "
"not supported.".format(node_entry['op'].name))
converter = relay_to_onnx_op_mapping[node_entry['op'].name]()
node_entry['output_names'] = [self._tuple_to_name([idx, 0, 0])]
node_entry['input_names'] = []
for input_idx_tuple in node_entry['inputs']:
if self._node_list[input_idx_tuple[0]]['name']:
node_entry['input_names'].append(self._node_list[input_idx_tuple[0]]['name'])
else:
node_entry['input_names'].append(self._tuple_to_name(input_idx_tuple))
converter.convert(node_entry, self._mc, self._node_list)
def _add_params(self, node_entry, idx):
"""Add param value to initializer and name to inputs"""
param_name = node_entry['name']
        assert param_name in self._params, "The parameter {0} is not present " \
                                           "in the params dict provided.".format(param_name)
value = self._params[param_name]
numpy_array = value.asnumpy()
tensor = numpy_helper.from_array(numpy_array, param_name)
self._mc.add_initializers([tensor])
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]
input = onnx.helper.make_tensor_value_info(param_name,
dtype,
shape=numpy_array.shape)
self._mc.add_inputs([input])
def _add_constant_input(self, node_entry, idx):
"""Create named input for constant and add it to container inputs.
If input is a parameter then add to param
"""
node = node_entry['node']
if not node_entry['name']:
node_entry['name'] = self._tuple_to_name([idx, 0, 0])
param_name = node_entry['name']
self._params[param_name] = node.data
self._add_params(node_entry, idx)
def _add_input(self, node_entry, idx):
"""Add input node to container inputs. If input is a parameter then add to param"""
if node_entry['name'] in self._params:
self._add_params(node_entry, idx)
else:
type = node_entry['types'][0]
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(type.dtype)]
input = onnx.helper.make_tensor_value_info(node_entry['name'],
dtype,
shape=type.concrete_shape)
self._mc.add_inputs([input])
def _add_output(self, node_entry, idx):
"""Add output node to container outputs."""
type = node_entry['types'][0]
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(type.dtype)]
output = onnx.helper.make_tensor_value_info(self._tuple_to_name([idx, 0, 0]),
dtype,
shape=type.concrete_shape)
self._mc.add_outputs([output])
def to_onnx(relay_module, params, name, opset_version=11, path=None):
"""Convert a Relay Function Module into an equivalent ONNX and serialize it to the path
Parameters
----------
relay_module : tvm.relay.Module
The relay module object
params : dict
dict of the parameter names and NDarray values
path : str
The path where ONNX model will be saved
Returns
-------
inferred_model : tvm.relay.Module
The relay module
"""
if opset_version not in ONNX_OPSET_VERSONS_SUPPORTED:
raise NotImplementedError("Currently only opset version 11 is supported.")
if opset_version > defs.onnx_opset_version():
raise Exception("The ONNX package installed of version {} does not support the opset "
"version {}. Upgrade the ONNX package to latest version.".format(
get_onnx_version(), opset_version))
node_list = [] # ONNX needs a topologically sorted list of nodes
node_dict = {}
_expr2graph_impl(relay_module["main"], [], node_dict, node_list)
converter = RelayToONNXConverter(name, node_list, params, opset_version)
onnx_model = converter.convert_to_onnx()
if path:
onnx.save(onnx_model, path)
return onnx_model
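# Illustrative usage sketch (the module `mod`, the `params` dict and the file name
# below are placeholders): convert a Relay module and write the ONNX file in one call.
#
#   onnx_model = to_onnx(mod, params, name="my_model", path="my_model.onnx")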
| [
"onnx.helper.make_graph",
"tvm.autotvm.graph_tuner.utils.traverse_graph._expr2graph_impl",
"onnx.OperatorSetIdProto",
"onnx.helper.make_node",
"onnx.save",
"onnx.numpy_helper.from_array",
"onnx.helper.make_tensor_value_info",
"numpy.asarray",
"onnx.defs.onnx_opset_version",
"onnx.helper.make_model",
"onnx.helper.make_tensor",
"onnx.utils.polish_model",
"numpy.dtype"
] | [((1477, 1542), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['name', 'dtype'], {'shape': 'data.shape'}), '(name, dtype, shape=data.shape)\n', (1511, 1542), False, 'import onnx\n'), ((1613, 1648), 'onnx.numpy_helper.from_array', 'numpy_helper.from_array', (['data', 'name'], {}), '(data, name)\n', (1636, 1648), False, 'from onnx import numpy_helper, OperatorSetIdProto, defs\n'), ((24355, 24419), 'tvm.autotvm.graph_tuner.utils.traverse_graph._expr2graph_impl', '_expr2graph_impl', (["relay_module['main']", '[]', 'node_dict', 'node_list'], {}), "(relay_module['main'], [], node_dict, node_list)\n", (24371, 24419), False, 'from tvm.autotvm.graph_tuner.utils.traverse_graph import _expr2graph_impl\n'), ((2240, 2332), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', "node['input_names']", "node['output_names']"], {}), "(cls.__name__, node['input_names'], node[\n 'output_names'], **attrs)\n", (2261, 2332), False, 'import onnx\n'), ((2982, 3067), 'numpy.asarray', 'numpy.asarray', (["[a.value for a in node['node'].attrs.newshape]"], {'dtype': 'numpy.int64'}), "([a.value for a in node['node'].attrs.newshape], dtype=numpy.int64\n )\n", (2995, 3067), False, 'import numpy\n'), ((3171, 3270), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', "[node['input_names'][0], input_name]", "node['output_names']"], {}), "(cls.__name__, [node['input_names'][0], input_name],\n node['output_names'])\n", (3192, 3270), False, 'import onnx\n'), ((4654, 4761), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Transpose.__name__', "[node['input_names'][1]]", '[output_name]'], {}), "(Transpose.__name__, [node['input_names'][1]], [\n output_name], **{'perm': (1, 0)})\n", (4675, 4761), False, 'import onnx\n'), ((5028, 5093), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', 'inputs', "node['output_names']"], {}), "(cls.__name__, inputs, node['output_names'])\n", (5049, 5093), False, 'import onnx\n'), ((6639, 6774), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', "([transpose_out_name] + node['input_names'][1:])", 'output_names'], {}), "(cls.__name__, [transpose_out_name] + node[\n 'input_names'][1:], output_names, **{'epsilon': attrs['epsilon']})\n", (6660, 6774), False, 'import onnx\n'), ((8839, 8897), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""', 'inputs', "node['output_names']"], {}), "('Add', inputs, node['output_names'])\n", (8860, 8897), False, 'import onnx\n'), ((9785, 9908), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', "node['input_names']", "node['output_names']"], {}), "(cls.__name__, node['input_names'], node[\n 'output_names'], **{'axes': axis, 'keepdims': keepdims})\n", (9806, 9908), False, 'import onnx\n'), ((10430, 10470), 'numpy.asarray', 'numpy.asarray', (['pads'], {'dtype': 'pads[0].dtype'}), '(pads, dtype=pads[0].dtype)\n', (10443, 10470), False, 'import numpy\n'), ((11439, 11509), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', 'input_names', "node['output_names']"], {}), "(cls.__name__, input_names, node['output_names'])\n", (11460, 11509), False, 'import onnx\n'), ((12425, 12526), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', "node['input_names']", "node['output_names']"], {}), "(cls.__name__, node['input_names'], node[\n 'output_names'], **{'axes': axis})\n", (12446, 12526), False, 'import onnx\n'), ((14549, 14619), 'onnx.helper.make_node', 'onnx.helper.make_node', (['cls.__name__', 'input_names', 
"node['output_names']"], {}), "(cls.__name__, input_names, node['output_names'])\n", (14570, 14619), False, 'import onnx\n'), ((15520, 15582), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['"""value"""', 'dtype', '[1]', "[attrs['value']]"], {}), "('value', dtype, [1], [attrs['value']])\n", (15543, 15582), False, 'import onnx\n'), ((15646, 15760), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ConstantOfShape"""', '[input_shape_name]', "node['output_names']"], {}), "('ConstantOfShape', [input_shape_name], node[\n 'output_names'], **{'value': tensor_value})\n", (15667, 15760), False, 'import onnx\n'), ((17684, 17704), 'onnx.OperatorSetIdProto', 'OperatorSetIdProto', ([], {}), '()\n', (17702, 17704), False, 'from onnx import numpy_helper, OperatorSetIdProto, defs\n'), ((17898, 17998), 'onnx.helper.make_graph', 'onnx.helper.make_graph', (['self._nodes', 'self._name', 'self._inputs', 'self._outputs', 'self._initializers'], {}), '(self._nodes, self._name, self._inputs, self._outputs,\n self._initializers)\n', (17920, 17998), False, 'import onnx\n'), ((18250, 18294), 'onnx.helper.make_model', 'onnx.helper.make_model', (['onnx_graph'], {}), '(onnx_graph, **kwargs)\n', (18272, 18294), False, 'import onnx\n'), ((19837, 19867), 'onnx.utils.polish_model', 'onnx.utils.polish_model', (['model'], {}), '(model)\n', (19860, 19867), False, 'import onnx\n'), ((21369, 21417), 'onnx.numpy_helper.from_array', 'numpy_helper.from_array', (['numpy_array', 'param_name'], {}), '(numpy_array, param_name)\n', (21392, 21417), False, 'from onnx import numpy_helper, OperatorSetIdProto, defs\n'), ((21549, 21627), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['param_name', 'dtype'], {'shape': 'numpy_array.shape'}), '(param_name, dtype, shape=numpy_array.shape)\n', (21583, 21627), False, 'import onnx\n'), ((23985, 24010), 'onnx.defs.onnx_opset_version', 'defs.onnx_opset_version', ([], {}), '()\n', (24008, 24010), False, 'from onnx import numpy_helper, OperatorSetIdProto, defs\n'), ((24564, 24591), 'onnx.save', 'onnx.save', (['onnx_model', 'path'], {}), '(onnx_model, path)\n', (24573, 24591), False, 'import onnx\n'), ((6206, 6326), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Transpose.__name__', "[node['input_names'][0]]", '[transpose_out_name]'], {}), "(Transpose.__name__, [node['input_names'][0]], [\n transpose_out_name], **{'perm': [0, 3, 1, 2]})\n", (6227, 6326), False, 'import onnx\n'), ((7029, 7137), 'onnx.helper.make_node', 'onnx.helper.make_node', (['Transpose.__name__', 'output_names', "node['output_names']"], {}), "(Transpose.__name__, output_names, node['output_names'\n ], **{'perm': [0, 2, 3, 1]})\n", (7050, 7137), False, 'import onnx\n'), ((15477, 15495), 'numpy.dtype', 'numpy.dtype', (['dtype'], {}), '(dtype)\n', (15488, 15495), False, 'import numpy\n'), ((22605, 22698), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (["node_entry['name']", 'dtype'], {'shape': 'type.concrete_shape'}), "(node_entry['name'], dtype, shape=type.\n concrete_shape)\n", (22639, 22698), False, 'import onnx\n'), ((23033, 23056), 'numpy.dtype', 'numpy.dtype', (['type.dtype'], {}), '(type.dtype)\n', (23044, 23056), False, 'import numpy\n'), ((10939, 10997), 'numpy.asarray', 'numpy.asarray', (["attrs['pads']"], {'dtype': "attrs['pads'][0].dtype"}), "(attrs['pads'], dtype=attrs['pads'][0].dtype)\n", (10952, 10997), False, 'import numpy\n'), ((11097, 11132), 'numpy.dtype', 'numpy.dtype', (["node['types'][0].dtype"], {}), "(node['types'][0].dtype)\n", 
(11108, 11132), False, 'import numpy\n'), ((13473, 13494), 'numpy.asarray', 'numpy.asarray', (['starts'], {}), '(starts)\n', (13486, 13494), False, 'import numpy\n'), ((13653, 13672), 'numpy.asarray', 'numpy.asarray', (['ends'], {}), '(ends)\n', (13666, 13672), False, 'import numpy\n'), ((15323, 15343), 'numpy.asarray', 'numpy.asarray', (['shape'], {}), '(shape)\n', (15336, 15343), False, 'import numpy\n'), ((22560, 22583), 'numpy.dtype', 'numpy.dtype', (['type.dtype'], {}), '(type.dtype)\n', (22571, 22583), False, 'import numpy\n'), ((14095, 14124), 'numpy.asarray', 'numpy.asarray', (["attrs['steps']"], {}), "(attrs['steps'])\n", (14108, 14124), False, 'import numpy\n'), ((14291, 14319), 'numpy.asarray', 'numpy.asarray', (["attrs['axes']"], {}), "(attrs['axes'])\n", (14304, 14319), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import unittest
from numpy.testing import assert_allclose
try:
from .context import data_dir # If mripy is importable: python -m mripy.tests.test_io
except ValueError: # Attempted relative import in non-package
from context import data_dir # If not importable: cd mripy/tests; python -m test_io
from mripy import io
from os import path
import os, glob, subprocess
import numpy as np
class test_io(unittest.TestCase):
def test_Mask(self):
mask_file = path.join(data_dir, 'brain_mask', 'brain_mask+orig')
data_file = path.join(data_dir, 'brain_mask', 'gre*.volreg+orig.HEAD')
mask = io.Mask(mask_file)
# Test dump
x = mask.dump(data_file)
self.assertEqual(x.shape, (564361,4))
mask2 = io.MaskDumper(mask_file)
y = mask2.dump(data_file)
assert_allclose(x, y)
# Test undump
max_idx = np.argmax(x, axis=1) + 1
mask.undump('test_undump', max_idx, method='nibabel')
self.assertEqual(subprocess.check_output('3dinfo -orient test_undump+orig', shell=True), b'RSA\n')
assert_allclose(mask.dump('test_undump+orig.HEAD'), max_idx)
for f in glob.glob('test_undump+orig.*'):
os.remove(f)
# Test constrain
smaller, sel0 = mask.near(5, 45, -17, 12, return_selector=True)
sel = mask.infer_selector(smaller)
smaller.undump('test_constrain', max_idx[sel])
assert_allclose(smaller.dump('test_constrain+orig.HEAD'), max_idx[sel0])
for f in glob.glob('test_constrain+orig.*'):
os.remove(f)
if __name__ == '__main__':
unittest.main()
| [
"subprocess.check_output",
"mripy.io.MaskDumper",
"numpy.testing.assert_allclose",
"os.path.join",
"numpy.argmax",
"unittest.main",
"mripy.io.Mask",
"glob.glob",
"os.remove"
] | [((1737, 1752), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1750, 1752), False, 'import unittest\n'), ((602, 654), 'os.path.join', 'path.join', (['data_dir', '"""brain_mask"""', '"""brain_mask+orig"""'], {}), "(data_dir, 'brain_mask', 'brain_mask+orig')\n", (611, 654), False, 'from os import path\n'), ((675, 733), 'os.path.join', 'path.join', (['data_dir', '"""brain_mask"""', '"""gre*.volreg+orig.HEAD"""'], {}), "(data_dir, 'brain_mask', 'gre*.volreg+orig.HEAD')\n", (684, 733), False, 'from os import path\n'), ((749, 767), 'mripy.io.Mask', 'io.Mask', (['mask_file'], {}), '(mask_file)\n', (756, 767), False, 'from mripy import io\n'), ((883, 907), 'mripy.io.MaskDumper', 'io.MaskDumper', (['mask_file'], {}), '(mask_file)\n', (896, 907), False, 'from mripy import io\n'), ((950, 971), 'numpy.testing.assert_allclose', 'assert_allclose', (['x', 'y'], {}), '(x, y)\n', (965, 971), False, 'from numpy.testing import assert_allclose\n'), ((1292, 1323), 'glob.glob', 'glob.glob', (['"""test_undump+orig.*"""'], {}), "('test_undump+orig.*')\n", (1301, 1323), False, 'import os, glob, subprocess\n'), ((1643, 1677), 'glob.glob', 'glob.glob', (['"""test_constrain+orig.*"""'], {}), "('test_constrain+orig.*')\n", (1652, 1677), False, 'import os, glob, subprocess\n'), ((1012, 1032), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (1021, 1032), True, 'import numpy as np\n'), ((1124, 1194), 'subprocess.check_output', 'subprocess.check_output', (['"""3dinfo -orient test_undump+orig"""'], {'shell': '(True)'}), "('3dinfo -orient test_undump+orig', shell=True)\n", (1147, 1194), False, 'import os, glob, subprocess\n'), ((1337, 1349), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1346, 1349), False, 'import os, glob, subprocess\n'), ((1691, 1703), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1700, 1703), False, 'import os, glob, subprocess\n')] |
from . import social
from app.modules.base.base_handler import BaseHandler
from app.modules.vendor.pre_request.flask import filter_params
from app.modules.vendor.pre_request.filter_rules import Rule
from app.models.social.like import LikeModel
from app.models.account.user_info import UserInfoModel
from app.helper.response import *
class IndexHandler(BaseHandler):
rule = {
"share_id": Rule(direct_type=int),
"last_id": Rule(direct_type=int, allow_empty=True, default=0)
}
@filter_params(get=rule)
def get(self, params):
query = LikeModel.query.filter_by(share_id=params["share_id"], status=1)
if params["last_id"]:
query = query.filter(LikeModel.like_id < params["last_id"])
like_model_list = query.order_by(LikeModel.like_id.desc()).limit(20).all()
result = list()
for model in like_model_list:
user_info = UserInfoModel.query_user_model_by_id(model.user_id)
user_info = UserInfoModel.format_user_info(user_info)
user_info["like_id"] = model.like_id
result.append(user_info)
return json_success_response(result)
social.add_url_rule("/getlikelist/index", view_func=IndexHandler.as_view("get_like_list_index"))
| [
"app.models.social.like.LikeModel.query.filter_by",
"app.modules.vendor.pre_request.filter_rules.Rule",
"app.models.account.user_info.UserInfoModel.format_user_info",
"app.models.social.like.LikeModel.like_id.desc",
"app.modules.vendor.pre_request.flask.filter_params",
"app.models.account.user_info.UserInfoModel.query_user_model_by_id"
] | [((510, 533), 'app.modules.vendor.pre_request.flask.filter_params', 'filter_params', ([], {'get': 'rule'}), '(get=rule)\n', (523, 533), False, 'from app.modules.vendor.pre_request.flask import filter_params\n'), ((405, 426), 'app.modules.vendor.pre_request.filter_rules.Rule', 'Rule', ([], {'direct_type': 'int'}), '(direct_type=int)\n', (409, 426), False, 'from app.modules.vendor.pre_request.filter_rules import Rule\n'), ((447, 497), 'app.modules.vendor.pre_request.filter_rules.Rule', 'Rule', ([], {'direct_type': 'int', 'allow_empty': '(True)', 'default': '(0)'}), '(direct_type=int, allow_empty=True, default=0)\n', (451, 497), False, 'from app.modules.vendor.pre_request.filter_rules import Rule\n'), ((577, 641), 'app.models.social.like.LikeModel.query.filter_by', 'LikeModel.query.filter_by', ([], {'share_id': "params['share_id']", 'status': '(1)'}), "(share_id=params['share_id'], status=1)\n", (602, 641), False, 'from app.models.social.like import LikeModel\n'), ((915, 966), 'app.models.account.user_info.UserInfoModel.query_user_model_by_id', 'UserInfoModel.query_user_model_by_id', (['model.user_id'], {}), '(model.user_id)\n', (951, 966), False, 'from app.models.account.user_info import UserInfoModel\n'), ((991, 1032), 'app.models.account.user_info.UserInfoModel.format_user_info', 'UserInfoModel.format_user_info', (['user_info'], {}), '(user_info)\n', (1021, 1032), False, 'from app.models.account.user_info import UserInfoModel\n'), ((785, 809), 'app.models.social.like.LikeModel.like_id.desc', 'LikeModel.like_id.desc', ([], {}), '()\n', (807, 809), False, 'from app.models.social.like import LikeModel\n')] |
from splitwise.debt import Debt
from splitwise.balance import Balance
class Group(object):
def __init__(self,data=None):
from splitwise.user import Friend
if data:
self.id = data["id"]
self.name = data["name"]
self.updated_at = data["updated_at"]
self.simplify_by_default = data["simplify_by_default"]
if "group_type" in data:
self.group_type = data["group_type"]
else:
self.group_type = None
if "whiteboard" in data:
self.whiteboard = data["whiteboard"]
else:
self.whiteboard = None
if "invite_link" in data:
self.invite_link = data["invite_link"]
else:
self.invite_link = None
if "country_code" in data:
self.country_code = data["country_code"]
else:
self.country_code = None
self.original_debts = []
for debt in data["original_debts"]:
self.original_debts.append(Debt(debt))
self.simplified_debts = []
for debt in data["simplified_debts"]:
self.simplified_debts.append(Debt(debt))
self.members = []
for member in data["members"]:
self.members.append(Friend(member))
def getId(self):
return self.id
def getName(self):
return self.name
def getUpdatedAt(self):
return self.updated_at
def getWhiteBoard(self):
return self.whiteboard
def isSimplifiedByDefault(self):
return self.simplify_by_default
def getMembers(self):
return self.members
def getOriginalDebts(self):
return self.original_debts
def getType(self):
return self.group_type
def getSimplifiedDebts(self):
return self.simplified_debts
def getInviteLink(self):
return self.invite_link
def setName(self, name):
self.name = name
def setGroupType(self, group_type):
self.group_type = group_type
def setCountryCode(self, country_code):
self.country_code = country_code
def setMembers(self, members):
self.members = members
class FriendGroup(object):
def __init__(self,data=None):
self.id = data["group_id"]
self.balances = []
for balance in data["balance"]:
self.balances.append(Balance(balance))
def getId(self):
return self.id
def getBalances(self):
        return self.balances
| [
"splitwise.balance.Balance",
"splitwise.debt.Debt",
"splitwise.user.Friend"
] | [((2508, 2524), 'splitwise.balance.Balance', 'Balance', (['balance'], {}), '(balance)\n', (2515, 2524), False, 'from splitwise.balance import Balance\n'), ((1131, 1141), 'splitwise.debt.Debt', 'Debt', (['debt'], {}), '(debt)\n', (1135, 1141), False, 'from splitwise.debt import Debt\n'), ((1278, 1288), 'splitwise.debt.Debt', 'Debt', (['debt'], {}), '(debt)\n', (1282, 1288), False, 'from splitwise.debt import Debt\n'), ((1400, 1414), 'splitwise.user.Friend', 'Friend', (['member'], {}), '(member)\n', (1406, 1414), False, 'from splitwise.user import Friend\n')] |
from ttracker.utils import timestamp_to_datetime
from datetime import datetime
def test_timestamp_to_datetime():
timestamp = '637304631881165025'
expected = datetime(2020, 7, 16, 2, 26, 28, 116503)
result = timestamp_to_datetime(timestamp)
assert result == expected
| [
"datetime.datetime",
"ttracker.utils.timestamp_to_datetime"
] | [((167, 207), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(16)', '(2)', '(26)', '(28)', '(116503)'], {}), '(2020, 7, 16, 2, 26, 28, 116503)\n', (175, 207), False, 'from datetime import datetime\n'), ((221, 253), 'ttracker.utils.timestamp_to_datetime', 'timestamp_to_datetime', (['timestamp'], {}), '(timestamp)\n', (242, 253), False, 'from ttracker.utils import timestamp_to_datetime\n')] |
# used to predict all validate images and encode results in a csv file
import numpy as np
import SimpleITK as sitk
import pandas as pd
import os
from os.path import join
from common.file_io import load_nrrd
# this function encodes a 2D binary mask into run-length-encoding (RLE) format
# the input is a 2D binary image (1 = positive), the output is a string of the RLE
def run_length_encoding(input_mask):
dots = np.where(input_mask.T.flatten() == 1)[0]
run_lengths, prev = [], -2
for b in dots:
if (b > prev + 1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return (" ".join([str(i) for i in run_lengths]))
if __name__ == '__main__':
### a sample script to produce a prediction
    # load the image file and reformat such that its axes are consistent with the MRI
mask_format_name= "predict.nrrd"
validation_dir = 'home/AtriaSeg_2018_testing'
encode_cavity = []
image_ids=[]
for patient_name in sorted(os.listdir(validation_dir)):
print ('encode ',patient_name)
mask_path=join(*(validation_dir,patient_name,mask_format_name))
if not os.path.isdir(os.path.join(validation_dir,patient_name)):
continue
mask,_=load_nrrd(mask_path)
mask[mask>0]=1
# ***
# encode in RLE
image_ids.extend([patient_name+"_Slice_" + str(i) for i in range(mask.shape[0])])
for i in range(mask.shape[0]):
encode_cavity.append(run_length_encoding(mask[i, :, :]))
# output to csv file
csv_output = pd.DataFrame(data={"ImageId": image_ids, 'EncodeCavity': encode_cavity},
columns=['ImageId', 'EncodeCavity'])
csv_output.to_csv("submission.csv", sep=",", index=False)
| [
"pandas.DataFrame",
"os.listdir",
"os.path.join",
"common.file_io.load_nrrd"
] | [((1560, 1673), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ImageId': image_ids, 'EncodeCavity': encode_cavity}", 'columns': "['ImageId', 'EncodeCavity']"}), "(data={'ImageId': image_ids, 'EncodeCavity': encode_cavity},\n columns=['ImageId', 'EncodeCavity'])\n", (1572, 1673), True, 'import pandas as pd\n'), ((991, 1017), 'os.listdir', 'os.listdir', (['validation_dir'], {}), '(validation_dir)\n', (1001, 1017), False, 'import os\n'), ((1077, 1132), 'os.path.join', 'join', (['*(validation_dir, patient_name, mask_format_name)'], {}), '(*(validation_dir, patient_name, mask_format_name))\n', (1081, 1132), False, 'from os.path import join\n'), ((1240, 1260), 'common.file_io.load_nrrd', 'load_nrrd', (['mask_path'], {}), '(mask_path)\n', (1249, 1260), False, 'from common.file_io import load_nrrd\n'), ((1160, 1202), 'os.path.join', 'os.path.join', (['validation_dir', 'patient_name'], {}), '(validation_dir, patient_name)\n', (1172, 1202), False, 'import os\n')] |
import datetime
from ..core.interface import Interface
from ..core import Config
class Stdout(Interface):
def __init__(self, config, _):
super(Stdout, self).__init__()
config_data = Config(config)
self.__channels = {}
self.__formatters = {
Interface.Channel.alert : self.__alert,
Interface.Channel.info : self.__info,
Interface.Channel.error : self.__error,
Interface.Channel.debug : self.__debug
}
for channel, name in self.channel_names.items():
if name in config_data.data:
self.__channels[channel] = Stdout.Channel(
self.__formatters[channel],
config_data.get_value_or_default(None, name, 'whitelist')[0],
config_data.get_value_or_default(None, name, 'blacklist')[0])
async def start(self):
channels = ','.join(self.channel_names[x] for x in self.__channels.keys())
print(f'[stdout] Stdout ready, available channels: {channels}')
async def send_message(self, channel, prefix, message):
now = datetime.datetime.now()
self.__channels[channel].send(prefix, message)
def __alert(self, prefix, message):
return f'[{Stdout.__now()}] [ALERT] [{prefix}]', message
def __info(self, prefix, message):
return f'[{Stdout.__now()}] [info] [{prefix}]', message
def __error(self, prefix, message):
return f'[{Stdout.__now()}] [ERROR] [{prefix}]', message
def __debug(self, prefix, message):
return f'[{Stdout.__now()}] [debug] [{prefix}]', message
@staticmethod
def __now():
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
class Channel:
def __init__(self, formatter, whitelist, blacklist):
self.__formatter = formatter
self.__whitelist = set(whitelist) if whitelist is not None else None
self.__blacklist = set(blacklist) if blacklist is not None else None
def send(self, prefix, message):
prefix, message = self.__formatter(prefix, message)
self.__print(prefix, message)
def __print(self, prefix, message):
lines = message.splitlines()
print(f'{prefix} {lines[0]}')
if len(lines) > 1:
for line in lines[1:]:
print(f'{" " * len(prefix)} {line}')
| [
"datetime.datetime.now"
] | [((1122, 1145), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1143, 1145), False, 'import datetime\n'), ((1682, 1705), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1703, 1705), False, 'import datetime\n')] |
import json
from ees.model import Response
class StatsHandler:
def __init__(self, db):
self.db = db
def execute(self, cmd):
v = self.db.get_analysis_state()
if not v:
return Response(
http_status=404,
body={
"error": "Statistics are not yet generated"
})
return Response(
http_status=200,
body={
'total_streams': v.total_streams,
'total_changesets': v.total_changesets,
'total_events': v.total_events,
'max_stream_length': v.max_stream_length,
'statistics_version': v.version
})
| [
"ees.model.Response"
] | [((381, 602), 'ees.model.Response', 'Response', ([], {'http_status': '(200)', 'body': "{'total_streams': v.total_streams, 'total_changesets': v.total_changesets,\n 'total_events': v.total_events, 'max_stream_length': v.\n max_stream_length, 'statistics_version': v.version}"}), "(http_status=200, body={'total_streams': v.total_streams,\n 'total_changesets': v.total_changesets, 'total_events': v.total_events,\n 'max_stream_length': v.max_stream_length, 'statistics_version': v.version})\n", (389, 602), False, 'from ees.model import Response\n'), ((220, 297), 'ees.model.Response', 'Response', ([], {'http_status': '(404)', 'body': "{'error': 'Statistics are not yet generated'}"}), "(http_status=404, body={'error': 'Statistics are not yet generated'})\n", (228, 297), False, 'from ees.model import Response\n')] |
import multiprocessing as mp
import numpy as np
import pandas as pd
import pytest
import dask
from dask import dataframe as dd
from dask.dataframe.shuffle import partitioning_index
from distributed import Client
from distributed.deploy.local import LocalCluster
import dask_cuda
from dask_cuda.explicit_comms import comms
from dask_cuda.explicit_comms.dataframe.merge import merge as explicit_comms_merge
from dask_cuda.explicit_comms.dataframe.shuffle import shuffle as explicit_comms_shuffle
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Notice: all of the following tests are executed in a new process so
# that the UCX options of the different tests don't conflict.
async def my_rank(state):
return state["rank"]
def _test_local_cluster(protocol):
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=4,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
c = comms.CommsContext(client)
assert sum(c.run(my_rank)) == sum(range(4))
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_local_cluster(protocol):
p = mp.Process(target=_test_local_cluster, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
from cudf.tests.utils import assert_eq
else:
from dask.dataframe.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
nrows = n_workers * 10
# Let's make some dataframes that we can join on the "key" column
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame(
{"key": key[nrows // 3 :], "payload2": np.arange(nrows)[nrows // 3 :]}
)
expected = df1.merge(df2).set_index("key")
if backend == "cudf":
df1 = cudf.DataFrame.from_pandas(df1)
df2 = cudf.DataFrame.from_pandas(df2)
ddf1 = dd.from_pandas(df1, npartitions=n_workers + 1)
ddf2 = dd.from_pandas(
df2, npartitions=n_workers - 1 if n_workers > 1 else 1
)
ddf3 = explicit_comms_merge(ddf1, ddf2, on="key").set_index("key")
got = ddf3.compute()
if backend == "cudf":
assert_eq(got, expected)
else:
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 4])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_merge(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_merge, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge_empty_partitions(nrows, npartitions):
with LocalCluster(
protocol="tcp",
dashboard_address=None,
n_workers=npartitions,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame({"key": key, "payload2": np.arange(nrows)})
expected = df1.merge(df2).set_index("key")
ddf1 = dd.from_pandas(df1, npartitions=npartitions)
ddf2 = dd.from_pandas(df2, npartitions=npartitions)
ddf3 = explicit_comms_merge(ddf1, ddf2, on=["key"]).set_index("key")
got = ddf3.compute()
pd.testing.assert_frame_equal(got, expected)
def test_dataframe_merge_empty_partitions():
# Notice, we use more partitions than rows
p = mp.Process(target=_test_dataframe_merge_empty_partitions, args=(2, 4))
p.start()
p.join()
assert not p.exitcode
def check_partitions(df, npartitions):
"""Check that all values in `df` hashes to the same"""
hashes = partitioning_index(df, npartitions)
if len(hashes) > 0:
return len(hashes.unique()) == 1
else:
return True
def _test_dataframe_shuffle(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
from cudf.tests.utils import assert_eq
else:
from dask.dataframe.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
all_workers = list(client.get_worker_logs().keys())
comms.default_comms()
np.random.seed(42)
df = pd.DataFrame({"key": np.random.random(100)})
if backend == "cudf":
df = cudf.DataFrame.from_pandas(df)
for input_nparts in range(1, 5):
for output_nparts in range(1, 5):
ddf = dd.from_pandas(df.copy(), npartitions=input_nparts).persist(
workers=all_workers
)
ddf = explicit_comms_shuffle(
ddf, ["key"], npartitions=output_nparts
).persist()
assert ddf.npartitions == output_nparts
# Check that each partition of `ddf` hashes to the same value
result = ddf.map_partitions(
check_partitions, output_nparts
).compute()
assert all(result.to_list())
# Check the values of `ddf` (ignoring the row order)
expected = df.sort_values("key")
got = ddf.compute().sort_values("key")
if backend == "cudf":
assert_eq(got, expected)
else:
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 3])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_shuffle(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_shuffle, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
def _test_dask_use_explicit_comms():
def check_shuffle(in_cluster):
"""Check if shuffle use explicit-comms by search for keys named "shuffle"
The explicit-comms implemention of shuffle doesn't produce any keys
named "shuffle"
"""
ddf = dd.from_pandas(pd.DataFrame({"key": np.arange(10)}), npartitions=2)
with dask.config.set(explicit_comms=False):
res = ddf.shuffle(on="key", npartitions=4, shuffle="tasks")
assert any(["shuffle" in str(key) for key in res.dask])
with dask.config.set(explicit_comms=True):
res = ddf.shuffle(on="key", npartitions=4, shuffle="tasks")
if in_cluster:
assert all(["shuffle" not in str(key) for key in res.dask])
else: # If not in cluster, we cannot use explicit comms
assert any(["shuffle" in str(key) for key in res.dask])
with LocalCluster(
protocol="tcp",
dashboard_address=None,
n_workers=2,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
check_shuffle(True)
check_shuffle(False)
def test_dask_use_explicit_comms():
p = mp.Process(target=_test_dask_use_explicit_comms)
p.start()
p.join()
assert not p.exitcode
def _test_jit_unspill(protocol):
import cudf
from cudf.tests.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with dask_cuda.LocalCUDACluster(
protocol=protocol,
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
jit_unspill=True,
device_memory_limit="1B",
) as cluster:
with Client(cluster):
np.random.seed(42)
df = cudf.DataFrame.from_pandas(
pd.DataFrame({"key": np.random.random(100)})
)
ddf = dd.from_pandas(df.copy(), npartitions=4)
ddf = explicit_comms_shuffle(ddf, ["key"])
# Check the values of `ddf` (ignoring the row order)
expected = df.sort_values("key")
got = ddf.compute().sort_values("key")
assert_eq(got, expected)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_jit_unspill(protocol):
pytest.importorskip("cudf")
p = mp.Process(target=_test_jit_unspill, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
| [
"multiprocessing.Process",
"pandas.testing.assert_frame_equal",
"dask.config.update",
"numpy.arange",
"numpy.random.random",
"distributed.deploy.local.LocalCluster",
"dask_cuda.explicit_comms.comms.CommsContext",
"dask_cuda.explicit_comms.dataframe.shuffle.shuffle",
"numpy.random.seed",
"distributed.Client",
"cudf.DataFrame.from_pandas",
"dask.config.set",
"dask_cuda.LocalCUDACluster",
"dask.dataframe.from_pandas",
"multiprocessing.get_context",
"dask.dataframe.shuffle.partitioning_index",
"dask_cuda.explicit_comms.comms.default_comms",
"dask_cuda.explicit_comms.dataframe.merge.merge",
"dask.dataframe.utils.assert_eq",
"pytest.mark.parametrize",
"pytest.importorskip",
"numpy.random.shuffle"
] | [((503, 526), 'multiprocessing.get_context', 'mp.get_context', (['"""spawn"""'], {}), "('spawn')\n", (517, 526), True, 'import multiprocessing as mp\n'), ((533, 559), 'pytest.importorskip', 'pytest.importorskip', (['"""ucp"""'], {}), "('ucp')\n", (552, 559), False, 'import pytest\n'), ((1099, 1150), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""protocol"""', "['tcp', 'ucx']"], {}), "('protocol', ['tcp', 'ucx'])\n", (1122, 1150), False, 'import pytest\n'), ((2955, 3001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nworkers"""', '[1, 2, 4]'], {}), "('nworkers', [1, 2, 4])\n", (2978, 3001), False, 'import pytest\n'), ((3003, 3057), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""backend"""', "['pandas', 'cudf']"], {}), "('backend', ['pandas', 'cudf'])\n", (3026, 3057), False, 'import pytest\n'), ((3059, 3110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""protocol"""', "['tcp', 'ucx']"], {}), "('protocol', ['tcp', 'ucx'])\n", (3082, 3110), False, 'import pytest\n'), ((6682, 6728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nworkers"""', '[1, 2, 3]'], {}), "('nworkers', [1, 2, 3])\n", (6705, 6728), False, 'import pytest\n'), ((6730, 6784), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""backend"""', "['pandas', 'cudf']"], {}), "('backend', ['pandas', 'cudf'])\n", (6753, 6784), False, 'import pytest\n'), ((6786, 6837), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""protocol"""', "['tcp', 'ucx']"], {}), "('protocol', ['tcp', 'ucx'])\n", (6809, 6837), False, 'import pytest\n'), ((9399, 9450), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""protocol"""', "['tcp', 'ucx']"], {}), "('protocol', ['tcp', 'ucx'])\n", (9422, 9450), False, 'import pytest\n'), ((1193, 1249), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_local_cluster', 'args': '(protocol,)'}), '(target=_test_local_cluster, args=(protocol,))\n', (1203, 1249), True, 'import multiprocessing as mp\n'), ((1544, 1651), 'dask.config.update', 'dask.config.update', (['dask.config.global_config', "{'ucx': {'TLS': 'tcp,sockcm,cuda_copy'}}"], {'priority': '"""new"""'}), "(dask.config.global_config, {'ucx': {'TLS':\n 'tcp,sockcm,cuda_copy'}}, priority='new')\n", (1562, 1651), False, 'import dask\n'), ((3236, 3312), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_dataframe_merge', 'args': '(backend, protocol, nworkers)'}), '(target=_test_dataframe_merge, args=(backend, protocol, nworkers))\n', (3246, 3312), True, 'import multiprocessing as mp\n'), ((4333, 4403), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_dataframe_merge_empty_partitions', 'args': '(2, 4)'}), '(target=_test_dataframe_merge_empty_partitions, args=(2, 4))\n', (4343, 4403), True, 'import multiprocessing as mp\n'), ((4570, 4605), 'dask.dataframe.shuffle.partitioning_index', 'partitioning_index', (['df', 'npartitions'], {}), '(df, npartitions)\n', (4588, 4605), False, 'from dask.dataframe.shuffle import partitioning_index\n'), ((4944, 5051), 'dask.config.update', 'dask.config.update', (['dask.config.global_config', "{'ucx': {'TLS': 'tcp,sockcm,cuda_copy'}}"], {'priority': '"""new"""'}), "(dask.config.global_config, {'ucx': {'TLS':\n 'tcp,sockcm,cuda_copy'}}, priority='new')\n", (4962, 5051), False, 'import dask\n'), ((6966, 7044), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_dataframe_shuffle', 'args': '(backend, protocol, nworkers)'}), '(target=_test_dataframe_shuffle, args=(backend, protocol, 
nworkers))\n', (6976, 7044), True, 'import multiprocessing as mp\n'), ((8314, 8362), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_dask_use_explicit_comms'}), '(target=_test_dask_use_explicit_comms)\n', (8324, 8362), True, 'import multiprocessing as mp\n'), ((8515, 8622), 'dask.config.update', 'dask.config.update', (['dask.config.global_config', "{'ucx': {'TLS': 'tcp,sockcm,cuda_copy'}}"], {'priority': '"""new"""'}), "(dask.config.global_config, {'ucx': {'TLS':\n 'tcp,sockcm,cuda_copy'}}, priority='new')\n", (8533, 8622), False, 'import dask\n'), ((9487, 9514), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (9506, 9514), False, 'import pytest\n'), ((9524, 9578), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_test_jit_unspill', 'args': '(protocol,)'}), '(target=_test_jit_unspill, args=(protocol,))\n', (9534, 9578), True, 'import multiprocessing as mp\n'), ((791, 901), 'distributed.deploy.local.LocalCluster', 'LocalCluster', ([], {'protocol': 'protocol', 'dashboard_address': 'None', 'n_workers': '(4)', 'threads_per_worker': '(1)', 'processes': '(True)'}), '(protocol=protocol, dashboard_address=None, n_workers=4,\n threads_per_worker=1, processes=True)\n', (803, 901), False, 'from distributed.deploy.local import LocalCluster\n'), ((1403, 1430), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (1422, 1430), False, 'import pytest\n'), ((1691, 1809), 'distributed.deploy.local.LocalCluster', 'LocalCluster', ([], {'protocol': 'protocol', 'dashboard_address': 'None', 'n_workers': 'n_workers', 'threads_per_worker': '(1)', 'processes': '(True)'}), '(protocol=protocol, dashboard_address=None, n_workers=n_workers,\n threads_per_worker=1, processes=True)\n', (1703, 1809), False, 'from distributed.deploy.local import LocalCluster\n'), ((3200, 3227), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (3219, 3227), False, 'import pytest\n'), ((3441, 3558), 'distributed.deploy.local.LocalCluster', 'LocalCluster', ([], {'protocol': '"""tcp"""', 'dashboard_address': 'None', 'n_workers': 'npartitions', 'threads_per_worker': '(1)', 'processes': '(True)'}), "(protocol='tcp', dashboard_address=None, n_workers=npartitions,\n threads_per_worker=1, processes=True)\n", (3453, 3558), False, 'from distributed.deploy.local import LocalCluster\n'), ((4803, 4830), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (4822, 4830), False, 'import pytest\n'), ((5091, 5209), 'distributed.deploy.local.LocalCluster', 'LocalCluster', ([], {'protocol': 'protocol', 'dashboard_address': 'None', 'n_workers': 'n_workers', 'threads_per_worker': '(1)', 'processes': '(True)'}), '(protocol=protocol, dashboard_address=None, n_workers=n_workers,\n threads_per_worker=1, processes=True)\n', (5103, 5209), False, 'from distributed.deploy.local import LocalCluster\n'), ((6929, 6956), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (6948, 6956), False, 'import pytest\n'), ((8018, 8125), 'distributed.deploy.local.LocalCluster', 'LocalCluster', ([], {'protocol': '"""tcp"""', 'dashboard_address': 'None', 'n_workers': '(2)', 'threads_per_worker': '(1)', 'processes': '(True)'}), "(protocol='tcp', dashboard_address=None, n_workers=2,\n threads_per_worker=1, processes=True)\n", (8030, 8125), False, 'from distributed.deploy.local import LocalCluster\n'), ((8662, 8834), 'dask_cuda.LocalCUDACluster', 'dask_cuda.LocalCUDACluster', ([], {'protocol': 'protocol', 
'dashboard_address': 'None', 'n_workers': '(1)', 'threads_per_worker': '(1)', 'processes': '(True)', 'jit_unspill': '(True)', 'device_memory_limit': '"""1B"""'}), "(protocol=protocol, dashboard_address=None,\n n_workers=1, threads_per_worker=1, processes=True, jit_unspill=True,\n device_memory_limit='1B')\n", (8688, 8834), False, 'import dask_cuda\n'), ((970, 985), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (976, 985), False, 'from distributed import Client\n'), ((1013, 1039), 'dask_cuda.explicit_comms.comms.CommsContext', 'comms.CommsContext', (['client'], {}), '(client)\n', (1031, 1039), False, 'from dask_cuda.explicit_comms import comms\n'), ((1878, 1893), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (1884, 1893), False, 'from distributed import Client\n'), ((2115, 2131), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (2124, 2131), True, 'import numpy as np\n'), ((2144, 2166), 'numpy.random.shuffle', 'np.random.shuffle', (['key'], {}), '(key)\n', (2161, 2166), True, 'import numpy as np\n'), ((2518, 2564), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['df1'], {'npartitions': '(n_workers + 1)'}), '(df1, npartitions=n_workers + 1)\n', (2532, 2564), True, 'from dask import dataframe as dd\n'), ((2584, 2654), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['df2'], {'npartitions': '(n_workers - 1 if n_workers > 1 else 1)'}), '(df2, npartitions=n_workers - 1 if n_workers > 1 else 1)\n', (2598, 2654), True, 'from dask import dataframe as dd\n'), ((3627, 3642), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (3633, 3642), False, 'from distributed import Client\n'), ((3750, 3766), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (3759, 3766), True, 'import numpy as np\n'), ((3779, 3801), 'numpy.random.shuffle', 'np.random.shuffle', (['key'], {}), '(key)\n', (3796, 3801), True, 'import numpy as np\n'), ((3951, 3995), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['df1'], {'npartitions': 'npartitions'}), '(df1, npartitions=npartitions)\n', (3965, 3995), True, 'from dask import dataframe as dd\n'), ((4015, 4059), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['df2'], {'npartitions': 'npartitions'}), '(df2, npartitions=npartitions)\n', (4029, 4059), True, 'from dask import dataframe as dd\n'), ((4186, 4230), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['got', 'expected'], {}), '(got, expected)\n', (4215, 4230), True, 'import pandas as pd\n'), ((5278, 5293), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (5284, 5293), False, 'from distributed import Client\n'), ((5381, 5402), 'dask_cuda.explicit_comms.comms.default_comms', 'comms.default_comms', ([], {}), '()\n', (5400, 5402), False, 'from dask_cuda.explicit_comms import comms\n'), ((5415, 5433), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5429, 5433), True, 'import numpy as np\n'), ((7462, 7499), 'dask.config.set', 'dask.config.set', ([], {'explicit_comms': '(False)'}), '(explicit_comms=False)\n', (7477, 7499), False, 'import dask\n'), ((7654, 7690), 'dask.config.set', 'dask.config.set', ([], {'explicit_comms': '(True)'}), '(explicit_comms=True)\n', (7669, 7690), False, 'import dask\n'), ((8194, 8209), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (8200, 8209), False, 'from distributed import Client\n'), ((8915, 8930), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (8921, 8930), False, 'from distributed import Client\n'), ((8944, 8962), 
'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (8958, 8962), True, 'import numpy as np\n'), ((9160, 9196), 'dask_cuda.explicit_comms.dataframe.shuffle.shuffle', 'explicit_comms_shuffle', (['ddf', "['key']"], {}), "(ddf, ['key'])\n", (9182, 9196), True, 'from dask_cuda.explicit_comms.dataframe.shuffle import shuffle as explicit_comms_shuffle\n'), ((9371, 9395), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (9380, 9395), False, 'from dask.dataframe.utils import assert_eq\n'), ((2412, 2443), 'cudf.DataFrame.from_pandas', 'cudf.DataFrame.from_pandas', (['df1'], {}), '(df1)\n', (2438, 2443), False, 'import cudf\n'), ((2466, 2497), 'cudf.DataFrame.from_pandas', 'cudf.DataFrame.from_pandas', (['df2'], {}), '(df2)\n', (2492, 2497), False, 'import cudf\n'), ((2848, 2872), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (2857, 2872), False, 'from dask.dataframe.utils import assert_eq\n'), ((2907, 2951), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['got', 'expected'], {}), '(got, expected)\n', (2936, 2951), True, 'import pandas as pd\n'), ((5551, 5581), 'cudf.DataFrame.from_pandas', 'cudf.DataFrame.from_pandas', (['df'], {}), '(df)\n', (5577, 5581), False, 'import cudf\n'), ((2048, 2064), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (2057, 2064), True, 'import numpy as np\n'), ((2078, 2094), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (2087, 2094), True, 'import numpy as np\n'), ((2704, 2746), 'dask_cuda.explicit_comms.dataframe.merge.merge', 'explicit_comms_merge', (['ddf1', 'ddf2'], {'on': '"""key"""'}), "(ddf1, ddf2, on='key')\n", (2724, 2746), True, 'from dask_cuda.explicit_comms.dataframe.merge import merge as explicit_comms_merge\n'), ((3683, 3699), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (3692, 3699), True, 'import numpy as np\n'), ((3713, 3729), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (3722, 3729), True, 'import numpy as np\n'), ((3858, 3874), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (3867, 3874), True, 'import numpy as np\n'), ((4079, 4123), 'dask_cuda.explicit_comms.dataframe.merge.merge', 'explicit_comms_merge', (['ddf1', 'ddf2'], {'on': "['key']"}), "(ddf1, ddf2, on=['key'])\n", (4099, 4123), True, 'from dask_cuda.explicit_comms.dataframe.merge import merge as explicit_comms_merge\n'), ((5472, 5493), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (5488, 5493), True, 'import numpy as np\n'), ((7417, 7430), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7426, 7430), True, 'import numpy as np\n'), ((2254, 2270), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (2263, 2270), True, 'import numpy as np\n'), ((6559, 6583), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (6568, 6583), False, 'from dask.dataframe.utils import assert_eq\n'), ((6634, 6678), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['got', 'expected'], {}), '(got, expected)\n', (6663, 6678), True, 'import pandas as pd\n'), ((9045, 9066), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (9061, 9066), True, 'import numpy as np\n'), ((5857, 5920), 'dask_cuda.explicit_comms.dataframe.shuffle.shuffle', 'explicit_comms_shuffle', (['ddf', "['key']"], {'npartitions': 'output_nparts'}), "(ddf, ['key'], npartitions=output_nparts)\n", (5879, 5920), True, 'from 
dask_cuda.explicit_comms.dataframe.shuffle import shuffle as explicit_comms_shuffle\n')] |
"""
Created on Jun 10, 2016
@author: xiao
"""
import numbers
from collections import Counter
import six # Python 2-3 compatibility
from pdfminer.layout import LTComponent, LTCurve, LTFigure, LTLine, LTTextLine
from pdftotree.utils.pdf.grid import Grid
from pdftotree.utils.pdf.layout_utils import is_same_row, is_vline
from pdftotree.utils.pdf.vector_utils import bound_bboxes, bound_elems
def elem_type(elem):
if isinstance(elem, LTLine):
return "line"
if isinstance(elem, LTCurve):
return "curve"
if isinstance(elem, LTTextLine):
return "text"
if isinstance(elem, LTFigure):
return "figure"
return "unkown"
class Node(LTComponent):
"""
A rectangular region in the document representing certain local semantics.
Also holds its data and features.
"""
def __init__(self, elems):
"""
Constructor
"""
if not isinstance(elems, list):
elems = [elems]
self.elems = elems
self.sum_elem_bbox = 0
for elem in elems:
self.sum_elem_bbox = self.sum_elem_bbox + abs(
(elem.bbox[0] - elem.bbox[2]) * (elem.bbox[1] - elem.bbox[3])
)
# # self.sum_elem_bbox = self.sum_elem_bbox + len(elem.get_text())
self.table_area_threshold = 0.7
self.set_bbox(bound_elems(elems))
# self.table_indicator = True
self.type_counts = Counter(map(elem_type, elems))
if elem_type(elems) not in ["figure", "unknown"]:
self.feat_counts = Counter(
kv for e in elems for kv in six.iteritems(e.feats)
)
else:
self.feat_counts = 0
self.type = "UNK"
def merge(self, other):
self.elems.extend(other.elems)
self.set_bbox(bound_bboxes([self.bbox, other.bbox]))
self.type_counts += other.type_counts
self.feat_counts += other.feat_counts
def area(self):
return self.height * self.width
def is_borderless(self):
# at least this many segments for a table
return self.type_counts["line"] < 6
def is_table(self):
"""
        Count the node's number of mention alignments in both axes to determine
        if the node is a table.
"""
if self.type_counts["text"] < 6 or "figure" in self.type_counts:
return False
for e in self.elems:
# Characters written as curve are usually small, discard diagrams here
if elem_type(e) == "curve" and e.height * e.width > 100:
return False
# import re
# space_re = '\\s+'
# ws_arr = []
# whitespace_aligned = False
# for elem in self.elems:
# elem_ws = []
# for m in re.finditer(space_re, elem.get_text()):
# elem_ws.append(m.start())
# # print elem, elem_ws
# if(len(elem_ws)>0):
# ws_arr.append(elem_ws)
# # print ws_arr
# if(len(ws_arr)>0):
# count_arr = max([ws_arr.count(i) for i in ws_arr])
# if(float(count_arr)/len(ws_arr) > 0.75):
# return True
if (
self.sum_elem_bbox / (self.height * self.width)
) > self.table_area_threshold:
return False
has_many_x_align = False
has_many_y_align = False
for k, v in six.iteritems(self.feat_counts):
font_key = k[0]
if (
v >= 2 and "-" in font_key
): # Text row or column with more than 2 elements
if font_key[-2] == "x":
has_many_x_align = True
if font_key[-2] == "y":
has_many_y_align = True
return has_many_x_align and has_many_y_align
# return 0.5
def get_grid(self):
"""
Standardize the layout of the table into grids
"""
mentions, lines = _split_text_n_lines(self.elems)
# Sort mentions in reading order where y values are snapped to half
# height-sized grid
mentions.sort(key=lambda m: (m.yc_grid, m.xc))
grid = Grid(mentions, lines, self)
return grid
def _find_vbars_for_row(self, plane, row):
align_grid_size = (
sum(m.height for m in row) / 2.0 / len(row)
) # half the avg height
# Find all x_coords of vertical bars crossing this row
ryc = sum(m.yc for m in row) / len(row) # avg yc
query_rect = (self.x0, ryc, self.x1, ryc)
vbars = filter(is_vline, plane.find(query_rect)) # vbars in this row
vbars = [(v.xc, v.xc_grid) for v in vbars]
vbars.sort()
# Group bars less than min cell width apart as one bar
prev_xc = -1
clustered_vbars = []
for xc, xc_grid in vbars:
if prev_xc < 0 or xc - prev_xc > align_grid_size:
clustered_vbars.append(xc_grid) # only keep snapped coord
prev_xc = xc
return clustered_vbars
def __str__(self, *args, **kwargs):
return "\t".join(
r.get_text().encode("utf8", "replace")
for r in self.elems
if isinstance(r, LTTextLine)
)
#############################################
# Static utilities
#############################################
def _split_text_n_lines(elems):
texts = []
lines = []
for e in elems:
if isinstance(e, LTTextLine):
texts.append(e)
elif isinstance(e, LTLine):
lines.append(e)
return texts, lines
def _left_bar(content, default_val):
last_bar = default_val
for _coord, val in content:
if not isinstance(val, LTTextLine):
last_bar = val
yield last_bar
def _right_bar(content, default_val):
return reversed(list(_left_bar(reversed(content), default_val)))
def _find_col_parent_for_row(content):
pass
def _get_cols(row_content):
"""
        Count the number of columns based on the content of this row
"""
cols = []
subcell_col = []
prev_bar = None
for _coord, item in row_content:
if isinstance(item, LTTextLine):
subcell_col.append(item)
else: # bar, add column content
# When there is no content, we count a None column
if prev_bar:
bar_ranges = (prev_bar, item)
col_items = subcell_col if subcell_col else [None]
cols.extend([bar_ranges, col_items])
prev_bar = item
subcell_col = []
    # Note: content appearing before the first bar is never added, so there is
    # no extra leading column to remove here.
return cols
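    # Illustration (hypothetical row): for content [bar1, "Name", bar2, "Age",
    # bar3] the list built above alternates (prev_bar, bar) range tuples with
    # the cells between them: [(bar1, bar2), ["Name"], (bar2, bar3), ["Age"]].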
def _row_str(row_content):
def strfy(r):
if r is None:
return "None"
if isinstance(r, tuple):
_c, r = r
if isinstance(r, LTTextLine):
return r.get_text().encode("utf8", "replace")
if isinstance(r, numbers.Number):
return "|"
return str(r)
return "\t".join(strfy(r) for r in row_content)
def _get_rows(mentions):
curr_row = []
rows = []
prev = None
for m in mentions:
if not is_same_row(prev, m):
if curr_row:
rows.append(curr_row)
curr_row = []
curr_row.append(m)
prev = m
# Finish up last row
if curr_row:
rows.append(curr_row)
return rows
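    # Illustration (hypothetical mentions): with m1 and m2 on one baseline and
    # m3 on the next, [m1, m2, m3] is grouped into [[m1, m2], [m3]] by checking
    # is_same_row on consecutive elements.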
def _one_contains_other(s1, s2):
"""
Whether one set contains the other
"""
return min(len(s1), len(s2)) == len(s1 & s2)
| [
"pdftotree.utils.pdf.vector_utils.bound_elems",
"pdftotree.utils.pdf.layout_utils.is_same_row",
"pdftotree.utils.pdf.vector_utils.bound_bboxes",
"pdftotree.utils.pdf.grid.Grid",
"six.iteritems"
] | [((3404, 3435), 'six.iteritems', 'six.iteritems', (['self.feat_counts'], {}), '(self.feat_counts)\n', (3417, 3435), False, 'import six\n'), ((4167, 4194), 'pdftotree.utils.pdf.grid.Grid', 'Grid', (['mentions', 'lines', 'self'], {}), '(mentions, lines, self)\n', (4171, 4194), False, 'from pdftotree.utils.pdf.grid import Grid\n'), ((1348, 1366), 'pdftotree.utils.pdf.vector_utils.bound_elems', 'bound_elems', (['elems'], {}), '(elems)\n', (1359, 1366), False, 'from pdftotree.utils.pdf.vector_utils import bound_bboxes, bound_elems\n'), ((1806, 1843), 'pdftotree.utils.pdf.vector_utils.bound_bboxes', 'bound_bboxes', (['[self.bbox, other.bbox]'], {}), '([self.bbox, other.bbox])\n', (1818, 1843), False, 'from pdftotree.utils.pdf.vector_utils import bound_bboxes, bound_elems\n'), ((7140, 7160), 'pdftotree.utils.pdf.layout_utils.is_same_row', 'is_same_row', (['prev', 'm'], {}), '(prev, m)\n', (7151, 7160), False, 'from pdftotree.utils.pdf.layout_utils import is_same_row, is_vline\n'), ((1606, 1628), 'six.iteritems', 'six.iteritems', (['e.feats'], {}), '(e.feats)\n', (1619, 1628), False, 'import six\n')] |
"""
Dig command
"""
from Functions import Command, Context, create_command
from random import randint
class Dig(Command):
def __init__(self) -> None:
"""
:return: None
"""
super().__init__("Dig", "You dig some sand!", self.callback)
@staticmethod
def callback(ctx: Context) -> None:
"""
dig sand!
:param ctx: context
:return: None
"""
min_sand = 1
max_sand = 4
if ctx.database.get_shop_item("shovel").get_is_owned():
min_sand += 2
max_sand += 2
got_sand = randint(min_sand, max_sand)
print(f"You got {got_sand} sand!")
ctx.database.sand = ctx.database.sand + got_sand
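        # e.g. without the shovel the roll is randint(1, 4); owning it shifts
        # the range to randint(3, 6).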
def on_load():
create_command(Dig)
| [
"random.randint",
"Functions.create_command"
] | [((790, 809), 'Functions.create_command', 'create_command', (['Dig'], {}), '(Dig)\n', (804, 809), False, 'from Functions import Command, Context, create_command\n'), ((631, 658), 'random.randint', 'randint', (['min_sand', 'max_sand'], {}), '(min_sand, max_sand)\n', (638, 658), False, 'from random import randint\n')] |
from imageai.Detection import ObjectDetection
from utils import exportutils, fileutils
from config import constants
import numpy as np
def get_average_word_vector(sentence, magnitude_vectors):
sentence_list = sentence.split(' ')
composite_vector = [0.0 for _ in range(300)]
for word in sentence_list:
word_vector = magnitude_vectors.query(word)
composite_vector = (np.array(word_vector) + np.array(composite_vector))/2.0
return composite_vector
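
# Editor's sketch (not part of the original module): the loop above halves the
# accumulated vector at every word, so later words carry more weight than
# earlier ones (a running average rather than a true mean). A hypothetical
# arithmetic-mean variant is shown for comparison; the 300-dim size mirrors the
# original function.
def get_mean_word_vector(sentence, magnitude_vectors):
    words = sentence.split(' ')
    vectors = [np.array(magnitude_vectors.query(word)) for word in words]
    # np.mean over axis 0 weights every word vector equally
    return np.mean(vectors, axis=0) if vectors else np.zeros(300)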
def get_image_hash_names(sentence, magnitude_vector_model,
                         index_hash_map=constants.INDEX_HASH_MAP,
                         nearest_neighbor_model=constants.NEAREST_NEIGHBOR_MODEL,
                         top_n=6):
nearest_neighbor_model = exportutils.load_model(nearest_neighbor_model)
index_hash_map = exportutils.load_model(index_hash_map)
word_vec = get_average_word_vector(sentence, magnitude_vector_model)
_, nearest_index = nearest_neighbor_model.kneighbors(word_vec.reshape(1, -1), n_neighbors=top_n)
match_index_list = nearest_index[0]
image_hash_list = list([])
for match_index in match_index_list:
image_hash_list.append(index_hash_map[match_index])
return image_hash_list
def augment_image_path(image_hash_list):
augmented_path_list = list([])
for image_hash_name in image_hash_list:
augmented_path_list.append(constants.DATA_FOLDER_PATH + image_hash_name + constants.IMAGE_EXTENSION)
return augmented_path_list
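    # e.g. (hypothetical hash) augment_image_path(["ab12cd"]) ->
    # [constants.DATA_FOLDER_PATH + "ab12cd" + constants.IMAGE_EXTENSION]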
def get_initialized_detector_model(model_path=constants.MODEL_FOLDER_PATH+constants.DETECTOR_MODEL_NAME):
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(model_path)
detector.loadModel()
return detector
def extract_objects_from_image(detector_model_object, image_list, output_path):
generated_image_path_list = list([])
image_full_path_list = augment_image_path(image_list)
print(image_full_path_list)
for image_hash_name, image_path_name in zip(image_list,image_full_path_list):
print(output_path+image_hash_name)
        detections, path = detector_model_object.detectObjectsFromImage(
            input_image=image_path_name,
            extract_detected_objects=True,
            output_type='file',
            output_image_path=output_path + image_hash_name,
            minimum_percentage_probability=30,
        )
generated_image_path_list.extend(path)
# except Exception:
# pass
return generated_image_path_list
def object_extraction_pipeline(sentence, magnitude_vector_model, detector_model_object):
fileutils.remove_files_and_folders(constants.DETECTED_OBJECT_OUTPUT_PATH)
image_hash_name_list = get_image_hash_names(sentence, magnitude_vector_model)
extract_objects_from_image(detector_model_object, image_hash_name_list, constants.DETECTED_OBJECT_OUTPUT_PATH)
file_path_list = fileutils.get_list_of_files_in_folder(constants.DETECTED_OBJECT_OUTPUT_PATH)
full_file_path_list = list([])
for file_path in file_path_list:
full_file_path_list.append(constants.DETECT_OUTPUT_PREFIX + file_path)
return full_file_path_list
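
# Hypothetical end-to-end usage (an editor's sketch; the vector model shown is
# an assumption -- any pymagnitude-style object exposing .query(word) would do):
#   detector = get_initialized_detector_model()
#   vectors = Magnitude("path/to/vectors.magnitude")  # from pymagnitude import Magnitude
#   result_paths = object_extraction_pipeline("a dog on the beach", vectors, detector)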
| [
"imageai.Detection.ObjectDetection",
"utils.fileutils.get_list_of_files_in_folder",
"utils.exportutils.load_model",
"numpy.array",
"utils.fileutils.remove_files_and_folders"
] | [((675, 721), 'utils.exportutils.load_model', 'exportutils.load_model', (['nearest_neighbor_model'], {}), '(nearest_neighbor_model)\n', (697, 721), False, 'from utils import exportutils, fileutils\n'), ((743, 781), 'utils.exportutils.load_model', 'exportutils.load_model', (['index_hash_map'], {}), '(index_hash_map)\n', (765, 781), False, 'from utils import exportutils, fileutils\n'), ((1540, 1557), 'imageai.Detection.ObjectDetection', 'ObjectDetection', ([], {}), '()\n', (1555, 1557), False, 'from imageai.Detection import ObjectDetection\n'), ((2605, 2678), 'utils.fileutils.remove_files_and_folders', 'fileutils.remove_files_and_folders', (['constants.DETECTED_OBJECT_OUTPUT_PATH'], {}), '(constants.DETECTED_OBJECT_OUTPUT_PATH)\n', (2639, 2678), False, 'from utils import exportutils, fileutils\n'), ((2897, 2973), 'utils.fileutils.get_list_of_files_in_folder', 'fileutils.get_list_of_files_in_folder', (['constants.DETECTED_OBJECT_OUTPUT_PATH'], {}), '(constants.DETECTED_OBJECT_OUTPUT_PATH)\n', (2934, 2973), False, 'from utils import exportutils, fileutils\n'), ((395, 416), 'numpy.array', 'np.array', (['word_vector'], {}), '(word_vector)\n', (403, 416), True, 'import numpy as np\n'), ((419, 445), 'numpy.array', 'np.array', (['composite_vector'], {}), '(composite_vector)\n', (427, 445), True, 'import numpy as np\n')] |