Dataset columns:
  code: string, lengths 13 to 6.09M
  order_type: string, 2 classes
  original_example: dict
  step_ids: list, lengths 1 to 5
import os
import sys

from tensor2tensor.bin import t2t_trainer


def problem_args(problem_name):

    args = [
        '--generate_data',
        '--model=transformer',
        '--hparams_set=transformer_librispeech_v1',
        '--problem=%s' % problem_name,
        '--data_dir=/tmp/refactor_test/problems/%s/data' % problem_name,
        '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' % problem_name,
        '--output_dir=/tmp/refactor_test/models/%s/data' % problem_name,
        '--hparams=batch_shuffle_size=0,batch_size=1000000'
    ]

    return args


def main():

    sys.argv += problem_args('librispeech_clean_small')
    # sys.argv += problem_args('common_voice')

    t2t_trainer.main(None)

    print('All done.')


if __name__ == '__main__':
    main()
normal
{ "blob_id": "cc5ad95419571d3eb2689b428e5805ad69958806", "index": 4796, "step-1": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import os\nimport sys\nfrom tensor2tensor.bin import t2t_trainer\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import os\nimport sys\n\nfrom tensor2tensor.bin import t2t_trainer\n\n\ndef problem_args(problem_name):\n\n args = [\n '--generate_data',\n '--model=transformer',\n '--hparams_set=transformer_librispeech_v1',\n '--problem=%s' % problem_name,\n '--data_dir=/tmp/refactor_test/problems/%s/data' % problem_name,\n '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' % problem_name,\n '--output_dir=/tmp/refactor_test/models/%s/data' % problem_name,\n '--hparams=batch_shuffle_size=0,batch_size=1000000'\n ]\n\n return args\n\n\ndef main():\n\n sys.argv += problem_args('librispeech_clean_small')\n # sys.argv += problem_args('common_voice')\n\n t2t_trainer.main(None)\n\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# -*- coding: utf-8 -*-
from django.db import models
from filebrowser.fields import FileBrowseField
from localisations.models import Ville, Lieu
from model_utils.managers import InheritanceManager
from services.models import Service
from equipements.models import Equipement
from localisations.models import Ville
from django.db.models import permalink


class Organisateur(models.Model):
    nom = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)
    meta_description = models.CharField(max_length=200)
    description = models.TextField()
    logo = FileBrowseField("Image", max_length=255, directory="evenements",
                           extensions=[".jpg", ".png", ".gif", ".jpeg"], blank=True, null=True)
    url = models.URLField("Site de cet organisateur: (facultatif) ", blank=True)
    email = models.EmailField("Mail (facultatif)", max_length=255, blank=True)
    telephone = models.CharField(max_length=25)
    fax = models.CharField("Fax (facultatif)", max_length=25, blank=True)
    rue = models.CharField(max_length=255)
    ville = models.ForeignKey(Ville)

    # Un choix de design pas très beau, mais fonctionellement les équipements, services, communes de la
    # communauté d'agglo peuvent organiser des evènements ainsi que d'autres entités exterieures alors ...

    orga_service = models.ForeignKey(Service, blank=True, null=True)
    orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)
    orga_ville = models.ForeignKey(Ville, blank=True, null=True, related_name='orga_orga_ville')

    def __unicode__(self):
        return self.nom + " / " + self.ville.nom

    class Meta:
        verbose_name_plural = "Organisateurs"
        ordering = ['ville__nom']


class Saison(models.Model):
    nom = models.CharField(max_length=255)
    debut = models.DateTimeField("Date de début")
    fin = models.DateTimeField("date de fin")
    description = models.TextField()
    slug = models.SlugField(max_length=255, unique=True)

    objects = InheritanceManager()

    def __unicode__(self):
        return self.nom


class SaisonCulturelle(Saison):
    def __unicode__(self):
        return self.nom


class Festival(Saison):
    saison_culture = models.ForeignKey(SaisonCulturelle)

    def __unicode__(self):
        return self.nom


class TypeEvenement(models.Model):
    nom = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)

    def __unicode__(self):
        return self.nom

    class Meta:
        ordering = ['nom']


EVENEMENT_CATEGORIES = (
    ('bib', u'Bibliothèques/Médiatèques'),
    ('crd', u'Conservatoires'),
    ('sty', u'Sothevy'),
    ('eco', u'Développement Économique'),
    ('aut', u'Autres'),
)

EVENEMENT_PUBLIC = (
    ('adt', u'Adulte'),
    ('enf', u'Enfant'),
    ('pub', u'Tout public'),
    ('ent', u'Entreprises'),
)


class Evenement(models.Model):
    nom = models.CharField(max_length=255)
    meta_description = models.CharField(max_length=200)
    description = models.TextField()
    debut = models.DateTimeField("Date de début")
    fin = models.DateTimeField("Date de fin")
    organisateur = models.ManyToManyField(Organisateur)
    image = FileBrowseField("Image (facultatif)", max_length=255, directory="evenements",
                            extensions=[".jpg", ".png", ".gif", ".jpeg", ".pdf"], blank=True, null=True)
    url = models.URLField("Un lien vers plus d'infos: (facultatif)", blank=True, null=True)
    url_reservation = models.URLField(
        "Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) ", blank=True, null=True)
    categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES, default='aut')
    public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC, default='pub')
    cadre_evenement = models.ForeignKey(Saison)
    type = models.ForeignKey(TypeEvenement)
    lieu = models.ForeignKey(Lieu)
    publish = models.BooleanField("Publié", default=False)
    page_accueil = models.BooleanField("Page d'accueil", default=False)
    complet = models.BooleanField("Ce spectacle est complet", default=False)
    slug = models.SlugField(max_length=255, unique=True)

    class Meta:
        ordering = ['-debut']

    def Organisateurs(self):
        return "\n;\n".join([s.nom for s in self.organisateur.all()])

    def __unicode__(self):
        return self.nom

    def monthyeardebut(self):
        return self.debut.strftime("%m") + "-" + self.debut.strftime("%Y")

    @permalink
    def get_absolute_url(self):
        return ('event-details', (), {'slug': self.cadre_evenement.slug, 'evenement_slug': self.slug})


class Prix(models.Model):
    intitule = models.CharField("Intitulé ", max_length=255, blank=False, null=False)
    prix = models.FloatField("Prix (séparateur point ex : 0.5 )", default=None, blank=False, null=True)
    evenement = models.ForeignKey(Evenement)

    class Meta:
        verbose_name_plural = u"Prix"


class DocumentAttache(models.Model):
    nom = models.CharField(max_length=255, verbose_name="Nom")
    document = FileBrowseField("Document", max_length=200, directory="evenements/docs",
                               extensions=[".pdf", ".doc", ".odt", ".docx", ".txt"])
    reference = models.ForeignKey(Evenement)


class EvenementBibManager(models.Manager):
    def get_queryset(self):
        return super(EvenementBibManager, self).get_queryset().filter(categorie='bib')


class EvenementBib(Evenement):
    objects = EvenementBibManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Bibliothèques"
        verbose_name = u"Événement Bibliothèque"


class EvenementCrdManager(models.Manager):
    def get_queryset(self):
        return super(EvenementCrdManager, self).get_queryset().filter(categorie='crd')


class EvenementCrd(Evenement):
    objects = EvenementCrdManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Conservatoires"
        verbose_name = u"Événement Conservatoire"


class EvenementDevEcoManager(models.Manager):
    def get_queryset(self):
        return super(EvenementDevEcoManager, self).get_queryset().filter(categorie='eco')


class EvenementDevEco(Evenement):
    objects = EvenementDevEcoManager()

    class Meta:
        proxy = True
        verbose_name_plural = u"Événements Dev Eco"
        verbose_name = u"Événement Dev Eco"
normal
{ "blob_id": "596fe474ae60dd6a06123df6fe246f7e947b3482", "index": 1760, "step-1": "<mask token>\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement 
Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n", "step-2": "<mask token>\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def 
get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n", "step-3": "<mask token>\n\n\nclass Organisateur(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Organisateurs'\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return 
'\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n", "step-4": "<mask token>\n\n\nclass Organisateur(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n logo = FileBrowseField('Image', max_length=255, directory='evenements',\n extensions=['.jpg', '.png', '.gif', '.jpeg'], blank=True, null=True)\n url = models.URLField('Site de cet organisateur: (facultatif) ', blank\n =True)\n email = models.EmailField('Mail (facultatif)', max_length=255, blank=True)\n telephone = models.CharField(max_length=25)\n fax = models.CharField('Fax (facultatif)', max_length=25, blank=True)\n rue = models.CharField(max_length=255)\n ville = models.ForeignKey(Ville)\n orga_service = models.ForeignKey(Service, blank=True, null=True)\n orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)\n orga_ville = models.ForeignKey(Ville, blank=True, null=True,\n related_name='orga_orga_ville')\n\n def __unicode__(self):\n return self.nom + ' / ' + self.ville.nom\n\n\n class Meta:\n verbose_name_plural = 'Organisateurs'\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('date de fin')\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n\n def 
__unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n\n class Meta:\n ordering = ['nom']\n\n\n<mask token>\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField('Date de début')\n fin = models.DateTimeField('Date de fin')\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField('Image (facultatif)', max_length=255, directory\n ='evenements', extensions=['.jpg', '.png', '.gif', '.jpeg', '.pdf'],\n blank=True, null=True)\n url = models.URLField(\"Un lien vers plus d'infos: (facultatif)\", blank=\n True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \"\n , blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES,\n default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC,\n default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField('Publié', default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField('Ce spectacle est complet', default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return '\\n;\\n'.join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime('%m') + '-' + self.debut.strftime('%Y')\n\n @permalink\n def get_absolute_url(self):\n return 'event-details', (), {'slug': self.cadre_evenement.slug,\n 'evenement_slug': self.slug}\n\n\nclass Prix(models.Model):\n intitule = models.CharField('Intitulé ', max_length=255, blank=False,\n null=False)\n prix = models.FloatField('Prix (séparateur point ex : 0.5 )', default=\n None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n\n class Meta:\n verbose_name_plural = u'Prix'\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name='Nom')\n document = FileBrowseField('Document', max_length=200, directory=\n 'evenements/docs', extensions=['.pdf', '.doc', '.odt', '.docx', '.txt']\n )\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie\n ='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Bibliothèques'\n verbose_name = u'Événement Bibliothèque'\n\n\nclass EvenementCrdManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie\n ='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Conservatoires'\n verbose_name = u'Événement Conservatoire'\n\n\nclass EvenementDevEcoManager(models.Manager):\n\n def get_queryset(self):\n return super(EvenementDevEcoManager, 
self).get_queryset().filter(\n categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n\n class Meta:\n proxy = True\n verbose_name_plural = u'Événements Dev Eco'\n verbose_name = u'Événement Dev Eco'\n", "step-5": "# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom filebrowser.fields import FileBrowseField\nfrom localisations.models import Ville, Lieu\nfrom model_utils.managers import InheritanceManager\nfrom services.models import Service\nfrom equipements.models import Equipement\nfrom localisations.models import Ville\nfrom django.db.models import permalink\n\n\nclass Organisateur(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(max_length=255, unique=True)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n logo = FileBrowseField(\"Image\", max_length=255, directory=\"evenements\",\n extensions=[\".jpg\", \".png\", \".gif\", \".jpeg\"], blank=True, null=True)\n url = models.URLField(\"Site de cet organisateur: (facultatif) \", blank=True)\n email = models.EmailField(\"Mail (facultatif)\", max_length=255, blank=True)\n telephone = models.CharField(max_length=25)\n fax = models.CharField(\"Fax (facultatif)\", max_length=25, blank=True)\n rue = models.CharField(max_length=255)\n ville = models.ForeignKey(Ville)\n\n # Un choix de design pas très beau, mais fonctionellement les équipements, services, communes de la\n # communauté d'agglo peuvent organiser des evènements ainsi que d'autres entités exterieures alors ...\n\n orga_service = models.ForeignKey(Service, blank=True, null=True)\n orga_equipement = models.ForeignKey(Equipement, blank=True, null=True)\n orga_ville = models.ForeignKey(Ville, blank=True, null=True, related_name='orga_orga_ville')\n\n def __unicode__(self):\n return self.nom + \" / \" + self.ville.nom\n\n class Meta:\n verbose_name_plural = \"Organisateurs\"\n ordering = ['ville__nom']\n\n\nclass Saison(models.Model):\n nom = models.CharField(max_length=255)\n debut = models.DateTimeField(\"Date de début\")\n fin = models.DateTimeField(\"date de fin\")\n description = models.TextField()\n slug = models.SlugField(max_length=255, unique=True)\n\n objects = InheritanceManager()\n\n def __unicode__(self):\n return self.nom\n\n\nclass SaisonCulturelle(Saison):\n def __unicode__(self):\n return self.nom\n\n\nclass Festival(Saison):\n saison_culture = models.ForeignKey(SaisonCulturelle)\n\n def __unicode__(self):\n return self.nom\n\n\nclass TypeEvenement(models.Model):\n nom = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n\n def __unicode__(self):\n return self.nom\n\n class Meta:\n ordering = ['nom']\n\n\nEVENEMENT_CATEGORIES = (\n ('bib', u'Bibliothèques/Médiatèques'),\n ('crd', u'Conservatoires'),\n ('sty', u'Sothevy'),\n ('eco', u'Développement Économique'),\n ('aut', u'Autres'),\n)\n\nEVENEMENT_PUBLIC = (\n ('adt', u'Adulte'),\n ('enf', u'Enfant'),\n ('pub', u'Tout public'),\n ('ent', u'Entreprises'),\n)\n\n\nclass Evenement(models.Model):\n nom = models.CharField(max_length=255)\n meta_description = models.CharField(max_length=200)\n description = models.TextField()\n debut = models.DateTimeField(\"Date de début\")\n fin = models.DateTimeField(\"Date de fin\")\n organisateur = models.ManyToManyField(Organisateur)\n image = FileBrowseField(\"Image (facultatif)\", max_length=255, directory=\"evenements\",\n extensions=[\".jpg\", \".png\", \".gif\", \".jpeg\", \".pdf\"], blank=True, null=True)\n url = models.URLField(\"Un lien 
vers plus d'infos: (facultatif)\", blank=True, null=True)\n url_reservation = models.URLField(\n \"Un lien vers la page de reservation: (facultatif, annule le lien vers plus d'infos) \", blank=True, null=True)\n categorie = models.CharField(max_length=3, choices=EVENEMENT_CATEGORIES, default='aut')\n public = models.CharField(max_length=3, choices=EVENEMENT_PUBLIC, default='pub')\n cadre_evenement = models.ForeignKey(Saison)\n type = models.ForeignKey(TypeEvenement)\n lieu = models.ForeignKey(Lieu)\n publish = models.BooleanField(\"Publié\", default=False)\n page_accueil = models.BooleanField(\"Page d'accueil\", default=False)\n complet = models.BooleanField(\"Ce spectacle est complet\", default=False)\n slug = models.SlugField(max_length=255, unique=True)\n\n class Meta:\n ordering = ['-debut']\n\n def Organisateurs(self):\n return \"\\n;\\n\".join([s.nom for s in self.organisateur.all()])\n\n def __unicode__(self):\n return self.nom\n\n def monthyeardebut(self):\n return self.debut.strftime(\"%m\") + \"-\" + self.debut.strftime(\"%Y\")\n\n @permalink\n def get_absolute_url(self):\n return ('event-details', (), {'slug': self.cadre_evenement.slug, 'evenement_slug': self.slug})\n\n\nclass Prix(models.Model):\n intitule = models.CharField(\"Intitulé \", max_length=255, blank=False, null=False)\n prix = models.FloatField(\"Prix (séparateur point ex : 0.5 )\", default=None, blank=False, null=True)\n evenement = models.ForeignKey(Evenement)\n\n class Meta:\n verbose_name_plural = u\"Prix\"\n\n\nclass DocumentAttache(models.Model):\n nom = models.CharField(max_length=255, verbose_name=\"Nom\")\n document = FileBrowseField(\"Document\", max_length=200, directory=\"evenements/docs\",\n extensions=[\".pdf\", \".doc\", \".odt\", \".docx\", \".txt\"])\n reference = models.ForeignKey(Evenement)\n\n\nclass EvenementBibManager(models.Manager):\n def get_queryset(self):\n return super(EvenementBibManager, self).get_queryset().filter(categorie='bib')\n\n\nclass EvenementBib(Evenement):\n objects = EvenementBibManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Bibliothèques\"\n verbose_name = u\"Événement Bibliothèque\"\n\n\nclass EvenementCrdManager(models.Manager):\n def get_queryset(self):\n return super(EvenementCrdManager, self).get_queryset().filter(categorie='crd')\n\n\nclass EvenementCrd(Evenement):\n objects = EvenementCrdManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Conservatoires\"\n verbose_name = u\"Événement Conservatoire\"\n\n\nclass EvenementDevEcoManager(models.Manager):\n def get_queryset(self):\n return super(EvenementDevEcoManager, self).get_queryset().filter(categorie='eco')\n\n\nclass EvenementDevEco(Evenement):\n objects = EvenementDevEcoManager()\n\n class Meta:\n proxy = True\n verbose_name_plural = u\"Événements Dev Eco\"\n verbose_name = u\"Événement Dev Eco\"", "step-ids": [ 30, 33, 34, 36, 39 ] }
[ 30, 33, 34, 36, 39 ]
# Return min number of hacks (swap of adjacent instructions)
# in p so that total damage <= d.
# If impossible, return -1
def min_hacks(d, p):

    # list containing number of shoot commands per
    # damage level. Each element is represents a
    # damage level; 1, 2, 4, 8, ... and so on.
    shots = [0]
    damage = 0
    for c in p:
        if c == "S":
            shots[-1] += 1
            # we can also calculate damage here.
            damage += 2 ** (len(shots) - 1)
        else:
            shots.append(0)

    # each hack represents moving 1 shot down 1 element
    # in the shots list. So keep doing this until
    # damage is <= d.
    hacks = 0
    while damage > d:
        # move 1 shot from highest element possible down 1 element.
        hacked = False
        for i in range(len(shots)-1, 0, -1):
            if shots[i] > 0:
                shots[i] -= 1
                shots[i-1] += 1
                damage -= 2 ** (i - 1)  # damage = damage - 2**i + 2**(i-1)
                hacks += 1
                hacked = True
                break

        if not hacked:
            # impossible to get damage <= d!
            return -1

    return hacks


num_cases = int(input())
for i in range(1, num_cases+1):
    current_case = input().split()
    d = int(current_case[0])
    p = current_case[1]
    solution = min_hacks(d, p)
    if solution < 0:
        solution_string = "IMPOSSIBLE"
    else:
        solution_string = str(solution)
    print("Case #{:d}: {:s}".format(i, solution_string))
normal
{ "blob_id": "607700faebc2018327d66939419cc24a563c3900", "index": 6515, "step-1": "<mask token>\n", "step-2": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\n<mask token>\n", "step-3": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\n<mask token>\nfor i in range(1, num_cases + 1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = 'IMPOSSIBLE'\n else:\n solution_string = str(solution)\n print('Case #{:d}: {:s}'.format(i, solution_string))\n", "step-4": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\nnum_cases = int(input())\nfor i in range(1, num_cases + 1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = 'IMPOSSIBLE'\n else:\n solution_string = str(solution)\n print('Case #{:d}: {:s}'.format(i, solution_string))\n", "step-5": "# Return min number of hacks (swap of adjacent instructions)\n# in p so that total damage <= d.\n# If impossible, return -1\ndef min_hacks(d, p):\n\n # list containing number of shoot commands per\n # damage level. Each element is represents a\n # damage level; 1, 2, 4, 8, ... and so on.\n shots = [0]\n damage = 0\n for c in p:\n if c == \"S\":\n shots[-1] += 1\n # we can also calculate damage here.\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n\n # each hack represents moving 1 shot down 1 element\n # in the shots list. So keep doing this until\n # damage is <= d.\n hacks = 0\n while damage > d:\n # move 1 shot from highest element possible down 1 element.\n hacked = False\n for i in range(len(shots)-1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i-1] += 1\n damage -= 2 ** (i - 1) # damage = damage - 2**i + 2**(i-1)\n hacks += 1\n hacked = True\n break\n\n if not hacked:\n # impossible to get damage <= d!\n return -1\n\n return hacks\n\nnum_cases = int(input())\nfor i in range(1, num_cases+1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = \"IMPOSSIBLE\"\n else:\n solution_string = str(solution)\n print(\"Case #{:d}: {:s}\".format(i, solution_string))\n \n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""" You are given a tree-like data structure represented as nested dictionaries. Implement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree. Implement a kind of unit tests via assert operator. """ from typing import Union def collect_leaves(u: Union[dict, list]) -> list: flatten_list = [] if isinstance(u, dict): for item in u.values(): flatten_list.extend(collect_leaves(item)) return flatten_list return u tree = { "node1": { "node11": { "node111": [1, 2, 3], "node112": [4, 5] }, "node12": [6] }, "node2": [7, 8, 9] } assert collect_leaves([1, 2, 3]) == [1, 2, 3] assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
normal
{ "blob_id": "603cce951dd0f78ef3ca9dce587042b3b7f6b449", "index": 8001, "step-1": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "step-3": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "step-4": "<mask token>\nfrom typing import Union\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "step-5": "\"\"\"\"\r\nYou are given a tree-like data structure represented as nested dictionaries.\r\nImplement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.\r\n\r\nImplement a kind of unit tests via assert operator.\r\n\"\"\"\r\nfrom typing import Union\r\n\r\n\r\ndef collect_leaves(u: Union[dict, list]) -> list:\r\n flatten_list = []\r\n if isinstance(u, dict):\r\n for item in u.values():\r\n flatten_list.extend(collect_leaves(item))\r\n return flatten_list\r\n return u\r\n\r\n\r\ntree = {\r\n \"node1\": {\r\n \"node11\": {\r\n \"node111\": [1, 2, 3],\r\n \"node112\": [4, 5]\r\n },\r\n \"node12\": [6]\r\n },\r\n \"node2\": [7, 8, 9]\r\n}\r\n\r\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\r\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from typing import List

from sqlalchemy.sql.functions import current_date, current_user

from db.session import get_db
from db.models.jobs import Job
from schemas.jobs import JobCreate, ShowJob
from db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id
from apis.version1.route_login import get_current_user_from_token
from db.models.users import User


router = APIRouter()


@router.post("/create-job", response_model=ShowJob)
def create_job(job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    owner_id = current_user.id
    job = create_new_job(job=job, db=db, owner_id=owner_id)
    return job


@router.put("/update/{id}")
def update_job(id: int, job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    owner_id = current_user.id
    message = update_job_by_id(id, job, db, owner_id)

    if message == 0:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist"
        )

    return {"detail": "Successfully updated"}


@router.get("/get/{id}", response_model=ShowJob)
def retrieve_job_by_id(id: int, db: Session = Depends(get_db)):
    job = retrieve_job(id=id, db=db)
    if not job:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist"
        )
    return job


@router.get("/all", response_model=List[ShowJob])
def retrieve_all_jobs(db: Session = Depends(get_db)):
    jobs = list_jobs(db=db)
    return jobs


@router.delete("/delete/{id}")
def delete_job(id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    owner_id = current_user.id
    message = delete_job_by_id(id, db, owner_id=owner_id)
    if message == 0:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist"
        )
    return {"details": "Successfully deleted"}
flexible
{ "blob_id": "e8092faed22607f9c8f18a79709022037ff647bf", "index": 9625, "step-1": "<mask token>\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n", "step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n 
return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n", "step-4": "from fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom sqlalchemy.sql.functions import current_date, current_user\nfrom db.session import get_db\nfrom db.models.jobs import Job\nfrom schemas.jobs import JobCreate, ShowJob\nfrom db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id\nfrom apis.version1.route_login import get_current_user_from_token\nfrom db.models.users import User\nrouter = APIRouter()\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n", "step-5": "from fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom typing import List\n\nfrom sqlalchemy.sql.functions import current_date, current_user\n\nfrom db.session import get_db\nfrom db.models.jobs import Job\nfrom schemas.jobs import JobCreate, ShowJob\nfrom db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id\nfrom apis.version1.route_login import get_current_user_from_token\nfrom db.models.users import User\n\n\nrouter = APIRouter()\n\n\[email 
protected](\"/create-job\", response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected](\"/update/{id}\")\ndef update_job(id: int, job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n\n if message == 0:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n \n return {\"detail\": \"Successfully updated\"}\n\n\[email protected](\"/get/{id}\", response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session = Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n return job\n\n\[email protected](\"/all\", response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session = Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected](\"/delete/{id}\")\ndef delete_job(id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n return {\"details\": \"Successfully deleted\"}\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import openpyxl as opx import pyperclip from openpyxl import Workbook from openpyxl.styles import PatternFill wb = Workbook(write_only=True) ws = wb.create_sheet() def parseSeq(lines,seqName): '''splits each column''' data = [] for line in lines: data.append(line.split(' ')) '''removes any spaces''' for i in range(len(data)): for j in range(data[i].count('')): data[i].remove('') '''deletes the numbers at beginning of column''' for i in range(len(data)): del data[i][0] '''creates a list of lists from dna sequence''' seqRows = [] for i in range(len(data)): seqRow = [] seqRow.append(seqName) for j in range(len(data[i])): for k in range(len(data[i][j])): seqRow.append(data[i][j][k]) seqRows.append(seqRow) return seqRows seqs = int(input('How many DNA sequences do you want to compare? ')) saveFile = input('What do you want to name the spreadsheet? ') '''masterList contains each sequence, and each sequence is broken into rows''' masterList = [] '''reads files so they can be parsed''' for i in range(seqs): print('What is the name of DNA sequence',i+1,end='? ') name = input('') file = open(name+'.txt') info = file.readlines() masterList.append(parseSeq(info,name)) file.close() '''sequence that contains the most rows is used for following loop''' elems = [] for i in range(len(masterList)): elems.append(len(masterList[i])) bigElem = elems.index(max(elems)) '''adds dna sequence to excel spreadsheet, 60 columns, x rows''' for row in range(len(masterList[bigElem])): for seq in range(len(masterList)): try: ws.append(masterList[seq][row]) except IndexError: ws.append([]) ws.append([]) wb.save(saveFile+'.xlsx') '''color match''' match = input('Do you want to color match your sequence (y/n)? ') if match == 'y': wb = opx.load_workbook(saveFile+'.xlsx') sheet = wb['Sheet'] ws = wb.active red = 'FFFF0000' green = '0000FF00' blue = 'FF0000FF' greenFill = PatternFill(start_color=green, end_color=green, fill_type='solid') redFill = PatternFill(start_color=red, end_color=red, fill_type='solid') blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid') ws['BK1'] = 'Matched' ws['BK1'].fill = greenFill ws['BK2'] = 'Unmatched' ws['BK2'].fill = blueFill lastRow = sheet.max_row + 1 end = int(lastRow / (seqs+1)) for section in range(end): startSec = (seqs+1)*section + 1 endSec = (seqs+1)*section + (seqs+1) for col in range(2,62): bp = [] for row in range(startSec,endSec): cell = sheet.cell(row=row,column=col).value bp.append(cell) if bp.count(bp[0]) == seqs: for row in range(startSec,endSec): sheet.cell(row=row,column=col).fill = greenFill else: for row in range(startSec,endSec): sheet.cell(row=row,column=col).fill = blueFill wb.save(saveFile+'.xlsx')
normal
{ "blob_id": "19e387cb731dad21e5ee50b0a9812df984c13f3b", "index": 7890, "step-1": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n", "step-3": "<mask token>\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in 
range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n", "step-4": "import openpyxl as opx\nimport pyperclip\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? 
')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n", "step-5": "import openpyxl as opx\r\nimport pyperclip\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill\r\nwb = Workbook(write_only=True)\r\nws = wb.create_sheet()\r\n\r\n\r\ndef parseSeq(lines,seqName):\r\n \r\n '''splits each column'''\r\n data = []\r\n for line in lines: data.append(line.split(' '))\r\n '''removes any spaces'''\r\n for i in range(len(data)):\r\n for j in range(data[i].count('')): data[i].remove('')\r\n '''deletes the numbers at beginning of column'''\r\n for i in range(len(data)): del data[i][0]\r\n '''creates a list of lists from dna sequence'''\r\n seqRows = []\r\n for i in range(len(data)):\r\n seqRow = []\r\n seqRow.append(seqName)\r\n for j in range(len(data[i])):\r\n for k in range(len(data[i][j])):\r\n seqRow.append(data[i][j][k])\r\n seqRows.append(seqRow) \r\n return seqRows\r\n\r\nseqs = int(input('How many DNA sequences do you want to compare? '))\r\nsaveFile = input('What do you want to name the spreadsheet? ')\r\n\r\n'''masterList contains each sequence, and each sequence is\r\n broken into rows'''\r\nmasterList = []\r\n'''reads files so they can be parsed'''\r\nfor i in range(seqs):\r\n print('What is the name of DNA sequence',i+1,end='? 
')\r\n name = input('')\r\n file = open(name+'.txt')\r\n info = file.readlines()\r\n masterList.append(parseSeq(info,name))\r\n file.close()\r\n\r\n'''sequence that contains the most rows is used for following loop'''\r\nelems = []\r\nfor i in range(len(masterList)): elems.append(len(masterList[i]))\r\nbigElem = elems.index(max(elems))\r\n \r\n'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''\r\nfor row in range(len(masterList[bigElem])):\r\n for seq in range(len(masterList)):\r\n try:\r\n ws.append(masterList[seq][row])\r\n except IndexError:\r\n ws.append([])\r\n ws.append([])\r\n \r\nwb.save(saveFile+'.xlsx')\r\n\r\n'''color match'''\r\nmatch = input('Do you want to color match your sequence (y/n)? ')\r\nif match == 'y':\r\n wb = opx.load_workbook(saveFile+'.xlsx')\r\n sheet = wb['Sheet']\r\n ws = wb.active\r\n\r\n\r\n red = 'FFFF0000'\r\n green = '0000FF00'\r\n blue = 'FF0000FF'\r\n\r\n greenFill = PatternFill(start_color=green,\r\n end_color=green,\r\n fill_type='solid')\r\n redFill = PatternFill(start_color=red,\r\n end_color=red,\r\n fill_type='solid')\r\n blueFill = PatternFill(start_color=blue,\r\n end_color=blue,\r\n fill_type='solid')\r\n\r\n\r\n ws['BK1'] = 'Matched'\r\n ws['BK1'].fill = greenFill\r\n ws['BK2'] = 'Unmatched'\r\n ws['BK2'].fill = blueFill\r\n\r\n lastRow = sheet.max_row + 1\r\n end = int(lastRow / (seqs+1))\r\n\r\n for section in range(end):\r\n startSec = (seqs+1)*section + 1\r\n endSec = (seqs+1)*section + (seqs+1)\r\n for col in range(2,62):\r\n bp = []\r\n for row in range(startSec,endSec):\r\n cell = sheet.cell(row=row,column=col).value\r\n bp.append(cell)\r\n if bp.count(bp[0]) == seqs:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = greenFill\r\n else:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = blueFill\r\n wb.save(saveFile+'.xlsx')\r\n\r\n\r\n\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> admin.autodiscover() <|reserved_special_token_0|> dajaxice_autodiscover() <|reserved_special_token_0|> urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) <|reserved_special_token_1|> <|reserved_special_token_0|> admin.autodiscover() <|reserved_special_token_0|> dajaxice_autodiscover() <|reserved_special_token_0|> urlpatterns = patterns('', url('^$', views.IndexView.as_view(), name= 'index'), url('^play/$', views.index, name='play'), url('^compose/$', views.compose, name='compose'), url('^random/$', views.random, name= 'random'), url('^play/(?P<pk>\\d+)/$', views.DetailView.as_view(), name ='quiz'), url('^compose/(?P<pk>\\d+)/$', views.UpdateView.as_view()), url('^clip/(?P<clip_id>\\d+)/$', views.clip, name='clip'), url( '^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name= 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url, include('dajaxice.urls')), url('^admin/doc/', include( 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls)) ) urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) <|reserved_special_token_1|> from django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() from django.conf import settings from django.conf.urls.static import static from django.contrib.staticfiles.urls import staticfiles_urlpatterns from dajaxice.core import dajaxice_autodiscover, dajaxice_config dajaxice_autodiscover() from spoticle import views urlpatterns = patterns('', url('^$', views.IndexView.as_view(), name= 'index'), url('^play/$', views.index, name='play'), url('^compose/$', views.compose, name='compose'), url('^random/$', views.random, name= 'random'), url('^play/(?P<pk>\\d+)/$', views.DetailView.as_view(), name ='quiz'), url('^compose/(?P<pk>\\d+)/$', views.UpdateView.as_view()), url('^clip/(?P<clip_id>\\d+)/$', views.clip, name='clip'), url( '^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name= 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url, include('dajaxice.urls')), url('^admin/doc/', include( 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls)) ) urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) <|reserved_special_token_1|> from django.conf.urls import patterns, include, url # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() from django.conf import settings from django.conf.urls.static import static from django.contrib.staticfiles.urls import staticfiles_urlpatterns from dajaxice.core import dajaxice_autodiscover, dajaxice_config dajaxice_autodiscover() from spoticle import views urlpatterns = patterns('', # Examples: # url(r'^$', 'spoticle.views.home', name='home'), # url(r'^spoticle/', include('spoticle.foo.urls')), url(r'^$', views.IndexView.as_view(), name='index'), url(r'^play/$', views.index, name='play'), url(r'^compose/$', views.compose, name='compose'), url(r'^random/$', views.random, name='random'), url(r'^play/(?P<pk>\d+)/$', 
views.DetailView.as_view(), name='quiz'), url(r'^compose/(?P<pk>\d+)/$', views.UpdateView.as_view()), url(r'^clip/(?P<clip_id>\d+)/$', views.clip, name='clip'), # Auth url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'), url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'), url(dajaxice_config.dajaxice_url, include('dajaxice.urls')), # Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), ) urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
flexible
{ "blob_id": "68a503b2a94304530e20d79baf9fb094024ba67e", "index": 539, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.autodiscover()\n<mask token>\ndajaxice_autodiscover()\n<mask token>\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "step-3": "<mask token>\nadmin.autodiscover()\n<mask token>\ndajaxice_autodiscover()\n<mask token>\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "step-4": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\nfrom spoticle import views\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "step-5": "from django.conf.urls import patterns, include, url\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\n\nfrom spoticle import views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'spoticle.views.home', name='home'),\n # url(r'^spoticle/', include('spoticle.foo.urls')),\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^play/$', views.index, name='play'),\n url(r'^compose/$', views.compose, name='compose'),\n url(r'^random/$', views.random, name='random'),\n 
url(r'^play/(?P<pk>\\d+)/$', views.DetailView.as_view(), name='quiz'),\n url(r'^compose/(?P<pk>\\d+)/$', views.UpdateView.as_view()),\n\n url(r'^clip/(?P<clip_id>\\d+)/$', views.clip, name='clip'),\n\n # Auth\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'),\n\n url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Vasicek: def __init__(self, rs, vol): self.t = rs.columns self.ps = rs[-1:] self.sigma = vol <|reserved_special_token_0|> def loss(self, x): self.a = x[0] self.b = x[1] self.sim_rs = apply(self.get_TheoreticalP, self.ps) loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs) loss = 10000 * np.sum(loss ** 2) return loss <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Vasicek: def __init__(self, rs, vol): self.t = rs.columns self.ps = rs[-1:] self.sigma = vol def get_TheoreticalP(self, x=0): sigma = self.sigma try: _ = x.shape except: x = self.t a = self.a b = self.b B = (1 - np.exp(-a * x)) / a A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma ** 2 * B ** 2 / (4 * a)) self.B = B self.A = A self.sim_p = A * np.exp(-B * x) self.r = -1 * np.log(self.sim_p) / x return self.r def loss(self, x): self.a = x[0] self.b = x[1] self.sim_rs = apply(self.get_TheoreticalP, self.ps) loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs) loss = 10000 * np.sum(loss ** 2) return loss def solve(self, x0=np.random.rand(2)): self.opt_results = optimize.fmin(self.loss, x0=x0) self.a = self.opt_results[0] self.b = self.opt_results[1] print(self.opt_results) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Vasicek: def __init__(self, rs, vol): self.t = rs.columns self.ps = rs[-1:] self.sigma = vol def get_TheoreticalP(self, x=0): sigma = self.sigma try: _ = x.shape except: x = self.t a = self.a b = self.b B = (1 - np.exp(-a * x)) / a A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma ** 2 * B ** 2 / (4 * a)) self.B = B self.A = A self.sim_p = A * np.exp(-B * x) self.r = -1 * np.log(self.sim_p) / x return self.r def loss(self, x): self.a = x[0] self.b = x[1] self.sim_rs = apply(self.get_TheoreticalP, self.ps) loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs) loss = 10000 * np.sum(loss ** 2) return loss def solve(self, x0=np.random.rand(2)): self.opt_results = optimize.fmin(self.loss, x0=x0) self.a = self.opt_results[0] self.b = self.opt_results[1] print(self.opt_results) def get_price_rate(self, T, r): sigma = list(self.sigma)[T] T = self.t[T] a = self.a b = self.b B = (1 - np.exp(-a * T)) / a A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 ) - sigma ** 2 * B ** 2 / (4 * a) p = A * np.exp(-B * r) r = -1 * np.log(p) / T return p, r <|reserved_special_token_0|> <|reserved_special_token_1|> from functions2 import * import numpy as np import numpy as np import math from scipy import optimize import pylab as pl from IPython import display as dp class Vasicek: def __init__(self, rs, vol): self.t = rs.columns self.ps = rs[-1:] self.sigma = vol def get_TheoreticalP(self, x=0): sigma = self.sigma try: _ = x.shape except: x = self.t a = self.a b = self.b B = (1 - np.exp(-a * x)) / a A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma ** 2 * B ** 2 / (4 * a)) self.B = B self.A = A self.sim_p = A * np.exp(-B * x) self.r = -1 * np.log(self.sim_p) / x return self.r def loss(self, x): self.a = x[0] self.b = x[1] self.sim_rs = apply(self.get_TheoreticalP, self.ps) loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs) loss = 10000 * np.sum(loss ** 2) return loss def solve(self, x0=np.random.rand(2)): self.opt_results = optimize.fmin(self.loss, x0=x0) self.a = self.opt_results[0] self.b = self.opt_results[1] print(self.opt_results) 
def get_price_rate(self, T, r): sigma = list(self.sigma)[T] T = self.t[T] a = self.a b = self.b B = (1 - np.exp(-a * T)) / a A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 ) - sigma ** 2 * B ** 2 / (4 * a) p = A * np.exp(-B * r) r = -1 * np.log(p) / T return p, r def option_pricing(V, r, t, T, X): time_dict = dict(zip(V.t, np.arange(len(V.t)))) r = r[-1:][t].item() P = V.get_price_rate(time_dict[T], r) p = V.get_price_rate(time_dict[t], r) sigmap = V.sigma[t] * (1 / V.a) * (1 / np.sqrt(t)) * (1 - np.exp(-V.a * (T - t))) * np.sqrt((1 - np.exp(-2 * V.a * t)) / (2 * V.a)) d = 1 / sigmap * np.log(P[0] / (p[0] * X)) + 0.5 * sigmap c = P[0] * norm.cdf(d) - X * p[0] * norm.cdf(d - sigmap) return c <|reserved_special_token_1|> from functions2 import * import numpy as np #from functions import TermStructure,load_data import numpy as np import math from scipy import optimize import pylab as pl from IPython import display as dp class Vasicek(): def __init__(self,rs,vol): self.t = rs.columns self.ps= rs[-1:] self.sigma = vol def get_TheoreticalP(self,x=0): sigma = self.sigma try: _ = x.shape except: x = self.t a = self.a b = self.b B = (1-np.exp(-a*x))/a A = np.exp(((B-x)*(a**2*b-(sigma**2)/2))/a**2-(sigma**2*B**2)/(4*a)) self.B=B self.A=A self.sim_p = A*np.exp(-B*x) self.r = -1*np.log(self.sim_p)/x return self.r def loss(self,x): self.a = x[0] self.b = x[1] self.sim_rs = apply(self.get_TheoreticalP,self.ps) loss = np.array(self.ps.as_matrix())-np.array(self.sim_rs) loss = 10000*np.sum(loss**2) return loss def solve(self,x0=np.random.rand(2)): self.opt_results = optimize.fmin(self.loss,x0=x0)#,tol=1e-10,method='Nelder-Mead',options={'maxiter':1800}) self.a = self.opt_results[0] self.b = self.opt_results[1] print(self.opt_results) def get_price_rate(self,T,r): sigma = list(self.sigma)[T] T = self.t[T] a = self.a b = self.b B = (1-np.exp(-a*T))/a A = np.exp(((B-T)*(a**2*b-(sigma**2)/2))/a**2)-(sigma**2*B**2)/(4*a) p = A*np.exp(-B*r) r = -1*np.log(p)/T return p,r def option_pricing(V,r,t,T,X): #print('Expiration: {}'.format(t)) #print('Maturity: {}'.format(T)) time_dict = dict(zip(V.t,np.arange(len(V.t)))) r = r[-1:][t].item() P = V.get_price_rate(time_dict[T],r) p = V.get_price_rate(time_dict[t],r) sigmap = V.sigma[t]*(1/V.a)*(1/np.sqrt(t))*(1-np.exp(-V.a*(T-t)))*np.sqrt((1-np.exp(-2*V.a*t))/(2*V.a)) d = (1/sigmap)*np.log(P[0]/(p[0]*X))+0.5*sigmap c = P[0]*norm.cdf(d)-X*p[0]*norm.cdf(d-sigmap) return c
flexible
{ "blob_id": "b6470ffda9040223951a99abc600ce1e99fe146b", "index": 7902, "step-1": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n <mask token>\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n\n def get_price_rate(self, T, r):\n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * T)) / a\n A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2\n ) - sigma ** 2 * B ** 2 / (4 * a)\n p = A * np.exp(-B * r)\n r = -1 * np.log(p) / T\n return p, r\n\n\n<mask token>\n", "step-4": "from functions2 import *\nimport numpy as np\nimport numpy as np\nimport math\nfrom scipy import optimize\nimport pylab as pl\nfrom IPython import display as dp\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n 
return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n\n def get_price_rate(self, T, r):\n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * T)) / a\n A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2\n ) - sigma ** 2 * B ** 2 / (4 * a)\n p = A * np.exp(-B * r)\n r = -1 * np.log(p) / T\n return p, r\n\n\ndef option_pricing(V, r, t, T, X):\n time_dict = dict(zip(V.t, np.arange(len(V.t))))\n r = r[-1:][t].item()\n P = V.get_price_rate(time_dict[T], r)\n p = V.get_price_rate(time_dict[t], r)\n sigmap = V.sigma[t] * (1 / V.a) * (1 / np.sqrt(t)) * (1 - np.exp(-V.a *\n (T - t))) * np.sqrt((1 - np.exp(-2 * V.a * t)) / (2 * V.a))\n d = 1 / sigmap * np.log(P[0] / (p[0] * X)) + 0.5 * sigmap\n c = P[0] * norm.cdf(d) - X * p[0] * norm.cdf(d - sigmap)\n return c\n", "step-5": "from functions2 import *\nimport numpy as np\n#from functions import TermStructure,load_data\nimport numpy as np\nimport math\nfrom scipy import optimize\nimport pylab as pl\nfrom IPython import display as dp\n\n\n\n\nclass Vasicek():\n def __init__(self,rs,vol):\n self.t = rs.columns\n self.ps= rs[-1:]\n self.sigma = vol \n \n def get_TheoreticalP(self,x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n \n a = self.a\n b = self.b\n B = (1-np.exp(-a*x))/a\n A = np.exp(((B-x)*(a**2*b-(sigma**2)/2))/a**2-(sigma**2*B**2)/(4*a))\n self.B=B\n self.A=A\n self.sim_p = A*np.exp(-B*x)\n self.r = -1*np.log(self.sim_p)/x\n return self.r\n\n \n def loss(self,x):\n self.a = x[0]\n self.b = x[1] \n self.sim_rs = apply(self.get_TheoreticalP,self.ps)\n loss = np.array(self.ps.as_matrix())-np.array(self.sim_rs)\n\n loss = 10000*np.sum(loss**2)\n \n return loss\n\n \n def solve(self,x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss,x0=x0)#,tol=1e-10,method='Nelder-Mead',options={'maxiter':1800})\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n \n def get_price_rate(self,T,r):\n \n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1-np.exp(-a*T))/a\n A = np.exp(((B-T)*(a**2*b-(sigma**2)/2))/a**2)-(sigma**2*B**2)/(4*a)\n p = A*np.exp(-B*r)\n r = -1*np.log(p)/T\n return p,r\n\n\ndef option_pricing(V,r,t,T,X):\n #print('Expiration: {}'.format(t))\n #print('Maturity: {}'.format(T))\n \n time_dict = dict(zip(V.t,np.arange(len(V.t))))\n \n r = r[-1:][t].item()\n \n P = V.get_price_rate(time_dict[T],r)\n \n p = V.get_price_rate(time_dict[t],r)\n \n\n \n sigmap = V.sigma[t]*(1/V.a)*(1/np.sqrt(t))*(1-np.exp(-V.a*(T-t)))*np.sqrt((1-np.exp(-2*V.a*t))/(2*V.a))\n \n d = (1/sigmap)*np.log(P[0]/(p[0]*X))+0.5*sigmap\n \n c = P[0]*norm.cdf(d)-X*p[0]*norm.cdf(d-sigmap)\n \n return c", "step-ids": [ 3, 5, 6, 8, 9 ] }
[ 3, 5, 6, 8, 9 ]
class Solution:
    def containsDuplicate(self, nums) -> bool:
        d = {} # store the elements which already exist

        for elem in nums:
            if elem in d:
                return True
            else:
                d[elem] = 1

        return False

print(Solution().containsDuplicate([0]))
normal
{ "blob_id": "89256a38208be92f87115b110edc986cebc95306", "index": 8440, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n", "step-3": "class Solution:\n\n def containsDuplicate(self, nums) ->bool:\n d = {}\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n return False\n\n\n<mask token>\n", "step-4": "class Solution:\n\n def containsDuplicate(self, nums) ->bool:\n d = {}\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n return False\n\n\nprint(Solution().containsDuplicate([0]))\n", "step-5": "class Solution:\n def containsDuplicate(self, nums) -> bool:\n d = {} # store the elements which already exist\n\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n\n return False\n\nprint(Solution().containsDuplicate([0]))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/bin/env python import sys import os import collections import re import json import urllib import urllib.request import uuid import time PROCESSOR_VERSION = "0.1" def process(trace_dir, out_dir): #order files trace_files = os.listdir(trace_dir) trace_files = sorted(trace_files) if trace_files[0] == "error.log": #we need to do this in case the last traces are in an error log file that wasn't rotated yet print ("Rotating to properly order logs.") trace_files = collections.deque(trace_files) trace_files.rotate(-1) #combine full_trace = b"" all_lines= "" for file_name in trace_files: print ("Processing: " + str(file_name)) with open(os.path.join(trace_dir, file_name), "rb") as f: for line in f: try: #print(line.decode('utf-8')) all_lines += line.decode('utf-8') except UnicodeDecodeError: print("weird text") # let's fix any pesky solitary \n's (these are at the end of all the bodies) full_trace = re.sub(r'(?<!\r)\n', '\r\n\r\n', all_lines) ''' Is the issue with the input or my processing? tmp_file = open('full_trace.json', 'wb') json.dump(full_trace, tmp_file) tmp_file.close() INPUT Issue ''' #do the first step of preprocessing, getting raw sessions print( "Collecting raw sessions") raw_sessions = dict() full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n'))) for line in full_trace_iterator: #TODO IPv6 #TODO Responses (we get them but do we want to do this a different way) send_recv = re.findall(r'(SEND|RECV)', line) ipv4_port = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+', line) if ipv4_port: port = re.findall(r':[0-9]+$', ipv4_port[0]) if port: if port[0] == ":443" or port[0] == ":80": continue # we don't want the server conn side stuff yet if send_recv and ipv4_port: ip_port_key = ipv4_port[0] this_trace = line while True: try: next_line = next(full_trace_iterator) this_trace += next_line end_trace = re.findall(r'\[End Trace\]', next_line) if end_trace: break except Exception as e: #reached the end of the file print( e) break if ip_port_key not in raw_sessions: raw_sessions[ip_port_key] = this_trace print(ip_port_key) else: raw_sessions[ip_port_key] += this_trace #do the second step of preprocessing, getting JSONs from raw sessions print( "Constructing session JSONs") session_JSONs = dict() for session, raw_traces in raw_sessions.items(): #basic data session_JSONs[session] = dict() session_JSONs[session]["version"] = PROCESSOR_VERSION session_JSONs[session]["encoding"] = "url_encoded" # let's get the raw text from the traces raw_text = "" timestamp = "" timestamp_list = list() for line in raw_traces.splitlines(raw_traces.count('\n')): trace_line = re.findall(r'^\d{8}\.\d{2}h\d{2}m\d{2}s', line) timestamp = re.findall(r'\[\d{10}\.\d{3}\]', line) if timestamp: timestamp_list.append(timestamp[0][1:-1]) if not trace_line: raw_text += line #get session start timestamp session_JSONs[session]["timestamp"] = timestamp_list[0] # let's parse out requests and responses count = -1 delimiter = "\r\n\r\n" is_request_chunk = True raw_text_chunks = iter(raw_text.split(delimiter)) session_JSONs[session]["txns"] = list() for chunk in raw_text_chunks: #check if each chunk is request or response if it is do so accordingly #otherwise append it to the previous chunk's data request_chunk = re.findall(r'^\S+\s/\S+\sHTTP/\d\.\d\r\n', chunk) response_chunk = re.findall(r'^HTTP/\d\.\d\s\d{3}\s[\s\S]+\r\n', chunk) if request_chunk: count += 1 is_reqeust_chunk = True chunk += delimiter if count <= len(session_JSONs[session]["txns"]): session_JSONs[session]["txns"].append(dict()) 
session_JSONs[session]["txns"][count]["request"] = dict() session_JSONs[session]["txns"][count]["request"]["timestamp"] = timestamp_list[count - 1] session_JSONs[session]["txns"][count]["request"]["headers"] = chunk session_JSONs[session]["txns"][count]["uuid"] = uuid.uuid4().hex elif response_chunk: is_request_chunk = False chunk += delimiter if count <= len(session_JSONs[session]["txns"]): session_JSONs[session]["txns"].append(dict()) session_JSONs[session]["txns"][count]["response"] = dict() session_JSONs[session]["txns"][count]["response"]["timestamp"] = timestamp_list[count - 1] session_JSONs[session]["txns"][count]["response"]["headers"] = chunk else: #is body chunk try: if count == -1: continue #if we have garbage at the front chunk = urllib.parse.quote(chunk) if is_request_chunk: if "body" not in session_JSONs[session]["txns"][count]["request"]: session_JSONs[session]["txns"][count]["request"]["body"] = chunk else: session_JSONs[session]["txns"][count]["request"]["body"] += chunk else: if "body" not in session_JSONs[session]["txns"][count]["response"]: session_JSONs[session]["txns"][count]["response"]["body"] = chunk else: session_JSONs[session]["txns"][count]["response"]["body"] += chunk except KeyError as k: continue # for now we're dropping malformed bodies. will not be able to do this when we're validating. might have to go edit wiretracing code to give us better delimiters here for parsing. right now isn't particularly straightforward print(len(session_JSONs[session]["txns"])) session_JSONs[session]["txns"] = list(filter(bool, session_JSONs[session]["txns"])) if len(session_JSONs[session]["txns"]) == 0: del session_JSONs[session] #write out unicode_errors = 0 print( "Writing sessions to disk") out_files = dict() for session, data in session_JSONs.items(): out_files[session] = open(os.path.join(out_dir, 'session_' + str(session)) + '.json', 'w') try: json.dump(data, out_files[session]) out_files[session].close() except: unicode_errors += 1 out_files[session].close() os.remove(os.path.join(out_dir, 'session_' + str(session)) + '.json') print( str(unicode_errors) + " unicode errors") def main(argv): if len(argv) != 3: print( "Script to preprocess trace logs for client.") print( "Outputs JSONs to directory 'sessions'") print( "Usage: python " + str(argv[0]) + " <in directory> <out directory>") return if not os.path.isdir(argv[1]): print( str(argv[1]) + " is not a directory. Aborting.") return if not os.path.exists(argv[2]): os.makedirs(argv[2]) else: print( str(argv[2]) + " already exists, choose another output directory!") return t1=time.time() process(argv[1], argv[2]) t2=time.time() print("time taken:",(t2-t1)) if __name__ == "__main__": main(sys.argv)
normal
{ "blob_id": "4b83887e8d8e5c5dc7065354d24044d3c3a48714", "index": 3387, "step-1": "<mask token>\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n 
session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? 
\n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n 
print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-3": "<mask token>\nPROCESSOR_VERSION = '0.1'\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? 
\n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n 
print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-4": "import sys\nimport os\nimport collections\nimport re\nimport json\nimport urllib\nimport urllib.request\nimport uuid\nimport time\nPROCESSOR_VERSION = '0.1'\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? 
\n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n 
print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-5": "#!/bin/env python\n\nimport sys\nimport os\nimport collections\nimport re\nimport json\nimport urllib\nimport urllib.request\nimport uuid\nimport time\nPROCESSOR_VERSION = \"0.1\"\n\ndef process(trace_dir, out_dir):\n #order files\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == \"error.log\": #we need to do this in case the last traces are in an error log file that wasn't rotated yet\n print (\"Rotating to properly order logs.\")\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n\n #combine\n full_trace = b\"\"\n all_lines= \"\"\n for file_name in trace_files:\n print (\"Processing: \" + str(file_name))\n with open(os.path.join(trace_dir, file_name), \"rb\") as f:\n for line in f:\n try:\n #print(line.decode('utf-8'))\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print(\"weird text\")\n # let's fix any pesky solitary \\n's (these are at the end of all the bodies)\n full_trace = re.sub(r'(?<!\\r)\\n', '\\r\\n\\r\\n', all_lines)\n \n '''\n Is the issue with the input or my processing? 
\n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n '''\n\n #do the first step of preprocessing, getting raw sessions\n print( \"Collecting raw sessions\")\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n #TODO IPv6\n #TODO Responses (we get them but do we want to do this a different way)\n send_recv = re.findall(r'(SEND|RECV)', line)\n ipv4_port = re.findall(r'[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(r':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == \":443\" or port[0] == \":80\":\n continue # we don't want the server conn side stuff yet\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall(r'\\[End Trace\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n #reached the end of the file\n print( e)\n break\n\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n\n #do the second step of preprocessing, getting JSONs from raw sessions\n print( \"Constructing session JSONs\")\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n #basic data\n session_JSONs[session] = dict()\n session_JSONs[session][\"version\"] = PROCESSOR_VERSION\n session_JSONs[session][\"encoding\"] = \"url_encoded\"\n\n # let's get the raw text from the traces\n raw_text = \"\"\n timestamp = \"\"\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall(r'^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)\n timestamp = re.findall(r'\\[\\d{10}\\.\\d{3}\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n \n #get session start timestamp\n session_JSONs[session][\"timestamp\"] = timestamp_list[0]\n \n # let's parse out requests and responses\n count = -1\n delimiter = \"\\r\\n\\r\\n\"\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session][\"txns\"] = list()\n for chunk in raw_text_chunks:\n #check if each chunk is request or response if it is do so accordingly\n #otherwise append it to the previous chunk's data\n request_chunk = re.findall(r'^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n', chunk)\n response_chunk = re.findall(r'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session][\"txns\"]):\n session_JSONs[session][\"txns\"].append(dict())\n session_JSONs[session][\"txns\"][count][\"request\"] = dict()\n session_JSONs[session][\"txns\"][count][\"request\"][\"timestamp\"] = timestamp_list[count - 1] \n session_JSONs[session][\"txns\"][count][\"request\"][\"headers\"] = chunk\n session_JSONs[session][\"txns\"][count][\"uuid\"] = uuid.uuid4().hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session][\"txns\"]):\n session_JSONs[session][\"txns\"].append(dict())\n session_JSONs[session][\"txns\"][count][\"response\"] = dict()\n session_JSONs[session][\"txns\"][count][\"response\"][\"timestamp\"] = timestamp_list[count - 1] \n session_JSONs[session][\"txns\"][count][\"response\"][\"headers\"] = chunk\n else: #is body chunk\n try:\n if count == -1: continue #if we 
have garbage at the front\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if \"body\" not in session_JSONs[session][\"txns\"][count][\"request\"]:\n session_JSONs[session][\"txns\"][count][\"request\"][\"body\"] = chunk\n else:\n session_JSONs[session][\"txns\"][count][\"request\"][\"body\"] += chunk\n else:\n if \"body\" not in session_JSONs[session][\"txns\"][count][\"response\"]:\n session_JSONs[session][\"txns\"][count][\"response\"][\"body\"] = chunk\n else:\n session_JSONs[session][\"txns\"][count][\"response\"][\"body\"] += chunk\n except KeyError as k:\n continue # for now we're dropping malformed bodies. will not be able to do this when we're validating. might have to go edit wiretracing code to give us better delimiters here for parsing. right now isn't particularly straightforward\n print(len(session_JSONs[session][\"txns\"]))\n session_JSONs[session][\"txns\"] = list(filter(bool, session_JSONs[session][\"txns\"]))\n if len(session_JSONs[session][\"txns\"]) == 0:\n del session_JSONs[session] \n\n #write out\n unicode_errors = 0\n print( \"Writing sessions to disk\")\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close() \n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) + '.json') \n\n print( str(unicode_errors) + \" unicode errors\")\n\ndef main(argv):\n if len(argv) != 3:\n print( \"Script to preprocess trace logs for client.\")\n print( \"Outputs JSONs to directory 'sessions'\")\n print( \"Usage: python \" + str(argv[0]) + \" <in directory> <out directory>\")\n return\n\n if not os.path.isdir(argv[1]):\n print( str(argv[1]) + \" is not a directory. Aborting.\")\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print( str(argv[2]) + \" already exists, choose another output directory!\")\n return\n t1=time.time()\n process(argv[1], argv[2])\n t2=time.time()\n print(\"time taken:\",(t2-t1))\nif __name__ == \"__main__\":\n main(sys.argv)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.urls import path
from admin_panel import views
urlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=
    'admin_panel'), path('admin_panel/connection/', views.Connection.
    as_view(), name='connect_group-teacher'), path(
    'admin_panel/connection/<str:choiced_departament>', views.Connection.
    as_view(), name='connect_group-teacher')]
normal
{ "blob_id": "34a7fd66a9e2eae25994336f22a76c24c11a6e1b", "index": 7408, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=\n 'admin_panel'), path('admin_panel/connection/', views.Connection.\n as_view(), name='connect_group-teacher'), path(\n 'admin_panel/connection/<str:choiced_departament>', views.Connection.\n as_view(), name='connect_group-teacher')]\n", "step-3": "from django.urls import path\nfrom admin_panel import views\nurlpatterns = [path('admin_panel/', views.AdminPanel.as_view(), name=\n 'admin_panel'), path('admin_panel/connection/', views.Connection.\n as_view(), name='connect_group-teacher'), path(\n 'admin_panel/connection/<str:choiced_departament>', views.Connection.\n as_view(), name='connect_group-teacher')]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from rdflib import Graph
from rdflib.plugins.sparql import prepareQuery


def is_file_ontology(file_path):
    """
    Method that, given a file, returns its URI.
    This method is in a separate file in case we want to extract additional metadata if required
    Parameters
    ----------
    @param file_path: path of the candidate ontology

    Returns
    -------
    @return: The URI of the target ontology (if there is one)
    """
    # load in rdf lib
    try:
        g = Graph()
        g.parse(file_path)
        q1 = prepareQuery('''
            SELECT ?onto
            WHERE {
                ?onto a <http://www.w3.org/2002/07/owl#Ontology>.
            }
            ''')
        # TO DO: extract title, preferred ns.
        # there should be only one ontology per file
        for r in g.query(q1):
            # print("Found that %s is an ontology" % file_path)
            return r.onto
    except Exception:
        # If the candidate file could not be read, pass
        pass
normal
{ "blob_id": "c327f8f7aece1a9c25079613809df52e9a8e7a52", "index": 8763, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery(\n \"\"\"\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n \"\"\"\n )\n for r in g.query(q1):\n return r.onto\n except Exception:\n pass\n", "step-3": "from rdflib import Graph\nfrom rdflib.plugins.sparql import prepareQuery\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery(\n \"\"\"\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n \"\"\"\n )\n for r in g.query(q1):\n return r.onto\n except Exception:\n pass\n", "step-4": "from rdflib import Graph\nfrom rdflib.plugins.sparql import prepareQuery\n\n\ndef is_file_ontology(file_path):\n \"\"\"\n Method that, given a file, returns its URI.\n This method is in a separate file in case we want to extract additional metadata if required\n Parameters\n ----------\n @param file_path: path of the candidate ontology\n\n Returns\n -------\n @return: The URI of the target ontology (if there is one)\n \"\"\"\n # load in rdf lib\n try:\n g = Graph()\n g.parse(file_path)\n q1 = prepareQuery('''\n SELECT ?onto\n WHERE { \n ?onto a <http://www.w3.org/2002/07/owl#Ontology>. \n }\n ''')\n # TO DO: extract title, preferred ns.\n # there should be only one ontology per file\n for r in g.query(q1):\n # print(\"Found that %s is an ontology\" % file_path)\n return r.onto\n except Exception:\n # If the candidate file could not be read, pass\n pass\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class MD(BaseEstimator, TransformerMixin): <|reserved_special_token_0|> def _init_graph(self): """ Init a tensorflow Graph containing: input data, variables, model, loss, optimizer """ self.graph = tf.Graph() with self.graph.as_default(): tf.set_random_seed(self.random_seed) self.train_data = tf.placeholder(tf.float32, shape=[None, self. input_size]) self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) self.train_labels_center = tf.placeholder(tf.float32, shape=[ None, self.third_layer_size]) self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) self.weights = self._initialize_weights() self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324) self.embedding_result = self.embedding_layer(self.train_data) self.embedding_result = tf.layers.Flatten()(self.embedding_result) self.net1 = tf.matmul(self.embedding_result, self.weights['layer1'] ) self.layer1 = tf.layers.batch_normalization(self.net1, training =self.phase) self.layer1 = tf.nn.tanh(self.layer1) self.net2 = tf.matmul(self.layer1, self.weights['layer2']) self.net2 = tf.layers.batch_normalization(self.net2, training= self.phase) self.net2 = tf.nn.relu(self.net2) self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training= self.phase) self.net3 = tf.matmul(self.layer2, self.weights['layer3']) self.layer3 = tf.nn.tanh(self.net3) self.cross_entropy = tf.reduce_mean(tf.losses. mean_squared_error(self.train_labels_center, self.layer3)) self.train_step = tf.train.AdamOptimizer(self.learning_rate ).minimize(self.cross_entropy) self.init = tf.initialize_all_variables() self.sess = tf.Session() self.sess.run(self.init) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def evaluate(self, true_labels, kmeans_labels, size): """ :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1 :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number :param size: number of samples :return: accuracy, precision, recall, f_measure """ self.label_list_0 = np.where(kmeans_labels == 0)[0] self.label_list_1 = np.where(kmeans_labels == 1)[0] temp = [true_labels[i][0] for i in self.label_list_0] temp1 = [true_labels[i][0] for i in self.label_list_1] temp1.append(2) temp.append(2) counts = np.bincount(temp) counts2 = np.bincount(temp1) if counts[0] > counts[1]: accuracy = (counts[0] + counts2[1]) / size precision = counts2[1] / (counts2[1] + counts2[0]) recall = counts2[1] / (counts2[1] + counts[1]) f_measure = 2 * (precision * recall / (precision + recall)) else: accuracy = (counts[1] + counts2[0]) / size precision = counts[1] / (counts[1] + counts[0]) recall = counts[1] / (counts[1] + counts2[1]) f_measure = 2 * (precision * recall / (precision + recall)) return accuracy, precision, recall, f_measure <|reserved_special_token_0|> <|reserved_special_token_0|> def log(self, message): print(message) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MD(BaseEstimator, TransformerMixin): def __init__(self, data, input_size, epoch, batch_size, iteration, alpha=1.0, n_neg_samples=10, random_seed=2020): self.iteration = iteration self.epoch = epoch self.batch_size = batch_size self.learning_rate = 0.01 self.random_seed = random_seed self.phase = True self.first_layer_size = 256 self.second_layer_size = 128 self.third_layer_size = 128 self.input_size = input_size self.X_train_ben = data[0] self.X_train_mal = 
data[1] self.X_test_ben = data[2] self.X_test_mal = data[3] self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [], 'fmeasure': []} self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0} self._init_graph() def _init_graph(self): """ Init a tensorflow Graph containing: input data, variables, model, loss, optimizer """ self.graph = tf.Graph() with self.graph.as_default(): tf.set_random_seed(self.random_seed) self.train_data = tf.placeholder(tf.float32, shape=[None, self. input_size]) self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) self.train_labels_center = tf.placeholder(tf.float32, shape=[ None, self.third_layer_size]) self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) self.weights = self._initialize_weights() self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324) self.embedding_result = self.embedding_layer(self.train_data) self.embedding_result = tf.layers.Flatten()(self.embedding_result) self.net1 = tf.matmul(self.embedding_result, self.weights['layer1'] ) self.layer1 = tf.layers.batch_normalization(self.net1, training =self.phase) self.layer1 = tf.nn.tanh(self.layer1) self.net2 = tf.matmul(self.layer1, self.weights['layer2']) self.net2 = tf.layers.batch_normalization(self.net2, training= self.phase) self.net2 = tf.nn.relu(self.net2) self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training= self.phase) self.net3 = tf.matmul(self.layer2, self.weights['layer3']) self.layer3 = tf.nn.tanh(self.net3) self.cross_entropy = tf.reduce_mean(tf.losses. mean_squared_error(self.train_labels_center, self.layer3)) self.train_step = tf.train.AdamOptimizer(self.learning_rate ).minimize(self.cross_entropy) self.init = tf.initialize_all_variables() self.sess = tf.Session() self.sess.run(self.init) def _initialize_weights(self): self.all_weights = dict() self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) self.all_weights['layer2'] = tf.Variable(tf.random.normal([self. first_layer_size, self.second_layer_size], mean=0.0, stddev=1)) self.all_weights['layer3'] = tf.Variable(tf.random.normal([self. second_layer_size, self.third_layer_size], mean=0.0, stddev=1)) self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368, self.first_layer_size], minval=-1, maxval=1)) self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self. first_layer_size, self.second_layer_size], minval=-1, maxval=1)) self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self. second_layer_size, self.third_layer_size], minval=-1, maxval=1)) self.all_weights['layer1'] = tf.get_variable('w', [32 * self. input_size, self.first_layer_size], initializer=tf.initializers .random_normal(mean=0, stddev=0.8), regularizer=tf.keras. regularizers.l2(0.01)) self.all_weights['layer2'] = tf.get_variable('w2', [self. first_layer_size, self.second_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. keras.regularizers.l2(0.01)) self.all_weights['layer3'] = tf.get_variable('w3', [self. second_layer_size, self.third_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. 
keras.regularizers.l2(0.01)) return self.all_weights <|reserved_special_token_0|> def partial_fit(self, X): feed_dict = {self.train_data: X['batch_data_train']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) new_labels = self.kmeans_clustering(self.points, len(X[ 'batch_data_label']), X['batch_data_label']) self.clusters_dist.append(np.linalg.norm(self.kmeans. cluster_centers_[0] - self.kmeans.cluster_centers_[1])) feed_dicts = {self.train_data: X['batch_data_train'], self. train_labels_center: new_labels} loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts) metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len(X['batch_data_label'])) self.accuracy_list.append(metrics[0]) self.fmeasure_list.append(metrics[3]) return loss def evaluate(self, true_labels, kmeans_labels, size): """ :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1 :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number :param size: number of samples :return: accuracy, precision, recall, f_measure """ self.label_list_0 = np.where(kmeans_labels == 0)[0] self.label_list_1 = np.where(kmeans_labels == 1)[0] temp = [true_labels[i][0] for i in self.label_list_0] temp1 = [true_labels[i][0] for i in self.label_list_1] temp1.append(2) temp.append(2) counts = np.bincount(temp) counts2 = np.bincount(temp1) if counts[0] > counts[1]: accuracy = (counts[0] + counts2[1]) / size precision = counts2[1] / (counts2[1] + counts2[0]) recall = counts2[1] / (counts2[1] + counts[1]) f_measure = 2 * (precision * recall / (precision + recall)) else: accuracy = (counts[1] + counts2[0]) / size precision = counts[1] / (counts[1] + counts[0]) recall = counts[1] / (counts[1] + counts2[1]) f_measure = 2 * (precision * recall / (precision + recall)) return accuracy, precision, recall, f_measure def final_fit(self, X, true_labels): self.phase = False feed_dict = {self.train_data: X['data_test']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) self.predicted_Labels = [] for i in range(len(true_labels)): if np.linalg.norm(self.FinalCenters['benignCenter'] - self. points[i]) < np.linalg.norm(self.FinalCenters[ 'malwareCenter'] - self.points[i]): self.predicted_Labels.append([0]) else: self.predicted_Labels.append([1]) tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels ).ravel() accuracy = (tp + tn) / (tp + tn + fn + fp) precision = tp / (tp + fp) recall = tp / (tp + fn) f_measure = 2 * (precision * recall) / (precision + recall) self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'. format(accuracy))) self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}' .format(precision))) self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'. format(recall))) self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'. 
format(f_measure))) print('accuracy', 'precision', 'recall', 'f_measure', sep='\t\t\t\t\t') print(accuracy, precision, recall, f_measure, sep='\t\t\t') return 0 <|reserved_special_token_0|> def log(self, message): print(message) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MD(BaseEstimator, TransformerMixin): def __init__(self, data, input_size, epoch, batch_size, iteration, alpha=1.0, n_neg_samples=10, random_seed=2020): self.iteration = iteration self.epoch = epoch self.batch_size = batch_size self.learning_rate = 0.01 self.random_seed = random_seed self.phase = True self.first_layer_size = 256 self.second_layer_size = 128 self.third_layer_size = 128 self.input_size = input_size self.X_train_ben = data[0] self.X_train_mal = data[1] self.X_test_ben = data[2] self.X_test_mal = data[3] self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [], 'fmeasure': []} self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0} self._init_graph() def _init_graph(self): """ Init a tensorflow Graph containing: input data, variables, model, loss, optimizer """ self.graph = tf.Graph() with self.graph.as_default(): tf.set_random_seed(self.random_seed) self.train_data = tf.placeholder(tf.float32, shape=[None, self. input_size]) self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) self.train_labels_center = tf.placeholder(tf.float32, shape=[ None, self.third_layer_size]) self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) self.weights = self._initialize_weights() self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324) self.embedding_result = self.embedding_layer(self.train_data) self.embedding_result = tf.layers.Flatten()(self.embedding_result) self.net1 = tf.matmul(self.embedding_result, self.weights['layer1'] ) self.layer1 = tf.layers.batch_normalization(self.net1, training =self.phase) self.layer1 = tf.nn.tanh(self.layer1) self.net2 = tf.matmul(self.layer1, self.weights['layer2']) self.net2 = tf.layers.batch_normalization(self.net2, training= self.phase) self.net2 = tf.nn.relu(self.net2) self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training= self.phase) self.net3 = tf.matmul(self.layer2, self.weights['layer3']) self.layer3 = tf.nn.tanh(self.net3) self.cross_entropy = tf.reduce_mean(tf.losses. mean_squared_error(self.train_labels_center, self.layer3)) self.train_step = tf.train.AdamOptimizer(self.learning_rate ).minimize(self.cross_entropy) self.init = tf.initialize_all_variables() self.sess = tf.Session() self.sess.run(self.init) def _initialize_weights(self): self.all_weights = dict() self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) self.all_weights['layer2'] = tf.Variable(tf.random.normal([self. first_layer_size, self.second_layer_size], mean=0.0, stddev=1)) self.all_weights['layer3'] = tf.Variable(tf.random.normal([self. second_layer_size, self.third_layer_size], mean=0.0, stddev=1)) self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368, self.first_layer_size], minval=-1, maxval=1)) self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self. first_layer_size, self.second_layer_size], minval=-1, maxval=1)) self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self. second_layer_size, self.third_layer_size], minval=-1, maxval=1)) self.all_weights['layer1'] = tf.get_variable('w', [32 * self. 
input_size, self.first_layer_size], initializer=tf.initializers .random_normal(mean=0, stddev=0.8), regularizer=tf.keras. regularizers.l2(0.01)) self.all_weights['layer2'] = tf.get_variable('w2', [self. first_layer_size, self.second_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. keras.regularizers.l2(0.01)) self.all_weights['layer3'] = tf.get_variable('w3', [self. second_layer_size, self.third_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. keras.regularizers.l2(0.01)) return self.all_weights <|reserved_special_token_0|> def partial_fit(self, X): feed_dict = {self.train_data: X['batch_data_train']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) new_labels = self.kmeans_clustering(self.points, len(X[ 'batch_data_label']), X['batch_data_label']) self.clusters_dist.append(np.linalg.norm(self.kmeans. cluster_centers_[0] - self.kmeans.cluster_centers_[1])) feed_dicts = {self.train_data: X['batch_data_train'], self. train_labels_center: new_labels} loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts) metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len(X['batch_data_label'])) self.accuracy_list.append(metrics[0]) self.fmeasure_list.append(metrics[3]) return loss def evaluate(self, true_labels, kmeans_labels, size): """ :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1 :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number :param size: number of samples :return: accuracy, precision, recall, f_measure """ self.label_list_0 = np.where(kmeans_labels == 0)[0] self.label_list_1 = np.where(kmeans_labels == 1)[0] temp = [true_labels[i][0] for i in self.label_list_0] temp1 = [true_labels[i][0] for i in self.label_list_1] temp1.append(2) temp.append(2) counts = np.bincount(temp) counts2 = np.bincount(temp1) if counts[0] > counts[1]: accuracy = (counts[0] + counts2[1]) / size precision = counts2[1] / (counts2[1] + counts2[0]) recall = counts2[1] / (counts2[1] + counts[1]) f_measure = 2 * (precision * recall / (precision + recall)) else: accuracy = (counts[1] + counts2[0]) / size precision = counts[1] / (counts[1] + counts[0]) recall = counts[1] / (counts[1] + counts2[1]) f_measure = 2 * (precision * recall / (precision + recall)) return accuracy, precision, recall, f_measure def final_fit(self, X, true_labels): self.phase = False feed_dict = {self.train_data: X['data_test']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) self.predicted_Labels = [] for i in range(len(true_labels)): if np.linalg.norm(self.FinalCenters['benignCenter'] - self. points[i]) < np.linalg.norm(self.FinalCenters[ 'malwareCenter'] - self.points[i]): self.predicted_Labels.append([0]) else: self.predicted_Labels.append([1]) tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels ).ravel() accuracy = (tp + tn) / (tp + tn + fn + fp) precision = tp / (tp + fp) recall = tp / (tp + fn) f_measure = 2 * (precision * recall) / (precision + recall) self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'. format(accuracy))) self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}' .format(precision))) self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'. format(recall))) self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'. 
format(f_measure))) print('accuracy', 'precision', 'recall', 'f_measure', sep='\t\t\t\t\t') print(accuracy, precision, recall, f_measure, sep='\t\t\t') return 0 def train(self): for iter in range(self.iteration): self.log('iteration {} '.format(iter)) for epoch in range(self.epoch): self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.log('epoch %s' % epoch) total_batches = int(len(self.X_train_ben['data']) / self. batch_size) self.log('total_batches in epoch %s : %s ' % (epoch, total_batches)) start_index = 0 end_index = start_index + self.batch_size self.counter = 0 for i in range(total_batches + 1): self.counter += 1 batch_xs = {} batch_xs['batch_data_train'] = np.concatenate([self. X_train_ben['data'][start_index:end_index], self. X_train_mal['data'][start_index:end_index]]) batch_xs['batch_data_label'] = np.concatenate([self. X_train_ben['label'][start_index:end_index], self. X_train_mal['label'][start_index:end_index]]) end_index = end_index + self.batch_size cost = self.partial_fit(batch_xs) batch_test = {} batch_test['data'] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']]) batch_test['label'] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']]) self.final_fit(batch_test, batch_test['label']) self.sess.run(self.init) return (self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list) def log(self, message): print(message) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MD(BaseEstimator, TransformerMixin): def __init__(self, data, input_size, epoch, batch_size, iteration, alpha=1.0, n_neg_samples=10, random_seed=2020): self.iteration = iteration self.epoch = epoch self.batch_size = batch_size self.learning_rate = 0.01 self.random_seed = random_seed self.phase = True self.first_layer_size = 256 self.second_layer_size = 128 self.third_layer_size = 128 self.input_size = input_size self.X_train_ben = data[0] self.X_train_mal = data[1] self.X_test_ben = data[2] self.X_test_mal = data[3] self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [], 'fmeasure': []} self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0} self._init_graph() def _init_graph(self): """ Init a tensorflow Graph containing: input data, variables, model, loss, optimizer """ self.graph = tf.Graph() with self.graph.as_default(): tf.set_random_seed(self.random_seed) self.train_data = tf.placeholder(tf.float32, shape=[None, self. 
input_size]) self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) self.train_labels_center = tf.placeholder(tf.float32, shape=[ None, self.third_layer_size]) self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) self.weights = self._initialize_weights() self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324) self.embedding_result = self.embedding_layer(self.train_data) self.embedding_result = tf.layers.Flatten()(self.embedding_result) self.net1 = tf.matmul(self.embedding_result, self.weights['layer1'] ) self.layer1 = tf.layers.batch_normalization(self.net1, training =self.phase) self.layer1 = tf.nn.tanh(self.layer1) self.net2 = tf.matmul(self.layer1, self.weights['layer2']) self.net2 = tf.layers.batch_normalization(self.net2, training= self.phase) self.net2 = tf.nn.relu(self.net2) self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training= self.phase) self.net3 = tf.matmul(self.layer2, self.weights['layer3']) self.layer3 = tf.nn.tanh(self.net3) self.cross_entropy = tf.reduce_mean(tf.losses. mean_squared_error(self.train_labels_center, self.layer3)) self.train_step = tf.train.AdamOptimizer(self.learning_rate ).minimize(self.cross_entropy) self.init = tf.initialize_all_variables() self.sess = tf.Session() self.sess.run(self.init) def _initialize_weights(self): self.all_weights = dict() self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) self.all_weights['layer2'] = tf.Variable(tf.random.normal([self. first_layer_size, self.second_layer_size], mean=0.0, stddev=1)) self.all_weights['layer3'] = tf.Variable(tf.random.normal([self. second_layer_size, self.third_layer_size], mean=0.0, stddev=1)) self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368, self.first_layer_size], minval=-1, maxval=1)) self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self. first_layer_size, self.second_layer_size], minval=-1, maxval=1)) self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self. second_layer_size, self.third_layer_size], minval=-1, maxval=1)) self.all_weights['layer1'] = tf.get_variable('w', [32 * self. input_size, self.first_layer_size], initializer=tf.initializers .random_normal(mean=0, stddev=0.8), regularizer=tf.keras. regularizers.l2(0.01)) self.all_weights['layer2'] = tf.get_variable('w2', [self. first_layer_size, self.second_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. keras.regularizers.l2(0.01)) self.all_weights['layer3'] = tf.get_variable('w3', [self. second_layer_size, self.third_layer_size], initializer=tf. initializers.random_normal(mean=0, stddev=0.8), regularizer=tf. 
keras.regularizers.l2(0.01)) return self.all_weights def kmeans_clustering(self, point, size, true_labels): self.kmeans = KMeans(n_clusters=2, random_state=10, init= 'k-means++', n_init=20).fit(point) self.kmeans_labels = self.kmeans.labels_ self.label_list_0 = np.where(self.kmeans_labels == 0)[0] temp = [true_labels[i][0] for i in self.label_list_0] temp.append(2) counts = np.bincount(temp) if counts[0] > counts[1]: benign_center = self.kmeans.cluster_centers_[0] malware_center = self.kmeans.cluster_centers_[1] else: benign_center = self.kmeans.cluster_centers_[1] malware_center = self.kmeans.cluster_centers_[0] new_labels = np.zeros((size, self.third_layer_size)) for i in range(size): if true_labels[i][0] == 0.0: new_labels[i] = benign_center else: new_labels[i] = malware_center self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center} return new_labels def partial_fit(self, X): feed_dict = {self.train_data: X['batch_data_train']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) new_labels = self.kmeans_clustering(self.points, len(X[ 'batch_data_label']), X['batch_data_label']) self.clusters_dist.append(np.linalg.norm(self.kmeans. cluster_centers_[0] - self.kmeans.cluster_centers_[1])) feed_dicts = {self.train_data: X['batch_data_train'], self. train_labels_center: new_labels} loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts) metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len(X['batch_data_label'])) self.accuracy_list.append(metrics[0]) self.fmeasure_list.append(metrics[3]) return loss def evaluate(self, true_labels, kmeans_labels, size): """ :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1 :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number :param size: number of samples :return: accuracy, precision, recall, f_measure """ self.label_list_0 = np.where(kmeans_labels == 0)[0] self.label_list_1 = np.where(kmeans_labels == 1)[0] temp = [true_labels[i][0] for i in self.label_list_0] temp1 = [true_labels[i][0] for i in self.label_list_1] temp1.append(2) temp.append(2) counts = np.bincount(temp) counts2 = np.bincount(temp1) if counts[0] > counts[1]: accuracy = (counts[0] + counts2[1]) / size precision = counts2[1] / (counts2[1] + counts2[0]) recall = counts2[1] / (counts2[1] + counts[1]) f_measure = 2 * (precision * recall / (precision + recall)) else: accuracy = (counts[1] + counts2[0]) / size precision = counts[1] / (counts[1] + counts[0]) recall = counts[1] / (counts[1] + counts2[1]) f_measure = 2 * (precision * recall / (precision + recall)) return accuracy, precision, recall, f_measure def final_fit(self, X, true_labels): self.phase = False feed_dict = {self.train_data: X['data_test']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) self.predicted_Labels = [] for i in range(len(true_labels)): if np.linalg.norm(self.FinalCenters['benignCenter'] - self. points[i]) < np.linalg.norm(self.FinalCenters[ 'malwareCenter'] - self.points[i]): self.predicted_Labels.append([0]) else: self.predicted_Labels.append([1]) tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels ).ravel() accuracy = (tp + tn) / (tp + tn + fn + fp) precision = tp / (tp + fp) recall = tp / (tp + fn) f_measure = 2 * (precision * recall) / (precision + recall) self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'. 
format(accuracy))) self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}' .format(precision))) self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'. format(recall))) self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'. format(f_measure))) print('accuracy', 'precision', 'recall', 'f_measure', sep='\t\t\t\t\t') print(accuracy, precision, recall, f_measure, sep='\t\t\t') return 0 def train(self): for iter in range(self.iteration): self.log('iteration {} '.format(iter)) for epoch in range(self.epoch): self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.log('epoch %s' % epoch) total_batches = int(len(self.X_train_ben['data']) / self. batch_size) self.log('total_batches in epoch %s : %s ' % (epoch, total_batches)) start_index = 0 end_index = start_index + self.batch_size self.counter = 0 for i in range(total_batches + 1): self.counter += 1 batch_xs = {} batch_xs['batch_data_train'] = np.concatenate([self. X_train_ben['data'][start_index:end_index], self. X_train_mal['data'][start_index:end_index]]) batch_xs['batch_data_label'] = np.concatenate([self. X_train_ben['label'][start_index:end_index], self. X_train_mal['label'][start_index:end_index]]) end_index = end_index + self.batch_size cost = self.partial_fit(batch_xs) batch_test = {} batch_test['data'] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']]) batch_test['label'] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']]) self.final_fit(batch_test, batch_test['label']) self.sess.run(self.init) return (self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list) def log(self, message): print(message) def write_result_to_file(self, variable, message): file = open('results/' + str(self.batch_size) + '/results.txt', 'a+') file.write(message + '\n') file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var( variable['accuracy'])) + '\t' + str(np.mean(variable[ 'precision'])) + '\t' + str(np.mean(variable['recall'])) + '\t' + str(np.mean(variable['fmeasure'])) + '+' + str(np.var(variable[ 'fmeasure'])) + '\n') <|reserved_special_token_1|> import math import numpy as np # import tkinter import tensorflow as tf from matplotlib import axis import os from sklearn.base import BaseEstimator, TransformerMixin from sklearn.cluster import KMeans from sklearn.metrics import confusion_matrix class MD(BaseEstimator, TransformerMixin): def __init__(self, data, input_size, epoch, batch_size, iteration, alpha=1.0, n_neg_samples=10, random_seed=2020): # bind params to class # network parameters. self.iteration = iteration self.epoch = epoch self.batch_size = batch_size self.learning_rate = 0.01 self.random_seed = random_seed self.phase = True self.first_layer_size = 256 self.second_layer_size = 128 self.third_layer_size = 128 self.input_size = input_size # data. self.X_train_ben = data[0] self.X_train_mal = data[1] self.X_test_ben = data[2] self.X_test_mal = data[3] # evaluation. 
self.accuracy_list = [] # accuracy during training self.fmeasure_list = [] # fmeasure during training self.clusters_dist = [] # distance between clusters centroid self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [], 'fmeasure': []} # evaluation metrics of test data for all epochs self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0} # init all variables in a tensorflow graph self._init_graph() def _init_graph(self): ''' Init a tensorflow Graph containing: input data, variables, model, loss, optimizer ''' self.graph = tf.Graph() with self.graph.as_default(): # , tf.device('/cpu:0'): # Set graph level random seed. tf.set_random_seed(self.random_seed) # Input data. self.train_data = tf.placeholder(tf.float32, shape=[None, self.input_size]) # batch_size * input_size self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) # batch_size * 1 self.train_labels_center = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) # batch_size * third_layer_size self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None, self.third_layer_size]) # batch_size * third_layer_size # Variables. self.weights = self._initialize_weights() # the embedding layer. self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324) self.embedding_result = self.embedding_layer(self.train_data) self.embedding_result = tf.layers.Flatten()(self.embedding_result) # the first hidden layer. self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']) # batch_size * first_layer_size self.layer1 = tf.layers.batch_normalization(self.net1, training=self.phase) self.layer1 = tf.nn.tanh(self.layer1) # the second hidden layer. self.net2 = tf.matmul(self.layer1, self.weights['layer2']) self.net2 = tf.layers.batch_normalization(self.net2, training=self.phase) self.net2 = tf.nn.relu(self.net2) self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=self.phase) # the third hidden layer. self.net3 = tf.matmul(self.layer2, self.weights['layer3']) self.layer3 = tf.nn.tanh(self.net3) # loss function. self.cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(self.train_labels_center, self.layer3)) # optimizer. self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy) # init. 
self.init = tf.initialize_all_variables() self.sess = tf.Session() self.sess.run(self.init) def _initialize_weights(self): self.all_weights = dict() self.all_weights['layer1'] = tf.Variable( tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim self.all_weights['layer2'] = tf.Variable( tf.random.normal([self.first_layer_size, self.second_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim self.all_weights['layer3'] = tf.Variable( tf.random.normal([self.second_layer_size, self.third_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim self.all_weights['layer1'] = tf.Variable( tf.random.uniform([10368, self.first_layer_size], minval=-1, maxval=1)) # input_size * attr_dim self.all_weights['layer2'] = tf.Variable( tf.random.uniform([self.first_layer_size, self.second_layer_size], minval=-1, maxval=1)) # input_size * attr_dim self.all_weights['layer3'] = tf.Variable( tf.random.uniform([self.second_layer_size, self.third_layer_size], minval=-1, maxval=1)) # input_size * attr_dim # -------------------------------------------------------------------------- self.all_weights['layer1'] = tf.get_variable("w", [32 * self.input_size, self.first_layer_size], initializer=tf.initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.keras.regularizers.l2( 0.01)) # input_size * attr_dim self.all_weights['layer2'] = tf.get_variable("w2", [self.first_layer_size, self.second_layer_size], initializer=tf.initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.keras.regularizers.l2( 0.01)) # input_size * attr_dim self.all_weights['layer3'] = tf.get_variable("w3", [self.second_layer_size, self.third_layer_size], initializer=tf.initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.keras.regularizers.l2( 0.01)) # input_size * attr_dim return self.all_weights def kmeans_clustering(self, point, size, true_labels): self.kmeans = KMeans(n_clusters=2, random_state=10, init='k-means++', n_init=20).fit(point) self.kmeans_labels = self.kmeans.labels_ # find index of samples that are in the first cluster self.label_list_0 = np.where(self.kmeans_labels == 0)[0] # get labels of samples that are in the first cluster temp = [true_labels[i][0] for i in self.label_list_0] temp.append(2) # determine label(cluster center) of benign and malware group based on the majority samples in each cluster counts = np.bincount(temp) if counts[0] > counts[1]: # counts[0] : number of benign in the first cluster benign_center = self.kmeans.cluster_centers_[0] malware_center = self.kmeans.cluster_centers_[1] else: benign_center = self.kmeans.cluster_centers_[1] malware_center = self.kmeans.cluster_centers_[0] # set label for each sample new_labels = np.zeros((size, self.third_layer_size)) for i in range(size): if true_labels[i][0] == 0.0: new_labels[i] = benign_center else: new_labels[i] = malware_center self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center} return new_labels def partial_fit(self, X): # fit a batch # get network output. feed_dict = {self.train_data: X['batch_data_train']} self.points = self.sess.run((self.layer3), feed_dict=feed_dict) # apply clustering to find expected output. 
new_labels = self.kmeans_clustering(self.points, len(X['batch_data_label']), X['batch_data_label']) self.clusters_dist.append(np.linalg.norm(self.kmeans.cluster_centers_[0] - self.kmeans.cluster_centers_[1])) feed_dicts = {self.train_data: X['batch_data_train'], self.train_labels_center: new_labels} loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts) # print(loss) # print('------------') metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len((X['batch_data_label']))) self.accuracy_list.append(metrics[0]) self.fmeasure_list.append(metrics[3]) return loss def evaluate(self, true_labels, kmeans_labels, size): """ :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1 :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number :param size: number of samples :return: accuracy, precision, recall, f_measure """ # find index of samples that are in the first cluster self.label_list_0 = np.where(kmeans_labels == 0)[0] self.label_list_1 = np.where(kmeans_labels == 1)[0] # get labels of samples that are in the first cluster temp = [true_labels[i][0] for i in self.label_list_0] temp1 = [true_labels[i][0] for i in self.label_list_1] temp1.append(2) temp.append(2) # determine label(cluster center) of benign and malware group based on the majority samples in each cluster counts = np.bincount(temp) counts2 = np.bincount(temp1) if counts[0] > counts[1]: accuracy = (counts[0] + counts2[1]) / size precision = counts2[1] / (counts2[1] + counts2[0]) recall = counts2[1] / (counts2[1] + counts[1]) f_measure = 2 * ((precision * recall) / (precision + recall)) else: accuracy = (counts[1] + counts2[0]) / size precision = counts[1] / (counts[1] + counts[0]) recall = counts[1] / (counts[1] + counts2[1]) f_measure = 2 * ((precision * recall) / (precision + recall)) return accuracy, precision, recall, f_measure def final_fit(self, X, true_labels): self.phase = False # get network output for test data. 
feed_dict = {self.train_data: X['data_test']} self.points = self.sess.run(self.layer3, feed_dict=feed_dict) # determine label of each test sample based on the euclidean distance self.predicted_Labels = [] for i in range(len(true_labels)): if np.linalg.norm(self.FinalCenters['benignCenter'] - self.points[i]) < np.linalg.norm( self.FinalCenters['malwareCenter'] - self.points[i]): self.predicted_Labels.append([0]) else: self.predicted_Labels.append([1]) tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels).ravel() accuracy = (tp + tn) / (tp + tn + fn + fp) precision = tp / (tp + fp) recall = tp / (tp + fn) f_measure = 2 * (precision * recall) / (precision + recall) self.evaluation_metrics_list['accuracy'].append(np.float("{0:.4f}".format(accuracy))) self.evaluation_metrics_list['precision'].append(np.float("{0:.4f}".format(precision))) self.evaluation_metrics_list['recall'].append(np.float("{0:.4f}".format(recall))) self.evaluation_metrics_list['fmeasure'].append(np.float("{0:.4f}".format(f_measure))) print("accuracy", "precision", "recall", "f_measure", sep="\t\t\t\t\t") print(accuracy, precision, recall, f_measure, sep="\t\t\t") return 0 def train(self): # fit a dataset for iter in range(self.iteration): self.log("iteration {} ".format(iter)) for epoch in range(self.epoch): self.accuracy_list = [] self.fmeasure_list = [] self.clusters_dist = [] self.log("epoch %s" % (epoch)) total_batches = int(len(self.X_train_ben['data']) / self.batch_size) self.log('total_batches in epoch %s : %s ' % (epoch, total_batches)) start_index = 0 end_index = start_index + self.batch_size self.counter = 0 # Loop over all batches. for i in range(total_batches + 1): self.counter += 1 # generate a batch data batch_xs = {} batch_xs['batch_data_train'] = np.concatenate( [self.X_train_ben['data'][start_index:end_index], \ self.X_train_mal['data'][start_index:end_index]]) batch_xs['batch_data_label'] = np.concatenate( [self.X_train_ben['label'][start_index:end_index], \ self.X_train_mal['label'][start_index:end_index]]) # Fit training using batch data end_index = end_index + self.batch_size cost = self.partial_fit(batch_xs) # test batch_test = {} batch_test["data"] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']]) batch_test["label"] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']]) self.final_fit(batch_test, batch_test["label"]) # init all variables in a tensorflow graph for the next fold self.sess.run(self.init) return self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list def log(self, message): print(message) def write_result_to_file(self, variable, message): # file = open('result.txt', 'a+') file = open('results/' + str(self.batch_size) + '/results.txt', 'a+') file.write(message + "\n") file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(variable['accuracy'])) + '\t' + str( np.mean(variable['precision'])) + '\t' + str( np.mean(variable['recall'])) + '\t' + str( np.mean(variable['fmeasure'])) + '+' + str(np.var(variable['fmeasure'])) + '\n')
flexible
{ "blob_id": "a9947884e805cc8fcb6bff010a5f6e0ff0bb01fe", "index": 8393, "step-1": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n <mask token>\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n <mask token>\n <mask token>\n <mask token>\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n <mask token>\n <mask token>\n\n def log(self, message):\n print(message)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n 
self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n 
initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n <mask token>\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n <mask token>\n\n def log(self, message):\n print(message)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, 
random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n 
initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n <mask token>\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n\n def train(self):\n for iter in 
range(self.iteration):\n self.log('iteration {} '.format(iter))\n for epoch in range(self.epoch):\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.log('epoch %s' % epoch)\n total_batches = int(len(self.X_train_ben['data']) / self.\n batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch,\n total_batches))\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n for i in range(total_batches + 1):\n self.counter += 1\n batch_xs = {}\n batch_xs['batch_data_train'] = np.concatenate([self.\n X_train_ben['data'][start_index:end_index], self.\n X_train_mal['data'][start_index:end_index]])\n batch_xs['batch_data_label'] = np.concatenate([self.\n X_train_ben['label'][start_index:end_index], self.\n X_train_mal['label'][start_index:end_index]])\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n batch_test = {}\n batch_test['data'] = np.concatenate([self.X_test_ben['data'],\n self.X_test_mal['data']])\n batch_test['label'] = np.concatenate([self.X_test_ben['label'],\n self.X_test_mal['label']])\n self.final_fit(batch_test, batch_test['label'])\n self.sess.run(self.init)\n return (self.accuracy_list, self.fmeasure_list, self.clusters_dist,\n self.evaluation_metrics_list)\n\n def log(self, message):\n print(message)\n <mask token>\n", "step-4": "<mask token>\n\n\nclass MD(BaseEstimator, TransformerMixin):\n\n def __init__(self, data, input_size, epoch, batch_size, iteration,\n alpha=1.0, n_neg_samples=10, random_seed=2020):\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [],\n 'recall': [], 'fmeasure': []}\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n self._init_graph()\n\n def _init_graph(self):\n \"\"\"\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n \"\"\"\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(self.random_seed)\n self.train_data = tf.placeholder(tf.float32, shape=[None, self.\n input_size])\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.train_labels_center = tf.placeholder(tf.float32, shape=[\n None, self.third_layer_size])\n self.train_labels_center_disagree = tf.placeholder(tf.float32,\n shape=[None, self.third_layer_size])\n self.weights = self._initialize_weights()\n self.embedding_layer = tf.keras.layers.Embedding(256, 32,\n input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']\n )\n self.layer1 = tf.layers.batch_normalization(self.net1, training\n =self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=\n self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=\n self.phase)\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = 
tf.nn.tanh(self.net3)\n self.cross_entropy = tf.reduce_mean(tf.losses.\n mean_squared_error(self.train_labels_center, self.layer3))\n self.train_step = tf.train.AdamOptimizer(self.learning_rate\n ).minimize(self.cross_entropy)\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n self.all_weights = dict()\n self.all_weights['layer1'] = tf.Variable(tf.random.normal([10368,\n self.first_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.normal([self.\n first_layer_size, self.second_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.normal([self.\n second_layer_size, self.third_layer_size], mean=0.0, stddev=1))\n self.all_weights['layer1'] = tf.Variable(tf.random.uniform([10368,\n self.first_layer_size], minval=-1, maxval=1))\n self.all_weights['layer2'] = tf.Variable(tf.random.uniform([self.\n first_layer_size, self.second_layer_size], minval=-1, maxval=1))\n self.all_weights['layer3'] = tf.Variable(tf.random.uniform([self.\n second_layer_size, self.third_layer_size], minval=-1, maxval=1))\n self.all_weights['layer1'] = tf.get_variable('w', [32 * self.\n input_size, self.first_layer_size], initializer=tf.initializers\n .random_normal(mean=0, stddev=0.8), regularizer=tf.keras.\n regularizers.l2(0.01))\n self.all_weights['layer2'] = tf.get_variable('w2', [self.\n first_layer_size, self.second_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n self.all_weights['layer3'] = tf.get_variable('w3', [self.\n second_layer_size, self.third_layer_size], initializer=tf.\n initializers.random_normal(mean=0, stddev=0.8), regularizer=tf.\n keras.regularizers.l2(0.01))\n return self.all_weights\n\n def kmeans_clustering(self, point, size, true_labels):\n self.kmeans = KMeans(n_clusters=2, random_state=10, init=\n 'k-means++', n_init=20).fit(point)\n self.kmeans_labels = self.kmeans.labels_\n self.label_list_0 = np.where(self.kmeans_labels == 0)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp.append(2)\n counts = np.bincount(temp)\n if counts[0] > counts[1]:\n benign_center = self.kmeans.cluster_centers_[0]\n malware_center = self.kmeans.cluster_centers_[1]\n else:\n benign_center = self.kmeans.cluster_centers_[1]\n malware_center = self.kmeans.cluster_centers_[0]\n new_labels = np.zeros((size, self.third_layer_size))\n for i in range(size):\n if true_labels[i][0] == 0.0:\n new_labels[i] = benign_center\n else:\n new_labels[i] = malware_center\n self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter':\n malware_center}\n return new_labels\n\n def partial_fit(self, X):\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n new_labels = self.kmeans_clustering(self.points, len(X[\n 'batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.\n cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n feed_dicts = {self.train_data: X['batch_data_train'], self.\n train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step),\n feed_dict=feed_dicts)\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels,\n len(X['batch_data_label']))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param 
true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * (precision * recall / (precision + recall))\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n self.phase = False\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.\n points[i]) < np.linalg.norm(self.FinalCenters[\n 'malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels\n ).ravel()\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n self.evaluation_metrics_list['accuracy'].append(np.float('{0:.4f}'.\n format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float('{0:.4f}'\n .format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float('{0:.4f}'.\n format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float('{0:.4f}'.\n format(f_measure)))\n print('accuracy', 'precision', 'recall', 'f_measure', sep='\\t\\t\\t\\t\\t')\n print(accuracy, precision, recall, f_measure, sep='\\t\\t\\t')\n return 0\n\n def train(self):\n for iter in range(self.iteration):\n self.log('iteration {} '.format(iter))\n for epoch in range(self.epoch):\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n self.log('epoch %s' % epoch)\n total_batches = int(len(self.X_train_ben['data']) / self.\n batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch,\n total_batches))\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n for i in range(total_batches + 1):\n self.counter += 1\n batch_xs = {}\n batch_xs['batch_data_train'] = np.concatenate([self.\n X_train_ben['data'][start_index:end_index], self.\n X_train_mal['data'][start_index:end_index]])\n batch_xs['batch_data_label'] = np.concatenate([self.\n X_train_ben['label'][start_index:end_index], self.\n X_train_mal['label'][start_index:end_index]])\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n batch_test = {}\n batch_test['data'] = np.concatenate([self.X_test_ben['data'],\n self.X_test_mal['data']])\n batch_test['label'] = np.concatenate([self.X_test_ben['label'],\n self.X_test_mal['label']])\n self.final_fit(batch_test, batch_test['label'])\n self.sess.run(self.init)\n 
return (self.accuracy_list, self.fmeasure_list, self.clusters_dist,\n self.evaluation_metrics_list)\n\n def log(self, message):\n print(message)\n\n def write_result_to_file(self, variable, message):\n file = open('results/' + str(self.batch_size) + '/results.txt', 'a+')\n file.write(message + '\\n')\n file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(\n variable['accuracy'])) + '\\t' + str(np.mean(variable[\n 'precision'])) + '\\t' + str(np.mean(variable['recall'])) + '\\t' +\n str(np.mean(variable['fmeasure'])) + '+' + str(np.var(variable[\n 'fmeasure'])) + '\\n')\n", "step-5": "import math\nimport numpy as np\n# import tkinter\nimport tensorflow as tf\nfrom matplotlib import axis\nimport os\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\n\n\nclass MD(BaseEstimator, TransformerMixin):\n def __init__(self, data, input_size, epoch,\n batch_size, iteration, alpha=1.0, n_neg_samples=10,\n random_seed=2020):\n # bind params to class\n\n # network parameters.\n self.iteration = iteration\n self.epoch = epoch\n self.batch_size = batch_size\n self.learning_rate = 0.01\n self.random_seed = random_seed\n self.phase = True\n self.first_layer_size = 256\n self.second_layer_size = 128\n self.third_layer_size = 128\n self.input_size = input_size\n\n # data.\n self.X_train_ben = data[0]\n self.X_train_mal = data[1]\n self.X_test_ben = data[2]\n self.X_test_mal = data[3]\n\n # evaluation.\n self.accuracy_list = [] # accuracy during training\n self.fmeasure_list = [] # fmeasure during training\n self.clusters_dist = [] # distance between clusters centroid\n self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [],\n 'fmeasure': []} # evaluation metrics of test data for all epochs\n\n self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}\n\n # init all variables in a tensorflow graph\n self._init_graph()\n\n def _init_graph(self):\n '''\n Init a tensorflow Graph containing: input data, variables, model, loss, optimizer\n '''\n self.graph = tf.Graph()\n with self.graph.as_default(): # , tf.device('/cpu:0'):\n\n # Set graph level random seed.\n tf.set_random_seed(self.random_seed)\n\n # Input data.\n\n self.train_data = tf.placeholder(tf.float32,\n shape=[None, self.input_size]) # batch_size * input_size\n self.train_labels = tf.placeholder(tf.float32, shape=[None, 1]) # batch_size * 1\n self.train_labels_center = tf.placeholder(tf.float32, shape=[None,\n self.third_layer_size]) # batch_size * third_layer_size\n self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None,\n self.third_layer_size]) # batch_size * third_layer_size\n\n # Variables.\n self.weights = self._initialize_weights()\n\n # the embedding layer.\n self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324)\n self.embedding_result = self.embedding_layer(self.train_data)\n self.embedding_result = tf.layers.Flatten()(self.embedding_result)\n\n # the first hidden layer.\n self.net1 = tf.matmul(self.embedding_result, self.weights['layer1']) # batch_size * first_layer_size\n self.layer1 = tf.layers.batch_normalization(self.net1, training=self.phase)\n self.layer1 = tf.nn.tanh(self.layer1)\n\n # the second hidden layer.\n self.net2 = tf.matmul(self.layer1, self.weights['layer2'])\n self.net2 = tf.layers.batch_normalization(self.net2, training=self.phase)\n self.net2 = tf.nn.relu(self.net2)\n self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=self.phase)\n\n # the third 
hidden layer.\n self.net3 = tf.matmul(self.layer2, self.weights['layer3'])\n self.layer3 = tf.nn.tanh(self.net3)\n\n # loss function.\n self.cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(self.train_labels_center, self.layer3))\n\n # optimizer.\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy)\n\n # init.\n self.init = tf.initialize_all_variables()\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n def _initialize_weights(self):\n\n self.all_weights = dict()\n\n self.all_weights['layer1'] = tf.Variable(\n tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.Variable(\n tf.random.normal([self.first_layer_size, self.second_layer_size], mean=0.0,\n stddev=1)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.Variable(\n tf.random.normal([self.second_layer_size, self.third_layer_size], mean=0.0,\n stddev=1)) # input_size * attr_dim\n\n self.all_weights['layer1'] = tf.Variable(\n tf.random.uniform([10368, self.first_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.Variable(\n tf.random.uniform([self.first_layer_size, self.second_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.Variable(\n tf.random.uniform([self.second_layer_size, self.third_layer_size], minval=-1,\n maxval=1)) # input_size * attr_dim\n # --------------------------------------------------------------------------\n self.all_weights['layer1'] = tf.get_variable(\"w\", [32 * self.input_size, self.first_layer_size],\n initializer=tf.initializers.random_normal(mean=0, stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n self.all_weights['layer2'] = tf.get_variable(\"w2\", [self.first_layer_size, self.second_layer_size],\n initializer=tf.initializers.random_normal(mean=0,\n stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n\n self.all_weights['layer3'] = tf.get_variable(\"w3\", [self.second_layer_size, self.third_layer_size],\n initializer=tf.initializers.random_normal(mean=0, stddev=0.8),\n regularizer=tf.keras.regularizers.l2(\n 0.01)) # input_size * attr_dim\n\n return self.all_weights\n\n def kmeans_clustering(self, point, size, true_labels):\n self.kmeans = KMeans(n_clusters=2, random_state=10, init='k-means++', n_init=20).fit(point)\n\n self.kmeans_labels = self.kmeans.labels_\n\n # find index of samples that are in the first cluster\n self.label_list_0 = np.where(self.kmeans_labels == 0)[0]\n\n # get labels of samples that are in the first cluster\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp.append(2)\n\n # determine label(cluster center) of benign and malware group based on the majority samples in each cluster\n counts = np.bincount(temp)\n\n if counts[0] > counts[1]: # counts[0] : number of benign in the first cluster\n benign_center = self.kmeans.cluster_centers_[0]\n malware_center = self.kmeans.cluster_centers_[1]\n else:\n benign_center = self.kmeans.cluster_centers_[1]\n malware_center = self.kmeans.cluster_centers_[0]\n\n # set label for each sample\n new_labels = np.zeros((size, self.third_layer_size))\n\n for i in range(size):\n if true_labels[i][0] == 0.0:\n new_labels[i] = benign_center\n else:\n new_labels[i] = malware_center\n\n self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center}\n\n return new_labels\n\n def partial_fit(self, X): # fit a batch\n\n # get 
network output.\n feed_dict = {self.train_data: X['batch_data_train']}\n self.points = self.sess.run((self.layer3), feed_dict=feed_dict)\n\n # apply clustering to find expected output.\n new_labels = self.kmeans_clustering(self.points, len(X['batch_data_label']), X['batch_data_label'])\n self.clusters_dist.append(np.linalg.norm(self.kmeans.cluster_centers_[0] - self.kmeans.cluster_centers_[1]))\n\n feed_dicts = {self.train_data: X['batch_data_train'],\n self.train_labels_center: new_labels}\n loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts)\n\n # print(loss)\n # print('------------')\n\n metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len((X['batch_data_label'])))\n self.accuracy_list.append(metrics[0])\n self.fmeasure_list.append(metrics[3])\n\n return loss\n\n def evaluate(self, true_labels, kmeans_labels, size):\n \"\"\"\n :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1\n :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number\n :param size: number of samples\n\n :return: accuracy, precision, recall, f_measure\n\n \"\"\"\n\n # find index of samples that are in the first cluster\n self.label_list_0 = np.where(kmeans_labels == 0)[0]\n self.label_list_1 = np.where(kmeans_labels == 1)[0]\n\n # get labels of samples that are in the first cluster\n temp = [true_labels[i][0] for i in self.label_list_0]\n temp1 = [true_labels[i][0] for i in self.label_list_1]\n temp1.append(2)\n temp.append(2)\n\n # determine label(cluster center) of benign and malware group based on the majority samples in each cluster\n counts = np.bincount(temp)\n counts2 = np.bincount(temp1)\n\n if counts[0] > counts[1]:\n accuracy = (counts[0] + counts2[1]) / size\n precision = counts2[1] / (counts2[1] + counts2[0])\n recall = counts2[1] / (counts2[1] + counts[1])\n f_measure = 2 * ((precision * recall) / (precision + recall))\n else:\n accuracy = (counts[1] + counts2[0]) / size\n precision = counts[1] / (counts[1] + counts[0])\n recall = counts[1] / (counts[1] + counts2[1])\n f_measure = 2 * ((precision * recall) / (precision + recall))\n\n return accuracy, precision, recall, f_measure\n\n def final_fit(self, X, true_labels):\n\n self.phase = False\n\n # get network output for test data.\n feed_dict = {self.train_data: X['data_test']}\n self.points = self.sess.run(self.layer3, feed_dict=feed_dict)\n\n # determine label of each test sample based on the euclidean distance\n self.predicted_Labels = []\n for i in range(len(true_labels)):\n if np.linalg.norm(self.FinalCenters['benignCenter'] - self.points[i]) < np.linalg.norm(\n self.FinalCenters['malwareCenter'] - self.points[i]):\n self.predicted_Labels.append([0])\n else:\n self.predicted_Labels.append([1])\n\n tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels).ravel()\n\n accuracy = (tp + tn) / (tp + tn + fn + fp)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_measure = 2 * (precision * recall) / (precision + recall)\n\n self.evaluation_metrics_list['accuracy'].append(np.float(\"{0:.4f}\".format(accuracy)))\n self.evaluation_metrics_list['precision'].append(np.float(\"{0:.4f}\".format(precision)))\n self.evaluation_metrics_list['recall'].append(np.float(\"{0:.4f}\".format(recall)))\n self.evaluation_metrics_list['fmeasure'].append(np.float(\"{0:.4f}\".format(f_measure)))\n\n print(\"accuracy\", \"precision\", \"recall\", \"f_measure\", sep=\"\\t\\t\\t\\t\\t\")\n print(accuracy, precision, 
recall, f_measure, sep=\"\\t\\t\\t\")\n\n return 0\n\n def train(self): # fit a dataset\n\n for iter in range(self.iteration):\n self.log(\"iteration {} \".format(iter))\n\n for epoch in range(self.epoch):\n\n self.accuracy_list = []\n self.fmeasure_list = []\n self.clusters_dist = []\n\n self.log(\"epoch %s\" % (epoch))\n\n total_batches = int(len(self.X_train_ben['data']) / self.batch_size)\n self.log('total_batches in epoch %s : %s ' % (epoch, total_batches))\n\n start_index = 0\n end_index = start_index + self.batch_size\n self.counter = 0\n\n # Loop over all batches.\n for i in range(total_batches + 1):\n self.counter += 1\n\n # generate a batch data\n batch_xs = {}\n\n batch_xs['batch_data_train'] = np.concatenate(\n [self.X_train_ben['data'][start_index:end_index], \\\n self.X_train_mal['data'][start_index:end_index]])\n\n batch_xs['batch_data_label'] = np.concatenate(\n [self.X_train_ben['label'][start_index:end_index], \\\n self.X_train_mal['label'][start_index:end_index]])\n\n # Fit training using batch data\n end_index = end_index + self.batch_size\n cost = self.partial_fit(batch_xs)\n\n\n # test\n batch_test = {}\n batch_test[\"data\"] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']])\n batch_test[\"label\"] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']])\n\n self.final_fit(batch_test, batch_test[\"label\"])\n\n # init all variables in a tensorflow graph for the next fold\n self.sess.run(self.init)\n\n return self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list\n\n def log(self, message):\n print(message)\n\n def write_result_to_file(self, variable, message):\n # file = open('result.txt', 'a+')\n file = open('results/' + str(self.batch_size) + '/results.txt', 'a+')\n file.write(message + \"\\n\")\n file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(variable['accuracy'])) + '\\t' + str(\n np.mean(variable['precision'])) + '\\t' + str(\n np.mean(variable['recall'])) + '\\t' + str(\n np.mean(variable['fmeasure'])) + '+' + str(np.var(variable['fmeasure'])) + '\\n')\n\n", "step-ids": [ 4, 8, 9, 11, 13 ] }
[ 4, 8, 9, 11, 13 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep' seed = 300 n_fold = 2 epoch = 50 resume_from = None batch_size = 32 num_workers = 32 imgsize = 768, 768 loss = dict(name='BCEWithLogitsLoss', params=dict()) optim = dict(name='AdamW', params=dict(lr=0.0003, betas=(0.9, 0.999), eps= 1e-08, weight_decay=0.01)) model = dict(name='se_resnext50_32x4d') normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]} totensor = dict(name='ToTensor', params=dict(normalize=normalize)) crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width= imgsize[1], scale=(0.7, 1.0), p=1.0)) crop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7, 1.0), p=1.0)) rotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7)) hflip = dict(name='HorizontalFlip', params=dict(p=0.5)) <|reserved_special_token_0|> rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7)) dicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9) ) dicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio= 0.05, p=0.7)) elastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5)) grid_distortion = dict(name='GridDistortion', params=dict(), p=0.5) window_policy = 1 data = dict(train=dict(dataset_type='CustomDataset', annotations= './cache/train-runmila_2folds_seed123.pkl', imgdir= './input/runmila_i768', imgsize=imgsize, n_grad_acc=2, loader=dict( shuffle=True, batch_size=batch_size, drop_last=True, num_workers= num_workers, pin_memory=False), transforms=[crop, hflip, rotate, dicomnoise, totensor], dataset_policy=1, window_policy=window_policy), valid=dict(dataset_type='CustomDataset', annotations= './cache/train-runmila_2folds_seed123.pkl', imgdir= './input/runmila_i768', imgsize=imgsize, loader=dict(shuffle=False, batch_size=batch_size, drop_last=False, num_workers=num_workers, pin_memory=False), transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1, window_policy= window_policy), test=dict(dataset_type='CustomDataset', annotations= './cache/test.pkl', imgdir='./input/test_runmila_i768', imgsize=imgsize, loader=dict(shuffle=False, batch_size=batch_size, drop_last=False, num_workers=num_workers, pin_memory=False), transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1, window_policy=window_policy)) <|reserved_special_token_1|> workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep' seed = 300 n_fold = 2 epoch = 50 resume_from = None batch_size = 32 num_workers = 32 imgsize = (768, 768) #(height, width) loss = dict( name='BCEWithLogitsLoss', params=dict(), ) optim = dict( name='AdamW', params=dict( lr=0.0003, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, ), ) model = dict( name='se_resnext50_32x4d' ) normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],} totensor = dict(name='ToTensor', params=dict(normalize=normalize)) crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0)) crop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0)) rotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7)) hflip = dict(name='HorizontalFlip', params=dict(p=0.5)) ''' Additional augmentarions ------------------------ vflip = dict(name='VerticalFlip', 
params=dict(p=0.5,)) random_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5)) #gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5)) #iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5)) #iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5)) hue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4)) cut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3)) blur = dict(name='Blur', params=dict(blur_limit=4, p=.25)) shift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1)) ''' rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7)) dicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9)) dicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7)) elastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5)) grid_distortion = dict(name='GridDistortion', params=dict(), p=0.5) window_policy = 1 data = dict( train=dict( dataset_type='CustomDataset', annotations='./cache/train-runmila_2folds_seed123.pkl', imgdir='./input/runmila_i768', imgsize=imgsize, n_grad_acc=2, loader=dict( shuffle=True, batch_size=batch_size, drop_last=True, num_workers=num_workers, pin_memory=False, ), transforms=[crop, hflip, rotate, dicomnoise, totensor], dataset_policy=1, window_policy=window_policy, ), valid = dict( dataset_type='CustomDataset', annotations='./cache/train-runmila_2folds_seed123.pkl', imgdir='./input/runmila_i768', imgsize=imgsize, loader=dict( shuffle=False, batch_size=batch_size, drop_last=False, num_workers=num_workers, pin_memory=False, ), transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1, window_policy=window_policy, ), test = dict( dataset_type='CustomDataset', annotations='./cache/test.pkl', imgdir='./input/test_runmila_i768', imgsize=imgsize, loader=dict( shuffle=False, batch_size=batch_size, drop_last=False, num_workers=num_workers, pin_memory=False, ), transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1, window_policy=window_policy, ), )
flexible
{ "blob_id": "8030bdb6c9f0b7114916d7abc245ff680d1fc917", "index": 6790, "step-1": "<mask token>\n", "step-2": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\nn_fold = 2\nepoch = 50\nresume_from = None\nbatch_size = 32\nnum_workers = 32\nimgsize = 768, 768\nloss = dict(name='BCEWithLogitsLoss', params=dict())\noptim = dict(name='AdamW', params=dict(lr=0.0003, betas=(0.9, 0.999), eps=\n 1e-08, weight_decay=0.01))\nmodel = dict(name='se_resnext50_32x4d')\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=\n imgsize[1], scale=(0.7, 1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0],\n width=imgsize[1], scale=(0.7, 1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = dict(name='HorizontalFlip', params=dict(p=0.5))\n<mask token>\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9)\n )\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=\n 0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1,\n sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\nwindow_policy = 1\ndata = dict(train=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, n_grad_acc=2, loader=dict(\n shuffle=True, batch_size=batch_size, drop_last=True, num_workers=\n num_workers, pin_memory=False), transforms=[crop, hflip, rotate,\n dicomnoise, totensor], dataset_policy=1, window_policy=window_policy),\n valid=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, loader=dict(shuffle=False,\n batch_size=batch_size, drop_last=False, num_workers=num_workers,\n pin_memory=False), transforms=[crop_test, hflip, rotate_test,\n dicomnoise_test, totensor], dataset_policy=1, window_policy=\n window_policy), test=dict(dataset_type='CustomDataset', annotations=\n './cache/test.pkl', imgdir='./input/test_runmila_i768', imgsize=imgsize,\n loader=dict(shuffle=False, batch_size=batch_size, drop_last=False,\n num_workers=num_workers, pin_memory=False), transforms=[crop_test,\n hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1,\n window_policy=window_policy))\n", "step-3": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\n\nn_fold = 2\nepoch = 50\nresume_from = None\n\nbatch_size = 32\nnum_workers = 32\nimgsize = (768, 768) #(height, width)\n\nloss = dict(\n name='BCEWithLogitsLoss',\n params=dict(),\n)\n\noptim = dict(\n name='AdamW',\n params=dict(\n lr=0.0003,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=0.01,\n ),\n)\n\nmodel = dict(\n name='se_resnext50_32x4d'\n)\n\n\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = 
dict(name='HorizontalFlip', params=dict(p=0.5))\n\n'''\nAdditional augmentarions\n------------------------\n\nvflip = dict(name='VerticalFlip', params=dict(p=0.5,))\nrandom_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))\n#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))\n#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))\n#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))\nhue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))\ncut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))\nblur = dict(name='Blur', params=dict(blur_limit=4, p=.25))\nshift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1))\n'''\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\n\n\nwindow_policy = 1\n\ndata = dict(\n train=dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n n_grad_acc=2,\n loader=dict(\n shuffle=True,\n batch_size=batch_size,\n drop_last=True,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop, hflip, rotate, dicomnoise, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n valid = dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n test = dict(\n dataset_type='CustomDataset',\n annotations='./cache/test.pkl',\n imgdir='./input/test_runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
flexible
{ "blob_id": "950929edc82bf78ee33df117fba370b937255adc", "index": 1703, "step-1": "<mask token>\n\n\nclass mutasibankjurnal(ReportXlsx):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass mutasibankjurnal(ReportXlsx):\n\n def generate_xlsx_report(self, workbook, data, wizard):\n bold = workbook.add_format({'bold': True})\n middle = workbook.add_format({'bold': True, 'top': 1})\n left = workbook.add_format({'left': 1, 'top': 1, 'bold': True})\n right = workbook.add_format({'right': 1, 'top': 1})\n top = workbook.add_format({'top': 1})\n report_format = workbook.add_format({'font_size': 24})\n lang_code = self.env.user.lang or 'en_US'\n date_format = self.env['res.lang']._lang_get(lang_code).date_format\n report = '991'\n\n def get_date_format(date):\n if date:\n date = datetime.strptime(date, DEFAULT_SERVER_DATE_FORMAT)\n date = date.strftime('%d/%m/%Y')\n return date\n\n def _header_sheet(sheet):\n sheet.write_merge(0, 4, 'Mutasi Bank', report_format)\n sheet.write_merge(2, 0, _('Company:'), bold)\n sheet.write_merge(3, 0, self.env.user.company_id.name)\n head = [{'name': _('NO Bukti'), 'larg': 10, 'col': {}}, {'name': _(\n 'vendor /supplier'), 'larg': 10, 'col': {}}, {'name': _(\n 'Tanggal'), 'larg': 10, 'col': {}}, {'name': _('Nomor Cek'),\n 'larg': 15, 'col': {}}, {'name': _('Pemasukan'), 'larg': 10,\n 'col': {}}, {'name': _('Pengeluaran'), 'larg': 20, 'col': {}},\n {'name': _('Saldo Akhir'), 'larg': 20, 'col': {}}, {'name': _(\n 'Keterangan'), 'larg': 10, 'col': {}}]\n table = []\n for h in head:\n col = {'header': h['name']}\n col.update(h['col'])\n table.append(col)\n\n def _set_line(line):\n sheet.write(i, 0, line.get('name', ''))\n if line.get('nama_partner') == False:\n sheet.write(i, 1, line.get('', ''))\n else:\n sheet.write(i, 1, line.get('nama_partner', ''))\n sheet.write(i, 2, line.get('payment_date', ''))\n if line.get('nomor_cek') == False:\n sheet.write(i, 3, line.get('', ''))\n else:\n sheet.write(i, 3, line.get('nomor_cek', ''))\n if line.get('payment_type') == 'inbound':\n sheet.write(i, 4, line.get('amount', ''))\n else:\n sheet.write(i, 4, 0.0)\n if line.get('payment_type') == 'outbound':\n sheet.write(i, 5, line.get('amount', ''))\n else:\n sheet.write(i, 5, 0.0)\n sheet.write(i, 6, line.get('nsaldo_akhir', ''))\n if line.get('note') == False:\n sheet.write(i, 7, '')\n else:\n sheet.write(i, 7, line.get('note', ''))\n\n def _set_table(start_row, row):\n sheet.add_table(start_row - 1, 0, row, len(head) - 1, {\n 'columns': table, 'style': 'Table Style Light 9'})\n all_lines2 = wizard._fill_details_dict()\n sheet = workbook.add_worksheet('Mutasi bank')\n if all_lines2:\n row = 9\n start_row = row\n range_tanggal = get_date_format(wizard.date_from_mb\n ) + ' s.d ' + get_date_format(wizard.date_to_mb)\n sheet.write(0, 0, 'PT. 
PAN ASIA JAYA ABADI', bold)\n sheet.write(1, 0, 'MUTASI REKENING')\n sheet.write(2, 0, 'TANGGAL : ' + range_tanggal)\n if wizard.jenis_journal_mb.currency_id.name == False:\n sheet.write(3, 0, 'Mata Uang : IDR')\n else:\n sheet.write(3, 0, 'Mata Uang : %s' % wizard.\n jenis_journal_mb.currency_id.name)\n sheet.write(4, 0, 'Bank : %s ' % wizard.jenis_journal_mb.\n bank_account_id.sanitized_acc_number)\n sheet.write(5, 0, '' + wizard.jenis_journal_mb.\n default_debit_account_id.code + ' ' + wizard.\n jenis_journal_mb.name)\n for i, line in enumerate(all_lines2):\n i += row\n _set_line(line)\n row = i\n for j, h in enumerate(head):\n sheet.set_column(j, j, h['larg'])\n _set_table(start_row, row)\n sheet.merge_range('A' + str(start_row - 1) + ':D' + str(start_row -\n 1), 'Saldo Awal :')\n sheet.write(start_row - 2, 4, line.get('saldo_awal', ''))\n sheet.merge_range('A' + str(row + 2) + ':D' + str(row + 2), 'Total')\n sheet.write_formula(row + 1, 4, '=SUM(E10:E' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 5, '=SUM(F10:F' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 6, '=E' + str(start_row - 1) +\n '+SUM(E10:E' + str(row + 1) + ')-SUM(F10:F' + str(row + 1) + ')')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass mutasibankjurnal(ReportXlsx):\n\n def generate_xlsx_report(self, workbook, data, wizard):\n bold = workbook.add_format({'bold': True})\n middle = workbook.add_format({'bold': True, 'top': 1})\n left = workbook.add_format({'left': 1, 'top': 1, 'bold': True})\n right = workbook.add_format({'right': 1, 'top': 1})\n top = workbook.add_format({'top': 1})\n report_format = workbook.add_format({'font_size': 24})\n lang_code = self.env.user.lang or 'en_US'\n date_format = self.env['res.lang']._lang_get(lang_code).date_format\n report = '991'\n\n def get_date_format(date):\n if date:\n date = datetime.strptime(date, DEFAULT_SERVER_DATE_FORMAT)\n date = date.strftime('%d/%m/%Y')\n return date\n\n def _header_sheet(sheet):\n sheet.write_merge(0, 4, 'Mutasi Bank', report_format)\n sheet.write_merge(2, 0, _('Company:'), bold)\n sheet.write_merge(3, 0, self.env.user.company_id.name)\n head = [{'name': _('NO Bukti'), 'larg': 10, 'col': {}}, {'name': _(\n 'vendor /supplier'), 'larg': 10, 'col': {}}, {'name': _(\n 'Tanggal'), 'larg': 10, 'col': {}}, {'name': _('Nomor Cek'),\n 'larg': 15, 'col': {}}, {'name': _('Pemasukan'), 'larg': 10,\n 'col': {}}, {'name': _('Pengeluaran'), 'larg': 20, 'col': {}},\n {'name': _('Saldo Akhir'), 'larg': 20, 'col': {}}, {'name': _(\n 'Keterangan'), 'larg': 10, 'col': {}}]\n table = []\n for h in head:\n col = {'header': h['name']}\n col.update(h['col'])\n table.append(col)\n\n def _set_line(line):\n sheet.write(i, 0, line.get('name', ''))\n if line.get('nama_partner') == False:\n sheet.write(i, 1, line.get('', ''))\n else:\n sheet.write(i, 1, line.get('nama_partner', ''))\n sheet.write(i, 2, line.get('payment_date', ''))\n if line.get('nomor_cek') == False:\n sheet.write(i, 3, line.get('', ''))\n else:\n sheet.write(i, 3, line.get('nomor_cek', ''))\n if line.get('payment_type') == 'inbound':\n sheet.write(i, 4, line.get('amount', ''))\n else:\n sheet.write(i, 4, 0.0)\n if line.get('payment_type') == 'outbound':\n sheet.write(i, 5, line.get('amount', ''))\n else:\n sheet.write(i, 5, 0.0)\n sheet.write(i, 6, line.get('nsaldo_akhir', ''))\n if line.get('note') == False:\n sheet.write(i, 7, '')\n else:\n sheet.write(i, 7, line.get('note', ''))\n\n def _set_table(start_row, row):\n sheet.add_table(start_row - 1, 0, row, len(head) - 1, {\n 'columns': 
table, 'style': 'Table Style Light 9'})\n all_lines2 = wizard._fill_details_dict()\n sheet = workbook.add_worksheet('Mutasi bank')\n if all_lines2:\n row = 9\n start_row = row\n range_tanggal = get_date_format(wizard.date_from_mb\n ) + ' s.d ' + get_date_format(wizard.date_to_mb)\n sheet.write(0, 0, 'PT. PAN ASIA JAYA ABADI', bold)\n sheet.write(1, 0, 'MUTASI REKENING')\n sheet.write(2, 0, 'TANGGAL : ' + range_tanggal)\n if wizard.jenis_journal_mb.currency_id.name == False:\n sheet.write(3, 0, 'Mata Uang : IDR')\n else:\n sheet.write(3, 0, 'Mata Uang : %s' % wizard.\n jenis_journal_mb.currency_id.name)\n sheet.write(4, 0, 'Bank : %s ' % wizard.jenis_journal_mb.\n bank_account_id.sanitized_acc_number)\n sheet.write(5, 0, '' + wizard.jenis_journal_mb.\n default_debit_account_id.code + ' ' + wizard.\n jenis_journal_mb.name)\n for i, line in enumerate(all_lines2):\n i += row\n _set_line(line)\n row = i\n for j, h in enumerate(head):\n sheet.set_column(j, j, h['larg'])\n _set_table(start_row, row)\n sheet.merge_range('A' + str(start_row - 1) + ':D' + str(start_row -\n 1), 'Saldo Awal :')\n sheet.write(start_row - 2, 4, line.get('saldo_awal', ''))\n sheet.merge_range('A' + str(row + 2) + ':D' + str(row + 2), 'Total')\n sheet.write_formula(row + 1, 4, '=SUM(E10:E' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 5, '=SUM(F10:F' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 6, '=E' + str(start_row - 1) +\n '+SUM(E10:E' + str(row + 1) + ')-SUM(F10:F' + str(row + 1) + ')')\n\n\nmutasibankjurnal('report.dev_accounting_report.report_mutasi_bank_byjurnal',\n 'accounting.report.standard')\n", "step-4": "from datetime import datetime\nfrom odoo.addons.report_xlsx.report.report_xlsx import ReportXlsx\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\nimport xlwt\nfrom odoo import _\nfrom odoo.exceptions import AccessError, UserError\n\n\nclass mutasibankjurnal(ReportXlsx):\n\n def generate_xlsx_report(self, workbook, data, wizard):\n bold = workbook.add_format({'bold': True})\n middle = workbook.add_format({'bold': True, 'top': 1})\n left = workbook.add_format({'left': 1, 'top': 1, 'bold': True})\n right = workbook.add_format({'right': 1, 'top': 1})\n top = workbook.add_format({'top': 1})\n report_format = workbook.add_format({'font_size': 24})\n lang_code = self.env.user.lang or 'en_US'\n date_format = self.env['res.lang']._lang_get(lang_code).date_format\n report = '991'\n\n def get_date_format(date):\n if date:\n date = datetime.strptime(date, DEFAULT_SERVER_DATE_FORMAT)\n date = date.strftime('%d/%m/%Y')\n return date\n\n def _header_sheet(sheet):\n sheet.write_merge(0, 4, 'Mutasi Bank', report_format)\n sheet.write_merge(2, 0, _('Company:'), bold)\n sheet.write_merge(3, 0, self.env.user.company_id.name)\n head = [{'name': _('NO Bukti'), 'larg': 10, 'col': {}}, {'name': _(\n 'vendor /supplier'), 'larg': 10, 'col': {}}, {'name': _(\n 'Tanggal'), 'larg': 10, 'col': {}}, {'name': _('Nomor Cek'),\n 'larg': 15, 'col': {}}, {'name': _('Pemasukan'), 'larg': 10,\n 'col': {}}, {'name': _('Pengeluaran'), 'larg': 20, 'col': {}},\n {'name': _('Saldo Akhir'), 'larg': 20, 'col': {}}, {'name': _(\n 'Keterangan'), 'larg': 10, 'col': {}}]\n table = []\n for h in head:\n col = {'header': h['name']}\n col.update(h['col'])\n table.append(col)\n\n def _set_line(line):\n sheet.write(i, 0, line.get('name', ''))\n if line.get('nama_partner') == False:\n sheet.write(i, 1, line.get('', ''))\n else:\n sheet.write(i, 1, line.get('nama_partner', ''))\n sheet.write(i, 2, line.get('payment_date', ''))\n if 
line.get('nomor_cek') == False:\n sheet.write(i, 3, line.get('', ''))\n else:\n sheet.write(i, 3, line.get('nomor_cek', ''))\n if line.get('payment_type') == 'inbound':\n sheet.write(i, 4, line.get('amount', ''))\n else:\n sheet.write(i, 4, 0.0)\n if line.get('payment_type') == 'outbound':\n sheet.write(i, 5, line.get('amount', ''))\n else:\n sheet.write(i, 5, 0.0)\n sheet.write(i, 6, line.get('nsaldo_akhir', ''))\n if line.get('note') == False:\n sheet.write(i, 7, '')\n else:\n sheet.write(i, 7, line.get('note', ''))\n\n def _set_table(start_row, row):\n sheet.add_table(start_row - 1, 0, row, len(head) - 1, {\n 'columns': table, 'style': 'Table Style Light 9'})\n all_lines2 = wizard._fill_details_dict()\n sheet = workbook.add_worksheet('Mutasi bank')\n if all_lines2:\n row = 9\n start_row = row\n range_tanggal = get_date_format(wizard.date_from_mb\n ) + ' s.d ' + get_date_format(wizard.date_to_mb)\n sheet.write(0, 0, 'PT. PAN ASIA JAYA ABADI', bold)\n sheet.write(1, 0, 'MUTASI REKENING')\n sheet.write(2, 0, 'TANGGAL : ' + range_tanggal)\n if wizard.jenis_journal_mb.currency_id.name == False:\n sheet.write(3, 0, 'Mata Uang : IDR')\n else:\n sheet.write(3, 0, 'Mata Uang : %s' % wizard.\n jenis_journal_mb.currency_id.name)\n sheet.write(4, 0, 'Bank : %s ' % wizard.jenis_journal_mb.\n bank_account_id.sanitized_acc_number)\n sheet.write(5, 0, '' + wizard.jenis_journal_mb.\n default_debit_account_id.code + ' ' + wizard.\n jenis_journal_mb.name)\n for i, line in enumerate(all_lines2):\n i += row\n _set_line(line)\n row = i\n for j, h in enumerate(head):\n sheet.set_column(j, j, h['larg'])\n _set_table(start_row, row)\n sheet.merge_range('A' + str(start_row - 1) + ':D' + str(start_row -\n 1), 'Saldo Awal :')\n sheet.write(start_row - 2, 4, line.get('saldo_awal', ''))\n sheet.merge_range('A' + str(row + 2) + ':D' + str(row + 2), 'Total')\n sheet.write_formula(row + 1, 4, '=SUM(E10:E' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 5, '=SUM(F10:F' + str(row + 1) + ')')\n sheet.write_formula(row + 1, 6, '=E' + str(start_row - 1) +\n '+SUM(E10:E' + str(row + 1) + ')-SUM(F10:F' + str(row + 1) + ')')\n\n\nmutasibankjurnal('report.dev_accounting_report.report_mutasi_bank_byjurnal',\n 'accounting.report.standard')\n", "step-5": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom odoo.addons.report_xlsx.report.report_xlsx import ReportXlsx\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\nimport xlwt\nfrom odoo import _\nfrom odoo.exceptions import AccessError, UserError\n\nclass mutasibankjurnal(ReportXlsx):\n \n \n def generate_xlsx_report(self, workbook, data, wizard):\n\n # num_format = wizard.company_currency_id.excel_format\n bold = workbook.add_format({'bold': True})\n middle = workbook.add_format({'bold': True, 'top': 1})\n left = workbook.add_format({'left': 1, 'top': 1, 'bold': True})\n right = workbook.add_format({'right': 1, 'top': 1})\n top = workbook.add_format({'top': 1})\n # currency_format = workbook.add_format({'num_format': num_format})\n # c_middle = workbook.add_format({'bold': True, 'top': 1, 'num_format': num_format})\n report_format = workbook.add_format({'font_size': 24})\n # rounding = self.env.user.company_id.currency_id.decimal_places or 2\n lang_code = self.env.user.lang or 'en_US'\n date_format = self.env['res.lang']._lang_get(lang_code).date_format\n\n report = '991'\n\n # def _get_data_float(data):\n ## if data == None or data == False:\n # return 0.0\n # else:\n # return wizard.company_currency_id.round(data) + 0.0\n\n def get_date_format(date):\n if 
date:\n date = datetime.strptime(date, DEFAULT_SERVER_DATE_FORMAT)\n date = date.strftime('%d/%m/%Y')\n return date\n\n def _header_sheet(sheet):\n sheet.write_merge(0, 4, 'Mutasi Bank', report_format)\n # style_title = xlwt.easyxf(\"font:height 300; font: name Liberation Sans, bold on,color black; align: horiz center\")\n #sheet.write_merge(0, 1, 0, 5,'LAPORAN ', style = style_title)\n sheet.write_merge(2, 0, _('Company:'), bold)\n sheet.write_merge(3, 0, self.env.user.company_id.name,)\n #sheet.write(4, 0, _('Print on %s') % report.print_time)\n\n # sheet.write(2, 2, _('Start Date : %s ') % wizard.date_from if wizard.date_from else '')\n # sheet.write(3, 2, _('End Date : %s ') % wizard.date_to if wizard.date_to else '')\n\n # sheet.write(2, 4, _('Target Moves:'), bold)\n # sheet.write(3, 4, _('All Entries') if wizard.target_move == 'all' else _('All Posted Entries'))\n\n # sheet.write(2, 6, _('Only UnReconciled Entries') if wizard.reconciled is False else _('With Reconciled Entries'), bold)\n \n head = [\n \n {'name': _('NO Bukti'),\n 'larg': 10,\n 'col': {}},\n {'name': _('vendor /supplier'),\n 'larg': 10,\n 'col': {}},\n {'name': _('Tanggal'),\n 'larg': 10,\n 'col': {}}, \n {'name': _('Nomor Cek'),\n 'larg': 15,\n 'col': {}}, \n {'name': _('Pemasukan'),\n 'larg': 10,\n 'col': {}},\n {'name': _('Pengeluaran'),\n 'larg': 20,\n 'col': {}},\n {'name': _('Saldo Akhir'),\n 'larg': 20,\n 'col': {}},\n {'name': _('Keterangan'),\n 'larg': 10,\n 'col': {}},\n \n ]\n table = []\n for h in head:\n col = {'header': h['name']}\n col.update(h['col'])\n table.append(col)\n\n def _set_line(line):\n #sheet.write(i, 0, get_date_format(line.get('date', '')) if line.get('type_view') != 'init' else 'INIT')\n sheet.write(i, 0, line.get('name', ''))\n if line.get('nama_partner')==False:\n sheet.write(i, 1, line.get('', ''))\n else:\n sheet.write(i, 1, line.get('nama_partner', ''))\n sheet.write(i, 2, line.get('payment_date', ''))\n if line.get('nomor_cek')==False:\n sheet.write(i, 3, line.get('', ''))\n else:\n sheet.write(i, 3, line.get('nomor_cek', ''))\n if line.get('payment_type')=='inbound':\n sheet.write(i, 4, line.get('amount', ''))\n else:\n sheet.write(i, 4, (0.00),)\n if line.get('payment_type')=='outbound':\n sheet.write(i, 5, line.get('amount', ''))\n else:\n sheet.write(i, 5, (0.00),)\n sheet.write(i, 6, line.get('nsaldo_akhir', ''))\n if line.get('note')==False:\n sheet.write(i, 7, (''),)\n else:\n sheet.write(i, 7, line.get('note', ''))\n\n # buat formula untuk men-sum / mentotal sales per user\n # sheet.merge_range('A' + 4 + ':D' + 4, nsaldo_awal, text_style)\n \n # def _set_line2(line2):\n # sheet.merge_range('A' + str(start_row-1) + ':D' + str(start_row-1), '')\n # sheet.write(start_row-2, 4, line2, '')\n\n\n# [{'saldo_awal ': 0}]: None\n\n def _set_table(start_row, row):\n sheet.add_table(start_row - 1, 0, row, len(head) - 1,\n {\n 'columns': table,\n 'style': 'Table Style Light 9',\n })\n\n # all_lines = wizard._sql_get_line_mutasibank()\n all_lines2 = wizard._fill_details_dict() \n # raise UserError(all_lines.saldo_awal)\n # saldoawal =all_lines.Total_saldo_awal \n\n # Pivot workbook\n sheet = workbook.add_worksheet('Mutasi bank')\n # _header_sheet(sheet)\n\n # Head\n # if all_lines:\n # for line2 in enumerate(all_lines):\n # _set_line(line2)\n # raise UserError(all_lines.saldo_awal)\n if all_lines2:\n row = 9\n\n start_row = row\n range_tanggal = get_date_format(wizard.date_from_mb) + ' s.d '+get_date_format(wizard.date_to_mb)\n sheet.write(0, 0, ('PT. 
PAN ASIA JAYA ABADI'), bold)\n sheet.write(1, 0, ('MUTASI REKENING'),)\n sheet.write(2, 0,('TANGGAL : '+range_tanggal),)\n if wizard.jenis_journal_mb.currency_id.name==False:\n sheet.write(3, 0,('Mata Uang : IDR'),)\n else:\n sheet.write(3, 0,('Mata Uang : %s') % wizard.jenis_journal_mb.currency_id.name,)\n sheet.write(4, 0, ('Bank : %s ') % wizard.jenis_journal_mb.bank_account_id.sanitized_acc_number,)\n sheet.write(5, 0, ('' +wizard.jenis_journal_mb.default_debit_account_id.code+' '+wizard.jenis_journal_mb.name),)\n\n for i, line in enumerate(all_lines2):\n i += row\n _set_line(line)\n row = i\n\n for j, h in enumerate(head):\n sheet.set_column(j, j, h['larg'])\n\n _set_table(start_row, row)\n # raise UserError(st)\n # if all_lines:\n # for line2 in enumerate(all_lines):\n # raise UserError(line2)\n # _set_line2(line2)\n # raise UserError(line2.Total_saldo_awal)\n\n sheet.merge_range('A' + str(start_row-1) + ':D' + str(start_row-1), 'Saldo Awal :')\n sheet.write(start_row-2, 4, line.get('saldo_awal', ''))\n# # [{'saldo_awal ': 0}]: None\n sheet.merge_range('A' + str(row+2) + ':D' + str(row+2), 'Total')\n sheet.write_formula(row+1, 4, '=SUM(E10:E' + str(row+1) + ')')\n sheet.write_formula(row+1, 5, '=SUM(F10:F' + str(row+1) + ')')\n ##if line.get('saldo_awal')==0:\n # sheet.write_formula(row+1, 6, ('0.00'), )\n #else:\n sheet.write_formula(row+1, 6, '=E' + str(start_row-1)+'+SUM(E10:E' + str(row+1)+ ')-SUM(F10:F' + str(row+1)+')' )\n # sheet.write_formula(start_row-2, 4, line.get('saldo_awal', '')\nmutasibankjurnal('report.dev_accounting_report.report_mutasi_bank_byjurnal', 'accounting.report.standard')", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class SingleTouchReading: <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(self, ribbon): self.ribbon = ribbon self.read_raw_lower() self.read_raw_upper() self.process_readings() def prepare_to_read(self): activate_single_touch_transistors() self.ribbon.ads.mode = ADS.Mode.SINGLE self.ribbon.ads.gain = ads_gain_single <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class ContinuousSingleTouchReading(SingleTouchReading): @staticmethod def prepare_to_read(): activate_single_touch_transistors() ads.mode = ADS.Mode.CONTINUOUS ads.gain = ads_gain_single self.ribbon.ads_single.value class CheapSingleTouchReading(SingleTouchReading): GATE_THRESHOLD = 1500 GATE_THRESHOLD = 4000 def read_raw_lower(self): self.prepare_to_read() single_pull.value = False self.raw_lower = self.ribbon.rib_mid.value def read_raw_upper(self): self.prepare_to_read() single_pull.value = True self.raw_upper = self.ribbon.rib_mid.value class DualTouchReading: __slots__ = ['raw_a', 'raw_b'] def __init__(self, ribbon): self.ribbon = ribbon self.prepare_to_read() try: self.raw_a = self.ribbon.ads_dual_top.value self.raw_b = self.ribbon.ads_dual_bot.value except OSError as exception: raise I2CError(exception) def prepare_to_read(self): activate_dual_touch_transistors() self.ribbon.ads.gain = ads_gain_dual class ProcessedDualTouchReading: __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new'] DELTA_THRESHOLD = -4 TWO_TOUCH_THRESHOLD = 2 TWO_TOUCH_THRESHOLD_SLACK = 0.05 def __init__(self, ribbon, blink=False): self.ribbon = ribbon def clear_filters(): ribbon.cheap_single_filter.clear() ribbon.dual_bot_filter.clear() ribbon.dual_top_filter.clear() previous_gate = ribbon.previous_gate single_before = ribbon.processed_cheap_single_touch_reading() if not single_before.gate: self.gate = False clear_filters() return with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): dual_reading = ribbon.dual_touch_reading() single_after = ribbon.cheap_single_touch_reading() if not single_after.gate: self.gate = False clear_filters() return if not previous_gate: clear_filters() self.gate = True raw_mid = (single_before.raw_value + single_after.raw_value) / 2 raw_top = dual_reading.raw_a raw_bot = dual_reading.raw_b top = raw_top bot = raw_bot mid = raw_mid top = ribbon.dual_touch_top_to_neopixel_calibration(top) bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot) mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid) mid = ribbon.cheap_single_filter(mid) if int(raw_top) == 32767: top = mid if int(raw_bot) == 32767: bot = mid delta = top - bot if delta <= self.DELTA_THRESHOLD: ribbon.dual_num_fingers = 1 elif not previous_gate: ribbon.dual_num_fingers = (2 if delta > self. TWO_TOUCH_THRESHOLD else 1) elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 2 elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 1 self.num_fingers = ribbon.dual_num_fingers if self.num_fingers == 1: bot = top = mid elif bot > top: bot = top = (bot + top) / 2 if not hasattr(ribbon, 'previous_dual_old'): ribbon.previous_dual_old = mid old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon. 
previous_dual_old)) self.top = ribbon.dual_top_filter(top) self.bot = ribbon.dual_bot_filter(bot) self.mid = mid self.old = old self.new = new ribbon.previous_dual_old = old class ProcessedSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon if ribbon.previous_gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext() ): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: cheap_single_touch_reading = ribbon.cheap_single_touch_reading() if cheap_single_touch_reading.gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: self.gate = False if self.gate: self.raw_value = single_touch_reading.raw_value self.value = ribbon.single_touch_to_neopixel_calibration(self. raw_value) class ProcessedCheapSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): if not ribbon.previous_gate: ribbon.cheap_single_touch_reading() cheap_single_touch_reading = ribbon.cheap_single_touch_reading() self.gate = cheap_single_touch_reading.gate if self.gate: self.raw_value = cheap_single_touch_reading.raw_value self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self .raw_value) self.value = ribbon.cheap_single_filter(self.value) else: ribbon.cheap_single_filter.clear() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ribbon: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> @property def is_calibrated(self): return (self.dual_touch_top_to_neopixel_calibration.is_fitted and self.dual_touch_bot_to_neopixel_calibration.is_fitted and self. single_touch_to_neopixel_calibration.is_fitted and self. cheap_single_touch_to_neopixel_calibration.is_fitted) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def processed_single_touch_reading(self, blink=False): reading = ProcessedSingleTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class NoiseFilter: def __init__(self, moving_average_length=10, soft_tether_size=5, tether_size=1, moving_median_length=1): self.moving_average = MovingAverage(moving_average_length) self.soft_tether = SoftTether(size=soft_tether_size) self.tether = Tether(size=tether_size) self.moving_median = MovingMedian(moving_median_length) def __call__(self, value): value = self.moving_average(value) value = self.soft_tether(value) value = self.tether(value) value = self.moving_median(value) return value def clear(self): self.soft_tether.clear() self.tether.clear() self.moving_average.clear() self.moving_median.clear() def copy(self): return NoiseFilter(self.moving_average.length, self.soft_tether. 
size, self.tether.size) class SingleTouchReading: __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value'] GATE_THRESHOLD = 500 def __init__(self, ribbon): self.ribbon = ribbon self.read_raw_lower() self.read_raw_upper() self.process_readings() def prepare_to_read(self): activate_single_touch_transistors() self.ribbon.ads.mode = ADS.Mode.SINGLE self.ribbon.ads.gain = ads_gain_single def read_raw_lower(self): single_pull.value = False self.prepare_to_read() try: self.raw_lower = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def read_raw_upper(self): single_pull.value = True self.prepare_to_read() try: self.raw_upper = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def process_readings(self): self.raw_gap = abs(self.raw_upper - self.raw_lower) self.gate = self.raw_gap < self.GATE_THRESHOLD self.raw_value = (self.raw_upper + self.raw_lower) / 2 class ContinuousSingleTouchReading(SingleTouchReading): @staticmethod def prepare_to_read(): activate_single_touch_transistors() ads.mode = ADS.Mode.CONTINUOUS ads.gain = ads_gain_single self.ribbon.ads_single.value class CheapSingleTouchReading(SingleTouchReading): GATE_THRESHOLD = 1500 GATE_THRESHOLD = 4000 def read_raw_lower(self): self.prepare_to_read() single_pull.value = False self.raw_lower = self.ribbon.rib_mid.value def read_raw_upper(self): self.prepare_to_read() single_pull.value = True self.raw_upper = self.ribbon.rib_mid.value class DualTouchReading: __slots__ = ['raw_a', 'raw_b'] def __init__(self, ribbon): self.ribbon = ribbon self.prepare_to_read() try: self.raw_a = self.ribbon.ads_dual_top.value self.raw_b = self.ribbon.ads_dual_bot.value except OSError as exception: raise I2CError(exception) def prepare_to_read(self): activate_dual_touch_transistors() self.ribbon.ads.gain = ads_gain_dual class ProcessedDualTouchReading: __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new'] DELTA_THRESHOLD = -4 TWO_TOUCH_THRESHOLD = 2 TWO_TOUCH_THRESHOLD_SLACK = 0.05 def __init__(self, ribbon, blink=False): self.ribbon = ribbon def clear_filters(): ribbon.cheap_single_filter.clear() ribbon.dual_bot_filter.clear() ribbon.dual_top_filter.clear() previous_gate = ribbon.previous_gate single_before = ribbon.processed_cheap_single_touch_reading() if not single_before.gate: self.gate = False clear_filters() return with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): dual_reading = ribbon.dual_touch_reading() single_after = ribbon.cheap_single_touch_reading() if not single_after.gate: self.gate = False clear_filters() return if not previous_gate: clear_filters() self.gate = True raw_mid = (single_before.raw_value + single_after.raw_value) / 2 raw_top = dual_reading.raw_a raw_bot = dual_reading.raw_b top = raw_top bot = raw_bot mid = raw_mid top = ribbon.dual_touch_top_to_neopixel_calibration(top) bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot) mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid) mid = ribbon.cheap_single_filter(mid) if int(raw_top) == 32767: top = mid if int(raw_bot) == 32767: bot = mid delta = top - bot if delta <= self.DELTA_THRESHOLD: ribbon.dual_num_fingers = 1 elif not previous_gate: ribbon.dual_num_fingers = (2 if delta > self. 
TWO_TOUCH_THRESHOLD else 1) elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 2 elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 1 self.num_fingers = ribbon.dual_num_fingers if self.num_fingers == 1: bot = top = mid elif bot > top: bot = top = (bot + top) / 2 if not hasattr(ribbon, 'previous_dual_old'): ribbon.previous_dual_old = mid old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon. previous_dual_old)) self.top = ribbon.dual_top_filter(top) self.bot = ribbon.dual_bot_filter(bot) self.mid = mid self.old = old self.new = new ribbon.previous_dual_old = old class ProcessedSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon if ribbon.previous_gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext() ): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: cheap_single_touch_reading = ribbon.cheap_single_touch_reading() if cheap_single_touch_reading.gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: self.gate = False if self.gate: self.raw_value = single_touch_reading.raw_value self.value = ribbon.single_touch_to_neopixel_calibration(self. raw_value) class ProcessedCheapSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): if not ribbon.previous_gate: ribbon.cheap_single_touch_reading() cheap_single_touch_reading = ribbon.cheap_single_touch_reading() self.gate = cheap_single_touch_reading.gate if self.gate: self.raw_value = cheap_single_touch_reading.raw_value self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self .raw_value) self.value = ribbon.cheap_single_filter(self.value) else: ribbon.cheap_single_filter.clear() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Ribbon: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(self, name, rib_mid, ads, ads_single, ads_dual_top, ads_dual_bot): self.name = name self.rib_mid = rib_mid self.ads = ads self.ads_single = ads_single self.ads_dual_top = ads_dual_top self.ads_dual_bot = ads_dual_bot dual_touch_top_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_dual_touch_top_to_neopixel_calibration') dual_touch_bot_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_dual_touch_bot_to_neopixel_calibration') single_touch_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_single_touch_to_neopixel_calibration') cheap_single_touch_to_neopixel_calibration_path = path_join(self. 
CALIBRATION_FOLDER, self.name + '_cheap_single_touch_to_neopixel_calibration') self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= dual_touch_top_to_neopixel_calibration_path, auto_load=True) self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= dual_touch_bot_to_neopixel_calibration_path, auto_load=True) self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= single_touch_to_neopixel_calibration_path, auto_load=True) self.cheap_single_touch_to_neopixel_calibration = HistogramFitter( bin_size=self.RIB_BIN_SIZE, file_path= cheap_single_touch_to_neopixel_calibration_path, auto_load=True) self.previous_gate = False self.dual_num_fingers = 0 dual_filter_moving_average_length = 3 dual_filter_soft_tether_size = 0.1 dual_filter_tether_size = 0.05 self.dual_bot_filter = NoiseFilter(moving_average_length= dual_filter_moving_average_length, soft_tether_size= dual_filter_soft_tether_size, tether_size=dual_filter_tether_size) self.dual_top_filter = NoiseFilter(moving_average_length= dual_filter_moving_average_length, soft_tether_size= dual_filter_soft_tether_size, tether_size=dual_filter_tether_size) self.cheap_single_filter = NoiseFilter(moving_average_length=1, soft_tether_size=0.3, tether_size=0.01, moving_median_length=1) @property def is_calibrated(self): return (self.dual_touch_top_to_neopixel_calibration.is_fitted and self.dual_touch_bot_to_neopixel_calibration.is_fitted and self. single_touch_to_neopixel_calibration.is_fitted and self. cheap_single_touch_to_neopixel_calibration.is_fitted) def dual_touch_reading(self): reading = DualTouchReading(self) return reading def single_touch_reading(self): reading = SingleTouchReading(self) self.previous_gate = reading.gate return reading def cheap_single_touch_reading(self): reading = CheapSingleTouchReading(self) self.previous_gate = reading.gate return reading def processed_single_touch_reading(self, blink=False): reading = ProcessedSingleTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def processed_cheap_single_touch_reading(self, blink=False): reading = ProcessedCheapSingleTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def processed_dual_touch_reading(self, blink=False): reading = ProcessedDualTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def run_calibration(self, samples_per_pixel=25): import lightboard.display as display import lightboard.neopixels as neopixels import lightboard.buttons as buttons import lightboard.widgets as widgets buttons.metal_press_viewer.value def ask_to_try_again(): if widgets.input_yes_no('Would you like to try calibrating again?' ): self.run_calibration(samples_per_pixel) start_from_scratch = True dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. dual_touch_top_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. dual_touch_bot_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) single_touch_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. single_touch_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size =self.RIB_BIN_SIZE, file_path=self. 
cheap_single_touch_to_neopixel_calibration.file_path, auto_load =not start_from_scratch) buttons.metal_button.color = 255, 0, 255 def show_instructions(): display.set_text('Running calibration on ribbon ' + self.name + """ Please press the glowing green buttons until the red dot is barely on the ribbon""" ) buttons.set_green_button_lights(1, 1, 0, 0) show_instructions() button_press_next_neopixel = buttons.ButtonPressViewer(buttons. green_button_1) button_press_prev_neopixel = buttons.ButtonPressViewer(buttons. green_button_3) def display_neopixel_calibration(cursor_index, r, g, b, highlighted_pixels=[]): nonlocal calibrated_pixels neopixels.draw_all_off() for pixel in highlighted_pixels: neopixels.draw_dot(pixel, 0, 10, 0) neopixels.draw_dot(cursor_index, r, g, b) neopixels.refresh() i = 0 i = neopixels.first display_neopixel_calibration(i, 63, 0, 0) buttons.metal_press_viewer.value while True: reading = self.cheap_single_touch_reading() if reading.gate: break refresh_flag = False if button_press_next_neopixel.value: i += 1 refresh_flag = True if button_press_prev_neopixel.value: i -= 1 refresh_flag = True if refresh_flag: i = min(neopixels.length - 1, max(0, i)) display_neopixel_calibration(i, 63, 0, 0) if buttons.metal_press_viewer.value: if widgets.input_yes_no( """Do you want to cancel calibration? (All progress will be lost)""" ): ask_to_try_again() return else: show_instructions() button_press_skip = buttons.ButtonPressViewer(buttons.green_button_1) button_press_back = buttons.ButtonPressViewer(buttons.green_button_3) button_press_finished = buttons.ButtonPressViewer(buttons. green_button_2) buttons.set_green_button_lights(1, 1, 0, 0) def show_instructions(): display.set_text('Running calibration on ribbon ' + self.name + """ Please press cyan dots on ribbon until they become orange Press the 2rd green button when you're done (If the 2rd green button isnt lit, calibrate at least two points) Press button 1 to skip the current dot Press button 3 to go back a dot""" ) show_instructions() finished = False calibrated_pixels = set() while not finished: i = max(0, min(i, neopixels.length - 1)) display_neopixel_calibration(i, 0, 63, 63, calibrated_pixels) dual_a_samples = [] dual_b_samples = [] single_samples = [] cheap_samples = [] pixel_num_samples = 0 buttons.metal_press_viewer.value while True: buttons.green_button_3.light = len(calibrated_pixels) >= 2 if buttons.metal_press_viewer.value: if widgets.input_yes_no( """Do you want to cancel calibration? (All progress will be lost)""" ): ask_to_try_again() return else: show_instructions() if button_press_skip.value: break if button_press_back.value: i -= 2 break if button_press_finished.value and len(calibrated_pixels) >= 2: if widgets.input_yes_no( """Do you want to test your calibration? Yes: Test it! 
No: I'm done calibrating!""" ): with buttons.TemporaryButtonLights(): self.test_smooth_demo( single_touch_to_neopixel_calibration, dual_touch_top_to_neopixel_calibration, dual_touch_bot_to_neopixel_calibration) show_instructions() elif widgets.input_yes_no( "Are you sure your're done\ncalibrating this ribbon?"): finished = True break else: show_instructions() if len(cheap_samples) >= samples_per_pixel: dual_touch_top_to_neopixel_calibration.add_sample(median (dual_a_samples), i) dual_touch_bot_to_neopixel_calibration.add_sample(median (dual_b_samples), i) single_touch_to_neopixel_calibration.add_sample(median( single_samples), i) cheap_single_touch_to_neopixel_calibration.add_sample( median(cheap_samples), i) calibrated_pixels.add(i) break if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading = (self. cheap_single_touch_reading()) single_touch_reading = self.single_touch_reading() dual_touch_reading = self.dual_touch_reading() if (single_touch_reading.gate and cheap_single_touch_reading.gate): dual_a_samples.append(dual_touch_reading.raw_a) dual_b_samples.append(dual_touch_reading.raw_b) single_samples.append(single_touch_reading. raw_value) cheap_samples.append(cheap_single_touch_reading .raw_value) pixel_num_samples += 1 else: dual_a_samples.clear() dual_b_samples.clear() single_samples.clear() cheap_samples.clear() i += 1 display_neopixel_calibration(i, 63, 31, 0, calibrated_pixels) while self.cheap_single_touch_reading().gate: pass buttons.set_green_button_lights(0, 0, 0, 0) buttons.metal_button.color = 0, 1, 1 neopixels.turn_off() display.set_text('Finished calibration on ribbon ' + self.name + """ Try the ribbon out to see if you like it Also rinting out sensor values to serial for a demo (Watch in the arduino plotter) Press the metal button when you're done""" ) while not buttons.metal_press_viewer.value: if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading = (self. cheap_single_touch_reading()) single_touch_reading = self.single_touch_reading() dual_touch_reading = self.dual_touch_reading() dual_top = dual_touch_top_to_neopixel_calibration( dual_touch_reading.raw_a) dual_bot = dual_touch_bot_to_neopixel_calibration( dual_touch_reading.raw_b) single = single_touch_to_neopixel_calibration( single_touch_reading.raw_value) cheap_single = cheap_single_touch_to_neopixel_calibration( cheap_single_touch_reading.raw_value) if (cheap_single_touch_reading.gate and single_touch_reading.gate): neopixels.display_dot(int(cheap_single), 0, 128, 0) print(dual_top, dual_bot, single, cheap_single) self.test_smooth_demo(single_touch_to_neopixel_calibration, dual_touch_top_to_neopixel_calibration, dual_touch_bot_to_neopixel_calibration) if widgets.input_yes_no( 'Would you like to save this\ncalibration for ribbon ' + self. name + '?'): self.dual_touch_top_to_neopixel_calibration = ( dual_touch_top_to_neopixel_calibration) self.dual_touch_bot_to_neopixel_calibration = ( dual_touch_bot_to_neopixel_calibration) self.single_touch_to_neopixel_calibration = ( single_touch_to_neopixel_calibration) self.cheap_single_touch_to_neopixel_calibration = ( cheap_single_touch_to_neopixel_calibration) self.dual_touch_top_to_neopixel_calibration.save_to_file() self.dual_touch_bot_to_neopixel_calibration.save_to_file() self.single_touch_to_neopixel_calibration.save_to_file() self.cheap_single_touch_to_neopixel_calibration.save_to_file() display.set_text('Saved calibrations for ribbon ' + self.name + '!' 
) time.sleep(2) else: display.set_text('Cancelled. No calibrations were saved.') time.sleep(2) ask_to_try_again() return def test_smooth_demo(self, single_touch_to_neopixel_calibration=None, dual_touch_top_to_neopixel_calibration=None, dual_touch_bot_to_neopixel_calibration=None): import lightboard.buttons as buttons import lightboard.neopixels as neopixels import lightboard.display as display if single_touch_to_neopixel_calibration is None: single_touch_to_neopixel_calibration = (self. single_touch_to_neopixel_calibration) if dual_touch_top_to_neopixel_calibration is None: dual_touch_top_to_neopixel_calibration = (self. dual_touch_top_to_neopixel_calibration) if dual_touch_bot_to_neopixel_calibration is None: dual_touch_bot_to_neopixel_calibration = (self. dual_touch_bot_to_neopixel_calibration) buttons.metal_button.color = 1, 0, 1 buttons.set_green_button_lights(0, 0, 0, 0) display.set_text('Smooth demo for ribbon %s\nPress metal to exit') def mean(l): l = list(l) return sum(l) / len(l) def std(l): u = mean(l) return mean((x - u) ** 2 for x in l) ** 0.5 class SuperSmooth: def __init__(self): self.DISCRETE = True self.N = 10 self.V = [] self.tet2 = Tether(1) self.tether = SoftTether(size=5) self.value = None def __call__(self, value): raw_value = value self.V.append(raw_value) while len(self.V) > self.N: del self.V[0] val = self.tether(mean(self.V)) if self.DISCRETE: Val = self.tet2(int(val)) else: Val = val self.value = Val return Val def clear(self): self.V.clear() self.tether.value = None super_smooth_single = SuperSmooth() super_smooth_dual_top = SuperSmooth() super_smooth_dual_bot = SuperSmooth() while not buttons.metal_press_viewer.value: single = self.single_touch_reading() if single.gate: dual = self.dual_touch_reading() val_top = dual_touch_top_to_neopixel_calibration( super_smooth_dual_top(dual.raw_a)) val_bot = dual_touch_bot_to_neopixel_calibration( super_smooth_dual_bot(dual.raw_b)) val = single_touch_to_neopixel_calibration(super_smooth_single (single.raw_value)) neopixels.draw_all_off() neopixels.draw_dot(floor(val_top), 0, 30, 15) neopixels.draw_dot(floor(val_bot), 15, 30, 0) neopixels.draw_dot(floor(val), 64, 0, 128) neopixels.refresh() else: super_smooth_single.clear() super_smooth_dual_top.clear() super_smooth_dual_bot.clear() neopixels.turn_off() class NoiseFilter: def __init__(self, moving_average_length=10, soft_tether_size=5, tether_size=1, moving_median_length=1): self.moving_average = MovingAverage(moving_average_length) self.soft_tether = SoftTether(size=soft_tether_size) self.tether = Tether(size=tether_size) self.moving_median = MovingMedian(moving_median_length) def __call__(self, value): value = self.moving_average(value) value = self.soft_tether(value) value = self.tether(value) value = self.moving_median(value) return value def clear(self): self.soft_tether.clear() self.tether.clear() self.moving_average.clear() self.moving_median.clear() def copy(self): return NoiseFilter(self.moving_average.length, self.soft_tether. 
size, self.tether.size) class SingleTouchReading: __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value'] GATE_THRESHOLD = 500 def __init__(self, ribbon): self.ribbon = ribbon self.read_raw_lower() self.read_raw_upper() self.process_readings() def prepare_to_read(self): activate_single_touch_transistors() self.ribbon.ads.mode = ADS.Mode.SINGLE self.ribbon.ads.gain = ads_gain_single def read_raw_lower(self): single_pull.value = False self.prepare_to_read() try: self.raw_lower = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def read_raw_upper(self): single_pull.value = True self.prepare_to_read() try: self.raw_upper = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def process_readings(self): self.raw_gap = abs(self.raw_upper - self.raw_lower) self.gate = self.raw_gap < self.GATE_THRESHOLD self.raw_value = (self.raw_upper + self.raw_lower) / 2 class ContinuousSingleTouchReading(SingleTouchReading): @staticmethod def prepare_to_read(): activate_single_touch_transistors() ads.mode = ADS.Mode.CONTINUOUS ads.gain = ads_gain_single self.ribbon.ads_single.value class CheapSingleTouchReading(SingleTouchReading): GATE_THRESHOLD = 1500 GATE_THRESHOLD = 4000 def read_raw_lower(self): self.prepare_to_read() single_pull.value = False self.raw_lower = self.ribbon.rib_mid.value def read_raw_upper(self): self.prepare_to_read() single_pull.value = True self.raw_upper = self.ribbon.rib_mid.value class DualTouchReading: __slots__ = ['raw_a', 'raw_b'] def __init__(self, ribbon): self.ribbon = ribbon self.prepare_to_read() try: self.raw_a = self.ribbon.ads_dual_top.value self.raw_b = self.ribbon.ads_dual_bot.value except OSError as exception: raise I2CError(exception) def prepare_to_read(self): activate_dual_touch_transistors() self.ribbon.ads.gain = ads_gain_dual class ProcessedDualTouchReading: __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new'] DELTA_THRESHOLD = -4 TWO_TOUCH_THRESHOLD = 2 TWO_TOUCH_THRESHOLD_SLACK = 0.05 def __init__(self, ribbon, blink=False): self.ribbon = ribbon def clear_filters(): ribbon.cheap_single_filter.clear() ribbon.dual_bot_filter.clear() ribbon.dual_top_filter.clear() previous_gate = ribbon.previous_gate single_before = ribbon.processed_cheap_single_touch_reading() if not single_before.gate: self.gate = False clear_filters() return with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): dual_reading = ribbon.dual_touch_reading() single_after = ribbon.cheap_single_touch_reading() if not single_after.gate: self.gate = False clear_filters() return if not previous_gate: clear_filters() self.gate = True raw_mid = (single_before.raw_value + single_after.raw_value) / 2 raw_top = dual_reading.raw_a raw_bot = dual_reading.raw_b top = raw_top bot = raw_bot mid = raw_mid top = ribbon.dual_touch_top_to_neopixel_calibration(top) bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot) mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid) mid = ribbon.cheap_single_filter(mid) if int(raw_top) == 32767: top = mid if int(raw_bot) == 32767: bot = mid delta = top - bot if delta <= self.DELTA_THRESHOLD: ribbon.dual_num_fingers = 1 elif not previous_gate: ribbon.dual_num_fingers = (2 if delta > self. 
TWO_TOUCH_THRESHOLD else 1) elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 2 elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 1 self.num_fingers = ribbon.dual_num_fingers if self.num_fingers == 1: bot = top = mid elif bot > top: bot = top = (bot + top) / 2 if not hasattr(ribbon, 'previous_dual_old'): ribbon.previous_dual_old = mid old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon. previous_dual_old)) self.top = ribbon.dual_top_filter(top) self.bot = ribbon.dual_bot_filter(bot) self.mid = mid self.old = old self.new = new ribbon.previous_dual_old = old class ProcessedSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon if ribbon.previous_gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext() ): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: cheap_single_touch_reading = ribbon.cheap_single_touch_reading() if cheap_single_touch_reading.gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: self.gate = False if self.gate: self.raw_value = single_touch_reading.raw_value self.value = ribbon.single_touch_to_neopixel_calibration(self. raw_value) class ProcessedCheapSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): if not ribbon.previous_gate: ribbon.cheap_single_touch_reading() cheap_single_touch_reading = ribbon.cheap_single_touch_reading() self.gate = cheap_single_touch_reading.gate if self.gate: self.raw_value = cheap_single_touch_reading.raw_value self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self .raw_value) self.value = ribbon.cheap_single_filter(self.value) else: ribbon.cheap_single_filter.clear() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class I2CError(OSError): pass class Ribbon: ADS_BIN_SIZE = 100 RIB_BIN_SIZE = 100 CALIBRATION_FOLDER = '/generated/calibrations/ribbons' def __init__(self, name, rib_mid, ads, ads_single, ads_dual_top, ads_dual_bot): self.name = name self.rib_mid = rib_mid self.ads = ads self.ads_single = ads_single self.ads_dual_top = ads_dual_top self.ads_dual_bot = ads_dual_bot dual_touch_top_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_dual_touch_top_to_neopixel_calibration') dual_touch_bot_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_dual_touch_bot_to_neopixel_calibration') single_touch_to_neopixel_calibration_path = path_join(self. CALIBRATION_FOLDER, self.name + '_single_touch_to_neopixel_calibration') cheap_single_touch_to_neopixel_calibration_path = path_join(self. 
CALIBRATION_FOLDER, self.name + '_cheap_single_touch_to_neopixel_calibration') self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= dual_touch_top_to_neopixel_calibration_path, auto_load=True) self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= dual_touch_bot_to_neopixel_calibration_path, auto_load=True) self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size =self.ADS_BIN_SIZE, file_path= single_touch_to_neopixel_calibration_path, auto_load=True) self.cheap_single_touch_to_neopixel_calibration = HistogramFitter( bin_size=self.RIB_BIN_SIZE, file_path= cheap_single_touch_to_neopixel_calibration_path, auto_load=True) self.previous_gate = False self.dual_num_fingers = 0 dual_filter_moving_average_length = 3 dual_filter_soft_tether_size = 0.1 dual_filter_tether_size = 0.05 self.dual_bot_filter = NoiseFilter(moving_average_length= dual_filter_moving_average_length, soft_tether_size= dual_filter_soft_tether_size, tether_size=dual_filter_tether_size) self.dual_top_filter = NoiseFilter(moving_average_length= dual_filter_moving_average_length, soft_tether_size= dual_filter_soft_tether_size, tether_size=dual_filter_tether_size) self.cheap_single_filter = NoiseFilter(moving_average_length=1, soft_tether_size=0.3, tether_size=0.01, moving_median_length=1) @property def is_calibrated(self): return (self.dual_touch_top_to_neopixel_calibration.is_fitted and self.dual_touch_bot_to_neopixel_calibration.is_fitted and self. single_touch_to_neopixel_calibration.is_fitted and self. cheap_single_touch_to_neopixel_calibration.is_fitted) def dual_touch_reading(self): reading = DualTouchReading(self) return reading def single_touch_reading(self): reading = SingleTouchReading(self) self.previous_gate = reading.gate return reading def cheap_single_touch_reading(self): reading = CheapSingleTouchReading(self) self.previous_gate = reading.gate return reading def processed_single_touch_reading(self, blink=False): reading = ProcessedSingleTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def processed_cheap_single_touch_reading(self, blink=False): reading = ProcessedCheapSingleTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def processed_dual_touch_reading(self, blink=False): reading = ProcessedDualTouchReading(self, blink=blink) self.previous_gate = reading.gate return reading def run_calibration(self, samples_per_pixel=25): import lightboard.display as display import lightboard.neopixels as neopixels import lightboard.buttons as buttons import lightboard.widgets as widgets buttons.metal_press_viewer.value def ask_to_try_again(): if widgets.input_yes_no('Would you like to try calibrating again?' ): self.run_calibration(samples_per_pixel) start_from_scratch = True dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. dual_touch_top_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. dual_touch_bot_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) single_touch_to_neopixel_calibration = HistogramFitter(bin_size= self.ADS_BIN_SIZE, file_path=self. single_touch_to_neopixel_calibration.file_path, auto_load=not start_from_scratch) cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size =self.RIB_BIN_SIZE, file_path=self. 
cheap_single_touch_to_neopixel_calibration.file_path, auto_load =not start_from_scratch) buttons.metal_button.color = 255, 0, 255 def show_instructions(): display.set_text('Running calibration on ribbon ' + self.name + """ Please press the glowing green buttons until the red dot is barely on the ribbon""" ) buttons.set_green_button_lights(1, 1, 0, 0) show_instructions() button_press_next_neopixel = buttons.ButtonPressViewer(buttons. green_button_1) button_press_prev_neopixel = buttons.ButtonPressViewer(buttons. green_button_3) def display_neopixel_calibration(cursor_index, r, g, b, highlighted_pixels=[]): nonlocal calibrated_pixels neopixels.draw_all_off() for pixel in highlighted_pixels: neopixels.draw_dot(pixel, 0, 10, 0) neopixels.draw_dot(cursor_index, r, g, b) neopixels.refresh() i = 0 i = neopixels.first display_neopixel_calibration(i, 63, 0, 0) buttons.metal_press_viewer.value while True: reading = self.cheap_single_touch_reading() if reading.gate: break refresh_flag = False if button_press_next_neopixel.value: i += 1 refresh_flag = True if button_press_prev_neopixel.value: i -= 1 refresh_flag = True if refresh_flag: i = min(neopixels.length - 1, max(0, i)) display_neopixel_calibration(i, 63, 0, 0) if buttons.metal_press_viewer.value: if widgets.input_yes_no( """Do you want to cancel calibration? (All progress will be lost)""" ): ask_to_try_again() return else: show_instructions() button_press_skip = buttons.ButtonPressViewer(buttons.green_button_1) button_press_back = buttons.ButtonPressViewer(buttons.green_button_3) button_press_finished = buttons.ButtonPressViewer(buttons. green_button_2) buttons.set_green_button_lights(1, 1, 0, 0) def show_instructions(): display.set_text('Running calibration on ribbon ' + self.name + """ Please press cyan dots on ribbon until they become orange Press the 2rd green button when you're done (If the 2rd green button isnt lit, calibrate at least two points) Press button 1 to skip the current dot Press button 3 to go back a dot""" ) show_instructions() finished = False calibrated_pixels = set() while not finished: i = max(0, min(i, neopixels.length - 1)) display_neopixel_calibration(i, 0, 63, 63, calibrated_pixels) dual_a_samples = [] dual_b_samples = [] single_samples = [] cheap_samples = [] pixel_num_samples = 0 buttons.metal_press_viewer.value while True: buttons.green_button_3.light = len(calibrated_pixels) >= 2 if buttons.metal_press_viewer.value: if widgets.input_yes_no( """Do you want to cancel calibration? (All progress will be lost)""" ): ask_to_try_again() return else: show_instructions() if button_press_skip.value: break if button_press_back.value: i -= 2 break if button_press_finished.value and len(calibrated_pixels) >= 2: if widgets.input_yes_no( """Do you want to test your calibration? Yes: Test it! 
No: I'm done calibrating!""" ): with buttons.TemporaryButtonLights(): self.test_smooth_demo( single_touch_to_neopixel_calibration, dual_touch_top_to_neopixel_calibration, dual_touch_bot_to_neopixel_calibration) show_instructions() elif widgets.input_yes_no( "Are you sure your're done\ncalibrating this ribbon?"): finished = True break else: show_instructions() if len(cheap_samples) >= samples_per_pixel: dual_touch_top_to_neopixel_calibration.add_sample(median (dual_a_samples), i) dual_touch_bot_to_neopixel_calibration.add_sample(median (dual_b_samples), i) single_touch_to_neopixel_calibration.add_sample(median( single_samples), i) cheap_single_touch_to_neopixel_calibration.add_sample( median(cheap_samples), i) calibrated_pixels.add(i) break if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading = (self. cheap_single_touch_reading()) single_touch_reading = self.single_touch_reading() dual_touch_reading = self.dual_touch_reading() if (single_touch_reading.gate and cheap_single_touch_reading.gate): dual_a_samples.append(dual_touch_reading.raw_a) dual_b_samples.append(dual_touch_reading.raw_b) single_samples.append(single_touch_reading. raw_value) cheap_samples.append(cheap_single_touch_reading .raw_value) pixel_num_samples += 1 else: dual_a_samples.clear() dual_b_samples.clear() single_samples.clear() cheap_samples.clear() i += 1 display_neopixel_calibration(i, 63, 31, 0, calibrated_pixels) while self.cheap_single_touch_reading().gate: pass buttons.set_green_button_lights(0, 0, 0, 0) buttons.metal_button.color = 0, 1, 1 neopixels.turn_off() display.set_text('Finished calibration on ribbon ' + self.name + """ Try the ribbon out to see if you like it Also rinting out sensor values to serial for a demo (Watch in the arduino plotter) Press the metal button when you're done""" ) while not buttons.metal_press_viewer.value: if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading = (self. cheap_single_touch_reading()) single_touch_reading = self.single_touch_reading() dual_touch_reading = self.dual_touch_reading() dual_top = dual_touch_top_to_neopixel_calibration( dual_touch_reading.raw_a) dual_bot = dual_touch_bot_to_neopixel_calibration( dual_touch_reading.raw_b) single = single_touch_to_neopixel_calibration( single_touch_reading.raw_value) cheap_single = cheap_single_touch_to_neopixel_calibration( cheap_single_touch_reading.raw_value) if (cheap_single_touch_reading.gate and single_touch_reading.gate): neopixels.display_dot(int(cheap_single), 0, 128, 0) print(dual_top, dual_bot, single, cheap_single) self.test_smooth_demo(single_touch_to_neopixel_calibration, dual_touch_top_to_neopixel_calibration, dual_touch_bot_to_neopixel_calibration) if widgets.input_yes_no( 'Would you like to save this\ncalibration for ribbon ' + self. name + '?'): self.dual_touch_top_to_neopixel_calibration = ( dual_touch_top_to_neopixel_calibration) self.dual_touch_bot_to_neopixel_calibration = ( dual_touch_bot_to_neopixel_calibration) self.single_touch_to_neopixel_calibration = ( single_touch_to_neopixel_calibration) self.cheap_single_touch_to_neopixel_calibration = ( cheap_single_touch_to_neopixel_calibration) self.dual_touch_top_to_neopixel_calibration.save_to_file() self.dual_touch_bot_to_neopixel_calibration.save_to_file() self.single_touch_to_neopixel_calibration.save_to_file() self.cheap_single_touch_to_neopixel_calibration.save_to_file() display.set_text('Saved calibrations for ribbon ' + self.name + '!' 
) time.sleep(2) else: display.set_text('Cancelled. No calibrations were saved.') time.sleep(2) ask_to_try_again() return def test_smooth_demo(self, single_touch_to_neopixel_calibration=None, dual_touch_top_to_neopixel_calibration=None, dual_touch_bot_to_neopixel_calibration=None): import lightboard.buttons as buttons import lightboard.neopixels as neopixels import lightboard.display as display if single_touch_to_neopixel_calibration is None: single_touch_to_neopixel_calibration = (self. single_touch_to_neopixel_calibration) if dual_touch_top_to_neopixel_calibration is None: dual_touch_top_to_neopixel_calibration = (self. dual_touch_top_to_neopixel_calibration) if dual_touch_bot_to_neopixel_calibration is None: dual_touch_bot_to_neopixel_calibration = (self. dual_touch_bot_to_neopixel_calibration) buttons.metal_button.color = 1, 0, 1 buttons.set_green_button_lights(0, 0, 0, 0) display.set_text('Smooth demo for ribbon %s\nPress metal to exit') def mean(l): l = list(l) return sum(l) / len(l) def std(l): u = mean(l) return mean((x - u) ** 2 for x in l) ** 0.5 class SuperSmooth: def __init__(self): self.DISCRETE = True self.N = 10 self.V = [] self.tet2 = Tether(1) self.tether = SoftTether(size=5) self.value = None def __call__(self, value): raw_value = value self.V.append(raw_value) while len(self.V) > self.N: del self.V[0] val = self.tether(mean(self.V)) if self.DISCRETE: Val = self.tet2(int(val)) else: Val = val self.value = Val return Val def clear(self): self.V.clear() self.tether.value = None super_smooth_single = SuperSmooth() super_smooth_dual_top = SuperSmooth() super_smooth_dual_bot = SuperSmooth() while not buttons.metal_press_viewer.value: single = self.single_touch_reading() if single.gate: dual = self.dual_touch_reading() val_top = dual_touch_top_to_neopixel_calibration( super_smooth_dual_top(dual.raw_a)) val_bot = dual_touch_bot_to_neopixel_calibration( super_smooth_dual_bot(dual.raw_b)) val = single_touch_to_neopixel_calibration(super_smooth_single (single.raw_value)) neopixels.draw_all_off() neopixels.draw_dot(floor(val_top), 0, 30, 15) neopixels.draw_dot(floor(val_bot), 15, 30, 0) neopixels.draw_dot(floor(val), 64, 0, 128) neopixels.refresh() else: super_smooth_single.clear() super_smooth_dual_top.clear() super_smooth_dual_bot.clear() neopixels.turn_off() class NoiseFilter: def __init__(self, moving_average_length=10, soft_tether_size=5, tether_size=1, moving_median_length=1): self.moving_average = MovingAverage(moving_average_length) self.soft_tether = SoftTether(size=soft_tether_size) self.tether = Tether(size=tether_size) self.moving_median = MovingMedian(moving_median_length) def __call__(self, value): value = self.moving_average(value) value = self.soft_tether(value) value = self.tether(value) value = self.moving_median(value) return value def clear(self): self.soft_tether.clear() self.tether.clear() self.moving_average.clear() self.moving_median.clear() def copy(self): return NoiseFilter(self.moving_average.length, self.soft_tether. 
size, self.tether.size) class SingleTouchReading: __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value'] GATE_THRESHOLD = 500 def __init__(self, ribbon): self.ribbon = ribbon self.read_raw_lower() self.read_raw_upper() self.process_readings() def prepare_to_read(self): activate_single_touch_transistors() self.ribbon.ads.mode = ADS.Mode.SINGLE self.ribbon.ads.gain = ads_gain_single def read_raw_lower(self): single_pull.value = False self.prepare_to_read() try: self.raw_lower = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def read_raw_upper(self): single_pull.value = True self.prepare_to_read() try: self.raw_upper = self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def process_readings(self): self.raw_gap = abs(self.raw_upper - self.raw_lower) self.gate = self.raw_gap < self.GATE_THRESHOLD self.raw_value = (self.raw_upper + self.raw_lower) / 2 class ContinuousSingleTouchReading(SingleTouchReading): @staticmethod def prepare_to_read(): activate_single_touch_transistors() ads.mode = ADS.Mode.CONTINUOUS ads.gain = ads_gain_single self.ribbon.ads_single.value class CheapSingleTouchReading(SingleTouchReading): GATE_THRESHOLD = 1500 GATE_THRESHOLD = 4000 def read_raw_lower(self): self.prepare_to_read() single_pull.value = False self.raw_lower = self.ribbon.rib_mid.value def read_raw_upper(self): self.prepare_to_read() single_pull.value = True self.raw_upper = self.ribbon.rib_mid.value class DualTouchReading: __slots__ = ['raw_a', 'raw_b'] def __init__(self, ribbon): self.ribbon = ribbon self.prepare_to_read() try: self.raw_a = self.ribbon.ads_dual_top.value self.raw_b = self.ribbon.ads_dual_bot.value except OSError as exception: raise I2CError(exception) def prepare_to_read(self): activate_dual_touch_transistors() self.ribbon.ads.gain = ads_gain_dual class ProcessedDualTouchReading: __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new'] DELTA_THRESHOLD = -4 TWO_TOUCH_THRESHOLD = 2 TWO_TOUCH_THRESHOLD_SLACK = 0.05 def __init__(self, ribbon, blink=False): self.ribbon = ribbon def clear_filters(): ribbon.cheap_single_filter.clear() ribbon.dual_bot_filter.clear() ribbon.dual_top_filter.clear() previous_gate = ribbon.previous_gate single_before = ribbon.processed_cheap_single_touch_reading() if not single_before.gate: self.gate = False clear_filters() return with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): dual_reading = ribbon.dual_touch_reading() single_after = ribbon.cheap_single_touch_reading() if not single_after.gate: self.gate = False clear_filters() return if not previous_gate: clear_filters() self.gate = True raw_mid = (single_before.raw_value + single_after.raw_value) / 2 raw_top = dual_reading.raw_a raw_bot = dual_reading.raw_b top = raw_top bot = raw_bot mid = raw_mid top = ribbon.dual_touch_top_to_neopixel_calibration(top) bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot) mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid) mid = ribbon.cheap_single_filter(mid) if int(raw_top) == 32767: top = mid if int(raw_bot) == 32767: bot = mid delta = top - bot if delta <= self.DELTA_THRESHOLD: ribbon.dual_num_fingers = 1 elif not previous_gate: ribbon.dual_num_fingers = (2 if delta > self. 
TWO_TOUCH_THRESHOLD else 1) elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 2 elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 1 self.num_fingers = ribbon.dual_num_fingers if self.num_fingers == 1: bot = top = mid elif bot > top: bot = top = (bot + top) / 2 if not hasattr(ribbon, 'previous_dual_old'): ribbon.previous_dual_old = mid old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon. previous_dual_old)) self.top = ribbon.dual_top_filter(top) self.bot = ribbon.dual_bot_filter(bot) self.mid = mid self.old = old self.new = new ribbon.previous_dual_old = old class ProcessedSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon if ribbon.previous_gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext() ): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: cheap_single_touch_reading = ribbon.cheap_single_touch_reading() if cheap_single_touch_reading.gate: with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): single_touch_reading = ribbon.single_touch_reading() self.gate = single_touch_reading.gate else: self.gate = False if self.gate: self.raw_value = single_touch_reading.raw_value self.value = ribbon.single_touch_to_neopixel_calibration(self. raw_value) class ProcessedCheapSingleTouchReading: def __init__(self, ribbon, blink=False): self.ribbon = ribbon with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()): if not ribbon.previous_gate: ribbon.cheap_single_touch_reading() cheap_single_touch_reading = ribbon.cheap_single_touch_reading() self.gate = cheap_single_touch_reading.gate if self.gate: self.raw_value = cheap_single_touch_reading.raw_value self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self .raw_value) self.value = ribbon.cheap_single_filter(self.value) else: ribbon.cheap_single_filter.clear() <|reserved_special_token_0|> def show_calibration_menu(): import lightboard.widgets as widgets options = OrderedDict() options['Calibrate Rib A'] = ribbon_a.run_calibration options['Calibrate Rib B'] = ribbon_b.run_calibration options['Smooth Demo A'] = ribbon_a.test_smooth_demo options['Smooth Demo B'] = ribbon_b.test_smooth_demo options['Raw UART Demo A'] = lambda : test_ribbon_raw_uart(ribbon_a) options['Raw UART Demo B'] = lambda : test_ribbon_raw_uart(ribbon_b) options['Dual Touch Demo A'] = lambda : test_ribbon_dual_touch(ribbon_a) options['Dual Touch Demo B'] = lambda : test_ribbon_dual_touch(ribbon_b) widgets.run_select_subroutine(options) <|reserved_special_token_0|> <|reserved_special_token_1|> #ribbon_a and ribbon_b are the two important variables here ribbon_a=None ribbon_b=None #Notes: # - As it turns out, the internal ADC in the Teensy is NOT very susceptible to fluctuations in the Neopixels' current...BUT...the ADS1115 IS. # Therefore, I think a better model would ditch the ADS1115 alltogether - replacing it with a simple 8x toggleable amp for dual touches. # - Shouldn't cause errors. No scl/sda pullup means the board isn't connected. No i2c at 48 means the individual chip isn't powered or connected etc. I just ficed a bad solder joint that took a while to flare up.......maybe this is what happened with the old lightwave? I was too quick with the solder joints, leaving a bubble that didn't touch it because of some stress bs later on? 
__all__=['ribbon_a','ribbon_b'] from urp import * import time import board import busio import adafruit_ads1x15.ads1115 as ADS from collections import OrderedDict from adafruit_ads1x15.ads1x15 import Mode from adafruit_ads1x15.analog_in import AnalogIn as ADS1115_AnalogIn from digitalio import DigitalInOut, Direction, Pull from analogio import AnalogIn as Internal_AnalogIn from tools import * import storage from linear_modules import * import lightboard.neopixels as neopixels import time from micropython import const i2c = busio.I2C(board.SCL, board.SDA, frequency=1000000)# Create the I2C bus with a fast frequency #I2C addresses for ADS1115's: 0x48 and 0x4a for Ribbon A and Ribbon B respectively ads_a = ADS.ADS1115(i2c,address=0x48) ads_b = ADS.ADS1115(i2c,address=0x4a) data_rate=const(860) # Maximum number of samples per second ads_a.data_rate = data_rate ads_b.data_rate = data_rate ads_gain_single=const(1) ads_gain_dual =const(8) #Uses 100kΩ #Change the gains depending on whether you're measuring dual or single touches ads_a.gain=ads_gain_single ads_b.gain=ads_gain_single ads_a_a0 = ADS1115_AnalogIn(ads_a, ADS.P0) ads_a_a1 = ADS1115_AnalogIn(ads_a, ADS.P1) ads_a_a2 = ADS1115_AnalogIn(ads_a, ADS.P2) ads_a_single=ads_a_a0 ads_a_dual_top=ads_a_a1 ads_a_dual_b=ads_a_a2 rib_a_mid = Internal_AnalogIn(board.D26) ads_b_a0 = ADS1115_AnalogIn(ads_b, ADS.P0) ads_b_a1 = ADS1115_AnalogIn(ads_b, ADS.P1) ads_b_a2 = ADS1115_AnalogIn(ads_b, ADS.P2) ads_b_single=ads_b_a0 ads_b_dual_top=ads_b_a1 ads_b_dual_b=ads_b_a2 rib_b_mid = Internal_AnalogIn(board.D27) single_pull=DigitalInOut(board.D32) single_pin =DigitalInOut(board.D31) dual_pin_2 =DigitalInOut(board.D25) dual_pin_1 =DigitalInOut(board.D24) single_pull.direction=Direction.OUTPUT single_pin .direction=Direction.OUTPUT dual_pin_2 .direction=Direction.OUTPUT dual_pin_1 .direction=Direction.OUTPUT def activate_single_touch_transistors(): single_pin.value=True dual_pin_1 .value=False dual_pin_2 .value=False def activate_dual_touch_transistors(): single_pin.value=False dual_pin_1 .value=True dual_pin_2 .value=True class I2CError(OSError): pass class Ribbon: ADS_BIN_SIZE=100 RIB_BIN_SIZE=100 CALIBRATION_FOLDER='/generated/calibrations/ribbons' def __init__(self,name,rib_mid,ads,ads_single,ads_dual_top,ads_dual_bot): self.name=name self.rib_mid=rib_mid self.ads=ads self.ads_single=ads_single self.ads_dual_top=ads_dual_top self.ads_dual_bot=ads_dual_bot dual_touch_top_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_top_to_neopixel_calibration' ) dual_touch_bot_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_bot_to_neopixel_calibration' ) single_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_single_touch_to_neopixel_calibration' ) cheap_single_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_cheap_single_touch_to_neopixel_calibration') self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_top_to_neopixel_calibration_path ,auto_load=True) self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_bot_to_neopixel_calibration_path ,auto_load=True) self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=single_touch_to_neopixel_calibration_path ,auto_load=True) self.cheap_single_touch_to_neopixel_calibration = 
HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=cheap_single_touch_to_neopixel_calibration_path,auto_load=True) self.previous_gate=False self.dual_num_fingers=0 dual_filter_moving_average_length=3 dual_filter_soft_tether_size=.1 dual_filter_tether_size=.05 self.dual_bot_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size) self.dual_top_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size) self.cheap_single_filter=NoiseFilter(moving_average_length=1,soft_tether_size=.3,tether_size=.01,moving_median_length=1) @property def is_calibrated(self): return self.dual_touch_top_to_neopixel_calibration .is_fitted and \ self.dual_touch_bot_to_neopixel_calibration .is_fitted and \ self.single_touch_to_neopixel_calibration .is_fitted and \ self.cheap_single_touch_to_neopixel_calibration.is_fitted def dual_touch_reading(self): reading=DualTouchReading(self) #DualTouchReading objects don't have a gate as of right now (though they will probably soon - we can get the gate by comparing the top value to the bot value and setting a threshold) return reading def single_touch_reading(self): reading=SingleTouchReading(self) self.previous_gate=reading.gate return reading def cheap_single_touch_reading(self): reading=CheapSingleTouchReading(self) self.previous_gate=reading.gate return reading def processed_single_touch_reading(self,blink=False): # if not self.is_calibrated: #Unnessecary CPU time...its cheap but so unimportant... # print("Ribbon.processed_single_touch_reading: Warning: This ribbon is not calibrated!") reading=ProcessedSingleTouchReading(self,blink=blink) self.previous_gate=reading.gate return reading def processed_cheap_single_touch_reading(self,blink=False): reading=ProcessedCheapSingleTouchReading(self,blink=blink) self.previous_gate=reading.gate return reading def processed_dual_touch_reading(self,blink=False): reading=ProcessedDualTouchReading(self,blink=blink) self.previous_gate=reading.gate return reading def run_calibration(self,samples_per_pixel=25): import lightboard.display as display import lightboard.neopixels as neopixels import lightboard.buttons as buttons import lightboard.widgets as widgets buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident def ask_to_try_again(): if widgets.input_yes_no("Would you like to try calibrating again?"): self.run_calibration(samples_per_pixel) start_from_scratch = True # widgets.input_yes_no('Start from scratch?\nNo: Modify current calibration\nYes: Create entirely new calibration') dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_top_to_neopixel_calibration .file_path,auto_load=not start_from_scratch) dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_bot_to_neopixel_calibration .file_path,auto_load=not start_from_scratch) single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.single_touch_to_neopixel_calibration .file_path,auto_load=not start_from_scratch) cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=self.cheap_single_touch_to_neopixel_calibration.file_path,auto_load=not start_from_scratch) buttons.metal_button.color=(255,0,255) def show_instructions(): display.set_text('Running calibration on 
ribbon '+self.name+'\nPlease press the glowing green buttons until\nthe red dot is barely on the ribbon') buttons.set_green_button_lights(1,1,0,0) show_instructions() button_press_next_neopixel=buttons.ButtonPressViewer(buttons.green_button_1) button_press_prev_neopixel=buttons.ButtonPressViewer(buttons.green_button_3) def display_neopixel_calibration(cursor_index,r,g,b,highlighted_pixels=[]): nonlocal calibrated_pixels neopixels.draw_all_off() for pixel in highlighted_pixels: neopixels.draw_dot(pixel,0,10,0) neopixels.draw_dot(cursor_index,r,g,b) neopixels.refresh() i=0 i=neopixels.first display_neopixel_calibration(i,63,0,0) buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident while True: reading=self.cheap_single_touch_reading() if reading.gate: break refresh_flag=False if button_press_next_neopixel.value: i+=1 refresh_flag=True if button_press_prev_neopixel.value: i-=1 refresh_flag=True if refresh_flag: i=min(neopixels.length-1,max(0,i)) display_neopixel_calibration(i,63,0,0) if buttons.metal_press_viewer.value: if widgets.input_yes_no("Do you want to cancel calibration?\n(All progress will be lost)"): #NOTE: This code block is duplicated! ask_to_try_again() return else: show_instructions() button_press_skip =buttons.ButtonPressViewer(buttons.green_button_1) button_press_back =buttons.ButtonPressViewer(buttons.green_button_3) button_press_finished=buttons.ButtonPressViewer(buttons.green_button_2) buttons.set_green_button_lights(1,1,0,0) def show_instructions(): display.set_text('Running calibration on ribbon '+self.name+'\nPlease press cyan dots on ribbon\nuntil they become orange\nPress the 2rd green button when you\'re done\n(If the 2rd green button isnt lit, calibrate at least two points)\nPress button 1 to skip the current dot\nPress button 3 to go back a dot') show_instructions() finished=False calibrated_pixels=set() while not finished: i=max(0,min(i,neopixels.length-1)) display_neopixel_calibration(i,0,63,63,calibrated_pixels) dual_a_samples=[] dual_b_samples=[] single_samples=[] cheap_samples =[] pixel_num_samples=0 buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident while True: buttons.green_button_3.light=len(calibrated_pixels)>=2 if buttons.metal_press_viewer.value: if widgets.input_yes_no("Do you want to cancel calibration?\n(All progress will be lost)"): #NOTE: This code block is duplicated! ask_to_try_again() return else: show_instructions() if button_press_skip.value: break if button_press_back.value: i-=2 break if button_press_finished.value and len(calibrated_pixels)>=2: if widgets.input_yes_no("Do you want to test your calibration?\nYes: Test it!\nNo: I'm done calibrating!"): #This UI is a bit janky....should use better messages. But whatevs...this is just calibration after all... 
with buttons.TemporaryButtonLights(): self.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration) show_instructions() elif widgets.input_yes_no("Are you sure your're done\ncalibrating this ribbon?"): finished=True break else: show_instructions() if len(cheap_samples)>=samples_per_pixel: dual_touch_top_to_neopixel_calibration .add_sample(median(dual_a_samples),i) dual_touch_bot_to_neopixel_calibration .add_sample(median(dual_b_samples),i) single_touch_to_neopixel_calibration .add_sample(median(single_samples),i) cheap_single_touch_to_neopixel_calibration.add_sample(median(cheap_samples ),i) calibrated_pixels.add(i) break if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading=self.cheap_single_touch_reading() single_touch_reading =self.single_touch_reading() dual_touch_reading =self.dual_touch_reading() if single_touch_reading.gate and cheap_single_touch_reading.gate: dual_a_samples.append(dual_touch_reading .raw_a ) dual_b_samples.append(dual_touch_reading .raw_b ) single_samples.append(single_touch_reading .raw_value) cheap_samples .append(cheap_single_touch_reading.raw_value) pixel_num_samples+=1 else: #Accidently remove finger? Cancel it...try again. dual_a_samples.clear() dual_b_samples.clear() single_samples.clear() cheap_samples .clear() i+=1 display_neopixel_calibration(i,63,31,0,calibrated_pixels) while self.cheap_single_touch_reading().gate: pass buttons.set_green_button_lights(0,0,0,0) buttons.metal_button.color=(0,1,1) neopixels.turn_off() display.set_text('Finished calibration on ribbon '+self.name+'\nTry the ribbon out to see if you like it\nAlso rinting out sensor values to serial for a demo\n(Watch in the arduino plotter)\nPress the metal button when you\'re done') while not buttons.metal_press_viewer.value: if self.cheap_single_touch_reading().gate: with neopixels.TemporarilyTurnedOff(): cheap_single_touch_reading=self.cheap_single_touch_reading() single_touch_reading =self.single_touch_reading() dual_touch_reading =self.dual_touch_reading() dual_top = dual_touch_top_to_neopixel_calibration(dual_touch_reading .raw_a ) dual_bot = dual_touch_bot_to_neopixel_calibration(dual_touch_reading .raw_b ) single = single_touch_to_neopixel_calibration (single_touch_reading.raw_value) cheap_single=cheap_single_touch_to_neopixel_calibration(cheap_single_touch_reading.raw_value) if cheap_single_touch_reading.gate and single_touch_reading.gate: neopixels.display_dot(int(cheap_single),0,128,0) print(dual_top,dual_bot,single,cheap_single) self.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration) if widgets.input_yes_no("Would you like to save this\ncalibration for ribbon "+self.name+"?"): self.dual_touch_top_to_neopixel_calibration = dual_touch_top_to_neopixel_calibration self.dual_touch_bot_to_neopixel_calibration = dual_touch_bot_to_neopixel_calibration self.single_touch_to_neopixel_calibration = single_touch_to_neopixel_calibration self.cheap_single_touch_to_neopixel_calibration = cheap_single_touch_to_neopixel_calibration self.dual_touch_top_to_neopixel_calibration .save_to_file() self.dual_touch_bot_to_neopixel_calibration .save_to_file() self.single_touch_to_neopixel_calibration .save_to_file() self.cheap_single_touch_to_neopixel_calibration.save_to_file() display.set_text("Saved calibrations for ribbon "+self.name+"!") time.sleep(2) else: display.set_text("Cancelled. 
No calibrations were saved.") time.sleep(2) ask_to_try_again() return def test_smooth_demo( self, single_touch_to_neopixel_calibration=None, dual_touch_top_to_neopixel_calibration=None, dual_touch_bot_to_neopixel_calibration=None): import lightboard.buttons as buttons import lightboard.neopixels as neopixels import lightboard.display as display if single_touch_to_neopixel_calibration is None: single_touch_to_neopixel_calibration =self.single_touch_to_neopixel_calibration if dual_touch_top_to_neopixel_calibration is None: dual_touch_top_to_neopixel_calibration=self.dual_touch_top_to_neopixel_calibration if dual_touch_bot_to_neopixel_calibration is None: dual_touch_bot_to_neopixel_calibration=self.dual_touch_bot_to_neopixel_calibration buttons.metal_button.color=(1,0,1) buttons.set_green_button_lights(0,0,0,0) display.set_text("Smooth demo for ribbon %s\nPress metal to exit") #This is a show-offy demo lol. Try miscalibrating it such that a tiny vibrato makes it move from one side of the lightwave to the otehr... def mean(l): l=list(l) return sum(l)/len(l) def std(l): u=mean(l) return mean((x-u)**2 for x in l)**.5 class SuperSmooth: #A linear module created from the original code of this demo. #When DISCRETE is True, it's so sensitive that it can recognize individual ADS readings without noise when the finger is still. #Used to smooth ADS readings. def __init__(self): self.DISCRETE=True self.N=10 self.V=[] self.tet2=Tether(1) self.tether=SoftTether(size=5) self.value=None def __call__(self,value): raw_value=value self.V.append(raw_value) while len(self.V)>self.N: del self.V[0] val=self.tether(mean(self.V)) if self.DISCRETE: Val=(self.tet2(int(val))) else: Val=(val) self.value=Val return Val def clear(self): self.V.clear() self.tether.value=None super_smooth_single =SuperSmooth() super_smooth_dual_top=SuperSmooth() super_smooth_dual_bot=SuperSmooth() while not buttons.metal_press_viewer.value: single=self.single_touch_reading() if single.gate: dual=self.dual_touch_reading() val_top=dual_touch_top_to_neopixel_calibration(super_smooth_dual_top(dual.raw_a)) val_bot=dual_touch_bot_to_neopixel_calibration(super_smooth_dual_bot(dual.raw_b)) val=single_touch_to_neopixel_calibration(super_smooth_single(single.raw_value)) neopixels.draw_all_off() neopixels.draw_dot(floor(val_top), 0,30, 15) neopixels.draw_dot(floor(val_bot),15,30, 0) neopixels.draw_dot(floor(val ),64, 0,128) neopixels.refresh() else: super_smooth_single .clear() super_smooth_dual_top.clear() super_smooth_dual_bot.clear() neopixels.turn_off() class NoiseFilter: #This is a LinearModule #It should be cleared whever the gate is off def __init__(self,moving_average_length=10, soft_tether_size =5, tether_size =1, moving_median_length =1): self.moving_average=MovingAverage(moving_average_length) self.soft_tether=SoftTether(size=soft_tether_size) self.tether=Tether(size=tether_size) self.moving_median=MovingMedian(moving_median_length) def __call__(self,value): value=self.moving_average(value) value=self.soft_tether (value) value=self.tether (value) value=self.moving_median (value) return value def clear(self): self.soft_tether .clear() self.tether .clear() self.moving_average.clear() self.moving_median .clear() def copy(self): #Create a duplicate filter with the same parameters return NoiseFilter(self.moving_average.length,self.soft_tether.size,self.tether.size) class SingleTouchReading: __slots__=['gate','raw_lower','raw_upper','raw_gap', 'raw_value'] GATE_THRESHOLD=500 #This needs to be calibrated after observing the raw_gap when touching 
and not touching the ribbon. You can do this automatically with some fancy algorithm, or you can just look at the serial monitor while printing reading.raw_gap over and over again def __init__(self,ribbon): self.ribbon=ribbon self.read_raw_lower() self.read_raw_upper() self.process_readings() def prepare_to_read(self): activate_single_touch_transistors() self.ribbon.ads.mode=ADS.Mode.SINGLE self.ribbon.ads.gain=ads_gain_single def read_raw_lower(self): single_pull.value=False self.prepare_to_read() try: self.raw_lower=self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def read_raw_upper(self): single_pull.value=True self.prepare_to_read() try: self.raw_upper=self.ribbon.ads_single.value except OSError as exception: raise I2CError(exception) def process_readings(self): self.raw_gap=abs(self.raw_upper-self.raw_lower) self.gate=self.raw_gap<self.GATE_THRESHOLD self.raw_value=(self.raw_upper+self.raw_lower)/2 class ContinuousSingleTouchReading(SingleTouchReading): #Should be similar to SingleTouchReading, but much faster when not using DualTouchReading #WARNING AND TODO: This function isn't currently doing enough to flush out anything. Perhaps continous can use the CheapSingleTouchReading's gate, and a single non-wobbling single_pull value @staticmethod def prepare_to_read(): activate_single_touch_transistors() ads.mode=ADS.Mode.CONTINUOUS ads.gain=ads_gain_single self.ribbon.ads_single.value #Flush out the current reading of the ADC, in-case we changed single_pull in the middle of the ADS's reading (which happens 99% of the time if we don't do this lol - making detecting the gate practically useless) class CheapSingleTouchReading(SingleTouchReading): #TODO: The Teensy's internal ADC is wonked. Between around raw values 30000 and 35000, it jumps (whereas the ADS1115 doesn't jump). # Calibration with respect to the ADS1115's non-cheap single touch should mitigate this problem # Even though the raw range is the same for both analog_in and ads_single, we need a larger GATE_THRESHOLD for CheapSingleTouchReading beacause of this flaw in Teensy's ADC. #Uses the Teensy's internal ADC that can read up to 6000x per second #TODO: Implement a variation of the SingleTouchReading class called quick-gate check via the Teensy's internal ADC to save a bit of time and get more accurate results on the dual touch readings (because then we can check both upper and lower both before and after the dual readings which means less spikes) #GATE_THRESHOLD is proportional to a threshold of the voltage gap between LOW and HIGH #When GATE_THRESHOLD is small, there are less unwanted jumps when barely pressing the ribbon. But if its too small, it won't register touches. GATE_THRESHOLD=1500 #This was measured to be a good value for most of the ribbon GATE_THRESHOLD=4000 #But, the ribbon has a kink in the middle that jumps a lot voltage over the space of a milimeter. 
def read_raw_lower(self): self.prepare_to_read() single_pull.value=False self.raw_lower=self.ribbon.rib_mid.value def read_raw_upper(self): self.prepare_to_read() single_pull.value=True self.raw_upper=self.ribbon.rib_mid.value class DualTouchReading: __slots__ = ['raw_a', 'raw_b'] def __init__(self,ribbon): self.ribbon=ribbon self.prepare_to_read() try: self.raw_a=self.ribbon.ads_dual_top.value self.raw_b=self.ribbon.ads_dual_bot.value except OSError as exception: raise I2CError(exception) def prepare_to_read(self): activate_dual_touch_transistors() self.ribbon.ads.gain=ads_gain_dual class ProcessedDualTouchReading: __slots__=['gate','bot','top','mid','num_fingers','old','new'] DELTA_THRESHOLD=-4 # A distance, measured in neopixel widths, that the two dual touches can be apart from one another before registering as not being touched. (This is because, as it turns out, it can sometimes take more than one sample for dual touch values to go all the way back to the top after releasing your finger from the ribbon) #You want to calibrate DELTA_THRESHOLD such that it's high enough to keep good readings once you release your finger, but low enough that it doesn't require pressing down too hard to activate. #DELTA_THRESHOLD can be a negative value. #DELTA_THRESHOLD might need to be changed if you calibrate with a pencil eraser instead of your fingertip, because the pencil eraser is a narrower touch area etc. #You should always calibrate using your finger for this reason... TWO_TOUCH_THRESHOLD=2 #A distance, measured in neopixel widths, that the dual readings must be apart from each other to register as TWO_TOUCH_THRESHOLD_SLACK=.05 #A bit of hysterisis used here...like a tether. Basically, to prevent flickering on the bonudary, to switch between two touch and one touch you must move this much distance. def __init__(self,ribbon,blink=False): #If self.gate is False, your code shouldn't try to check for a .bot, .top, or .middle value - as it was never measured #If your fingers are pressing the ribbon in two different places, after calibration the 'top' value should be above the 'bot' value # In the event that the hardware of the z self.ribbon=ribbon def clear_filters(): ribbon.cheap_single_filter.clear() ribbon.dual_bot_filter.clear() ribbon.dual_top_filter.clear() previous_gate=ribbon.previous_gate single_before=ribbon.processed_cheap_single_touch_reading() if not single_before.gate: #Don't waste time with the dual touch reading if one of the gates is False self.gate=False clear_filters() return with neopixels.TemporarilyTurnedOff() if blink else EmptyContext(): dual_reading=ribbon.dual_touch_reading() single_after=ribbon.cheap_single_touch_reading() if not single_after.gate: self.gate=False clear_filters() return if not previous_gate: clear_filters() self.gate=True #single_before.gate and single_after.gate #TODO: Lower the DELTA_THRESHOLD and use self.middle whenever it gets too crazy; that way we can have maximum sensitivity and never miss a sample... 
raw_mid=(single_before.raw_value+single_after.raw_value)/2 raw_top=dual_reading.raw_a raw_bot=dual_reading.raw_b top=raw_top bot=raw_bot mid=raw_mid top=ribbon.dual_touch_top_to_neopixel_calibration(top) bot=ribbon.dual_touch_bot_to_neopixel_calibration(bot) mid=ribbon.cheap_single_touch_to_neopixel_calibration(mid) mid=ribbon.cheap_single_filter(mid) #I made a mistake on the lightboard...one of the resistors is too large or small (probably resistor tolerance issues) #As a result, one of the ribbons' dual touches doesn't work on the far ends of the ribbon #When this happens, the ADS's reading saturates to 32767 (with the current gain) #Instea of decreasing resolution by turning down the gain, or leaving a touch area unuseable, I'll just do this: #Note: Another valid solution is turning down the ADS1115's gain. This will solve the problem but decrease resolution... if int(raw_top)==32767: top=mid if int(raw_bot)==32767: bot=mid delta=top-bot # old_num_fingers=ribbon.dual_num_fingers # changed_num_fingers=False if delta<=self.DELTA_THRESHOLD: ribbon.dual_num_fingers=1 # changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers elif not previous_gate: ribbon.dual_num_fingers = 2 if delta>self.TWO_TOUCH_THRESHOLD else 1 # changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers elif ribbon.dual_num_fingers == 1 and delta>self.TWO_TOUCH_THRESHOLD+self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 2 # changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers elif ribbon.dual_num_fingers == 2 and delta<self.TWO_TOUCH_THRESHOLD-self.TWO_TOUCH_THRESHOLD_SLACK: ribbon.dual_num_fingers = 1 # changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers self.num_fingers=ribbon.dual_num_fingers # if changed_num_fingers: # clear_filters() if self.num_fingers==1: #Even if the two-touches can't be used, we can still use the single cheap touch value #Originally, this set gate to False. Now it doesn't. bot=top=mid elif bot>top: #The only time self.bot>self.top is when your're barely pressing on the ribbon at all... #...we can average these two values out to get a single, more reasonable value bot=top=(bot+top)/2 #The older and newer dual touch positions. Only different when num_fingers>1 if not hasattr(ribbon,'previous_dual_old'): ribbon.previous_dual_old=mid old,new=sorted([bot,top],key=lambda pos:abs(pos-ribbon.previous_dual_old)) self.top=ribbon.dual_top_filter(top) self.bot=ribbon.dual_bot_filter(bot) self.mid=mid self.old=old self.new=new ribbon.previous_dual_old=old class ProcessedSingleTouchReading: def __init__(self,ribbon,blink=False): self.ribbon=ribbon if ribbon.previous_gate: #If it was previously pressed, don't check the gate with the expensive reading... 
with neopixels.TemporarilyTurnedOff() if blink else EmptyContext(): single_touch_reading=ribbon.single_touch_reading() self.gate=single_touch_reading.gate else: cheap_single_touch_reading=ribbon.cheap_single_touch_reading() if cheap_single_touch_reading.gate: with neopixels.TemporarilyTurnedOff() if blink else EmptyContext(): single_touch_reading=ribbon.single_touch_reading() self.gate=single_touch_reading.gate else: self.gate=False if self.gate: self.raw_value=single_touch_reading.raw_value self.value=ribbon.single_touch_to_neopixel_calibration(self.raw_value) class ProcessedCheapSingleTouchReading: def __init__(self,ribbon,blink=False): self.ribbon=ribbon with neopixels.TemporarilyTurnedOff() if blink else EmptyContext(): if not ribbon.previous_gate: ribbon.cheap_single_touch_reading()#Sometimes it spikes on the first value for some reason...idk why cheap_single_touch_reading=ribbon.cheap_single_touch_reading() self.gate=cheap_single_touch_reading.gate if self.gate: self.raw_value=cheap_single_touch_reading.raw_value self.value=ribbon.cheap_single_touch_to_neopixel_calibration(self.raw_value) self.value=ribbon.cheap_single_filter(self.value) else: ribbon.cheap_single_filter.clear() # pass def test_ribbon_raw_uart(ribbon): #Use this test to print all (raw, uncalibrated) ribbon values to uart #Then, you can view them in an arduino grapher import lightboard.buttons as buttons import lightboard.display as display display.set_text('Running raw uart test\nPress metal button\nto end this test\n\nThe green buttons show\ncheap_gate and single_gate\n(They\'re just for display)') buttons.set_green_button_lights(0,0,0,0) buttons.metal_button.color=(255,0,0) while True: cheap =ribbon.cheap_single_touch_reading() single=ribbon.single_touch_reading() dual =ribbon.dual_touch_reading() c_raw_value,c_gate = cheap .raw_value, cheap .gate raw_value ,s_gate = single.raw_value, single.gate raw_a,raw_b = dual.raw_a,dual.raw_b message = '%s %i %i %.5f %.5f %.5f %.5f'%(ribbon.name, int(c_gate), int(s_gate), c_raw_value, raw_value, raw_a, raw_b) print(message) buttons.set_green_button_lights(c_gate,s_gate,0,0) if buttons.metal_press_viewer.value: buttons.metal_button.color=(0,0,0) display.set_text('Running raw uart test:\nDone!') break def test_ribbon_dual_touch(ribbon): import lightboard.buttons as buttons import lightboard.display as display display.set_text('Running dual-touch test on\nRibbon %s\n\nWhen yellow dot, one touch\nWhen white dot, two touches\n\nPress metal to exit'%ribbon.name) buttons.set_green_button_lights(0,0,0,0) buttons.metal_button.color=(255,0,0) while not buttons.metal_press_viewer.value: dual =ribbon.processed_dual_touch_reading() if not dual.gate: continue neopixels.draw_all_off() neopixels.draw_dot(dual.top, 64,0,128) neopixels.draw_dot(dual.bot, 128,0,64) neopixels.draw_dot(dual.mid, 128,128,128*(dual.num_fingers-1)) neopixels.refresh() buttons.metal_button.color=(0,0,0) display.set_text('test_ribbon_dual_touch: Done!') def show_calibration_menu(): import lightboard.widgets as widgets options = OrderedDict() options['Calibrate Rib A'] = ribbon_a.run_calibration options['Calibrate Rib B'] = ribbon_b.run_calibration options['Smooth Demo A' ] = ribbon_a.test_smooth_demo options['Smooth Demo B' ] = ribbon_b.test_smooth_demo options['Raw UART Demo A'] = lambda: test_ribbon_raw_uart(ribbon_a) options['Raw UART Demo B'] = lambda: test_ribbon_raw_uart(ribbon_b) options['Dual Touch Demo A'] = lambda: test_ribbon_dual_touch(ribbon_a) options['Dual Touch Demo B'] = lambda: 
test_ribbon_dual_touch(ribbon_b) widgets.run_select_subroutine(options) ribbon_a=Ribbon('a',rib_a_mid,ads_a,ads_a_single,ads_a_dual_top,ads_a_dual_b) ribbon_b=Ribbon('b',rib_b_mid,ads_b,ads_b_single,ads_b_dual_top,ads_b_dual_b)
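#Illustrative usage sketch: the public surface of this module is just ribbon_a and ribbon_b (see __all__ above),
#and a host program would typically poll one of them from its main loop and act on the ProcessedDualTouchReading it returns.
#The helper below is hypothetical (it is not part of the calibration/demo code above), but it only uses names that are
#already defined in this file or in the lightboard package this file imports.
def example_touch_loop(ribbon):
	#Mirror the touch position(s) on the given ribbon onto the neopixels until the metal button is pressed
	import lightboard.buttons as buttons
	import lightboard.neopixels as neopixels
	while not buttons.metal_press_viewer.value:
		reading=ribbon.processed_dual_touch_reading()
		if not reading.gate:
			neopixels.turn_off() #No finger is on the ribbon right now
			continue
		neopixels.draw_all_off()
		neopixels.draw_dot(int(reading.top),64,0,128) #Upper finger (equal to reading.mid when num_fingers==1)
		neopixels.draw_dot(int(reading.bot),128,0,64) #Lower finger
		neopixels.refresh()
	neopixels.turn_off()
#For example, example_touch_loop(ribbon_a) behaves roughly like test_ribbon_dual_touch(ribbon_a),
#minus the display text and button lights.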
flexible
{ "blob_id": "06caee24b9d0bb78e646f27486b9a3a0ed5f2502", "index": 6796, "step-1": "<mask token>\n\n\nclass SingleTouchReading:\n <mask token>\n <mask token>\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.read_raw_lower()\n self.read_raw_upper()\n self.process_readings()\n\n def prepare_to_read(self):\n activate_single_touch_transistors()\n self.ribbon.ads.mode = ADS.Mode.SINGLE\n self.ribbon.ads.gain = ads_gain_single\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ContinuousSingleTouchReading(SingleTouchReading):\n\n @staticmethod\n def prepare_to_read():\n activate_single_touch_transistors()\n ads.mode = ADS.Mode.CONTINUOUS\n ads.gain = ads_gain_single\n self.ribbon.ads_single.value\n\n\nclass CheapSingleTouchReading(SingleTouchReading):\n GATE_THRESHOLD = 1500\n GATE_THRESHOLD = 4000\n\n def read_raw_lower(self):\n self.prepare_to_read()\n single_pull.value = False\n self.raw_lower = self.ribbon.rib_mid.value\n\n def read_raw_upper(self):\n self.prepare_to_read()\n single_pull.value = True\n self.raw_upper = self.ribbon.rib_mid.value\n\n\nclass DualTouchReading:\n __slots__ = ['raw_a', 'raw_b']\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.prepare_to_read()\n try:\n self.raw_a = self.ribbon.ads_dual_top.value\n self.raw_b = self.ribbon.ads_dual_bot.value\n except OSError as exception:\n raise I2CError(exception)\n\n def prepare_to_read(self):\n activate_dual_touch_transistors()\n self.ribbon.ads.gain = ads_gain_dual\n\n\nclass ProcessedDualTouchReading:\n __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new']\n DELTA_THRESHOLD = -4\n TWO_TOUCH_THRESHOLD = 2\n TWO_TOUCH_THRESHOLD_SLACK = 0.05\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n\n def clear_filters():\n ribbon.cheap_single_filter.clear()\n ribbon.dual_bot_filter.clear()\n ribbon.dual_top_filter.clear()\n previous_gate = ribbon.previous_gate\n single_before = ribbon.processed_cheap_single_touch_reading()\n if not single_before.gate:\n self.gate = False\n clear_filters()\n return\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n dual_reading = ribbon.dual_touch_reading()\n single_after = ribbon.cheap_single_touch_reading()\n if not single_after.gate:\n self.gate = False\n clear_filters()\n return\n if not previous_gate:\n clear_filters()\n self.gate = True\n raw_mid = (single_before.raw_value + single_after.raw_value) / 2\n raw_top = dual_reading.raw_a\n raw_bot = dual_reading.raw_b\n top = raw_top\n bot = raw_bot\n mid = raw_mid\n top = ribbon.dual_touch_top_to_neopixel_calibration(top)\n bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot)\n mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid)\n mid = ribbon.cheap_single_filter(mid)\n if int(raw_top) == 32767:\n top = mid\n if int(raw_bot) == 32767:\n bot = mid\n delta = top - bot\n if delta <= self.DELTA_THRESHOLD:\n ribbon.dual_num_fingers = 1\n elif not previous_gate:\n ribbon.dual_num_fingers = (2 if delta > self.\n TWO_TOUCH_THRESHOLD else 1)\n elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 2\n elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 1\n self.num_fingers = ribbon.dual_num_fingers\n if self.num_fingers == 1:\n bot = top = mid\n elif bot > top:\n bot = top = (bot + top) / 2\n if not hasattr(ribbon, 'previous_dual_old'):\n ribbon.previous_dual_old = mid\n old, new = sorted([bot, top], 
key=lambda pos: abs(pos - ribbon.\n previous_dual_old))\n self.top = ribbon.dual_top_filter(top)\n self.bot = ribbon.dual_bot_filter(bot)\n self.mid = mid\n self.old = old\n self.new = new\n ribbon.previous_dual_old = old\n\n\nclass ProcessedSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n if ribbon.previous_gate:\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()\n ):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n if cheap_single_touch_reading.gate:\n with (neopixels.TemporarilyTurnedOff() if blink else\n EmptyContext()):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n self.gate = False\n if self.gate:\n self.raw_value = single_touch_reading.raw_value\n self.value = ribbon.single_touch_to_neopixel_calibration(self.\n raw_value)\n\n\nclass ProcessedCheapSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n if not ribbon.previous_gate:\n ribbon.cheap_single_touch_reading()\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n self.gate = cheap_single_touch_reading.gate\n if self.gate:\n self.raw_value = cheap_single_touch_reading.raw_value\n self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self\n .raw_value)\n self.value = ribbon.cheap_single_filter(self.value)\n else:\n ribbon.cheap_single_filter.clear()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Ribbon:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def is_calibrated(self):\n return (self.dual_touch_top_to_neopixel_calibration.is_fitted and\n self.dual_touch_bot_to_neopixel_calibration.is_fitted and self.\n single_touch_to_neopixel_calibration.is_fitted and self.\n cheap_single_touch_to_neopixel_calibration.is_fitted)\n <mask token>\n <mask token>\n <mask token>\n\n def processed_single_touch_reading(self, blink=False):\n reading = ProcessedSingleTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass NoiseFilter:\n\n def __init__(self, moving_average_length=10, soft_tether_size=5,\n tether_size=1, moving_median_length=1):\n self.moving_average = MovingAverage(moving_average_length)\n self.soft_tether = SoftTether(size=soft_tether_size)\n self.tether = Tether(size=tether_size)\n self.moving_median = MovingMedian(moving_median_length)\n\n def __call__(self, value):\n value = self.moving_average(value)\n value = self.soft_tether(value)\n value = self.tether(value)\n value = self.moving_median(value)\n return value\n\n def clear(self):\n self.soft_tether.clear()\n self.tether.clear()\n self.moving_average.clear()\n self.moving_median.clear()\n\n def copy(self):\n return NoiseFilter(self.moving_average.length, self.soft_tether.\n size, self.tether.size)\n\n\nclass SingleTouchReading:\n __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value']\n GATE_THRESHOLD = 500\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.read_raw_lower()\n self.read_raw_upper()\n self.process_readings()\n\n def prepare_to_read(self):\n activate_single_touch_transistors()\n self.ribbon.ads.mode = ADS.Mode.SINGLE\n self.ribbon.ads.gain = ads_gain_single\n\n def read_raw_lower(self):\n single_pull.value = False\n 
self.prepare_to_read()\n try:\n self.raw_lower = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def read_raw_upper(self):\n single_pull.value = True\n self.prepare_to_read()\n try:\n self.raw_upper = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def process_readings(self):\n self.raw_gap = abs(self.raw_upper - self.raw_lower)\n self.gate = self.raw_gap < self.GATE_THRESHOLD\n self.raw_value = (self.raw_upper + self.raw_lower) / 2\n\n\nclass ContinuousSingleTouchReading(SingleTouchReading):\n\n @staticmethod\n def prepare_to_read():\n activate_single_touch_transistors()\n ads.mode = ADS.Mode.CONTINUOUS\n ads.gain = ads_gain_single\n self.ribbon.ads_single.value\n\n\nclass CheapSingleTouchReading(SingleTouchReading):\n GATE_THRESHOLD = 1500\n GATE_THRESHOLD = 4000\n\n def read_raw_lower(self):\n self.prepare_to_read()\n single_pull.value = False\n self.raw_lower = self.ribbon.rib_mid.value\n\n def read_raw_upper(self):\n self.prepare_to_read()\n single_pull.value = True\n self.raw_upper = self.ribbon.rib_mid.value\n\n\nclass DualTouchReading:\n __slots__ = ['raw_a', 'raw_b']\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.prepare_to_read()\n try:\n self.raw_a = self.ribbon.ads_dual_top.value\n self.raw_b = self.ribbon.ads_dual_bot.value\n except OSError as exception:\n raise I2CError(exception)\n\n def prepare_to_read(self):\n activate_dual_touch_transistors()\n self.ribbon.ads.gain = ads_gain_dual\n\n\nclass ProcessedDualTouchReading:\n __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new']\n DELTA_THRESHOLD = -4\n TWO_TOUCH_THRESHOLD = 2\n TWO_TOUCH_THRESHOLD_SLACK = 0.05\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n\n def clear_filters():\n ribbon.cheap_single_filter.clear()\n ribbon.dual_bot_filter.clear()\n ribbon.dual_top_filter.clear()\n previous_gate = ribbon.previous_gate\n single_before = ribbon.processed_cheap_single_touch_reading()\n if not single_before.gate:\n self.gate = False\n clear_filters()\n return\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n dual_reading = ribbon.dual_touch_reading()\n single_after = ribbon.cheap_single_touch_reading()\n if not single_after.gate:\n self.gate = False\n clear_filters()\n return\n if not previous_gate:\n clear_filters()\n self.gate = True\n raw_mid = (single_before.raw_value + single_after.raw_value) / 2\n raw_top = dual_reading.raw_a\n raw_bot = dual_reading.raw_b\n top = raw_top\n bot = raw_bot\n mid = raw_mid\n top = ribbon.dual_touch_top_to_neopixel_calibration(top)\n bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot)\n mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid)\n mid = ribbon.cheap_single_filter(mid)\n if int(raw_top) == 32767:\n top = mid\n if int(raw_bot) == 32767:\n bot = mid\n delta = top - bot\n if delta <= self.DELTA_THRESHOLD:\n ribbon.dual_num_fingers = 1\n elif not previous_gate:\n ribbon.dual_num_fingers = (2 if delta > self.\n TWO_TOUCH_THRESHOLD else 1)\n elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 2\n elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 1\n self.num_fingers = ribbon.dual_num_fingers\n if self.num_fingers == 1:\n bot = top = mid\n elif bot > top:\n bot = top = (bot + top) / 2\n if not hasattr(ribbon, 'previous_dual_old'):\n 
ribbon.previous_dual_old = mid\n old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon.\n previous_dual_old))\n self.top = ribbon.dual_top_filter(top)\n self.bot = ribbon.dual_bot_filter(bot)\n self.mid = mid\n self.old = old\n self.new = new\n ribbon.previous_dual_old = old\n\n\nclass ProcessedSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n if ribbon.previous_gate:\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()\n ):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n if cheap_single_touch_reading.gate:\n with (neopixels.TemporarilyTurnedOff() if blink else\n EmptyContext()):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n self.gate = False\n if self.gate:\n self.raw_value = single_touch_reading.raw_value\n self.value = ribbon.single_touch_to_neopixel_calibration(self.\n raw_value)\n\n\nclass ProcessedCheapSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n if not ribbon.previous_gate:\n ribbon.cheap_single_touch_reading()\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n self.gate = cheap_single_touch_reading.gate\n if self.gate:\n self.raw_value = cheap_single_touch_reading.raw_value\n self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self\n .raw_value)\n self.value = ribbon.cheap_single_filter(self.value)\n else:\n ribbon.cheap_single_filter.clear()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Ribbon:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, rib_mid, ads, ads_single, ads_dual_top,\n ads_dual_bot):\n self.name = name\n self.rib_mid = rib_mid\n self.ads = ads\n self.ads_single = ads_single\n self.ads_dual_top = ads_dual_top\n self.ads_dual_bot = ads_dual_bot\n dual_touch_top_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_dual_touch_top_to_neopixel_calibration')\n dual_touch_bot_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_dual_touch_bot_to_neopixel_calibration')\n single_touch_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_single_touch_to_neopixel_calibration')\n cheap_single_touch_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_cheap_single_touch_to_neopixel_calibration')\n self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n dual_touch_top_to_neopixel_calibration_path, auto_load=True)\n self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n dual_touch_bot_to_neopixel_calibration_path, auto_load=True)\n self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n single_touch_to_neopixel_calibration_path, auto_load=True)\n self.cheap_single_touch_to_neopixel_calibration = HistogramFitter(\n bin_size=self.RIB_BIN_SIZE, file_path=\n cheap_single_touch_to_neopixel_calibration_path, auto_load=True)\n self.previous_gate = False\n self.dual_num_fingers = 0\n dual_filter_moving_average_length = 3\n dual_filter_soft_tether_size = 0.1\n dual_filter_tether_size = 0.05\n self.dual_bot_filter = NoiseFilter(moving_average_length=\n dual_filter_moving_average_length, 
soft_tether_size=\n dual_filter_soft_tether_size, tether_size=dual_filter_tether_size)\n self.dual_top_filter = NoiseFilter(moving_average_length=\n dual_filter_moving_average_length, soft_tether_size=\n dual_filter_soft_tether_size, tether_size=dual_filter_tether_size)\n self.cheap_single_filter = NoiseFilter(moving_average_length=1,\n soft_tether_size=0.3, tether_size=0.01, moving_median_length=1)\n\n @property\n def is_calibrated(self):\n return (self.dual_touch_top_to_neopixel_calibration.is_fitted and\n self.dual_touch_bot_to_neopixel_calibration.is_fitted and self.\n single_touch_to_neopixel_calibration.is_fitted and self.\n cheap_single_touch_to_neopixel_calibration.is_fitted)\n\n def dual_touch_reading(self):\n reading = DualTouchReading(self)\n return reading\n\n def single_touch_reading(self):\n reading = SingleTouchReading(self)\n self.previous_gate = reading.gate\n return reading\n\n def cheap_single_touch_reading(self):\n reading = CheapSingleTouchReading(self)\n self.previous_gate = reading.gate\n return reading\n\n def processed_single_touch_reading(self, blink=False):\n reading = ProcessedSingleTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def processed_cheap_single_touch_reading(self, blink=False):\n reading = ProcessedCheapSingleTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def processed_dual_touch_reading(self, blink=False):\n reading = ProcessedDualTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def run_calibration(self, samples_per_pixel=25):\n import lightboard.display as display\n import lightboard.neopixels as neopixels\n import lightboard.buttons as buttons\n import lightboard.widgets as widgets\n buttons.metal_press_viewer.value\n\n def ask_to_try_again():\n if widgets.input_yes_no('Would you like to try calibrating again?'\n ):\n self.run_calibration(samples_per_pixel)\n start_from_scratch = True\n dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n dual_touch_top_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n dual_touch_bot_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n single_touch_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n single_touch_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size\n =self.RIB_BIN_SIZE, file_path=self.\n cheap_single_touch_to_neopixel_calibration.file_path, auto_load\n =not start_from_scratch)\n buttons.metal_button.color = 255, 0, 255\n\n def show_instructions():\n display.set_text('Running calibration on ribbon ' + self.name +\n \"\"\"\nPlease press the glowing green buttons until\nthe red dot is barely on the ribbon\"\"\"\n )\n buttons.set_green_button_lights(1, 1, 0, 0)\n show_instructions()\n button_press_next_neopixel = buttons.ButtonPressViewer(buttons.\n green_button_1)\n button_press_prev_neopixel = buttons.ButtonPressViewer(buttons.\n green_button_3)\n\n def display_neopixel_calibration(cursor_index, r, g, b,\n highlighted_pixels=[]):\n nonlocal calibrated_pixels\n neopixels.draw_all_off()\n for pixel in highlighted_pixels:\n neopixels.draw_dot(pixel, 0, 10, 0)\n neopixels.draw_dot(cursor_index, r, g, b)\n neopixels.refresh()\n i = 0\n i = neopixels.first\n 
display_neopixel_calibration(i, 63, 0, 0)\n buttons.metal_press_viewer.value\n while True:\n reading = self.cheap_single_touch_reading()\n if reading.gate:\n break\n refresh_flag = False\n if button_press_next_neopixel.value:\n i += 1\n refresh_flag = True\n if button_press_prev_neopixel.value:\n i -= 1\n refresh_flag = True\n if refresh_flag:\n i = min(neopixels.length - 1, max(0, i))\n display_neopixel_calibration(i, 63, 0, 0)\n if buttons.metal_press_viewer.value:\n if widgets.input_yes_no(\n \"\"\"Do you want to cancel calibration?\n(All progress will be lost)\"\"\"\n ):\n ask_to_try_again()\n return\n else:\n show_instructions()\n button_press_skip = buttons.ButtonPressViewer(buttons.green_button_1)\n button_press_back = buttons.ButtonPressViewer(buttons.green_button_3)\n button_press_finished = buttons.ButtonPressViewer(buttons.\n green_button_2)\n buttons.set_green_button_lights(1, 1, 0, 0)\n\n def show_instructions():\n display.set_text('Running calibration on ribbon ' + self.name +\n \"\"\"\nPlease press cyan dots on ribbon\nuntil they become orange\nPress the 2rd green button when you're done\n(If the 2rd green button isnt lit, calibrate at least two points)\nPress button 1 to skip the current dot\nPress button 3 to go back a dot\"\"\"\n )\n show_instructions()\n finished = False\n calibrated_pixels = set()\n while not finished:\n i = max(0, min(i, neopixels.length - 1))\n display_neopixel_calibration(i, 0, 63, 63, calibrated_pixels)\n dual_a_samples = []\n dual_b_samples = []\n single_samples = []\n cheap_samples = []\n pixel_num_samples = 0\n buttons.metal_press_viewer.value\n while True:\n buttons.green_button_3.light = len(calibrated_pixels) >= 2\n if buttons.metal_press_viewer.value:\n if widgets.input_yes_no(\n \"\"\"Do you want to cancel calibration?\n(All progress will be lost)\"\"\"\n ):\n ask_to_try_again()\n return\n else:\n show_instructions()\n if button_press_skip.value:\n break\n if button_press_back.value:\n i -= 2\n break\n if button_press_finished.value and len(calibrated_pixels) >= 2:\n if widgets.input_yes_no(\n \"\"\"Do you want to test your calibration?\nYes: Test it!\nNo: I'm done calibrating!\"\"\"\n ):\n with buttons.TemporaryButtonLights():\n self.test_smooth_demo(\n single_touch_to_neopixel_calibration,\n dual_touch_top_to_neopixel_calibration,\n dual_touch_bot_to_neopixel_calibration)\n show_instructions()\n elif widgets.input_yes_no(\n \"Are you sure your're done\\ncalibrating this ribbon?\"):\n finished = True\n break\n else:\n show_instructions()\n if len(cheap_samples) >= samples_per_pixel:\n dual_touch_top_to_neopixel_calibration.add_sample(median\n (dual_a_samples), i)\n dual_touch_bot_to_neopixel_calibration.add_sample(median\n (dual_b_samples), i)\n single_touch_to_neopixel_calibration.add_sample(median(\n single_samples), i)\n cheap_single_touch_to_neopixel_calibration.add_sample(\n median(cheap_samples), i)\n calibrated_pixels.add(i)\n break\n if self.cheap_single_touch_reading().gate:\n with neopixels.TemporarilyTurnedOff():\n cheap_single_touch_reading = (self.\n cheap_single_touch_reading())\n single_touch_reading = self.single_touch_reading()\n dual_touch_reading = self.dual_touch_reading()\n if (single_touch_reading.gate and\n cheap_single_touch_reading.gate):\n dual_a_samples.append(dual_touch_reading.raw_a)\n dual_b_samples.append(dual_touch_reading.raw_b)\n single_samples.append(single_touch_reading.\n raw_value)\n cheap_samples.append(cheap_single_touch_reading\n .raw_value)\n pixel_num_samples += 1\n else:\n dual_a_samples.clear()\n 
dual_b_samples.clear()\n single_samples.clear()\n cheap_samples.clear()\n i += 1\n display_neopixel_calibration(i, 63, 31, 0, calibrated_pixels)\n while self.cheap_single_touch_reading().gate:\n pass\n buttons.set_green_button_lights(0, 0, 0, 0)\n buttons.metal_button.color = 0, 1, 1\n neopixels.turn_off()\n display.set_text('Finished calibration on ribbon ' + self.name +\n \"\"\"\nTry the ribbon out to see if you like it\nAlso rinting out sensor values to serial for a demo\n(Watch in the arduino plotter)\nPress the metal button when you're done\"\"\"\n )\n while not buttons.metal_press_viewer.value:\n if self.cheap_single_touch_reading().gate:\n with neopixels.TemporarilyTurnedOff():\n cheap_single_touch_reading = (self.\n cheap_single_touch_reading())\n single_touch_reading = self.single_touch_reading()\n dual_touch_reading = self.dual_touch_reading()\n dual_top = dual_touch_top_to_neopixel_calibration(\n dual_touch_reading.raw_a)\n dual_bot = dual_touch_bot_to_neopixel_calibration(\n dual_touch_reading.raw_b)\n single = single_touch_to_neopixel_calibration(\n single_touch_reading.raw_value)\n cheap_single = cheap_single_touch_to_neopixel_calibration(\n cheap_single_touch_reading.raw_value)\n if (cheap_single_touch_reading.gate and\n single_touch_reading.gate):\n neopixels.display_dot(int(cheap_single), 0, 128, 0)\n print(dual_top, dual_bot, single, cheap_single)\n self.test_smooth_demo(single_touch_to_neopixel_calibration,\n dual_touch_top_to_neopixel_calibration,\n dual_touch_bot_to_neopixel_calibration)\n if widgets.input_yes_no(\n 'Would you like to save this\\ncalibration for ribbon ' + self.\n name + '?'):\n self.dual_touch_top_to_neopixel_calibration = (\n dual_touch_top_to_neopixel_calibration)\n self.dual_touch_bot_to_neopixel_calibration = (\n dual_touch_bot_to_neopixel_calibration)\n self.single_touch_to_neopixel_calibration = (\n single_touch_to_neopixel_calibration)\n self.cheap_single_touch_to_neopixel_calibration = (\n cheap_single_touch_to_neopixel_calibration)\n self.dual_touch_top_to_neopixel_calibration.save_to_file()\n self.dual_touch_bot_to_neopixel_calibration.save_to_file()\n self.single_touch_to_neopixel_calibration.save_to_file()\n self.cheap_single_touch_to_neopixel_calibration.save_to_file()\n display.set_text('Saved calibrations for ribbon ' + self.name + '!'\n )\n time.sleep(2)\n else:\n display.set_text('Cancelled. 
No calibrations were saved.')\n time.sleep(2)\n ask_to_try_again()\n return\n\n def test_smooth_demo(self, single_touch_to_neopixel_calibration=None,\n dual_touch_top_to_neopixel_calibration=None,\n dual_touch_bot_to_neopixel_calibration=None):\n import lightboard.buttons as buttons\n import lightboard.neopixels as neopixels\n import lightboard.display as display\n if single_touch_to_neopixel_calibration is None:\n single_touch_to_neopixel_calibration = (self.\n single_touch_to_neopixel_calibration)\n if dual_touch_top_to_neopixel_calibration is None:\n dual_touch_top_to_neopixel_calibration = (self.\n dual_touch_top_to_neopixel_calibration)\n if dual_touch_bot_to_neopixel_calibration is None:\n dual_touch_bot_to_neopixel_calibration = (self.\n dual_touch_bot_to_neopixel_calibration)\n buttons.metal_button.color = 1, 0, 1\n buttons.set_green_button_lights(0, 0, 0, 0)\n display.set_text('Smooth demo for ribbon %s\\nPress metal to exit')\n\n def mean(l):\n l = list(l)\n return sum(l) / len(l)\n\n def std(l):\n u = mean(l)\n return mean((x - u) ** 2 for x in l) ** 0.5\n\n\n class SuperSmooth:\n\n def __init__(self):\n self.DISCRETE = True\n self.N = 10\n self.V = []\n self.tet2 = Tether(1)\n self.tether = SoftTether(size=5)\n self.value = None\n\n def __call__(self, value):\n raw_value = value\n self.V.append(raw_value)\n while len(self.V) > self.N:\n del self.V[0]\n val = self.tether(mean(self.V))\n if self.DISCRETE:\n Val = self.tet2(int(val))\n else:\n Val = val\n self.value = Val\n return Val\n\n def clear(self):\n self.V.clear()\n self.tether.value = None\n super_smooth_single = SuperSmooth()\n super_smooth_dual_top = SuperSmooth()\n super_smooth_dual_bot = SuperSmooth()\n while not buttons.metal_press_viewer.value:\n single = self.single_touch_reading()\n if single.gate:\n dual = self.dual_touch_reading()\n val_top = dual_touch_top_to_neopixel_calibration(\n super_smooth_dual_top(dual.raw_a))\n val_bot = dual_touch_bot_to_neopixel_calibration(\n super_smooth_dual_bot(dual.raw_b))\n val = single_touch_to_neopixel_calibration(super_smooth_single\n (single.raw_value))\n neopixels.draw_all_off()\n neopixels.draw_dot(floor(val_top), 0, 30, 15)\n neopixels.draw_dot(floor(val_bot), 15, 30, 0)\n neopixels.draw_dot(floor(val), 64, 0, 128)\n neopixels.refresh()\n else:\n super_smooth_single.clear()\n super_smooth_dual_top.clear()\n super_smooth_dual_bot.clear()\n neopixels.turn_off()\n\n\nclass NoiseFilter:\n\n def __init__(self, moving_average_length=10, soft_tether_size=5,\n tether_size=1, moving_median_length=1):\n self.moving_average = MovingAverage(moving_average_length)\n self.soft_tether = SoftTether(size=soft_tether_size)\n self.tether = Tether(size=tether_size)\n self.moving_median = MovingMedian(moving_median_length)\n\n def __call__(self, value):\n value = self.moving_average(value)\n value = self.soft_tether(value)\n value = self.tether(value)\n value = self.moving_median(value)\n return value\n\n def clear(self):\n self.soft_tether.clear()\n self.tether.clear()\n self.moving_average.clear()\n self.moving_median.clear()\n\n def copy(self):\n return NoiseFilter(self.moving_average.length, self.soft_tether.\n size, self.tether.size)\n\n\nclass SingleTouchReading:\n __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value']\n GATE_THRESHOLD = 500\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.read_raw_lower()\n self.read_raw_upper()\n self.process_readings()\n\n def prepare_to_read(self):\n activate_single_touch_transistors()\n self.ribbon.ads.mode = 
ADS.Mode.SINGLE\n self.ribbon.ads.gain = ads_gain_single\n\n def read_raw_lower(self):\n single_pull.value = False\n self.prepare_to_read()\n try:\n self.raw_lower = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def read_raw_upper(self):\n single_pull.value = True\n self.prepare_to_read()\n try:\n self.raw_upper = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def process_readings(self):\n self.raw_gap = abs(self.raw_upper - self.raw_lower)\n self.gate = self.raw_gap < self.GATE_THRESHOLD\n self.raw_value = (self.raw_upper + self.raw_lower) / 2\n\n\nclass ContinuousSingleTouchReading(SingleTouchReading):\n\n @staticmethod\n def prepare_to_read():\n activate_single_touch_transistors()\n ads.mode = ADS.Mode.CONTINUOUS\n ads.gain = ads_gain_single\n self.ribbon.ads_single.value\n\n\nclass CheapSingleTouchReading(SingleTouchReading):\n GATE_THRESHOLD = 1500\n GATE_THRESHOLD = 4000\n\n def read_raw_lower(self):\n self.prepare_to_read()\n single_pull.value = False\n self.raw_lower = self.ribbon.rib_mid.value\n\n def read_raw_upper(self):\n self.prepare_to_read()\n single_pull.value = True\n self.raw_upper = self.ribbon.rib_mid.value\n\n\nclass DualTouchReading:\n __slots__ = ['raw_a', 'raw_b']\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.prepare_to_read()\n try:\n self.raw_a = self.ribbon.ads_dual_top.value\n self.raw_b = self.ribbon.ads_dual_bot.value\n except OSError as exception:\n raise I2CError(exception)\n\n def prepare_to_read(self):\n activate_dual_touch_transistors()\n self.ribbon.ads.gain = ads_gain_dual\n\n\nclass ProcessedDualTouchReading:\n __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new']\n DELTA_THRESHOLD = -4\n TWO_TOUCH_THRESHOLD = 2\n TWO_TOUCH_THRESHOLD_SLACK = 0.05\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n\n def clear_filters():\n ribbon.cheap_single_filter.clear()\n ribbon.dual_bot_filter.clear()\n ribbon.dual_top_filter.clear()\n previous_gate = ribbon.previous_gate\n single_before = ribbon.processed_cheap_single_touch_reading()\n if not single_before.gate:\n self.gate = False\n clear_filters()\n return\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n dual_reading = ribbon.dual_touch_reading()\n single_after = ribbon.cheap_single_touch_reading()\n if not single_after.gate:\n self.gate = False\n clear_filters()\n return\n if not previous_gate:\n clear_filters()\n self.gate = True\n raw_mid = (single_before.raw_value + single_after.raw_value) / 2\n raw_top = dual_reading.raw_a\n raw_bot = dual_reading.raw_b\n top = raw_top\n bot = raw_bot\n mid = raw_mid\n top = ribbon.dual_touch_top_to_neopixel_calibration(top)\n bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot)\n mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid)\n mid = ribbon.cheap_single_filter(mid)\n if int(raw_top) == 32767:\n top = mid\n if int(raw_bot) == 32767:\n bot = mid\n delta = top - bot\n if delta <= self.DELTA_THRESHOLD:\n ribbon.dual_num_fingers = 1\n elif not previous_gate:\n ribbon.dual_num_fingers = (2 if delta > self.\n TWO_TOUCH_THRESHOLD else 1)\n elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 2\n elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 1\n self.num_fingers = ribbon.dual_num_fingers\n if self.num_fingers == 1:\n bot = top = mid\n 
elif bot > top:\n bot = top = (bot + top) / 2\n if not hasattr(ribbon, 'previous_dual_old'):\n ribbon.previous_dual_old = mid\n old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon.\n previous_dual_old))\n self.top = ribbon.dual_top_filter(top)\n self.bot = ribbon.dual_bot_filter(bot)\n self.mid = mid\n self.old = old\n self.new = new\n ribbon.previous_dual_old = old\n\n\nclass ProcessedSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n if ribbon.previous_gate:\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()\n ):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n if cheap_single_touch_reading.gate:\n with (neopixels.TemporarilyTurnedOff() if blink else\n EmptyContext()):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n self.gate = False\n if self.gate:\n self.raw_value = single_touch_reading.raw_value\n self.value = ribbon.single_touch_to_neopixel_calibration(self.\n raw_value)\n\n\nclass ProcessedCheapSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n if not ribbon.previous_gate:\n ribbon.cheap_single_touch_reading()\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n self.gate = cheap_single_touch_reading.gate\n if self.gate:\n self.raw_value = cheap_single_touch_reading.raw_value\n self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self\n .raw_value)\n self.value = ribbon.cheap_single_filter(self.value)\n else:\n ribbon.cheap_single_filter.clear()\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass I2CError(OSError):\n pass\n\n\nclass Ribbon:\n ADS_BIN_SIZE = 100\n RIB_BIN_SIZE = 100\n CALIBRATION_FOLDER = '/generated/calibrations/ribbons'\n\n def __init__(self, name, rib_mid, ads, ads_single, ads_dual_top,\n ads_dual_bot):\n self.name = name\n self.rib_mid = rib_mid\n self.ads = ads\n self.ads_single = ads_single\n self.ads_dual_top = ads_dual_top\n self.ads_dual_bot = ads_dual_bot\n dual_touch_top_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_dual_touch_top_to_neopixel_calibration')\n dual_touch_bot_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_dual_touch_bot_to_neopixel_calibration')\n single_touch_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_single_touch_to_neopixel_calibration')\n cheap_single_touch_to_neopixel_calibration_path = path_join(self.\n CALIBRATION_FOLDER, self.name +\n '_cheap_single_touch_to_neopixel_calibration')\n self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n dual_touch_top_to_neopixel_calibration_path, auto_load=True)\n self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n dual_touch_bot_to_neopixel_calibration_path, auto_load=True)\n self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size\n =self.ADS_BIN_SIZE, file_path=\n single_touch_to_neopixel_calibration_path, auto_load=True)\n self.cheap_single_touch_to_neopixel_calibration = HistogramFitter(\n bin_size=self.RIB_BIN_SIZE, file_path=\n cheap_single_touch_to_neopixel_calibration_path, auto_load=True)\n self.previous_gate = False\n self.dual_num_fingers = 0\n 
dual_filter_moving_average_length = 3\n dual_filter_soft_tether_size = 0.1\n dual_filter_tether_size = 0.05\n self.dual_bot_filter = NoiseFilter(moving_average_length=\n dual_filter_moving_average_length, soft_tether_size=\n dual_filter_soft_tether_size, tether_size=dual_filter_tether_size)\n self.dual_top_filter = NoiseFilter(moving_average_length=\n dual_filter_moving_average_length, soft_tether_size=\n dual_filter_soft_tether_size, tether_size=dual_filter_tether_size)\n self.cheap_single_filter = NoiseFilter(moving_average_length=1,\n soft_tether_size=0.3, tether_size=0.01, moving_median_length=1)\n\n @property\n def is_calibrated(self):\n return (self.dual_touch_top_to_neopixel_calibration.is_fitted and\n self.dual_touch_bot_to_neopixel_calibration.is_fitted and self.\n single_touch_to_neopixel_calibration.is_fitted and self.\n cheap_single_touch_to_neopixel_calibration.is_fitted)\n\n def dual_touch_reading(self):\n reading = DualTouchReading(self)\n return reading\n\n def single_touch_reading(self):\n reading = SingleTouchReading(self)\n self.previous_gate = reading.gate\n return reading\n\n def cheap_single_touch_reading(self):\n reading = CheapSingleTouchReading(self)\n self.previous_gate = reading.gate\n return reading\n\n def processed_single_touch_reading(self, blink=False):\n reading = ProcessedSingleTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def processed_cheap_single_touch_reading(self, blink=False):\n reading = ProcessedCheapSingleTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def processed_dual_touch_reading(self, blink=False):\n reading = ProcessedDualTouchReading(self, blink=blink)\n self.previous_gate = reading.gate\n return reading\n\n def run_calibration(self, samples_per_pixel=25):\n import lightboard.display as display\n import lightboard.neopixels as neopixels\n import lightboard.buttons as buttons\n import lightboard.widgets as widgets\n buttons.metal_press_viewer.value\n\n def ask_to_try_again():\n if widgets.input_yes_no('Would you like to try calibrating again?'\n ):\n self.run_calibration(samples_per_pixel)\n start_from_scratch = True\n dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n dual_touch_top_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n dual_touch_bot_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n single_touch_to_neopixel_calibration = HistogramFitter(bin_size=\n self.ADS_BIN_SIZE, file_path=self.\n single_touch_to_neopixel_calibration.file_path, auto_load=not\n start_from_scratch)\n cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size\n =self.RIB_BIN_SIZE, file_path=self.\n cheap_single_touch_to_neopixel_calibration.file_path, auto_load\n =not start_from_scratch)\n buttons.metal_button.color = 255, 0, 255\n\n def show_instructions():\n display.set_text('Running calibration on ribbon ' + self.name +\n \"\"\"\nPlease press the glowing green buttons until\nthe red dot is barely on the ribbon\"\"\"\n )\n buttons.set_green_button_lights(1, 1, 0, 0)\n show_instructions()\n button_press_next_neopixel = buttons.ButtonPressViewer(buttons.\n green_button_1)\n button_press_prev_neopixel = buttons.ButtonPressViewer(buttons.\n green_button_3)\n\n def display_neopixel_calibration(cursor_index, r, g, b,\n highlighted_pixels=[]):\n nonlocal 
calibrated_pixels\n neopixels.draw_all_off()\n for pixel in highlighted_pixels:\n neopixels.draw_dot(pixel, 0, 10, 0)\n neopixels.draw_dot(cursor_index, r, g, b)\n neopixels.refresh()\n i = 0\n i = neopixels.first\n display_neopixel_calibration(i, 63, 0, 0)\n buttons.metal_press_viewer.value\n while True:\n reading = self.cheap_single_touch_reading()\n if reading.gate:\n break\n refresh_flag = False\n if button_press_next_neopixel.value:\n i += 1\n refresh_flag = True\n if button_press_prev_neopixel.value:\n i -= 1\n refresh_flag = True\n if refresh_flag:\n i = min(neopixels.length - 1, max(0, i))\n display_neopixel_calibration(i, 63, 0, 0)\n if buttons.metal_press_viewer.value:\n if widgets.input_yes_no(\n \"\"\"Do you want to cancel calibration?\n(All progress will be lost)\"\"\"\n ):\n ask_to_try_again()\n return\n else:\n show_instructions()\n button_press_skip = buttons.ButtonPressViewer(buttons.green_button_1)\n button_press_back = buttons.ButtonPressViewer(buttons.green_button_3)\n button_press_finished = buttons.ButtonPressViewer(buttons.\n green_button_2)\n buttons.set_green_button_lights(1, 1, 0, 0)\n\n def show_instructions():\n display.set_text('Running calibration on ribbon ' + self.name +\n \"\"\"\nPlease press cyan dots on ribbon\nuntil they become orange\nPress the 2rd green button when you're done\n(If the 2rd green button isnt lit, calibrate at least two points)\nPress button 1 to skip the current dot\nPress button 3 to go back a dot\"\"\"\n )\n show_instructions()\n finished = False\n calibrated_pixels = set()\n while not finished:\n i = max(0, min(i, neopixels.length - 1))\n display_neopixel_calibration(i, 0, 63, 63, calibrated_pixels)\n dual_a_samples = []\n dual_b_samples = []\n single_samples = []\n cheap_samples = []\n pixel_num_samples = 0\n buttons.metal_press_viewer.value\n while True:\n buttons.green_button_3.light = len(calibrated_pixels) >= 2\n if buttons.metal_press_viewer.value:\n if widgets.input_yes_no(\n \"\"\"Do you want to cancel calibration?\n(All progress will be lost)\"\"\"\n ):\n ask_to_try_again()\n return\n else:\n show_instructions()\n if button_press_skip.value:\n break\n if button_press_back.value:\n i -= 2\n break\n if button_press_finished.value and len(calibrated_pixels) >= 2:\n if widgets.input_yes_no(\n \"\"\"Do you want to test your calibration?\nYes: Test it!\nNo: I'm done calibrating!\"\"\"\n ):\n with buttons.TemporaryButtonLights():\n self.test_smooth_demo(\n single_touch_to_neopixel_calibration,\n dual_touch_top_to_neopixel_calibration,\n dual_touch_bot_to_neopixel_calibration)\n show_instructions()\n elif widgets.input_yes_no(\n \"Are you sure your're done\\ncalibrating this ribbon?\"):\n finished = True\n break\n else:\n show_instructions()\n if len(cheap_samples) >= samples_per_pixel:\n dual_touch_top_to_neopixel_calibration.add_sample(median\n (dual_a_samples), i)\n dual_touch_bot_to_neopixel_calibration.add_sample(median\n (dual_b_samples), i)\n single_touch_to_neopixel_calibration.add_sample(median(\n single_samples), i)\n cheap_single_touch_to_neopixel_calibration.add_sample(\n median(cheap_samples), i)\n calibrated_pixels.add(i)\n break\n if self.cheap_single_touch_reading().gate:\n with neopixels.TemporarilyTurnedOff():\n cheap_single_touch_reading = (self.\n cheap_single_touch_reading())\n single_touch_reading = self.single_touch_reading()\n dual_touch_reading = self.dual_touch_reading()\n if (single_touch_reading.gate and\n cheap_single_touch_reading.gate):\n dual_a_samples.append(dual_touch_reading.raw_a)\n 
dual_b_samples.append(dual_touch_reading.raw_b)\n single_samples.append(single_touch_reading.\n raw_value)\n cheap_samples.append(cheap_single_touch_reading\n .raw_value)\n pixel_num_samples += 1\n else:\n dual_a_samples.clear()\n dual_b_samples.clear()\n single_samples.clear()\n cheap_samples.clear()\n i += 1\n display_neopixel_calibration(i, 63, 31, 0, calibrated_pixels)\n while self.cheap_single_touch_reading().gate:\n pass\n buttons.set_green_button_lights(0, 0, 0, 0)\n buttons.metal_button.color = 0, 1, 1\n neopixels.turn_off()\n display.set_text('Finished calibration on ribbon ' + self.name +\n \"\"\"\nTry the ribbon out to see if you like it\nAlso rinting out sensor values to serial for a demo\n(Watch in the arduino plotter)\nPress the metal button when you're done\"\"\"\n )\n while not buttons.metal_press_viewer.value:\n if self.cheap_single_touch_reading().gate:\n with neopixels.TemporarilyTurnedOff():\n cheap_single_touch_reading = (self.\n cheap_single_touch_reading())\n single_touch_reading = self.single_touch_reading()\n dual_touch_reading = self.dual_touch_reading()\n dual_top = dual_touch_top_to_neopixel_calibration(\n dual_touch_reading.raw_a)\n dual_bot = dual_touch_bot_to_neopixel_calibration(\n dual_touch_reading.raw_b)\n single = single_touch_to_neopixel_calibration(\n single_touch_reading.raw_value)\n cheap_single = cheap_single_touch_to_neopixel_calibration(\n cheap_single_touch_reading.raw_value)\n if (cheap_single_touch_reading.gate and\n single_touch_reading.gate):\n neopixels.display_dot(int(cheap_single), 0, 128, 0)\n print(dual_top, dual_bot, single, cheap_single)\n self.test_smooth_demo(single_touch_to_neopixel_calibration,\n dual_touch_top_to_neopixel_calibration,\n dual_touch_bot_to_neopixel_calibration)\n if widgets.input_yes_no(\n 'Would you like to save this\\ncalibration for ribbon ' + self.\n name + '?'):\n self.dual_touch_top_to_neopixel_calibration = (\n dual_touch_top_to_neopixel_calibration)\n self.dual_touch_bot_to_neopixel_calibration = (\n dual_touch_bot_to_neopixel_calibration)\n self.single_touch_to_neopixel_calibration = (\n single_touch_to_neopixel_calibration)\n self.cheap_single_touch_to_neopixel_calibration = (\n cheap_single_touch_to_neopixel_calibration)\n self.dual_touch_top_to_neopixel_calibration.save_to_file()\n self.dual_touch_bot_to_neopixel_calibration.save_to_file()\n self.single_touch_to_neopixel_calibration.save_to_file()\n self.cheap_single_touch_to_neopixel_calibration.save_to_file()\n display.set_text('Saved calibrations for ribbon ' + self.name + '!'\n )\n time.sleep(2)\n else:\n display.set_text('Cancelled. 
No calibrations were saved.')\n time.sleep(2)\n ask_to_try_again()\n return\n\n def test_smooth_demo(self, single_touch_to_neopixel_calibration=None,\n dual_touch_top_to_neopixel_calibration=None,\n dual_touch_bot_to_neopixel_calibration=None):\n import lightboard.buttons as buttons\n import lightboard.neopixels as neopixels\n import lightboard.display as display\n if single_touch_to_neopixel_calibration is None:\n single_touch_to_neopixel_calibration = (self.\n single_touch_to_neopixel_calibration)\n if dual_touch_top_to_neopixel_calibration is None:\n dual_touch_top_to_neopixel_calibration = (self.\n dual_touch_top_to_neopixel_calibration)\n if dual_touch_bot_to_neopixel_calibration is None:\n dual_touch_bot_to_neopixel_calibration = (self.\n dual_touch_bot_to_neopixel_calibration)\n buttons.metal_button.color = 1, 0, 1\n buttons.set_green_button_lights(0, 0, 0, 0)\n display.set_text('Smooth demo for ribbon %s\\nPress metal to exit')\n\n def mean(l):\n l = list(l)\n return sum(l) / len(l)\n\n def std(l):\n u = mean(l)\n return mean((x - u) ** 2 for x in l) ** 0.5\n\n\n class SuperSmooth:\n\n def __init__(self):\n self.DISCRETE = True\n self.N = 10\n self.V = []\n self.tet2 = Tether(1)\n self.tether = SoftTether(size=5)\n self.value = None\n\n def __call__(self, value):\n raw_value = value\n self.V.append(raw_value)\n while len(self.V) > self.N:\n del self.V[0]\n val = self.tether(mean(self.V))\n if self.DISCRETE:\n Val = self.tet2(int(val))\n else:\n Val = val\n self.value = Val\n return Val\n\n def clear(self):\n self.V.clear()\n self.tether.value = None\n super_smooth_single = SuperSmooth()\n super_smooth_dual_top = SuperSmooth()\n super_smooth_dual_bot = SuperSmooth()\n while not buttons.metal_press_viewer.value:\n single = self.single_touch_reading()\n if single.gate:\n dual = self.dual_touch_reading()\n val_top = dual_touch_top_to_neopixel_calibration(\n super_smooth_dual_top(dual.raw_a))\n val_bot = dual_touch_bot_to_neopixel_calibration(\n super_smooth_dual_bot(dual.raw_b))\n val = single_touch_to_neopixel_calibration(super_smooth_single\n (single.raw_value))\n neopixels.draw_all_off()\n neopixels.draw_dot(floor(val_top), 0, 30, 15)\n neopixels.draw_dot(floor(val_bot), 15, 30, 0)\n neopixels.draw_dot(floor(val), 64, 0, 128)\n neopixels.refresh()\n else:\n super_smooth_single.clear()\n super_smooth_dual_top.clear()\n super_smooth_dual_bot.clear()\n neopixels.turn_off()\n\n\nclass NoiseFilter:\n\n def __init__(self, moving_average_length=10, soft_tether_size=5,\n tether_size=1, moving_median_length=1):\n self.moving_average = MovingAverage(moving_average_length)\n self.soft_tether = SoftTether(size=soft_tether_size)\n self.tether = Tether(size=tether_size)\n self.moving_median = MovingMedian(moving_median_length)\n\n def __call__(self, value):\n value = self.moving_average(value)\n value = self.soft_tether(value)\n value = self.tether(value)\n value = self.moving_median(value)\n return value\n\n def clear(self):\n self.soft_tether.clear()\n self.tether.clear()\n self.moving_average.clear()\n self.moving_median.clear()\n\n def copy(self):\n return NoiseFilter(self.moving_average.length, self.soft_tether.\n size, self.tether.size)\n\n\nclass SingleTouchReading:\n __slots__ = ['gate', 'raw_lower', 'raw_upper', 'raw_gap', 'raw_value']\n GATE_THRESHOLD = 500\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.read_raw_lower()\n self.read_raw_upper()\n self.process_readings()\n\n def prepare_to_read(self):\n activate_single_touch_transistors()\n self.ribbon.ads.mode = 
ADS.Mode.SINGLE\n self.ribbon.ads.gain = ads_gain_single\n\n def read_raw_lower(self):\n single_pull.value = False\n self.prepare_to_read()\n try:\n self.raw_lower = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def read_raw_upper(self):\n single_pull.value = True\n self.prepare_to_read()\n try:\n self.raw_upper = self.ribbon.ads_single.value\n except OSError as exception:\n raise I2CError(exception)\n\n def process_readings(self):\n self.raw_gap = abs(self.raw_upper - self.raw_lower)\n self.gate = self.raw_gap < self.GATE_THRESHOLD\n self.raw_value = (self.raw_upper + self.raw_lower) / 2\n\n\nclass ContinuousSingleTouchReading(SingleTouchReading):\n\n @staticmethod\n def prepare_to_read():\n activate_single_touch_transistors()\n ads.mode = ADS.Mode.CONTINUOUS\n ads.gain = ads_gain_single\n self.ribbon.ads_single.value\n\n\nclass CheapSingleTouchReading(SingleTouchReading):\n GATE_THRESHOLD = 1500\n GATE_THRESHOLD = 4000\n\n def read_raw_lower(self):\n self.prepare_to_read()\n single_pull.value = False\n self.raw_lower = self.ribbon.rib_mid.value\n\n def read_raw_upper(self):\n self.prepare_to_read()\n single_pull.value = True\n self.raw_upper = self.ribbon.rib_mid.value\n\n\nclass DualTouchReading:\n __slots__ = ['raw_a', 'raw_b']\n\n def __init__(self, ribbon):\n self.ribbon = ribbon\n self.prepare_to_read()\n try:\n self.raw_a = self.ribbon.ads_dual_top.value\n self.raw_b = self.ribbon.ads_dual_bot.value\n except OSError as exception:\n raise I2CError(exception)\n\n def prepare_to_read(self):\n activate_dual_touch_transistors()\n self.ribbon.ads.gain = ads_gain_dual\n\n\nclass ProcessedDualTouchReading:\n __slots__ = ['gate', 'bot', 'top', 'mid', 'num_fingers', 'old', 'new']\n DELTA_THRESHOLD = -4\n TWO_TOUCH_THRESHOLD = 2\n TWO_TOUCH_THRESHOLD_SLACK = 0.05\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n\n def clear_filters():\n ribbon.cheap_single_filter.clear()\n ribbon.dual_bot_filter.clear()\n ribbon.dual_top_filter.clear()\n previous_gate = ribbon.previous_gate\n single_before = ribbon.processed_cheap_single_touch_reading()\n if not single_before.gate:\n self.gate = False\n clear_filters()\n return\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n dual_reading = ribbon.dual_touch_reading()\n single_after = ribbon.cheap_single_touch_reading()\n if not single_after.gate:\n self.gate = False\n clear_filters()\n return\n if not previous_gate:\n clear_filters()\n self.gate = True\n raw_mid = (single_before.raw_value + single_after.raw_value) / 2\n raw_top = dual_reading.raw_a\n raw_bot = dual_reading.raw_b\n top = raw_top\n bot = raw_bot\n mid = raw_mid\n top = ribbon.dual_touch_top_to_neopixel_calibration(top)\n bot = ribbon.dual_touch_bot_to_neopixel_calibration(bot)\n mid = ribbon.cheap_single_touch_to_neopixel_calibration(mid)\n mid = ribbon.cheap_single_filter(mid)\n if int(raw_top) == 32767:\n top = mid\n if int(raw_bot) == 32767:\n bot = mid\n delta = top - bot\n if delta <= self.DELTA_THRESHOLD:\n ribbon.dual_num_fingers = 1\n elif not previous_gate:\n ribbon.dual_num_fingers = (2 if delta > self.\n TWO_TOUCH_THRESHOLD else 1)\n elif ribbon.dual_num_fingers == 1 and delta > self.TWO_TOUCH_THRESHOLD + self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 2\n elif ribbon.dual_num_fingers == 2 and delta < self.TWO_TOUCH_THRESHOLD - self.TWO_TOUCH_THRESHOLD_SLACK:\n ribbon.dual_num_fingers = 1\n self.num_fingers = ribbon.dual_num_fingers\n if self.num_fingers == 1:\n bot = top = mid\n 
elif bot > top:\n bot = top = (bot + top) / 2\n if not hasattr(ribbon, 'previous_dual_old'):\n ribbon.previous_dual_old = mid\n old, new = sorted([bot, top], key=lambda pos: abs(pos - ribbon.\n previous_dual_old))\n self.top = ribbon.dual_top_filter(top)\n self.bot = ribbon.dual_bot_filter(bot)\n self.mid = mid\n self.old = old\n self.new = new\n ribbon.previous_dual_old = old\n\n\nclass ProcessedSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n if ribbon.previous_gate:\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()\n ):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n if cheap_single_touch_reading.gate:\n with (neopixels.TemporarilyTurnedOff() if blink else\n EmptyContext()):\n single_touch_reading = ribbon.single_touch_reading()\n self.gate = single_touch_reading.gate\n else:\n self.gate = False\n if self.gate:\n self.raw_value = single_touch_reading.raw_value\n self.value = ribbon.single_touch_to_neopixel_calibration(self.\n raw_value)\n\n\nclass ProcessedCheapSingleTouchReading:\n\n def __init__(self, ribbon, blink=False):\n self.ribbon = ribbon\n with (neopixels.TemporarilyTurnedOff() if blink else EmptyContext()):\n if not ribbon.previous_gate:\n ribbon.cheap_single_touch_reading()\n cheap_single_touch_reading = ribbon.cheap_single_touch_reading()\n self.gate = cheap_single_touch_reading.gate\n if self.gate:\n self.raw_value = cheap_single_touch_reading.raw_value\n self.value = ribbon.cheap_single_touch_to_neopixel_calibration(self\n .raw_value)\n self.value = ribbon.cheap_single_filter(self.value)\n else:\n ribbon.cheap_single_filter.clear()\n\n\n<mask token>\n\n\ndef show_calibration_menu():\n import lightboard.widgets as widgets\n options = OrderedDict()\n options['Calibrate Rib A'] = ribbon_a.run_calibration\n options['Calibrate Rib B'] = ribbon_b.run_calibration\n options['Smooth Demo A'] = ribbon_a.test_smooth_demo\n options['Smooth Demo B'] = ribbon_b.test_smooth_demo\n options['Raw UART Demo A'] = lambda : test_ribbon_raw_uart(ribbon_a)\n options['Raw UART Demo B'] = lambda : test_ribbon_raw_uart(ribbon_b)\n options['Dual Touch Demo A'] = lambda : test_ribbon_dual_touch(ribbon_a)\n options['Dual Touch Demo B'] = lambda : test_ribbon_dual_touch(ribbon_b)\n widgets.run_select_subroutine(options)\n\n\n<mask token>\n", "step-5": "#ribbon_a and ribbon_b are the two important variables here\nribbon_a=None\nribbon_b=None\n\n#Notes:\n# - As it turns out, the internal ADC in the Teensy is NOT very susceptible to fluctuations in the Neopixels' current...BUT...the ADS1115 IS. \n# Therefore, I think a better model would ditch the ADS1115 alltogether - replacing it with a simple 8x toggleable amp for dual touches. \n# - Shouldn't cause errors. No scl/sda pullup means the board isn't connected. No i2c at 48 means the individual chip isn't powered or connected etc. I just ficed a bad solder joint that took a while to flare up.......maybe this is what happened with the old lightwave? 
I was too quick with the solder joints, leaving a bubble that didn't touch it because of some stress bs later on?\n\n__all__=['ribbon_a','ribbon_b']\n\nfrom urp import *\nimport time\nimport board\nimport busio\nimport adafruit_ads1x15.ads1115 as ADS\nfrom collections import OrderedDict\nfrom adafruit_ads1x15.ads1x15 import Mode\nfrom adafruit_ads1x15.analog_in import AnalogIn as ADS1115_AnalogIn\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom analogio import AnalogIn as Internal_AnalogIn\nfrom tools import *\nimport storage\nfrom linear_modules import *\nimport lightboard.neopixels as neopixels\nimport time\nfrom micropython import const\n\n\ni2c = busio.I2C(board.SCL, board.SDA, frequency=1000000)# Create the I2C bus with a fast frequency\n\n#I2C addresses for ADS1115's: 0x48 and 0x4a for Ribbon A and Ribbon B respectively\nads_a = ADS.ADS1115(i2c,address=0x48) \nads_b = ADS.ADS1115(i2c,address=0x4a) \n\ndata_rate=const(860) # Maximum number of samples per second\nads_a.data_rate = data_rate\nads_b.data_rate = data_rate\n\nads_gain_single=const(1)\nads_gain_dual =const(8) #Uses 100kΩ\n\n#Change the gains depending on whether you're measuring dual or single touches\nads_a.gain=ads_gain_single\nads_b.gain=ads_gain_single\n\nads_a_a0 = ADS1115_AnalogIn(ads_a, ADS.P0)\nads_a_a1 = ADS1115_AnalogIn(ads_a, ADS.P1)\nads_a_a2 = ADS1115_AnalogIn(ads_a, ADS.P2)\nads_a_single=ads_a_a0\nads_a_dual_top=ads_a_a1\nads_a_dual_b=ads_a_a2\nrib_a_mid = Internal_AnalogIn(board.D26)\n\nads_b_a0 = ADS1115_AnalogIn(ads_b, ADS.P0)\nads_b_a1 = ADS1115_AnalogIn(ads_b, ADS.P1)\nads_b_a2 = ADS1115_AnalogIn(ads_b, ADS.P2)\nads_b_single=ads_b_a0\nads_b_dual_top=ads_b_a1\nads_b_dual_b=ads_b_a2\nrib_b_mid = Internal_AnalogIn(board.D27)\n\nsingle_pull=DigitalInOut(board.D32)\nsingle_pin =DigitalInOut(board.D31)\ndual_pin_2 =DigitalInOut(board.D25)\ndual_pin_1 =DigitalInOut(board.D24)\nsingle_pull.direction=Direction.OUTPUT\nsingle_pin .direction=Direction.OUTPUT\ndual_pin_2 .direction=Direction.OUTPUT\ndual_pin_1 .direction=Direction.OUTPUT\n\ndef activate_single_touch_transistors():\n\tsingle_pin.value=True\n\tdual_pin_1 .value=False\n\tdual_pin_2 .value=False\n\ndef activate_dual_touch_transistors():\n\tsingle_pin.value=False\n\tdual_pin_1 .value=True\n\tdual_pin_2 .value=True\n\nclass I2CError(OSError):\n\tpass\n\nclass Ribbon:\n\n\tADS_BIN_SIZE=100\n\tRIB_BIN_SIZE=100\n\tCALIBRATION_FOLDER='/generated/calibrations/ribbons'\n\n\tdef __init__(self,name,rib_mid,ads,ads_single,ads_dual_top,ads_dual_bot):\n\t\tself.name=name\n\t\tself.rib_mid=rib_mid\n\t\tself.ads=ads\n\t\tself.ads_single=ads_single\n\t\tself.ads_dual_top=ads_dual_top\n\t\tself.ads_dual_bot=ads_dual_bot\n\n\t\tdual_touch_top_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_top_to_neopixel_calibration' )\n\t\tdual_touch_bot_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_bot_to_neopixel_calibration' )\n\t\tsingle_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_single_touch_to_neopixel_calibration' )\n\t\tcheap_single_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_cheap_single_touch_to_neopixel_calibration')\n\n\t\tself.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_top_to_neopixel_calibration_path ,auto_load=True)\n\t\tself.dual_touch_bot_to_neopixel_calibration = 
HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_bot_to_neopixel_calibration_path ,auto_load=True)\n\t\tself.single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=single_touch_to_neopixel_calibration_path ,auto_load=True)\n\t\tself.cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=cheap_single_touch_to_neopixel_calibration_path,auto_load=True)\n\n\t\tself.previous_gate=False\n\n\t\tself.dual_num_fingers=0\n\t\tdual_filter_moving_average_length=3\n\t\tdual_filter_soft_tether_size=.1\n\t\tdual_filter_tether_size=.05\n\t\tself.dual_bot_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size)\n\t\tself.dual_top_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size)\n\n\t\tself.cheap_single_filter=NoiseFilter(moving_average_length=1,soft_tether_size=.3,tether_size=.01,moving_median_length=1)\n\n\n\t@property\n\tdef is_calibrated(self):\n\t\treturn self.dual_touch_top_to_neopixel_calibration .is_fitted and \\\n\t\t self.dual_touch_bot_to_neopixel_calibration .is_fitted and \\\n\t\t self.single_touch_to_neopixel_calibration .is_fitted and \\\n\t\t self.cheap_single_touch_to_neopixel_calibration.is_fitted\n\n\tdef dual_touch_reading(self):\n\t\treading=DualTouchReading(self)\n\t\t#DualTouchReading objects don't have a gate as of right now (though they will probably soon - we can get the gate by comparing the top value to the bot value and setting a threshold)\n\t\treturn reading\n\n\tdef single_touch_reading(self):\n\t\treading=SingleTouchReading(self)\n\t\tself.previous_gate=reading.gate\n\t\treturn reading\n\n\tdef cheap_single_touch_reading(self):\n\t\treading=CheapSingleTouchReading(self)\n\t\tself.previous_gate=reading.gate\n\t\treturn reading\n\n\tdef processed_single_touch_reading(self,blink=False):\n\t\t# if not self.is_calibrated: #Unnessecary CPU time...its cheap but so unimportant...\n\t\t\t# print(\"Ribbon.processed_single_touch_reading: Warning: This ribbon is not calibrated!\")\n\t\treading=ProcessedSingleTouchReading(self,blink=blink)\n\t\tself.previous_gate=reading.gate\n\t\treturn reading\n\n\tdef processed_cheap_single_touch_reading(self,blink=False):\n\t\treading=ProcessedCheapSingleTouchReading(self,blink=blink)\n\t\tself.previous_gate=reading.gate\n\t\treturn reading\n\n\tdef processed_dual_touch_reading(self,blink=False):\n\t\treading=ProcessedDualTouchReading(self,blink=blink)\n\t\tself.previous_gate=reading.gate\n\t\treturn reading\n\t\n\tdef run_calibration(self,samples_per_pixel=25):\n\t\timport lightboard.display as display\n\t\timport lightboard.neopixels as neopixels\n\t\timport lightboard.buttons as buttons\n\t\timport lightboard.widgets as widgets\n\n\t\tbuttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident\n\n\t\tdef ask_to_try_again():\n\t\t\tif widgets.input_yes_no(\"Would you like to try calibrating again?\"):\n\t\t\t\tself.run_calibration(samples_per_pixel)\n\n\t\tstart_from_scratch = True # widgets.input_yes_no('Start from scratch?\\nNo: Modify current calibration\\nYes: Create entirely new calibration')\n\n\t\tdual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_top_to_neopixel_calibration .file_path,auto_load=not 
start_from_scratch)\n\t\tdual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_bot_to_neopixel_calibration .file_path,auto_load=not start_from_scratch)\n\t\tsingle_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.single_touch_to_neopixel_calibration .file_path,auto_load=not start_from_scratch)\n\t\tcheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=self.cheap_single_touch_to_neopixel_calibration.file_path,auto_load=not start_from_scratch)\n\n\t\tbuttons.metal_button.color=(255,0,255)\n\n\t\tdef show_instructions():\n\t\t\tdisplay.set_text('Running calibration on ribbon '+self.name+'\\nPlease press the glowing green buttons until\\nthe red dot is barely on the ribbon')\n\t\t\tbuttons.set_green_button_lights(1,1,0,0)\n\t\tshow_instructions()\n\n\t\tbutton_press_next_neopixel=buttons.ButtonPressViewer(buttons.green_button_1)\n\t\tbutton_press_prev_neopixel=buttons.ButtonPressViewer(buttons.green_button_3)\n\n\t\tdef display_neopixel_calibration(cursor_index,r,g,b,highlighted_pixels=[]):\n\t\t\tnonlocal calibrated_pixels\n\n\t\t\tneopixels.draw_all_off()\n\n\t\t\tfor pixel in highlighted_pixels:\n\t\t\t\tneopixels.draw_dot(pixel,0,10,0)\n\n\t\t\tneopixels.draw_dot(cursor_index,r,g,b)\n\t\t\tneopixels.refresh()\n\n\t\ti=0\n\t\ti=neopixels.first\n\n\t\tdisplay_neopixel_calibration(i,63,0,0)\n\n\t\tbuttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident\n\n\t\twhile True:\n\t\t\treading=self.cheap_single_touch_reading()\n\t\t\tif reading.gate:\n\t\t\t\tbreak\n\n\t\t\trefresh_flag=False\n\t\t\tif button_press_next_neopixel.value:\n\t\t\t\ti+=1\n\t\t\t\trefresh_flag=True\n\t\t\tif button_press_prev_neopixel.value:\n\t\t\t\ti-=1\n\t\t\t\trefresh_flag=True\n\t\t\tif refresh_flag:\n\t\t\t\ti=min(neopixels.length-1,max(0,i))\n\t\t\t\tdisplay_neopixel_calibration(i,63,0,0)\n\n\t\t\tif buttons.metal_press_viewer.value:\n\t\t\t\tif widgets.input_yes_no(\"Do you want to cancel calibration?\\n(All progress will be lost)\"):\n\t\t\t\t\t#NOTE: This code block is duplicated!\n\t\t\t\t\task_to_try_again()\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tshow_instructions()\n\n\n\t\tbutton_press_skip =buttons.ButtonPressViewer(buttons.green_button_1)\n\t\tbutton_press_back =buttons.ButtonPressViewer(buttons.green_button_3)\n\t\tbutton_press_finished=buttons.ButtonPressViewer(buttons.green_button_2)\n\t\tbuttons.set_green_button_lights(1,1,0,0)\n\n\t\tdef show_instructions():\n\t\t\tdisplay.set_text('Running calibration on ribbon '+self.name+'\\nPlease press cyan dots on ribbon\\nuntil they become orange\\nPress the 2rd green button when you\\'re done\\n(If the 2rd green button isnt lit, calibrate at least two points)\\nPress button 1 to skip the current dot\\nPress button 3 to go back a dot')\n\t\tshow_instructions()\n\n\t\tfinished=False\n\t\tcalibrated_pixels=set()\n\n\t\twhile not finished:\n\n\t\t\ti=max(0,min(i,neopixels.length-1))\n\n\t\t\tdisplay_neopixel_calibration(i,0,63,63,calibrated_pixels)\n\n\n\t\t\tdual_a_samples=[]\n\t\t\tdual_b_samples=[]\n\t\t\tsingle_samples=[]\n\t\t\tcheap_samples =[]\n\t\t\tpixel_num_samples=0\n\n\t\t\tbuttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident\n\n\t\t\twhile True:\n\t\t\t\tbuttons.green_button_3.light=len(calibrated_pixels)>=2\n\t\t\t\tif buttons.metal_press_viewer.value:\n\t\t\t\t\tif widgets.input_yes_no(\"Do you want to cancel calibration?\\n(All progress 
will be lost)\"):\n\t\t\t\t\t\t#NOTE: This code block is duplicated!\n\t\t\t\t\t\task_to_try_again()\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tshow_instructions()\n\t\t\t\tif button_press_skip.value:\n\t\t\t\t\tbreak\n\t\t\t\tif button_press_back.value:\n\t\t\t\t\ti-=2\n\t\t\t\t\tbreak\n\t\t\t\tif button_press_finished.value and len(calibrated_pixels)>=2:\n\t\t\t\t\tif widgets.input_yes_no(\"Do you want to test your calibration?\\nYes: Test it!\\nNo: I'm done calibrating!\"):\n\t\t\t\t\t\t#This UI is a bit janky....should use better messages. But whatevs...this is just calibration after all...\n\t\t\t\t\t\twith buttons.TemporaryButtonLights():\n\t\t\t\t\t\t\tself.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration)\n\t\t\t\t\t\tshow_instructions()\n\t\t\t\t\telif widgets.input_yes_no(\"Are you sure your're done\\ncalibrating this ribbon?\"):\n\t\t\t\t\t\tfinished=True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tshow_instructions()\n\n\t\t\t\tif len(cheap_samples)>=samples_per_pixel:\n\t\t\t\t\tdual_touch_top_to_neopixel_calibration .add_sample(median(dual_a_samples),i)\n\t\t\t\t\tdual_touch_bot_to_neopixel_calibration .add_sample(median(dual_b_samples),i)\n\t\t\t\t\tsingle_touch_to_neopixel_calibration .add_sample(median(single_samples),i)\n\t\t\t\t\tcheap_single_touch_to_neopixel_calibration.add_sample(median(cheap_samples ),i)\n\t\t\t\t\tcalibrated_pixels.add(i)\n\t\t\t\t\tbreak\n\n\t\t\t\tif self.cheap_single_touch_reading().gate:\n\t\t\t\t\twith neopixels.TemporarilyTurnedOff():\n\t\t\t\t\t\tcheap_single_touch_reading=self.cheap_single_touch_reading()\n\t\t\t\t\t\tsingle_touch_reading =self.single_touch_reading()\n\t\t\t\t\t\tdual_touch_reading =self.dual_touch_reading()\n\n\t\t\t\t\t\tif single_touch_reading.gate and cheap_single_touch_reading.gate:\n\t\t\t\t\t\t\tdual_a_samples.append(dual_touch_reading .raw_a )\n\t\t\t\t\t\t\tdual_b_samples.append(dual_touch_reading .raw_b )\n\t\t\t\t\t\t\tsingle_samples.append(single_touch_reading .raw_value)\n\t\t\t\t\t\t\tcheap_samples .append(cheap_single_touch_reading.raw_value)\n\n\t\t\t\t\t\tpixel_num_samples+=1\n\t\t\t\telse:\n\t\t\t\t\t#Accidently remove finger? 
Cancel it...try again.\n\t\t\t\t\tdual_a_samples.clear()\n\t\t\t\t\tdual_b_samples.clear()\n\t\t\t\t\tsingle_samples.clear()\n\t\t\t\t\tcheap_samples .clear()\n\n\n\n\t\t\ti+=1\n\t\t\tdisplay_neopixel_calibration(i,63,31,0,calibrated_pixels)\n\n\t\t\twhile self.cheap_single_touch_reading().gate:\n\t\t\t\tpass\n\n\t\tbuttons.set_green_button_lights(0,0,0,0)\n\t\tbuttons.metal_button.color=(0,1,1)\n\t\tneopixels.turn_off()\n\n\t\tdisplay.set_text('Finished calibration on ribbon '+self.name+'\\nTry the ribbon out to see if you like it\\nAlso rinting out sensor values to serial for a demo\\n(Watch in the arduino plotter)\\nPress the metal button when you\\'re done')\n\n\n\t\twhile not buttons.metal_press_viewer.value:\n\t\t\tif self.cheap_single_touch_reading().gate:\n\t\t\t\twith neopixels.TemporarilyTurnedOff():\n\t\t\t\t\tcheap_single_touch_reading=self.cheap_single_touch_reading()\n\t\t\t\t\tsingle_touch_reading =self.single_touch_reading()\n\t\t\t\t\tdual_touch_reading =self.dual_touch_reading()\n\n\t\t\t\t\tdual_top = dual_touch_top_to_neopixel_calibration(dual_touch_reading .raw_a )\n\t\t\t\t\tdual_bot = dual_touch_bot_to_neopixel_calibration(dual_touch_reading .raw_b )\n\t\t\t\t\tsingle = single_touch_to_neopixel_calibration (single_touch_reading.raw_value)\n\t\t\t\t\tcheap_single=cheap_single_touch_to_neopixel_calibration(cheap_single_touch_reading.raw_value)\n\n\t\t\t\t\tif cheap_single_touch_reading.gate and single_touch_reading.gate:\n\n\t\t\t\t\t\tneopixels.display_dot(int(cheap_single),0,128,0)\n\n\t\t\t\t\t\tprint(dual_top,dual_bot,single,cheap_single)\n\n\t\tself.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration)\n\n\t\tif widgets.input_yes_no(\"Would you like to save this\\ncalibration for ribbon \"+self.name+\"?\"):\n\t\t\tself.dual_touch_top_to_neopixel_calibration = dual_touch_top_to_neopixel_calibration\n\t\t\tself.dual_touch_bot_to_neopixel_calibration = dual_touch_bot_to_neopixel_calibration\n\t\t\tself.single_touch_to_neopixel_calibration = single_touch_to_neopixel_calibration\n\t\t\tself.cheap_single_touch_to_neopixel_calibration = cheap_single_touch_to_neopixel_calibration\n\t\t\tself.dual_touch_top_to_neopixel_calibration .save_to_file()\n\t\t\tself.dual_touch_bot_to_neopixel_calibration .save_to_file()\n\t\t\tself.single_touch_to_neopixel_calibration .save_to_file()\n\t\t\tself.cheap_single_touch_to_neopixel_calibration.save_to_file()\n\t\t\tdisplay.set_text(\"Saved calibrations for ribbon \"+self.name+\"!\")\n\t\t\ttime.sleep(2)\n\t\telse:\n\t\t\tdisplay.set_text(\"Cancelled. 
No calibrations were saved.\")\n\t\t\ttime.sleep(2)\n\t\t\task_to_try_again()\n\t\t\treturn\n\n\tdef test_smooth_demo(\n\t\t\tself,\n\t\t\tsingle_touch_to_neopixel_calibration=None,\n\t\t\tdual_touch_top_to_neopixel_calibration=None,\n\t\t\tdual_touch_bot_to_neopixel_calibration=None):\n\n\t\timport lightboard.buttons as buttons\n\t\timport lightboard.neopixels as neopixels\n\t\timport lightboard.display as display\n\n\t\tif single_touch_to_neopixel_calibration is None: single_touch_to_neopixel_calibration =self.single_touch_to_neopixel_calibration\n\t\tif dual_touch_top_to_neopixel_calibration is None: dual_touch_top_to_neopixel_calibration=self.dual_touch_top_to_neopixel_calibration\n\t\tif dual_touch_bot_to_neopixel_calibration is None: dual_touch_bot_to_neopixel_calibration=self.dual_touch_bot_to_neopixel_calibration\n\n\t\tbuttons.metal_button.color=(1,0,1)\n\t\tbuttons.set_green_button_lights(0,0,0,0)\n\t\tdisplay.set_text(\"Smooth demo for ribbon %s\\nPress metal to exit\")\n\n\t\t#This is a show-offy demo lol. Try miscalibrating it such that a tiny vibrato makes it move from one side of the lightwave to the otehr...\n\n\t\tdef mean(l):\n\t\t\tl=list(l)\n\t\t\treturn sum(l)/len(l)\n\n\t\tdef std(l):\n\t\t\tu=mean(l)\n\t\t\treturn mean((x-u)**2 for x in l)**.5\n\n\t\tclass SuperSmooth:\n\t\t\t#A linear module created from the original code of this demo.\n\t\t\t#When DISCRETE is True, it's so sensitive that it can recognize individual ADS readings without noise when the finger is still.\n\t\t\t#Used to smooth ADS readings.\n\t\t\tdef __init__(self):\n\t\t\t\tself.DISCRETE=True\n\t\t\t\tself.N=10\n\t\t\t\tself.V=[]\n\t\t\t\tself.tet2=Tether(1)\n\t\t\t\tself.tether=SoftTether(size=5)\n\t\t\t\tself.value=None\n\n\t\t\tdef __call__(self,value):\n\t\t\t\traw_value=value\n\t\t\t\tself.V.append(raw_value)\n\t\t\t\twhile len(self.V)>self.N:\n\t\t\t\t\tdel self.V[0]\n\t\t\t\tval=self.tether(mean(self.V))\n\t\t\t\tif self.DISCRETE:\n\t\t\t\t\tVal=(self.tet2(int(val)))\n\t\t\t\telse:\n\t\t\t\t\tVal=(val)\n\t\t\t\tself.value=Val\n\t\t\t\treturn Val\n\n\t\t\tdef clear(self):\n\t\t\t\tself.V.clear()\n\t\t\t\tself.tether.value=None\n\n\t\tsuper_smooth_single =SuperSmooth()\n\t\tsuper_smooth_dual_top=SuperSmooth()\n\t\tsuper_smooth_dual_bot=SuperSmooth()\n\n\t\twhile not buttons.metal_press_viewer.value:\n\t\t\tsingle=self.single_touch_reading()\n\n\t\t\tif single.gate:\n\t\t\t\tdual=self.dual_touch_reading()\n\n\t\t\t\tval_top=dual_touch_top_to_neopixel_calibration(super_smooth_dual_top(dual.raw_a))\n\t\t\t\tval_bot=dual_touch_bot_to_neopixel_calibration(super_smooth_dual_bot(dual.raw_b))\n\t\t\t\tval=single_touch_to_neopixel_calibration(super_smooth_single(single.raw_value))\n\n\t\t\t\tneopixels.draw_all_off()\n\t\t\t\tneopixels.draw_dot(floor(val_top), 0,30, 15)\n\t\t\t\tneopixels.draw_dot(floor(val_bot),15,30, 0)\n\t\t\t\tneopixels.draw_dot(floor(val ),64, 0,128)\n\t\t\t\tneopixels.refresh()\n\n\t\t\telse:\n\t\t\t\tsuper_smooth_single .clear()\n\t\t\t\tsuper_smooth_dual_top.clear()\n\t\t\t\tsuper_smooth_dual_bot.clear()\n\n\t\tneopixels.turn_off()\n\nclass NoiseFilter:\n\t#This is a LinearModule\n\t#It should be cleared whever the gate is off\n\tdef __init__(self,moving_average_length=10,\n\t soft_tether_size =5,\n\t tether_size =1,\n\t moving_median_length =1):\n\t\tself.moving_average=MovingAverage(moving_average_length)\n\t\tself.soft_tether=SoftTether(size=soft_tether_size)\n\t\tself.tether=Tether(size=tether_size)\n\t\tself.moving_median=MovingMedian(moving_median_length)\n\tdef 
__call__(self,value):\n\t\tvalue=self.moving_average(value)\n\t\tvalue=self.soft_tether (value)\n\t\tvalue=self.tether (value)\n\t\tvalue=self.moving_median (value)\n\t\treturn value\n\tdef clear(self):\n\t\tself.soft_tether .clear()\n\t\tself.tether .clear()\n\t\tself.moving_average.clear()\n\t\tself.moving_median .clear()\n\tdef copy(self):\n\t\t#Create a duplicate filter with the same parameters\n\t\treturn NoiseFilter(self.moving_average.length,self.soft_tether.size,self.tether.size)\n\n\nclass SingleTouchReading:\n\t__slots__=['gate','raw_lower','raw_upper','raw_gap', 'raw_value']\n\n\tGATE_THRESHOLD=500 #This needs to be calibrated after observing the raw_gap when touching and not touching the ribbon. You can do this automatically with some fancy algorithm, or you can just look at the serial monitor while printing reading.raw_gap over and over again\n\n\tdef __init__(self,ribbon):\n\t\tself.ribbon=ribbon\n\t\tself.read_raw_lower()\n\t\tself.read_raw_upper()\n\t\tself.process_readings()\n\t\t\n\tdef prepare_to_read(self):\n\t\tactivate_single_touch_transistors()\n\t\tself.ribbon.ads.mode=ADS.Mode.SINGLE\n\t\tself.ribbon.ads.gain=ads_gain_single\n\n\tdef read_raw_lower(self):\n\t\tsingle_pull.value=False\n\t\tself.prepare_to_read()\n\t\ttry:\n\t\t\tself.raw_lower=self.ribbon.ads_single.value\n\t\texcept OSError as exception:\n\t\t\traise I2CError(exception)\n\n\tdef read_raw_upper(self):\n\t\tsingle_pull.value=True\n\t\tself.prepare_to_read()\n\t\ttry:\n\t\t\tself.raw_upper=self.ribbon.ads_single.value\n\t\texcept OSError as exception:\n\t\t\traise I2CError(exception)\n\n\tdef process_readings(self):\n\t\tself.raw_gap=abs(self.raw_upper-self.raw_lower)\n\t\tself.gate=self.raw_gap<self.GATE_THRESHOLD\n\t\tself.raw_value=(self.raw_upper+self.raw_lower)/2\n\nclass ContinuousSingleTouchReading(SingleTouchReading):\n\t#Should be similar to SingleTouchReading, but much faster when not using DualTouchReading\n\t#WARNING AND TODO: This function isn't currently doing enough to flush out anything. Perhaps continous can use the CheapSingleTouchReading's gate, and a single non-wobbling single_pull value\n\t@staticmethod\n\tdef prepare_to_read():\n\t\tactivate_single_touch_transistors()\n\t\tads.mode=ADS.Mode.CONTINUOUS\n\t\tads.gain=ads_gain_single\n\t\tself.ribbon.ads_single.value #Flush out the current reading of the ADC, in-case we changed single_pull in the middle of the ADS's reading (which happens 99% of the time if we don't do this lol - making detecting the gate practically useless)\n\nclass CheapSingleTouchReading(SingleTouchReading):\n\t#TODO: The Teensy's internal ADC is wonked. 
Between around raw values 30000 and 35000, it jumps (whereas the ADS1115 doesn't jump).\n\t#\t\tCalibration with respect to the ADS1115's non-cheap single touch should mitigate this problem\n\t#\t\tEven though the raw range is the same for both analog_in and ads_single, we need a larger GATE_THRESHOLD for CheapSingleTouchReading beacause of this flaw in Teensy's ADC.\n\t#Uses the Teensy's internal ADC that can read up to 6000x per second\n\t#TODO: Implement a variation of the SingleTouchReading class called quick-gate check via the Teensy's internal ADC to save a bit of time and get more accurate results on the dual touch readings (because then we can check both upper and lower both before and after the dual readings which means less spikes)\n\t#GATE_THRESHOLD is proportional to a threshold of the voltage gap between LOW and HIGH\n\t#When GATE_THRESHOLD is small, there are less unwanted jumps when barely pressing the ribbon. But if its too small, it won't register touches.\n\tGATE_THRESHOLD=1500 #This was measured to be a good value for most of the ribbon\n\tGATE_THRESHOLD=4000 #But, the ribbon has a kink in the middle that jumps a lot voltage over the space of a milimeter.\n\tdef read_raw_lower(self):\n\t\tself.prepare_to_read()\n\t\tsingle_pull.value=False\n\t\tself.raw_lower=self.ribbon.rib_mid.value\n\n\tdef read_raw_upper(self):\n\t\tself.prepare_to_read()\n\t\tsingle_pull.value=True\n\t\tself.raw_upper=self.ribbon.rib_mid.value\n\nclass DualTouchReading:\n\t__slots__ = ['raw_a', 'raw_b']\n\n\tdef __init__(self,ribbon):\n\t\tself.ribbon=ribbon\n\t\tself.prepare_to_read()\n\t\ttry:\n\t\t\tself.raw_a=self.ribbon.ads_dual_top.value\n\t\t\tself.raw_b=self.ribbon.ads_dual_bot.value\n\t\texcept OSError as exception:\n\t\t\traise I2CError(exception)\n\n\tdef prepare_to_read(self):\n\t\tactivate_dual_touch_transistors()\n\t\tself.ribbon.ads.gain=ads_gain_dual\n\nclass ProcessedDualTouchReading:\n\t__slots__=['gate','bot','top','mid','num_fingers','old','new']\n\n\tDELTA_THRESHOLD=-4 # A distance, measured in neopixel widths, that the two dual touches can be apart from one another before registering as not being touched. (This is because, as it turns out, it can sometimes take more than one sample for dual touch values to go all the way back to the top after releasing your finger from the ribbon)\n\t#You want to calibrate DELTA_THRESHOLD such that it's high enough to keep good readings once you release your finger, but low enough that it doesn't require pressing down too hard to activate. \n\t#DELTA_THRESHOLD can be a negative value.\n\t#DELTA_THRESHOLD might need to be changed if you calibrate with a pencil eraser instead of your fingertip, because the pencil eraser is a narrower touch area etc.\n\t#You should always calibrate using your finger for this reason...\n\n\tTWO_TOUCH_THRESHOLD=2 #A distance, measured in neopixel widths, that the dual readings must be apart from each other to register as \n\tTWO_TOUCH_THRESHOLD_SLACK=.05 #A bit of hysterisis used here...like a tether. 
Basically, to prevent flickering on the bonudary, to switch between two touch and one touch you must move this much distance.\n\n\tdef __init__(self,ribbon,blink=False):\n\t\t#If self.gate is False, your code shouldn't try to check for a .bot, .top, or .middle value - as it was never measured\n\t\t#If your fingers are pressing the ribbon in two different places, after calibration the 'top' value should be above the 'bot' value\n\t\t#\tIn the event that the hardware of the z\n\t\tself.ribbon=ribbon\n\n\t\tdef clear_filters():\n\t\t\tribbon.cheap_single_filter.clear()\n\t\t\tribbon.dual_bot_filter.clear()\n\t\t\tribbon.dual_top_filter.clear()\n\n\t\tprevious_gate=ribbon.previous_gate\n\n\t\tsingle_before=ribbon.processed_cheap_single_touch_reading()\n\n\t\tif not single_before.gate:\n\t\t\t#Don't waste time with the dual touch reading if one of the gates is False\n\t\t\tself.gate=False\n\t\t\tclear_filters()\n\t\t\treturn\n\n\t\twith neopixels.TemporarilyTurnedOff() if blink else EmptyContext():\n\t\t\tdual_reading=ribbon.dual_touch_reading()\n\n\t\tsingle_after=ribbon.cheap_single_touch_reading()\n\n\t\tif not single_after.gate:\n\t\t\tself.gate=False\n\t\t\tclear_filters()\n\t\t\treturn\n\n\t\tif not previous_gate:\n\t\t\tclear_filters()\n\n\t\tself.gate=True #single_before.gate and single_after.gate\n\n\t\t#TODO: Lower the DELTA_THRESHOLD and use self.middle whenever it gets too crazy; that way we can have maximum sensitivity and never miss a sample...\n\t\traw_mid=(single_before.raw_value+single_after.raw_value)/2\n\t\traw_top=dual_reading.raw_a\n\t\traw_bot=dual_reading.raw_b\n\n\t\ttop=raw_top\n\t\tbot=raw_bot\n\t\tmid=raw_mid\n\n\t\ttop=ribbon.dual_touch_top_to_neopixel_calibration(top)\n\t\tbot=ribbon.dual_touch_bot_to_neopixel_calibration(bot)\n\t\tmid=ribbon.cheap_single_touch_to_neopixel_calibration(mid)\n\t\tmid=ribbon.cheap_single_filter(mid)\n\n\t\t#I made a mistake on the lightboard...one of the resistors is too large or small (probably resistor tolerance issues)\n\t\t#As a result, one of the ribbons' dual touches doesn't work on the far ends of the ribbon\n\t\t#When this happens, the ADS's reading saturates to 32767 (with the current gain)\n\t\t#Instea of decreasing resolution by turning down the gain, or leaving a touch area unuseable, I'll just do this:\n\t\t#Note: Another valid solution is turning down the ADS1115's gain. 
This will solve the problem but decrease resolution...\n\t\tif int(raw_top)==32767: top=mid\n\t\tif int(raw_bot)==32767: bot=mid\n\n\t\tdelta=top-bot\n\n\t\t# old_num_fingers=ribbon.dual_num_fingers\n\t\t# changed_num_fingers=False\n\t\tif delta<=self.DELTA_THRESHOLD:\n\t\t\tribbon.dual_num_fingers=1\n\t\t\t# changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers\n\t\telif not previous_gate:\n\t\t\tribbon.dual_num_fingers = 2 if delta>self.TWO_TOUCH_THRESHOLD else 1\n\t\t\t# changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers\n\t\telif ribbon.dual_num_fingers == 1 and delta>self.TWO_TOUCH_THRESHOLD+self.TWO_TOUCH_THRESHOLD_SLACK:\n\t\t\tribbon.dual_num_fingers = 2\n\t\t\t# changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers\n\t\telif ribbon.dual_num_fingers == 2 and delta<self.TWO_TOUCH_THRESHOLD-self.TWO_TOUCH_THRESHOLD_SLACK:\n\t\t\tribbon.dual_num_fingers = 1\n\t\t\t# changed_num_fingers=old_num_fingers!=ribbon.dual_num_fingers\n\t\tself.num_fingers=ribbon.dual_num_fingers\n\n\t\t# if changed_num_fingers:\n\t\t# \tclear_filters()\n\n\t\tif self.num_fingers==1:\n\t\t\t#Even if the two-touches can't be used, we can still use the single cheap touch value\n\t\t\t#Originally, this set gate to False. Now it doesn't.\n\t\t\tbot=top=mid\n\n\t\telif bot>top:\n\t\t\t#The only time self.bot>self.top is when your're barely pressing on the ribbon at all...\n\t\t\t#...we can average these two values out to get a single, more reasonable value\n\t\t\tbot=top=(bot+top)/2\n\n\t\t#The older and newer dual touch positions. Only different when num_fingers>1\n\t\tif not hasattr(ribbon,'previous_dual_old'):\n\t\t\tribbon.previous_dual_old=mid\n\t\told,new=sorted([bot,top],key=lambda pos:abs(pos-ribbon.previous_dual_old))\n\n\t\tself.top=ribbon.dual_top_filter(top)\n\t\tself.bot=ribbon.dual_bot_filter(bot)\n\t\tself.mid=mid\n\t\tself.old=old\n\t\tself.new=new\n\t\tribbon.previous_dual_old=old\n\n\nclass ProcessedSingleTouchReading:\n\tdef __init__(self,ribbon,blink=False):\n\t\tself.ribbon=ribbon\n\t\tif ribbon.previous_gate:\n\t\t\t#If it was previously pressed, don't check the gate with the expensive reading...\n\t\t\twith neopixels.TemporarilyTurnedOff() if blink else EmptyContext():\n\t\t\t\tsingle_touch_reading=ribbon.single_touch_reading()\n\t\t\tself.gate=single_touch_reading.gate\n\t\telse:\n\t\t\tcheap_single_touch_reading=ribbon.cheap_single_touch_reading()\n\t\t\tif cheap_single_touch_reading.gate:\n\t\t\t\twith neopixels.TemporarilyTurnedOff() if blink else EmptyContext():\n\t\t\t\t\tsingle_touch_reading=ribbon.single_touch_reading()\n\t\t\t\tself.gate=single_touch_reading.gate\n\t\t\telse:\n\t\t\t\tself.gate=False\n\n\t\tif self.gate:\n\t\t\tself.raw_value=single_touch_reading.raw_value\n\t\t\tself.value=ribbon.single_touch_to_neopixel_calibration(self.raw_value)\n\nclass ProcessedCheapSingleTouchReading:\n\tdef __init__(self,ribbon,blink=False):\n\t\tself.ribbon=ribbon\n\t\twith neopixels.TemporarilyTurnedOff() if blink else EmptyContext():\n\t\t\tif not ribbon.previous_gate:\n\t\t\t\tribbon.cheap_single_touch_reading()#Sometimes it spikes on the first value for some reason...idk why\n\t\t\tcheap_single_touch_reading=ribbon.cheap_single_touch_reading()\n\t\tself.gate=cheap_single_touch_reading.gate\n\n\t\tif 
self.gate:\n\t\t\tself.raw_value=cheap_single_touch_reading.raw_value\n\t\t\tself.value=ribbon.cheap_single_touch_to_neopixel_calibration(self.raw_value)\n\t\t\tself.value=ribbon.cheap_single_filter(self.value)\n\t\telse:\n\t\t\tribbon.cheap_single_filter.clear()\n\t\t\t# pass\n\ndef test_ribbon_raw_uart(ribbon):\n\t#Use this test to print all (raw, uncalibrated) ribbon values to uart\n\t#Then, you can view them in an arduino grapher\n\timport lightboard.buttons as buttons\n\timport lightboard.display as display\n\n\tdisplay.set_text('Running raw uart test\\nPress metal button\\nto end this test\\n\\nThe green buttons show\\ncheap_gate and single_gate\\n(They\\'re just for display)')\n\tbuttons.set_green_button_lights(0,0,0,0)\n\tbuttons.metal_button.color=(255,0,0)\n\n\twhile True:\n\t\tcheap =ribbon.cheap_single_touch_reading()\n\t\tsingle=ribbon.single_touch_reading()\n\t\tdual =ribbon.dual_touch_reading()\n\n\t\tc_raw_value,c_gate = cheap .raw_value, cheap .gate\n\t\traw_value ,s_gate = single.raw_value, single.gate\n\t\traw_a,raw_b = dual.raw_a,dual.raw_b\n\n\t\tmessage = '%s %i %i %.5f %.5f %.5f %.5f'%(ribbon.name, int(c_gate), int(s_gate), c_raw_value, raw_value, raw_a, raw_b)\n\t\tprint(message)\n\n\t\tbuttons.set_green_button_lights(c_gate,s_gate,0,0)\n\n\t\tif buttons.metal_press_viewer.value:\n\t\t\tbuttons.metal_button.color=(0,0,0)\n\t\t\tdisplay.set_text('Running raw uart test:\\nDone!')\n\t\t\tbreak\n\ndef test_ribbon_dual_touch(ribbon):\n\timport lightboard.buttons as buttons\n\timport lightboard.display as display\n\n\tdisplay.set_text('Running dual-touch test on\\nRibbon %s\\n\\nWhen yellow dot, one touch\\nWhen white dot, two touches\\n\\nPress metal to exit'%ribbon.name)\n\tbuttons.set_green_button_lights(0,0,0,0)\n\tbuttons.metal_button.color=(255,0,0)\n\n\twhile not buttons.metal_press_viewer.value:\n\t\tdual =ribbon.processed_dual_touch_reading()\n\n\t\tif not dual.gate:\n\t\t\tcontinue\n\n\t\tneopixels.draw_all_off()\n\t\tneopixels.draw_dot(dual.top, 64,0,128)\n\t\tneopixels.draw_dot(dual.bot, 128,0,64)\n\t\tneopixels.draw_dot(dual.mid, 128,128,128*(dual.num_fingers-1))\n\t\tneopixels.refresh()\n\n\tbuttons.metal_button.color=(0,0,0)\n\tdisplay.set_text('test_ribbon_dual_touch: Done!')\n\ndef show_calibration_menu():\n\timport lightboard.widgets as widgets\n\n\toptions = OrderedDict()\n\n\toptions['Calibrate Rib A'] = ribbon_a.run_calibration\n\toptions['Calibrate Rib B'] = ribbon_b.run_calibration\n\toptions['Smooth Demo A' ] = ribbon_a.test_smooth_demo\n\toptions['Smooth Demo B' ] = ribbon_b.test_smooth_demo\n\toptions['Raw UART Demo A'] = lambda: test_ribbon_raw_uart(ribbon_a)\n\toptions['Raw UART Demo B'] = lambda: test_ribbon_raw_uart(ribbon_b)\n\toptions['Dual Touch Demo A'] = lambda: test_ribbon_dual_touch(ribbon_a)\n\toptions['Dual Touch Demo B'] = lambda: test_ribbon_dual_touch(ribbon_b)\n\n\twidgets.run_select_subroutine(options)\n\nribbon_a=Ribbon('a',rib_a_mid,ads_a,ads_a_single,ads_a_dual_top,ads_a_dual_b)\nribbon_b=Ribbon('b',rib_b_mid,ads_b,ads_b_single,ads_b_dual_top,ads_b_dual_b)\n", "step-ids": [ 20, 32, 40, 43, 50 ] }
[ 20, 32, 40, 43, 50 ]
from turtle import * import time import random colormode(255) class Ball(Turtle): def __init__(self, x,y,dx,dy,r): Turtle.__init__(self) self.pu() self.goto(x,y) self.dx = dx self.dy = dy self.r = r self.shape("circle") self.shapesize(r/10) r = random.randint(0,255) g = random.randint(0,255) b = random.randint(0,255) self.color(r,g,b) def move(self,screen_width, screen_hight): current_x = self.xcor() new_x = current_x + self.dx current_y = self.ycor() new_y = current_y + self.dy right_side_ball = new_x + self.r left_side_ball = new_x - self.r bottom_ball = new_y - self.r upper_ball_side = new_y + self.r self.goto(new_x, new_y) if bottom_ball < -screen_hight/2 or upper_ball_side > screen_hight/2: self.dy *= -1 if left_side_ball < -screen_width/2 or right_side_ball > screen_width/2: self.dx *= -1 tracer(0) ht() RUNNING = True SLEEP = 0.0077 SCREEN_WIDTH = getcanvas().winfo_width()/2 SCREEN_HEIGHT = getcanvas().winfo_height()/2 MY_BALL = (0,0,0.5,-0.4,30) NUMBER_OF_BALLS = 5 MINIMUM_BALL_RADIUS = 10 MAXIMUM_BALL_RADIUS = 100 MINIMUM_BALL_DX = -5 MAXIMUM_BALL_DX = 5 MINIMUM_BALL_DY = -5 MAXIMUM_BALL_DY = 5 BALLS = [] for i in range(NUMBER_OF_BALLS): x = random.randint(int(- SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS) , int(SCREEN_WIDTH/2 - MAXIMUM_BALL_RADIUS)) y = random.randint(-SCREEN_HEIGHT/2 + MAXIMUM_BALL_RADIUS , SCREEN_HEIGHT/2 - MAXIMUM_BALL_RADIUS) dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY) dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX) r = random.randint(MINIMUM_BALL_RADIUS , MAXIMUM_BALL_RADIUS) while dx == 0: dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX) while dy == 0: dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY) new_ball = Ball(x,y,dx,dy,r) BALLS.append(new_ball) def move_all_balls(BALLS): for index in range(len(BALLS)): BALLS[index].move(SCREEN_WIDTH , SCREEN_HEIGHT) #move_all_balls(BALLS) mainloop()
normal
{ "blob_id": "17cd6746e58a7f33bc239c1420d51c6810ed02d8", "index": 3575, "step-1": "<mask token>\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\n<mask token>\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\n<mask token>\n", "step-2": "<mask token>\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\n<mask token>\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n", "step-3": "<mask token>\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n 
if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width() / 2\nSCREEN_HEIGHT = getcanvas().winfo_height() / 2\nMY_BALL = 0, 0, 0.5, -0.4, 30\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\nBALLS = []\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n", "step-4": "from turtle import *\nimport time\nimport random\ncolormode(255)\n\n\nclass Ball(Turtle):\n\n def __init__(self, x, y, dx, dy, r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x, y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape('circle')\n self.shapesize(r / 10)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n self.color(r, g, b)\n\n def move(self, screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if (bottom_ball < -screen_hight / 2 or upper_ball_side > \n screen_hight / 2):\n self.dy *= -1\n if (left_side_ball < -screen_width / 2 or right_side_ball > \n screen_width / 2):\n self.dx *= -1\n\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width() / 2\nSCREEN_HEIGHT = getcanvas().winfo_height() / 2\nMY_BALL = 0, 0, 0.5, -0.4, 30\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\nBALLS = []\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS), int(\n SCREEN_WIDTH / 2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT / 2 + MAXIMUM_BALL_RADIUS, \n SCREEN_HEIGHT / 2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n new_ball = Ball(x, y, dx, dy, r)\n BALLS.append(new_ball)\n\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\nmainloop()\n", "step-5": "from turtle import *\nimport time\nimport random\ncolormode(255)\n\nclass Ball(Turtle):\n def __init__(self, x,y,dx,dy,r):\n Turtle.__init__(self)\n self.pu()\n self.goto(x,y)\n self.dx = dx\n self.dy = dy\n self.r = r\n self.shape(\"circle\")\n self.shapesize(r/10)\n r = 
random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n self.color(r,g,b)\n def move(self,screen_width, screen_hight):\n current_x = self.xcor()\n new_x = current_x + self.dx\n current_y = self.ycor()\n new_y = current_y + self.dy\n right_side_ball = new_x + self.r\n left_side_ball = new_x - self.r\n bottom_ball = new_y - self.r\n upper_ball_side = new_y + self.r\n self.goto(new_x, new_y)\n if bottom_ball < -screen_hight/2 or upper_ball_side > screen_hight/2:\n self.dy *= -1\n if left_side_ball < -screen_width/2 or right_side_ball > screen_width/2:\n self.dx *= -1\n\ntracer(0)\nht()\nRUNNING = True\nSLEEP = 0.0077\nSCREEN_WIDTH = getcanvas().winfo_width()/2\nSCREEN_HEIGHT = getcanvas().winfo_height()/2\n\nMY_BALL = (0,0,0.5,-0.4,30)\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMINIMUM_BALL_DX = -5\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\n\nBALLS = []\n\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(- SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS) , int(SCREEN_WIDTH/2 - MAXIMUM_BALL_RADIUS))\n y = random.randint(-SCREEN_HEIGHT/2 + MAXIMUM_BALL_RADIUS , SCREEN_HEIGHT/2 - MAXIMUM_BALL_RADIUS)\n dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)\n dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)\n r = random.randint(MINIMUM_BALL_RADIUS , MAXIMUM_BALL_RADIUS)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)\n new_ball = Ball(x,y,dx,dy,r)\n BALLS.append(new_ball)\n\ndef move_all_balls(BALLS):\n for index in range(len(BALLS)):\n BALLS[index].move(SCREEN_WIDTH , SCREEN_HEIGHT)\n\n#move_all_balls(BALLS)\n\nmainloop()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> output.write("""{} {} {} {} {} {} {} """.format(line1, line2, line3, line4, line5, line6, line7)) <|reserved_special_token_1|> <|reserved_special_token_0|> bank_data = 'Resources/budget_data.csv' bank_df = pd.read_csv(bank_data) total_months = bank_df['Date'].count() net_end = bank_df['Profit/Losses'].sum() bank_df['Change'] = bank_df['Profit/Losses'].diff() average_change = bank_df['Change'].mean() greatest_increase = bank_df['Change'].max() greatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, : ] greatest_decrease = bank_df['Change'].min() greatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, : ] financial_analysis = print('Financial Analysis'), print( '----------------------------'), print(f'Total Months: {total_months}' ), print(f'Total: {net_end}'), print( f'Average Change: ${round(average_change)}'), print( f'Greatest Increase in Profits:'), print(str(greatest_increase_month) ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month) output = open('output.txt', 'w') line1 = 'Financial Analysis' line2 = '---------------------' line3 = str(f'Total Months: {total_months}') line4 = str(f'Total: {net_end}') line5 = str(f'Average Change: ${average_change}') line6 = str(f'Greatest Increase in Profits: {greatest_increase_month}') line7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}') output.write("""{} {} {} {} {} {} {} """.format(line1, line2, line3, line4, line5, line6, line7)) <|reserved_special_token_1|> import pandas as pd bank_data = 'Resources/budget_data.csv' bank_df = pd.read_csv(bank_data) total_months = bank_df['Date'].count() net_end = bank_df['Profit/Losses'].sum() bank_df['Change'] = bank_df['Profit/Losses'].diff() average_change = bank_df['Change'].mean() greatest_increase = bank_df['Change'].max() greatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, : ] greatest_decrease = bank_df['Change'].min() greatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, : ] financial_analysis = print('Financial Analysis'), print( '----------------------------'), print(f'Total Months: {total_months}' ), print(f'Total: {net_end}'), print( f'Average Change: ${round(average_change)}'), print( f'Greatest Increase in Profits:'), print(str(greatest_increase_month) ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month) output = open('output.txt', 'w') line1 = 'Financial Analysis' line2 = '---------------------' line3 = str(f'Total Months: {total_months}') line4 = str(f'Total: {net_end}') line5 = str(f'Average Change: ${average_change}') line6 = str(f'Greatest Increase in Profits: {greatest_increase_month}') line7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}') output.write("""{} {} {} {} {} {} {} """.format(line1, line2, line3, line4, line5, line6, line7)) <|reserved_special_token_1|> # Dependencies import pandas as pd # Load in data file from resources bank_data = "Resources/budget_data.csv" # Read and display with pandas bank_df = pd.read_csv(bank_data) # Find the total number of months included in the dataset total_months = bank_df["Date"].count() # Find the total net amount of "Profit/Losses" over the entire period net_end = bank_df["Profit/Losses"].sum() # Create a new column that displays profit or loss between months bank_df["Change"] = bank_df["Profit/Losses"].diff() # Find the average change in "Profit/Losses" between months over the entire 
period average_change = bank_df["Change"].mean() # Find the greatest increase in profits (date and amount) over the entire period greatest_increase = bank_df["Change"].max() greatest_increase_month = bank_df.loc[bank_df["Change"] == greatest_increase, :] # Find the greatest decrease in losses (date and amount) over the entire period greatest_decrease = bank_df["Change"].min() greatest_decrease_month = bank_df.loc[bank_df["Change"] == greatest_decrease, :] # Print financial analysis financial_analysis = (print("Financial Analysis"), print("----------------------------"), print(f'Total Months: {total_months}'), print(f'Total: {net_end}'), print(f'Average Change: ${round(average_change)}'), print(f'Greatest Increase in Profits:'), print(str(greatest_increase_month)), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)) # Export to .txt output = open("output.txt", "w") line1 = "Financial Analysis" line2 = "---------------------" line3 = str(f'Total Months: {total_months}') line4 = str(f'Total: {net_end}') line5 = str(f'Average Change: ${average_change}') line6 = str(f'Greatest Increase in Profits: {greatest_increase_month}') line7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}') output.write('{}\n{}\n{}\n{}\n{}\n{}\n{}\n'.format(line1,line2,line3,line4,line5,line6,line7))
flexible
{ "blob_id": "1ad694c68ef264c6fbba4f4b9c069f22818d2816", "index": 9973, "step-1": "<mask token>\n", "step-2": "<mask token>\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-3": "<mask token>\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-4": "import pandas as pd\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-5": "# Dependencies\nimport pandas as pd\n\n# Load in data file from resources\nbank_data = \"Resources/budget_data.csv\"\n\n# Read and display with pandas\nbank_df = pd.read_csv(bank_data)\n\n# Find the total number of months included in the dataset\ntotal_months = bank_df[\"Date\"].count()\n\n# Find the total net amount of \"Profit/Losses\" over the entire period\nnet_end = bank_df[\"Profit/Losses\"].sum()\n\n# Create a new column that displays profit or loss between 
months\nbank_df[\"Change\"] = bank_df[\"Profit/Losses\"].diff()\n\n# Find the average change in \"Profit/Losses\" between months over the entire period\naverage_change = bank_df[\"Change\"].mean()\n\n# Find the greatest increase in profits (date and amount) over the entire period\ngreatest_increase = bank_df[\"Change\"].max()\ngreatest_increase_month = bank_df.loc[bank_df[\"Change\"] == greatest_increase, :]\n\n# Find the greatest decrease in losses (date and amount) over the entire period\ngreatest_decrease = bank_df[\"Change\"].min()\ngreatest_decrease_month = bank_df.loc[bank_df[\"Change\"] == greatest_decrease, :]\n\n# Print financial analysis\nfinancial_analysis = (print(\"Financial Analysis\"), print(\"----------------------------\"), \nprint(f'Total Months: {total_months}'), print(f'Total: {net_end}'), \nprint(f'Average Change: ${round(average_change)}'), \nprint(f'Greatest Increase in Profits:'), \nprint(str(greatest_increase_month)),\nprint(f'Greatest Decrease in Profits:'), \nprint(greatest_decrease_month))\n\n# Export to .txt\noutput = open(\"output.txt\", \"w\")\n\nline1 = \"Financial Analysis\"\nline2 = \"---------------------\"\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write('{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(line1,line2,line3,line4,line5,line6,line7))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Ui_MainWindow(QMainWindow): threads = [] keywordJudge = '' def __init__(self): super(Ui_MainWindow, self).__init__() self.buy_succeed_count = 0 for func in [self.output_buy_record, self.output_login_status, self .output_register_record]: thr = Thread(target=func) thr.setDaemon(True) thr.start() self._thread = Worker(self) self._thread.finished.connect(self._thread.deleteLater) self._thread.valueChanged.connect(ex.create_c) self._thread.start() def setupUi(self, MainWindow): MainWindow.setObjectName('MainWindow') MainWindow.resize(640, 478) MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) MainWindow.setFixedSize(self.width(), self.height()) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName('centralwidget') self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461)) self.tabWidget.setObjectName('tabWidget') self.tab = QtWidgets.QWidget() self.tab.setObjectName('tab') self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton.setObjectName('pushButton') self.lineEdit_tab = QtWidgets.QLineEdit(self.tab) self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数') self.label_0 = QtWidgets.QLabel(self.tab) self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_0.setObjectName('label_0') self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab) self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_2.setObjectName('textBrowser_2') self.tabWidget.addTab(self.tab, '') self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName('tab_2') self.tabWidget.addTab(self.tab, '') self.tab_3 = QtWidgets.QWidget() self.tab_3.setObjectName('tab_3') self.lineEdit = QtWidgets.QLineEdit(self.tab_2) self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31)) self.lineEdit.setObjectName('lineEdit') self.pushButton_2 = QtWidgets.QPushButton(self.tab_2) self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32)) self.pushButton_2.setObjectName('pushButton_2') self.pushButton_2.clicked.connect(self.search_1) self.label = QtWidgets.QLabel(self.tab_2) self.label.setGeometry(QtCore.QRect(30, 80, 54, 12)) self.label.setObjectName('label') self.label_2 = QtWidgets.QLabel(self.tab_2) self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12)) self.label_2.setObjectName('label_2') self.comboBox = QtWidgets.QComboBox(self.tab_2) self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31)) self.comboBox.setObjectName('comboBox') self.comboBox_2 = QtWidgets.QComboBox(self.tab_2) self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31)) self.comboBox_2.setObjectName('comboBox_2') self.label_3 = QtWidgets.QLabel(self.tab_2) self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12)) self.label_3.setObjectName('label_3') self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27)) self.lineEdit_1.setObjectName('lineEdit_1') self.label_6 = QtWidgets.QLabel(self.tab_2) self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12)) self.label_6.setObjectName('label_6') self.label_7 = QtWidgets.QLabel(self.tab_2) self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12)) self.label_7.setObjectName('label_7') self.label_7.setStyleSheet('font-size:16px;color:red') self.label_8 = QtWidgets.QLabel(self.tab_2) self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12)) self.label_8.setObjectName('label_8') self.lineEdit_8 = 
QtWidgets.QLineEdit(self.tab_2) self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27)) self.lineEdit_8.setObjectName('lineEdit_8') self.lineEdit_8.setText('4') self.pushButton_3 = QtWidgets.QPushButton(self.tab_2) self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31)) self.pushButton_3.setObjectName('pushButton_3') self.pushButton_3.clicked.connect(self.search_2) self.pushButton_quit = QtWidgets.QPushButton(self.tab_2) self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31)) self.pushButton_quit.setObjectName('pushButton_quit') self.pushButton_quit.clicked.connect(self.exit_quit) self.label_4 = QtWidgets.QLabel(self.tab_2) self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12)) self.label_4.setObjectName('label_4') self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2) self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192)) self.textBrowser_1.setObjectName('textBrowser') self.tabWidget.addTab(self.tab_2, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.tabWidget.addTab(self.tab_3, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.pushButton_4 = QtWidgets.QPushButton(self.tab_3) self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton_4.setObjectName('pushButton') self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3) self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数') self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3) self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_3.setObjectName('textBrowser_3') self.label_5 = QtWidgets.QLabel(self.tab_3) self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_5.setObjectName('label_5') _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票')) self.pushButton.setText(_translate('MainWindow', '点击登录')) self.pushButton.clicked.connect(self.login) self.pushButton_4.clicked.connect(self.register) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('MainWindow', '账号登录')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('MainWindow', '抢购中心')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('MainWindow', '账号注册')) self.label_0.setText(_translate('MainWindow', '登录日志:')) self.pushButton_2.setText(_translate('MainWindow', '搜索名称')) self.pushButton_3.setText(_translate('MainWindow', '点击购买')) self.pushButton_quit.setText(_translate('MainWindow', '退出程序')) self.pushButton_4.setText(_translate('MainWindow', '点击注册')) self.label.setText(_translate('MainWindow', '已择场次:')) self.label_2.setText(_translate('MainWindow', '已择价格:')) self.label_3.setText(_translate('MainWindow', '购买总数量:')) self.label_4.setText(_translate('MainWindow', '购买日志:')) self.label_5.setText(_translate('MainWindow', '注册日志:')) self.label_6.setText(_translate('MainWindow', '已购买:')) self.label_7.setText(_translate('MainWindow', '0')) self.label_8.setText(_translate('MainWindow', '每个账号购买数量:')) self.textBrowser_3.setText('') self.textBrowser_2.setText('') self.textBrowser_1.setText('') self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) def login(self): try: regiterSum = int(self.lineEdit_tab.text()) except Exception as err: res = 
QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return ipList = [''] self.textBrowser_2.append('开始登陆,请等待...') userinfo_list = [] with open('infomation.txt', 'rt', encoding='utf-8') as f: info_record = re.findall("'loginId': '(.*?)'", f.read()) for loginId in info_record: userinfo_list.append(loginId) for thr in userinfo_list[:regiterSum]: grabber = BuyUrbtix() ip = random.choice(ipList) Thread_name = Thread(target=grabber.openSite, args=(thr, ip)) self.threads.append(Thread_name) Thread_name.setDaemon(True) Thread_name.start() def search_1(self): keyword = self.lineEdit.text() self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword) if keyword == self.keywordJudge: self.textBrowser_1.append('请等待...') self.keywordJudge = '' return self.keywordJudge = keyword Thread_name = Thread(target=self.refresh) self.threads.append(Thread_name) Thread_name.start() Thread_01 = Thread(target=self.show_session_data) self.threads.append(Thread_01) Thread_01.start() def show_session_data(self): global SHOW_S_P self.comboBox_2.clear() self.comboBox.clear() while True: if ex.sessionName and ex.sessionPrice and SHOW_S_P: for i, eventDateName in enumerate(ex.sessionName): self.comboBox_2.addItem(eventDateName, i) for i, price in enumerate(ex.sessionPrice): self.comboBox.addItem(str(price), i) self.comboBox.setCurrentIndex(0) self.comboBox_2.setCurrentIndex(0) ex.sessionName.clear() ex.sessionPrice.clear() SHOW_S_P = False time.sleep(0.2) def refresh(self): try: if self.lineEdit.text(): global eventDateList keyword = self.lineEdit.text() my_attr['selNum'] = self.lineEdit_8.text() ex.eventDateList = request_spider.get_date_url(keyword) if ex.eventDateList: self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...') global SESSION_DATA SESSION_DATA = True else: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') else: sys.exit() except Exception as err: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') print(err) sys.exit() def output_login_status(self): while True: login_record_list = login_record() if login_record_list: for i in login_record_list: self.textBrowser_2.append(i) self.textBrowser_2.moveCursor(self.textBrowser_2. textCursor().End) login_record_list.remove(i) time.sleep(0.1) def output_buy_record(self): while True: buy_record_list = buy_record() if buy_record_list: for record in buy_record_list: if '购买成功' in record: self.buy_succeed_count += 1 self.label_7.setText(str(self.buy_succeed_count)) self.textBrowser_1.append(record) self.textBrowser_1.moveCursor(self.textBrowser_1. textCursor().End) buy_record_list.remove(record) time.sleep(0.1) def output_register_record(self): while True: register_record_list = register_record() if register_record_list: for i in register_record_list: self.textBrowser_3.append(i) self.textBrowser_3.moveCursor(self.textBrowser_3. textCursor().End) register_record_list.remove(i) time.sleep(0.1) def search_2(self): if not self.lineEdit_1.text(): self.textBrowser_1.append('请输入购买总数量...') return if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']: self.textBrowser_1.append('正在购买,请等待...') return if ex.saleTime: Thread_name = Thread(target=self.wait_sale) Thread_name.setDaemon(True) Thread_name.start() return my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def wait_sale(self): dateList = ex.saleTime print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' 
% tuple(dateList)) while True: saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', '%Y%m%d%H%M%S'))) if saleTimestamp <= int(time.time()): print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' % tuple( dateList)) break time.sleep(1) my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def register(self): self.textBrowser_3.append('开始注册,请等待...') try: regiterSum = int(self.lineEdit_tab3.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return threads = [] for _ in range(regiterSum): uper = Register() Thread_name = Thread(target=uper.registerInfo) Thread_name.setDaemon(True) Thread_name.start() threads.append(Thread_name) def exit_quit(self): global EXIT_COND res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox. Yes | QMessageBox.No) if res == QMessageBox.Yes: self._thread.exit_thread() time.sleep(1) sys.exit() else: pass class Example(QMainWindow): sessionList = [] priceList = [] sessionListEvn = [] priceListEvn = [] eventDateList = [] eventUrl = [] eventPrice = [] sessionName = [] sessionPrice = [] saleTime = [] buyNum = 1 def __init__(self): super(QMainWindow, self).__init__() self.setWindowTitle('城市售票网') self.resize(680, 800) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) self.setFixedSize(self.width(), self.height()) self.w = QWidget() self.w.setFixedWidth(680) self.w.setFixedHeight(540) self.setCentralWidget(self.w) self.topFiller = QWidget() self.scroll = QScrollArea() self.scroll.setWidget(self.topFiller) self.vbox = QVBoxLayout() self.vbox.addWidget(self.scroll) self.w.setLayout(self.vbox) self.initUI() def closeEvent(self, QCloseEvent): res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: global SHOW_S_P SHOW_S_P = True QCloseEvent.accept() self.cb1.setChecked(False) self.cb2.setChecked(False) else: QCloseEvent.ignore() def initUI(self): self.cb1 = QCheckBox('全选', self.topFiller) self.cb1.move(20, 30) self.cb2 = QCheckBox('全选', self) self.cb2.move(20, 570) bt1 = QPushButton('确定', self) bt2 = QPushButton('刷新', self) bt1.move(20, 760) bt2.move(120, 760) self.cb1.stateChanged.connect(self.changecb1) self.cb2.stateChanged.connect(self.changecb2) bt1.clicked.connect(self.pitch_on) bt2.clicked.connect(self.create_c) def create_c(self): if self.eventDateList: self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList] self.priceList = [price for price in self.eventDateList[0][ 'priceList']] ex.show() else: ex.show() QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok) return if self.sessionListEvn and self.priceListEvn: for s_evn in self.sessionListEvn: s_evn.deleteLater() for p_evn in self.priceListEvn: p_evn.deleteLater() self.sessionListEvn.clear() self.priceListEvn.clear() self.eventPrice.clear() self.eventUrl.clear() for i, item in enumerate(self.sessionList): cb = QCheckBox(item, self.topFiller) cb.move(30, 60 + 30 * i) self.sessionListEvn.append(cb) cb.show() self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30) for i, item in enumerate(self.priceList): cb_1 = QCheckBox(str(item), self) if i % 2 == 0: i = i // 2 + 1 cb_1.move(30, 570 + 30 * i) else: i = i // 2 + 1 cb_1.move(330, 570 + 30 * i) self.priceListEvn.append(cb_1) cb_1.show() def pitch_on(self): if 
self.sessionList: for i in self.sessionListEvn: if i.isChecked(): for eventDate in self.eventDateList: if eventDate['eventDateName'] == i.text(): if 'saleDate' in eventDate: self.saleTime = eventDate['saleDate'] self.eventUrl.append(eventDate['eventUrl']) self.sessionName.append(eventDate['eventDateName']) for i in self.priceListEvn: if i.isChecked(): if i.text() in self.eventDateList[0]['priceList']: self.eventPrice.append(str(self.eventDateList[0][ 'priceList'].index(i.text()))) self.sessionPrice.append(i.text()) if self.eventPrice and self.eventUrl: self.close() else: res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: self.close() else: print('输入内容不存在!') def changecb1(self): if self.cb1.checkState() == Qt.Checked: for qcb in self.sessionListEvn: qcb.setChecked(True) elif self.cb1.checkState() == Qt.Unchecked: for qcb in self.sessionListEvn: qcb.setChecked(False) def changecb2(self): if self.cb2.checkState() == Qt.Checked: for qcb in self.priceListEvn: qcb.setChecked(True) elif self.cb2.checkState() == Qt.Unchecked: for qcb in self.priceListEvn: qcb.setChecked(False) def refresh_cb(self): while True: if self.sessionList and self.priceList: self.create_c() break time.sleep(0.2) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Worker(QThread): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Ui_MainWindow(QMainWindow): threads = [] keywordJudge = '' def __init__(self): super(Ui_MainWindow, self).__init__() self.buy_succeed_count = 0 for func in [self.output_buy_record, self.output_login_status, self .output_register_record]: thr = Thread(target=func) thr.setDaemon(True) thr.start() self._thread = Worker(self) self._thread.finished.connect(self._thread.deleteLater) self._thread.valueChanged.connect(ex.create_c) self._thread.start() def setupUi(self, MainWindow): MainWindow.setObjectName('MainWindow') MainWindow.resize(640, 478) MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) MainWindow.setFixedSize(self.width(), self.height()) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName('centralwidget') self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461)) self.tabWidget.setObjectName('tabWidget') self.tab = QtWidgets.QWidget() self.tab.setObjectName('tab') self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton.setObjectName('pushButton') self.lineEdit_tab = QtWidgets.QLineEdit(self.tab) self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数') self.label_0 = QtWidgets.QLabel(self.tab) self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_0.setObjectName('label_0') self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab) self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_2.setObjectName('textBrowser_2') self.tabWidget.addTab(self.tab, '') self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName('tab_2') self.tabWidget.addTab(self.tab, '') self.tab_3 = QtWidgets.QWidget() self.tab_3.setObjectName('tab_3') self.lineEdit = QtWidgets.QLineEdit(self.tab_2) self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31)) self.lineEdit.setObjectName('lineEdit') self.pushButton_2 = QtWidgets.QPushButton(self.tab_2) 
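        # pushButton_2 is the "搜索名称" (search) button on the purchase tab; it is
        # connected to search_1() below, which spawns the query/refresh worker threads.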
self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32)) self.pushButton_2.setObjectName('pushButton_2') self.pushButton_2.clicked.connect(self.search_1) self.label = QtWidgets.QLabel(self.tab_2) self.label.setGeometry(QtCore.QRect(30, 80, 54, 12)) self.label.setObjectName('label') self.label_2 = QtWidgets.QLabel(self.tab_2) self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12)) self.label_2.setObjectName('label_2') self.comboBox = QtWidgets.QComboBox(self.tab_2) self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31)) self.comboBox.setObjectName('comboBox') self.comboBox_2 = QtWidgets.QComboBox(self.tab_2) self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31)) self.comboBox_2.setObjectName('comboBox_2') self.label_3 = QtWidgets.QLabel(self.tab_2) self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12)) self.label_3.setObjectName('label_3') self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27)) self.lineEdit_1.setObjectName('lineEdit_1') self.label_6 = QtWidgets.QLabel(self.tab_2) self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12)) self.label_6.setObjectName('label_6') self.label_7 = QtWidgets.QLabel(self.tab_2) self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12)) self.label_7.setObjectName('label_7') self.label_7.setStyleSheet('font-size:16px;color:red') self.label_8 = QtWidgets.QLabel(self.tab_2) self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12)) self.label_8.setObjectName('label_8') self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27)) self.lineEdit_8.setObjectName('lineEdit_8') self.lineEdit_8.setText('4') self.pushButton_3 = QtWidgets.QPushButton(self.tab_2) self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31)) self.pushButton_3.setObjectName('pushButton_3') self.pushButton_3.clicked.connect(self.search_2) self.pushButton_quit = QtWidgets.QPushButton(self.tab_2) self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31)) self.pushButton_quit.setObjectName('pushButton_quit') self.pushButton_quit.clicked.connect(self.exit_quit) self.label_4 = QtWidgets.QLabel(self.tab_2) self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12)) self.label_4.setObjectName('label_4') self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2) self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192)) self.textBrowser_1.setObjectName('textBrowser') self.tabWidget.addTab(self.tab_2, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.tabWidget.addTab(self.tab_3, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.pushButton_4 = QtWidgets.QPushButton(self.tab_3) self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton_4.setObjectName('pushButton') self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3) self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数') self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3) self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_3.setObjectName('textBrowser_3') self.label_5 = QtWidgets.QLabel(self.tab_3) self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_5.setObjectName('label_5') _translate = QtCore.QCoreApplication.translate 
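        # From here setupUi mainly applies display text via Qt's translate helper
        # and connects the login/register buttons to their handlers.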
MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票')) self.pushButton.setText(_translate('MainWindow', '点击登录')) self.pushButton.clicked.connect(self.login) self.pushButton_4.clicked.connect(self.register) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('MainWindow', '账号登录')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('MainWindow', '抢购中心')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('MainWindow', '账号注册')) self.label_0.setText(_translate('MainWindow', '登录日志:')) self.pushButton_2.setText(_translate('MainWindow', '搜索名称')) self.pushButton_3.setText(_translate('MainWindow', '点击购买')) self.pushButton_quit.setText(_translate('MainWindow', '退出程序')) self.pushButton_4.setText(_translate('MainWindow', '点击注册')) self.label.setText(_translate('MainWindow', '已择场次:')) self.label_2.setText(_translate('MainWindow', '已择价格:')) self.label_3.setText(_translate('MainWindow', '购买总数量:')) self.label_4.setText(_translate('MainWindow', '购买日志:')) self.label_5.setText(_translate('MainWindow', '注册日志:')) self.label_6.setText(_translate('MainWindow', '已购买:')) self.label_7.setText(_translate('MainWindow', '0')) self.label_8.setText(_translate('MainWindow', '每个账号购买数量:')) self.textBrowser_3.setText('') self.textBrowser_2.setText('') self.textBrowser_1.setText('') self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) def login(self): try: regiterSum = int(self.lineEdit_tab.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return ipList = [''] self.textBrowser_2.append('开始登陆,请等待...') userinfo_list = [] with open('infomation.txt', 'rt', encoding='utf-8') as f: info_record = re.findall("'loginId': '(.*?)'", f.read()) for loginId in info_record: userinfo_list.append(loginId) for thr in userinfo_list[:regiterSum]: grabber = BuyUrbtix() ip = random.choice(ipList) Thread_name = Thread(target=grabber.openSite, args=(thr, ip)) self.threads.append(Thread_name) Thread_name.setDaemon(True) Thread_name.start() def search_1(self): keyword = self.lineEdit.text() self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword) if keyword == self.keywordJudge: self.textBrowser_1.append('请等待...') self.keywordJudge = '' return self.keywordJudge = keyword Thread_name = Thread(target=self.refresh) self.threads.append(Thread_name) Thread_name.start() Thread_01 = Thread(target=self.show_session_data) self.threads.append(Thread_01) Thread_01.start() def show_session_data(self): global SHOW_S_P self.comboBox_2.clear() self.comboBox.clear() while True: if ex.sessionName and ex.sessionPrice and SHOW_S_P: for i, eventDateName in enumerate(ex.sessionName): self.comboBox_2.addItem(eventDateName, i) for i, price in enumerate(ex.sessionPrice): self.comboBox.addItem(str(price), i) self.comboBox.setCurrentIndex(0) self.comboBox_2.setCurrentIndex(0) ex.sessionName.clear() ex.sessionPrice.clear() SHOW_S_P = False time.sleep(0.2) def refresh(self): try: if self.lineEdit.text(): global eventDateList keyword = self.lineEdit.text() my_attr['selNum'] = self.lineEdit_8.text() ex.eventDateList = request_spider.get_date_url(keyword) if ex.eventDateList: self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...') global SESSION_DATA SESSION_DATA = True else: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') else: sys.exit() except Exception as err: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') print(err) sys.exit() def output_login_status(self): while True: login_record_list = login_record() if login_record_list: for i in login_record_list: self.textBrowser_2.append(i) self.textBrowser_2.moveCursor(self.textBrowser_2. textCursor().End) login_record_list.remove(i) time.sleep(0.1) def output_buy_record(self): while True: buy_record_list = buy_record() if buy_record_list: for record in buy_record_list: if '购买成功' in record: self.buy_succeed_count += 1 self.label_7.setText(str(self.buy_succeed_count)) self.textBrowser_1.append(record) self.textBrowser_1.moveCursor(self.textBrowser_1. textCursor().End) buy_record_list.remove(record) time.sleep(0.1) def output_register_record(self): while True: register_record_list = register_record() if register_record_list: for i in register_record_list: self.textBrowser_3.append(i) self.textBrowser_3.moveCursor(self.textBrowser_3. textCursor().End) register_record_list.remove(i) time.sleep(0.1) def search_2(self): if not self.lineEdit_1.text(): self.textBrowser_1.append('请输入购买总数量...') return if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']: self.textBrowser_1.append('正在购买,请等待...') return if ex.saleTime: Thread_name = Thread(target=self.wait_sale) Thread_name.setDaemon(True) Thread_name.start() return my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def wait_sale(self): dateList = ex.saleTime print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) while True: saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', '%Y%m%d%H%M%S'))) if saleTimestamp <= int(time.time()): print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple( dateList)) break time.sleep(1) my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def register(self): self.textBrowser_3.append('开始注册,请等待...') try: regiterSum = int(self.lineEdit_tab3.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return threads = [] for _ in range(regiterSum): uper = Register() Thread_name = Thread(target=uper.registerInfo) Thread_name.setDaemon(True) Thread_name.start() threads.append(Thread_name) def exit_quit(self): global EXIT_COND res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox. Yes | QMessageBox.No) if res == QMessageBox.Yes: self._thread.exit_thread() time.sleep(1) sys.exit() else: pass class Example(QMainWindow): sessionList = [] priceList = [] sessionListEvn = [] priceListEvn = [] eventDateList = [] eventUrl = [] eventPrice = [] sessionName = [] sessionPrice = [] saleTime = [] buyNum = 1 def __init__(self): super(QMainWindow, self).__init__() self.setWindowTitle('城市售票网') self.resize(680, 800) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) self.setFixedSize(self.width(), self.height()) self.w = QWidget() self.w.setFixedWidth(680) self.w.setFixedHeight(540) self.setCentralWidget(self.w) self.topFiller = QWidget() self.scroll = QScrollArea() self.scroll.setWidget(self.topFiller) self.vbox = QVBoxLayout() self.vbox.addWidget(self.scroll) self.w.setLayout(self.vbox) self.initUI() def closeEvent(self, QCloseEvent): res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: global SHOW_S_P SHOW_S_P = True QCloseEvent.accept() self.cb1.setChecked(False) self.cb2.setChecked(False) else: QCloseEvent.ignore() def initUI(self): self.cb1 = QCheckBox('全选', self.topFiller) self.cb1.move(20, 30) self.cb2 = QCheckBox('全选', self) self.cb2.move(20, 570) bt1 = QPushButton('确定', self) bt2 = QPushButton('刷新', self) bt1.move(20, 760) bt2.move(120, 760) self.cb1.stateChanged.connect(self.changecb1) self.cb2.stateChanged.connect(self.changecb2) bt1.clicked.connect(self.pitch_on) bt2.clicked.connect(self.create_c) def create_c(self): if self.eventDateList: self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList] self.priceList = [price for price in self.eventDateList[0][ 'priceList']] ex.show() else: ex.show() QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok) return if self.sessionListEvn and self.priceListEvn: for s_evn in self.sessionListEvn: s_evn.deleteLater() for p_evn in self.priceListEvn: p_evn.deleteLater() self.sessionListEvn.clear() self.priceListEvn.clear() self.eventPrice.clear() self.eventUrl.clear() for i, item in enumerate(self.sessionList): cb = QCheckBox(item, self.topFiller) cb.move(30, 60 + 30 * i) self.sessionListEvn.append(cb) cb.show() self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30) for i, item in enumerate(self.priceList): cb_1 = QCheckBox(str(item), self) if i % 2 == 0: i = i // 2 + 1 cb_1.move(30, 570 + 30 * i) else: i = i // 2 + 1 cb_1.move(330, 570 + 30 * i) self.priceListEvn.append(cb_1) cb_1.show() def pitch_on(self): if self.sessionList: for i in self.sessionListEvn: if i.isChecked(): for eventDate in self.eventDateList: if eventDate['eventDateName'] == i.text(): if 'saleDate' in eventDate: self.saleTime = eventDate['saleDate'] self.eventUrl.append(eventDate['eventUrl']) 
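                        # Note (inferred from how this class accesses the data): each eventDate
                        # dict carries 'eventDateName', 'eventUrl' and an optional 'saleDate',
                        # while the shared 'priceList' lives on eventDateList[0].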
self.sessionName.append(eventDate['eventDateName']) for i in self.priceListEvn: if i.isChecked(): if i.text() in self.eventDateList[0]['priceList']: self.eventPrice.append(str(self.eventDateList[0][ 'priceList'].index(i.text()))) self.sessionPrice.append(i.text()) if self.eventPrice and self.eventUrl: self.close() else: res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: self.close() else: print('输入内容不存在!') def changecb1(self): if self.cb1.checkState() == Qt.Checked: for qcb in self.sessionListEvn: qcb.setChecked(True) elif self.cb1.checkState() == Qt.Unchecked: for qcb in self.sessionListEvn: qcb.setChecked(False) def changecb2(self): if self.cb2.checkState() == Qt.Checked: for qcb in self.priceListEvn: qcb.setChecked(True) elif self.cb2.checkState() == Qt.Unchecked: for qcb in self.priceListEvn: qcb.setChecked(False) def refresh_cb(self): while True: if self.sessionList and self.priceList: self.create_c() break time.sleep(0.2) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Worker(QThread): valueChanged = pyqtSignal(int) handle = -1 def run(self): global SESSION_DATA, EXIT_COND try: self.handle = ctypes.windll.kernel32.OpenThread(win32con. PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId())) except Exception as e: print('get thread handle failed', e) while True: if SESSION_DATA: self.valueChanged.emit(1024) SESSION_DATA = False time.sleep(0.1) def exit_thread(self): os._exit(122) class Ui_MainWindow(QMainWindow): threads = [] keywordJudge = '' def __init__(self): super(Ui_MainWindow, self).__init__() self.buy_succeed_count = 0 for func in [self.output_buy_record, self.output_login_status, self .output_register_record]: thr = Thread(target=func) thr.setDaemon(True) thr.start() self._thread = Worker(self) self._thread.finished.connect(self._thread.deleteLater) self._thread.valueChanged.connect(ex.create_c) self._thread.start() def setupUi(self, MainWindow): MainWindow.setObjectName('MainWindow') MainWindow.resize(640, 478) MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) MainWindow.setFixedSize(self.width(), self.height()) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName('centralwidget') self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461)) self.tabWidget.setObjectName('tabWidget') self.tab = QtWidgets.QWidget() self.tab.setObjectName('tab') self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton.setObjectName('pushButton') self.lineEdit_tab = QtWidgets.QLineEdit(self.tab) self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数') self.label_0 = QtWidgets.QLabel(self.tab) self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_0.setObjectName('label_0') self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab) self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_2.setObjectName('textBrowser_2') self.tabWidget.addTab(self.tab, '') self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName('tab_2') self.tabWidget.addTab(self.tab, '') self.tab_3 = QtWidgets.QWidget() self.tab_3.setObjectName('tab_3') self.lineEdit = QtWidgets.QLineEdit(self.tab_2) self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31)) self.lineEdit.setObjectName('lineEdit') self.pushButton_2 = QtWidgets.QPushButton(self.tab_2) 
self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32)) self.pushButton_2.setObjectName('pushButton_2') self.pushButton_2.clicked.connect(self.search_1) self.label = QtWidgets.QLabel(self.tab_2) self.label.setGeometry(QtCore.QRect(30, 80, 54, 12)) self.label.setObjectName('label') self.label_2 = QtWidgets.QLabel(self.tab_2) self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12)) self.label_2.setObjectName('label_2') self.comboBox = QtWidgets.QComboBox(self.tab_2) self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31)) self.comboBox.setObjectName('comboBox') self.comboBox_2 = QtWidgets.QComboBox(self.tab_2) self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31)) self.comboBox_2.setObjectName('comboBox_2') self.label_3 = QtWidgets.QLabel(self.tab_2) self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12)) self.label_3.setObjectName('label_3') self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27)) self.lineEdit_1.setObjectName('lineEdit_1') self.label_6 = QtWidgets.QLabel(self.tab_2) self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12)) self.label_6.setObjectName('label_6') self.label_7 = QtWidgets.QLabel(self.tab_2) self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12)) self.label_7.setObjectName('label_7') self.label_7.setStyleSheet('font-size:16px;color:red') self.label_8 = QtWidgets.QLabel(self.tab_2) self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12)) self.label_8.setObjectName('label_8') self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27)) self.lineEdit_8.setObjectName('lineEdit_8') self.lineEdit_8.setText('4') self.pushButton_3 = QtWidgets.QPushButton(self.tab_2) self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31)) self.pushButton_3.setObjectName('pushButton_3') self.pushButton_3.clicked.connect(self.search_2) self.pushButton_quit = QtWidgets.QPushButton(self.tab_2) self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31)) self.pushButton_quit.setObjectName('pushButton_quit') self.pushButton_quit.clicked.connect(self.exit_quit) self.label_4 = QtWidgets.QLabel(self.tab_2) self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12)) self.label_4.setObjectName('label_4') self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2) self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192)) self.textBrowser_1.setObjectName('textBrowser') self.tabWidget.addTab(self.tab_2, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.tabWidget.addTab(self.tab_3, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.pushButton_4 = QtWidgets.QPushButton(self.tab_3) self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton_4.setObjectName('pushButton') self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3) self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数') self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3) self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_3.setObjectName('textBrowser_3') self.label_5 = QtWidgets.QLabel(self.tab_3) self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_5.setObjectName('label_5') _translate = QtCore.QCoreApplication.translate 
MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票')) self.pushButton.setText(_translate('MainWindow', '点击登录')) self.pushButton.clicked.connect(self.login) self.pushButton_4.clicked.connect(self.register) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('MainWindow', '账号登录')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('MainWindow', '抢购中心')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('MainWindow', '账号注册')) self.label_0.setText(_translate('MainWindow', '登录日志:')) self.pushButton_2.setText(_translate('MainWindow', '搜索名称')) self.pushButton_3.setText(_translate('MainWindow', '点击购买')) self.pushButton_quit.setText(_translate('MainWindow', '退出程序')) self.pushButton_4.setText(_translate('MainWindow', '点击注册')) self.label.setText(_translate('MainWindow', '已择场次:')) self.label_2.setText(_translate('MainWindow', '已择价格:')) self.label_3.setText(_translate('MainWindow', '购买总数量:')) self.label_4.setText(_translate('MainWindow', '购买日志:')) self.label_5.setText(_translate('MainWindow', '注册日志:')) self.label_6.setText(_translate('MainWindow', '已购买:')) self.label_7.setText(_translate('MainWindow', '0')) self.label_8.setText(_translate('MainWindow', '每个账号购买数量:')) self.textBrowser_3.setText('') self.textBrowser_2.setText('') self.textBrowser_1.setText('') self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) def login(self): try: regiterSum = int(self.lineEdit_tab.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return ipList = [''] self.textBrowser_2.append('开始登陆,请等待...') userinfo_list = [] with open('infomation.txt', 'rt', encoding='utf-8') as f: info_record = re.findall("'loginId': '(.*?)'", f.read()) for loginId in info_record: userinfo_list.append(loginId) for thr in userinfo_list[:regiterSum]: grabber = BuyUrbtix() ip = random.choice(ipList) Thread_name = Thread(target=grabber.openSite, args=(thr, ip)) self.threads.append(Thread_name) Thread_name.setDaemon(True) Thread_name.start() def search_1(self): keyword = self.lineEdit.text() self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword) if keyword == self.keywordJudge: self.textBrowser_1.append('请等待...') self.keywordJudge = '' return self.keywordJudge = keyword Thread_name = Thread(target=self.refresh) self.threads.append(Thread_name) Thread_name.start() Thread_01 = Thread(target=self.show_session_data) self.threads.append(Thread_01) Thread_01.start() def show_session_data(self): global SHOW_S_P self.comboBox_2.clear() self.comboBox.clear() while True: if ex.sessionName and ex.sessionPrice and SHOW_S_P: for i, eventDateName in enumerate(ex.sessionName): self.comboBox_2.addItem(eventDateName, i) for i, price in enumerate(ex.sessionPrice): self.comboBox.addItem(str(price), i) self.comboBox.setCurrentIndex(0) self.comboBox_2.setCurrentIndex(0) ex.sessionName.clear() ex.sessionPrice.clear() SHOW_S_P = False time.sleep(0.2) def refresh(self): try: if self.lineEdit.text(): global eventDateList keyword = self.lineEdit.text() my_attr['selNum'] = self.lineEdit_8.text() ex.eventDateList = request_spider.get_date_url(keyword) if ex.eventDateList: self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...') global SESSION_DATA SESSION_DATA = True else: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') else: sys.exit() except Exception as err: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') print(err) sys.exit() def output_login_status(self): while True: login_record_list = login_record() if login_record_list: for i in login_record_list: self.textBrowser_2.append(i) self.textBrowser_2.moveCursor(self.textBrowser_2. textCursor().End) login_record_list.remove(i) time.sleep(0.1) def output_buy_record(self): while True: buy_record_list = buy_record() if buy_record_list: for record in buy_record_list: if '购买成功' in record: self.buy_succeed_count += 1 self.label_7.setText(str(self.buy_succeed_count)) self.textBrowser_1.append(record) self.textBrowser_1.moveCursor(self.textBrowser_1. textCursor().End) buy_record_list.remove(record) time.sleep(0.1) def output_register_record(self): while True: register_record_list = register_record() if register_record_list: for i in register_record_list: self.textBrowser_3.append(i) self.textBrowser_3.moveCursor(self.textBrowser_3. textCursor().End) register_record_list.remove(i) time.sleep(0.1) def search_2(self): if not self.lineEdit_1.text(): self.textBrowser_1.append('请输入购买总数量...') return if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']: self.textBrowser_1.append('正在购买,请等待...') return if ex.saleTime: Thread_name = Thread(target=self.wait_sale) Thread_name.setDaemon(True) Thread_name.start() return my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def wait_sale(self): dateList = ex.saleTime print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) while True: saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', '%Y%m%d%H%M%S'))) if saleTimestamp <= int(time.time()): print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple( dateList)) break time.sleep(1) my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def register(self): self.textBrowser_3.append('开始注册,请等待...') try: regiterSum = int(self.lineEdit_tab3.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return threads = [] for _ in range(regiterSum): uper = Register() Thread_name = Thread(target=uper.registerInfo) Thread_name.setDaemon(True) Thread_name.start() threads.append(Thread_name) def exit_quit(self): global EXIT_COND res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox. Yes | QMessageBox.No) if res == QMessageBox.Yes: self._thread.exit_thread() time.sleep(1) sys.exit() else: pass class Example(QMainWindow): sessionList = [] priceList = [] sessionListEvn = [] priceListEvn = [] eventDateList = [] eventUrl = [] eventPrice = [] sessionName = [] sessionPrice = [] saleTime = [] buyNum = 1 def __init__(self): super(QMainWindow, self).__init__() self.setWindowTitle('城市售票网') self.resize(680, 800) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) self.setFixedSize(self.width(), self.height()) self.w = QWidget() self.w.setFixedWidth(680) self.w.setFixedHeight(540) self.setCentralWidget(self.w) self.topFiller = QWidget() self.scroll = QScrollArea() self.scroll.setWidget(self.topFiller) self.vbox = QVBoxLayout() self.vbox.addWidget(self.scroll) self.w.setLayout(self.vbox) self.initUI() def closeEvent(self, QCloseEvent): res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: global SHOW_S_P SHOW_S_P = True QCloseEvent.accept() self.cb1.setChecked(False) self.cb2.setChecked(False) else: QCloseEvent.ignore() def initUI(self): self.cb1 = QCheckBox('全选', self.topFiller) self.cb1.move(20, 30) self.cb2 = QCheckBox('全选', self) self.cb2.move(20, 570) bt1 = QPushButton('确定', self) bt2 = QPushButton('刷新', self) bt1.move(20, 760) bt2.move(120, 760) self.cb1.stateChanged.connect(self.changecb1) self.cb2.stateChanged.connect(self.changecb2) bt1.clicked.connect(self.pitch_on) bt2.clicked.connect(self.create_c) def create_c(self): if self.eventDateList: self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList] self.priceList = [price for price in self.eventDateList[0][ 'priceList']] ex.show() else: ex.show() QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok) return if self.sessionListEvn and self.priceListEvn: for s_evn in self.sessionListEvn: s_evn.deleteLater() for p_evn in self.priceListEvn: p_evn.deleteLater() self.sessionListEvn.clear() self.priceListEvn.clear() self.eventPrice.clear() self.eventUrl.clear() for i, item in enumerate(self.sessionList): cb = QCheckBox(item, self.topFiller) cb.move(30, 60 + 30 * i) self.sessionListEvn.append(cb) cb.show() self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30) for i, item in enumerate(self.priceList): cb_1 = QCheckBox(str(item), self) if i % 2 == 0: i = i // 2 + 1 cb_1.move(30, 570 + 30 * i) else: i = i // 2 + 1 cb_1.move(330, 570 + 30 * i) self.priceListEvn.append(cb_1) cb_1.show() def pitch_on(self): if self.sessionList: for i in self.sessionListEvn: if i.isChecked(): for eventDate in self.eventDateList: if eventDate['eventDateName'] == i.text(): if 'saleDate' in eventDate: self.saleTime = eventDate['saleDate'] self.eventUrl.append(eventDate['eventUrl']) 
self.sessionName.append(eventDate['eventDateName']) for i in self.priceListEvn: if i.isChecked(): if i.text() in self.eventDateList[0]['priceList']: self.eventPrice.append(str(self.eventDateList[0][ 'priceList'].index(i.text()))) self.sessionPrice.append(i.text()) if self.eventPrice and self.eventUrl: self.close() else: res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: self.close() else: print('输入内容不存在!') def changecb1(self): if self.cb1.checkState() == Qt.Checked: for qcb in self.sessionListEvn: qcb.setChecked(True) elif self.cb1.checkState() == Qt.Unchecked: for qcb in self.sessionListEvn: qcb.setChecked(False) def changecb2(self): if self.cb2.checkState() == Qt.Checked: for qcb in self.priceListEvn: qcb.setChecked(True) elif self.cb2.checkState() == Qt.Unchecked: for qcb in self.priceListEvn: qcb.setChecked(False) def refresh_cb(self): while True: if self.sessionList and self.priceList: self.create_c() break time.sleep(0.2) if __name__ == '__main__': app = QtWidgets.QApplication(sys.argv) ex = Example() MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_()) <|reserved_special_token_1|> import ctypes import win32con import request_spider from selenium_tickets_spider import * from threading import Thread from PyQt5.QtWidgets import * from PyQt5 import QtCore, QtWidgets from PyQt5.QtCore import Qt, QThread, pyqtSignal import sys, time, re import datetime SESSION_DATA = False SHOW_S_P = False class Worker(QThread): valueChanged = pyqtSignal(int) handle = -1 def run(self): global SESSION_DATA, EXIT_COND try: self.handle = ctypes.windll.kernel32.OpenThread(win32con. PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId())) except Exception as e: print('get thread handle failed', e) while True: if SESSION_DATA: self.valueChanged.emit(1024) SESSION_DATA = False time.sleep(0.1) def exit_thread(self): os._exit(122) class Ui_MainWindow(QMainWindow): threads = [] keywordJudge = '' def __init__(self): super(Ui_MainWindow, self).__init__() self.buy_succeed_count = 0 for func in [self.output_buy_record, self.output_login_status, self .output_register_record]: thr = Thread(target=func) thr.setDaemon(True) thr.start() self._thread = Worker(self) self._thread.finished.connect(self._thread.deleteLater) self._thread.valueChanged.connect(ex.create_c) self._thread.start() def setupUi(self, MainWindow): MainWindow.setObjectName('MainWindow') MainWindow.resize(640, 478) MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) MainWindow.setFixedSize(self.width(), self.height()) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName('centralwidget') self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461)) self.tabWidget.setObjectName('tabWidget') self.tab = QtWidgets.QWidget() self.tab.setObjectName('tab') self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton.setObjectName('pushButton') self.lineEdit_tab = QtWidgets.QLineEdit(self.tab) self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数') self.label_0 = QtWidgets.QLabel(self.tab) self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_0.setObjectName('label_0') self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab) self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221)) 
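        # textBrowser_2 is the scrolling login-log pane on the first tab;
        # output_login_status() appends to it from a daemon thread.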
self.textBrowser_2.setObjectName('textBrowser_2') self.tabWidget.addTab(self.tab, '') self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName('tab_2') self.tabWidget.addTab(self.tab, '') self.tab_3 = QtWidgets.QWidget() self.tab_3.setObjectName('tab_3') self.lineEdit = QtWidgets.QLineEdit(self.tab_2) self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31)) self.lineEdit.setObjectName('lineEdit') self.pushButton_2 = QtWidgets.QPushButton(self.tab_2) self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32)) self.pushButton_2.setObjectName('pushButton_2') self.pushButton_2.clicked.connect(self.search_1) self.label = QtWidgets.QLabel(self.tab_2) self.label.setGeometry(QtCore.QRect(30, 80, 54, 12)) self.label.setObjectName('label') self.label_2 = QtWidgets.QLabel(self.tab_2) self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12)) self.label_2.setObjectName('label_2') self.comboBox = QtWidgets.QComboBox(self.tab_2) self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31)) self.comboBox.setObjectName('comboBox') self.comboBox_2 = QtWidgets.QComboBox(self.tab_2) self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31)) self.comboBox_2.setObjectName('comboBox_2') self.label_3 = QtWidgets.QLabel(self.tab_2) self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12)) self.label_3.setObjectName('label_3') self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27)) self.lineEdit_1.setObjectName('lineEdit_1') self.label_6 = QtWidgets.QLabel(self.tab_2) self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12)) self.label_6.setObjectName('label_6') self.label_7 = QtWidgets.QLabel(self.tab_2) self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12)) self.label_7.setObjectName('label_7') self.label_7.setStyleSheet('font-size:16px;color:red') self.label_8 = QtWidgets.QLabel(self.tab_2) self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12)) self.label_8.setObjectName('label_8') self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2) self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27)) self.lineEdit_8.setObjectName('lineEdit_8') self.lineEdit_8.setText('4') self.pushButton_3 = QtWidgets.QPushButton(self.tab_2) self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31)) self.pushButton_3.setObjectName('pushButton_3') self.pushButton_3.clicked.connect(self.search_2) self.pushButton_quit = QtWidgets.QPushButton(self.tab_2) self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31)) self.pushButton_quit.setObjectName('pushButton_quit') self.pushButton_quit.clicked.connect(self.exit_quit) self.label_4 = QtWidgets.QLabel(self.tab_2) self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12)) self.label_4.setObjectName('label_4') self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2) self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192)) self.textBrowser_1.setObjectName('textBrowser') self.tabWidget.addTab(self.tab_2, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.tabWidget.addTab(self.tab_3, '') MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName('statusbar') MainWindow.setStatusBar(self.statusbar) self.pushButton_4 = QtWidgets.QPushButton(self.tab_3) self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton_4.setObjectName('pushButton') self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3) 
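        # lineEdit_tab3 holds the number of accounts to register; register()
        # parses it with int() and starts that many Register threads.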
self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28)) self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数') self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3) self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221)) self.textBrowser_3.setObjectName('textBrowser_3') self.label_5 = QtWidgets.QLabel(self.tab_3) self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12)) self.label_5.setObjectName('label_5') _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票')) self.pushButton.setText(_translate('MainWindow', '点击登录')) self.pushButton.clicked.connect(self.login) self.pushButton_4.clicked.connect(self.register) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('MainWindow', '账号登录')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('MainWindow', '抢购中心')) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('MainWindow', '账号注册')) self.label_0.setText(_translate('MainWindow', '登录日志:')) self.pushButton_2.setText(_translate('MainWindow', '搜索名称')) self.pushButton_3.setText(_translate('MainWindow', '点击购买')) self.pushButton_quit.setText(_translate('MainWindow', '退出程序')) self.pushButton_4.setText(_translate('MainWindow', '点击注册')) self.label.setText(_translate('MainWindow', '已择场次:')) self.label_2.setText(_translate('MainWindow', '已择价格:')) self.label_3.setText(_translate('MainWindow', '购买总数量:')) self.label_4.setText(_translate('MainWindow', '购买日志:')) self.label_5.setText(_translate('MainWindow', '注册日志:')) self.label_6.setText(_translate('MainWindow', '已购买:')) self.label_7.setText(_translate('MainWindow', '0')) self.label_8.setText(_translate('MainWindow', '每个账号购买数量:')) self.textBrowser_3.setText('') self.textBrowser_2.setText('') self.textBrowser_1.setText('') self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) def login(self): try: regiterSum = int(self.lineEdit_tab.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return ipList = [''] self.textBrowser_2.append('开始登陆,请等待...') userinfo_list = [] with open('infomation.txt', 'rt', encoding='utf-8') as f: info_record = re.findall("'loginId': '(.*?)'", f.read()) for loginId in info_record: userinfo_list.append(loginId) for thr in userinfo_list[:regiterSum]: grabber = BuyUrbtix() ip = random.choice(ipList) Thread_name = Thread(target=grabber.openSite, args=(thr, ip)) self.threads.append(Thread_name) Thread_name.setDaemon(True) Thread_name.start() def search_1(self): keyword = self.lineEdit.text() self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword) if keyword == self.keywordJudge: self.textBrowser_1.append('请等待...') self.keywordJudge = '' return self.keywordJudge = keyword Thread_name = Thread(target=self.refresh) self.threads.append(Thread_name) Thread_name.start() Thread_01 = Thread(target=self.show_session_data) self.threads.append(Thread_01) Thread_01.start() def show_session_data(self): global SHOW_S_P self.comboBox_2.clear() self.comboBox.clear() while True: if ex.sessionName and ex.sessionPrice and SHOW_S_P: for i, eventDateName in enumerate(ex.sessionName): self.comboBox_2.addItem(eventDateName, i) for i, price in enumerate(ex.sessionPrice): self.comboBox.addItem(str(price), i) self.comboBox.setCurrentIndex(0) self.comboBox_2.setCurrentIndex(0) ex.sessionName.clear() ex.sessionPrice.clear() SHOW_S_P = False time.sleep(0.2) def refresh(self): try: if self.lineEdit.text(): global eventDateList keyword = self.lineEdit.text() my_attr['selNum'] = self.lineEdit_8.text() ex.eventDateList = request_spider.get_date_url(keyword) if ex.eventDateList: self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...') global SESSION_DATA SESSION_DATA = True else: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') else: sys.exit() except Exception as err: self.textBrowser_1.append('查询失败,请确定您查询的节目存在...') print(err) sys.exit() def output_login_status(self): while True: login_record_list = login_record() if login_record_list: for i in login_record_list: self.textBrowser_2.append(i) self.textBrowser_2.moveCursor(self.textBrowser_2. textCursor().End) login_record_list.remove(i) time.sleep(0.1) def output_buy_record(self): while True: buy_record_list = buy_record() if buy_record_list: for record in buy_record_list: if '购买成功' in record: self.buy_succeed_count += 1 self.label_7.setText(str(self.buy_succeed_count)) self.textBrowser_1.append(record) self.textBrowser_1.moveCursor(self.textBrowser_1. textCursor().End) buy_record_list.remove(record) time.sleep(0.1) def output_register_record(self): while True: register_record_list = register_record() if register_record_list: for i in register_record_list: self.textBrowser_3.append(i) self.textBrowser_3.moveCursor(self.textBrowser_3. textCursor().End) register_record_list.remove(i) time.sleep(0.1) def search_2(self): if not self.lineEdit_1.text(): self.textBrowser_1.append('请输入购买总数量...') return if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']: self.textBrowser_1.append('正在购买,请等待...') return if ex.saleTime: Thread_name = Thread(target=self.wait_sale) Thread_name.setDaemon(True) Thread_name.start() return my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def wait_sale(self): dateList = ex.saleTime print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList)) while True: saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', '%Y%m%d%H%M%S'))) if saleTimestamp <= int(time.time()): print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList)) self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple( dateList)) break time.sleep(1) my_attr['gross'] = self.lineEdit_1.text() my_attr['selNum'] = self.lineEdit_8.text() my_attr['selPrice'] = ex.eventPrice my_attr['selSeatUrl'] = ex.eventUrl self.textBrowser_1.append('开始购买,请您耐心等待...') def register(self): self.textBrowser_3.append('开始注册,请等待...') try: regiterSum = int(self.lineEdit_tab3.text()) except Exception as err: res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) return threads = [] for _ in range(regiterSum): uper = Register() Thread_name = Thread(target=uper.registerInfo) Thread_name.setDaemon(True) Thread_name.start() threads.append(Thread_name) def exit_quit(self): global EXIT_COND res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox. Yes | QMessageBox.No) if res == QMessageBox.Yes: self._thread.exit_thread() time.sleep(1) sys.exit() else: pass class Example(QMainWindow): sessionList = [] priceList = [] sessionListEvn = [] priceListEvn = [] eventDateList = [] eventUrl = [] eventPrice = [] sessionName = [] sessionPrice = [] saleTime = [] buyNum = 1 def __init__(self): super(QMainWindow, self).__init__() self.setWindowTitle('城市售票网') self.resize(680, 800) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) self.setFixedSize(self.width(), self.height()) self.w = QWidget() self.w.setFixedWidth(680) self.w.setFixedHeight(540) self.setCentralWidget(self.w) self.topFiller = QWidget() self.scroll = QScrollArea() self.scroll.setWidget(self.topFiller) self.vbox = QVBoxLayout() self.vbox.addWidget(self.scroll) self.w.setLayout(self.vbox) self.initUI() def closeEvent(self, QCloseEvent): res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: global SHOW_S_P SHOW_S_P = True QCloseEvent.accept() self.cb1.setChecked(False) self.cb2.setChecked(False) else: QCloseEvent.ignore() def initUI(self): self.cb1 = QCheckBox('全选', self.topFiller) self.cb1.move(20, 30) self.cb2 = QCheckBox('全选', self) self.cb2.move(20, 570) bt1 = QPushButton('确定', self) bt2 = QPushButton('刷新', self) bt1.move(20, 760) bt2.move(120, 760) self.cb1.stateChanged.connect(self.changecb1) self.cb2.stateChanged.connect(self.changecb2) bt1.clicked.connect(self.pitch_on) bt2.clicked.connect(self.create_c) def create_c(self): if self.eventDateList: self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList] self.priceList = [price for price in self.eventDateList[0][ 'priceList']] ex.show() else: ex.show() QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok) return if self.sessionListEvn and self.priceListEvn: for s_evn in self.sessionListEvn: s_evn.deleteLater() for p_evn in self.priceListEvn: p_evn.deleteLater() self.sessionListEvn.clear() self.priceListEvn.clear() self.eventPrice.clear() self.eventUrl.clear() for i, item in enumerate(self.sessionList): cb = QCheckBox(item, self.topFiller) cb.move(30, 60 + 30 * i) self.sessionListEvn.append(cb) cb.show() self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30) for i, item in enumerate(self.priceList): cb_1 = QCheckBox(str(item), self) if i % 2 == 0: i = i // 2 + 1 cb_1.move(30, 570 + 30 * i) else: i = i // 2 + 1 cb_1.move(330, 570 + 30 * i) self.priceListEvn.append(cb_1) cb_1.show() def pitch_on(self): if self.sessionList: for i in self.sessionListEvn: if i.isChecked(): for eventDate in self.eventDateList: if eventDate['eventDateName'] == i.text(): if 'saleDate' in eventDate: self.saleTime = eventDate['saleDate'] self.eventUrl.append(eventDate['eventUrl']) 
self.sessionName.append(eventDate['eventDateName']) for i in self.priceListEvn: if i.isChecked(): if i.text() in self.eventDateList[0]['priceList']: self.eventPrice.append(str(self.eventDateList[0][ 'priceList'].index(i.text()))) self.sessionPrice.append(i.text()) if self.eventPrice and self.eventUrl: self.close() else: res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if res == QMessageBox.Yes: self.close() else: print('输入内容不存在!') def changecb1(self): if self.cb1.checkState() == Qt.Checked: for qcb in self.sessionListEvn: qcb.setChecked(True) elif self.cb1.checkState() == Qt.Unchecked: for qcb in self.sessionListEvn: qcb.setChecked(False) def changecb2(self): if self.cb2.checkState() == Qt.Checked: for qcb in self.priceListEvn: qcb.setChecked(True) elif self.cb2.checkState() == Qt.Unchecked: for qcb in self.priceListEvn: qcb.setChecked(False) def refresh_cb(self): while True: if self.sessionList and self.priceList: self.create_c() break time.sleep(0.2) if __name__ == '__main__': app = QtWidgets.QApplication(sys.argv) ex = Example() MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_()) <|reserved_special_token_1|> import ctypes import win32con import request_spider from selenium_tickets_spider import * from threading import Thread from PyQt5.QtWidgets import * from PyQt5 import QtCore, QtWidgets from PyQt5.QtCore import Qt, QThread, pyqtSignal import sys, time, re import datetime SESSION_DATA = False SHOW_S_P = False class Worker(QThread): valueChanged = pyqtSignal(int) # 值变化信号 handle = -1 def run(self): global SESSION_DATA,EXIT_COND try: self.handle = ctypes.windll.kernel32.OpenThread( # @UndefinedVariable win32con.PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId())) except Exception as e: print('get thread handle failed', e) # print('thread id', int(QThread.currentThreadId())) # 循环发送信号 while True: if SESSION_DATA: self.valueChanged.emit(1024) SESSION_DATA = False time.sleep(0.1) def exit_thread(self): os._exit(122) class Ui_MainWindow(QMainWindow): threads = [] keywordJudge = '' def __init__(self): super(Ui_MainWindow, self).__init__() # self.ex = Example() self.buy_succeed_count = 0 for func in [self.output_buy_record, self.output_login_status,self.output_register_record]: thr = Thread(target=func) thr.setDaemon(True) thr.start() # 子线程 self._thread = Worker(self) self._thread.finished.connect(self._thread.deleteLater) self._thread.valueChanged.connect(ex.create_c) self._thread.start() def setupUi(self, MainWindow): # MainWindow.setStyleSheet("#MainWindow{background-color: yellow}") MainWindow.setObjectName("MainWindow") MainWindow.resize(640, 478) # MainWindow.setMinimumSize(640, 478) # MainWindow.setMaximumSize(640, 478) # 取消最大化 MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) # 固定窗口大小 MainWindow.setFixedSize(self.width(), self.height()) # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461)) self.tabWidget.setObjectName("tabWidget") self.tab = QtWidgets.QWidget() self.tab.setObjectName("tab") # 登录按钮 self.pushButton = QtWidgets.QPushButton(self.tab) self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30)) self.pushButton.setObjectName("pushButton") # 登陆个数输入框 self.lineEdit_tab = QtWidgets.QLineEdit(self.tab) 
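        # login() reads this field to decide how many accounts to sign in and
        # starts one BuyUrbtix worker thread per loginId read from infomation.txt.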
        self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab.setPlaceholderText(" 请输入登陆个数")
        # 登录日志输出
        self.label_0 = QtWidgets.QLabel(self.tab)
        self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_0.setObjectName("label_0")
        # 注册日志
        self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)
        self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_2.setObjectName("textBrowser_2")
        # 登录页面
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.tabWidget.addTab(self.tab, "")
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.lineEdit = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))
        self.lineEdit.setObjectName("lineEdit")
        # 查询商品名称
        self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.clicked.connect(self.search_1)
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.tab_2)
        self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))
        self.label_2.setObjectName("label_2")
        self.comboBox = QtWidgets.QComboBox(self.tab_2)
        self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))
        self.comboBox.setObjectName("comboBox")
        # self.comboBox.currentText()
        self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)
        self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))
        self.comboBox_2.setObjectName("comboBox_2")
        # 选择数量
        self.label_3 = QtWidgets.QLabel(self.tab_2)
        self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))
        self.label_3.setObjectName("label_3")
        # 数量输入框
        self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))
        self.lineEdit_1.setObjectName("lineEdit_1")
        # 购买成功数量
        self.label_6 = QtWidgets.QLabel(self.tab_2)
        self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.tab_2)
        self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))
        self.label_7.setObjectName("label_7")
        self.label_7.setStyleSheet("font-size:16px;color:red") # 设置字体颜色
        self.label_8 = QtWidgets.QLabel(self.tab_2)
        self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))
        self.label_8.setObjectName("label_8")
        self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.lineEdit_8.setText('4')
        # 购买按钮 当所有条件选择完之后点击
        self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_3.clicked.connect(self.search_2)
        # 退出程序按钮
        self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))
        self.pushButton_quit.setObjectName("pushButton_quit")
        self.pushButton_quit.clicked.connect(self.exit_quit)
        self.label_4 = QtWidgets.QLabel(self.tab_2)
        self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))
        self.label_4.setObjectName("label_4")
        # 购买日志输出
        self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)
        self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))
        self.textBrowser_1.setObjectName("textBrowser")
        # 添加显示数据
        # self.textBrowser_1.append('购买日志')
        # 抢票中心页面
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # 账号注册页面
        self.tabWidget.addTab(self.tab_3, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # 点击注册按钮
        self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)
        self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))
        self.pushButton_4.setObjectName("pushButton")
        # 注册个数输入框
        self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)
        self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab3.setPlaceholderText(" 请输入注册个数")
        # 注册日志输出
        self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)
        self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_3.setObjectName("textBrowser_3")
        self.label_5 = QtWidgets.QLabel(self.tab_3)
        self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_5.setObjectName("label_5")
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "城市售票网-抢票"))
        self.pushButton.setText(_translate("MainWindow", "点击登录"))
        self.pushButton.clicked.connect(self.login)
        self.pushButton_4.clicked.connect(self.register)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "账号登录"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "抢购中心"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "账号注册"))
        self.label_0.setText(_translate("MainWindow", "登录日志:"))
        self.pushButton_2.setText(_translate("MainWindow", "搜索名称"))
        self.pushButton_3.setText(_translate("MainWindow", "点击购买"))
        self.pushButton_quit.setText(_translate("MainWindow", "退出程序"))
        self.pushButton_4.setText(_translate("MainWindow", "点击注册"))
        self.label.setText(_translate("MainWindow", "已择场次:"))
        self.label_2.setText(_translate("MainWindow", "已择价格:"))
        self.label_3.setText(_translate("MainWindow", "购买总数量:"))
        self.label_4.setText(_translate("MainWindow", "购买日志:"))
        self.label_5.setText(_translate("MainWindow", "注册日志:"))
        self.label_6.setText(_translate("MainWindow", "已购买:"))
        self.label_7.setText(_translate("MainWindow", "0"))
        self.label_8.setText(_translate("MainWindow", "每个账号购买数量:"))
        self.textBrowser_3.setText("")
        self.textBrowser_2.setText("")
        self.textBrowser_1.setText("")
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    # 点击登录执行
    def login(self):
        try:
            regiterSum = int(self.lineEdit_tab.text())
        except Exception as err:
            res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框
            return
        ipList = [""]
        # ipList = request_tickets_spider.get_ip_list(10)
        self.textBrowser_2.append("开始登陆,请等待...")
        userinfo_list = []
        with open('infomation.txt', 'rt', encoding='utf-8') as f:
            info_record = re.findall("'loginId': '(.*?)'", f.read())
            for loginId in info_record:
                userinfo_list.append(loginId)
        # 多线程
        for thr in userinfo_list[:regiterSum]:
            grabber = BuyUrbtix()
            ip = random.choice(ipList)
            Thread_name = Thread(target=grabber.openSite, args=(thr,ip))
            self.threads.append(Thread_name)
            Thread_name.setDaemon(True)
            Thread_name.start()

    # 点击搜索按钮执行
    def search_1(self):
        keyword = self.lineEdit.text()
        self.textBrowser_1.append("正在查询 %s 的所有场次和价格..." % keyword)
        if keyword == self.keywordJudge:
            self.textBrowser_1.append("请等待...")
            self.keywordJudge = ''
            return
        self.keywordJudge = keyword
        Thread_name = Thread(target=self.refresh)
        self.threads.append(Thread_name)
        Thread_name.start()
        Thread_01 = Thread(target=self.show_session_data)
        self.threads.append(Thread_01)
        Thread_01.start()

    # 把选择的场次和价格显示到主界面
    def show_session_data(self):
        global SHOW_S_P
        self.comboBox_2.clear()
        self.comboBox.clear()
        while True:
            # if self.ex.sessionName and self.ex.sessionPrice:
            if ex.sessionName and ex.sessionPrice and SHOW_S_P:
                for i,eventDateName in enumerate(ex.sessionName):
                    self.comboBox_2.addItem(eventDateName, i)
                for i,price in enumerate(ex.sessionPrice):
                    self.comboBox.addItem(str(price), i)# 价格
                self.comboBox.setCurrentIndex(0)
                self.comboBox_2.setCurrentIndex(0)
                ex.sessionName.clear()
                ex.sessionPrice.clear()
                SHOW_S_P = False
            time.sleep(0.2)

    # 把信息刷新到界面
    def refresh(self):
        try:
            if self.lineEdit.text():
                global eventDateList
                keyword = self.lineEdit.text()
                my_attr['selNum'] = self.lineEdit_8.text()
                ex.eventDateList = request_spider.get_date_url(keyword)
                if ex.eventDateList:
                    self.textBrowser_1.append("查询成功,请在选择界面选择场次和价格...")
                    global SESSION_DATA
                    SESSION_DATA = True
                    # ex.create_c()
                else:
                    self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
            else:
                sys.exit()
        except Exception as err:
            self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
            print(err)
            sys.exit()

    # 日志更新
    def output_login_status(self):
        # 登录成功输出
        while True:
            # 登陆日志
            login_record_list = login_record()
            if login_record_list:
                for i in login_record_list:
                    self.textBrowser_2.append(i)
                    self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)
                    login_record_list.remove(i)
            time.sleep(0.1)

    # 购买日志
    def output_buy_record(self):
        while True:
            buy_record_list = buy_record()
            if buy_record_list:
                for record in buy_record_list:
                    if "购买成功" in record:
                        self.buy_succeed_count += 1
                        self.label_7.setText(str(self.buy_succeed_count))
                    self.textBrowser_1.append(record)
                    self.textBrowser_1.moveCursor(self.textBrowser_1.textCursor().End)
                    buy_record_list.remove(record)
            time.sleep(0.1)

    # 注册日志
    def output_register_record(self):
        while True:
            register_record_list = register_record()
            if register_record_list:
                for i in register_record_list:
                    self.textBrowser_3.append(i)
                    self.textBrowser_3.moveCursor(self.textBrowser_3.textCursor().End)
                    register_record_list.remove(i)
            time.sleep(0.1)

    # 购买条件选择后点击执行
    def search_2(self):
        if not self.lineEdit_1.text():
            self.textBrowser_1.append("请输入购买总数量...")
            return
        if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:
            self.textBrowser_1.append("正在购买,请等待...")
            return
        if ex.saleTime:
            Thread_name = Thread(target=self.wait_sale)
            Thread_name.setDaemon(True)
            Thread_name.start()
            return
        my_attr['gross'] = self.lineEdit_1.text()
        my_attr['selNum'] = self.lineEdit_8.text()
        my_attr['selPrice'] = ex.eventPrice
        my_attr['selSeatUrl'] = ex.eventUrl
        self.textBrowser_1.append("开始购买,请您耐心等待...")

    def wait_sale(self):
        dateList = ex.saleTime
        print("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
        self.textBrowser_1.append("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
        while True:
            saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', "%Y%m%d%H%M%S")))
            if saleTimestamp <= int(time.time()):
                print("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
                self.textBrowser_1.append("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
                break
            time.sleep(1)
        my_attr['gross'] = self.lineEdit_1.text()
        my_attr['selNum'] = self.lineEdit_8.text()
        my_attr['selPrice'] = ex.eventPrice
        my_attr['selSeatUrl'] = ex.eventUrl
        self.textBrowser_1.append("开始购买,请您耐心等待...")

    #点击注册执行并打印注册
    def register(self):
        self.textBrowser_3.append("开始注册,请等待...")
        try:
            regiterSum = int(self.lineEdit_tab3.text())
        except Exception as err:
            res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框
            return
        threads = []
        for _ in range(regiterSum):
            uper = Register()
            Thread_name = Thread(target=uper.registerInfo)
            Thread_name.setDaemon(True)
            Thread_name.start()
            threads.append(Thread_name)

    # 退出程序
    def exit_quit(self):
        global EXIT_COND
        res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.Yes | QMessageBox.No) # 提示框
        if res == QMessageBox.Yes:
            self._thread.exit_thread()
            time.sleep(1)
            sys.exit()
        else:
            pass


class Example(QMainWindow):
    sessionList = []
    priceList = []
    sessionListEvn = []
    priceListEvn = []
    eventDateList = []
    eventUrl = []
    eventPrice = []
    sessionName = []
    sessionPrice = []
    saleTime = []
    buyNum = 1

    def __init__(self):
        super(QMainWindow, self).__init__()
        self.setWindowTitle('城市售票网') # 主窗口
        self.resize(680, 800)
        # 取消最大化
        self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        # 固定窗口大小
        self.setFixedSize(self.width(), self.height())
        self.w = QWidget()
        self.w.setFixedWidth(680)
        self.w.setFixedHeight(540)
        self.setCentralWidget(self.w)
        self.topFiller = QWidget() # 把布局放入到 w 窗口
        # 创建一个滚动条
        self.scroll = QScrollArea()
        self.scroll.setWidget(self.topFiller) # 滚动条放self.topFiller
        self.vbox = QVBoxLayout() # 方框布局
        self.vbox.addWidget(self.scroll) # 滚动条放入布局
        self.w.setLayout(self.vbox)
        self.initUI()

    def closeEvent(self, QCloseEvent):
        res = QMessageBox.question(self,'提示','您确定选择无误吗?',QMessageBox.Yes|QMessageBox.No,QMessageBox.No) #两个按钮是否, 默认No则关闭这个提示框
        if res == QMessageBox.Yes:
            global SHOW_S_P
            SHOW_S_P = True
            QCloseEvent.accept()
            self.cb1.setChecked(False)
            self.cb2.setChecked(False)
        else:
            QCloseEvent.ignore()

    def initUI(self):
        #新建全选复选框对象
        self.cb1 = QCheckBox('全选',self.topFiller)
        self.cb1.move(20,30)
        self.cb2 = QCheckBox('全选',self)
        self.cb2.move(20, 570)
        # 创建按钮
        bt1 = QPushButton('确定',self)
        bt2 = QPushButton('刷新',self)
        bt1.move(20,760)
        bt2.move(120,760)
        # 每当复选框的状态改变时,即每当用户选中或取消选中该信号时,就会发出此信号。
        # 所以当产生此信号的时候,我们将其连接相应的槽函数。
        self.cb1.stateChanged.connect(self.changecb1) # 全选复选框连接到全选槽函数
        self.cb2.stateChanged.connect(self.changecb2) # 全选复选框连接到全选槽函数
        bt1.clicked.connect(self.pitch_on) # 连接到显示选中单元
        bt2.clicked.connect(self.create_c) # 连接到创建函数

    def create_c(self):
        if self.eventDateList:
            self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList]
            self.priceList = [price for price in self.eventDateList[0]['priceList']]
            # print(self.priceList)
            # print(self.sessionList)
            ex.show()
        else:
            ex.show()
            QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)
            return
        # 清空上次搜索内容
        if self.sessionListEvn and self.priceListEvn:
            for s_evn in self.sessionListEvn:
                s_evn.deleteLater()
            for p_evn in self.priceListEvn:
                p_evn.deleteLater()
            self.sessionListEvn.clear()
            self.priceListEvn.clear()
            self.eventPrice.clear()
            self.eventUrl.clear()
        # 场次信息显示
        for i,item in enumerate(self.sessionList):
            cb = QCheckBox(item, self.topFiller)
            cb.move(30, 60+30*i)
            self.sessionListEvn.append(cb)
            cb.show()
        self.topFiller.setMinimumSize(580,(len(self.sessionList)+5)*30) #设置滚动条的尺寸
        # 价格显示
        for i,item in enumerate(self.priceList):
            cb_1 = QCheckBox(str(item), self)
            if i % 2 == 0:
                i = i // 2 + 1
                cb_1.move(30, 570+30*i)
            else:
                i = i // 2 + 1
                cb_1.move(330, 570+30*i)
            self.priceListEvn.append(cb_1)
            cb_1.show()

    def pitch_on(self):
        if self.sessionList:
            for i in self.sessionListEvn: # 遍历所有复选框
                if i.isChecked(): # 判断是否被选中
                    for eventDate in self.eventDateList: # 遍历所有的数据
                        if eventDate['eventDateName'] == i.text(): # 判断数据是否被选中
                            if 'saleDate' in eventDate:
                                self.saleTime = eventDate['saleDate']
                                # print(eventDate['saleDate'])
                            self.eventUrl.append(eventDate["eventUrl"]) # 被选中则保存
                            self.sessionName.append(eventDate['eventDateName'])
            for i in self.priceListEvn:
                if i.isChecked():
                    if i.text() in self.eventDateList[0]['priceList']:
                        self.eventPrice.append(str(self.eventDateList[0]['priceList'].index(i.text())))
                        self.sessionPrice.append(i.text())
            # 如果选择的有数据,则关闭窗口,没有数据,提示选择数据
            if self.eventPrice and self.eventUrl:
                self.close()
            else:
                res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No) # 两个按钮是否, 默认No则关闭这个提示框
                if res == QMessageBox.Yes:
                    self.close()
        else:
            print("输入内容不存在!")

    # 全选复选框槽函数
    def changecb1(self):
        if self.cb1.checkState() == Qt.Checked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(True)
        elif self.cb1.checkState() == Qt.Unchecked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(False)

    # 全选复选框槽函数
    def changecb2(self):
        if self.cb2.checkState() == Qt.Checked:
            for qcb in self.priceListEvn:
                qcb.setChecked(True)
        elif self.cb2.checkState() == Qt.Unchecked:
            for qcb in self.priceListEvn:
                qcb.setChecked(False)

    # 刷新按钮
    def refresh_cb(self):
        while True:
            if self.sessionList and self.priceList:
                self.create_c()
                break
            time.sleep(0.2)


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv) # 创建一个QApplication,也就是你要开发的软件app
    ex = Example()
    MainWindow = QtWidgets.QMainWindow() # 创建一个QMainWindow,用来装载你需要的各种组件、控件
    ui = Ui_MainWindow() # ui是你创建的ui类的实例化对象
    ui.setupUi(MainWindow) # 执行类中的setupUi方法,方法的参数是第二步中创建的QMainWindow
    MainWindow.show() # 执行QMainWindow的show()方法,显示这个QMainWindow
    # ex.show()
    sys.exit(app.exec_())
flexible
{ "blob_id": "bc0846397a5ad73b1c4b85e12864b27ef4fd08d7", "index": 5358, "step-1": "<mask token>\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n 
self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n 
self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n 
self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' % tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for 
price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Worker(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n 
self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n 
self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if 
self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Worker(QThread):\n valueChanged = pyqtSignal(int)\n handle = -1\n\n def run(self):\n global SESSION_DATA, EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread(win32con.\n PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n 
self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 
120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if 
self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Example()\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "step-4": "import ctypes\nimport win32con\nimport request_spider\nfrom selenium_tickets_spider import *\nfrom threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nimport sys, time, re\nimport datetime\nSESSION_DATA = False\nSHOW_S_P = False\n\n\nclass Worker(QThread):\n valueChanged = pyqtSignal(int)\n handle = -1\n\n def run(self):\n global SESSION_DATA, EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread(win32con.\n PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 
120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n 
MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' 
% keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if 
self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Example()\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "step-5": "import ctypes\nimport win32con\nimport request_spider\nfrom selenium_tickets_spider import *\nfrom threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nimport sys, time, re\nimport datetime\n\nSESSION_DATA = False\nSHOW_S_P = False\n\nclass Worker(QThread):\n\n valueChanged = pyqtSignal(int) # 值变化信号\n handle = -1\n\n def run(self):\n global SESSION_DATA,EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread( # @UndefinedVariable\n win32con.PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n # print('thread id', int(QThread.currentThreadId()))\n # 循环发送信号\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n\n threads = []\n keywordJudge = ''\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n # self.ex = Example()\n\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status,self.output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n\n # 子线程\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n # MainWindow.setStyleSheet(\"#MainWindow{background-color: yellow}\")\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(640, 478)\n # MainWindow.setMinimumSize(640, 478)\n # MainWindow.setMaximumSize(640, 478)\n # 取消最大化\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n # 固定窗口大小\n MainWindow.setFixedSize(self.width(), self.height())\n # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) \n self.centralwidget = QtWidgets.QWidget(MainWindow)\n 
self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName(\"tabWidget\")\n\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName(\"tab\")\n\n # 登录按钮\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName(\"pushButton\")\n\n # 登陆个数输入框\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(\" 请输入登陆个数\")\n\n # 登录日志输出\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName(\"label_0\")\n\n # 注册日志\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName(\"textBrowser_2\")\n\n\n # 登录页面\n self.tabWidget.addTab(self.tab, \"\")\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n\n self.tabWidget.addTab(self.tab, \"\")\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName(\"tab_3\")\n\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName(\"lineEdit\")\n\n # 查询商品名称\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_2.clicked.connect(self.search_1)\n\n\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName(\"label\")\n\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName(\"label_2\")\n\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName(\"comboBox\")\n # self.comboBox.currentText()\n\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName(\"comboBox_2\")\n\n # 选择数量\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName(\"label_3\")\n\n # 数量输入框\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName(\"lineEdit_1\")\n\n # 购买成功数量\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName(\"label_6\")\n\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName(\"label_7\")\n self.label_7.setStyleSheet(\"font-size:16px;color:red\") # 设置字体颜色\n\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName(\"label_8\")\n\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName(\"lineEdit_8\")\n self.lineEdit_8.setText('4')\n\n # 购买按钮 当所有条件选择完之后点击\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n 
self.pushButton_3.clicked.connect(self.search_2)\n\n # 退出程序按钮\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName(\"pushButton_quit\")\n self.pushButton_quit.clicked.connect(self.exit_quit)\n\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName(\"label_4\")\n\n # 购买日志输出\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName(\"textBrowser\")\n # 添加显示数据\n # self.textBrowser_1.append('购买日志')\n\n # 抢票中心页面\n self.tabWidget.addTab(self.tab_2, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # 账号注册页面\n self.tabWidget.addTab(self.tab_3, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # 点击注册按钮\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName(\"pushButton\")\n\n # 注册个数输入框\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(\" 请输入注册个数\")\n\n # 注册日志输出\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName(\"textBrowser_3\")\n\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName(\"label_5\")\n\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"城市售票网-抢票\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"点击登录\"))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"账号登录\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \"抢购中心\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate(\"MainWindow\", \"账号注册\"))\n self.label_0.setText(_translate(\"MainWindow\", \"登录日志:\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"搜索名称\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"点击购买\"))\n self.pushButton_quit.setText(_translate(\"MainWindow\", \"退出程序\"))\n self.pushButton_4.setText(_translate(\"MainWindow\", \"点击注册\"))\n self.label.setText(_translate(\"MainWindow\", \"已择场次:\"))\n self.label_2.setText(_translate(\"MainWindow\", \"已择价格:\"))\n self.label_3.setText(_translate(\"MainWindow\", \"购买总数量:\"))\n self.label_4.setText(_translate(\"MainWindow\", \"购买日志:\"))\n self.label_5.setText(_translate(\"MainWindow\", \"注册日志:\"))\n self.label_6.setText(_translate(\"MainWindow\", \"已购买:\"))\n self.label_7.setText(_translate(\"MainWindow\", \"0\"))\n self.label_8.setText(_translate(\"MainWindow\", \"每个账号购买数量:\"))\n self.textBrowser_3.setText(\"\")\n self.textBrowser_2.setText(\"\")\n self.textBrowser_1.setText(\"\")\n\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\n # 点击登录执行\n def login(self):\n try:\n regiterSum = 
int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框\n return\n ipList = [\"\"]\n # ipList = request_tickets_spider.get_ip_list(10)\n self.textBrowser_2.append(\"开始登陆,请等待...\")\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n # 多线程\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr,ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n # 点击搜索按钮执行\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append(\"正在查询 %s 的所有场次和价格...\" % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append(\"请等待...\")\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n\n # 把选择的场次和价格显示到主界面\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n # if self.ex.sessionName and self.ex.sessionPrice:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i,eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i,price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)# 价格\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n # 把信息刷新到界面\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append(\"查询成功,请在选择界面选择场次和价格...\")\n global SESSION_DATA\n SESSION_DATA = True\n # ex.create_c()\n else:\n self.textBrowser_1.append(\"查询失败,请确定您查询的节目存在...\")\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append(\"查询失败,请确定您查询的节目存在...\")\n print(err)\n sys.exit()\n\n # 日志更新\n def output_login_status(self):\n # 登录成功输出\n while True:\n # 登陆日志\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n # 购买日志\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if \"购买成功\" in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n # 注册日志\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n\n # 购买条件选择后点击执行\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append(\"请输入购买总数量...\")\n return\n\n if my_attr['selNum'] and my_attr['selPrice'] 
and my_attr['selSeatUrl']:\n self.textBrowser_1.append(\"正在购买,请等待...\")\n return\n\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append(\"开始购买,请您耐心等待...\")\n\n def wait_sale(self):\n dateList = ex.saleTime\n print(\"%s年%s月%s日%s时开始售票,等待购买!\" % tuple(dateList))\n self.textBrowser_1.append(\"%s年%s月%s日%s时开始售票,等待购买!\" % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', \"%Y%m%d%H%M%S\")))\n if saleTimestamp <= int(time.time()):\n print(\"%s年%s月%s日%s时开始售票,开始购买!\" % tuple(dateList))\n self.textBrowser_1.append(\"%s年%s月%s日%s时开始售票,开始购买!\" % tuple(dateList))\n break\n time.sleep(1)\n\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append(\"开始购买,请您耐心等待...\")\n\n\n #点击注册执行并打印注册 \n def register(self):\n self.textBrowser_3.append(\"开始注册,请等待...\")\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n # 退出程序\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.Yes | QMessageBox.No) # 提示框\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网') # 主窗口\n self.resize(680, 800)\n # 取消最大化\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n # 固定窗口大小\n self.setFixedSize(self.width(), self.height())\n\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n # 把布局放入到 w 窗口\n # 创建一个滚动条\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller) # 滚动条放self.topFiller\n\n self.vbox = QVBoxLayout() # 方框布局\n self.vbox.addWidget(self.scroll) # 滚动条放入布局\n self.w.setLayout(self.vbox)\n\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self,'提示','您确定选择无误吗?',QMessageBox.Yes|QMessageBox.No,QMessageBox.No) #两个按钮是否, 默认No则关闭这个提示框\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n #新建全选复选框对象\n self.cb1 = QCheckBox('全选',self.topFiller)\n self.cb1.move(20,30)\n self.cb2 = QCheckBox('全选',self)\n self.cb2.move(20, 570)\n # 创建按钮\n bt1 = QPushButton('确定',self)\n bt2 = QPushButton('刷新',self)\n\n bt1.move(20,760)\n bt2.move(120,760)\n\n # 每当复选框的状态改变时,即每当用户选中或取消选中该信号时,就会发出此信号。\n # 所以当产生此信号的时候,我们将其连接相应的槽函数。\n self.cb1.stateChanged.connect(self.changecb1) # 全选复选框连接到全选槽函数\n self.cb2.stateChanged.connect(self.changecb2) # 全选复选框连接到全选槽函数\n bt1.clicked.connect(self.pitch_on) # 连接到显示选中单元\n 
bt2.clicked.connect(self.create_c) # 连接到创建函数\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0]['priceList']]\n # print(self.priceList)\n # print(self.sessionList)\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n\n # 清空上次搜索内容\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n\n # 场次信息显示\n for i,item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60+30*i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580,(len(self.sessionList)+5)*30) #设置滚动条的尺寸\n\n # 价格显示\n for i,item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570+30*i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570+30*i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn: # 遍历所有复选框\n if i.isChecked(): # 判断是否被选中\n for eventDate in self.eventDateList: # 遍历所有的数据\n if eventDate['eventDateName'] == i.text(): # 判断数据是否被选中\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n # print(eventDate['saleDate'])\n self.eventUrl.append(eventDate[\"eventUrl\"]) # 被选中则保存\n self.sessionName.append(eventDate['eventDateName'])\n\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0]['priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n\n # 如果选择的有数据,则关闭窗口,没有数据,提示选择数据\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) # 两个按钮是否, 默认No则关闭这个提示框\n if res == QMessageBox.Yes:\n self.close()\n else:\n print(\"输入内容不存在!\")\n\n # 全选复选框槽函数\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n # 全选复选框槽函数\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n # 刷新按钮\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv) # 创建一个QApplication,也就是你要开发的软件app\n ex = Example()\n MainWindow = QtWidgets.QMainWindow() # 创建一个QMainWindow,用来装载你需要的各种组件、控件\n ui = Ui_MainWindow() # ui是你创建的ui类的实例化对象\n ui.setupUi(MainWindow) # 执行类中的setupUi方法,方法的参数是第二步中创建的QMainWindow\n MainWindow.show() # 执行QMainWindow的show()方法,显示这个QMainWindow\n # ex.show()\n sys.exit(app.exec_())\n\n", "step-ids": [ 25, 26, 30, 32, 33 ] }
[ 25, 26, 30, 32, 33 ]
def tetrahedron_filled(tetrahedrons, water):
	var=0
	br=0
	tetrahedrons.sort()
	for numbers in tetrahedrons:
		v=(tetrahedrons[var]**3*(2**0.5))/12000
		if v<water:
			br=br+1
			water=water-v
		var=var+1
	print (br)


print (tetrahedron_filled([1000,10],10))
normal
{ "blob_id": "c926e16ef2daa5978b6c71e7794721d320bb9b1e", "index": 1224, "step-1": "<mask token>\n", "step-2": "def tetrahedron_filled(tetrahedrons, water):\n var = 0\n br = 0\n tetrahedrons.sort()\n for numbers in tetrahedrons:\n v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000\n if v < water:\n br = br + 1\n water = water - v\n var = var + 1\n print(br)\n\n\n<mask token>\n", "step-3": "def tetrahedron_filled(tetrahedrons, water):\n var = 0\n br = 0\n tetrahedrons.sort()\n for numbers in tetrahedrons:\n v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000\n if v < water:\n br = br + 1\n water = water - v\n var = var + 1\n print(br)\n\n\nprint(tetrahedron_filled([1000, 10], 10))\n", "step-4": "def tetrahedron_filled(tetrahedrons, water):\n\tvar=0\n\tbr=0\n\ttetrahedrons.sort()\n\tfor numbers in tetrahedrons:\n\t\tv=(tetrahedrons[var]**3*(2**0.5))/12000\n\t\tif v<water:\n\t\t\tbr=br+1\n\t\t\twater=water-v\n\t\tvar=var+1\n\tprint (br)\n\n\nprint (tetrahedron_filled([1000,10],10))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#! /usr/bin/env python
from thor.tree import TreeNode


class Solution(object):
	def postorder_traversal(self, root: TreeNode):
		if not root:
			return []
		else:
			return self.postorder_traversal(root.left) + self.postorder_traversal(root.right) + [root.val]
normal
{ "blob_id": "1d314a04625cfadf574f122b95577c1e677a8b35", "index": 3247, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Solution(object):\n\n def postorder_traversal(self, root: TreeNode):\n if not root:\n return []\n else:\n return self.postorder_traversal(root.left\n ) + self.postorder_traversal(root.right) + [root.val]\n", "step-4": "from thor.tree import TreeNode\n\n\nclass Solution(object):\n\n def postorder_traversal(self, root: TreeNode):\n if not root:\n return []\n else:\n return self.postorder_traversal(root.left\n ) + self.postorder_traversal(root.right) + [root.val]\n", "step-5": "#! /usr/bin/env python\nfrom thor.tree import TreeNode\n\n\nclass Solution(object):\n\tdef postorder_traversal(self, root: TreeNode):\n\t\tif not root:\n\t\t\treturn []\n\t\telse:\n\t\t\treturn self.postorder_traversal(root.left) + self.postorder_traversal(root.right) + [root.val]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Generated by Django 3.1.1 on 2020-10-10 07:38

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('socialapp', '0004_mesage_creation_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=200)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('receiver', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='receiver_not', to=settings.AUTH_USER_MODEL)),
                ('sender', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender_not', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
normal
{ "blob_id": "38751da57ad7c786e9fc0722faf065380e5f7e60", "index": 4994, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('socialapp', '0004_mesage_creation_date')]\n operations = [migrations.CreateModel(name='Notification', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('content', models.TextField(max_length\n =200)), ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='receiver_not', to=settings.\n AUTH_USER_MODEL)), ('sender', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'sender_not', to=settings.AUTH_USER_MODEL))])]\n", "step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('socialapp', '0004_mesage_creation_date')]\n operations = [migrations.CreateModel(name='Notification', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('content', models.TextField(max_length\n =200)), ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='receiver_not', to=settings.\n AUTH_USER_MODEL)), ('sender', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'sender_not', to=settings.AUTH_USER_MODEL))])]\n", "step-5": "# Generated by Django 3.1.1 on 2020-10-10 07:38\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('socialapp', '0004_mesage_creation_date'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Notification',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField(max_length=200)),\n ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('receiver', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='receiver_not', to=settings.AUTH_USER_MODEL)),\n ('sender', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sender_not', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev <|reserved_special_token_0|> def over(n, k): """n over k""" return fac(n) // fac(n - k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) <|reserved_special_token_0|> def series(r, n): """Calculate geometric series.""" return (1 - r ** n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b ** 2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return x1, x2 <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def mean(L): """Calculate mean of given List""" return sum(L) / len(L) def variance(L, is_sample=0): """calculate variance (or sample variance) of given List""" m = mean(L) return sum((x - m) ** 2 for x in L) / (len(L) - is_sample) def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev def fac(n): assert n >= 0 return n if n <= 2 else fac(n - 1) * n def over(n, k): """n over k""" return fac(n) // fac(n - k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) <|reserved_special_token_0|> def herons_formula(a, b, c): """Calculate area of triangle with sides a, b, and c.""" print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2') s = (a + b + c) / 2 return math.sqrt(s * (s - a) * (s - b) * (s - c)) def area_equilat(side): """Area of equilateral triangle.""" return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2) <|reserved_special_token_0|> def det2(m): """Determinant of 2x2 matrix.""" (a, b), (c, d) = m return a * d - b * c <|reserved_special_token_0|> def series(r, n): """Calculate geometric series.""" return (1 - r ** n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b ** 2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return x1, x2 def master_method(a, b, d): """Estimate Complexity using Master Method, print result.""" if a == b ** d: print('Case 1: a = b^d') print('-> O(n^%d log n)' % d) elif a < b ** d: print('Case 2: a < b^d') print('-> O(n^%d)' % d) elif a > b ** d: print('Case 3: a > b^d') print('-> O(n^log%d(%d))' % (b, a)) print(' = O(n^%.2f)' % math.log(a, b)) <|reserved_special_token_1|> <|reserved_special_token_0|> def mean(L): """Calculate mean of given List""" return sum(L) / len(L) def variance(L, is_sample=0): """calculate variance (or sample variance) of given List""" m = 
mean(L) return sum((x - m) ** 2 for x in L) / (len(L) - is_sample) def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev def fac(n): assert n >= 0 return n if n <= 2 else fac(n - 1) * n def over(n, k): """n over k""" return fac(n) // fac(n - k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) <|reserved_special_token_0|> def herons_formula(a, b, c): """Calculate area of triangle with sides a, b, and c.""" print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2') s = (a + b + c) / 2 return math.sqrt(s * (s - a) * (s - b) * (s - c)) def area_equilat(side): """Area of equilateral triangle.""" return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2) <|reserved_special_token_0|> def det2(m): """Determinant of 2x2 matrix.""" (a, b), (c, d) = m return a * d - b * c def det3(m): """Determinant of 3x3 matrix.""" a, b, c = m[0] da = det2([m[1][1:], m[2][1:]]) db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]]) dc = det2([m[1][:2], m[2][:2]]) return a * da - b * db + c * dc def series(r, n): """Calculate geometric series.""" return (1 - r ** n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b ** 2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return x1, x2 def master_method(a, b, d): """Estimate Complexity using Master Method, print result.""" if a == b ** d: print('Case 1: a = b^d') print('-> O(n^%d log n)' % d) elif a < b ** d: print('Case 2: a < b^d') print('-> O(n^%d)' % d) elif a > b ** d: print('Case 3: a > b^d') print('-> O(n^log%d(%d))' % (b, a)) print(' = O(n^%.2f)' % math.log(a, b)) <|reserved_special_token_1|> <|reserved_special_token_0|> def mean(L): """Calculate mean of given List""" return sum(L) / len(L) def variance(L, is_sample=0): """calculate variance (or sample variance) of given List""" m = mean(L) return sum((x - m) ** 2 for x in L) / (len(L) - is_sample) def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev def fac(n): assert n >= 0 return n if n <= 2 else fac(n - 1) * n def over(n, k): """n over k""" return fac(n) // fac(n - k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed): """Calculate probability for pulling a coin from a bag with fair and unfair coins and flipping it a number of times, each time coming up heads.""" part_fair = (num_coins - num_unfair) / num_coins part_unfair = num_unfair / num_coins 
prob_fair = 0.5 ** heads_needed prob_unfair = (percent_unfair / 100) ** heads_needed return part_fair * prob_fair + part_unfair * prob_unfair def herons_formula(a, b, c): """Calculate area of triangle with sides a, b, and c.""" print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2') s = (a + b + c) / 2 return math.sqrt(s * (s - a) * (s - b) * (s - c)) def area_equilat(side): """Area of equilateral triangle.""" return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2) def inv(a, b, c, d): """Inverse of 2x2 matrix.""" det = a * d - b * c m = lambda x: fractions.Fraction(x, det) return map(str, map(m, [d, -b, -c, a])) def det2(m): """Determinant of 2x2 matrix.""" (a, b), (c, d) = m return a * d - b * c def det3(m): """Determinant of 3x3 matrix.""" a, b, c = m[0] da = det2([m[1][1:], m[2][1:]]) db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]]) dc = det2([m[1][:2], m[2][:2]]) return a * da - b * db + c * dc def series(r, n): """Calculate geometric series.""" return (1 - r ** n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b ** 2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return x1, x2 def master_method(a, b, d): """Estimate Complexity using Master Method, print result.""" if a == b ** d: print('Case 1: a = b^d') print('-> O(n^%d log n)' % d) elif a < b ** d: print('Case 2: a < b^d') print('-> O(n^%d)' % d) elif a > b ** d: print('Case 3: a > b^d') print('-> O(n^log%d(%d))' % (b, a)) print(' = O(n^%.2f)' % math.log(a, b)) <|reserved_special_token_1|> """Some random mathematical helper functions. """ from __future__ import division, print_function import math # STATISTICS def mean(L): """Calculate mean of given List""" return sum(L) / len(L) def variance(L, is_sample=0): """calculate variance (or sample variance) of given List""" m = mean(L) return sum((x-m)**2 for x in L) / (len(L) - is_sample) def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev # COMBINATORICS def fac(n): assert n >= 0 return n if n <= 2 else fac(n - 1) * n def over(n, k): """n over k""" return fac(n) // fac(n-k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed): """Calculate probability for pulling a coin from a bag with fair and unfair coins and flipping it a number of times, each time coming up heads.""" part_fair = (num_coins - num_unfair) / num_coins part_unfair = num_unfair / num_coins prob_fair = 0.5**heads_needed prob_unfair = (percent_unfair / 100)**heads_needed return part_fair * prob_fair + part_unfair * prob_unfair # GEOMETRY def herons_formula(a, b, c): """Calculate area of triangle with sides a, b, and c.""" print("sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2") s = (a + b + c) / 2 return math.sqrt(s * (s-a) * (s-b) * (s-c)) def area_equilat(side): """Area of equilateral triangle.""" return side/2 * math.sqrt(side**2 - (side/2)**2) # LINEAR ALGEBRA def inv(a,b,c,d): """Inverse of 2x2 
matrix.""" det = a*d-b*c m = lambda x: fractions.Fraction(x, det) return map(str, map(m, [d, -b, -c, a])) def det2(m): """Determinant of 2x2 matrix.""" (a,b), (c,d) = m return a*d - b*c def det3(m): """Determinant of 3x3 matrix.""" a, b, c = m[0] da = det2([ m[1][1:] , m[2][1:]]) db = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]]) dc = det2([ m[1][:2] , m[2][:2]]) return a*da - b*db + c*dc # SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER def series(r, n): """Calculate geometric series.""" return (1 - r**n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b**2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return (x1, x2) def master_method(a, b, d): """Estimate Complexity using Master Method, print result.""" if a == b**d: print("Case 1: a = b^d") print("-> O(n^%d log n)" % d) elif a < b**d: print("Case 2: a < b^d") print("-> O(n^%d)" % d) elif a > b**d: print("Case 3: a > b^d") print("-> O(n^log%d(%d))" % (b, a)) print(" = O(n^%.2f)" % math.log(a, b))
flexible
{ "blob_id": "34acb6da1dc9403a311ce3bca0a828a77b7b36da", "index": 7403, "step-1": "<mask token>\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\n<mask token>\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n 
print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-3": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-4": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair 
coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n \"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n part_fair = (num_coins - num_unfair) / num_coins\n part_unfair = num_unfair / num_coins\n prob_fair = 0.5 ** heads_needed\n prob_unfair = (percent_unfair / 100) ** heads_needed\n return part_fair * prob_fair + part_unfair * prob_unfair\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\ndef inv(a, b, c, d):\n \"\"\"Inverse of 2x2 matrix.\"\"\"\n det = a * d - b * c\n m = lambda x: fractions.Fraction(x, det)\n return map(str, map(m, [d, -b, -c, a]))\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-5": "\"\"\"Some random mathematical helper functions.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport math\n\n\n# STATISTICS\n\ndef mean(L):\n\t\"\"\"Calculate mean of given List\"\"\"\n\treturn sum(L) / len(L)\n\t\ndef variance(L, is_sample=0):\n\t\"\"\"calculate variance (or sample variance) of given List\"\"\"\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)\n\t\ndef std_dev(L, is_sample=0):\n\t\"\"\"calculate standard deviation of given List\"\"\"\n\treturn math.sqrt(variance(L, is_sample))\n\ndef z_score(num, mean, std_dev):\n\t\"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n\treturn (num - mean) / std_dev\n\n\n# COMBINATORICS\n\ndef fac(n):\n\tassert n >= 0\n\treturn n if n <= 2 else fac(n - 1) * n\n\ndef over(n, k):\n\t\"\"\"n over k\"\"\"\n\treturn fac(n) // fac(n-k)\n\ndef coin(coins, heads):\n\t\"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n\treturn Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c)\n\ndef pick_grom_group(group, other, selected):\n\t\"\"\"When selecting 'selected' number of individuums from 'group' and 
'other',\n\treturn probability that all are from 'group'.\"\"\"\n\treturn Faction(over(group, selected), over(group + other, selected))\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n\t\"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n\tpart_fair = (num_coins - num_unfair) / num_coins\n\tpart_unfair = num_unfair / num_coins\n\tprob_fair = 0.5**heads_needed\n\tprob_unfair = (percent_unfair / 100)**heads_needed\n\treturn part_fair * prob_fair + part_unfair * prob_unfair\n\n\n# GEOMETRY\n\ndef herons_formula(a, b, c):\n\t\"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n\tprint(\"sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2\")\n\ts = (a + b + c) / 2\n\treturn math.sqrt(s * (s-a) * (s-b) * (s-c))\n\t\ndef area_equilat(side):\n\t\"\"\"Area of equilateral triangle.\"\"\"\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)\n\n\n# LINEAR ALGEBRA\n\ndef inv(a,b,c,d):\n\t\"\"\"Inverse of 2x2 matrix.\"\"\"\n\tdet = a*d-b*c\n\tm = lambda x: fractions.Fraction(x, det)\n\treturn map(str, map(m, [d, -b, -c, a]))\n\ndef det2(m):\n\t\"\"\"Determinant of 2x2 matrix.\"\"\"\n\t(a,b), (c,d) = m\n\treturn a*d - b*c\n\ndef det3(m):\n\t\"\"\"Determinant of 3x3 matrix.\"\"\"\n\ta, b, c = m[0]\n\tda = det2([ m[1][1:] , m[2][1:]])\n\tdb = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]])\n\tdc = det2([ m[1][:2] , m[2][:2]])\n\treturn a*da - b*db + c*dc\n\n\n# SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER\n\ndef series(r, n):\n\t\"\"\"Calculate geometric series.\"\"\"\n\treturn (1 - r**n) / (1 - r)\n\ndef quad_form(a, b, c):\n\t\"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n\tsq = math.sqrt(b**2 - 4 * a * c)\n\tx1 = (-b - sq) / (2 * a)\n\tx2 = (-b + sq) / (2 * a)\n\treturn (x1, x2)\n\ndef master_method(a, b, d):\n\t\"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n\tif a == b**d:\n\t\tprint(\"Case 1: a = b^d\")\n\t\tprint(\"-> O(n^%d log n)\" % d)\n\telif a < b**d:\n\t\tprint(\"Case 2: a < b^d\")\n\t\tprint(\"-> O(n^%d)\" % d)\n\telif a > b**d:\n\t\tprint(\"Case 3: a > b^d\")\n\t\tprint(\"-> O(n^log%d(%d))\" % (b, a))\n\t\tprint(\" = O(n^%.2f)\" % math.log(a, b))\n\n", "step-ids": [ 7, 14, 15, 17, 19 ] }
[ 7, 14, 15, 17, 19 ]
<|reserved_special_token_0|> def cauchy_model(x, a, loc, scale, y0): return a * cauchy.pdf(x, loc, scale) + y0 def cauchy_fit(x, y, d): if d is -1: a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmin(y)] scale0 = (max(x) - min(x)) / 10 y00 = max(y) elif d is 1: a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmax(y)] scale0 = (max(x) - min(x)) / 10 y00 = min(y) else: a0 = 1 loc0 = np.mean(x) scale0 = (max(x) - min(x)) / 10 y00 = 1 p0 = [a0, loc0, scale0, y00] print(p0) popt, pcov = curve_fit(cauchy_model, x, y, p0) print('Center Frequency is : ', popt[1] * 1e-06, ' MHz') print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz') print('Q is : ', popt[1] / (2 * popt[2])) return popt def mw_fscan(fname, d, ax, plotting=True): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False, header=None, names=['f', 'b', 's', 'r']) data.sort_values(by='f', inplace=True) data['sig'] = data['s'] - data['b'] data['ref'] = data['r'] - data['b'] data['nrm'] = data['sig'] / data['ref'] data['nrm'] = data['nrm'] popt = cauchy_fit(data['f'].values, data['nrm'].values, d) if plotting is True: data.plot(x='f', y='nrm', ax=ax) ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt)) ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[ 'f'].values, *popt)) return data def cavity_resonances(): """Using the dye laser at -180 GHz, the MW f is scanned over the cavity resonances, finding center, FWHM, and Q values.""" fig, axes = plt.subplots() folder = os.path.join('..', '2018-09-29') fname = '3_fscan.txt' fname = os.path.join(folder, fname) mw_fscan(fname, -1, axes) axes.axhline(0.9, c='k') fig.tight_layout() return <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def limit_scan(fname, ax): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False) data['sig'] = data['s'] - data['sb'] data.sort_values(by='f', inplace=True) data.plot(x='f', y='sig', ax=ax) return def limit(): """Using the HP 214B, see what the DIL is.""" fig, ax = plt.subplots() fname = '1_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) fname = '2_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) return def cauchy_model(x, a, loc, scale, y0): return a * cauchy.pdf(x, loc, scale) + y0 def cauchy_fit(x, y, d): if d is -1: a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmin(y)] scale0 = (max(x) - min(x)) / 10 y00 = max(y) elif d is 1: a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmax(y)] scale0 = (max(x) - min(x)) / 10 y00 = min(y) else: a0 = 1 loc0 = np.mean(x) scale0 = (max(x) - min(x)) / 10 y00 = 1 p0 = [a0, loc0, scale0, y00] print(p0) popt, pcov = curve_fit(cauchy_model, x, y, p0) print('Center Frequency is : ', popt[1] * 1e-06, ' MHz') print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz') print('Q is : ', popt[1] / (2 * popt[2])) return popt def mw_fscan(fname, d, ax, plotting=True): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False, header=None, names=['f', 'b', 's', 'r']) data.sort_values(by='f', inplace=True) data['sig'] = data['s'] - data['b'] data['ref'] = data['r'] - data['b'] data['nrm'] = data['sig'] / data['ref'] data['nrm'] = data['nrm'] popt = cauchy_fit(data['f'].values, data['nrm'].values, d) if plotting is True: data.plot(x='f', y='nrm', ax=ax) ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt)) ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[ 
'f'].values, *popt)) return data def cavity_resonances(): """Using the dye laser at -180 GHz, the MW f is scanned over the cavity resonances, finding center, FWHM, and Q values.""" fig, axes = plt.subplots() folder = os.path.join('..', '2018-09-29') fname = '3_fscan.txt' fname = os.path.join(folder, fname) mw_fscan(fname, -1, axes) axes.axhline(0.9, c='k') fig.tight_layout() return def mwion_scan(): """Take ratios of MW on / MW off to get ionization rate at different values of the Variable Attenuator""" fig, ax = plt.subplots() fname = '4_mwion_blnk.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) data = pd.read_csv(fname, sep='\t', comment='#') data['r'] = data['s1'] / data['s2'] data['f'] = np.power(10, data['d'] / 20) data.sort_values(by='f', inplace=True) data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz') return <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def limit_scan(fname, ax): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False) data['sig'] = data['s'] - data['sb'] data.sort_values(by='f', inplace=True) data.plot(x='f', y='sig', ax=ax) return def limit(): """Using the HP 214B, see what the DIL is.""" fig, ax = plt.subplots() fname = '1_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) fname = '2_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) return def cauchy_model(x, a, loc, scale, y0): return a * cauchy.pdf(x, loc, scale) + y0 def cauchy_fit(x, y, d): if d is -1: a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmin(y)] scale0 = (max(x) - min(x)) / 10 y00 = max(y) elif d is 1: a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmax(y)] scale0 = (max(x) - min(x)) / 10 y00 = min(y) else: a0 = 1 loc0 = np.mean(x) scale0 = (max(x) - min(x)) / 10 y00 = 1 p0 = [a0, loc0, scale0, y00] print(p0) popt, pcov = curve_fit(cauchy_model, x, y, p0) print('Center Frequency is : ', popt[1] * 1e-06, ' MHz') print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz') print('Q is : ', popt[1] / (2 * popt[2])) return popt def mw_fscan(fname, d, ax, plotting=True): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False, header=None, names=['f', 'b', 's', 'r']) data.sort_values(by='f', inplace=True) data['sig'] = data['s'] - data['b'] data['ref'] = data['r'] - data['b'] data['nrm'] = data['sig'] / data['ref'] data['nrm'] = data['nrm'] popt = cauchy_fit(data['f'].values, data['nrm'].values, d) if plotting is True: data.plot(x='f', y='nrm', ax=ax) ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt)) ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[ 'f'].values, *popt)) return data def cavity_resonances(): """Using the dye laser at -180 GHz, the MW f is scanned over the cavity resonances, finding center, FWHM, and Q values.""" fig, axes = plt.subplots() folder = os.path.join('..', '2018-09-29') fname = '3_fscan.txt' fname = os.path.join(folder, fname) mw_fscan(fname, -1, axes) axes.axhline(0.9, c='k') fig.tight_layout() return def mwion_scan(): """Take ratios of MW on / MW off to get ionization rate at different values of the Variable Attenuator""" fig, ax = plt.subplots() fname = '4_mwion_blnk.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) data = pd.read_csv(fname, sep='\t', comment='#') data['r'] = data['s1'] / data['s2'] data['f'] = np.power(10, data['d'] / 20) data.sort_values(by='f', 
inplace=True) data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz') return if __name__ == '__main__': mwion_scan() <|reserved_special_token_1|> <|reserved_special_token_0|> import os import numpy as np from scipy.stats import cauchy from scipy.optimize import curve_fit import matplotlib.pyplot as plt import pandas as pd def limit_scan(fname, ax): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False) data['sig'] = data['s'] - data['sb'] data.sort_values(by='f', inplace=True) data.plot(x='f', y='sig', ax=ax) return def limit(): """Using the HP 214B, see what the DIL is.""" fig, ax = plt.subplots() fname = '1_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) fname = '2_lim_dye.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) limit_scan(fname, ax) return def cauchy_model(x, a, loc, scale, y0): return a * cauchy.pdf(x, loc, scale) + y0 def cauchy_fit(x, y, d): if d is -1: a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmin(y)] scale0 = (max(x) - min(x)) / 10 y00 = max(y) elif d is 1: a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10 loc0 = x[np.argmax(y)] scale0 = (max(x) - min(x)) / 10 y00 = min(y) else: a0 = 1 loc0 = np.mean(x) scale0 = (max(x) - min(x)) / 10 y00 = 1 p0 = [a0, loc0, scale0, y00] print(p0) popt, pcov = curve_fit(cauchy_model, x, y, p0) print('Center Frequency is : ', popt[1] * 1e-06, ' MHz') print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz') print('Q is : ', popt[1] / (2 * popt[2])) return popt def mw_fscan(fname, d, ax, plotting=True): data = pd.read_csv(fname, sep='\t', comment='#', index_col=False, header=None, names=['f', 'b', 's', 'r']) data.sort_values(by='f', inplace=True) data['sig'] = data['s'] - data['b'] data['ref'] = data['r'] - data['b'] data['nrm'] = data['sig'] / data['ref'] data['nrm'] = data['nrm'] popt = cauchy_fit(data['f'].values, data['nrm'].values, d) if plotting is True: data.plot(x='f', y='nrm', ax=ax) ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt)) ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[ 'f'].values, *popt)) return data def cavity_resonances(): """Using the dye laser at -180 GHz, the MW f is scanned over the cavity resonances, finding center, FWHM, and Q values.""" fig, axes = plt.subplots() folder = os.path.join('..', '2018-09-29') fname = '3_fscan.txt' fname = os.path.join(folder, fname) mw_fscan(fname, -1, axes) axes.axhline(0.9, c='k') fig.tight_layout() return def mwion_scan(): """Take ratios of MW on / MW off to get ionization rate at different values of the Variable Attenuator""" fig, ax = plt.subplots() fname = '4_mwion_blnk.txt' folder = os.path.join('..', '2018-09-29') fname = os.path.join(folder, fname) data = pd.read_csv(fname, sep='\t', comment='#') data['r'] = data['s1'] / data['s2'] data['f'] = np.power(10, data['d'] / 20) data.sort_values(by='f', inplace=True) data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz') return if __name__ == '__main__': mwion_scan() <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Sat Sep 29 19:10:06 2018 @author: labuser """ # 2018-09-29 import os import numpy as np from scipy.stats import cauchy from scipy.optimize import curve_fit import matplotlib.pyplot as plt import pandas as pd def limit_scan(fname, ax): data = pd.read_csv(fname, sep='\t', comment="#", index_col=False) data['sig'] = data['s'] - data['sb'] data.sort_values(by='f', inplace=True) data.plot(x='f', y='sig', ax=ax) return def limit(): """Using the 
HP 214B, see what the DIL is.""" fig, ax = plt.subplots() fname = "1_lim_dye.txt" folder = os.path.join("..", "2018-09-29") fname = os.path.join(folder, fname) limit_scan(fname, ax) fname = "2_lim_dye.txt" folder = os.path.join("..", "2018-09-29") fname = os.path.join(folder, fname) limit_scan(fname, ax) return def cauchy_model(x, a, loc, scale, y0): return a*cauchy.pdf(x, loc, scale) + y0 def cauchy_fit(x, y, d): if d is -1: a0 = -(max(y) - min(y))*(max(x) - min(x))/10 loc0 = x[np.argmin(y)] scale0 = (max(x) - min(x))/10 y00 = max(y) elif d is 1: a0 = (max(y) - min(y))*(max(x) - min(x))/10 loc0 = x[np.argmax(y)] scale0 = (max(x) - min(x))/10 y00 = min(y) else: a0 = 1 loc0 = np.mean(x) scale0 = (max(x) - min(x))/10 y00 = 1 p0 = [a0, loc0, scale0, y00] print(p0) popt, pcov = curve_fit(cauchy_model, x, y, p0) print("Center Frequency is : ", popt[1]*1e-6, " MHz") print("FWHM is : ", 2*popt[2]*1e-6, " MHz") print("Q is : ", popt[1]/(2*popt[2])) return popt def mw_fscan(fname, d, ax, plotting=True): data = pd.read_csv(fname, sep="\t", comment="#", index_col=False, header=None, names=['f', 'b', 's', 'r']) data.sort_values(by='f', inplace=True) data['sig'] = data['s'] - data['b'] data['ref'] = data['r'] - data['b'] data['nrm'] = data['sig'] / data['ref'] # norm by signal / reference data['nrm'] = data['nrm'] popt = cauchy_fit(data['f'].values, data['nrm'].values, d) # print(popt) if plotting is True: data.plot(x='f', y='nrm', ax=ax) ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt)) ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data['f'].values, *popt)) return data def cavity_resonances(): """Using the dye laser at -180 GHz, the MW f is scanned over the cavity resonances, finding center, FWHM, and Q values.""" fig, axes = plt.subplots() folder = os.path.join("..", "2018-09-29") fname = "3_fscan.txt" fname = os.path.join(folder, fname) mw_fscan(fname, -1, axes) axes.axhline(0.9, c='k') fig.tight_layout() return def mwion_scan(): """Take ratios of MW on / MW off to get ionization rate at different values of the Variable Attenuator""" fig, ax = plt.subplots() # Data from 2018-09-27, using the SFIP fname = "4_mwion_blnk.txt" # -180 GHz folder = os.path.join("..", "2018-09-29") fname = os.path.join(folder, fname) data = pd.read_csv(fname, sep="\t", comment="#") data['r'] = data['s1']/data['s2'] data['f'] = np.power(10, data['d']/20) # field equivalent data.sort_values(by='f', inplace=True) data.plot(x='f', y='r', marker='v', ax=ax, label="-180 GHz") return if __name__ == "__main__": # limit() # cavity_resonances() mwion_scan()
flexible
{ "blob_id": "aee8fa7bc1426945d61421fc72732e43ddadafa1", "index": 3191, "step-1": "<mask token>\n\n\ndef cauchy_model(x, a, loc, scale, y0):\n return a * cauchy.pdf(x, loc, scale) + y0\n\n\ndef cauchy_fit(x, y, d):\n if d is -1:\n a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmin(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = max(y)\n elif d is 1:\n a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmax(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = min(y)\n else:\n a0 = 1\n loc0 = np.mean(x)\n scale0 = (max(x) - min(x)) / 10\n y00 = 1\n p0 = [a0, loc0, scale0, y00]\n print(p0)\n popt, pcov = curve_fit(cauchy_model, x, y, p0)\n print('Center Frequency is : ', popt[1] * 1e-06, ' MHz')\n print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz')\n print('Q is : ', popt[1] / (2 * popt[2]))\n return popt\n\n\ndef mw_fscan(fname, d, ax, plotting=True):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False,\n header=None, names=['f', 'b', 's', 'r'])\n data.sort_values(by='f', inplace=True)\n data['sig'] = data['s'] - data['b']\n data['ref'] = data['r'] - data['b']\n data['nrm'] = data['sig'] / data['ref']\n data['nrm'] = data['nrm']\n popt = cauchy_fit(data['f'].values, data['nrm'].values, d)\n if plotting is True:\n data.plot(x='f', y='nrm', ax=ax)\n ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))\n ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[\n 'f'].values, *popt))\n return data\n\n\ndef cavity_resonances():\n \"\"\"Using the dye laser at -180 GHz, the MW f is scanned over the\n cavity resonances, finding center, FWHM, and Q values.\"\"\"\n fig, axes = plt.subplots()\n folder = os.path.join('..', '2018-09-29')\n fname = '3_fscan.txt'\n fname = os.path.join(folder, fname)\n mw_fscan(fname, -1, axes)\n axes.axhline(0.9, c='k')\n fig.tight_layout()\n return\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef limit_scan(fname, ax):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False)\n data['sig'] = data['s'] - data['sb']\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='sig', ax=ax)\n return\n\n\ndef limit():\n \"\"\"Using the HP 214B, see what the DIL is.\"\"\"\n fig, ax = plt.subplots()\n fname = '1_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n fname = '2_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n return\n\n\ndef cauchy_model(x, a, loc, scale, y0):\n return a * cauchy.pdf(x, loc, scale) + y0\n\n\ndef cauchy_fit(x, y, d):\n if d is -1:\n a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmin(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = max(y)\n elif d is 1:\n a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmax(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = min(y)\n else:\n a0 = 1\n loc0 = np.mean(x)\n scale0 = (max(x) - min(x)) / 10\n y00 = 1\n p0 = [a0, loc0, scale0, y00]\n print(p0)\n popt, pcov = curve_fit(cauchy_model, x, y, p0)\n print('Center Frequency is : ', popt[1] * 1e-06, ' MHz')\n print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz')\n print('Q is : ', popt[1] / (2 * popt[2]))\n return popt\n\n\ndef mw_fscan(fname, d, ax, plotting=True):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False,\n header=None, names=['f', 'b', 's', 'r'])\n data.sort_values(by='f', inplace=True)\n data['sig'] = data['s'] - data['b']\n data['ref'] = data['r'] - data['b']\n data['nrm'] = data['sig'] / 
data['ref']\n data['nrm'] = data['nrm']\n popt = cauchy_fit(data['f'].values, data['nrm'].values, d)\n if plotting is True:\n data.plot(x='f', y='nrm', ax=ax)\n ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))\n ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[\n 'f'].values, *popt))\n return data\n\n\ndef cavity_resonances():\n \"\"\"Using the dye laser at -180 GHz, the MW f is scanned over the\n cavity resonances, finding center, FWHM, and Q values.\"\"\"\n fig, axes = plt.subplots()\n folder = os.path.join('..', '2018-09-29')\n fname = '3_fscan.txt'\n fname = os.path.join(folder, fname)\n mw_fscan(fname, -1, axes)\n axes.axhline(0.9, c='k')\n fig.tight_layout()\n return\n\n\ndef mwion_scan():\n \"\"\"Take ratios of MW on / MW off to get ionization rate at different values\n of the Variable Attenuator\"\"\"\n fig, ax = plt.subplots()\n fname = '4_mwion_blnk.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n data = pd.read_csv(fname, sep='\\t', comment='#')\n data['r'] = data['s1'] / data['s2']\n data['f'] = np.power(10, data['d'] / 20)\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz')\n return\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef limit_scan(fname, ax):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False)\n data['sig'] = data['s'] - data['sb']\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='sig', ax=ax)\n return\n\n\ndef limit():\n \"\"\"Using the HP 214B, see what the DIL is.\"\"\"\n fig, ax = plt.subplots()\n fname = '1_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n fname = '2_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n return\n\n\ndef cauchy_model(x, a, loc, scale, y0):\n return a * cauchy.pdf(x, loc, scale) + y0\n\n\ndef cauchy_fit(x, y, d):\n if d is -1:\n a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmin(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = max(y)\n elif d is 1:\n a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmax(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = min(y)\n else:\n a0 = 1\n loc0 = np.mean(x)\n scale0 = (max(x) - min(x)) / 10\n y00 = 1\n p0 = [a0, loc0, scale0, y00]\n print(p0)\n popt, pcov = curve_fit(cauchy_model, x, y, p0)\n print('Center Frequency is : ', popt[1] * 1e-06, ' MHz')\n print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz')\n print('Q is : ', popt[1] / (2 * popt[2]))\n return popt\n\n\ndef mw_fscan(fname, d, ax, plotting=True):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False,\n header=None, names=['f', 'b', 's', 'r'])\n data.sort_values(by='f', inplace=True)\n data['sig'] = data['s'] - data['b']\n data['ref'] = data['r'] - data['b']\n data['nrm'] = data['sig'] / data['ref']\n data['nrm'] = data['nrm']\n popt = cauchy_fit(data['f'].values, data['nrm'].values, d)\n if plotting is True:\n data.plot(x='f', y='nrm', ax=ax)\n ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))\n ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[\n 'f'].values, *popt))\n return data\n\n\ndef cavity_resonances():\n \"\"\"Using the dye laser at -180 GHz, the MW f is scanned over the\n cavity resonances, finding center, FWHM, and Q values.\"\"\"\n fig, axes = plt.subplots()\n folder = os.path.join('..', '2018-09-29')\n fname = '3_fscan.txt'\n fname = os.path.join(folder, 
fname)\n mw_fscan(fname, -1, axes)\n axes.axhline(0.9, c='k')\n fig.tight_layout()\n return\n\n\ndef mwion_scan():\n \"\"\"Take ratios of MW on / MW off to get ionization rate at different values\n of the Variable Attenuator\"\"\"\n fig, ax = plt.subplots()\n fname = '4_mwion_blnk.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n data = pd.read_csv(fname, sep='\\t', comment='#')\n data['r'] = data['s1'] / data['s2']\n data['f'] = np.power(10, data['d'] / 20)\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz')\n return\n\n\nif __name__ == '__main__':\n mwion_scan()\n", "step-4": "<mask token>\nimport os\nimport numpy as np\nfrom scipy.stats import cauchy\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef limit_scan(fname, ax):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False)\n data['sig'] = data['s'] - data['sb']\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='sig', ax=ax)\n return\n\n\ndef limit():\n \"\"\"Using the HP 214B, see what the DIL is.\"\"\"\n fig, ax = plt.subplots()\n fname = '1_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n fname = '2_lim_dye.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n return\n\n\ndef cauchy_model(x, a, loc, scale, y0):\n return a * cauchy.pdf(x, loc, scale) + y0\n\n\ndef cauchy_fit(x, y, d):\n if d is -1:\n a0 = -(max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmin(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = max(y)\n elif d is 1:\n a0 = (max(y) - min(y)) * (max(x) - min(x)) / 10\n loc0 = x[np.argmax(y)]\n scale0 = (max(x) - min(x)) / 10\n y00 = min(y)\n else:\n a0 = 1\n loc0 = np.mean(x)\n scale0 = (max(x) - min(x)) / 10\n y00 = 1\n p0 = [a0, loc0, scale0, y00]\n print(p0)\n popt, pcov = curve_fit(cauchy_model, x, y, p0)\n print('Center Frequency is : ', popt[1] * 1e-06, ' MHz')\n print('FWHM is : ', 2 * popt[2] * 1e-06, ' MHz')\n print('Q is : ', popt[1] / (2 * popt[2]))\n return popt\n\n\ndef mw_fscan(fname, d, ax, plotting=True):\n data = pd.read_csv(fname, sep='\\t', comment='#', index_col=False,\n header=None, names=['f', 'b', 's', 'r'])\n data.sort_values(by='f', inplace=True)\n data['sig'] = data['s'] - data['b']\n data['ref'] = data['r'] - data['b']\n data['nrm'] = data['sig'] / data['ref']\n data['nrm'] = data['nrm']\n popt = cauchy_fit(data['f'].values, data['nrm'].values, d)\n if plotting is True:\n data.plot(x='f', y='nrm', ax=ax)\n ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))\n ax.plot(data['f'].values, data['nrm'].values - cauchy_model(data[\n 'f'].values, *popt))\n return data\n\n\ndef cavity_resonances():\n \"\"\"Using the dye laser at -180 GHz, the MW f is scanned over the\n cavity resonances, finding center, FWHM, and Q values.\"\"\"\n fig, axes = plt.subplots()\n folder = os.path.join('..', '2018-09-29')\n fname = '3_fscan.txt'\n fname = os.path.join(folder, fname)\n mw_fscan(fname, -1, axes)\n axes.axhline(0.9, c='k')\n fig.tight_layout()\n return\n\n\ndef mwion_scan():\n \"\"\"Take ratios of MW on / MW off to get ionization rate at different values\n of the Variable Attenuator\"\"\"\n fig, ax = plt.subplots()\n fname = '4_mwion_blnk.txt'\n folder = os.path.join('..', '2018-09-29')\n fname = os.path.join(folder, fname)\n data = pd.read_csv(fname, sep='\\t', comment='#')\n data['r'] = data['s1'] 
/ data['s2']\n data['f'] = np.power(10, data['d'] / 20)\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='r', marker='v', ax=ax, label='-180 GHz')\n return\n\n\nif __name__ == '__main__':\n mwion_scan()\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 19:10:06 2018\n\n@author: labuser\n\"\"\"\n\n\n# 2018-09-29\n\nimport os\nimport numpy as np\nfrom scipy.stats import cauchy\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef limit_scan(fname, ax):\n data = pd.read_csv(fname, sep='\\t', comment=\"#\", index_col=False)\n data['sig'] = data['s'] - data['sb']\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='sig', ax=ax)\n return\n\n\ndef limit():\n \"\"\"Using the HP 214B, see what the DIL is.\"\"\"\n fig, ax = plt.subplots()\n fname = \"1_lim_dye.txt\"\n folder = os.path.join(\"..\", \"2018-09-29\")\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n fname = \"2_lim_dye.txt\"\n folder = os.path.join(\"..\", \"2018-09-29\")\n fname = os.path.join(folder, fname)\n limit_scan(fname, ax)\n return\n\n\ndef cauchy_model(x, a, loc, scale, y0):\n return a*cauchy.pdf(x, loc, scale) + y0\n\n\ndef cauchy_fit(x, y, d):\n if d is -1:\n a0 = -(max(y) - min(y))*(max(x) - min(x))/10\n loc0 = x[np.argmin(y)]\n scale0 = (max(x) - min(x))/10\n y00 = max(y)\n elif d is 1:\n a0 = (max(y) - min(y))*(max(x) - min(x))/10\n loc0 = x[np.argmax(y)]\n scale0 = (max(x) - min(x))/10\n y00 = min(y)\n else:\n a0 = 1\n loc0 = np.mean(x)\n scale0 = (max(x) - min(x))/10\n y00 = 1\n p0 = [a0, loc0, scale0, y00]\n print(p0)\n popt, pcov = curve_fit(cauchy_model, x, y, p0)\n print(\"Center Frequency is : \", popt[1]*1e-6, \" MHz\")\n print(\"FWHM is : \", 2*popt[2]*1e-6, \" MHz\")\n print(\"Q is : \", popt[1]/(2*popt[2]))\n return popt\n\n\ndef mw_fscan(fname, d, ax, plotting=True):\n data = pd.read_csv(fname, sep=\"\\t\", comment=\"#\", index_col=False,\n header=None, names=['f', 'b', 's', 'r'])\n data.sort_values(by='f', inplace=True)\n data['sig'] = data['s'] - data['b']\n data['ref'] = data['r'] - data['b']\n data['nrm'] = data['sig'] / data['ref'] # norm by signal / reference\n data['nrm'] = data['nrm']\n popt = cauchy_fit(data['f'].values, data['nrm'].values, d)\n # print(popt)\n if plotting is True:\n data.plot(x='f', y='nrm', ax=ax)\n ax.plot(data['f'].values, cauchy_model(data['f'].values, *popt))\n ax.plot(data['f'].values,\n data['nrm'].values - cauchy_model(data['f'].values, *popt))\n return data\n\n\ndef cavity_resonances():\n \"\"\"Using the dye laser at -180 GHz, the MW f is scanned over the\n cavity resonances, finding center, FWHM, and Q values.\"\"\"\n fig, axes = plt.subplots()\n folder = os.path.join(\"..\", \"2018-09-29\")\n fname = \"3_fscan.txt\"\n fname = os.path.join(folder, fname)\n mw_fscan(fname, -1, axes)\n axes.axhline(0.9, c='k')\n fig.tight_layout()\n return\n\n\ndef mwion_scan():\n \"\"\"Take ratios of MW on / MW off to get ionization rate at different values\n of the Variable Attenuator\"\"\"\n fig, ax = plt.subplots()\n # Data from 2018-09-27, using the SFIP\n fname = \"4_mwion_blnk.txt\" # -180 GHz\n folder = os.path.join(\"..\", \"2018-09-29\")\n fname = os.path.join(folder, fname)\n data = pd.read_csv(fname, sep=\"\\t\", comment=\"#\")\n data['r'] = data['s1']/data['s2']\n data['f'] = np.power(10, data['d']/20) # field equivalent\n data.sort_values(by='f', inplace=True)\n data.plot(x='f', y='r', marker='v', ax=ax, label=\"-180 GHz\")\n return\n\n\nif __name__ == \"__main__\":\n # 
limit()\n # cavity_resonances()\n mwion_scan()\n", "step-ids": [ 4, 7, 8, 9, 10 ] }
[ 4, 7, 8, 9, 10 ]
<|reserved_special_token_0|> def get(restaurant_id): with thrift_client('ers') as ers: cert = ers.get_restaurant_certification(restaurant_id) cert.comment = cert.comment.encode('utf-8') return cert <|reserved_special_token_0|> def add(cert): with thrift_client('ers') as ers: ers.add_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert .type == TYPE_CERT_PERSONAL else '上传企业认证信息') return '' def update(cert): with thrift_client('ers') as ers: db_cert = ers.get_restaurant_certification(cert.restaurant_id) if not db_cert: raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id) with thrift_client('ers') as ers: ers.update_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, cert.status, STATUS_PENDING, comment='修改认证信息') return '' <|reserved_special_token_0|> def get_latest_record(restaurant_id): nopass_record = record_process_base.get_latest_record(restaurant_id) comment = '' cert_status = CERTIFICATION_NOT_EXIST if nopass_record: comment = nopass_record.comment cert_status = nopass_record.status_to return comment, cert_status <|reserved_special_token_1|> <|reserved_special_token_0|> def get(restaurant_id): with thrift_client('ers') as ers: cert = ers.get_restaurant_certification(restaurant_id) cert.comment = cert.comment.encode('utf-8') return cert def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE): limit = 250 with thrift_client('ers') as ers: return ers.query_restaurant_certification_by_status(status, offset, limit) def add(cert): with thrift_client('ers') as ers: ers.add_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert .type == TYPE_CERT_PERSONAL else '上传企业认证信息') return '' def update(cert): with thrift_client('ers') as ers: db_cert = ers.get_restaurant_certification(cert.restaurant_id) if not db_cert: raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id) with thrift_client('ers') as ers: ers.update_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, cert.status, STATUS_PENDING, comment='修改认证信息') return '' <|reserved_special_token_0|> def get_latest_record(restaurant_id): nopass_record = record_process_base.get_latest_record(restaurant_id) comment = '' cert_status = CERTIFICATION_NOT_EXIST if nopass_record: comment = nopass_record.comment cert_status = nopass_record.status_to return comment, cert_status <|reserved_special_token_1|> <|reserved_special_token_0|> def get(restaurant_id): with thrift_client('ers') as ers: cert = ers.get_restaurant_certification(restaurant_id) cert.comment = cert.comment.encode('utf-8') return cert def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE): limit = 250 with thrift_client('ers') as ers: return ers.query_restaurant_certification_by_status(status, offset, limit) def add(cert): with thrift_client('ers') as ers: ers.add_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert .type == TYPE_CERT_PERSONAL else '上传企业认证信息') return '' def update(cert): with thrift_client('ers') as ers: db_cert = ers.get_restaurant_certification(cert.restaurant_id) if not db_cert: raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id) with thrift_client('ers') as ers: ers.update_restaurant_certification(cert) 
record_process_base.add(cert.restaurant_id, cert.type, cert.status, STATUS_PENDING, comment='修改认证信息') return '' def process_certification(restaurant_id, status_to): with thrift_client('ers') as ers: ers.process_certification(current_user.id, restaurant_id, status_to) def get_latest_record(restaurant_id): nopass_record = record_process_base.get_latest_record(restaurant_id) comment = '' cert_status = CERTIFICATION_NOT_EXIST if nopass_record: comment = nopass_record.comment cert_status = nopass_record.status_to return comment, cert_status <|reserved_special_token_1|> from __future__ import print_function, division, absolute_import from flask.ext.login import current_user from . import cert_record_process as record_process_base from walis.thirdparty import thrift_client, thirdparty_svc from walis.exception.util import raise_user_exc from walis.exception.error_code import CERT_UPDATE_ERR TRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification CERTIFICATION_TYPE_NONE = 0 RESTAURANT_NOT_EXIST_ID = -1 CERTIFICATION_NOT_EXIST = -2 CertType = thirdparty_svc.ers.CertificationConst STATUS_PENDING = CertType.STATUS_PENDING STATUS_PASSED = CertType.STATUS_PASSED STATUS_FAILED = CertType.STATUS_FAILED TYPE_CERT_PERSONAL = (thirdparty_svc.ers.RestaurantConst. CERTIFICATION_TYPE_PERSONAL) TYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP def get(restaurant_id): with thrift_client('ers') as ers: cert = ers.get_restaurant_certification(restaurant_id) cert.comment = cert.comment.encode('utf-8') return cert def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE): limit = 250 with thrift_client('ers') as ers: return ers.query_restaurant_certification_by_status(status, offset, limit) def add(cert): with thrift_client('ers') as ers: ers.add_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert .type == TYPE_CERT_PERSONAL else '上传企业认证信息') return '' def update(cert): with thrift_client('ers') as ers: db_cert = ers.get_restaurant_certification(cert.restaurant_id) if not db_cert: raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id) with thrift_client('ers') as ers: ers.update_restaurant_certification(cert) record_process_base.add(cert.restaurant_id, cert.type, cert.status, STATUS_PENDING, comment='修改认证信息') return '' def process_certification(restaurant_id, status_to): with thrift_client('ers') as ers: ers.process_certification(current_user.id, restaurant_id, status_to) def get_latest_record(restaurant_id): nopass_record = record_process_base.get_latest_record(restaurant_id) comment = '' cert_status = CERTIFICATION_NOT_EXIST if nopass_record: comment = nopass_record.comment cert_status = nopass_record.status_to return comment, cert_status <|reserved_special_token_1|> #!/usr/bin/env python2 # -*- coding: utf8 -*- from __future__ import print_function, division, absolute_import from flask.ext.login import current_user from . 
import cert_record_process as record_process_base from walis.thirdparty import thrift_client, thirdparty_svc from walis.exception.util import raise_user_exc from walis.exception.error_code import CERT_UPDATE_ERR TRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification CERTIFICATION_TYPE_NONE = 0 RESTAURANT_NOT_EXIST_ID = -1 CERTIFICATION_NOT_EXIST = -2 CertType = thirdparty_svc.ers.CertificationConst STATUS_PENDING = CertType.STATUS_PENDING STATUS_PASSED = CertType.STATUS_PASSED STATUS_FAILED = CertType.STATUS_FAILED TYPE_CERT_PERSONAL = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_PERSONAL TYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP def get(restaurant_id): with thrift_client('ers') as ers: cert = ers.get_restaurant_certification(restaurant_id) cert.comment = cert.comment.encode('utf-8') return cert def get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE): limit = 250 with thrift_client('ers') as ers: return ers.query_restaurant_certification_by_status( status, offset, limit) def add(cert): with thrift_client('ers') as ers: ers.add_restaurant_certification(cert) record_process_base.add( cert.restaurant_id, cert.type, CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert.type == TYPE_CERT_PERSONAL else '上传企业认证信息') return '' def update(cert): with thrift_client('ers') as ers: db_cert = ers.get_restaurant_certification(cert.restaurant_id) if not db_cert: raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id) with thrift_client('ers') as ers: ers.update_restaurant_certification(cert) record_process_base.add( cert.restaurant_id, cert.type, cert.status, STATUS_PENDING, comment='修改认证信息') return '' def process_certification(restaurant_id, status_to): with thrift_client('ers') as ers: ers.process_certification(current_user.id, restaurant_id, status_to) def get_latest_record(restaurant_id): nopass_record = record_process_base.get_latest_record( restaurant_id) comment = '' cert_status = CERTIFICATION_NOT_EXIST if nopass_record: comment = nopass_record.comment cert_status = nopass_record.status_to return comment, cert_status
flexible
{ "blob_id": "746971cd6c5bf65268e89303c8f4ce98a56eb111", "index": 8011, "step-1": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\n<mask token>\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\n<mask token>\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n", "step-2": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\n<mask token>\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n", "step-3": "<mask token>\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if 
not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id, restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n", "step-4": "from __future__ import print_function, division, absolute_import\nfrom flask.ext.login import current_user\nfrom . import cert_record_process as record_process_base\nfrom walis.thirdparty import thrift_client, thirdparty_svc\nfrom walis.exception.util import raise_user_exc\nfrom walis.exception.error_code import CERT_UPDATE_ERR\nTRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification\nCERTIFICATION_TYPE_NONE = 0\nRESTAURANT_NOT_EXIST_ID = -1\nCERTIFICATION_NOT_EXIST = -2\nCertType = thirdparty_svc.ers.CertificationConst\nSTATUS_PENDING = CertType.STATUS_PENDING\nSTATUS_PASSED = CertType.STATUS_PASSED\nSTATUS_FAILED = CertType.STATUS_FAILED\nTYPE_CERT_PERSONAL = (thirdparty_svc.ers.RestaurantConst.\n CERTIFICATION_TYPE_PERSONAL)\nTYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(status, offset,\n limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type,\n CERTIFICATION_NOT_EXIST, STATUS_PENDING, comment='上传个人认证信息' if cert\n .type == TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n record_process_base.add(cert.restaurant_id, cert.type, cert.status,\n STATUS_PENDING, comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id, restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(restaurant_id)\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n return comment, cert_status\n", "step-5": "#!/usr/bin/env python2\n# -*- coding: utf8 -*-\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom flask.ext.login import current_user\n\nfrom . 
import cert_record_process as record_process_base\nfrom walis.thirdparty import thrift_client, thirdparty_svc\nfrom walis.exception.util import raise_user_exc\nfrom walis.exception.error_code import CERT_UPDATE_ERR\n\nTRestaurantCertification = thirdparty_svc.ers.TRestaurantCertification\n\nCERTIFICATION_TYPE_NONE = 0\nRESTAURANT_NOT_EXIST_ID = -1\nCERTIFICATION_NOT_EXIST = -2\n\nCertType = thirdparty_svc.ers.CertificationConst\nSTATUS_PENDING = CertType.STATUS_PENDING\nSTATUS_PASSED = CertType.STATUS_PASSED\nSTATUS_FAILED = CertType.STATUS_FAILED\n\nTYPE_CERT_PERSONAL = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_PERSONAL\nTYPE_CERT_CORP = thirdparty_svc.ers.RestaurantConst.CERTIFICATION_TYPE_CORP\n\n\ndef get(restaurant_id):\n with thrift_client('ers') as ers:\n cert = ers.get_restaurant_certification(restaurant_id)\n cert.comment = cert.comment.encode('utf-8')\n return cert\n\n\ndef get_by_status(status, offset=0, limit=thirdparty_svc.ers.MAX_LIST_SIZE):\n limit = 250\n with thrift_client('ers') as ers:\n return ers.query_restaurant_certification_by_status(\n status, offset, limit)\n\n\ndef add(cert):\n with thrift_client('ers') as ers:\n ers.add_restaurant_certification(cert)\n record_process_base.add(\n cert.restaurant_id,\n cert.type,\n CERTIFICATION_NOT_EXIST,\n STATUS_PENDING,\n comment='上传个人认证信息' if cert.type ==\n TYPE_CERT_PERSONAL else '上传企业认证信息')\n return ''\n\n\ndef update(cert):\n with thrift_client('ers') as ers:\n db_cert = ers.get_restaurant_certification(cert.restaurant_id)\n\n if not db_cert:\n raise_user_exc(CERT_UPDATE_ERR, restaurant_id=cert.restaurant_id)\n\n with thrift_client('ers') as ers:\n ers.update_restaurant_certification(cert)\n\n record_process_base.add(\n cert.restaurant_id,\n cert.type,\n cert.status,\n STATUS_PENDING,\n comment='修改认证信息')\n return ''\n\n\ndef process_certification(restaurant_id, status_to):\n with thrift_client('ers') as ers:\n ers.process_certification(current_user.id,\n restaurant_id, status_to)\n\n\ndef get_latest_record(restaurant_id):\n nopass_record = record_process_base.get_latest_record(\n restaurant_id)\n\n comment = ''\n cert_status = CERTIFICATION_NOT_EXIST\n if nopass_record:\n comment = nopass_record.comment\n cert_status = nopass_record.status_to\n\n return comment, cert_status\n", "step-ids": [ 4, 5, 6, 8, 9 ] }
[ 4, 5, 6, 8, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> random.seed(1) np.random.seed(1) tf.random.set_random_seed(1) <|reserved_special_token_0|> for i in range(1, 6): df = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) regions = df.columns result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test']) predict = pd.DataFrame() for region in regions: RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) RE_demand = RE_demand[region] RE_demand = pd.DataFrame(RE_demand) train_test_split = int(len(RE_demand) * 0.8) train, test = RE_demand[:train_test_split], RE_demand[train_test_split: ] scaler = RobustScaler() scaler = scaler.fit(RE_demand.values) train_scaled = scaler.transform(train) test_scaled = scaler.transform(test) history = [x for x in train_scaled] test_pred = [] for j in range(len(test_scaled)): model = ARIMA(history, order=(3, 1, 1)) model_fit = model.fit() output = model_fit.forecast() yhat = output test_pred.append(yhat) obs = test_scaled[i] history.append(obs) test_pred = np.array(test_pred) test_pred = scaler.inverse_transform(test_pred) rmse = sqrt(mean_squared_error(test, test_pred)) r2 = r2_score(test, test_pred) mae = mean_absolute_error(test, test_pred) metrics = [rmse, r2, mae] result['%s' % region] = metrics performance_path = './ARIMA/performance/' forecast = model_fit.forecast(steps=24) forecast = forecast.reshape(-1, 1) forecast = scaler.inverse_transform(forecast) test = np.array(['test']).reshape(-1, 1) pred = np.array(['forecast']).reshape(-1, 1) forecast = np.concatenate([test, test_pred, pred, forecast]) forecast = np.concatenate(forecast) predict['%s' % region] = forecast forecast_path = './ARIMA/forecast/' if not os.path.exists(performance_path): os.makedirs(performance_path) result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i) if not os.path.exists(forecast_path): os.makedirs(forecast_path) predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i) <|reserved_special_token_1|> <|reserved_special_token_0|> random.seed(1) np.random.seed(1) tf.random.set_random_seed(1) random_sample_save_folder_path = ( '../c_data_processing/b_data_sampling/sampled_data/') for i in range(1, 6): df = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) regions = df.columns result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test']) predict = pd.DataFrame() for region in regions: RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) RE_demand = RE_demand[region] RE_demand = pd.DataFrame(RE_demand) train_test_split = int(len(RE_demand) * 0.8) train, test = RE_demand[:train_test_split], RE_demand[train_test_split: ] scaler = RobustScaler() scaler = scaler.fit(RE_demand.values) train_scaled = scaler.transform(train) test_scaled = scaler.transform(test) history = [x for x in train_scaled] test_pred = [] for j in range(len(test_scaled)): model = ARIMA(history, order=(3, 1, 1)) model_fit = model.fit() output = model_fit.forecast() yhat = output test_pred.append(yhat) obs = test_scaled[i] history.append(obs) test_pred = np.array(test_pred) test_pred = scaler.inverse_transform(test_pred) rmse = sqrt(mean_squared_error(test, test_pred)) r2 = r2_score(test, test_pred) mae = mean_absolute_error(test, test_pred) metrics = [rmse, r2, mae] result['%s' % region] = metrics performance_path = './ARIMA/performance/' forecast = model_fit.forecast(steps=24) forecast 
= forecast.reshape(-1, 1) forecast = scaler.inverse_transform(forecast) test = np.array(['test']).reshape(-1, 1) pred = np.array(['forecast']).reshape(-1, 1) forecast = np.concatenate([test, test_pred, pred, forecast]) forecast = np.concatenate(forecast) predict['%s' % region] = forecast forecast_path = './ARIMA/forecast/' if not os.path.exists(performance_path): os.makedirs(performance_path) result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i) if not os.path.exists(forecast_path): os.makedirs(forecast_path) predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i) <|reserved_special_token_1|> from sklearn.preprocessing import RobustScaler from statsmodels.tsa.arima.model import ARIMA from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error from math import sqrt import tensorflow as tf import pandas as pd import numpy as np import os import random random.seed(1) np.random.seed(1) tf.random.set_random_seed(1) random_sample_save_folder_path = ( '../c_data_processing/b_data_sampling/sampled_data/') for i in range(1, 6): df = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) regions = df.columns result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test']) predict = pd.DataFrame() for region in regions: RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) RE_demand = RE_demand[region] RE_demand = pd.DataFrame(RE_demand) train_test_split = int(len(RE_demand) * 0.8) train, test = RE_demand[:train_test_split], RE_demand[train_test_split: ] scaler = RobustScaler() scaler = scaler.fit(RE_demand.values) train_scaled = scaler.transform(train) test_scaled = scaler.transform(test) history = [x for x in train_scaled] test_pred = [] for j in range(len(test_scaled)): model = ARIMA(history, order=(3, 1, 1)) model_fit = model.fit() output = model_fit.forecast() yhat = output test_pred.append(yhat) obs = test_scaled[i] history.append(obs) test_pred = np.array(test_pred) test_pred = scaler.inverse_transform(test_pred) rmse = sqrt(mean_squared_error(test, test_pred)) r2 = r2_score(test, test_pred) mae = mean_absolute_error(test, test_pred) metrics = [rmse, r2, mae] result['%s' % region] = metrics performance_path = './ARIMA/performance/' forecast = model_fit.forecast(steps=24) forecast = forecast.reshape(-1, 1) forecast = scaler.inverse_transform(forecast) test = np.array(['test']).reshape(-1, 1) pred = np.array(['forecast']).reshape(-1, 1) forecast = np.concatenate([test, test_pred, pred, forecast]) forecast = np.concatenate(forecast) predict['%s' % region] = forecast forecast_path = './ARIMA/forecast/' if not os.path.exists(performance_path): os.makedirs(performance_path) result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i) if not os.path.exists(forecast_path): os.makedirs(forecast_path) predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i) <|reserved_special_token_1|> from sklearn.preprocessing import RobustScaler from statsmodels.tsa.arima.model import ARIMA from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error from math import sqrt import tensorflow as tf import pandas as pd import numpy as np import os import random # set random seed random.seed(1) np.random.seed(1) tf.random.set_random_seed(1) random_sample_save_folder_path = '../c_data_processing/b_data_sampling/sampled_data/' for i in range(1, 6): df = pd.read_csv( random_sample_save_folder_path + 'power_demand_sample%i.csv' %i, index_col=0) regions = df.columns result = 
pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test']) predict = pd.DataFrame() for region in regions: RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) # data initialization RE_demand = RE_demand[region] RE_demand = pd.DataFrame(RE_demand) # train_test_split train_test_split = int(len(RE_demand)*0.8) train, test = RE_demand[:train_test_split], RE_demand[train_test_split:] # data scaling scaler = RobustScaler() scaler = scaler.fit(RE_demand.values) train_scaled = scaler.transform(train) test_scaled = scaler.transform(test) # model setting history = [x for x in train_scaled] test_pred = [] for j in range(len(test_scaled)): model = ARIMA(history, order=(3,1,1)) # setting (p, d, q) guide : https://www.youtube.com/watch?v=YQF5PDDI9jo&list=LL&index=5 model_fit = model.fit() output = model_fit.forecast() yhat = output test_pred.append(yhat) obs = test_scaled[i] history.append(obs) test_pred = np.array(test_pred) test_pred = scaler.inverse_transform(test_pred) # model evalutaion rmse = sqrt(mean_squared_error(test, test_pred)) r2 = r2_score(test, test_pred) mae = mean_absolute_error(test, test_pred) metrics = [rmse, r2, mae] result['%s' %region] = metrics performance_path = './ARIMA/performance/' # data forecasting forecast = model_fit.forecast(steps=24) forecast = forecast.reshape(-1,1) forecast = scaler.inverse_transform(forecast) # data concatenate test = np.array(['test']).reshape(-1, 1) pred = np.array(['forecast']).reshape(-1, 1) forecast = np.concatenate([test, test_pred, pred, forecast]) forecast = np.concatenate(forecast) predict['%s' % region] = forecast forecast_path = './ARIMA/forecast/' if not os.path.exists(performance_path): os.makedirs(performance_path) result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i) if not os.path.exists(forecast_path): os.makedirs(forecast_path) predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)
flexible
{ "blob_id": "d78ac5188cad104ee1b3e214898c41f843b6d8c0", "index": 5185, "step-1": "<mask token>\n", "step-2": "<mask token>\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\n<mask token>\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n", "step-3": "<mask token>\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\nrandom_sample_save_folder_path = (\n '../c_data_processing/b_data_sampling/sampled_data/')\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = 
mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n", "step-4": "from sklearn.preprocessing import RobustScaler\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom math import sqrt\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\nimport random\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\nrandom_sample_save_folder_path = (\n '../c_data_processing/b_data_sampling/sampled_data/')\nfor i in range(1, 6):\n df = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n regions = df.columns\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + \n 'power_demand_sample%i.csv' % i, index_col=0)\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n train_test_split = int(len(RE_demand) * 0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:\n ]\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n history = [x for x in train_scaled]\n test_pred = []\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3, 1, 1))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n metrics = [rmse, r2, mae]\n result['%s' % region] = metrics\n performance_path = './ARIMA/performance/'\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1, 1)\n forecast = scaler.inverse_transform(forecast)\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n forecast_path = './ARIMA/forecast/'\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)\n", "step-5": "from sklearn.preprocessing import RobustScaler\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error\nfrom math import sqrt\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\nimport 
random\n\n# set random seed\nrandom.seed(1)\nnp.random.seed(1)\ntf.random.set_random_seed(1)\n\nrandom_sample_save_folder_path = '../c_data_processing/b_data_sampling/sampled_data/'\nfor i in range(1, 6):\n df = pd.read_csv( random_sample_save_folder_path + 'power_demand_sample%i.csv' %i, index_col=0)\n regions = df.columns\n\n result = pd.DataFrame(index=['rmse_test', 'r2_test', 'mae_test'])\n predict = pd.DataFrame()\n\n for region in regions:\n RE_demand = pd.read_csv(random_sample_save_folder_path + 'power_demand_sample%i.csv' % i, index_col=0) # data initialization\n RE_demand = RE_demand[region]\n RE_demand = pd.DataFrame(RE_demand)\n\n\n # train_test_split\n train_test_split = int(len(RE_demand)*0.8)\n train, test = RE_demand[:train_test_split], RE_demand[train_test_split:]\n\n # data scaling\n scaler = RobustScaler()\n scaler = scaler.fit(RE_demand.values)\n\n train_scaled = scaler.transform(train)\n test_scaled = scaler.transform(test)\n\n\n # model setting\n history = [x for x in train_scaled]\n\n test_pred = []\n\n for j in range(len(test_scaled)):\n model = ARIMA(history, order=(3,1,1)) # setting (p, d, q) guide : https://www.youtube.com/watch?v=YQF5PDDI9jo&list=LL&index=5\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output\n test_pred.append(yhat)\n obs = test_scaled[i]\n history.append(obs)\n test_pred = np.array(test_pred)\n test_pred = scaler.inverse_transform(test_pred)\n\n # model evalutaion\n rmse = sqrt(mean_squared_error(test, test_pred))\n r2 = r2_score(test, test_pred)\n mae = mean_absolute_error(test, test_pred)\n\n metrics = [rmse, r2, mae]\n result['%s' %region] = metrics\n performance_path = './ARIMA/performance/'\n\n\n # data forecasting\n forecast = model_fit.forecast(steps=24)\n forecast = forecast.reshape(-1,1)\n forecast = scaler.inverse_transform(forecast)\n\n\n # data concatenate\n test = np.array(['test']).reshape(-1, 1)\n pred = np.array(['forecast']).reshape(-1, 1)\n\n forecast = np.concatenate([test, test_pred, pred, forecast])\n forecast = np.concatenate(forecast)\n predict['%s' % region] = forecast\n\n forecast_path = './ARIMA/forecast/'\n\n\n if not os.path.exists(performance_path):\n os.makedirs(performance_path)\n result.to_csv(performance_path + 'ARIMA_sample%s_score.csv' % i)\n\n if not os.path.exists(forecast_path):\n os.makedirs(forecast_path)\n predict.to_csv(forecast_path + 'ARIMA_sample%s_forecast.csv' % i)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
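In the ARIMA record above, every variant of the walk-forward loop appends test_scaled[i] — the outer sample counter — to the history instead of the current observation indexed by the inner variable j, which looks unintended. A minimal corrected sketch of that rolling one-step forecast, assuming train_scaled and test_scaled are the scaled 2-D arrays built earlier in the record:

# Walk-forward ARIMA sketch; train_scaled/test_scaled are assumed to be the
# scaled arrays produced by RobustScaler in the record above.
import numpy as np
from statsmodels.tsa.arima.model import ARIMA

def walk_forward(train_scaled, test_scaled, order=(3, 1, 1)):
    history = [x for x in train_scaled]
    preds = []
    for j in range(len(test_scaled)):
        fit = ARIMA(history, order=order).fit()  # refit on the growing history
        preds.append(fit.forecast())             # one-step-ahead prediction
        history.append(test_scaled[j])           # append the j-th observation, not test_scaled[i]
    return np.array(preds)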
# Libraries from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import relationship # Taskobra from taskobra.orm.base import ORMBase from taskobra.orm.relationships import SystemComponent class System(ORMBase): __tablename__ = "System" unique_id = Column(Integer, primary_key=True) name = Column(String) user_roles = relationship("UserSystemRole") system_components = relationship("SystemComponent") @property def components(self): for system_component in self.system_components: for _ in range(system_component.count): yield system_component.count, system_component.component def add_component(self, component): for system_component in self.system_components: if system_component.component is component: system_component.count += 1 return SystemComponent(system=self, component=component, count=1) def __repr__(self): components = [ f"{count}x{repr(component)}" for count, component in self.components ] return f"<System(name={self.name}, {components}, unique_id={self.unique_id})>" def __str__(self): linesep = "\n " components = [ f"{linesep}{repr(component)}" for _, component in self.components ] return f"{self.name}:{''.join(components)}"
normal
{ "blob_id": "2fc2fd6631cee5f3737dadaac1a115c045af0986", "index": 5058, "step-1": "<mask token>\n\n\nclass System(ORMBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def add_component(self, component):\n for system_component in self.system_components:\n if system_component.component is component:\n system_component.count += 1\n return\n SystemComponent(system=self, component=component, count=1)\n <mask token>\n\n def __str__(self):\n linesep = '\\n '\n components = [f'{linesep}{repr(component)}' for _, component in\n self.components]\n return f\"{self.name}:{''.join(components)}\"\n", "step-2": "<mask token>\n\n\nclass System(ORMBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def components(self):\n for system_component in self.system_components:\n for _ in range(system_component.count):\n yield system_component.count, system_component.component\n\n def add_component(self, component):\n for system_component in self.system_components:\n if system_component.component is component:\n system_component.count += 1\n return\n SystemComponent(system=self, component=component, count=1)\n <mask token>\n\n def __str__(self):\n linesep = '\\n '\n components = [f'{linesep}{repr(component)}' for _, component in\n self.components]\n return f\"{self.name}:{''.join(components)}\"\n", "step-3": "<mask token>\n\n\nclass System(ORMBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def components(self):\n for system_component in self.system_components:\n for _ in range(system_component.count):\n yield system_component.count, system_component.component\n\n def add_component(self, component):\n for system_component in self.system_components:\n if system_component.component is component:\n system_component.count += 1\n return\n SystemComponent(system=self, component=component, count=1)\n\n def __repr__(self):\n components = [f'{count}x{repr(component)}' for count, component in\n self.components]\n return (\n f'<System(name={self.name}, {components}, unique_id={self.unique_id})>'\n )\n\n def __str__(self):\n linesep = '\\n '\n components = [f'{linesep}{repr(component)}' for _, component in\n self.components]\n return f\"{self.name}:{''.join(components)}\"\n", "step-4": "<mask token>\n\n\nclass System(ORMBase):\n __tablename__ = 'System'\n unique_id = Column(Integer, primary_key=True)\n name = Column(String)\n user_roles = relationship('UserSystemRole')\n system_components = relationship('SystemComponent')\n\n @property\n def components(self):\n for system_component in self.system_components:\n for _ in range(system_component.count):\n yield system_component.count, system_component.component\n\n def add_component(self, component):\n for system_component in self.system_components:\n if system_component.component is component:\n system_component.count += 1\n return\n SystemComponent(system=self, component=component, count=1)\n\n def __repr__(self):\n components = [f'{count}x{repr(component)}' for count, component in\n self.components]\n return (\n f'<System(name={self.name}, {components}, unique_id={self.unique_id})>'\n )\n\n def __str__(self):\n linesep = '\\n '\n components = [f'{linesep}{repr(component)}' for _, component in\n self.components]\n return f\"{self.name}:{''.join(components)}\"\n", "step-5": "# Libraries\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.orm import 
relationship\n# Taskobra\nfrom taskobra.orm.base import ORMBase\nfrom taskobra.orm.relationships import SystemComponent\n\n\nclass System(ORMBase):\n __tablename__ = \"System\"\n unique_id = Column(Integer, primary_key=True)\n name = Column(String)\n user_roles = relationship(\"UserSystemRole\")\n system_components = relationship(\"SystemComponent\")\n\n @property\n def components(self):\n for system_component in self.system_components:\n for _ in range(system_component.count):\n yield system_component.count, system_component.component\n\n def add_component(self, component):\n for system_component in self.system_components:\n if system_component.component is component:\n system_component.count += 1\n return\n SystemComponent(system=self, component=component, count=1)\n\n def __repr__(self):\n components = [\n f\"{count}x{repr(component)}\"\n for count, component in self.components\n ]\n return f\"<System(name={self.name}, {components}, unique_id={self.unique_id})>\"\n\n def __str__(self):\n linesep = \"\\n \"\n components = [\n f\"{linesep}{repr(component)}\"\n for _, component in self.components\n ]\n return f\"{self.name}:{''.join(components)}\"\n", "step-ids": [ 3, 4, 5, 6, 8 ] }
[ 3, 4, 5, 6, 8 ]
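The System record above leans on a SystemComponent association object (imported from taskobra.orm.relationships but not included in this dump) to carry a per-pair count between systems and components. A self-contained sketch of that association-object shape with stand-in models; the table, column, and class names below are illustrative assumptions, not taskobra's actual schema.

# Stand-alone association-object sketch (illustrative names, not taskobra's schema).
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, Session

Base = declarative_base()

class Component(Base):
    __tablename__ = 'Component'
    unique_id = Column(Integer, primary_key=True)
    name = Column(String)

class SystemComponent(Base):
    __tablename__ = 'SystemComponent'
    system_id = Column(Integer, ForeignKey('System.unique_id'), primary_key=True)
    component_id = Column(Integer, ForeignKey('Component.unique_id'), primary_key=True)
    count = Column(Integer, default=1)
    system = relationship('System', back_populates='system_components')
    component = relationship('Component')

class System(Base):
    __tablename__ = 'System'
    unique_id = Column(Integer, primary_key=True)
    name = Column(String)
    system_components = relationship('SystemComponent', back_populates='system')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(SystemComponent(system=System(name='box'), component=Component(name='cpu'), count=2))
    session.commit()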
<|reserved_special_token_0|> def index(request): posts = Post.objects.order_by('-created_at').filter(status='Published') paginator = Paginator(posts, 9) page = request.GET.get('page') post_listings = paginator.get_page(page) context = {'posts': post_listings} return render(request, 'hub/index.html', context) <|reserved_special_token_0|> def authors(request): profiles = Profile.objects.all() context = {'profiles': profiles} return render(request, 'hub/authors.html', context) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def index(request): posts = Post.objects.order_by('-created_at').filter(status='Published') paginator = Paginator(posts, 9) page = request.GET.get('page') post_listings = paginator.get_page(page) context = {'posts': post_listings} return render(request, 'hub/index.html', context) <|reserved_special_token_0|> def authors(request): profiles = Profile.objects.all() context = {'profiles': profiles} return render(request, 'hub/authors.html', context) def authorDetail(request, pk): author = User.objects.get(username=pk) profile = Profile.objects.get(user=author) posts = Post.objects.order_by('-created_at').filter(status='Published', author=author) paginator = Paginator(posts, 6) page = request.GET.get('page') posts_paginated = paginator.get_page(page) context = {'author': profile, 'posts': posts_paginated} return render(request, 'hub/authorDetail.html', context) <|reserved_special_token_1|> <|reserved_special_token_0|> def index(request): posts = Post.objects.order_by('-created_at').filter(status='Published') paginator = Paginator(posts, 9) page = request.GET.get('page') post_listings = paginator.get_page(page) context = {'posts': post_listings} return render(request, 'hub/index.html', context) def about(request): about_us = get_object_or_404(AboutSite, id=1) context = {'about': about_us} return render(request, 'hub/about.html', context) def authors(request): profiles = Profile.objects.all() context = {'profiles': profiles} return render(request, 'hub/authors.html', context) def authorDetail(request, pk): author = User.objects.get(username=pk) profile = Profile.objects.get(user=author) posts = Post.objects.order_by('-created_at').filter(status='Published', author=author) paginator = Paginator(posts, 6) page = request.GET.get('page') posts_paginated = paginator.get_page(page) context = {'author': profile, 'posts': posts_paginated} return render(request, 'hub/authorDetail.html', context) <|reserved_special_token_1|> from django.shortcuts import render from post.models import * from .models import * from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator from account.models import Profile from django.contrib.auth.models import User from django.db.models import Q def index(request): posts = Post.objects.order_by('-created_at').filter(status='Published') paginator = Paginator(posts, 9) page = request.GET.get('page') post_listings = paginator.get_page(page) context = {'posts': post_listings} return render(request, 'hub/index.html', context) def about(request): about_us = get_object_or_404(AboutSite, id=1) context = {'about': about_us} return render(request, 'hub/about.html', context) def authors(request): profiles = Profile.objects.all() context = {'profiles': profiles} return render(request, 'hub/authors.html', context) def authorDetail(request, pk): author = User.objects.get(username=pk) profile = Profile.objects.get(user=author) posts = Post.objects.order_by('-created_at').filter(status='Published', author=author) paginator = 
Paginator(posts, 6) page = request.GET.get('page') posts_paginated = paginator.get_page(page) context = {'author': profile, 'posts': posts_paginated} return render(request, 'hub/authorDetail.html', context) <|reserved_special_token_1|> from django.shortcuts import render from post.models import * from .models import * from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator from account.models import Profile from django.contrib.auth.models import User from django.db.models import Q # Create your views here. def index(request): posts = Post.objects.order_by('-created_at').filter(status='Published') # about_us = AboutSite.objects.get(id=1) paginator = Paginator(posts, 9) page = request.GET.get('page') post_listings = paginator.get_page(page) context = { 'posts': post_listings, # 'about': about_us } return render(request, 'hub/index.html', context) def about(request): about_us = get_object_or_404(AboutSite,id=1) context = { 'about': about_us } return render(request, 'hub/about.html', context) def authors(request): profiles = Profile.objects.all() context = { 'profiles': profiles } return render(request, 'hub/authors.html', context) def authorDetail(request, pk): author = User.objects.get(username=pk) profile = Profile.objects.get(user=author) posts = Post.objects.order_by('-created_at').filter(status='Published', author=author) paginator = Paginator(posts, 6) page = request.GET.get('page') posts_paginated = paginator.get_page(page) context = { 'author': profile, 'posts': posts_paginated } return render(request, 'hub/authorDetail.html', context) # def search(request): # queryset_list = Post.objects.order_by('-created_at') # if 'q' in request.GET: # query = request.GET['q'] # if query: # queryset_list = queryset_list.filter(Q(title__icontains=query) | Q(description__icontains=query) | Q(content__icontains=query)) # paginator = Paginator(queryset_list, 1) # page = request.GET.get('page') # paginated_result = paginator.get_page(page) # context = { # 'posts': paginated_result # } # return render(request, 'hub/search.html', context)
flexible
{ "blob_id": "ee3718dee869a58089e897489af2eec3ff72be56", "index": 3478, "step-1": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\n<mask token>\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\n<mask token>\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n", "step-3": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\ndef about(request):\n about_us = get_object_or_404(AboutSite, id=1)\n context = {'about': about_us}\n return render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n", "step-4": "from django.shortcuts import render\nfrom post.models import *\nfrom .models import *\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom account.models import Profile\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\ndef about(request):\n about_us = get_object_or_404(AboutSite, id=1)\n context = {'about': about_us}\n return render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = 
User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n", "step-5": "from django.shortcuts import render\nfrom post.models import *\nfrom .models import *\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom account.models import Profile\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\n# Create your views here.\ndef index(request):\n\tposts = Post.objects.order_by('-created_at').filter(status='Published')\n\t# about_us = AboutSite.objects.get(id=1)\n\tpaginator = Paginator(posts, 9)\n\tpage = request.GET.get('page')\n\tpost_listings = paginator.get_page(page)\n\tcontext = {\n\t\t'posts': post_listings,\n\t\t# 'about': about_us\n\t}\n\treturn render(request, 'hub/index.html', context)\n\ndef about(request):\n\tabout_us = get_object_or_404(AboutSite,id=1)\n\tcontext = {\n\t\t'about': about_us\n\t}\n\treturn render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n\tprofiles = Profile.objects.all()\n\tcontext = {\n\t\t'profiles': profiles\n\t}\n\treturn render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n\tauthor = User.objects.get(username=pk)\n\tprofile = Profile.objects.get(user=author)\n\tposts = Post.objects.order_by('-created_at').filter(status='Published', author=author)\n\tpaginator = Paginator(posts, 6)\n\tpage = request.GET.get('page')\n\tposts_paginated = paginator.get_page(page)\n\tcontext = {\n\t\t'author': profile,\n\t\t'posts': posts_paginated\n\t}\n\treturn render(request, 'hub/authorDetail.html', context)\n\n\n# def search(request):\n# \tqueryset_list = Post.objects.order_by('-created_at')\n\n# \tif 'q' in request.GET:\n# \t\tquery = request.GET['q']\n# \t\tif query:\n# \t\t\tqueryset_list = queryset_list.filter(Q(title__icontains=query) | Q(description__icontains=query) | Q(content__icontains=query))\n\n# \tpaginator = Paginator(queryset_list, 1)\n# \tpage = request.GET.get('page')\n# \tpaginated_result = paginator.get_page(page)\n# \tcontext = {\n# \t\t'posts': paginated_result\n# \t}\n# \treturn render(request, 'hub/search.html', context)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
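One thing worth flagging in the hub-views record above: about() calls get_object_or_404(AboutSite, id=1), but only render is imported from django.shortcuts, so the view would raise NameError unless one of the star imports happens to re-export get_object_or_404. The assumed minimal fix, with AboutSite taken to come from the star-imported local models:

# Assumed fix for the hub views record: import get_object_or_404 explicitly.
from django.shortcuts import render, get_object_or_404

def about(request):
    about_us = get_object_or_404(AboutSite, id=1)  # AboutSite assumed to come from `from .models import *`
    return render(request, 'hub/about.html', {'about': about_us})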
<|reserved_special_token_0|> def article_list(request): articles = Article.objects.all() return render(request, 'board/list.html', {'articles': articles}) def article_detail(request, article_id): article = get_object_or_404(Article, id=article_id) comments = article.comment_set.all() return render(request, 'board/detail.html', {'article': article, 'comments': comments}) <|reserved_special_token_0|> def update_article(request, article_id): if request.method == 'GET': article = get_object_or_404(Article, id=article_id) return render(request, 'board/edit.html', {'article': article}) else: article = get_object_or_404(Article, id=article_id) article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) <|reserved_special_token_0|> def create_comment(request, article_id): if request.method == 'POST': comment = Comment() comment.article = get_object_or_404(Article, id=article_id) comment.content = request.POST.get('comment') comment.save() return redirect('board:article_detail', article_id) def delete_comment(request, article_id, comment_id): if request.method == 'POST': comment = get_object_or_404(Comment, id=comment_id) comment.delete() return redirect('board:article_detail', article_id) <|reserved_special_token_1|> <|reserved_special_token_0|> def article_list(request): articles = Article.objects.all() return render(request, 'board/list.html', {'articles': articles}) def article_detail(request, article_id): article = get_object_or_404(Article, id=article_id) comments = article.comment_set.all() return render(request, 'board/detail.html', {'article': article, 'comments': comments}) def create_article(request): if request.method == 'GET': return render(request, 'board/new.html') else: article = Article() article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def update_article(request, article_id): if request.method == 'GET': article = get_object_or_404(Article, id=article_id) return render(request, 'board/edit.html', {'article': article}) else: article = get_object_or_404(Article, id=article_id) article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) <|reserved_special_token_0|> def create_comment(request, article_id): if request.method == 'POST': comment = Comment() comment.article = get_object_or_404(Article, id=article_id) comment.content = request.POST.get('comment') comment.save() return redirect('board:article_detail', article_id) def delete_comment(request, article_id, comment_id): if request.method == 'POST': comment = get_object_or_404(Comment, id=comment_id) comment.delete() return redirect('board:article_detail', article_id) <|reserved_special_token_1|> <|reserved_special_token_0|> def article_list(request): articles = Article.objects.all() return render(request, 'board/list.html', {'articles': articles}) def article_detail(request, article_id): article = get_object_or_404(Article, id=article_id) comments = article.comment_set.all() return render(request, 'board/detail.html', {'article': article, 'comments': comments}) def create_article(request): if request.method == 'GET': return render(request, 'board/new.html') else: article = Article() article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def 
update_article(request, article_id): if request.method == 'GET': article = get_object_or_404(Article, id=article_id) return render(request, 'board/edit.html', {'article': article}) else: article = get_object_or_404(Article, id=article_id) article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def delete_article(request, article_id): if request.method == 'POST': article = get_object_or_404(Article, id=article_id) article.delete() return redirect('board:article_list') def create_comment(request, article_id): if request.method == 'POST': comment = Comment() comment.article = get_object_or_404(Article, id=article_id) comment.content = request.POST.get('comment') comment.save() return redirect('board:article_detail', article_id) def delete_comment(request, article_id, comment_id): if request.method == 'POST': comment = get_object_or_404(Comment, id=comment_id) comment.delete() return redirect('board:article_detail', article_id) <|reserved_special_token_1|> from django.shortcuts import render, redirect, get_object_or_404 from .models import Article, Comment def article_list(request): articles = Article.objects.all() return render(request, 'board/list.html', {'articles': articles}) def article_detail(request, article_id): article = get_object_or_404(Article, id=article_id) comments = article.comment_set.all() return render(request, 'board/detail.html', {'article': article, 'comments': comments}) def create_article(request): if request.method == 'GET': return render(request, 'board/new.html') else: article = Article() article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def update_article(request, article_id): if request.method == 'GET': article = get_object_or_404(Article, id=article_id) return render(request, 'board/edit.html', {'article': article}) else: article = get_object_or_404(Article, id=article_id) article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def delete_article(request, article_id): if request.method == 'POST': article = get_object_or_404(Article, id=article_id) article.delete() return redirect('board:article_list') def create_comment(request, article_id): if request.method == 'POST': comment = Comment() comment.article = get_object_or_404(Article, id=article_id) comment.content = request.POST.get('comment') comment.save() return redirect('board:article_detail', article_id) def delete_comment(request, article_id, comment_id): if request.method == 'POST': comment = get_object_or_404(Comment, id=comment_id) comment.delete() return redirect('board:article_detail', article_id) <|reserved_special_token_1|> from django.shortcuts import render, redirect, get_object_or_404 from .models import Article, Comment # from IPython import embed # Create your views here. 
def article_list(request): articles = Article.objects.all() return render(request, 'board/list.html', { 'articles': articles, }) def article_detail(request, article_id): article = get_object_or_404(Article, id=article_id) comments = article.comment_set.all() return render(request, 'board/detail.html', { 'article': article, 'comments': comments, }) # def new_article(request): # return render(request, 'board/new.html') def create_article(request): if request.method == 'GET': return render(request, 'board/new.html') else: # request.method == 'POST' article = Article() article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) # def edit_article(request, article_id): # pass def update_article(request, article_id): if request.method == 'GET': article = get_object_or_404(Article, id=article_id) return render(request, 'board/edit.html', { 'article': article, }) else: # request.method == 'POST' article = get_object_or_404(Article, id=article_id) article.title = request.POST.get('title') article.content = request.POST.get('content') article.save() return redirect('board:article_detail', article.id) def delete_article(request, article_id): if request.method == 'POST': article = get_object_or_404(Article, id=article_id) article.delete() return redirect('board:article_list') def create_comment(request, article_id): if request.method == 'POST': comment = Comment() comment.article = get_object_or_404(Article, id=article_id) comment.content = request.POST.get('comment') comment.save() return redirect('board:article_detail', article_id) def delete_comment(request, article_id, comment_id): if request.method == 'POST': comment = get_object_or_404(Comment, id=comment_id) comment.delete() return redirect('board:article_detail', article_id)
flexible
{ "blob_id": "6946601050802aaaa559d25612d0d4f5116559eb", "index": 1845, "step-1": "<mask token>\n\n\ndef article_list(request):\n articles = Article.objects.all()\n return render(request, 'board/list.html', {'articles': articles})\n\n\ndef article_detail(request, article_id):\n article = get_object_or_404(Article, id=article_id)\n comments = article.comment_set.all()\n return render(request, 'board/detail.html', {'article': article,\n 'comments': comments})\n\n\n<mask token>\n\n\ndef update_article(request, article_id):\n if request.method == 'GET':\n article = get_object_or_404(Article, id=article_id)\n return render(request, 'board/edit.html', {'article': article})\n else:\n article = get_object_or_404(Article, id=article_id)\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\n<mask token>\n\n\ndef create_comment(request, article_id):\n if request.method == 'POST':\n comment = Comment()\n comment.article = get_object_or_404(Article, id=article_id)\n comment.content = request.POST.get('comment')\n comment.save()\n return redirect('board:article_detail', article_id)\n\n\ndef delete_comment(request, article_id, comment_id):\n if request.method == 'POST':\n comment = get_object_or_404(Comment, id=comment_id)\n comment.delete()\n return redirect('board:article_detail', article_id)\n", "step-2": "<mask token>\n\n\ndef article_list(request):\n articles = Article.objects.all()\n return render(request, 'board/list.html', {'articles': articles})\n\n\ndef article_detail(request, article_id):\n article = get_object_or_404(Article, id=article_id)\n comments = article.comment_set.all()\n return render(request, 'board/detail.html', {'article': article,\n 'comments': comments})\n\n\ndef create_article(request):\n if request.method == 'GET':\n return render(request, 'board/new.html')\n else:\n article = Article()\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef update_article(request, article_id):\n if request.method == 'GET':\n article = get_object_or_404(Article, id=article_id)\n return render(request, 'board/edit.html', {'article': article})\n else:\n article = get_object_or_404(Article, id=article_id)\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\n<mask token>\n\n\ndef create_comment(request, article_id):\n if request.method == 'POST':\n comment = Comment()\n comment.article = get_object_or_404(Article, id=article_id)\n comment.content = request.POST.get('comment')\n comment.save()\n return redirect('board:article_detail', article_id)\n\n\ndef delete_comment(request, article_id, comment_id):\n if request.method == 'POST':\n comment = get_object_or_404(Comment, id=comment_id)\n comment.delete()\n return redirect('board:article_detail', article_id)\n", "step-3": "<mask token>\n\n\ndef article_list(request):\n articles = Article.objects.all()\n return render(request, 'board/list.html', {'articles': articles})\n\n\ndef article_detail(request, article_id):\n article = get_object_or_404(Article, id=article_id)\n comments = article.comment_set.all()\n return render(request, 'board/detail.html', {'article': article,\n 'comments': comments})\n\n\ndef create_article(request):\n if request.method == 'GET':\n return render(request, 'board/new.html')\n else:\n 
article = Article()\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef update_article(request, article_id):\n if request.method == 'GET':\n article = get_object_or_404(Article, id=article_id)\n return render(request, 'board/edit.html', {'article': article})\n else:\n article = get_object_or_404(Article, id=article_id)\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef delete_article(request, article_id):\n if request.method == 'POST':\n article = get_object_or_404(Article, id=article_id)\n article.delete()\n return redirect('board:article_list')\n\n\ndef create_comment(request, article_id):\n if request.method == 'POST':\n comment = Comment()\n comment.article = get_object_or_404(Article, id=article_id)\n comment.content = request.POST.get('comment')\n comment.save()\n return redirect('board:article_detail', article_id)\n\n\ndef delete_comment(request, article_id, comment_id):\n if request.method == 'POST':\n comment = get_object_or_404(Comment, id=comment_id)\n comment.delete()\n return redirect('board:article_detail', article_id)\n", "step-4": "from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Article, Comment\n\n\ndef article_list(request):\n articles = Article.objects.all()\n return render(request, 'board/list.html', {'articles': articles})\n\n\ndef article_detail(request, article_id):\n article = get_object_or_404(Article, id=article_id)\n comments = article.comment_set.all()\n return render(request, 'board/detail.html', {'article': article,\n 'comments': comments})\n\n\ndef create_article(request):\n if request.method == 'GET':\n return render(request, 'board/new.html')\n else:\n article = Article()\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef update_article(request, article_id):\n if request.method == 'GET':\n article = get_object_or_404(Article, id=article_id)\n return render(request, 'board/edit.html', {'article': article})\n else:\n article = get_object_or_404(Article, id=article_id)\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef delete_article(request, article_id):\n if request.method == 'POST':\n article = get_object_or_404(Article, id=article_id)\n article.delete()\n return redirect('board:article_list')\n\n\ndef create_comment(request, article_id):\n if request.method == 'POST':\n comment = Comment()\n comment.article = get_object_or_404(Article, id=article_id)\n comment.content = request.POST.get('comment')\n comment.save()\n return redirect('board:article_detail', article_id)\n\n\ndef delete_comment(request, article_id, comment_id):\n if request.method == 'POST':\n comment = get_object_or_404(Comment, id=comment_id)\n comment.delete()\n return redirect('board:article_detail', article_id)\n", "step-5": "from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Article, Comment\n# from IPython import embed\n\n# Create your views here.\n\n\ndef article_list(request):\n articles = Article.objects.all()\n return render(request, 'board/list.html', {\n 'articles': articles,\n })\n\n\ndef article_detail(request, article_id):\n article 
= get_object_or_404(Article, id=article_id)\n comments = article.comment_set.all()\n return render(request, 'board/detail.html', {\n 'article': article,\n 'comments': comments,\n })\n\n\n# def new_article(request):\n# return render(request, 'board/new.html')\n\n\ndef create_article(request):\n if request.method == 'GET':\n return render(request, 'board/new.html')\n else: # request.method == 'POST'\n article = Article()\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\n# def edit_article(request, article_id):\n# pass\n\n\ndef update_article(request, article_id):\n if request.method == 'GET':\n article = get_object_or_404(Article, id=article_id)\n return render(request, 'board/edit.html', {\n 'article': article,\n })\n\n else: # request.method == 'POST'\n article = get_object_or_404(Article, id=article_id)\n article.title = request.POST.get('title')\n article.content = request.POST.get('content')\n article.save()\n return redirect('board:article_detail', article.id)\n\n\ndef delete_article(request, article_id):\n if request.method == 'POST':\n article = get_object_or_404(Article, id=article_id)\n article.delete()\n return redirect('board:article_list')\n\n\ndef create_comment(request, article_id):\n if request.method == 'POST':\n comment = Comment()\n comment.article = get_object_or_404(Article, id=article_id)\n comment.content = request.POST.get('comment')\n\n comment.save()\n return redirect('board:article_detail', article_id)\n\n\ndef delete_comment(request, article_id, comment_id):\n if request.method == 'POST':\n comment = get_object_or_404(Comment, id=comment_id)\n comment.delete()\n\n return redirect('board:article_detail', article_id)\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
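The board-views record above reverses URL names such as 'board:article_list' and 'board:article_detail' with positional ids. A hypothetical board/urls.py consistent with those names and view signatures; the concrete path strings are assumptions, only the view names and parameters come from the record.

# Hypothetical board/urls.py matching the reverse() names used in the record;
# the path strings themselves are assumptions.
from django.urls import path
from . import views

app_name = 'board'
urlpatterns = [
    path('', views.article_list, name='article_list'),
    path('new/', views.create_article, name='create_article'),
    path('<int:article_id>/', views.article_detail, name='article_detail'),
    path('<int:article_id>/edit/', views.update_article, name='update_article'),
    path('<int:article_id>/delete/', views.delete_article, name='delete_article'),
    path('<int:article_id>/comments/new/', views.create_comment, name='create_comment'),
    path('<int:article_id>/comments/<int:comment_id>/delete/', views.delete_comment, name='delete_comment'),
]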
# -*- coding: utf-8 -*- """ ORIGINAL PROGRAM SOURCE CODE: 1: from __future__ import division, print_function, absolute_import 2: 3: import os 4: from os.path import join 5: 6: from scipy._build_utils import numpy_nodepr_api 7: 8: 9: def configuration(parent_package='',top_path=None): 10: from numpy.distutils.misc_util import Configuration 11: from numpy.distutils.system_info import get_info 12: config = Configuration('integrate', parent_package, top_path) 13: 14: # Get a local copy of lapack_opt_info 15: lapack_opt = dict(get_info('lapack_opt',notfound_action=2)) 16: # Pop off the libraries list so it can be combined with 17: # additional required libraries 18: lapack_libs = lapack_opt.pop('libraries', []) 19: 20: mach_src = [join('mach','*.f')] 21: quadpack_src = [join('quadpack', '*.f')] 22: lsoda_src = [join('odepack', fn) for fn in [ 23: 'blkdta000.f', 'bnorm.f', 'cfode.f', 24: 'ewset.f', 'fnorm.f', 'intdy.f', 25: 'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f', 26: 'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f', 27: 'xsetun.f']] 28: vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')] 29: dop_src = [join('dop','*.f')] 30: quadpack_test_src = [join('tests','_test_multivariate.c')] 31: odeint_banded_test_src = [join('tests', 'banded5x5.f')] 32: 33: config.add_library('mach', sources=mach_src, 34: config_fc={'noopt':(__file__,1)}) 35: config.add_library('quadpack', sources=quadpack_src) 36: config.add_library('lsoda', sources=lsoda_src) 37: config.add_library('vode', sources=vode_src) 38: config.add_library('dop', sources=dop_src) 39: 40: # Extensions 41: # quadpack: 42: include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')] 43: if 'include_dirs' in lapack_opt: 44: lapack_opt = dict(lapack_opt) 45: include_dirs.extend(lapack_opt.pop('include_dirs')) 46: 47: config.add_extension('_quadpack', 48: sources=['_quadpackmodule.c'], 49: libraries=['quadpack', 'mach'] + lapack_libs, 50: depends=(['__quadpack.h'] 51: + quadpack_src + mach_src), 52: include_dirs=include_dirs, 53: **lapack_opt) 54: 55: # odepack/lsoda-odeint 56: odepack_opts = lapack_opt.copy() 57: odepack_opts.update(numpy_nodepr_api) 58: config.add_extension('_odepack', 59: sources=['_odepackmodule.c'], 60: libraries=['lsoda', 'mach'] + lapack_libs, 61: depends=(lsoda_src + mach_src), 62: **odepack_opts) 63: 64: # vode 65: config.add_extension('vode', 66: sources=['vode.pyf'], 67: libraries=['vode'] + lapack_libs, 68: depends=vode_src, 69: **lapack_opt) 70: 71: # lsoda 72: config.add_extension('lsoda', 73: sources=['lsoda.pyf'], 74: libraries=['lsoda', 'mach'] + lapack_libs, 75: depends=(lsoda_src + mach_src), 76: **lapack_opt) 77: 78: # dop 79: config.add_extension('_dop', 80: sources=['dop.pyf'], 81: libraries=['dop'], 82: depends=dop_src) 83: 84: config.add_extension('_test_multivariate', 85: sources=quadpack_test_src) 86: 87: # Fortran+f2py extension module for testing odeint. 
88: config.add_extension('_test_odeint_banded', 89: sources=odeint_banded_test_src, 90: libraries=['lsoda', 'mach'] + lapack_libs, 91: depends=(lsoda_src + mach_src), 92: **lapack_opt) 93: 94: config.add_subpackage('_ivp') 95: 96: config.add_data_dir('tests') 97: return config 98: 99: 100: if __name__ == '__main__': 101: from numpy.distutils.core import setup 102: setup(**configuration(top_path='').todict()) 103: """ # Import the stypy library necessary elements from stypy.type_inference_programs.type_inference_programs_imports import * # Create the module type store module_type_store = Context(None, __file__) # ################# Begin of the type inference program ################## stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 3, 0)) # 'import os' statement (line 3) import os import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'os', os, module_type_store) stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 4, 0)) # 'from os.path import join' statement (line 4) update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/') import_32066 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path') if (type(import_32066) is not StypyTypeError): if (import_32066 != 'pyd_module'): __import__(import_32066) sys_modules_32067 = sys.modules[import_32066] import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', sys_modules_32067.module_type_store, module_type_store, ['join']) nest_module(stypy.reporting.localization.Localization(__file__, 4, 0), __file__, sys_modules_32067, sys_modules_32067.module_type_store, module_type_store) else: from os.path import join import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', None, module_type_store, ['join'], [join]) else: # Assigning a type to the variable 'os.path' (line 4) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', import_32066) remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/') stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 6, 0)) # 'from scipy._build_utils import numpy_nodepr_api' statement (line 6) update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/') import_32068 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils') if (type(import_32068) is not StypyTypeError): if (import_32068 != 'pyd_module'): __import__(import_32068) sys_modules_32069 = sys.modules[import_32068] import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', sys_modules_32069.module_type_store, module_type_store, ['numpy_nodepr_api']) nest_module(stypy.reporting.localization.Localization(__file__, 6, 0), __file__, sys_modules_32069, sys_modules_32069.module_type_store, module_type_store) else: from scipy._build_utils import numpy_nodepr_api import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', None, module_type_store, ['numpy_nodepr_api'], [numpy_nodepr_api]) else: # Assigning a type to the variable 'scipy._build_utils' (line 6) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', import_32068) 
remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/') @norecursion def configuration(localization, *varargs, **kwargs): global module_type_store # Assign values to the parameters with defaults str_32070 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 9, 33), 'str', '') # Getting the type of 'None' (line 9) None_32071 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 45), 'None') defaults = [str_32070, None_32071] # Create a new context for function 'configuration' module_type_store = module_type_store.open_function_context('configuration', 9, 0, False) # Passed parameters checking function configuration.stypy_localization = localization configuration.stypy_type_of_self = None configuration.stypy_type_store = module_type_store configuration.stypy_function_name = 'configuration' configuration.stypy_param_names_list = ['parent_package', 'top_path'] configuration.stypy_varargs_param_name = None configuration.stypy_kwargs_param_name = None configuration.stypy_call_defaults = defaults configuration.stypy_call_varargs = varargs configuration.stypy_call_kwargs = kwargs arguments = process_argument_values(localization, None, module_type_store, 'configuration', ['parent_package', 'top_path'], None, None, defaults, varargs, kwargs) if is_error_type(arguments): # Destroy the current context module_type_store = module_type_store.close_function_context() return arguments # Initialize method data init_call_information(module_type_store, 'configuration', localization, ['parent_package', 'top_path'], arguments) # Default return type storage variable (SSA) # Assigning a type to the variable 'stypy_return_type' module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None) # ################# Begin of 'configuration(...)' code ################## stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 10, 4)) # 'from numpy.distutils.misc_util import Configuration' statement (line 10) update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/') import_32072 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util') if (type(import_32072) is not StypyTypeError): if (import_32072 != 'pyd_module'): __import__(import_32072) sys_modules_32073 = sys.modules[import_32072] import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', sys_modules_32073.module_type_store, module_type_store, ['Configuration']) nest_module(stypy.reporting.localization.Localization(__file__, 10, 4), __file__, sys_modules_32073, sys_modules_32073.module_type_store, module_type_store) else: from numpy.distutils.misc_util import Configuration import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', None, module_type_store, ['Configuration'], [Configuration]) else: # Assigning a type to the variable 'numpy.distutils.misc_util' (line 10) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', import_32072) remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/') stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 11, 4)) # 'from numpy.distutils.system_info import get_info' statement (line 11) 
update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/') import_32074 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info') if (type(import_32074) is not StypyTypeError): if (import_32074 != 'pyd_module'): __import__(import_32074) sys_modules_32075 = sys.modules[import_32074] import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', sys_modules_32075.module_type_store, module_type_store, ['get_info']) nest_module(stypy.reporting.localization.Localization(__file__, 11, 4), __file__, sys_modules_32075, sys_modules_32075.module_type_store, module_type_store) else: from numpy.distutils.system_info import get_info import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', None, module_type_store, ['get_info'], [get_info]) else: # Assigning a type to the variable 'numpy.distutils.system_info' (line 11) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', import_32074) remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/') # Assigning a Call to a Name (line 12): # Call to Configuration(...): (line 12) # Processing the call arguments (line 12) str_32077 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 12, 27), 'str', 'integrate') # Getting the type of 'parent_package' (line 12) parent_package_32078 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 40), 'parent_package', False) # Getting the type of 'top_path' (line 12) top_path_32079 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 56), 'top_path', False) # Processing the call keyword arguments (line 12) kwargs_32080 = {} # Getting the type of 'Configuration' (line 12) Configuration_32076 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 13), 'Configuration', False) # Calling Configuration(args, kwargs) (line 12) Configuration_call_result_32081 = invoke(stypy.reporting.localization.Localization(__file__, 12, 13), Configuration_32076, *[str_32077, parent_package_32078, top_path_32079], **kwargs_32080) # Assigning a type to the variable 'config' (line 12) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 12, 4), 'config', Configuration_call_result_32081) # Assigning a Call to a Name (line 15): # Call to dict(...): (line 15) # Processing the call arguments (line 15) # Call to get_info(...): (line 15) # Processing the call arguments (line 15) str_32084 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 31), 'str', 'lapack_opt') # Processing the call keyword arguments (line 15) int_32085 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 60), 'int') keyword_32086 = int_32085 kwargs_32087 = {'notfound_action': keyword_32086} # Getting the type of 'get_info' (line 15) get_info_32083 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 22), 'get_info', False) # Calling get_info(args, kwargs) (line 15) get_info_call_result_32088 = invoke(stypy.reporting.localization.Localization(__file__, 15, 22), get_info_32083, *[str_32084], **kwargs_32087) # Processing the call keyword arguments (line 15) kwargs_32089 = {} # Getting the type of 'dict' (line 15) dict_32082 
= module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 17), 'dict', False) # Calling dict(args, kwargs) (line 15) dict_call_result_32090 = invoke(stypy.reporting.localization.Localization(__file__, 15, 17), dict_32082, *[get_info_call_result_32088], **kwargs_32089) # Assigning a type to the variable 'lapack_opt' (line 15) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 15, 4), 'lapack_opt', dict_call_result_32090) # Assigning a Call to a Name (line 18): # Call to pop(...): (line 18) # Processing the call arguments (line 18) str_32093 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 33), 'str', 'libraries') # Obtaining an instance of the builtin type 'list' (line 18) list_32094 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 46), 'list') # Adding type elements to the builtin type 'list' instance (line 18) # Processing the call keyword arguments (line 18) kwargs_32095 = {} # Getting the type of 'lapack_opt' (line 18) lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 18, 18), 'lapack_opt', False) # Obtaining the member 'pop' of a type (line 18) pop_32092 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop') # Calling pop(args, kwargs) (line 18) pop_call_result_32096 = invoke(stypy.reporting.localization.Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094], **kwargs_32095) # Assigning a type to the variable 'lapack_libs' (line 18) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 18, 4), 'lapack_libs', pop_call_result_32096) # Assigning a List to a Name (line 20): # Obtaining an instance of the builtin type 'list' (line 20) list_32097 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 15), 'list') # Adding type elements to the builtin type 'list' instance (line 20) # Adding element type (line 20) # Call to join(...): (line 20) # Processing the call arguments (line 20) str_32099 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 21), 'str', 'mach') str_32100 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 28), 'str', '*.f') # Processing the call keyword arguments (line 20) kwargs_32101 = {} # Getting the type of 'join' (line 20) join_32098 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 20, 16), 'join', False) # Calling join(args, kwargs) (line 20) join_call_result_32102 = invoke(stypy.reporting.localization.Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100], **kwargs_32101) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 20, 15), list_32097, join_call_result_32102) # Assigning a type to the variable 'mach_src' (line 20) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 20, 4), 'mach_src', list_32097) # Assigning a List to a Name (line 21): # Obtaining an instance of the builtin type 'list' (line 21) list_32103 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 19), 'list') # Adding type elements to the builtin type 'list' instance (line 21) # Adding element type (line 21) # Call to join(...): (line 21) # Processing the call arguments (line 21) str_32105 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 25), 'str', 'quadpack') str_32106 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 37), 'str', '*.f') # Processing the call keyword arguments (line 21) kwargs_32107 = {} # Getting the type of 'join' (line 21) join_32104 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 21, 20), 'join', False) # Calling join(args, kwargs) (line 21) join_call_result_32108 = invoke(stypy.reporting.localization.Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106], **kwargs_32107) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 21, 19), list_32103, join_call_result_32108) # Assigning a type to the variable 'quadpack_src' (line 21) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 21, 4), 'quadpack_src', list_32103) # Assigning a ListComp to a Name (line 22): # Calculating list comprehension # Calculating comprehension expression # Obtaining an instance of the builtin type 'list' (line 22) list_32114 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 47), 'list') # Adding type elements to the builtin type 'list' instance (line 22) # Adding element type (line 22) str_32115 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32115) # Adding element type (line 22) str_32116 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 23), 'str', 'bnorm.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32116) # Adding element type (line 22) str_32117 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 34), 'str', 'cfode.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32117) # Adding element type (line 22) str_32118 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 8), 'str', 'ewset.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32118) # Adding element type (line 22) str_32119 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 19), 'str', 'fnorm.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32119) # Adding element type (line 22) str_32120 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 30), 'str', 'intdy.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32120) # Adding element type (line 22) str_32121 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 8), 'str', 'lsoda.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32121) # Adding element type (line 22) str_32122 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 19), 'str', 'prja.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32122) # Adding element type (line 22) str_32123 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 29), 'str', 'solsy.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32123) # Adding element type (line 22) str_32124 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 40), 'str', 'srcma.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32124) # Adding element type (line 22) str_32125 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 8), 'str', 'stoda.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32125) # Adding element type (line 22) str_32126 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32126) # Adding element type (line 22) str_32127 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32127) # Adding element type (line 22) str_32128 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 43), 'str', 'xsetf.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32128) # Adding element type (line 22) str_32129 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 27, 8), 'str', 'xsetun.f') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32129) comprehension_32130 = get_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32114) # Assigning a type to the variable 'fn' (line 22) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'fn', comprehension_32130) # Call to join(...): (line 22) # Processing the call arguments (line 22) str_32110 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 22), 'str', 'odepack') # Getting the type of 'fn' (line 22) fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 33), 'fn', False) # Processing the call keyword arguments (line 22) kwargs_32112 = {} # Getting the type of 'join' (line 22) join_32109 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'join', False) # Calling join(args, kwargs) (line 22) join_call_result_32113 = invoke(stypy.reporting.localization.Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111], **kwargs_32112) list_32131 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 17), 'list') set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32131, join_call_result_32113) # Assigning a type to the variable 'lsoda_src' (line 22) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 4), 'lsoda_src', list_32131) # Assigning a List to a Name (line 28): # Obtaining an instance of the builtin type 'list' (line 28) list_32132 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 15), 'list') # Adding type elements to the builtin type 
'list' instance (line 28) # Adding element type (line 28) # Call to join(...): (line 28) # Processing the call arguments (line 28) str_32134 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 21), 'str', 'odepack') str_32135 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 32), 'str', 'vode.f') # Processing the call keyword arguments (line 28) kwargs_32136 = {} # Getting the type of 'join' (line 28) join_32133 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 16), 'join', False) # Calling join(args, kwargs) (line 28) join_call_result_32137 = invoke(stypy.reporting.localization.Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135], **kwargs_32136) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32137) # Adding element type (line 28) # Call to join(...): (line 28) # Processing the call arguments (line 28) str_32139 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 48), 'str', 'odepack') str_32140 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 59), 'str', 'zvode.f') # Processing the call keyword arguments (line 28) kwargs_32141 = {} # Getting the type of 'join' (line 28) join_32138 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 43), 'join', False) # Calling join(args, kwargs) (line 28) join_call_result_32142 = invoke(stypy.reporting.localization.Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140], **kwargs_32141) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32142) # Assigning a type to the variable 'vode_src' (line 28) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 28, 4), 'vode_src', list_32132) # Assigning a List to a Name (line 29): # Obtaining an instance of the builtin type 'list' (line 29) list_32143 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 14), 'list') # Adding type elements to the builtin type 'list' instance (line 29) # Adding element type (line 29) # Call to join(...): (line 29) # Processing the call arguments (line 29) str_32145 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 20), 'str', 'dop') str_32146 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 26), 'str', '*.f') # Processing the call keyword arguments (line 29) kwargs_32147 = {} # Getting the type of 'join' (line 29) join_32144 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 15), 'join', False) # Calling join(args, kwargs) (line 29) join_call_result_32148 = invoke(stypy.reporting.localization.Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146], **kwargs_32147) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 29, 14), list_32143, join_call_result_32148) # Assigning a type to the variable 'dop_src' (line 29) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 29, 4), 'dop_src', list_32143) # Assigning a List to a Name (line 30): # Obtaining an instance of the builtin type 'list' (line 30) list_32149 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 24), 'list') # Adding type elements to the 
builtin type 'list' instance (line 30) # Adding element type (line 30) # Call to join(...): (line 30) # Processing the call arguments (line 30) str_32151 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 30), 'str', 'tests') str_32152 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 38), 'str', '_test_multivariate.c') # Processing the call keyword arguments (line 30) kwargs_32153 = {} # Getting the type of 'join' (line 30) join_32150 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 30, 25), 'join', False) # Calling join(args, kwargs) (line 30) join_call_result_32154 = invoke(stypy.reporting.localization.Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152], **kwargs_32153) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 30, 24), list_32149, join_call_result_32154) # Assigning a type to the variable 'quadpack_test_src' (line 30) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 30, 4), 'quadpack_test_src', list_32149) # Assigning a List to a Name (line 31): # Obtaining an instance of the builtin type 'list' (line 31) list_32155 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 29), 'list') # Adding type elements to the builtin type 'list' instance (line 31) # Adding element type (line 31) # Call to join(...): (line 31) # Processing the call arguments (line 31) str_32157 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 35), 'str', 'tests') str_32158 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f') # Processing the call keyword arguments (line 31) kwargs_32159 = {} # Getting the type of 'join' (line 31) join_32156 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 30), 'join', False) # Calling join(args, kwargs) (line 31) join_call_result_32160 = invoke(stypy.reporting.localization.Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158], **kwargs_32159) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 31, 29), list_32155, join_call_result_32160) # Assigning a type to the variable 'odeint_banded_test_src' (line 31) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 31, 4), 'odeint_banded_test_src', list_32155) # Call to add_library(...): (line 33) # Processing the call arguments (line 33) str_32163 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 33, 23), 'str', 'mach') # Processing the call keyword arguments (line 33) # Getting the type of 'mach_src' (line 33) mach_src_32164 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 39), 'mach_src', False) keyword_32165 = mach_src_32164 # Obtaining an instance of the builtin type 'dict' (line 34) dict_32166 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 33), 'dict') # Adding type elements to the builtin type 'dict' instance (line 34) # Adding element type (key, value) (line 34) str_32167 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 34), 'str', 'noopt') # Obtaining an instance of the builtin type 'tuple' (line 34) tuple_32168 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 43), 'tuple') # Adding 
type elements to the builtin type 'tuple' instance (line 34) # Adding element type (line 34) # Getting the type of '__file__' (line 34) file___32169 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 34, 43), '__file__', False) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, file___32169) # Adding element type (line 34) int_32170 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 52), 'int') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, int_32170) set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 33), dict_32166, (str_32167, tuple_32168)) keyword_32171 = dict_32166 kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171} # Getting the type of 'config' (line 33) config_32161 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 4), 'config', False) # Obtaining the member 'add_library' of a type (line 33) add_library_32162 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 33, 4), config_32161, 'add_library') # Calling add_library(args, kwargs) (line 33) add_library_call_result_32173 = invoke(stypy.reporting.localization.Localization(__file__, 33, 4), add_library_32162, *[str_32163], **kwargs_32172) # Call to add_library(...): (line 35) # Processing the call arguments (line 35) str_32176 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 35, 23), 'str', 'quadpack') # Processing the call keyword arguments (line 35) # Getting the type of 'quadpack_src' (line 35) quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 43), 'quadpack_src', False) keyword_32178 = quadpack_src_32177 kwargs_32179 = {'sources': keyword_32178} # Getting the type of 'config' (line 35) config_32174 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 4), 'config', False) # Obtaining the member 'add_library' of a type (line 35) add_library_32175 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 35, 4), config_32174, 'add_library') # Calling add_library(args, kwargs) (line 35) add_library_call_result_32180 = invoke(stypy.reporting.localization.Localization(__file__, 35, 4), add_library_32175, *[str_32176], **kwargs_32179) # Call to add_library(...): (line 36) # Processing the call arguments (line 36) str_32183 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 36, 23), 'str', 'lsoda') # Processing the call keyword arguments (line 36) # Getting the type of 'lsoda_src' (line 36) lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 40), 'lsoda_src', False) keyword_32185 = lsoda_src_32184 kwargs_32186 = {'sources': keyword_32185} # Getting the type of 'config' (line 36) config_32181 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 4), 'config', False) # Obtaining the member 'add_library' of a type (line 36) add_library_32182 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 36, 4), config_32181, 'add_library') # Calling add_library(args, kwargs) (line 36) add_library_call_result_32187 = invoke(stypy.reporting.localization.Localization(__file__, 36, 4), add_library_32182, *[str_32183], **kwargs_32186) # 
Call to add_library(...): (line 37) # Processing the call arguments (line 37) str_32190 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 37, 23), 'str', 'vode') # Processing the call keyword arguments (line 37) # Getting the type of 'vode_src' (line 37) vode_src_32191 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 39), 'vode_src', False) keyword_32192 = vode_src_32191 kwargs_32193 = {'sources': keyword_32192} # Getting the type of 'config' (line 37) config_32188 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 4), 'config', False) # Obtaining the member 'add_library' of a type (line 37) add_library_32189 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 37, 4), config_32188, 'add_library') # Calling add_library(args, kwargs) (line 37) add_library_call_result_32194 = invoke(stypy.reporting.localization.Localization(__file__, 37, 4), add_library_32189, *[str_32190], **kwargs_32193) # Call to add_library(...): (line 38) # Processing the call arguments (line 38) str_32197 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 38, 23), 'str', 'dop') # Processing the call keyword arguments (line 38) # Getting the type of 'dop_src' (line 38) dop_src_32198 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 38), 'dop_src', False) keyword_32199 = dop_src_32198 kwargs_32200 = {'sources': keyword_32199} # Getting the type of 'config' (line 38) config_32195 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 4), 'config', False) # Obtaining the member 'add_library' of a type (line 38) add_library_32196 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 38, 4), config_32195, 'add_library') # Calling add_library(args, kwargs) (line 38) add_library_call_result_32201 = invoke(stypy.reporting.localization.Localization(__file__, 38, 4), add_library_32196, *[str_32197], **kwargs_32200) # Assigning a List to a Name (line 42): # Obtaining an instance of the builtin type 'list' (line 42) list_32202 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 19), 'list') # Adding type elements to the builtin type 'list' instance (line 42) # Adding element type (line 42) # Call to join(...): (line 42) # Processing the call arguments (line 42) # Call to dirname(...): (line 42) # Processing the call arguments (line 42) # Getting the type of '__file__' (line 42) file___32207 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 41), '__file__', False) # Processing the call keyword arguments (line 42) kwargs_32208 = {} # Getting the type of 'os' (line 42) os_32204 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 25), 'os', False) # Obtaining the member 'path' of a type (line 42) path_32205 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), os_32204, 'path') # Obtaining the member 'dirname' of a type (line 42) dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), path_32205, 'dirname') # Calling dirname(args, kwargs) (line 42) dirname_call_result_32209 = invoke(stypy.reporting.localization.Localization(__file__, 42, 25), dirname_32206, *[file___32207], **kwargs_32208) str_32210 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 52), 'str', '..') str_32211 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 58), 'str', '_lib') str_32212 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 66), 'str', 'src') # Processing the call keyword arguments (line 42) kwargs_32213 = {} # Getting the type of 'join' (line 42) join_32203 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 20), 'join', False) # Calling join(args, kwargs) (line 42) join_call_result_32214 = invoke(stypy.reporting.localization.Localization(__file__, 42, 20), join_32203, *[dirname_call_result_32209, str_32210, str_32211, str_32212], **kwargs_32213) add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 19), list_32202, join_call_result_32214) # Assigning a type to the variable 'include_dirs' (line 42) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 42, 4), 'include_dirs', list_32202) str_32215 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 43, 7), 'str', 'include_dirs') # Getting the type of 'lapack_opt' (line 43) lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 43, 25), 'lapack_opt') # Applying the binary operator 'in' (line 43) result_contains_32217 = python_operator(stypy.reporting.localization.Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216) # Testing the type of an if condition (line 43) if_condition_32218 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 43, 4), result_contains_32217) # Assigning a type to the variable 'if_condition_32218' (line 43) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 43, 4), 'if_condition_32218', if_condition_32218) # SSA begins for if statement (line 43) module_type_store = SSAContext.create_ssa_context(module_type_store, 'if') # Assigning a Call to a Name (line 44): # Call to dict(...): (line 44) # Processing the call arguments (line 44) # Getting the type of 'lapack_opt' (line 44) lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 26), 'lapack_opt', False) # Processing the call keyword arguments (line 44) kwargs_32221 = {} # Getting the type of 'dict' (line 44) dict_32219 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 21), 'dict', False) # Calling dict(args, kwargs) (line 44) dict_call_result_32222 = invoke(stypy.reporting.localization.Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **kwargs_32221) # Assigning a type to the variable 'lapack_opt' (line 44) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 44, 8), 'lapack_opt', dict_call_result_32222) # Call to extend(...): (line 45) # Processing the call arguments (line 45) # Call to pop(...): (line 45) # Processing the call arguments (line 45) str_32227 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 45, 43), 'str', 'include_dirs') # Processing the call keyword arguments (line 45) kwargs_32228 = {} # Getting the type of 'lapack_opt' (line 45) lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 28), 'lapack_opt', False) # Obtaining the member 'pop' of a type (line 45) pop_32226 = 
module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop') # Calling pop(args, kwargs) (line 45) pop_call_result_32229 = invoke(stypy.reporting.localization.Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228) # Processing the call keyword arguments (line 45) kwargs_32230 = {} # Getting the type of 'include_dirs' (line 45) include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 8), 'include_dirs', False) # Obtaining the member 'extend' of a type (line 45) extend_32224 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 8), include_dirs_32223, 'extend') # Calling extend(args, kwargs) (line 45) extend_call_result_32231 = invoke(stypy.reporting.localization.Localization(__file__, 45, 8), extend_32224, *[pop_call_result_32229], **kwargs_32230) # SSA join for if statement (line 43) module_type_store = module_type_store.join_ssa_context() # Call to add_extension(...): (line 47) # Processing the call arguments (line 47) str_32234 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 47, 25), 'str', '_quadpack') # Processing the call keyword arguments (line 47) # Obtaining an instance of the builtin type 'list' (line 48) list_32235 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 33), 'list') # Adding type elements to the builtin type 'list' instance (line 48) # Adding element type (line 48) str_32236 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 48, 33), list_32235, str_32236) keyword_32237 = list_32235 # Obtaining an instance of the builtin type 'list' (line 49) list_32238 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 35), 'list') # Adding type elements to the builtin type 'list' instance (line 49) # Adding element type (line 49) str_32239 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 36), 'str', 'quadpack') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32239) # Adding element type (line 49) str_32240 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 48), 'str', 'mach') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32240) # Getting the type of 'lapack_libs' (line 49) lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 49, 58), 'lapack_libs', False) # Applying the binary operator '+' (line 49) result_add_32242 = python_operator(stypy.reporting.localization.Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241) keyword_32243 = result_add_32242 # Obtaining an instance of the builtin type 'list' (line 50) list_32244 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 34), 'list') # Adding type elements to the builtin type 'list' instance (line 50) # Adding element type (line 50) str_32245 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 35), 'str', '__quadpack.h') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 50, 34), list_32244, str_32245) # Getting the type of 'quadpack_src' 
(line 51) quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 36), 'quadpack_src', False) # Applying the binary operator '+' (line 50) result_add_32247 = python_operator(stypy.reporting.localization.Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246) # Getting the type of 'mach_src' (line 51) mach_src_32248 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 51), 'mach_src', False) # Applying the binary operator '+' (line 51) result_add_32249 = python_operator(stypy.reporting.localization.Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248) keyword_32250 = result_add_32249 # Getting the type of 'include_dirs' (line 52) include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 52, 38), 'include_dirs', False) keyword_32252 = include_dirs_32251 # Getting the type of 'lapack_opt' (line 53) lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 53, 27), 'lapack_opt', False) kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237, 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253, 'include_dirs': keyword_32252} # Getting the type of 'config' (line 47) config_32232 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 47, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 47) add_extension_32233 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 47, 4), config_32232, 'add_extension') # Calling add_extension(args, kwargs) (line 47) add_extension_call_result_32255 = invoke(stypy.reporting.localization.Localization(__file__, 47, 4), add_extension_32233, *[str_32234], **kwargs_32254) # Assigning a Call to a Name (line 56): # Call to copy(...): (line 56) # Processing the call keyword arguments (line 56) kwargs_32258 = {} # Getting the type of 'lapack_opt' (line 56) lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 56, 19), 'lapack_opt', False) # Obtaining the member 'copy' of a type (line 56) copy_32257 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy') # Calling copy(args, kwargs) (line 56) copy_call_result_32259 = invoke(stypy.reporting.localization.Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258) # Assigning a type to the variable 'odepack_opts' (line 56) module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 56, 4), 'odepack_opts', copy_call_result_32259) # Call to update(...): (line 57) # Processing the call arguments (line 57) # Getting the type of 'numpy_nodepr_api' (line 57) numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False) # Processing the call keyword arguments (line 57) kwargs_32263 = {} # Getting the type of 'odepack_opts' (line 57) odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 4), 'odepack_opts', False) # Obtaining the member 'update' of a type (line 57) update_32261 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 57, 4), odepack_opts_32260, 'update') # Calling update(args, kwargs) (line 57) update_call_result_32264 = invoke(stypy.reporting.localization.Localization(__file__, 57, 4), update_32261, 
*[numpy_nodepr_api_32262], **kwargs_32263) # Call to add_extension(...): (line 58) # Processing the call arguments (line 58) str_32267 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 58, 25), 'str', '_odepack') # Processing the call keyword arguments (line 58) # Obtaining an instance of the builtin type 'list' (line 59) list_32268 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 33), 'list') # Adding type elements to the builtin type 'list' instance (line 59) # Adding element type (line 59) str_32269 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 59, 33), list_32268, str_32269) keyword_32270 = list_32268 # Obtaining an instance of the builtin type 'list' (line 60) list_32271 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 35), 'list') # Adding type elements to the builtin type 'list' instance (line 60) # Adding element type (line 60) str_32272 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 36), 'str', 'lsoda') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32272) # Adding element type (line 60) str_32273 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 45), 'str', 'mach') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32273) # Getting the type of 'lapack_libs' (line 60) lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 60, 55), 'lapack_libs', False) # Applying the binary operator '+' (line 60) result_add_32275 = python_operator(stypy.reporting.localization.Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274) keyword_32276 = result_add_32275 # Getting the type of 'lsoda_src' (line 61) lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 34), 'lsoda_src', False) # Getting the type of 'mach_src' (line 61) mach_src_32278 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 46), 'mach_src', False) # Applying the binary operator '+' (line 61) result_add_32279 = python_operator(stypy.reporting.localization.Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278) keyword_32280 = result_add_32279 # Getting the type of 'odepack_opts' (line 62) odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 62, 27), 'odepack_opts', False) kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270, 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281} # Getting the type of 'config' (line 58) config_32265 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 58, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 58) add_extension_32266 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 58, 4), config_32265, 'add_extension') # Calling add_extension(args, kwargs) (line 58) add_extension_call_result_32283 = invoke(stypy.reporting.localization.Localization(__file__, 58, 4), add_extension_32266, *[str_32267], **kwargs_32282) # Call to add_extension(...): (line 65) # Processing the call arguments (line 65) str_32286 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 65, 25), 'str', 'vode') # Processing the call keyword arguments (line 65) # Obtaining an instance of the builtin type 'list' (line 66) list_32287 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 33), 'list') # Adding type elements to the builtin type 'list' instance (line 66) # Adding element type (line 66) str_32288 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 34), 'str', 'vode.pyf') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 66, 33), list_32287, str_32288) keyword_32289 = list_32287 # Obtaining an instance of the builtin type 'list' (line 67) list_32290 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 35), 'list') # Adding type elements to the builtin type 'list' instance (line 67) # Adding element type (line 67) str_32291 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 36), 'str', 'vode') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 67, 35), list_32290, str_32291) # Getting the type of 'lapack_libs' (line 67) lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 67, 46), 'lapack_libs', False) # Applying the binary operator '+' (line 67) result_add_32293 = python_operator(stypy.reporting.localization.Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292) keyword_32294 = result_add_32293 # Getting the type of 'vode_src' (line 68) vode_src_32295 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 68, 33), 'vode_src', False) keyword_32296 = vode_src_32295 # Getting the type of 'lapack_opt' (line 69) lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 69, 27), 'lapack_opt', False) kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289, 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297} # Getting the type of 'config' (line 65) config_32284 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 65, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 65) add_extension_32285 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 65, 4), config_32284, 'add_extension') # Calling add_extension(args, kwargs) (line 65) add_extension_call_result_32299 = invoke(stypy.reporting.localization.Localization(__file__, 65, 4), add_extension_32285, *[str_32286], **kwargs_32298) # Call to add_extension(...): (line 72) # Processing the call arguments (line 72) str_32302 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 72, 25), 'str', 'lsoda') # Processing the call keyword arguments (line 72) # Obtaining an instance of the builtin type 'list' (line 73) list_32303 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 33), 'list') # Adding type elements to the builtin type 'list' instance (line 73) # Adding element type (line 73) str_32304 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 73, 33), list_32303, str_32304) keyword_32305 = list_32303 # Obtaining an instance of the builtin type 'list' (line 74) list_32306 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 35), 'list') # Adding type elements to the builtin type 'list' instance (line 74) # Adding element type (line 74) str_32307 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 36), 'str', 'lsoda') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32307) # Adding element type (line 74) str_32308 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 45), 'str', 'mach') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32308) # Getting the type of 'lapack_libs' (line 74) lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 74, 55), 'lapack_libs', False) # Applying the binary operator '+' (line 74) result_add_32310 = python_operator(stypy.reporting.localization.Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309) keyword_32311 = result_add_32310 # Getting the type of 'lsoda_src' (line 75) lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 34), 'lsoda_src', False) # Getting the type of 'mach_src' (line 75) mach_src_32313 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 46), 'mach_src', False) # Applying the binary operator '+' (line 75) result_add_32314 = python_operator(stypy.reporting.localization.Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313) keyword_32315 = result_add_32314 # Getting the type of 'lapack_opt' (line 76) lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 76, 27), 'lapack_opt', False) kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305, 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316} # Getting the type of 'config' (line 72) config_32300 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 72, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 72) add_extension_32301 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 72, 4), config_32300, 'add_extension') # Calling add_extension(args, kwargs) (line 72) add_extension_call_result_32318 = invoke(stypy.reporting.localization.Localization(__file__, 72, 4), add_extension_32301, *[str_32302], **kwargs_32317) # Call to add_extension(...): (line 79) # Processing the call arguments (line 79) str_32321 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 79, 25), 'str', '_dop') # Processing the call keyword arguments (line 79) # Obtaining an instance of the builtin type 'list' (line 80) list_32322 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 33), 'list') # Adding type elements to the builtin type 'list' instance (line 80) # Adding element type (line 80) str_32323 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 34), 'str', 'dop.pyf') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 80, 33), list_32322, str_32323) keyword_32324 = list_32322 # Obtaining an instance of the builtin type 'list' (line 81) list_32325 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 35), 'list') # Adding type elements to the builtin type 'list' 
instance (line 81) # Adding element type (line 81) str_32326 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 36), 'str', 'dop') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 81, 35), list_32325, str_32326) keyword_32327 = list_32325 # Getting the type of 'dop_src' (line 82) dop_src_32328 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 82, 33), 'dop_src', False) keyword_32329 = dop_src_32328 kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324, 'depends': keyword_32329} # Getting the type of 'config' (line 79) config_32319 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 79, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 79) add_extension_32320 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 79, 4), config_32319, 'add_extension') # Calling add_extension(args, kwargs) (line 79) add_extension_call_result_32331 = invoke(stypy.reporting.localization.Localization(__file__, 79, 4), add_extension_32320, *[str_32321], **kwargs_32330) # Call to add_extension(...): (line 84) # Processing the call arguments (line 84) str_32334 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 84, 25), 'str', '_test_multivariate') # Processing the call keyword arguments (line 84) # Getting the type of 'quadpack_test_src' (line 85) quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 85, 33), 'quadpack_test_src', False) keyword_32336 = quadpack_test_src_32335 kwargs_32337 = {'sources': keyword_32336} # Getting the type of 'config' (line 84) config_32332 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 84, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 84) add_extension_32333 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 84, 4), config_32332, 'add_extension') # Calling add_extension(args, kwargs) (line 84) add_extension_call_result_32338 = invoke(stypy.reporting.localization.Localization(__file__, 84, 4), add_extension_32333, *[str_32334], **kwargs_32337) # Call to add_extension(...): (line 88) # Processing the call arguments (line 88) str_32341 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 88, 25), 'str', '_test_odeint_banded') # Processing the call keyword arguments (line 88) # Getting the type of 'odeint_banded_test_src' (line 89) odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 89, 33), 'odeint_banded_test_src', False) keyword_32343 = odeint_banded_test_src_32342 # Obtaining an instance of the builtin type 'list' (line 90) list_32344 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 35), 'list') # Adding type elements to the builtin type 'list' instance (line 90) # Adding element type (line 90) str_32345 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 36), 'str', 'lsoda') add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32345) # Adding element type (line 90) str_32346 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 45), 'str', 'mach') 
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32346) # Getting the type of 'lapack_libs' (line 90) lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 90, 55), 'lapack_libs', False) # Applying the binary operator '+' (line 90) result_add_32348 = python_operator(stypy.reporting.localization.Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347) keyword_32349 = result_add_32348 # Getting the type of 'lsoda_src' (line 91) lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 34), 'lsoda_src', False) # Getting the type of 'mach_src' (line 91) mach_src_32351 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 46), 'mach_src', False) # Applying the binary operator '+' (line 91) result_add_32352 = python_operator(stypy.reporting.localization.Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351) keyword_32353 = result_add_32352 # Getting the type of 'lapack_opt' (line 92) lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 92, 27), 'lapack_opt', False) kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343, 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354} # Getting the type of 'config' (line 88) config_32339 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 88, 4), 'config', False) # Obtaining the member 'add_extension' of a type (line 88) add_extension_32340 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 88, 4), config_32339, 'add_extension') # Calling add_extension(args, kwargs) (line 88) add_extension_call_result_32356 = invoke(stypy.reporting.localization.Localization(__file__, 88, 4), add_extension_32340, *[str_32341], **kwargs_32355) # Call to add_subpackage(...): (line 94) # Processing the call arguments (line 94) str_32359 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 94, 26), 'str', '_ivp') # Processing the call keyword arguments (line 94) kwargs_32360 = {} # Getting the type of 'config' (line 94) config_32357 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 94, 4), 'config', False) # Obtaining the member 'add_subpackage' of a type (line 94) add_subpackage_32358 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 94, 4), config_32357, 'add_subpackage') # Calling add_subpackage(args, kwargs) (line 94) add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359], **kwargs_32360) # Call to add_data_dir(...): (line 96) # Processing the call arguments (line 96) str_32364 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 96, 24), 'str', 'tests') # Processing the call keyword arguments (line 96) kwargs_32365 = {} # Getting the type of 'config' (line 96) config_32362 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 96, 4), 'config', False) # Obtaining the member 'add_data_dir' of a type (line 96) add_data_dir_32363 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 96, 4), config_32362, 'add_data_dir') # Calling add_data_dir(args, kwargs) (line 96) add_data_dir_call_result_32366 = 
invoke(stypy.reporting.localization.Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **kwargs_32365)

    # Getting the type of 'config' (line 97)
    config_32367 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 97, 11), 'config')
    # Assigning a type to the variable 'stypy_return_type' (line 97)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 97, 4), 'stypy_return_type', config_32367)

    # ################# End of 'configuration(...)' code ##################

    # Teardown call information
    teardown_call_information(localization, arguments)

    # Storing the return type of function 'configuration' in the type store
    # Getting the type of 'stypy_return_type' (line 9)
    stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'stypy_return_type')
    module_type_store.store_return_type_of_current_context(stypy_return_type_32368)

    # Destroy the current context
    module_type_store = module_type_store.close_function_context()

    # Return type of the function 'configuration'
    return stypy_return_type_32368

# Assigning a type to the variable 'configuration' (line 9)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'configuration', configuration)

if (__name__ == '__main__'):
    stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 101, 4))

    # 'from numpy.distutils.core import setup' statement (line 101)
    update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
    import_32369 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core')

    if (type(import_32369) is not StypyTypeError):

        if (import_32369 != 'pyd_module'):
            __import__(import_32369)
            sys_modules_32370 = sys.modules[import_32369]
            import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', sys_modules_32370.module_type_store, module_type_store, ['setup'])
            nest_module(stypy.reporting.localization.Localization(__file__, 101, 4), __file__, sys_modules_32370, sys_modules_32370.module_type_store, module_type_store)
        else:
            from numpy.distutils.core import setup
            import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', None, module_type_store, ['setup'], [setup])

    else:
        # Assigning a type to the variable 'numpy.distutils.core' (line 101)
        module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', import_32369)

    remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')

    # Call to setup(...): (line 102)
    # Processing the call keyword arguments (line 102)

    # Call to todict(...): (line 102)
    # Processing the call keyword arguments (line 102)
    kwargs_32378 = {}

    # Call to configuration(...): (line 102)
    # Processing the call keyword arguments (line 102)
    str_32373 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 102, 35), 'str', '')
    keyword_32374 = str_32373
    kwargs_32375 = {'top_path': keyword_32374}
    # Getting the type of 'configuration' (line 102)
    configuration_32372 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 12), 'configuration', False)
    # Calling configuration(args, kwargs) (line 102)
    configuration_call_result_32376 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_32372, *[], **kwargs_32375)

    # Obtaining the member 'todict' of a type (line 102)
    todict_32377 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_call_result_32376, 'todict')
    # Calling todict(args, kwargs) (line 102)
    todict_call_result_32379 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)
    kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}
    # Getting the type of 'setup' (line 102)
    setup_32371 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 4), 'setup', False)
    # Calling setup(args, kwargs) (line 102)
    setup_call_result_32381 = invoke(stypy.reporting.localization.Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)

# ################# End of the type inference program ##################

module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
normal
{ "blob_id": "4453b8176cda60a3a8f4800860b87bddfdb6cafa", "index": 7963, "step-1": "<mask token>\n\n\n@norecursion\ndef configuration(localization, *varargs, **kwargs):\n global module_type_store\n str_32070 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 9, 33), 'str', '')\n None_32071 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 9, 45), 'None')\n defaults = [str_32070, None_32071]\n module_type_store = module_type_store.open_function_context('configuration'\n , 9, 0, False)\n configuration.stypy_localization = localization\n configuration.stypy_type_of_self = None\n configuration.stypy_type_store = module_type_store\n configuration.stypy_function_name = 'configuration'\n configuration.stypy_param_names_list = ['parent_package', 'top_path']\n configuration.stypy_varargs_param_name = None\n configuration.stypy_kwargs_param_name = None\n configuration.stypy_call_defaults = defaults\n configuration.stypy_call_varargs = varargs\n configuration.stypy_call_kwargs = kwargs\n arguments = process_argument_values(localization, None,\n module_type_store, 'configuration', ['parent_package', 'top_path'],\n None, None, defaults, varargs, kwargs)\n if is_error_type(arguments):\n module_type_store = module_type_store.close_function_context()\n return arguments\n init_call_information(module_type_store, 'configuration', localization,\n ['parent_package', 'top_path'], arguments)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 0, 0), 'stypy_return_type', None)\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 10, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32072 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util'\n )\n if type(import_32072) is not StypyTypeError:\n if import_32072 != 'pyd_module':\n __import__(import_32072)\n sys_modules_32073 = sys.modules[import_32072]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util',\n sys_modules_32073.module_type_store, module_type_store, [\n 'Configuration'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 10, 4), __file__, sys_modules_32073, sys_modules_32073.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.misc_util import Configuration\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util', None,\n module_type_store, ['Configuration'], [Configuration])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 10, 4), 'numpy.distutils.misc_util',\n import_32072)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 11, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32074 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 11, 4),\n 'numpy.distutils.system_info')\n if type(import_32074) is not StypyTypeError:\n if import_32074 != 'pyd_module':\n __import__(import_32074)\n sys_modules_32075 = sys.modules[import_32074]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 
'numpy.distutils.system_info',\n sys_modules_32075.module_type_store, module_type_store, [\n 'get_info'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 11, 4), __file__, sys_modules_32075, sys_modules_32075.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.system_info import get_info\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info', None,\n module_type_store, ['get_info'], [get_info])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 11, 4), 'numpy.distutils.system_info',\n import_32074)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n str_32077 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 12, 27), 'str', 'integrate')\n parent_package_32078 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 40), 'parent_package', False)\n top_path_32079 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 56), 'top_path', False)\n kwargs_32080 = {}\n Configuration_32076 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 13), 'Configuration', False)\n Configuration_call_result_32081 = invoke(stypy.reporting.localization.\n Localization(__file__, 12, 13), Configuration_32076, *[str_32077,\n parent_package_32078, top_path_32079], **kwargs_32080)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 12, 4), 'config', Configuration_call_result_32081)\n str_32084 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')\n int_32085 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 60), 'int')\n keyword_32086 = int_32085\n kwargs_32087 = {'notfound_action': keyword_32086}\n get_info_32083 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 15, 22), 'get_info', False)\n get_info_call_result_32088 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 22), get_info_32083, *[str_32084], **\n kwargs_32087)\n kwargs_32089 = {}\n dict_32082 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 15, 17), 'dict', False)\n dict_call_result_32090 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 17), dict_32082, *[\n get_info_call_result_32088], **kwargs_32089)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 15, 4), 'lapack_opt', dict_call_result_32090)\n str_32093 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 33), 'str', 'libraries')\n list_32094 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 46), 'list')\n kwargs_32095 = {}\n lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 18, 18), 'lapack_opt', False)\n pop_32092 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')\n pop_call_result_32096 = invoke(stypy.reporting.localization.\n Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094],\n **kwargs_32095)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 18, 4), 'lapack_libs', pop_call_result_32096)\n list_32097 = 
get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 15), 'list')\n str_32099 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 21), 'str', 'mach')\n str_32100 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 28), 'str', '*.f')\n kwargs_32101 = {}\n join_32098 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 20, 16), 'join', False)\n join_call_result_32102 = invoke(stypy.reporting.localization.\n Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100],\n **kwargs_32101)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 20, 15), list_32097, join_call_result_32102)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 20, 4), 'mach_src', list_32097)\n list_32103 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 19), 'list')\n str_32105 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 25), 'str', 'quadpack')\n str_32106 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 37), 'str', '*.f')\n kwargs_32107 = {}\n join_32104 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 21, 20), 'join', False)\n join_call_result_32108 = invoke(stypy.reporting.localization.\n Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106],\n **kwargs_32107)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 21, 19), list_32103, join_call_result_32108)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 21, 4), 'quadpack_src', list_32103)\n list_32114 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 47), 'list')\n str_32115 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32115)\n str_32116 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32116)\n str_32117 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 34), 'str', 'cfode.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32117)\n str_32118 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 8), 'str', 'ewset.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32118)\n str_32119 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32119)\n str_32120 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 30), 'str', 'intdy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32120)\n str_32121 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')\n 
add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32121)\n str_32122 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 19), 'str', 'prja.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32122)\n str_32123 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 29), 'str', 'solsy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32123)\n str_32124 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 40), 'str', 'srcma.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32124)\n str_32125 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 8), 'str', 'stoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32125)\n str_32126 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32126)\n str_32127 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32127)\n str_32128 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32128)\n str_32129 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32129)\n comprehension_32130 = get_contained_elements_type(stypy.reporting.\n localization.Localization(__file__, 22, 17), list_32114)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 17), 'fn', comprehension_32130)\n str_32110 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 22), 'str', 'odepack')\n fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 22, 33), 'fn', False)\n kwargs_32112 = {}\n join_32109 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 22, 17), 'join', False)\n join_call_result_32113 = invoke(stypy.reporting.localization.\n Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111],\n **kwargs_32112)\n list_32131 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 17), 'list')\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 17), list_32131, join_call_result_32113)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 4), 'lsoda_src', list_32131)\n list_32132 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 15), 'list')\n str_32134 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 21), 'str', 'odepack')\n str_32135 = 
get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 32), 'str', 'vode.f')\n kwargs_32136 = {}\n join_32133 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 16), 'join', False)\n join_call_result_32137 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135],\n **kwargs_32136)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32137)\n str_32139 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 48), 'str', 'odepack')\n str_32140 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 59), 'str', 'zvode.f')\n kwargs_32141 = {}\n join_32138 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 43), 'join', False)\n join_call_result_32142 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140],\n **kwargs_32141)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32142)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 28, 4), 'vode_src', list_32132)\n list_32143 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 14), 'list')\n str_32145 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 20), 'str', 'dop')\n str_32146 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 26), 'str', '*.f')\n kwargs_32147 = {}\n join_32144 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 29, 15), 'join', False)\n join_call_result_32148 = invoke(stypy.reporting.localization.\n Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146],\n **kwargs_32147)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 29, 14), list_32143, join_call_result_32148)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 29, 4), 'dop_src', list_32143)\n list_32149 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 24), 'list')\n str_32151 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 30), 'str', 'tests')\n str_32152 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 38), 'str',\n '_test_multivariate.c')\n kwargs_32153 = {}\n join_32150 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 30, 25), 'join', False)\n join_call_result_32154 = invoke(stypy.reporting.localization.\n Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152],\n **kwargs_32153)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 30, 24), list_32149, join_call_result_32154)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 30, 4), 'quadpack_test_src', list_32149)\n list_32155 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 29), 'list')\n str_32157 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 35), 'str', 'tests')\n str_32158 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 
31, 44), 'str', 'banded5x5.f')\n kwargs_32159 = {}\n join_32156 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 31, 30), 'join', False)\n join_call_result_32160 = invoke(stypy.reporting.localization.\n Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158],\n **kwargs_32159)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 31, 29), list_32155, join_call_result_32160)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 31, 4), 'odeint_banded_test_src', list_32155)\n str_32163 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 33, 23), 'str', 'mach')\n mach_src_32164 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 39), 'mach_src', False)\n keyword_32165 = mach_src_32164\n dict_32166 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 33), 'dict')\n str_32167 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 34), 'str', 'noopt')\n tuple_32168 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 43), 'tuple')\n file___32169 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 34, 43), '__file__', False)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, file___32169)\n int_32170 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 52), 'int')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, int_32170)\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 33), dict_32166, (str_32167, tuple_32168))\n keyword_32171 = dict_32166\n kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}\n config_32161 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 4), 'config', False)\n add_library_32162 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 33, 4), config_32161,\n 'add_library')\n add_library_call_result_32173 = invoke(stypy.reporting.localization.\n Localization(__file__, 33, 4), add_library_32162, *[str_32163], **\n kwargs_32172)\n str_32176 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 35, 23), 'str', 'quadpack')\n quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 43), 'quadpack_src', False)\n keyword_32178 = quadpack_src_32177\n kwargs_32179 = {'sources': keyword_32178}\n config_32174 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 4), 'config', False)\n add_library_32175 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 35, 4), config_32174,\n 'add_library')\n add_library_call_result_32180 = invoke(stypy.reporting.localization.\n Localization(__file__, 35, 4), add_library_32175, *[str_32176], **\n kwargs_32179)\n str_32183 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 36, 23), 'str', 'lsoda')\n lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 40), 'lsoda_src', False)\n keyword_32185 = lsoda_src_32184\n kwargs_32186 = {'sources': keyword_32185}\n config_32181 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 4), 'config', False)\n add_library_32182 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 36, 4), config_32181,\n 'add_library')\n add_library_call_result_32187 = invoke(stypy.reporting.localization.\n Localization(__file__, 36, 4), add_library_32182, *[str_32183], **\n kwargs_32186)\n str_32190 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 37, 23), 'str', 'vode')\n vode_src_32191 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 39), 'vode_src', False)\n keyword_32192 = vode_src_32191\n kwargs_32193 = {'sources': keyword_32192}\n config_32188 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 4), 'config', False)\n add_library_32189 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 37, 4), config_32188,\n 'add_library')\n add_library_call_result_32194 = invoke(stypy.reporting.localization.\n Localization(__file__, 37, 4), add_library_32189, *[str_32190], **\n kwargs_32193)\n str_32197 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 38, 23), 'str', 'dop')\n dop_src_32198 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 38), 'dop_src', False)\n keyword_32199 = dop_src_32198\n kwargs_32200 = {'sources': keyword_32199}\n config_32195 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 4), 'config', False)\n add_library_32196 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 38, 4), config_32195,\n 'add_library')\n add_library_call_result_32201 = invoke(stypy.reporting.localization.\n Localization(__file__, 38, 4), add_library_32196, *[str_32197], **\n kwargs_32200)\n list_32202 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 19), 'list')\n file___32207 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 42, 41), '__file__', False)\n kwargs_32208 = {}\n os_32204 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 42, 25), 'os', False)\n path_32205 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), os_32204, 'path')\n dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), path_32205, 'dirname')\n dirname_call_result_32209 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 25), dirname_32206, *[file___32207], **\n kwargs_32208)\n str_32210 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 52), 'str', '..')\n str_32211 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 58), 'str', '_lib')\n str_32212 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 66), 'str', 'src')\n kwargs_32213 = {}\n join_32203 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 42, 20), 'join', False)\n join_call_result_32214 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 20), join_32203, *[\n dirname_call_result_32209, str_32210, str_32211, str_32212], **\n kwargs_32213)\n 
add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 42, 19), list_32202, join_call_result_32214)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 42, 4), 'include_dirs', list_32202)\n str_32215 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 43, 7), 'str', 'include_dirs')\n lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 43, 25), 'lapack_opt')\n result_contains_32217 = python_operator(stypy.reporting.localization.\n Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)\n if_condition_32218 = is_suitable_condition(stypy.reporting.localization\n .Localization(__file__, 43, 4), result_contains_32217)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 43, 4), 'if_condition_32218', if_condition_32218)\n module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')\n lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 44, 26), 'lapack_opt', False)\n kwargs_32221 = {}\n dict_32219 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 44, 21), 'dict', False)\n dict_call_result_32222 = invoke(stypy.reporting.localization.\n Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **\n kwargs_32221)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 44, 8), 'lapack_opt', dict_call_result_32222)\n str_32227 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 45, 43), 'str', 'include_dirs')\n kwargs_32228 = {}\n lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 28), 'lapack_opt', False)\n pop_32226 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')\n pop_call_result_32229 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228\n )\n kwargs_32230 = {}\n include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 8), 'include_dirs', False)\n extend_32224 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 8), include_dirs_32223,\n 'extend')\n extend_call_result_32231 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 8), extend_32224, *[\n pop_call_result_32229], **kwargs_32230)\n module_type_store = module_type_store.join_ssa_context()\n str_32234 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 47, 25), 'str', '_quadpack')\n list_32235 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 33), 'list')\n str_32236 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c'\n )\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 48, 33), list_32235, str_32236)\n keyword_32237 = list_32235\n list_32238 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 35), 'list')\n str_32239 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 36), 'str', 'quadpack')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, 
str_32239)\n str_32240 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 48), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32240)\n lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 49, 58), 'lapack_libs', False)\n result_add_32242 = python_operator(stypy.reporting.localization.\n Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)\n keyword_32243 = result_add_32242\n list_32244 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 34), 'list')\n str_32245 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 50, 34), list_32244, str_32245)\n quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 36), 'quadpack_src', False)\n result_add_32247 = python_operator(stypy.reporting.localization.\n Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)\n mach_src_32248 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 51), 'mach_src', False)\n result_add_32249 = python_operator(stypy.reporting.localization.\n Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)\n keyword_32250 = result_add_32249\n include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 52, 38), 'include_dirs', False)\n keyword_32252 = include_dirs_32251\n lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 53, 27), 'lapack_opt', False)\n kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237,\n 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253,\n 'include_dirs': keyword_32252}\n config_32232 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 47, 4), 'config', False)\n add_extension_32233 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 47, 4), config_32232,\n 'add_extension')\n add_extension_call_result_32255 = invoke(stypy.reporting.localization.\n Localization(__file__, 47, 4), add_extension_32233, *[str_32234],\n **kwargs_32254)\n kwargs_32258 = {}\n lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 56, 19), 'lapack_opt', False)\n copy_32257 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')\n copy_call_result_32259 = invoke(stypy.reporting.localization.\n Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 56, 4), 'odepack_opts', copy_call_result_32259)\n numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)\n kwargs_32263 = {}\n odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 4), 'odepack_opts', False)\n update_32261 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 57, 4), odepack_opts_32260,\n 'update')\n update_call_result_32264 = invoke(stypy.reporting.localization.\n 
Localization(__file__, 57, 4), update_32261, *[\n numpy_nodepr_api_32262], **kwargs_32263)\n str_32267 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 58, 25), 'str', '_odepack')\n list_32268 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 33), 'list')\n str_32269 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 59, 33), list_32268, str_32269)\n keyword_32270 = list_32268\n list_32271 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 35), 'list')\n str_32272 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32272)\n str_32273 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32273)\n lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 60, 55), 'lapack_libs', False)\n result_add_32275 = python_operator(stypy.reporting.localization.\n Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)\n keyword_32276 = result_add_32275\n lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 34), 'lsoda_src', False)\n mach_src_32278 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 46), 'mach_src', False)\n result_add_32279 = python_operator(stypy.reporting.localization.\n Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)\n keyword_32280 = result_add_32279\n odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 62, 27), 'odepack_opts', False)\n kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270,\n 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}\n config_32265 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 58, 4), 'config', False)\n add_extension_32266 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 58, 4), config_32265,\n 'add_extension')\n add_extension_call_result_32283 = invoke(stypy.reporting.localization.\n Localization(__file__, 58, 4), add_extension_32266, *[str_32267],\n **kwargs_32282)\n str_32286 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 65, 25), 'str', 'vode')\n list_32287 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 33), 'list')\n str_32288 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 34), 'str', 'vode.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 66, 33), list_32287, str_32288)\n keyword_32289 = list_32287\n list_32290 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 35), 'list')\n str_32291 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 36), 'str', 'vode')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n 
__file__, 67, 35), list_32290, str_32291)\n lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 67, 46), 'lapack_libs', False)\n result_add_32293 = python_operator(stypy.reporting.localization.\n Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)\n keyword_32294 = result_add_32293\n vode_src_32295 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 68, 33), 'vode_src', False)\n keyword_32296 = vode_src_32295\n lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 69, 27), 'lapack_opt', False)\n kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289,\n 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}\n config_32284 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 65, 4), 'config', False)\n add_extension_32285 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 65, 4), config_32284,\n 'add_extension')\n add_extension_call_result_32299 = invoke(stypy.reporting.localization.\n Localization(__file__, 65, 4), add_extension_32285, *[str_32286],\n **kwargs_32298)\n str_32302 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 72, 25), 'str', 'lsoda')\n list_32303 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 33), 'list')\n str_32304 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 73, 33), list_32303, str_32304)\n keyword_32305 = list_32303\n list_32306 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 35), 'list')\n str_32307 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32307)\n str_32308 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32308)\n lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 74, 55), 'lapack_libs', False)\n result_add_32310 = python_operator(stypy.reporting.localization.\n Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)\n keyword_32311 = result_add_32310\n lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 34), 'lsoda_src', False)\n mach_src_32313 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 46), 'mach_src', False)\n result_add_32314 = python_operator(stypy.reporting.localization.\n Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313)\n keyword_32315 = result_add_32314\n lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 76, 27), 'lapack_opt', False)\n kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305,\n 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}\n config_32300 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 72, 4), 'config', False)\n add_extension_32301 = 
module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 72, 4), config_32300,\n 'add_extension')\n add_extension_call_result_32318 = invoke(stypy.reporting.localization.\n Localization(__file__, 72, 4), add_extension_32301, *[str_32302],\n **kwargs_32317)\n str_32321 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 79, 25), 'str', '_dop')\n list_32322 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 33), 'list')\n str_32323 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 80, 33), list_32322, str_32323)\n keyword_32324 = list_32322\n list_32325 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 35), 'list')\n str_32326 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 36), 'str', 'dop')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 81, 35), list_32325, str_32326)\n keyword_32327 = list_32325\n dop_src_32328 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 82, 33), 'dop_src', False)\n keyword_32329 = dop_src_32328\n kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324,\n 'depends': keyword_32329}\n config_32319 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 79, 4), 'config', False)\n add_extension_32320 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 79, 4), config_32319,\n 'add_extension')\n add_extension_call_result_32331 = invoke(stypy.reporting.localization.\n Localization(__file__, 79, 4), add_extension_32320, *[str_32321],\n **kwargs_32330)\n str_32334 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 84, 25), 'str',\n '_test_multivariate')\n quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 85, 33), 'quadpack_test_src', \n False)\n keyword_32336 = quadpack_test_src_32335\n kwargs_32337 = {'sources': keyword_32336}\n config_32332 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 84, 4), 'config', False)\n add_extension_32333 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 84, 4), config_32332,\n 'add_extension')\n add_extension_call_result_32338 = invoke(stypy.reporting.localization.\n Localization(__file__, 84, 4), add_extension_32333, *[str_32334],\n **kwargs_32337)\n str_32341 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 88, 25), 'str',\n '_test_odeint_banded')\n odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.\n reporting.localization.Localization(__file__, 89, 33),\n 'odeint_banded_test_src', False)\n keyword_32343 = odeint_banded_test_src_32342\n list_32344 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 35), 'list')\n str_32345 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32345)\n str_32346 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 
45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32346)\n lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 90, 55), 'lapack_libs', False)\n result_add_32348 = python_operator(stypy.reporting.localization.\n Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347)\n keyword_32349 = result_add_32348\n lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 34), 'lsoda_src', False)\n mach_src_32351 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 46), 'mach_src', False)\n result_add_32352 = python_operator(stypy.reporting.localization.\n Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)\n keyword_32353 = result_add_32352\n lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 92, 27), 'lapack_opt', False)\n kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343,\n 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}\n config_32339 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 88, 4), 'config', False)\n add_extension_32340 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 88, 4), config_32339,\n 'add_extension')\n add_extension_call_result_32356 = invoke(stypy.reporting.localization.\n Localization(__file__, 88, 4), add_extension_32340, *[str_32341],\n **kwargs_32355)\n str_32359 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 94, 26), 'str', '_ivp')\n kwargs_32360 = {}\n config_32357 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 94, 4), 'config', False)\n add_subpackage_32358 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 94, 4), config_32357,\n 'add_subpackage')\n add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.\n Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359],\n **kwargs_32360)\n str_32364 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 96, 24), 'str', 'tests')\n kwargs_32365 = {}\n config_32362 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 96, 4), 'config', False)\n add_data_dir_32363 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 96, 4), config_32362,\n 'add_data_dir')\n add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.\n Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **\n kwargs_32365)\n config_32367 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 97, 11), 'config')\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 97, 4), 'stypy_return_type', config_32367)\n teardown_call_information(localization, arguments)\n stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 9, 0), 'stypy_return_type')\n module_type_store.store_return_type_of_current_context(\n stypy_return_type_32368)\n module_type_store = module_type_store.close_function_context()\n return stypy_return_type_32368\n\n\n<mask token>\n", "step-2": "<mask token>\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n 
localization.Localization(__file__, 3, 0))\n<mask token>\nimport_module(stypy.reporting.localization.Localization(__file__, 3, 0),\n 'os', os, module_type_store)\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 4, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n<mask token>\nif type(import_32066) is not StypyTypeError:\n if import_32066 != 'pyd_module':\n __import__(import_32066)\n sys_modules_32067 = sys.modules[import_32066]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', sys_modules_32067.module_type_store,\n module_type_store, ['join'])\n nest_module(stypy.reporting.localization.Localization(__file__, 4, \n 0), __file__, sys_modules_32067, sys_modules_32067.\n module_type_store, module_type_store)\n else:\n from os.path import join\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', None, module_type_store, ['join'],\n [join])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 4, 0), 'os.path', import_32066)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 6, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n<mask token>\nif type(import_32068) is not StypyTypeError:\n if import_32068 != 'pyd_module':\n __import__(import_32068)\n sys_modules_32069 = sys.modules[import_32068]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', sys_modules_32069.\n module_type_store, module_type_store, ['numpy_nodepr_api'])\n nest_module(stypy.reporting.localization.Localization(__file__, 6, \n 0), __file__, sys_modules_32069, sys_modules_32069.\n module_type_store, module_type_store)\n else:\n from scipy._build_utils import numpy_nodepr_api\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', None, module_type_store,\n ['numpy_nodepr_api'], [numpy_nodepr_api])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 6, 0), 'scipy._build_utils', import_32068)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n\n\n@norecursion\ndef configuration(localization, *varargs, **kwargs):\n global module_type_store\n str_32070 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 9, 33), 'str', '')\n None_32071 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 9, 45), 'None')\n defaults = [str_32070, None_32071]\n module_type_store = module_type_store.open_function_context('configuration'\n , 9, 0, False)\n configuration.stypy_localization = localization\n configuration.stypy_type_of_self = None\n configuration.stypy_type_store = module_type_store\n configuration.stypy_function_name = 'configuration'\n configuration.stypy_param_names_list = ['parent_package', 'top_path']\n configuration.stypy_varargs_param_name = None\n configuration.stypy_kwargs_param_name = None\n configuration.stypy_call_defaults = defaults\n configuration.stypy_call_varargs = varargs\n configuration.stypy_call_kwargs = kwargs\n arguments = process_argument_values(localization, None,\n module_type_store, 'configuration', ['parent_package', 'top_path'],\n None, None, 
defaults, varargs, kwargs)\n if is_error_type(arguments):\n module_type_store = module_type_store.close_function_context()\n return arguments\n init_call_information(module_type_store, 'configuration', localization,\n ['parent_package', 'top_path'], arguments)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 0, 0), 'stypy_return_type', None)\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 10, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32072 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util'\n )\n if type(import_32072) is not StypyTypeError:\n if import_32072 != 'pyd_module':\n __import__(import_32072)\n sys_modules_32073 = sys.modules[import_32072]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util',\n sys_modules_32073.module_type_store, module_type_store, [\n 'Configuration'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 10, 4), __file__, sys_modules_32073, sys_modules_32073.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.misc_util import Configuration\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util', None,\n module_type_store, ['Configuration'], [Configuration])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 10, 4), 'numpy.distutils.misc_util',\n import_32072)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 11, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32074 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 11, 4),\n 'numpy.distutils.system_info')\n if type(import_32074) is not StypyTypeError:\n if import_32074 != 'pyd_module':\n __import__(import_32074)\n sys_modules_32075 = sys.modules[import_32074]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info',\n sys_modules_32075.module_type_store, module_type_store, [\n 'get_info'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 11, 4), __file__, sys_modules_32075, sys_modules_32075.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.system_info import get_info\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info', None,\n module_type_store, ['get_info'], [get_info])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 11, 4), 'numpy.distutils.system_info',\n import_32074)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n str_32077 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 12, 27), 'str', 'integrate')\n parent_package_32078 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 40), 'parent_package', False)\n top_path_32079 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 56), 'top_path', False)\n kwargs_32080 = {}\n Configuration_32076 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 13), 'Configuration', False)\n Configuration_call_result_32081 = invoke(stypy.reporting.localization.\n Localization(__file__, 12, 13), Configuration_32076, *[str_32077,\n parent_package_32078, top_path_32079], **kwargs_32080)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 12, 4), 'config', Configuration_call_result_32081)\n str_32084 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')\n int_32085 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 60), 'int')\n keyword_32086 = int_32085\n kwargs_32087 = {'notfound_action': keyword_32086}\n get_info_32083 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 15, 22), 'get_info', False)\n get_info_call_result_32088 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 22), get_info_32083, *[str_32084], **\n kwargs_32087)\n kwargs_32089 = {}\n dict_32082 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 15, 17), 'dict', False)\n dict_call_result_32090 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 17), dict_32082, *[\n get_info_call_result_32088], **kwargs_32089)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 15, 4), 'lapack_opt', dict_call_result_32090)\n str_32093 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 33), 'str', 'libraries')\n list_32094 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 46), 'list')\n kwargs_32095 = {}\n lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 18, 18), 'lapack_opt', False)\n pop_32092 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')\n pop_call_result_32096 = invoke(stypy.reporting.localization.\n Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094],\n **kwargs_32095)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 18, 4), 'lapack_libs', pop_call_result_32096)\n list_32097 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 15), 'list')\n str_32099 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 21), 'str', 'mach')\n str_32100 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 28), 'str', '*.f')\n kwargs_32101 = {}\n join_32098 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 20, 16), 'join', False)\n join_call_result_32102 = invoke(stypy.reporting.localization.\n Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100],\n **kwargs_32101)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 20, 15), list_32097, join_call_result_32102)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 20, 4), 'mach_src', list_32097)\n list_32103 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 19), 'list')\n str_32105 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 25), 'str', 'quadpack')\n str_32106 = 
get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 37), 'str', '*.f')\n kwargs_32107 = {}\n join_32104 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 21, 20), 'join', False)\n join_call_result_32108 = invoke(stypy.reporting.localization.\n Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106],\n **kwargs_32107)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 21, 19), list_32103, join_call_result_32108)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 21, 4), 'quadpack_src', list_32103)\n list_32114 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 47), 'list')\n str_32115 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32115)\n str_32116 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32116)\n str_32117 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 34), 'str', 'cfode.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32117)\n str_32118 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 8), 'str', 'ewset.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32118)\n str_32119 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32119)\n str_32120 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 30), 'str', 'intdy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32120)\n str_32121 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32121)\n str_32122 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 19), 'str', 'prja.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32122)\n str_32123 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 29), 'str', 'solsy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32123)\n str_32124 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 40), 'str', 'srcma.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32124)\n str_32125 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 8), 'str', 'stoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32125)\n str_32126 = get_builtin_python_type_instance(stypy.reporting.\n 
localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32126)\n str_32127 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32127)\n str_32128 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32128)\n str_32129 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32129)\n comprehension_32130 = get_contained_elements_type(stypy.reporting.\n localization.Localization(__file__, 22, 17), list_32114)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 17), 'fn', comprehension_32130)\n str_32110 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 22), 'str', 'odepack')\n fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 22, 33), 'fn', False)\n kwargs_32112 = {}\n join_32109 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 22, 17), 'join', False)\n join_call_result_32113 = invoke(stypy.reporting.localization.\n Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111],\n **kwargs_32112)\n list_32131 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 17), 'list')\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 17), list_32131, join_call_result_32113)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 4), 'lsoda_src', list_32131)\n list_32132 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 15), 'list')\n str_32134 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 21), 'str', 'odepack')\n str_32135 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 32), 'str', 'vode.f')\n kwargs_32136 = {}\n join_32133 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 16), 'join', False)\n join_call_result_32137 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135],\n **kwargs_32136)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32137)\n str_32139 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 48), 'str', 'odepack')\n str_32140 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 59), 'str', 'zvode.f')\n kwargs_32141 = {}\n join_32138 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 43), 'join', False)\n join_call_result_32142 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140],\n **kwargs_32141)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, 
join_call_result_32142)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 28, 4), 'vode_src', list_32132)\n list_32143 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 14), 'list')\n str_32145 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 20), 'str', 'dop')\n str_32146 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 26), 'str', '*.f')\n kwargs_32147 = {}\n join_32144 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 29, 15), 'join', False)\n join_call_result_32148 = invoke(stypy.reporting.localization.\n Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146],\n **kwargs_32147)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 29, 14), list_32143, join_call_result_32148)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 29, 4), 'dop_src', list_32143)\n list_32149 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 24), 'list')\n str_32151 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 30), 'str', 'tests')\n str_32152 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 38), 'str',\n '_test_multivariate.c')\n kwargs_32153 = {}\n join_32150 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 30, 25), 'join', False)\n join_call_result_32154 = invoke(stypy.reporting.localization.\n Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152],\n **kwargs_32153)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 30, 24), list_32149, join_call_result_32154)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 30, 4), 'quadpack_test_src', list_32149)\n list_32155 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 29), 'list')\n str_32157 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 35), 'str', 'tests')\n str_32158 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f')\n kwargs_32159 = {}\n join_32156 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 31, 30), 'join', False)\n join_call_result_32160 = invoke(stypy.reporting.localization.\n Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158],\n **kwargs_32159)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 31, 29), list_32155, join_call_result_32160)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 31, 4), 'odeint_banded_test_src', list_32155)\n str_32163 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 33, 23), 'str', 'mach')\n mach_src_32164 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 39), 'mach_src', False)\n keyword_32165 = mach_src_32164\n dict_32166 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 33), 'dict')\n str_32167 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 34), 'str', 'noopt')\n tuple_32168 = get_builtin_python_type_instance(stypy.reporting.\n 
localization.Localization(__file__, 34, 43), 'tuple')\n file___32169 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 34, 43), '__file__', False)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, file___32169)\n int_32170 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 52), 'int')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, int_32170)\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 33), dict_32166, (str_32167, tuple_32168))\n keyword_32171 = dict_32166\n kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}\n config_32161 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 4), 'config', False)\n add_library_32162 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 33, 4), config_32161,\n 'add_library')\n add_library_call_result_32173 = invoke(stypy.reporting.localization.\n Localization(__file__, 33, 4), add_library_32162, *[str_32163], **\n kwargs_32172)\n str_32176 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 35, 23), 'str', 'quadpack')\n quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 43), 'quadpack_src', False)\n keyword_32178 = quadpack_src_32177\n kwargs_32179 = {'sources': keyword_32178}\n config_32174 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 4), 'config', False)\n add_library_32175 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 35, 4), config_32174,\n 'add_library')\n add_library_call_result_32180 = invoke(stypy.reporting.localization.\n Localization(__file__, 35, 4), add_library_32175, *[str_32176], **\n kwargs_32179)\n str_32183 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 36, 23), 'str', 'lsoda')\n lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 40), 'lsoda_src', False)\n keyword_32185 = lsoda_src_32184\n kwargs_32186 = {'sources': keyword_32185}\n config_32181 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 4), 'config', False)\n add_library_32182 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 36, 4), config_32181,\n 'add_library')\n add_library_call_result_32187 = invoke(stypy.reporting.localization.\n Localization(__file__, 36, 4), add_library_32182, *[str_32183], **\n kwargs_32186)\n str_32190 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 37, 23), 'str', 'vode')\n vode_src_32191 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 39), 'vode_src', False)\n keyword_32192 = vode_src_32191\n kwargs_32193 = {'sources': keyword_32192}\n config_32188 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 4), 'config', False)\n add_library_32189 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 37, 4), config_32188,\n 'add_library')\n add_library_call_result_32194 = invoke(stypy.reporting.localization.\n Localization(__file__, 37, 4), add_library_32189, *[str_32190], **\n kwargs_32193)\n str_32197 
= get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 38, 23), 'str', 'dop')\n dop_src_32198 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 38), 'dop_src', False)\n keyword_32199 = dop_src_32198\n kwargs_32200 = {'sources': keyword_32199}\n config_32195 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 4), 'config', False)\n add_library_32196 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 38, 4), config_32195,\n 'add_library')\n add_library_call_result_32201 = invoke(stypy.reporting.localization.\n Localization(__file__, 38, 4), add_library_32196, *[str_32197], **\n kwargs_32200)\n list_32202 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 19), 'list')\n file___32207 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 42, 41), '__file__', False)\n kwargs_32208 = {}\n os_32204 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 42, 25), 'os', False)\n path_32205 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), os_32204, 'path')\n dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), path_32205, 'dirname')\n dirname_call_result_32209 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 25), dirname_32206, *[file___32207], **\n kwargs_32208)\n str_32210 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 52), 'str', '..')\n str_32211 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 58), 'str', '_lib')\n str_32212 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 66), 'str', 'src')\n kwargs_32213 = {}\n join_32203 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 42, 20), 'join', False)\n join_call_result_32214 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 20), join_32203, *[\n dirname_call_result_32209, str_32210, str_32211, str_32212], **\n kwargs_32213)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 42, 19), list_32202, join_call_result_32214)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 42, 4), 'include_dirs', list_32202)\n str_32215 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 43, 7), 'str', 'include_dirs')\n lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 43, 25), 'lapack_opt')\n result_contains_32217 = python_operator(stypy.reporting.localization.\n Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)\n if_condition_32218 = is_suitable_condition(stypy.reporting.localization\n .Localization(__file__, 43, 4), result_contains_32217)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 43, 4), 'if_condition_32218', if_condition_32218)\n module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')\n lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 44, 26), 'lapack_opt', False)\n kwargs_32221 = {}\n dict_32219 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 44, 21), 
'dict', False)\n dict_call_result_32222 = invoke(stypy.reporting.localization.\n Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **\n kwargs_32221)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 44, 8), 'lapack_opt', dict_call_result_32222)\n str_32227 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 45, 43), 'str', 'include_dirs')\n kwargs_32228 = {}\n lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 28), 'lapack_opt', False)\n pop_32226 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')\n pop_call_result_32229 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228\n )\n kwargs_32230 = {}\n include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 8), 'include_dirs', False)\n extend_32224 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 8), include_dirs_32223,\n 'extend')\n extend_call_result_32231 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 8), extend_32224, *[\n pop_call_result_32229], **kwargs_32230)\n module_type_store = module_type_store.join_ssa_context()\n str_32234 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 47, 25), 'str', '_quadpack')\n list_32235 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 33), 'list')\n str_32236 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c'\n )\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 48, 33), list_32235, str_32236)\n keyword_32237 = list_32235\n list_32238 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 35), 'list')\n str_32239 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 36), 'str', 'quadpack')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32239)\n str_32240 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 48), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32240)\n lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 49, 58), 'lapack_libs', False)\n result_add_32242 = python_operator(stypy.reporting.localization.\n Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)\n keyword_32243 = result_add_32242\n list_32244 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 34), 'list')\n str_32245 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 50, 34), list_32244, str_32245)\n quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 36), 'quadpack_src', False)\n result_add_32247 = python_operator(stypy.reporting.localization.\n Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)\n mach_src_32248 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 51), 'mach_src', False)\n result_add_32249 = python_operator(stypy.reporting.localization.\n Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)\n keyword_32250 = result_add_32249\n include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 52, 38), 'include_dirs', False)\n keyword_32252 = include_dirs_32251\n lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 53, 27), 'lapack_opt', False)\n kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237,\n 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253,\n 'include_dirs': keyword_32252}\n config_32232 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 47, 4), 'config', False)\n add_extension_32233 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 47, 4), config_32232,\n 'add_extension')\n add_extension_call_result_32255 = invoke(stypy.reporting.localization.\n Localization(__file__, 47, 4), add_extension_32233, *[str_32234],\n **kwargs_32254)\n kwargs_32258 = {}\n lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 56, 19), 'lapack_opt', False)\n copy_32257 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')\n copy_call_result_32259 = invoke(stypy.reporting.localization.\n Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 56, 4), 'odepack_opts', copy_call_result_32259)\n numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)\n kwargs_32263 = {}\n odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 4), 'odepack_opts', False)\n update_32261 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 57, 4), odepack_opts_32260,\n 'update')\n update_call_result_32264 = invoke(stypy.reporting.localization.\n Localization(__file__, 57, 4), update_32261, *[\n numpy_nodepr_api_32262], **kwargs_32263)\n str_32267 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 58, 25), 'str', '_odepack')\n list_32268 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 33), 'list')\n str_32269 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 59, 33), list_32268, str_32269)\n keyword_32270 = list_32268\n list_32271 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 35), 'list')\n str_32272 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32272)\n str_32273 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32273)\n lapack_libs_32274 
= module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 60, 55), 'lapack_libs', False)\n result_add_32275 = python_operator(stypy.reporting.localization.\n Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)\n keyword_32276 = result_add_32275\n lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 34), 'lsoda_src', False)\n mach_src_32278 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 46), 'mach_src', False)\n result_add_32279 = python_operator(stypy.reporting.localization.\n Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)\n keyword_32280 = result_add_32279\n odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 62, 27), 'odepack_opts', False)\n kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270,\n 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}\n config_32265 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 58, 4), 'config', False)\n add_extension_32266 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 58, 4), config_32265,\n 'add_extension')\n add_extension_call_result_32283 = invoke(stypy.reporting.localization.\n Localization(__file__, 58, 4), add_extension_32266, *[str_32267],\n **kwargs_32282)\n str_32286 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 65, 25), 'str', 'vode')\n list_32287 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 33), 'list')\n str_32288 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 34), 'str', 'vode.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 66, 33), list_32287, str_32288)\n keyword_32289 = list_32287\n list_32290 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 35), 'list')\n str_32291 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 36), 'str', 'vode')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 67, 35), list_32290, str_32291)\n lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 67, 46), 'lapack_libs', False)\n result_add_32293 = python_operator(stypy.reporting.localization.\n Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)\n keyword_32294 = result_add_32293\n vode_src_32295 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 68, 33), 'vode_src', False)\n keyword_32296 = vode_src_32295\n lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 69, 27), 'lapack_opt', False)\n kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289,\n 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}\n config_32284 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 65, 4), 'config', False)\n add_extension_32285 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 65, 4), config_32284,\n 'add_extension')\n add_extension_call_result_32299 = invoke(stypy.reporting.localization.\n Localization(__file__, 65, 4), add_extension_32285, *[str_32286],\n **kwargs_32298)\n str_32302 = 
get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 72, 25), 'str', 'lsoda')\n list_32303 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 33), 'list')\n str_32304 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 73, 33), list_32303, str_32304)\n keyword_32305 = list_32303\n list_32306 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 35), 'list')\n str_32307 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32307)\n str_32308 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32308)\n lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 74, 55), 'lapack_libs', False)\n result_add_32310 = python_operator(stypy.reporting.localization.\n Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)\n keyword_32311 = result_add_32310\n lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 34), 'lsoda_src', False)\n mach_src_32313 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 46), 'mach_src', False)\n result_add_32314 = python_operator(stypy.reporting.localization.\n Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313)\n keyword_32315 = result_add_32314\n lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 76, 27), 'lapack_opt', False)\n kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305,\n 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}\n config_32300 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 72, 4), 'config', False)\n add_extension_32301 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 72, 4), config_32300,\n 'add_extension')\n add_extension_call_result_32318 = invoke(stypy.reporting.localization.\n Localization(__file__, 72, 4), add_extension_32301, *[str_32302],\n **kwargs_32317)\n str_32321 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 79, 25), 'str', '_dop')\n list_32322 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 33), 'list')\n str_32323 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 80, 33), list_32322, str_32323)\n keyword_32324 = list_32322\n list_32325 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 35), 'list')\n str_32326 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 36), 'str', 'dop')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 81, 35), list_32325, str_32326)\n keyword_32327 = list_32325\n dop_src_32328 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 82, 33), 'dop_src', False)\n keyword_32329 = dop_src_32328\n kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324,\n 'depends': keyword_32329}\n config_32319 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 79, 4), 'config', False)\n add_extension_32320 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 79, 4), config_32319,\n 'add_extension')\n add_extension_call_result_32331 = invoke(stypy.reporting.localization.\n Localization(__file__, 79, 4), add_extension_32320, *[str_32321],\n **kwargs_32330)\n str_32334 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 84, 25), 'str',\n '_test_multivariate')\n quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 85, 33), 'quadpack_test_src', \n False)\n keyword_32336 = quadpack_test_src_32335\n kwargs_32337 = {'sources': keyword_32336}\n config_32332 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 84, 4), 'config', False)\n add_extension_32333 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 84, 4), config_32332,\n 'add_extension')\n add_extension_call_result_32338 = invoke(stypy.reporting.localization.\n Localization(__file__, 84, 4), add_extension_32333, *[str_32334],\n **kwargs_32337)\n str_32341 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 88, 25), 'str',\n '_test_odeint_banded')\n odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.\n reporting.localization.Localization(__file__, 89, 33),\n 'odeint_banded_test_src', False)\n keyword_32343 = odeint_banded_test_src_32342\n list_32344 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 35), 'list')\n str_32345 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32345)\n str_32346 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32346)\n lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 90, 55), 'lapack_libs', False)\n result_add_32348 = python_operator(stypy.reporting.localization.\n Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347)\n keyword_32349 = result_add_32348\n lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 34), 'lsoda_src', False)\n mach_src_32351 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 46), 'mach_src', False)\n result_add_32352 = python_operator(stypy.reporting.localization.\n Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)\n keyword_32353 = result_add_32352\n lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 92, 27), 'lapack_opt', False)\n kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343,\n 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}\n config_32339 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 88, 4), 'config', False)\n add_extension_32340 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 88, 4), config_32339,\n 'add_extension')\n add_extension_call_result_32356 = invoke(stypy.reporting.localization.\n Localization(__file__, 88, 4), add_extension_32340, *[str_32341],\n **kwargs_32355)\n str_32359 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 94, 26), 'str', '_ivp')\n kwargs_32360 = {}\n config_32357 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 94, 4), 'config', False)\n add_subpackage_32358 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 94, 4), config_32357,\n 'add_subpackage')\n add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.\n Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359],\n **kwargs_32360)\n str_32364 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 96, 24), 'str', 'tests')\n kwargs_32365 = {}\n config_32362 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 96, 4), 'config', False)\n add_data_dir_32363 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 96, 4), config_32362,\n 'add_data_dir')\n add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.\n Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **\n kwargs_32365)\n config_32367 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 97, 11), 'config')\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 97, 4), 'stypy_return_type', config_32367)\n teardown_call_information(localization, arguments)\n stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 9, 0), 'stypy_return_type')\n module_type_store.store_return_type_of_current_context(\n stypy_return_type_32368)\n module_type_store = module_type_store.close_function_context()\n return stypy_return_type_32368\n\n\nmodule_type_store.set_type_of(stypy.reporting.localization.Localization(\n __file__, 9, 0), 'configuration', configuration)\nif __name__ == '__main__':\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 101, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32369 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 101, 4), 'numpy.distutils.core')\n if type(import_32369) is not StypyTypeError:\n if import_32369 != 'pyd_module':\n __import__(import_32369)\n sys_modules_32370 = sys.modules[import_32369]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core',\n sys_modules_32370.module_type_store, module_type_store, [\n 'setup'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 101, 4), __file__, sys_modules_32370, sys_modules_32370.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.core import setup\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core', None,\n module_type_store, ['setup'], [setup])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 101, 
4), 'numpy.distutils.core',\n import_32369)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n kwargs_32378 = {}\n str_32373 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 102, 35), 'str', '')\n keyword_32374 = str_32373\n kwargs_32375 = {'top_path': keyword_32374}\n configuration_32372 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 12), 'configuration', False)\n configuration_call_result_32376 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), configuration_32372, *[], **\n kwargs_32375)\n todict_32377 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 102, 12),\n configuration_call_result_32376, 'todict')\n todict_call_result_32379 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)\n kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}\n setup_32371 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 4), 'setup', False)\n setup_call_result_32381 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)\n<mask token>\n", "step-3": "<mask token>\nmodule_type_store = Context(None, __file__)\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 3, 0))\n<mask token>\nimport_module(stypy.reporting.localization.Localization(__file__, 3, 0),\n 'os', os, module_type_store)\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 4, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32066 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 4, 0), 'os.path')\nif type(import_32066) is not StypyTypeError:\n if import_32066 != 'pyd_module':\n __import__(import_32066)\n sys_modules_32067 = sys.modules[import_32066]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', sys_modules_32067.module_type_store,\n module_type_store, ['join'])\n nest_module(stypy.reporting.localization.Localization(__file__, 4, \n 0), __file__, sys_modules_32067, sys_modules_32067.\n module_type_store, module_type_store)\n else:\n from os.path import join\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', None, module_type_store, ['join'],\n [join])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 4, 0), 'os.path', import_32066)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 6, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32068 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 6, 0), 'scipy._build_utils')\nif type(import_32068) is not StypyTypeError:\n if import_32068 != 'pyd_module':\n __import__(import_32068)\n sys_modules_32069 = sys.modules[import_32068]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', sys_modules_32069.\n module_type_store, module_type_store, ['numpy_nodepr_api'])\n 
nest_module(stypy.reporting.localization.Localization(__file__, 6, \n 0), __file__, sys_modules_32069, sys_modules_32069.\n module_type_store, module_type_store)\n else:\n from scipy._build_utils import numpy_nodepr_api\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', None, module_type_store,\n ['numpy_nodepr_api'], [numpy_nodepr_api])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 6, 0), 'scipy._build_utils', import_32068)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n\n\n@norecursion\ndef configuration(localization, *varargs, **kwargs):\n global module_type_store\n str_32070 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 9, 33), 'str', '')\n None_32071 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 9, 45), 'None')\n defaults = [str_32070, None_32071]\n module_type_store = module_type_store.open_function_context('configuration'\n , 9, 0, False)\n configuration.stypy_localization = localization\n configuration.stypy_type_of_self = None\n configuration.stypy_type_store = module_type_store\n configuration.stypy_function_name = 'configuration'\n configuration.stypy_param_names_list = ['parent_package', 'top_path']\n configuration.stypy_varargs_param_name = None\n configuration.stypy_kwargs_param_name = None\n configuration.stypy_call_defaults = defaults\n configuration.stypy_call_varargs = varargs\n configuration.stypy_call_kwargs = kwargs\n arguments = process_argument_values(localization, None,\n module_type_store, 'configuration', ['parent_package', 'top_path'],\n None, None, defaults, varargs, kwargs)\n if is_error_type(arguments):\n module_type_store = module_type_store.close_function_context()\n return arguments\n init_call_information(module_type_store, 'configuration', localization,\n ['parent_package', 'top_path'], arguments)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 0, 0), 'stypy_return_type', None)\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 10, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32072 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util'\n )\n if type(import_32072) is not StypyTypeError:\n if import_32072 != 'pyd_module':\n __import__(import_32072)\n sys_modules_32073 = sys.modules[import_32072]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util',\n sys_modules_32073.module_type_store, module_type_store, [\n 'Configuration'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 10, 4), __file__, sys_modules_32073, sys_modules_32073.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.misc_util import Configuration\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util', None,\n module_type_store, ['Configuration'], [Configuration])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 10, 4), 'numpy.distutils.misc_util',\n import_32072)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n 
localization.Localization(__file__, 11, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32074 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 11, 4),\n 'numpy.distutils.system_info')\n if type(import_32074) is not StypyTypeError:\n if import_32074 != 'pyd_module':\n __import__(import_32074)\n sys_modules_32075 = sys.modules[import_32074]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info',\n sys_modules_32075.module_type_store, module_type_store, [\n 'get_info'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 11, 4), __file__, sys_modules_32075, sys_modules_32075.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.system_info import get_info\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info', None,\n module_type_store, ['get_info'], [get_info])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 11, 4), 'numpy.distutils.system_info',\n import_32074)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n str_32077 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 12, 27), 'str', 'integrate')\n parent_package_32078 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 40), 'parent_package', False)\n top_path_32079 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 56), 'top_path', False)\n kwargs_32080 = {}\n Configuration_32076 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 13), 'Configuration', False)\n Configuration_call_result_32081 = invoke(stypy.reporting.localization.\n Localization(__file__, 12, 13), Configuration_32076, *[str_32077,\n parent_package_32078, top_path_32079], **kwargs_32080)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 12, 4), 'config', Configuration_call_result_32081)\n str_32084 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')\n int_32085 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 60), 'int')\n keyword_32086 = int_32085\n kwargs_32087 = {'notfound_action': keyword_32086}\n get_info_32083 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 15, 22), 'get_info', False)\n get_info_call_result_32088 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 22), get_info_32083, *[str_32084], **\n kwargs_32087)\n kwargs_32089 = {}\n dict_32082 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 15, 17), 'dict', False)\n dict_call_result_32090 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 17), dict_32082, *[\n get_info_call_result_32088], **kwargs_32089)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 15, 4), 'lapack_opt', dict_call_result_32090)\n str_32093 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 33), 'str', 'libraries')\n list_32094 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 46), 'list')\n kwargs_32095 = {}\n lapack_opt_32091 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 18, 18), 'lapack_opt', False)\n pop_32092 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')\n pop_call_result_32096 = invoke(stypy.reporting.localization.\n Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094],\n **kwargs_32095)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 18, 4), 'lapack_libs', pop_call_result_32096)\n list_32097 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 15), 'list')\n str_32099 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 21), 'str', 'mach')\n str_32100 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 28), 'str', '*.f')\n kwargs_32101 = {}\n join_32098 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 20, 16), 'join', False)\n join_call_result_32102 = invoke(stypy.reporting.localization.\n Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100],\n **kwargs_32101)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 20, 15), list_32097, join_call_result_32102)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 20, 4), 'mach_src', list_32097)\n list_32103 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 19), 'list')\n str_32105 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 25), 'str', 'quadpack')\n str_32106 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 37), 'str', '*.f')\n kwargs_32107 = {}\n join_32104 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 21, 20), 'join', False)\n join_call_result_32108 = invoke(stypy.reporting.localization.\n Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106],\n **kwargs_32107)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 21, 19), list_32103, join_call_result_32108)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 21, 4), 'quadpack_src', list_32103)\n list_32114 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 47), 'list')\n str_32115 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32115)\n str_32116 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32116)\n str_32117 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 34), 'str', 'cfode.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32117)\n str_32118 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 8), 'str', 'ewset.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32118)\n str_32119 = get_builtin_python_type_instance(stypy.reporting.\n 
localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32119)\n str_32120 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 30), 'str', 'intdy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32120)\n str_32121 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32121)\n str_32122 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 19), 'str', 'prja.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32122)\n str_32123 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 29), 'str', 'solsy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32123)\n str_32124 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 40), 'str', 'srcma.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32124)\n str_32125 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 8), 'str', 'stoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32125)\n str_32126 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32126)\n str_32127 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32127)\n str_32128 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32128)\n str_32129 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32129)\n comprehension_32130 = get_contained_elements_type(stypy.reporting.\n localization.Localization(__file__, 22, 17), list_32114)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 17), 'fn', comprehension_32130)\n str_32110 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 22), 'str', 'odepack')\n fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 22, 33), 'fn', False)\n kwargs_32112 = {}\n join_32109 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 22, 17), 'join', False)\n join_call_result_32113 = invoke(stypy.reporting.localization.\n Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111],\n **kwargs_32112)\n list_32131 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 17), 
'list')\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 17), list_32131, join_call_result_32113)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 4), 'lsoda_src', list_32131)\n list_32132 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 15), 'list')\n str_32134 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 21), 'str', 'odepack')\n str_32135 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 32), 'str', 'vode.f')\n kwargs_32136 = {}\n join_32133 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 16), 'join', False)\n join_call_result_32137 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135],\n **kwargs_32136)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32137)\n str_32139 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 48), 'str', 'odepack')\n str_32140 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 59), 'str', 'zvode.f')\n kwargs_32141 = {}\n join_32138 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 43), 'join', False)\n join_call_result_32142 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140],\n **kwargs_32141)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32142)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 28, 4), 'vode_src', list_32132)\n list_32143 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 14), 'list')\n str_32145 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 20), 'str', 'dop')\n str_32146 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 26), 'str', '*.f')\n kwargs_32147 = {}\n join_32144 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 29, 15), 'join', False)\n join_call_result_32148 = invoke(stypy.reporting.localization.\n Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146],\n **kwargs_32147)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 29, 14), list_32143, join_call_result_32148)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 29, 4), 'dop_src', list_32143)\n list_32149 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 24), 'list')\n str_32151 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 30), 'str', 'tests')\n str_32152 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 38), 'str',\n '_test_multivariate.c')\n kwargs_32153 = {}\n join_32150 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 30, 25), 'join', False)\n join_call_result_32154 = invoke(stypy.reporting.localization.\n Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152],\n **kwargs_32153)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 
30, 24), list_32149, join_call_result_32154)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 30, 4), 'quadpack_test_src', list_32149)\n list_32155 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 29), 'list')\n str_32157 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 35), 'str', 'tests')\n str_32158 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f')\n kwargs_32159 = {}\n join_32156 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 31, 30), 'join', False)\n join_call_result_32160 = invoke(stypy.reporting.localization.\n Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158],\n **kwargs_32159)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 31, 29), list_32155, join_call_result_32160)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 31, 4), 'odeint_banded_test_src', list_32155)\n str_32163 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 33, 23), 'str', 'mach')\n mach_src_32164 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 39), 'mach_src', False)\n keyword_32165 = mach_src_32164\n dict_32166 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 33), 'dict')\n str_32167 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 34), 'str', 'noopt')\n tuple_32168 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 43), 'tuple')\n file___32169 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 34, 43), '__file__', False)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, file___32169)\n int_32170 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 52), 'int')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, int_32170)\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 33), dict_32166, (str_32167, tuple_32168))\n keyword_32171 = dict_32166\n kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}\n config_32161 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 4), 'config', False)\n add_library_32162 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 33, 4), config_32161,\n 'add_library')\n add_library_call_result_32173 = invoke(stypy.reporting.localization.\n Localization(__file__, 33, 4), add_library_32162, *[str_32163], **\n kwargs_32172)\n str_32176 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 35, 23), 'str', 'quadpack')\n quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 43), 'quadpack_src', False)\n keyword_32178 = quadpack_src_32177\n kwargs_32179 = {'sources': keyword_32178}\n config_32174 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 4), 'config', False)\n add_library_32175 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 35, 4), config_32174,\n 
'add_library')\n add_library_call_result_32180 = invoke(stypy.reporting.localization.\n Localization(__file__, 35, 4), add_library_32175, *[str_32176], **\n kwargs_32179)\n str_32183 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 36, 23), 'str', 'lsoda')\n lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 40), 'lsoda_src', False)\n keyword_32185 = lsoda_src_32184\n kwargs_32186 = {'sources': keyword_32185}\n config_32181 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 4), 'config', False)\n add_library_32182 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 36, 4), config_32181,\n 'add_library')\n add_library_call_result_32187 = invoke(stypy.reporting.localization.\n Localization(__file__, 36, 4), add_library_32182, *[str_32183], **\n kwargs_32186)\n str_32190 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 37, 23), 'str', 'vode')\n vode_src_32191 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 39), 'vode_src', False)\n keyword_32192 = vode_src_32191\n kwargs_32193 = {'sources': keyword_32192}\n config_32188 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 4), 'config', False)\n add_library_32189 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 37, 4), config_32188,\n 'add_library')\n add_library_call_result_32194 = invoke(stypy.reporting.localization.\n Localization(__file__, 37, 4), add_library_32189, *[str_32190], **\n kwargs_32193)\n str_32197 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 38, 23), 'str', 'dop')\n dop_src_32198 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 38), 'dop_src', False)\n keyword_32199 = dop_src_32198\n kwargs_32200 = {'sources': keyword_32199}\n config_32195 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 4), 'config', False)\n add_library_32196 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 38, 4), config_32195,\n 'add_library')\n add_library_call_result_32201 = invoke(stypy.reporting.localization.\n Localization(__file__, 38, 4), add_library_32196, *[str_32197], **\n kwargs_32200)\n list_32202 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 19), 'list')\n file___32207 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 42, 41), '__file__', False)\n kwargs_32208 = {}\n os_32204 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 42, 25), 'os', False)\n path_32205 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), os_32204, 'path')\n dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), path_32205, 'dirname')\n dirname_call_result_32209 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 25), dirname_32206, *[file___32207], **\n kwargs_32208)\n str_32210 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 52), 'str', '..')\n str_32211 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 58), 'str', '_lib')\n 
str_32212 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 66), 'str', 'src')\n kwargs_32213 = {}\n join_32203 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 42, 20), 'join', False)\n join_call_result_32214 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 20), join_32203, *[\n dirname_call_result_32209, str_32210, str_32211, str_32212], **\n kwargs_32213)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 42, 19), list_32202, join_call_result_32214)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 42, 4), 'include_dirs', list_32202)\n str_32215 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 43, 7), 'str', 'include_dirs')\n lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 43, 25), 'lapack_opt')\n result_contains_32217 = python_operator(stypy.reporting.localization.\n Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)\n if_condition_32218 = is_suitable_condition(stypy.reporting.localization\n .Localization(__file__, 43, 4), result_contains_32217)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 43, 4), 'if_condition_32218', if_condition_32218)\n module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')\n lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 44, 26), 'lapack_opt', False)\n kwargs_32221 = {}\n dict_32219 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 44, 21), 'dict', False)\n dict_call_result_32222 = invoke(stypy.reporting.localization.\n Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **\n kwargs_32221)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 44, 8), 'lapack_opt', dict_call_result_32222)\n str_32227 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 45, 43), 'str', 'include_dirs')\n kwargs_32228 = {}\n lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 28), 'lapack_opt', False)\n pop_32226 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')\n pop_call_result_32229 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228\n )\n kwargs_32230 = {}\n include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 8), 'include_dirs', False)\n extend_32224 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 8), include_dirs_32223,\n 'extend')\n extend_call_result_32231 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 8), extend_32224, *[\n pop_call_result_32229], **kwargs_32230)\n module_type_store = module_type_store.join_ssa_context()\n str_32234 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 47, 25), 'str', '_quadpack')\n list_32235 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 33), 'list')\n str_32236 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c'\n )\n 
add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 48, 33), list_32235, str_32236)\n keyword_32237 = list_32235\n list_32238 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 35), 'list')\n str_32239 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 36), 'str', 'quadpack')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32239)\n str_32240 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 48), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32240)\n lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 49, 58), 'lapack_libs', False)\n result_add_32242 = python_operator(stypy.reporting.localization.\n Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)\n keyword_32243 = result_add_32242\n list_32244 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 34), 'list')\n str_32245 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 50, 34), list_32244, str_32245)\n quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 36), 'quadpack_src', False)\n result_add_32247 = python_operator(stypy.reporting.localization.\n Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)\n mach_src_32248 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 51), 'mach_src', False)\n result_add_32249 = python_operator(stypy.reporting.localization.\n Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)\n keyword_32250 = result_add_32249\n include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 52, 38), 'include_dirs', False)\n keyword_32252 = include_dirs_32251\n lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 53, 27), 'lapack_opt', False)\n kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237,\n 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253,\n 'include_dirs': keyword_32252}\n config_32232 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 47, 4), 'config', False)\n add_extension_32233 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 47, 4), config_32232,\n 'add_extension')\n add_extension_call_result_32255 = invoke(stypy.reporting.localization.\n Localization(__file__, 47, 4), add_extension_32233, *[str_32234],\n **kwargs_32254)\n kwargs_32258 = {}\n lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 56, 19), 'lapack_opt', False)\n copy_32257 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')\n copy_call_result_32259 = invoke(stypy.reporting.localization.\n Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 56, 4), 'odepack_opts', copy_call_result_32259)\n numpy_nodepr_api_32262 = 
module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)\n kwargs_32263 = {}\n odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 4), 'odepack_opts', False)\n update_32261 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 57, 4), odepack_opts_32260,\n 'update')\n update_call_result_32264 = invoke(stypy.reporting.localization.\n Localization(__file__, 57, 4), update_32261, *[\n numpy_nodepr_api_32262], **kwargs_32263)\n str_32267 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 58, 25), 'str', '_odepack')\n list_32268 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 33), 'list')\n str_32269 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 59, 33), list_32268, str_32269)\n keyword_32270 = list_32268\n list_32271 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 35), 'list')\n str_32272 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32272)\n str_32273 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32273)\n lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 60, 55), 'lapack_libs', False)\n result_add_32275 = python_operator(stypy.reporting.localization.\n Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)\n keyword_32276 = result_add_32275\n lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 34), 'lsoda_src', False)\n mach_src_32278 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 46), 'mach_src', False)\n result_add_32279 = python_operator(stypy.reporting.localization.\n Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)\n keyword_32280 = result_add_32279\n odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 62, 27), 'odepack_opts', False)\n kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270,\n 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}\n config_32265 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 58, 4), 'config', False)\n add_extension_32266 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 58, 4), config_32265,\n 'add_extension')\n add_extension_call_result_32283 = invoke(stypy.reporting.localization.\n Localization(__file__, 58, 4), add_extension_32266, *[str_32267],\n **kwargs_32282)\n str_32286 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 65, 25), 'str', 'vode')\n list_32287 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 33), 'list')\n str_32288 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 
66, 34), 'str', 'vode.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 66, 33), list_32287, str_32288)\n keyword_32289 = list_32287\n list_32290 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 35), 'list')\n str_32291 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 36), 'str', 'vode')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 67, 35), list_32290, str_32291)\n lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 67, 46), 'lapack_libs', False)\n result_add_32293 = python_operator(stypy.reporting.localization.\n Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)\n keyword_32294 = result_add_32293\n vode_src_32295 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 68, 33), 'vode_src', False)\n keyword_32296 = vode_src_32295\n lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 69, 27), 'lapack_opt', False)\n kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289,\n 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}\n config_32284 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 65, 4), 'config', False)\n add_extension_32285 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 65, 4), config_32284,\n 'add_extension')\n add_extension_call_result_32299 = invoke(stypy.reporting.localization.\n Localization(__file__, 65, 4), add_extension_32285, *[str_32286],\n **kwargs_32298)\n str_32302 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 72, 25), 'str', 'lsoda')\n list_32303 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 33), 'list')\n str_32304 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 73, 33), list_32303, str_32304)\n keyword_32305 = list_32303\n list_32306 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 35), 'list')\n str_32307 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32307)\n str_32308 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32308)\n lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 74, 55), 'lapack_libs', False)\n result_add_32310 = python_operator(stypy.reporting.localization.\n Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)\n keyword_32311 = result_add_32310\n lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 34), 'lsoda_src', False)\n mach_src_32313 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 46), 'mach_src', False)\n result_add_32314 = python_operator(stypy.reporting.localization.\n Localization(__file__, 75, 34), '+', 
lsoda_src_32312, mach_src_32313)\n keyword_32315 = result_add_32314\n lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 76, 27), 'lapack_opt', False)\n kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305,\n 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}\n config_32300 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 72, 4), 'config', False)\n add_extension_32301 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 72, 4), config_32300,\n 'add_extension')\n add_extension_call_result_32318 = invoke(stypy.reporting.localization.\n Localization(__file__, 72, 4), add_extension_32301, *[str_32302],\n **kwargs_32317)\n str_32321 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 79, 25), 'str', '_dop')\n list_32322 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 33), 'list')\n str_32323 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 80, 33), list_32322, str_32323)\n keyword_32324 = list_32322\n list_32325 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 35), 'list')\n str_32326 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 36), 'str', 'dop')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 81, 35), list_32325, str_32326)\n keyword_32327 = list_32325\n dop_src_32328 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 82, 33), 'dop_src', False)\n keyword_32329 = dop_src_32328\n kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324,\n 'depends': keyword_32329}\n config_32319 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 79, 4), 'config', False)\n add_extension_32320 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 79, 4), config_32319,\n 'add_extension')\n add_extension_call_result_32331 = invoke(stypy.reporting.localization.\n Localization(__file__, 79, 4), add_extension_32320, *[str_32321],\n **kwargs_32330)\n str_32334 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 84, 25), 'str',\n '_test_multivariate')\n quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 85, 33), 'quadpack_test_src', \n False)\n keyword_32336 = quadpack_test_src_32335\n kwargs_32337 = {'sources': keyword_32336}\n config_32332 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 84, 4), 'config', False)\n add_extension_32333 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 84, 4), config_32332,\n 'add_extension')\n add_extension_call_result_32338 = invoke(stypy.reporting.localization.\n Localization(__file__, 84, 4), add_extension_32333, *[str_32334],\n **kwargs_32337)\n str_32341 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 88, 25), 'str',\n '_test_odeint_banded')\n odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.\n reporting.localization.Localization(__file__, 89, 33),\n 'odeint_banded_test_src', False)\n keyword_32343 = 
odeint_banded_test_src_32342\n list_32344 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 35), 'list')\n str_32345 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32345)\n str_32346 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32346)\n lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 90, 55), 'lapack_libs', False)\n result_add_32348 = python_operator(stypy.reporting.localization.\n Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347)\n keyword_32349 = result_add_32348\n lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 34), 'lsoda_src', False)\n mach_src_32351 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 46), 'mach_src', False)\n result_add_32352 = python_operator(stypy.reporting.localization.\n Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)\n keyword_32353 = result_add_32352\n lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 92, 27), 'lapack_opt', False)\n kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343,\n 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}\n config_32339 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 88, 4), 'config', False)\n add_extension_32340 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 88, 4), config_32339,\n 'add_extension')\n add_extension_call_result_32356 = invoke(stypy.reporting.localization.\n Localization(__file__, 88, 4), add_extension_32340, *[str_32341],\n **kwargs_32355)\n str_32359 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 94, 26), 'str', '_ivp')\n kwargs_32360 = {}\n config_32357 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 94, 4), 'config', False)\n add_subpackage_32358 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 94, 4), config_32357,\n 'add_subpackage')\n add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.\n Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359],\n **kwargs_32360)\n str_32364 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 96, 24), 'str', 'tests')\n kwargs_32365 = {}\n config_32362 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 96, 4), 'config', False)\n add_data_dir_32363 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 96, 4), config_32362,\n 'add_data_dir')\n add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.\n Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **\n kwargs_32365)\n config_32367 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 97, 11), 'config')\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 97, 4), 'stypy_return_type', config_32367)\n 
teardown_call_information(localization, arguments)\n stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 9, 0), 'stypy_return_type')\n module_type_store.store_return_type_of_current_context(\n stypy_return_type_32368)\n module_type_store = module_type_store.close_function_context()\n return stypy_return_type_32368\n\n\nmodule_type_store.set_type_of(stypy.reporting.localization.Localization(\n __file__, 9, 0), 'configuration', configuration)\nif __name__ == '__main__':\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 101, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32369 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 101, 4), 'numpy.distutils.core')\n if type(import_32369) is not StypyTypeError:\n if import_32369 != 'pyd_module':\n __import__(import_32369)\n sys_modules_32370 = sys.modules[import_32369]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core',\n sys_modules_32370.module_type_store, module_type_store, [\n 'setup'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 101, 4), __file__, sys_modules_32370, sys_modules_32370.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.core import setup\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core', None,\n module_type_store, ['setup'], [setup])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 101, 4), 'numpy.distutils.core',\n import_32369)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n kwargs_32378 = {}\n str_32373 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 102, 35), 'str', '')\n keyword_32374 = str_32373\n kwargs_32375 = {'top_path': keyword_32374}\n configuration_32372 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 12), 'configuration', False)\n configuration_call_result_32376 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), configuration_32372, *[], **\n kwargs_32375)\n todict_32377 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 102, 12),\n configuration_call_result_32376, 'todict')\n todict_call_result_32379 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)\n kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}\n setup_32371 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 4), 'setup', False)\n setup_call_result_32381 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)\nmodule_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()\nmodule_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()\n", "step-4": "<mask token>\nfrom stypy.type_inference_programs.type_inference_programs_imports import *\nmodule_type_store = Context(None, __file__)\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 3, 0))\nimport os\nimport_module(stypy.reporting.localization.Localization(__file__, 3, 0),\n 'os', os, 
module_type_store)\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 4, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32066 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 4, 0), 'os.path')\nif type(import_32066) is not StypyTypeError:\n if import_32066 != 'pyd_module':\n __import__(import_32066)\n sys_modules_32067 = sys.modules[import_32066]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', sys_modules_32067.module_type_store,\n module_type_store, ['join'])\n nest_module(stypy.reporting.localization.Localization(__file__, 4, \n 0), __file__, sys_modules_32067, sys_modules_32067.\n module_type_store, module_type_store)\n else:\n from os.path import join\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 4, 0), 'os.path', None, module_type_store, ['join'],\n [join])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 4, 0), 'os.path', import_32066)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nstypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 6, 0))\nupdate_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32068 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 6, 0), 'scipy._build_utils')\nif type(import_32068) is not StypyTypeError:\n if import_32068 != 'pyd_module':\n __import__(import_32068)\n sys_modules_32069 = sys.modules[import_32068]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', sys_modules_32069.\n module_type_store, module_type_store, ['numpy_nodepr_api'])\n nest_module(stypy.reporting.localization.Localization(__file__, 6, \n 0), __file__, sys_modules_32069, sys_modules_32069.\n module_type_store, module_type_store)\n else:\n from scipy._build_utils import numpy_nodepr_api\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 6, 0), 'scipy._build_utils', None, module_type_store,\n ['numpy_nodepr_api'], [numpy_nodepr_api])\nelse:\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 6, 0), 'scipy._build_utils', import_32068)\nremove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n\n\n@norecursion\ndef configuration(localization, *varargs, **kwargs):\n global module_type_store\n str_32070 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 9, 33), 'str', '')\n None_32071 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 9, 45), 'None')\n defaults = [str_32070, None_32071]\n module_type_store = module_type_store.open_function_context('configuration'\n , 9, 0, False)\n configuration.stypy_localization = localization\n configuration.stypy_type_of_self = None\n configuration.stypy_type_store = module_type_store\n configuration.stypy_function_name = 'configuration'\n configuration.stypy_param_names_list = ['parent_package', 'top_path']\n configuration.stypy_varargs_param_name = None\n configuration.stypy_kwargs_param_name = None\n configuration.stypy_call_defaults = defaults\n configuration.stypy_call_varargs = varargs\n configuration.stypy_call_kwargs = kwargs\n arguments = 
process_argument_values(localization, None,\n module_type_store, 'configuration', ['parent_package', 'top_path'],\n None, None, defaults, varargs, kwargs)\n if is_error_type(arguments):\n module_type_store = module_type_store.close_function_context()\n return arguments\n init_call_information(module_type_store, 'configuration', localization,\n ['parent_package', 'top_path'], arguments)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 0, 0), 'stypy_return_type', None)\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 10, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32072 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util'\n )\n if type(import_32072) is not StypyTypeError:\n if import_32072 != 'pyd_module':\n __import__(import_32072)\n sys_modules_32073 = sys.modules[import_32072]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util',\n sys_modules_32073.module_type_store, module_type_store, [\n 'Configuration'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 10, 4), __file__, sys_modules_32073, sys_modules_32073.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.misc_util import Configuration\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 10, 4), 'numpy.distutils.misc_util', None,\n module_type_store, ['Configuration'], [Configuration])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 10, 4), 'numpy.distutils.misc_util',\n import_32072)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 11, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32074 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 11, 4),\n 'numpy.distutils.system_info')\n if type(import_32074) is not StypyTypeError:\n if import_32074 != 'pyd_module':\n __import__(import_32074)\n sys_modules_32075 = sys.modules[import_32074]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info',\n sys_modules_32075.module_type_store, module_type_store, [\n 'get_info'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 11, 4), __file__, sys_modules_32075, sys_modules_32075.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.system_info import get_info\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 11, 4), 'numpy.distutils.system_info', None,\n module_type_store, ['get_info'], [get_info])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 11, 4), 'numpy.distutils.system_info',\n import_32074)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n str_32077 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 12, 27), 'str', 'integrate')\n parent_package_32078 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 40), 'parent_package', False)\n top_path_32079 = module_type_store.get_type_of(stypy.reporting.\n 
localization.Localization(__file__, 12, 56), 'top_path', False)\n kwargs_32080 = {}\n Configuration_32076 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 12, 13), 'Configuration', False)\n Configuration_call_result_32081 = invoke(stypy.reporting.localization.\n Localization(__file__, 12, 13), Configuration_32076, *[str_32077,\n parent_package_32078, top_path_32079], **kwargs_32080)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 12, 4), 'config', Configuration_call_result_32081)\n str_32084 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')\n int_32085 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 15, 60), 'int')\n keyword_32086 = int_32085\n kwargs_32087 = {'notfound_action': keyword_32086}\n get_info_32083 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 15, 22), 'get_info', False)\n get_info_call_result_32088 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 22), get_info_32083, *[str_32084], **\n kwargs_32087)\n kwargs_32089 = {}\n dict_32082 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 15, 17), 'dict', False)\n dict_call_result_32090 = invoke(stypy.reporting.localization.\n Localization(__file__, 15, 17), dict_32082, *[\n get_info_call_result_32088], **kwargs_32089)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 15, 4), 'lapack_opt', dict_call_result_32090)\n str_32093 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 33), 'str', 'libraries')\n list_32094 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 18, 46), 'list')\n kwargs_32095 = {}\n lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 18, 18), 'lapack_opt', False)\n pop_32092 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')\n pop_call_result_32096 = invoke(stypy.reporting.localization.\n Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094],\n **kwargs_32095)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 18, 4), 'lapack_libs', pop_call_result_32096)\n list_32097 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 15), 'list')\n str_32099 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 21), 'str', 'mach')\n str_32100 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 20, 28), 'str', '*.f')\n kwargs_32101 = {}\n join_32098 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 20, 16), 'join', False)\n join_call_result_32102 = invoke(stypy.reporting.localization.\n Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100],\n **kwargs_32101)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 20, 15), list_32097, join_call_result_32102)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 20, 4), 'mach_src', list_32097)\n list_32103 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 19), 'list')\n str_32105 = get_builtin_python_type_instance(stypy.reporting.\n 
localization.Localization(__file__, 21, 25), 'str', 'quadpack')\n str_32106 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 21, 37), 'str', '*.f')\n kwargs_32107 = {}\n join_32104 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 21, 20), 'join', False)\n join_call_result_32108 = invoke(stypy.reporting.localization.\n Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106],\n **kwargs_32107)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 21, 19), list_32103, join_call_result_32108)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 21, 4), 'quadpack_src', list_32103)\n list_32114 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 47), 'list')\n str_32115 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32115)\n str_32116 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32116)\n str_32117 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 23, 34), 'str', 'cfode.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32117)\n str_32118 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 8), 'str', 'ewset.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32118)\n str_32119 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32119)\n str_32120 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 24, 30), 'str', 'intdy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32120)\n str_32121 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32121)\n str_32122 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 19), 'str', 'prja.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32122)\n str_32123 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 29), 'str', 'solsy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32123)\n str_32124 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 25, 40), 'str', 'srcma.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32124)\n str_32125 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 8), 'str', 'stoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, 
str_32125)\n str_32126 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32126)\n str_32127 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32127)\n str_32128 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32128)\n str_32129 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 47), list_32114, str_32129)\n comprehension_32130 = get_contained_elements_type(stypy.reporting.\n localization.Localization(__file__, 22, 17), list_32114)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 17), 'fn', comprehension_32130)\n str_32110 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 22), 'str', 'odepack')\n fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 22, 33), 'fn', False)\n kwargs_32112 = {}\n join_32109 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 22, 17), 'join', False)\n join_call_result_32113 = invoke(stypy.reporting.localization.\n Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111],\n **kwargs_32112)\n list_32131 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 22, 17), 'list')\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 22, 17), list_32131, join_call_result_32113)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 22, 4), 'lsoda_src', list_32131)\n list_32132 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 15), 'list')\n str_32134 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 21), 'str', 'odepack')\n str_32135 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 32), 'str', 'vode.f')\n kwargs_32136 = {}\n join_32133 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 16), 'join', False)\n join_call_result_32137 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135],\n **kwargs_32136)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32137)\n str_32139 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 48), 'str', 'odepack')\n str_32140 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 28, 59), 'str', 'zvode.f')\n kwargs_32141 = {}\n join_32138 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 28, 43), 'join', False)\n join_call_result_32142 = invoke(stypy.reporting.localization.\n Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140],\n **kwargs_32141)\n 
add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 28, 15), list_32132, join_call_result_32142)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 28, 4), 'vode_src', list_32132)\n list_32143 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 14), 'list')\n str_32145 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 20), 'str', 'dop')\n str_32146 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 29, 26), 'str', '*.f')\n kwargs_32147 = {}\n join_32144 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 29, 15), 'join', False)\n join_call_result_32148 = invoke(stypy.reporting.localization.\n Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146],\n **kwargs_32147)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 29, 14), list_32143, join_call_result_32148)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 29, 4), 'dop_src', list_32143)\n list_32149 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 24), 'list')\n str_32151 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 30), 'str', 'tests')\n str_32152 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 30, 38), 'str',\n '_test_multivariate.c')\n kwargs_32153 = {}\n join_32150 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 30, 25), 'join', False)\n join_call_result_32154 = invoke(stypy.reporting.localization.\n Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152],\n **kwargs_32153)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 30, 24), list_32149, join_call_result_32154)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 30, 4), 'quadpack_test_src', list_32149)\n list_32155 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 29), 'list')\n str_32157 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 35), 'str', 'tests')\n str_32158 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f')\n kwargs_32159 = {}\n join_32156 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 31, 30), 'join', False)\n join_call_result_32160 = invoke(stypy.reporting.localization.\n Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158],\n **kwargs_32159)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 31, 29), list_32155, join_call_result_32160)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 31, 4), 'odeint_banded_test_src', list_32155)\n str_32163 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 33, 23), 'str', 'mach')\n mach_src_32164 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 39), 'mach_src', False)\n keyword_32165 = mach_src_32164\n dict_32166 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 33), 'dict')\n str_32167 = get_builtin_python_type_instance(stypy.reporting.\n 
localization.Localization(__file__, 34, 34), 'str', 'noopt')\n tuple_32168 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 43), 'tuple')\n file___32169 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 34, 43), '__file__', False)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, file___32169)\n int_32170 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 34, 52), 'int')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 43), tuple_32168, int_32170)\n set_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 34, 33), dict_32166, (str_32167, tuple_32168))\n keyword_32171 = dict_32166\n kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}\n config_32161 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 33, 4), 'config', False)\n add_library_32162 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 33, 4), config_32161,\n 'add_library')\n add_library_call_result_32173 = invoke(stypy.reporting.localization.\n Localization(__file__, 33, 4), add_library_32162, *[str_32163], **\n kwargs_32172)\n str_32176 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 35, 23), 'str', 'quadpack')\n quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 43), 'quadpack_src', False)\n keyword_32178 = quadpack_src_32177\n kwargs_32179 = {'sources': keyword_32178}\n config_32174 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 35, 4), 'config', False)\n add_library_32175 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 35, 4), config_32174,\n 'add_library')\n add_library_call_result_32180 = invoke(stypy.reporting.localization.\n Localization(__file__, 35, 4), add_library_32175, *[str_32176], **\n kwargs_32179)\n str_32183 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 36, 23), 'str', 'lsoda')\n lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 40), 'lsoda_src', False)\n keyword_32185 = lsoda_src_32184\n kwargs_32186 = {'sources': keyword_32185}\n config_32181 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 36, 4), 'config', False)\n add_library_32182 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 36, 4), config_32181,\n 'add_library')\n add_library_call_result_32187 = invoke(stypy.reporting.localization.\n Localization(__file__, 36, 4), add_library_32182, *[str_32183], **\n kwargs_32186)\n str_32190 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 37, 23), 'str', 'vode')\n vode_src_32191 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 39), 'vode_src', False)\n keyword_32192 = vode_src_32191\n kwargs_32193 = {'sources': keyword_32192}\n config_32188 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 37, 4), 'config', False)\n add_library_32189 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 37, 4), config_32188,\n 'add_library')\n add_library_call_result_32194 = 
invoke(stypy.reporting.localization.\n Localization(__file__, 37, 4), add_library_32189, *[str_32190], **\n kwargs_32193)\n str_32197 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 38, 23), 'str', 'dop')\n dop_src_32198 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 38), 'dop_src', False)\n keyword_32199 = dop_src_32198\n kwargs_32200 = {'sources': keyword_32199}\n config_32195 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 38, 4), 'config', False)\n add_library_32196 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 38, 4), config_32195,\n 'add_library')\n add_library_call_result_32201 = invoke(stypy.reporting.localization.\n Localization(__file__, 38, 4), add_library_32196, *[str_32197], **\n kwargs_32200)\n list_32202 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 19), 'list')\n file___32207 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 42, 41), '__file__', False)\n kwargs_32208 = {}\n os_32204 = module_type_store.get_type_of(stypy.reporting.localization.\n Localization(__file__, 42, 25), 'os', False)\n path_32205 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), os_32204, 'path')\n dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 42, 25), path_32205, 'dirname')\n dirname_call_result_32209 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 25), dirname_32206, *[file___32207], **\n kwargs_32208)\n str_32210 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 52), 'str', '..')\n str_32211 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 58), 'str', '_lib')\n str_32212 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 42, 66), 'str', 'src')\n kwargs_32213 = {}\n join_32203 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 42, 20), 'join', False)\n join_call_result_32214 = invoke(stypy.reporting.localization.\n Localization(__file__, 42, 20), join_32203, *[\n dirname_call_result_32209, str_32210, str_32211, str_32212], **\n kwargs_32213)\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 42, 19), list_32202, join_call_result_32214)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 42, 4), 'include_dirs', list_32202)\n str_32215 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 43, 7), 'str', 'include_dirs')\n lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 43, 25), 'lapack_opt')\n result_contains_32217 = python_operator(stypy.reporting.localization.\n Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)\n if_condition_32218 = is_suitable_condition(stypy.reporting.localization\n .Localization(__file__, 43, 4), result_contains_32217)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 43, 4), 'if_condition_32218', if_condition_32218)\n module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')\n lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 44, 26), 'lapack_opt', 
False)\n kwargs_32221 = {}\n dict_32219 = module_type_store.get_type_of(stypy.reporting.localization\n .Localization(__file__, 44, 21), 'dict', False)\n dict_call_result_32222 = invoke(stypy.reporting.localization.\n Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **\n kwargs_32221)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 44, 8), 'lapack_opt', dict_call_result_32222)\n str_32227 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 45, 43), 'str', 'include_dirs')\n kwargs_32228 = {}\n lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 28), 'lapack_opt', False)\n pop_32226 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')\n pop_call_result_32229 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228\n )\n kwargs_32230 = {}\n include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 45, 8), 'include_dirs', False)\n extend_32224 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 45, 8), include_dirs_32223,\n 'extend')\n extend_call_result_32231 = invoke(stypy.reporting.localization.\n Localization(__file__, 45, 8), extend_32224, *[\n pop_call_result_32229], **kwargs_32230)\n module_type_store = module_type_store.join_ssa_context()\n str_32234 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 47, 25), 'str', '_quadpack')\n list_32235 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 33), 'list')\n str_32236 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c'\n )\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 48, 33), list_32235, str_32236)\n keyword_32237 = list_32235\n list_32238 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 35), 'list')\n str_32239 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 36), 'str', 'quadpack')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32239)\n str_32240 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 49, 48), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 49, 35), list_32238, str_32240)\n lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 49, 58), 'lapack_libs', False)\n result_add_32242 = python_operator(stypy.reporting.localization.\n Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)\n keyword_32243 = result_add_32242\n list_32244 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 34), 'list')\n str_32245 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 50, 34), list_32244, str_32245)\n quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 36), 'quadpack_src', False)\n result_add_32247 = 
python_operator(stypy.reporting.localization.\n Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)\n mach_src_32248 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 51, 51), 'mach_src', False)\n result_add_32249 = python_operator(stypy.reporting.localization.\n Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)\n keyword_32250 = result_add_32249\n include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 52, 38), 'include_dirs', False)\n keyword_32252 = include_dirs_32251\n lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 53, 27), 'lapack_opt', False)\n kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237,\n 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253,\n 'include_dirs': keyword_32252}\n config_32232 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 47, 4), 'config', False)\n add_extension_32233 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 47, 4), config_32232,\n 'add_extension')\n add_extension_call_result_32255 = invoke(stypy.reporting.localization.\n Localization(__file__, 47, 4), add_extension_32233, *[str_32234],\n **kwargs_32254)\n kwargs_32258 = {}\n lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 56, 19), 'lapack_opt', False)\n copy_32257 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')\n copy_call_result_32259 = invoke(stypy.reporting.localization.\n Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 56, 4), 'odepack_opts', copy_call_result_32259)\n numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)\n kwargs_32263 = {}\n odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 57, 4), 'odepack_opts', False)\n update_32261 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 57, 4), odepack_opts_32260,\n 'update')\n update_call_result_32264 = invoke(stypy.reporting.localization.\n Localization(__file__, 57, 4), update_32261, *[\n numpy_nodepr_api_32262], **kwargs_32263)\n str_32267 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 58, 25), 'str', '_odepack')\n list_32268 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 33), 'list')\n str_32269 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 59, 33), list_32268, str_32269)\n keyword_32270 = list_32268\n list_32271 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 35), 'list')\n str_32272 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32272)\n str_32273 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 60, 45), 'str', 
'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 60, 35), list_32271, str_32273)\n lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 60, 55), 'lapack_libs', False)\n result_add_32275 = python_operator(stypy.reporting.localization.\n Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)\n keyword_32276 = result_add_32275\n lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 34), 'lsoda_src', False)\n mach_src_32278 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 61, 46), 'mach_src', False)\n result_add_32279 = python_operator(stypy.reporting.localization.\n Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)\n keyword_32280 = result_add_32279\n odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 62, 27), 'odepack_opts', False)\n kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270,\n 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}\n config_32265 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 58, 4), 'config', False)\n add_extension_32266 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 58, 4), config_32265,\n 'add_extension')\n add_extension_call_result_32283 = invoke(stypy.reporting.localization.\n Localization(__file__, 58, 4), add_extension_32266, *[str_32267],\n **kwargs_32282)\n str_32286 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 65, 25), 'str', 'vode')\n list_32287 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 33), 'list')\n str_32288 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 66, 34), 'str', 'vode.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 66, 33), list_32287, str_32288)\n keyword_32289 = list_32287\n list_32290 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 35), 'list')\n str_32291 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 67, 36), 'str', 'vode')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 67, 35), list_32290, str_32291)\n lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 67, 46), 'lapack_libs', False)\n result_add_32293 = python_operator(stypy.reporting.localization.\n Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)\n keyword_32294 = result_add_32293\n vode_src_32295 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 68, 33), 'vode_src', False)\n keyword_32296 = vode_src_32295\n lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 69, 27), 'lapack_opt', False)\n kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289,\n 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}\n config_32284 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 65, 4), 'config', False)\n add_extension_32285 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 65, 4), config_32284,\n 'add_extension')\n add_extension_call_result_32299 = 
invoke(stypy.reporting.localization.\n Localization(__file__, 65, 4), add_extension_32285, *[str_32286],\n **kwargs_32298)\n str_32302 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 72, 25), 'str', 'lsoda')\n list_32303 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 33), 'list')\n str_32304 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 73, 33), list_32303, str_32304)\n keyword_32305 = list_32303\n list_32306 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 35), 'list')\n str_32307 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32307)\n str_32308 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 74, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 74, 35), list_32306, str_32308)\n lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 74, 55), 'lapack_libs', False)\n result_add_32310 = python_operator(stypy.reporting.localization.\n Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)\n keyword_32311 = result_add_32310\n lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 34), 'lsoda_src', False)\n mach_src_32313 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 75, 46), 'mach_src', False)\n result_add_32314 = python_operator(stypy.reporting.localization.\n Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313)\n keyword_32315 = result_add_32314\n lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 76, 27), 'lapack_opt', False)\n kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305,\n 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}\n config_32300 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 72, 4), 'config', False)\n add_extension_32301 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 72, 4), config_32300,\n 'add_extension')\n add_extension_call_result_32318 = invoke(stypy.reporting.localization.\n Localization(__file__, 72, 4), add_extension_32301, *[str_32302],\n **kwargs_32317)\n str_32321 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 79, 25), 'str', '_dop')\n list_32322 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 33), 'list')\n str_32323 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 80, 33), list_32322, str_32323)\n keyword_32324 = list_32322\n list_32325 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 35), 'list')\n str_32326 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 81, 36), 'str', 'dop')\n 
add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 81, 35), list_32325, str_32326)\n keyword_32327 = list_32325\n dop_src_32328 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 82, 33), 'dop_src', False)\n keyword_32329 = dop_src_32328\n kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324,\n 'depends': keyword_32329}\n config_32319 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 79, 4), 'config', False)\n add_extension_32320 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 79, 4), config_32319,\n 'add_extension')\n add_extension_call_result_32331 = invoke(stypy.reporting.localization.\n Localization(__file__, 79, 4), add_extension_32320, *[str_32321],\n **kwargs_32330)\n str_32334 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 84, 25), 'str',\n '_test_multivariate')\n quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 85, 33), 'quadpack_test_src', \n False)\n keyword_32336 = quadpack_test_src_32335\n kwargs_32337 = {'sources': keyword_32336}\n config_32332 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 84, 4), 'config', False)\n add_extension_32333 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 84, 4), config_32332,\n 'add_extension')\n add_extension_call_result_32338 = invoke(stypy.reporting.localization.\n Localization(__file__, 84, 4), add_extension_32333, *[str_32334],\n **kwargs_32337)\n str_32341 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 88, 25), 'str',\n '_test_odeint_banded')\n odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.\n reporting.localization.Localization(__file__, 89, 33),\n 'odeint_banded_test_src', False)\n keyword_32343 = odeint_banded_test_src_32342\n list_32344 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 35), 'list')\n str_32345 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32345)\n str_32346 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 90, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(\n __file__, 90, 35), list_32344, str_32346)\n lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 90, 55), 'lapack_libs', False)\n result_add_32348 = python_operator(stypy.reporting.localization.\n Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347)\n keyword_32349 = result_add_32348\n lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 34), 'lsoda_src', False)\n mach_src_32351 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 91, 46), 'mach_src', False)\n result_add_32352 = python_operator(stypy.reporting.localization.\n Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)\n keyword_32353 = result_add_32352\n lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 92, 27), 'lapack_opt', False)\n kwargs_32355 = {'libraries': 
keyword_32349, 'sources': keyword_32343,\n 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}\n config_32339 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 88, 4), 'config', False)\n add_extension_32340 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 88, 4), config_32339,\n 'add_extension')\n add_extension_call_result_32356 = invoke(stypy.reporting.localization.\n Localization(__file__, 88, 4), add_extension_32340, *[str_32341],\n **kwargs_32355)\n str_32359 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 94, 26), 'str', '_ivp')\n kwargs_32360 = {}\n config_32357 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 94, 4), 'config', False)\n add_subpackage_32358 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 94, 4), config_32357,\n 'add_subpackage')\n add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.\n Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359],\n **kwargs_32360)\n str_32364 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 96, 24), 'str', 'tests')\n kwargs_32365 = {}\n config_32362 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 96, 4), 'config', False)\n add_data_dir_32363 = module_type_store.get_type_of_member(stypy.\n reporting.localization.Localization(__file__, 96, 4), config_32362,\n 'add_data_dir')\n add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.\n Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **\n kwargs_32365)\n config_32367 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 97, 11), 'config')\n module_type_store.set_type_of(stypy.reporting.localization.Localization\n (__file__, 97, 4), 'stypy_return_type', config_32367)\n teardown_call_information(localization, arguments)\n stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting\n .localization.Localization(__file__, 9, 0), 'stypy_return_type')\n module_type_store.store_return_type_of_current_context(\n stypy_return_type_32368)\n module_type_store = module_type_store.close_function_context()\n return stypy_return_type_32368\n\n\nmodule_type_store.set_type_of(stypy.reporting.localization.Localization(\n __file__, 9, 0), 'configuration', configuration)\nif __name__ == '__main__':\n stypy.reporting.localization.Localization.set_current(stypy.reporting.\n localization.Localization(__file__, 101, 4))\n update_path_to_current_file_folder(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n import_32369 = generate_type_inference_code_for_module(stypy.reporting.\n localization.Localization(__file__, 101, 4), 'numpy.distutils.core')\n if type(import_32369) is not StypyTypeError:\n if import_32369 != 'pyd_module':\n __import__(import_32369)\n sys_modules_32370 = sys.modules[import_32369]\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core',\n sys_modules_32370.module_type_store, module_type_store, [\n 'setup'])\n nest_module(stypy.reporting.localization.Localization(__file__,\n 101, 4), __file__, sys_modules_32370, sys_modules_32370.\n module_type_store, module_type_store)\n else:\n from numpy.distutils.core import setup\n import_from_module(stypy.reporting.localization.Localization(\n __file__, 101, 4), 'numpy.distutils.core', None,\n 
module_type_store, ['setup'], [setup])\n else:\n module_type_store.set_type_of(stypy.reporting.localization.\n Localization(__file__, 101, 4), 'numpy.distutils.core',\n import_32369)\n remove_current_file_folder_from_path(\n 'C:/Python27/lib/site-packages/scipy/integrate/')\n kwargs_32378 = {}\n str_32373 = get_builtin_python_type_instance(stypy.reporting.\n localization.Localization(__file__, 102, 35), 'str', '')\n keyword_32374 = str_32373\n kwargs_32375 = {'top_path': keyword_32374}\n configuration_32372 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 12), 'configuration', False)\n configuration_call_result_32376 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), configuration_32372, *[], **\n kwargs_32375)\n todict_32377 = module_type_store.get_type_of_member(stypy.reporting.\n localization.Localization(__file__, 102, 12),\n configuration_call_result_32376, 'todict')\n todict_call_result_32379 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)\n kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}\n setup_32371 = module_type_store.get_type_of(stypy.reporting.\n localization.Localization(__file__, 102, 4), 'setup', False)\n setup_call_result_32381 = invoke(stypy.reporting.localization.\n Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)\nmodule_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()\nmodule_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()\n", "step-5": "\n# -*- coding: utf-8 -*-\n\n\"\"\"\nORIGINAL PROGRAM SOURCE CODE:\n1: from __future__ import division, print_function, absolute_import\n2: \n3: import os\n4: from os.path import join\n5: \n6: from scipy._build_utils import numpy_nodepr_api\n7: \n8: \n9: def configuration(parent_package='',top_path=None):\n10: from numpy.distutils.misc_util import Configuration\n11: from numpy.distutils.system_info import get_info\n12: config = Configuration('integrate', parent_package, top_path)\n13: \n14: # Get a local copy of lapack_opt_info\n15: lapack_opt = dict(get_info('lapack_opt',notfound_action=2))\n16: # Pop off the libraries list so it can be combined with\n17: # additional required libraries\n18: lapack_libs = lapack_opt.pop('libraries', [])\n19: \n20: mach_src = [join('mach','*.f')]\n21: quadpack_src = [join('quadpack', '*.f')]\n22: lsoda_src = [join('odepack', fn) for fn in [\n23: 'blkdta000.f', 'bnorm.f', 'cfode.f',\n24: 'ewset.f', 'fnorm.f', 'intdy.f',\n25: 'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f',\n26: 'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f',\n27: 'xsetun.f']]\n28: vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')]\n29: dop_src = [join('dop','*.f')]\n30: quadpack_test_src = [join('tests','_test_multivariate.c')]\n31: odeint_banded_test_src = [join('tests', 'banded5x5.f')]\n32: \n33: config.add_library('mach', sources=mach_src,\n34: config_fc={'noopt':(__file__,1)})\n35: config.add_library('quadpack', sources=quadpack_src)\n36: config.add_library('lsoda', sources=lsoda_src)\n37: config.add_library('vode', sources=vode_src)\n38: config.add_library('dop', sources=dop_src)\n39: \n40: # Extensions\n41: # quadpack:\n42: include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')]\n43: if 'include_dirs' in lapack_opt:\n44: lapack_opt = dict(lapack_opt)\n45: include_dirs.extend(lapack_opt.pop('include_dirs'))\n46: \n47: config.add_extension('_quadpack',\n48: sources=['_quadpackmodule.c'],\n49: 
libraries=['quadpack', 'mach'] + lapack_libs,\n50: depends=(['__quadpack.h']\n51: + quadpack_src + mach_src),\n52: include_dirs=include_dirs,\n53: **lapack_opt)\n54: \n55: # odepack/lsoda-odeint\n56: odepack_opts = lapack_opt.copy()\n57: odepack_opts.update(numpy_nodepr_api)\n58: config.add_extension('_odepack',\n59: sources=['_odepackmodule.c'],\n60: libraries=['lsoda', 'mach'] + lapack_libs,\n61: depends=(lsoda_src + mach_src),\n62: **odepack_opts)\n63: \n64: # vode\n65: config.add_extension('vode',\n66: sources=['vode.pyf'],\n67: libraries=['vode'] + lapack_libs,\n68: depends=vode_src,\n69: **lapack_opt)\n70: \n71: # lsoda\n72: config.add_extension('lsoda',\n73: sources=['lsoda.pyf'],\n74: libraries=['lsoda', 'mach'] + lapack_libs,\n75: depends=(lsoda_src + mach_src),\n76: **lapack_opt)\n77: \n78: # dop\n79: config.add_extension('_dop',\n80: sources=['dop.pyf'],\n81: libraries=['dop'],\n82: depends=dop_src)\n83: \n84: config.add_extension('_test_multivariate',\n85: sources=quadpack_test_src)\n86: \n87: # Fortran+f2py extension module for testing odeint.\n88: config.add_extension('_test_odeint_banded',\n89: sources=odeint_banded_test_src,\n90: libraries=['lsoda', 'mach'] + lapack_libs,\n91: depends=(lsoda_src + mach_src),\n92: **lapack_opt)\n93: \n94: config.add_subpackage('_ivp')\n95: \n96: config.add_data_dir('tests')\n97: return config\n98: \n99: \n100: if __name__ == '__main__':\n101: from numpy.distutils.core import setup\n102: setup(**configuration(top_path='').todict())\n103: \n\n\"\"\"\n\n# Import the stypy library necessary elements\nfrom stypy.type_inference_programs.type_inference_programs_imports import *\n\n# Create the module type store\nmodule_type_store = Context(None, __file__)\n\n# ################# Begin of the type inference program ##################\n\nstypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 3, 0))\n\n# 'import os' statement (line 3)\nimport os\n\nimport_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'os', os, module_type_store)\n\nstypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 4, 0))\n\n# 'from os.path import join' statement (line 4)\nupdate_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32066 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path')\n\nif (type(import_32066) is not StypyTypeError):\n\n if (import_32066 != 'pyd_module'):\n __import__(import_32066)\n sys_modules_32067 = sys.modules[import_32066]\n import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', sys_modules_32067.module_type_store, module_type_store, ['join'])\n nest_module(stypy.reporting.localization.Localization(__file__, 4, 0), __file__, sys_modules_32067, sys_modules_32067.module_type_store, module_type_store)\n else:\n from os.path import join\n\n import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', None, module_type_store, ['join'], [join])\n\nelse:\n # Assigning a type to the variable 'os.path' (line 4)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', import_32066)\n\nremove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')\n\nstypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 6, 0))\n\n# 'from scipy._build_utils import numpy_nodepr_api' statement 
(line 6)\nupdate_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')\nimport_32068 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils')\n\nif (type(import_32068) is not StypyTypeError):\n\n if (import_32068 != 'pyd_module'):\n __import__(import_32068)\n sys_modules_32069 = sys.modules[import_32068]\n import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', sys_modules_32069.module_type_store, module_type_store, ['numpy_nodepr_api'])\n nest_module(stypy.reporting.localization.Localization(__file__, 6, 0), __file__, sys_modules_32069, sys_modules_32069.module_type_store, module_type_store)\n else:\n from scipy._build_utils import numpy_nodepr_api\n\n import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', None, module_type_store, ['numpy_nodepr_api'], [numpy_nodepr_api])\n\nelse:\n # Assigning a type to the variable 'scipy._build_utils' (line 6)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', import_32068)\n\nremove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')\n\n\n@norecursion\ndef configuration(localization, *varargs, **kwargs):\n global module_type_store\n # Assign values to the parameters with defaults\n str_32070 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 9, 33), 'str', '')\n # Getting the type of 'None' (line 9)\n None_32071 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 45), 'None')\n defaults = [str_32070, None_32071]\n # Create a new context for function 'configuration'\n module_type_store = module_type_store.open_function_context('configuration', 9, 0, False)\n \n # Passed parameters checking function\n configuration.stypy_localization = localization\n configuration.stypy_type_of_self = None\n configuration.stypy_type_store = module_type_store\n configuration.stypy_function_name = 'configuration'\n configuration.stypy_param_names_list = ['parent_package', 'top_path']\n configuration.stypy_varargs_param_name = None\n configuration.stypy_kwargs_param_name = None\n configuration.stypy_call_defaults = defaults\n configuration.stypy_call_varargs = varargs\n configuration.stypy_call_kwargs = kwargs\n arguments = process_argument_values(localization, None, module_type_store, 'configuration', ['parent_package', 'top_path'], None, None, defaults, varargs, kwargs)\n\n if is_error_type(arguments):\n # Destroy the current context\n module_type_store = module_type_store.close_function_context()\n return arguments\n\n # Initialize method data\n init_call_information(module_type_store, 'configuration', localization, ['parent_package', 'top_path'], arguments)\n \n # Default return type storage variable (SSA)\n # Assigning a type to the variable 'stypy_return_type'\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)\n \n \n # ################# Begin of 'configuration(...)' code ##################\n\n stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 10, 4))\n \n # 'from numpy.distutils.misc_util import Configuration' statement (line 10)\n update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')\n import_32072 = 
generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util')\n\n if (type(import_32072) is not StypyTypeError):\n\n if (import_32072 != 'pyd_module'):\n __import__(import_32072)\n sys_modules_32073 = sys.modules[import_32072]\n import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', sys_modules_32073.module_type_store, module_type_store, ['Configuration'])\n nest_module(stypy.reporting.localization.Localization(__file__, 10, 4), __file__, sys_modules_32073, sys_modules_32073.module_type_store, module_type_store)\n else:\n from numpy.distutils.misc_util import Configuration\n\n import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', None, module_type_store, ['Configuration'], [Configuration])\n\n else:\n # Assigning a type to the variable 'numpy.distutils.misc_util' (line 10)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', import_32072)\n\n remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')\n \n stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 11, 4))\n \n # 'from numpy.distutils.system_info import get_info' statement (line 11)\n update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')\n import_32074 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info')\n\n if (type(import_32074) is not StypyTypeError):\n\n if (import_32074 != 'pyd_module'):\n __import__(import_32074)\n sys_modules_32075 = sys.modules[import_32074]\n import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', sys_modules_32075.module_type_store, module_type_store, ['get_info'])\n nest_module(stypy.reporting.localization.Localization(__file__, 11, 4), __file__, sys_modules_32075, sys_modules_32075.module_type_store, module_type_store)\n else:\n from numpy.distutils.system_info import get_info\n\n import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', None, module_type_store, ['get_info'], [get_info])\n\n else:\n # Assigning a type to the variable 'numpy.distutils.system_info' (line 11)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', import_32074)\n\n remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')\n \n \n # Assigning a Call to a Name (line 12):\n \n # Call to Configuration(...): (line 12)\n # Processing the call arguments (line 12)\n str_32077 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 12, 27), 'str', 'integrate')\n # Getting the type of 'parent_package' (line 12)\n parent_package_32078 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 40), 'parent_package', False)\n # Getting the type of 'top_path' (line 12)\n top_path_32079 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 56), 'top_path', False)\n # Processing the call keyword arguments (line 12)\n kwargs_32080 = {}\n # Getting the type of 'Configuration' (line 12)\n Configuration_32076 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 13), 'Configuration', False)\n 
# Calling Configuration(args, kwargs) (line 12)\n Configuration_call_result_32081 = invoke(stypy.reporting.localization.Localization(__file__, 12, 13), Configuration_32076, *[str_32077, parent_package_32078, top_path_32079], **kwargs_32080)\n \n # Assigning a type to the variable 'config' (line 12)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 12, 4), 'config', Configuration_call_result_32081)\n \n # Assigning a Call to a Name (line 15):\n \n # Call to dict(...): (line 15)\n # Processing the call arguments (line 15)\n \n # Call to get_info(...): (line 15)\n # Processing the call arguments (line 15)\n str_32084 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')\n # Processing the call keyword arguments (line 15)\n int_32085 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 60), 'int')\n keyword_32086 = int_32085\n kwargs_32087 = {'notfound_action': keyword_32086}\n # Getting the type of 'get_info' (line 15)\n get_info_32083 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 22), 'get_info', False)\n # Calling get_info(args, kwargs) (line 15)\n get_info_call_result_32088 = invoke(stypy.reporting.localization.Localization(__file__, 15, 22), get_info_32083, *[str_32084], **kwargs_32087)\n \n # Processing the call keyword arguments (line 15)\n kwargs_32089 = {}\n # Getting the type of 'dict' (line 15)\n dict_32082 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 17), 'dict', False)\n # Calling dict(args, kwargs) (line 15)\n dict_call_result_32090 = invoke(stypy.reporting.localization.Localization(__file__, 15, 17), dict_32082, *[get_info_call_result_32088], **kwargs_32089)\n \n # Assigning a type to the variable 'lapack_opt' (line 15)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 15, 4), 'lapack_opt', dict_call_result_32090)\n \n # Assigning a Call to a Name (line 18):\n \n # Call to pop(...): (line 18)\n # Processing the call arguments (line 18)\n str_32093 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 33), 'str', 'libraries')\n \n # Obtaining an instance of the builtin type 'list' (line 18)\n list_32094 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 46), 'list')\n # Adding type elements to the builtin type 'list' instance (line 18)\n \n # Processing the call keyword arguments (line 18)\n kwargs_32095 = {}\n # Getting the type of 'lapack_opt' (line 18)\n lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 18, 18), 'lapack_opt', False)\n # Obtaining the member 'pop' of a type (line 18)\n pop_32092 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')\n # Calling pop(args, kwargs) (line 18)\n pop_call_result_32096 = invoke(stypy.reporting.localization.Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094], **kwargs_32095)\n \n # Assigning a type to the variable 'lapack_libs' (line 18)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 18, 4), 'lapack_libs', pop_call_result_32096)\n \n # Assigning a List to a Name (line 20):\n \n # Obtaining an instance of the builtin type 'list' (line 20)\n list_32097 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 
15), 'list')\n # Adding type elements to the builtin type 'list' instance (line 20)\n # Adding element type (line 20)\n \n # Call to join(...): (line 20)\n # Processing the call arguments (line 20)\n str_32099 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 21), 'str', 'mach')\n str_32100 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 28), 'str', '*.f')\n # Processing the call keyword arguments (line 20)\n kwargs_32101 = {}\n # Getting the type of 'join' (line 20)\n join_32098 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 20, 16), 'join', False)\n # Calling join(args, kwargs) (line 20)\n join_call_result_32102 = invoke(stypy.reporting.localization.Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100], **kwargs_32101)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 20, 15), list_32097, join_call_result_32102)\n \n # Assigning a type to the variable 'mach_src' (line 20)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 20, 4), 'mach_src', list_32097)\n \n # Assigning a List to a Name (line 21):\n \n # Obtaining an instance of the builtin type 'list' (line 21)\n list_32103 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 19), 'list')\n # Adding type elements to the builtin type 'list' instance (line 21)\n # Adding element type (line 21)\n \n # Call to join(...): (line 21)\n # Processing the call arguments (line 21)\n str_32105 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 25), 'str', 'quadpack')\n str_32106 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 37), 'str', '*.f')\n # Processing the call keyword arguments (line 21)\n kwargs_32107 = {}\n # Getting the type of 'join' (line 21)\n join_32104 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 21, 20), 'join', False)\n # Calling join(args, kwargs) (line 21)\n join_call_result_32108 = invoke(stypy.reporting.localization.Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106], **kwargs_32107)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 21, 19), list_32103, join_call_result_32108)\n \n # Assigning a type to the variable 'quadpack_src' (line 21)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 21, 4), 'quadpack_src', list_32103)\n \n # Assigning a ListComp to a Name (line 22):\n # Calculating list comprehension\n # Calculating comprehension expression\n \n # Obtaining an instance of the builtin type 'list' (line 22)\n list_32114 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 47), 'list')\n # Adding type elements to the builtin type 'list' instance (line 22)\n # Adding element type (line 22)\n str_32115 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32115)\n # Adding element type (line 22)\n str_32116 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32116)\n # Adding element type (line 22)\n str_32117 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 34), 'str', 'cfode.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32117)\n # Adding element type (line 22)\n str_32118 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 8), 'str', 'ewset.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32118)\n # Adding element type (line 22)\n str_32119 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32119)\n # Adding element type (line 22)\n str_32120 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 30), 'str', 'intdy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32120)\n # Adding element type (line 22)\n str_32121 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32121)\n # Adding element type (line 22)\n str_32122 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 19), 'str', 'prja.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32122)\n # Adding element type (line 22)\n str_32123 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 29), 'str', 'solsy.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32123)\n # Adding element type (line 22)\n str_32124 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 40), 'str', 'srcma.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32124)\n # Adding element type (line 22)\n str_32125 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 8), 'str', 'stoda.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32125)\n # Adding element type (line 22)\n str_32126 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32126)\n # Adding element type (line 22)\n str_32127 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32127)\n # Adding element type (line 22)\n str_32128 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32128)\n # Adding element type (line 22)\n str_32129 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32129)\n \n comprehension_32130 = 
get_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32114)\n # Assigning a type to the variable 'fn' (line 22)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'fn', comprehension_32130)\n \n # Call to join(...): (line 22)\n # Processing the call arguments (line 22)\n str_32110 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 22), 'str', 'odepack')\n # Getting the type of 'fn' (line 22)\n fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 33), 'fn', False)\n # Processing the call keyword arguments (line 22)\n kwargs_32112 = {}\n # Getting the type of 'join' (line 22)\n join_32109 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'join', False)\n # Calling join(args, kwargs) (line 22)\n join_call_result_32113 = invoke(stypy.reporting.localization.Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111], **kwargs_32112)\n \n list_32131 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 17), 'list')\n set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32131, join_call_result_32113)\n # Assigning a type to the variable 'lsoda_src' (line 22)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 4), 'lsoda_src', list_32131)\n \n # Assigning a List to a Name (line 28):\n \n # Obtaining an instance of the builtin type 'list' (line 28)\n list_32132 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 15), 'list')\n # Adding type elements to the builtin type 'list' instance (line 28)\n # Adding element type (line 28)\n \n # Call to join(...): (line 28)\n # Processing the call arguments (line 28)\n str_32134 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 21), 'str', 'odepack')\n str_32135 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 32), 'str', 'vode.f')\n # Processing the call keyword arguments (line 28)\n kwargs_32136 = {}\n # Getting the type of 'join' (line 28)\n join_32133 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 16), 'join', False)\n # Calling join(args, kwargs) (line 28)\n join_call_result_32137 = invoke(stypy.reporting.localization.Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135], **kwargs_32136)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32137)\n # Adding element type (line 28)\n \n # Call to join(...): (line 28)\n # Processing the call arguments (line 28)\n str_32139 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 48), 'str', 'odepack')\n str_32140 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 59), 'str', 'zvode.f')\n # Processing the call keyword arguments (line 28)\n kwargs_32141 = {}\n # Getting the type of 'join' (line 28)\n join_32138 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 43), 'join', False)\n # Calling join(args, kwargs) (line 28)\n join_call_result_32142 = invoke(stypy.reporting.localization.Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140], **kwargs_32141)\n \n 
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32142)\n \n # Assigning a type to the variable 'vode_src' (line 28)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 28, 4), 'vode_src', list_32132)\n \n # Assigning a List to a Name (line 29):\n \n # Obtaining an instance of the builtin type 'list' (line 29)\n list_32143 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 14), 'list')\n # Adding type elements to the builtin type 'list' instance (line 29)\n # Adding element type (line 29)\n \n # Call to join(...): (line 29)\n # Processing the call arguments (line 29)\n str_32145 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 20), 'str', 'dop')\n str_32146 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 26), 'str', '*.f')\n # Processing the call keyword arguments (line 29)\n kwargs_32147 = {}\n # Getting the type of 'join' (line 29)\n join_32144 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 15), 'join', False)\n # Calling join(args, kwargs) (line 29)\n join_call_result_32148 = invoke(stypy.reporting.localization.Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146], **kwargs_32147)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 29, 14), list_32143, join_call_result_32148)\n \n # Assigning a type to the variable 'dop_src' (line 29)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 29, 4), 'dop_src', list_32143)\n \n # Assigning a List to a Name (line 30):\n \n # Obtaining an instance of the builtin type 'list' (line 30)\n list_32149 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 24), 'list')\n # Adding type elements to the builtin type 'list' instance (line 30)\n # Adding element type (line 30)\n \n # Call to join(...): (line 30)\n # Processing the call arguments (line 30)\n str_32151 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 30), 'str', 'tests')\n str_32152 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 38), 'str', '_test_multivariate.c')\n # Processing the call keyword arguments (line 30)\n kwargs_32153 = {}\n # Getting the type of 'join' (line 30)\n join_32150 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 30, 25), 'join', False)\n # Calling join(args, kwargs) (line 30)\n join_call_result_32154 = invoke(stypy.reporting.localization.Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152], **kwargs_32153)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 30, 24), list_32149, join_call_result_32154)\n \n # Assigning a type to the variable 'quadpack_test_src' (line 30)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 30, 4), 'quadpack_test_src', list_32149)\n \n # Assigning a List to a Name (line 31):\n \n # Obtaining an instance of the builtin type 'list' (line 31)\n list_32155 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 29), 'list')\n # Adding type elements to the builtin type 'list' instance (line 31)\n # Adding element type (line 31)\n \n # Call to join(...): (line 31)\n # Processing the call arguments (line 31)\n str_32157 = 
get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 35), 'str', 'tests')\n str_32158 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f')\n # Processing the call keyword arguments (line 31)\n kwargs_32159 = {}\n # Getting the type of 'join' (line 31)\n join_32156 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 30), 'join', False)\n # Calling join(args, kwargs) (line 31)\n join_call_result_32160 = invoke(stypy.reporting.localization.Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158], **kwargs_32159)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 31, 29), list_32155, join_call_result_32160)\n \n # Assigning a type to the variable 'odeint_banded_test_src' (line 31)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 31, 4), 'odeint_banded_test_src', list_32155)\n \n # Call to add_library(...): (line 33)\n # Processing the call arguments (line 33)\n str_32163 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 33, 23), 'str', 'mach')\n # Processing the call keyword arguments (line 33)\n # Getting the type of 'mach_src' (line 33)\n mach_src_32164 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 39), 'mach_src', False)\n keyword_32165 = mach_src_32164\n \n # Obtaining an instance of the builtin type 'dict' (line 34)\n dict_32166 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 33), 'dict')\n # Adding type elements to the builtin type 'dict' instance (line 34)\n # Adding element type (key, value) (line 34)\n str_32167 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 34), 'str', 'noopt')\n \n # Obtaining an instance of the builtin type 'tuple' (line 34)\n tuple_32168 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 43), 'tuple')\n # Adding type elements to the builtin type 'tuple' instance (line 34)\n # Adding element type (line 34)\n # Getting the type of '__file__' (line 34)\n file___32169 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 34, 43), '__file__', False)\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, file___32169)\n # Adding element type (line 34)\n int_32170 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 52), 'int')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, int_32170)\n \n set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 33), dict_32166, (str_32167, tuple_32168))\n \n keyword_32171 = dict_32166\n kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}\n # Getting the type of 'config' (line 33)\n config_32161 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 4), 'config', False)\n # Obtaining the member 'add_library' of a type (line 33)\n add_library_32162 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 33, 4), config_32161, 'add_library')\n # Calling add_library(args, kwargs) (line 33)\n add_library_call_result_32173 = invoke(stypy.reporting.localization.Localization(__file__, 33, 4), add_library_32162, *[str_32163], **kwargs_32172)\n \n \n 
# Call to add_library(...): (line 35)\n # Processing the call arguments (line 35)\n str_32176 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 35, 23), 'str', 'quadpack')\n # Processing the call keyword arguments (line 35)\n # Getting the type of 'quadpack_src' (line 35)\n quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 43), 'quadpack_src', False)\n keyword_32178 = quadpack_src_32177\n kwargs_32179 = {'sources': keyword_32178}\n # Getting the type of 'config' (line 35)\n config_32174 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 4), 'config', False)\n # Obtaining the member 'add_library' of a type (line 35)\n add_library_32175 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 35, 4), config_32174, 'add_library')\n # Calling add_library(args, kwargs) (line 35)\n add_library_call_result_32180 = invoke(stypy.reporting.localization.Localization(__file__, 35, 4), add_library_32175, *[str_32176], **kwargs_32179)\n \n \n # Call to add_library(...): (line 36)\n # Processing the call arguments (line 36)\n str_32183 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 36, 23), 'str', 'lsoda')\n # Processing the call keyword arguments (line 36)\n # Getting the type of 'lsoda_src' (line 36)\n lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 40), 'lsoda_src', False)\n keyword_32185 = lsoda_src_32184\n kwargs_32186 = {'sources': keyword_32185}\n # Getting the type of 'config' (line 36)\n config_32181 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 4), 'config', False)\n # Obtaining the member 'add_library' of a type (line 36)\n add_library_32182 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 36, 4), config_32181, 'add_library')\n # Calling add_library(args, kwargs) (line 36)\n add_library_call_result_32187 = invoke(stypy.reporting.localization.Localization(__file__, 36, 4), add_library_32182, *[str_32183], **kwargs_32186)\n \n \n # Call to add_library(...): (line 37)\n # Processing the call arguments (line 37)\n str_32190 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 37, 23), 'str', 'vode')\n # Processing the call keyword arguments (line 37)\n # Getting the type of 'vode_src' (line 37)\n vode_src_32191 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 39), 'vode_src', False)\n keyword_32192 = vode_src_32191\n kwargs_32193 = {'sources': keyword_32192}\n # Getting the type of 'config' (line 37)\n config_32188 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 4), 'config', False)\n # Obtaining the member 'add_library' of a type (line 37)\n add_library_32189 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 37, 4), config_32188, 'add_library')\n # Calling add_library(args, kwargs) (line 37)\n add_library_call_result_32194 = invoke(stypy.reporting.localization.Localization(__file__, 37, 4), add_library_32189, *[str_32190], **kwargs_32193)\n \n \n # Call to add_library(...): (line 38)\n # Processing the call arguments (line 38)\n str_32197 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 38, 23), 'str', 'dop')\n # Processing the call keyword arguments (line 38)\n # 
Getting the type of 'dop_src' (line 38)\n dop_src_32198 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 38), 'dop_src', False)\n keyword_32199 = dop_src_32198\n kwargs_32200 = {'sources': keyword_32199}\n # Getting the type of 'config' (line 38)\n config_32195 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 4), 'config', False)\n # Obtaining the member 'add_library' of a type (line 38)\n add_library_32196 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 38, 4), config_32195, 'add_library')\n # Calling add_library(args, kwargs) (line 38)\n add_library_call_result_32201 = invoke(stypy.reporting.localization.Localization(__file__, 38, 4), add_library_32196, *[str_32197], **kwargs_32200)\n \n \n # Assigning a List to a Name (line 42):\n \n # Obtaining an instance of the builtin type 'list' (line 42)\n list_32202 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 19), 'list')\n # Adding type elements to the builtin type 'list' instance (line 42)\n # Adding element type (line 42)\n \n # Call to join(...): (line 42)\n # Processing the call arguments (line 42)\n \n # Call to dirname(...): (line 42)\n # Processing the call arguments (line 42)\n # Getting the type of '__file__' (line 42)\n file___32207 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 41), '__file__', False)\n # Processing the call keyword arguments (line 42)\n kwargs_32208 = {}\n # Getting the type of 'os' (line 42)\n os_32204 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 25), 'os', False)\n # Obtaining the member 'path' of a type (line 42)\n path_32205 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), os_32204, 'path')\n # Obtaining the member 'dirname' of a type (line 42)\n dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), path_32205, 'dirname')\n # Calling dirname(args, kwargs) (line 42)\n dirname_call_result_32209 = invoke(stypy.reporting.localization.Localization(__file__, 42, 25), dirname_32206, *[file___32207], **kwargs_32208)\n \n str_32210 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 52), 'str', '..')\n str_32211 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 58), 'str', '_lib')\n str_32212 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 66), 'str', 'src')\n # Processing the call keyword arguments (line 42)\n kwargs_32213 = {}\n # Getting the type of 'join' (line 42)\n join_32203 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 20), 'join', False)\n # Calling join(args, kwargs) (line 42)\n join_call_result_32214 = invoke(stypy.reporting.localization.Localization(__file__, 42, 20), join_32203, *[dirname_call_result_32209, str_32210, str_32211, str_32212], **kwargs_32213)\n \n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 19), list_32202, join_call_result_32214)\n \n # Assigning a type to the variable 'include_dirs' (line 42)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 42, 4), 'include_dirs', list_32202)\n \n \n str_32215 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 43, 7), 'str', 
'include_dirs')\n # Getting the type of 'lapack_opt' (line 43)\n lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 43, 25), 'lapack_opt')\n # Applying the binary operator 'in' (line 43)\n result_contains_32217 = python_operator(stypy.reporting.localization.Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)\n \n # Testing the type of an if condition (line 43)\n if_condition_32218 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 43, 4), result_contains_32217)\n # Assigning a type to the variable 'if_condition_32218' (line 43)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 43, 4), 'if_condition_32218', if_condition_32218)\n # SSA begins for if statement (line 43)\n module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')\n \n # Assigning a Call to a Name (line 44):\n \n # Call to dict(...): (line 44)\n # Processing the call arguments (line 44)\n # Getting the type of 'lapack_opt' (line 44)\n lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 26), 'lapack_opt', False)\n # Processing the call keyword arguments (line 44)\n kwargs_32221 = {}\n # Getting the type of 'dict' (line 44)\n dict_32219 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 21), 'dict', False)\n # Calling dict(args, kwargs) (line 44)\n dict_call_result_32222 = invoke(stypy.reporting.localization.Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **kwargs_32221)\n \n # Assigning a type to the variable 'lapack_opt' (line 44)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 44, 8), 'lapack_opt', dict_call_result_32222)\n \n # Call to extend(...): (line 45)\n # Processing the call arguments (line 45)\n \n # Call to pop(...): (line 45)\n # Processing the call arguments (line 45)\n str_32227 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 45, 43), 'str', 'include_dirs')\n # Processing the call keyword arguments (line 45)\n kwargs_32228 = {}\n # Getting the type of 'lapack_opt' (line 45)\n lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 28), 'lapack_opt', False)\n # Obtaining the member 'pop' of a type (line 45)\n pop_32226 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')\n # Calling pop(args, kwargs) (line 45)\n pop_call_result_32229 = invoke(stypy.reporting.localization.Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228)\n \n # Processing the call keyword arguments (line 45)\n kwargs_32230 = {}\n # Getting the type of 'include_dirs' (line 45)\n include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 8), 'include_dirs', False)\n # Obtaining the member 'extend' of a type (line 45)\n extend_32224 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 8), include_dirs_32223, 'extend')\n # Calling extend(args, kwargs) (line 45)\n extend_call_result_32231 = invoke(stypy.reporting.localization.Localization(__file__, 45, 8), extend_32224, *[pop_call_result_32229], **kwargs_32230)\n \n # SSA join for if statement (line 43)\n module_type_store = module_type_store.join_ssa_context()\n \n \n # Call to add_extension(...): (line 47)\n # Processing the call arguments (line 47)\n 
str_32234 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 47, 25), 'str', '_quadpack')\n # Processing the call keyword arguments (line 47)\n \n # Obtaining an instance of the builtin type 'list' (line 48)\n list_32235 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 33), 'list')\n # Adding type elements to the builtin type 'list' instance (line 48)\n # Adding element type (line 48)\n str_32236 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 48, 33), list_32235, str_32236)\n \n keyword_32237 = list_32235\n \n # Obtaining an instance of the builtin type 'list' (line 49)\n list_32238 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 49)\n # Adding element type (line 49)\n str_32239 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 36), 'str', 'quadpack')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32239)\n # Adding element type (line 49)\n str_32240 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 48), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32240)\n \n # Getting the type of 'lapack_libs' (line 49)\n lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 49, 58), 'lapack_libs', False)\n # Applying the binary operator '+' (line 49)\n result_add_32242 = python_operator(stypy.reporting.localization.Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)\n \n keyword_32243 = result_add_32242\n \n # Obtaining an instance of the builtin type 'list' (line 50)\n list_32244 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 34), 'list')\n # Adding type elements to the builtin type 'list' instance (line 50)\n # Adding element type (line 50)\n str_32245 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 50, 34), list_32244, str_32245)\n \n # Getting the type of 'quadpack_src' (line 51)\n quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 36), 'quadpack_src', False)\n # Applying the binary operator '+' (line 50)\n result_add_32247 = python_operator(stypy.reporting.localization.Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)\n \n # Getting the type of 'mach_src' (line 51)\n mach_src_32248 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 51), 'mach_src', False)\n # Applying the binary operator '+' (line 51)\n result_add_32249 = python_operator(stypy.reporting.localization.Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)\n \n keyword_32250 = result_add_32249\n # Getting the type of 'include_dirs' (line 52)\n include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 52, 38), 'include_dirs', False)\n keyword_32252 = include_dirs_32251\n # Getting the type of 'lapack_opt' (line 53)\n lapack_opt_32253 = 
module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 53, 27), 'lapack_opt', False)\n kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237, 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253, 'include_dirs': keyword_32252}\n # Getting the type of 'config' (line 47)\n config_32232 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 47, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 47)\n add_extension_32233 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 47, 4), config_32232, 'add_extension')\n # Calling add_extension(args, kwargs) (line 47)\n add_extension_call_result_32255 = invoke(stypy.reporting.localization.Localization(__file__, 47, 4), add_extension_32233, *[str_32234], **kwargs_32254)\n \n \n # Assigning a Call to a Name (line 56):\n \n # Call to copy(...): (line 56)\n # Processing the call keyword arguments (line 56)\n kwargs_32258 = {}\n # Getting the type of 'lapack_opt' (line 56)\n lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 56, 19), 'lapack_opt', False)\n # Obtaining the member 'copy' of a type (line 56)\n copy_32257 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')\n # Calling copy(args, kwargs) (line 56)\n copy_call_result_32259 = invoke(stypy.reporting.localization.Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)\n \n # Assigning a type to the variable 'odepack_opts' (line 56)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 56, 4), 'odepack_opts', copy_call_result_32259)\n \n # Call to update(...): (line 57)\n # Processing the call arguments (line 57)\n # Getting the type of 'numpy_nodepr_api' (line 57)\n numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)\n # Processing the call keyword arguments (line 57)\n kwargs_32263 = {}\n # Getting the type of 'odepack_opts' (line 57)\n odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 4), 'odepack_opts', False)\n # Obtaining the member 'update' of a type (line 57)\n update_32261 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 57, 4), odepack_opts_32260, 'update')\n # Calling update(args, kwargs) (line 57)\n update_call_result_32264 = invoke(stypy.reporting.localization.Localization(__file__, 57, 4), update_32261, *[numpy_nodepr_api_32262], **kwargs_32263)\n \n \n # Call to add_extension(...): (line 58)\n # Processing the call arguments (line 58)\n str_32267 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 58, 25), 'str', '_odepack')\n # Processing the call keyword arguments (line 58)\n \n # Obtaining an instance of the builtin type 'list' (line 59)\n list_32268 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 33), 'list')\n # Adding type elements to the builtin type 'list' instance (line 59)\n # Adding element type (line 59)\n str_32269 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 59, 33), list_32268, str_32269)\n \n keyword_32270 = list_32268\n \n # Obtaining an 
instance of the builtin type 'list' (line 60)\n list_32271 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 60)\n # Adding element type (line 60)\n str_32272 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32272)\n # Adding element type (line 60)\n str_32273 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32273)\n \n # Getting the type of 'lapack_libs' (line 60)\n lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 60, 55), 'lapack_libs', False)\n # Applying the binary operator '+' (line 60)\n result_add_32275 = python_operator(stypy.reporting.localization.Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)\n \n keyword_32276 = result_add_32275\n # Getting the type of 'lsoda_src' (line 61)\n lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 34), 'lsoda_src', False)\n # Getting the type of 'mach_src' (line 61)\n mach_src_32278 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 46), 'mach_src', False)\n # Applying the binary operator '+' (line 61)\n result_add_32279 = python_operator(stypy.reporting.localization.Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)\n \n keyword_32280 = result_add_32279\n # Getting the type of 'odepack_opts' (line 62)\n odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 62, 27), 'odepack_opts', False)\n kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270, 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}\n # Getting the type of 'config' (line 58)\n config_32265 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 58, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 58)\n add_extension_32266 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 58, 4), config_32265, 'add_extension')\n # Calling add_extension(args, kwargs) (line 58)\n add_extension_call_result_32283 = invoke(stypy.reporting.localization.Localization(__file__, 58, 4), add_extension_32266, *[str_32267], **kwargs_32282)\n \n \n # Call to add_extension(...): (line 65)\n # Processing the call arguments (line 65)\n str_32286 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 65, 25), 'str', 'vode')\n # Processing the call keyword arguments (line 65)\n \n # Obtaining an instance of the builtin type 'list' (line 66)\n list_32287 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 33), 'list')\n # Adding type elements to the builtin type 'list' instance (line 66)\n # Adding element type (line 66)\n str_32288 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 34), 'str', 'vode.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 66, 33), list_32287, str_32288)\n \n keyword_32289 = list_32287\n \n # Obtaining an instance of the builtin type 'list' (line 
67)\n list_32290 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 67)\n # Adding element type (line 67)\n str_32291 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 36), 'str', 'vode')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 67, 35), list_32290, str_32291)\n \n # Getting the type of 'lapack_libs' (line 67)\n lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 67, 46), 'lapack_libs', False)\n # Applying the binary operator '+' (line 67)\n result_add_32293 = python_operator(stypy.reporting.localization.Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)\n \n keyword_32294 = result_add_32293\n # Getting the type of 'vode_src' (line 68)\n vode_src_32295 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 68, 33), 'vode_src', False)\n keyword_32296 = vode_src_32295\n # Getting the type of 'lapack_opt' (line 69)\n lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 69, 27), 'lapack_opt', False)\n kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289, 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}\n # Getting the type of 'config' (line 65)\n config_32284 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 65, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 65)\n add_extension_32285 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 65, 4), config_32284, 'add_extension')\n # Calling add_extension(args, kwargs) (line 65)\n add_extension_call_result_32299 = invoke(stypy.reporting.localization.Localization(__file__, 65, 4), add_extension_32285, *[str_32286], **kwargs_32298)\n \n \n # Call to add_extension(...): (line 72)\n # Processing the call arguments (line 72)\n str_32302 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 72, 25), 'str', 'lsoda')\n # Processing the call keyword arguments (line 72)\n \n # Obtaining an instance of the builtin type 'list' (line 73)\n list_32303 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 33), 'list')\n # Adding type elements to the builtin type 'list' instance (line 73)\n # Adding element type (line 73)\n str_32304 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 73, 33), list_32303, str_32304)\n \n keyword_32305 = list_32303\n \n # Obtaining an instance of the builtin type 'list' (line 74)\n list_32306 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 74)\n # Adding element type (line 74)\n str_32307 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32307)\n # Adding element type (line 74)\n str_32308 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 45), 'str', 'mach')\n 
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32308)\n \n # Getting the type of 'lapack_libs' (line 74)\n lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 74, 55), 'lapack_libs', False)\n # Applying the binary operator '+' (line 74)\n result_add_32310 = python_operator(stypy.reporting.localization.Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)\n \n keyword_32311 = result_add_32310\n # Getting the type of 'lsoda_src' (line 75)\n lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 34), 'lsoda_src', False)\n # Getting the type of 'mach_src' (line 75)\n mach_src_32313 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 46), 'mach_src', False)\n # Applying the binary operator '+' (line 75)\n result_add_32314 = python_operator(stypy.reporting.localization.Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313)\n \n keyword_32315 = result_add_32314\n # Getting the type of 'lapack_opt' (line 76)\n lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 76, 27), 'lapack_opt', False)\n kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305, 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}\n # Getting the type of 'config' (line 72)\n config_32300 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 72, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 72)\n add_extension_32301 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 72, 4), config_32300, 'add_extension')\n # Calling add_extension(args, kwargs) (line 72)\n add_extension_call_result_32318 = invoke(stypy.reporting.localization.Localization(__file__, 72, 4), add_extension_32301, *[str_32302], **kwargs_32317)\n \n \n # Call to add_extension(...): (line 79)\n # Processing the call arguments (line 79)\n str_32321 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 79, 25), 'str', '_dop')\n # Processing the call keyword arguments (line 79)\n \n # Obtaining an instance of the builtin type 'list' (line 80)\n list_32322 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 33), 'list')\n # Adding type elements to the builtin type 'list' instance (line 80)\n # Adding element type (line 80)\n str_32323 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 80, 33), list_32322, str_32323)\n \n keyword_32324 = list_32322\n \n # Obtaining an instance of the builtin type 'list' (line 81)\n list_32325 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 81)\n # Adding element type (line 81)\n str_32326 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 36), 'str', 'dop')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 81, 35), list_32325, str_32326)\n \n keyword_32327 = list_32325\n # Getting the type of 'dop_src' (line 82)\n dop_src_32328 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 82, 33), 'dop_src', False)\n 
keyword_32329 = dop_src_32328\n kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324, 'depends': keyword_32329}\n # Getting the type of 'config' (line 79)\n config_32319 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 79, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 79)\n add_extension_32320 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 79, 4), config_32319, 'add_extension')\n # Calling add_extension(args, kwargs) (line 79)\n add_extension_call_result_32331 = invoke(stypy.reporting.localization.Localization(__file__, 79, 4), add_extension_32320, *[str_32321], **kwargs_32330)\n \n \n # Call to add_extension(...): (line 84)\n # Processing the call arguments (line 84)\n str_32334 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 84, 25), 'str', '_test_multivariate')\n # Processing the call keyword arguments (line 84)\n # Getting the type of 'quadpack_test_src' (line 85)\n quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 85, 33), 'quadpack_test_src', False)\n keyword_32336 = quadpack_test_src_32335\n kwargs_32337 = {'sources': keyword_32336}\n # Getting the type of 'config' (line 84)\n config_32332 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 84, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 84)\n add_extension_32333 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 84, 4), config_32332, 'add_extension')\n # Calling add_extension(args, kwargs) (line 84)\n add_extension_call_result_32338 = invoke(stypy.reporting.localization.Localization(__file__, 84, 4), add_extension_32333, *[str_32334], **kwargs_32337)\n \n \n # Call to add_extension(...): (line 88)\n # Processing the call arguments (line 88)\n str_32341 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 88, 25), 'str', '_test_odeint_banded')\n # Processing the call keyword arguments (line 88)\n # Getting the type of 'odeint_banded_test_src' (line 89)\n odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 89, 33), 'odeint_banded_test_src', False)\n keyword_32343 = odeint_banded_test_src_32342\n \n # Obtaining an instance of the builtin type 'list' (line 90)\n list_32344 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 35), 'list')\n # Adding type elements to the builtin type 'list' instance (line 90)\n # Adding element type (line 90)\n str_32345 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 36), 'str', 'lsoda')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32345)\n # Adding element type (line 90)\n str_32346 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 45), 'str', 'mach')\n add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32346)\n \n # Getting the type of 'lapack_libs' (line 90)\n lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 90, 55), 'lapack_libs', False)\n # Applying the binary operator '+' (line 90)\n result_add_32348 = python_operator(stypy.reporting.localization.Localization(__file__, 90, 35), '+', list_32344, 
lapack_libs_32347)\n \n keyword_32349 = result_add_32348\n # Getting the type of 'lsoda_src' (line 91)\n lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 34), 'lsoda_src', False)\n # Getting the type of 'mach_src' (line 91)\n mach_src_32351 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 46), 'mach_src', False)\n # Applying the binary operator '+' (line 91)\n result_add_32352 = python_operator(stypy.reporting.localization.Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)\n \n keyword_32353 = result_add_32352\n # Getting the type of 'lapack_opt' (line 92)\n lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 92, 27), 'lapack_opt', False)\n kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343, 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}\n # Getting the type of 'config' (line 88)\n config_32339 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 88, 4), 'config', False)\n # Obtaining the member 'add_extension' of a type (line 88)\n add_extension_32340 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 88, 4), config_32339, 'add_extension')\n # Calling add_extension(args, kwargs) (line 88)\n add_extension_call_result_32356 = invoke(stypy.reporting.localization.Localization(__file__, 88, 4), add_extension_32340, *[str_32341], **kwargs_32355)\n \n \n # Call to add_subpackage(...): (line 94)\n # Processing the call arguments (line 94)\n str_32359 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 94, 26), 'str', '_ivp')\n # Processing the call keyword arguments (line 94)\n kwargs_32360 = {}\n # Getting the type of 'config' (line 94)\n config_32357 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 94, 4), 'config', False)\n # Obtaining the member 'add_subpackage' of a type (line 94)\n add_subpackage_32358 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 94, 4), config_32357, 'add_subpackage')\n # Calling add_subpackage(args, kwargs) (line 94)\n add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359], **kwargs_32360)\n \n \n # Call to add_data_dir(...): (line 96)\n # Processing the call arguments (line 96)\n str_32364 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 96, 24), 'str', 'tests')\n # Processing the call keyword arguments (line 96)\n kwargs_32365 = {}\n # Getting the type of 'config' (line 96)\n config_32362 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 96, 4), 'config', False)\n # Obtaining the member 'add_data_dir' of a type (line 96)\n add_data_dir_32363 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 96, 4), config_32362, 'add_data_dir')\n # Calling add_data_dir(args, kwargs) (line 96)\n add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **kwargs_32365)\n \n # Getting the type of 'config' (line 97)\n config_32367 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 97, 11), 'config')\n # Assigning a type to the variable 'stypy_return_type' (line 97)\n 
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 97, 4), 'stypy_return_type', config_32367)\n \n # ################# End of 'configuration(...)' code ##################\n\n # Teardown call information\n teardown_call_information(localization, arguments)\n \n # Storing the return type of function 'configuration' in the type store\n # Getting the type of 'stypy_return_type' (line 9)\n stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'stypy_return_type')\n module_type_store.store_return_type_of_current_context(stypy_return_type_32368)\n \n # Destroy the current context\n module_type_store = module_type_store.close_function_context()\n \n # Return type of the function 'configuration'\n return stypy_return_type_32368\n\n# Assigning a type to the variable 'configuration' (line 9)\nmodule_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'configuration', configuration)\n\nif (__name__ == '__main__'):\n stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 101, 4))\n \n # 'from numpy.distutils.core import setup' statement (line 101)\n update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')\n import_32369 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core')\n\n if (type(import_32369) is not StypyTypeError):\n\n if (import_32369 != 'pyd_module'):\n __import__(import_32369)\n sys_modules_32370 = sys.modules[import_32369]\n import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', sys_modules_32370.module_type_store, module_type_store, ['setup'])\n nest_module(stypy.reporting.localization.Localization(__file__, 101, 4), __file__, sys_modules_32370, sys_modules_32370.module_type_store, module_type_store)\n else:\n from numpy.distutils.core import setup\n\n import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', None, module_type_store, ['setup'], [setup])\n\n else:\n # Assigning a type to the variable 'numpy.distutils.core' (line 101)\n module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', import_32369)\n\n remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')\n \n \n # Call to setup(...): (line 102)\n # Processing the call keyword arguments (line 102)\n \n # Call to todict(...): (line 102)\n # Processing the call keyword arguments (line 102)\n kwargs_32378 = {}\n \n # Call to configuration(...): (line 102)\n # Processing the call keyword arguments (line 102)\n str_32373 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 102, 35), 'str', '')\n keyword_32374 = str_32373\n kwargs_32375 = {'top_path': keyword_32374}\n # Getting the type of 'configuration' (line 102)\n configuration_32372 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 12), 'configuration', False)\n # Calling configuration(args, kwargs) (line 102)\n configuration_call_result_32376 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_32372, *[], **kwargs_32375)\n \n # Obtaining the member 'todict' of a type (line 102)\n todict_32377 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_call_result_32376, 
'todict')\n # Calling todict(args, kwargs) (line 102)\n todict_call_result_32379 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)\n \n kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}\n # Getting the type of 'setup' (line 102)\n setup_32371 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 4), 'setup', False)\n # Calling setup(args, kwargs) (line 102)\n setup_call_result_32381 = invoke(stypy.reporting.localization.Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)\n \n\n\n# ################# End of the type inference program ##################\n\nmodule_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()\nmodule_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for classifier in classifiers: classifier.fit(training_data[:1500], validation_data[:1500]) expected = validation_data[681:] predicted = classifier.predict(training_data[681:]) print('Classification report for classifier %s:\n%s\n' % (classifier, metrics.classification_report(expected, predicted))) print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted)) <|reserved_special_token_1|> <|reserved_special_token_0|> my_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter =',', dtype='str') training_data = my_data[:, 0:6] validation_data = my_data[:, 6] classifiers = [tree.DecisionTreeClassifier(max_depth=5), tree. DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier( max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'), AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators= 100), KNeighborsClassifier(3), KNeighborsClassifier(5), KNeighborsClassifier(7)] for classifier in classifiers: classifier.fit(training_data[:1500], validation_data[:1500]) expected = validation_data[681:] predicted = classifier.predict(training_data[681:]) print('Classification report for classifier %s:\n%s\n' % (classifier, metrics.classification_report(expected, predicted))) print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted)) <|reserved_special_token_1|> from sklearn import svm, metrics, tree from sklearn.ensemble import AdaBoostClassifier from sklearn.neighbors import KNeighborsClassifier import numpy as np my_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter =',', dtype='str') training_data = my_data[:, 0:6] validation_data = my_data[:, 6] classifiers = [tree.DecisionTreeClassifier(max_depth=5), tree. DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier( max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'), AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators= 100), KNeighborsClassifier(3), KNeighborsClassifier(5), KNeighborsClassifier(7)] for classifier in classifiers: classifier.fit(training_data[:1500], validation_data[:1500]) expected = validation_data[681:] predicted = classifier.predict(training_data[681:]) print('Classification report for classifier %s:\n%s\n' % (classifier, metrics.classification_report(expected, predicted))) print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted)) <|reserved_special_token_1|> from sklearn import svm, metrics, tree from sklearn.ensemble import AdaBoostClassifier from sklearn.neighbors import KNeighborsClassifier import numpy as np my_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str') training_data = my_data[:, 0:6] validation_data = my_data[:, 6] classifiers = [ tree.DecisionTreeClassifier(max_depth=5), tree.DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'), AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=100), KNeighborsClassifier(3), KNeighborsClassifier(5), KNeighborsClassifier(7) ] for classifier in classifiers: classifier.fit(training_data[:1500], validation_data[:1500]) expected = validation_data[681:] predicted = classifier.predict(training_data[681:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
flexible
{ "blob_id": "3024359710148bfbb15677973555f214b1f878b7", "index": 1521, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n", "step-3": "<mask token>\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n", "step-4": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n", "step-5": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\n\n\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')\n\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\n\n\nclassifiers = [\n tree.DecisionTreeClassifier(max_depth=5),\n tree.DecisionTreeClassifier(max_depth=8),\n tree.DecisionTreeClassifier(max_depth=10),\n svm.SVC(kernel='linear'),\n svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50),\n AdaBoostClassifier(n_estimators=100),\n KNeighborsClassifier(3),\n KNeighborsClassifier(5),\n KNeighborsClassifier(7)\n]\n\n\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, 
metrics.classification_report(expected, predicted)))\n print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def get_lang_dirs(path): languages = [] for name in os.listdir(path): dir_path = os.path.join(path, name) if os.path.isdir(dir_path): cards_file = os.path.join(dir_path, 'cards_' + name + '.json') sets_file = os.path.join(dir_path, 'sets_' + name + '.json') if os.path.isfile(cards_file) and os.path.isfile(sets_file): languages.append(name) return languages def get_json_data(json_file_path): print('reading {}'.format(json_file_path)) with codecs.open(json_file_path, 'r', 'utf-8') as json_file: data = json.load(json_file) assert data, "Could not load json at: '%r' " % json_file_path return data def json_dict_entry(entry, separator=''): json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True) json_data = json_data.strip('{}').rstrip() return separator + json_data def multikeysort(items, columns): from operator import itemgetter for c in columns[::-1]: items = sorted(items, key=itemgetter(c)) return items def main(args): languages = get_lang_dirs(args.card_db_dir) languages.remove(LANGUAGE_DEFAULT) languages.insert(0, LANGUAGE_DEFAULT) if LANGUAGE_XX not in languages: languages.append(LANGUAGE_XX) print('Languages:') print(languages) print() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) for lang in languages: lang_dir = os.path.join(args.output_dir, lang) if not os.path.exists(lang_dir): os.makedirs(lang_dir) type_parts = set() type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json')) sorted_type_data = multikeysort(type_data, ['card_type']) with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w', encoding='utf-8') as f: json.dump(sorted_type_data, f, indent=4, ensure_ascii=False) type_parts = list(set().union(*[set(t['card_type']) for t in sorted_type_data])) type_parts.sort() print('Unique Types:') print(type_parts) print() all_labels = [] label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json') ) all_labels = list(set().union(*[set(label['names']) for label in label_data])) with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w', encoding='utf-8') as f: json.dump(label_data, f, indent=4, ensure_ascii=False) all_labels.sort() print('Labels: ') print(all_labels) print() for lang in languages: lang_file = 'types_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_type_data = get_json_data(fname) else: lang_type_data = {} for t in sorted(type_parts): if t not in lang_type_data: if lang == LANGUAGE_DEFAULT: lang_type_data[t] = t lang_type_default = lang_type_data else: lang_type_data[t] = lang_type_default[t] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as f: json.dump(lang_type_data, f, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_type_default = lang_type_data card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json')) cards = set(card['card_tag'] for card in card_data) groups = set(card['group_tag'] for card in card_data if 'group_tag' in card ) super_groups = set(['events', 'landmarks']) for card in card_data: card['cardset_tags'].sort() if 'base' in card['cardset_tags']: card['cardset_tags'].remove('base') card['cardset_tags'].insert(0, 'base') sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag']) with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w', encoding='utf-8') as lang_out: json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False) cards = [c['card_tag'] for c in sorted_card_data] 
cards.extend(sorted(groups)) cards.extend(sorted(super_groups)) print('Cards:') print(cards) print() for lang in languages: lang_file = 'cards_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_data = get_json_data(fname) else: lang_data = {} sorted_lang_data = collections.OrderedDict() fields = ['description', 'extra', 'name'] for card_tag in cards: lang_card = lang_data.get(card_tag) if not lang_card or lang == LANGUAGE_XX: lang_card = {} if lang == LANGUAGE_DEFAULT: lang_card['extra'] = '' lang_card['name'] = card lang_card['description'] = '' lang_card['untranslated'] = fields lang_default = lang_data else: lang_card['extra'] = lang_default[card_tag]['extra'] lang_card['name'] = lang_default[card_tag]['name'] lang_card['description'] = lang_default[card_tag][ 'description'] lang_card['untranslated'] = fields elif lang != LANGUAGE_DEFAULT: if 'untranslated' in lang_card: if not lang_card['untranslated']: del lang_card['untranslated'] else: for field in fields: if field in lang_card['untranslated']: lang_card[field] = lang_default[card_tag][field ] else: untranslated = [] for field in fields: if field not in lang_data[card_tag]: lang_card[field] = lang_default[card_tag][field] untranslated.append(field) if untranslated: lang_card['untranslated'] = untranslated lang_card['used'] = True sorted_lang_data[card_tag] = lang_card unused = [c for c in lang_data.values() if 'used' not in c] print( f"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}" ) print([c['name'] for c in unused]) for card_tag in lang_data: lang_card = lang_data.get(card_tag) if 'used' not in lang_card: if lang != LANGUAGE_XX: lang_card['untranslated'] = [ 'Note: This card is currently not used.'] sorted_lang_data[card_tag] = lang_card else: del lang_card['used'] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_default = lang_data lang_file = 'sets_db.json' set_data = get_json_data(os.path.join(args.card_db_dir, lang_file)) with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding= 'utf-8') as lang_out: json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False) print('Sets:') print(set(set_data)) print() for lang in languages: lang_file = 'sets_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_set_data = get_json_data(fname) else: lang_set_data = {} for s in sorted(set_data): if s not in lang_set_data: lang_set_data[s] = {} if lang == LANGUAGE_DEFAULT: lang_set_data[s]['set_name'] = s.title() lang_set_data[s]['text_icon'] = set_data[s]['text_icon'] if 'short_name' in set_data[s]: lang_set_data[s]['short_name'] = set_data[s][ 'short_name'] if 'set_text' in set_data[s]: lang_set_data[s]['set_text'] = set_data[s]['set_text'] else: lang_set_data[s]['set_name'] = lang_default[s]['set_name'] lang_set_data[s]['text_icon'] = lang_default[s]['text_icon' ] if 'short_name' in lang_default[s]: lang_set_data[s]['short_name'] = lang_default[s][ 'short_name'] if 'set_text' in lang_default[s]: lang_set_data[s]['set_text'] = lang_default[s][ 'set_text'] elif lang != LANGUAGE_DEFAULT: for x in lang_default[s]: if x not in lang_set_data[s] and x != 'used': lang_set_data[s][x] = lang_default[s][x] if lang == LANGUAGE_DEFAULT: lang_default = lang_set_data with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', 
encoding='utf-8') as lang_out: json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4) for lang in languages: fromLanguage = lang if lang == LANGUAGE_XX: fromLanguage = LANGUAGE_DEFAULT copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' + fromLanguage + '.json'), os.path.join(args.output_dir, lang, 'bonuses_' + lang + '.json')) copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join (args.output_dir, 'translation.md')) copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'), os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt')) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_lang_dirs(path): languages = [] for name in os.listdir(path): dir_path = os.path.join(path, name) if os.path.isdir(dir_path): cards_file = os.path.join(dir_path, 'cards_' + name + '.json') sets_file = os.path.join(dir_path, 'sets_' + name + '.json') if os.path.isfile(cards_file) and os.path.isfile(sets_file): languages.append(name) return languages def get_json_data(json_file_path): print('reading {}'.format(json_file_path)) with codecs.open(json_file_path, 'r', 'utf-8') as json_file: data = json.load(json_file) assert data, "Could not load json at: '%r' " % json_file_path return data def json_dict_entry(entry, separator=''): json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True) json_data = json_data.strip('{}').rstrip() return separator + json_data def multikeysort(items, columns): from operator import itemgetter for c in columns[::-1]: items = sorted(items, key=itemgetter(c)) return items def main(args): languages = get_lang_dirs(args.card_db_dir) languages.remove(LANGUAGE_DEFAULT) languages.insert(0, LANGUAGE_DEFAULT) if LANGUAGE_XX not in languages: languages.append(LANGUAGE_XX) print('Languages:') print(languages) print() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) for lang in languages: lang_dir = os.path.join(args.output_dir, lang) if not os.path.exists(lang_dir): os.makedirs(lang_dir) type_parts = set() type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json')) sorted_type_data = multikeysort(type_data, ['card_type']) with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w', encoding='utf-8') as f: json.dump(sorted_type_data, f, indent=4, ensure_ascii=False) type_parts = list(set().union(*[set(t['card_type']) for t in sorted_type_data])) type_parts.sort() print('Unique Types:') print(type_parts) print() all_labels = [] label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json') ) all_labels = list(set().union(*[set(label['names']) for label in label_data])) with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w', encoding='utf-8') as f: json.dump(label_data, f, indent=4, ensure_ascii=False) all_labels.sort() print('Labels: ') print(all_labels) print() for lang in languages: lang_file = 'types_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_type_data = get_json_data(fname) else: lang_type_data = {} for t in sorted(type_parts): if t not in lang_type_data: if lang == LANGUAGE_DEFAULT: lang_type_data[t] = t lang_type_default = lang_type_data else: lang_type_data[t] = lang_type_default[t] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as f: json.dump(lang_type_data, f, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_type_default = lang_type_data card_data = get_json_data(os.path.join(args.card_db_dir, 
'cards_db.json')) cards = set(card['card_tag'] for card in card_data) groups = set(card['group_tag'] for card in card_data if 'group_tag' in card ) super_groups = set(['events', 'landmarks']) for card in card_data: card['cardset_tags'].sort() if 'base' in card['cardset_tags']: card['cardset_tags'].remove('base') card['cardset_tags'].insert(0, 'base') sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag']) with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w', encoding='utf-8') as lang_out: json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False) cards = [c['card_tag'] for c in sorted_card_data] cards.extend(sorted(groups)) cards.extend(sorted(super_groups)) print('Cards:') print(cards) print() for lang in languages: lang_file = 'cards_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_data = get_json_data(fname) else: lang_data = {} sorted_lang_data = collections.OrderedDict() fields = ['description', 'extra', 'name'] for card_tag in cards: lang_card = lang_data.get(card_tag) if not lang_card or lang == LANGUAGE_XX: lang_card = {} if lang == LANGUAGE_DEFAULT: lang_card['extra'] = '' lang_card['name'] = card lang_card['description'] = '' lang_card['untranslated'] = fields lang_default = lang_data else: lang_card['extra'] = lang_default[card_tag]['extra'] lang_card['name'] = lang_default[card_tag]['name'] lang_card['description'] = lang_default[card_tag][ 'description'] lang_card['untranslated'] = fields elif lang != LANGUAGE_DEFAULT: if 'untranslated' in lang_card: if not lang_card['untranslated']: del lang_card['untranslated'] else: for field in fields: if field in lang_card['untranslated']: lang_card[field] = lang_default[card_tag][field ] else: untranslated = [] for field in fields: if field not in lang_data[card_tag]: lang_card[field] = lang_default[card_tag][field] untranslated.append(field) if untranslated: lang_card['untranslated'] = untranslated lang_card['used'] = True sorted_lang_data[card_tag] = lang_card unused = [c for c in lang_data.values() if 'used' not in c] print( f"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}" ) print([c['name'] for c in unused]) for card_tag in lang_data: lang_card = lang_data.get(card_tag) if 'used' not in lang_card: if lang != LANGUAGE_XX: lang_card['untranslated'] = [ 'Note: This card is currently not used.'] sorted_lang_data[card_tag] = lang_card else: del lang_card['used'] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_default = lang_data lang_file = 'sets_db.json' set_data = get_json_data(os.path.join(args.card_db_dir, lang_file)) with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding= 'utf-8') as lang_out: json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False) print('Sets:') print(set(set_data)) print() for lang in languages: lang_file = 'sets_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_set_data = get_json_data(fname) else: lang_set_data = {} for s in sorted(set_data): if s not in lang_set_data: lang_set_data[s] = {} if lang == LANGUAGE_DEFAULT: lang_set_data[s]['set_name'] = s.title() lang_set_data[s]['text_icon'] = set_data[s]['text_icon'] if 'short_name' in set_data[s]: lang_set_data[s]['short_name'] = set_data[s][ 'short_name'] if 'set_text' in set_data[s]: 
lang_set_data[s]['set_text'] = set_data[s]['set_text'] else: lang_set_data[s]['set_name'] = lang_default[s]['set_name'] lang_set_data[s]['text_icon'] = lang_default[s]['text_icon' ] if 'short_name' in lang_default[s]: lang_set_data[s]['short_name'] = lang_default[s][ 'short_name'] if 'set_text' in lang_default[s]: lang_set_data[s]['set_text'] = lang_default[s][ 'set_text'] elif lang != LANGUAGE_DEFAULT: for x in lang_default[s]: if x not in lang_set_data[s] and x != 'used': lang_set_data[s][x] = lang_default[s][x] if lang == LANGUAGE_DEFAULT: lang_default = lang_set_data with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4) for lang in languages: fromLanguage = lang if lang == LANGUAGE_XX: fromLanguage = LANGUAGE_DEFAULT copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' + fromLanguage + '.json'), os.path.join(args.output_dir, lang, 'bonuses_' + lang + '.json')) copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join (args.output_dir, 'translation.md')) copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'), os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt')) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--card_db_dir', default=os.path.join(os.path. dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv', 'card_db'), help='directory of card data') parser.add_argument('--output_dir', default=os.path.join(os.path. dirname(os.path.abspath(__file__)), '.', 'card_db'), help= 'directory for output data') args = parser.parse_args() main(args) <|reserved_special_token_1|> <|reserved_special_token_0|> LANGUAGE_DEFAULT = 'en_us' LANGUAGE_XX = 'xx' def get_lang_dirs(path): languages = [] for name in os.listdir(path): dir_path = os.path.join(path, name) if os.path.isdir(dir_path): cards_file = os.path.join(dir_path, 'cards_' + name + '.json') sets_file = os.path.join(dir_path, 'sets_' + name + '.json') if os.path.isfile(cards_file) and os.path.isfile(sets_file): languages.append(name) return languages def get_json_data(json_file_path): print('reading {}'.format(json_file_path)) with codecs.open(json_file_path, 'r', 'utf-8') as json_file: data = json.load(json_file) assert data, "Could not load json at: '%r' " % json_file_path return data def json_dict_entry(entry, separator=''): json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True) json_data = json_data.strip('{}').rstrip() return separator + json_data def multikeysort(items, columns): from operator import itemgetter for c in columns[::-1]: items = sorted(items, key=itemgetter(c)) return items def main(args): languages = get_lang_dirs(args.card_db_dir) languages.remove(LANGUAGE_DEFAULT) languages.insert(0, LANGUAGE_DEFAULT) if LANGUAGE_XX not in languages: languages.append(LANGUAGE_XX) print('Languages:') print(languages) print() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) for lang in languages: lang_dir = os.path.join(args.output_dir, lang) if not os.path.exists(lang_dir): os.makedirs(lang_dir) type_parts = set() type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json')) sorted_type_data = multikeysort(type_data, ['card_type']) with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w', encoding='utf-8') as f: json.dump(sorted_type_data, f, indent=4, ensure_ascii=False) type_parts = list(set().union(*[set(t['card_type']) for t in sorted_type_data])) type_parts.sort() print('Unique 
Types:') print(type_parts) print() all_labels = [] label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json') ) all_labels = list(set().union(*[set(label['names']) for label in label_data])) with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w', encoding='utf-8') as f: json.dump(label_data, f, indent=4, ensure_ascii=False) all_labels.sort() print('Labels: ') print(all_labels) print() for lang in languages: lang_file = 'types_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_type_data = get_json_data(fname) else: lang_type_data = {} for t in sorted(type_parts): if t not in lang_type_data: if lang == LANGUAGE_DEFAULT: lang_type_data[t] = t lang_type_default = lang_type_data else: lang_type_data[t] = lang_type_default[t] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as f: json.dump(lang_type_data, f, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_type_default = lang_type_data card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json')) cards = set(card['card_tag'] for card in card_data) groups = set(card['group_tag'] for card in card_data if 'group_tag' in card ) super_groups = set(['events', 'landmarks']) for card in card_data: card['cardset_tags'].sort() if 'base' in card['cardset_tags']: card['cardset_tags'].remove('base') card['cardset_tags'].insert(0, 'base') sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag']) with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w', encoding='utf-8') as lang_out: json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False) cards = [c['card_tag'] for c in sorted_card_data] cards.extend(sorted(groups)) cards.extend(sorted(super_groups)) print('Cards:') print(cards) print() for lang in languages: lang_file = 'cards_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_data = get_json_data(fname) else: lang_data = {} sorted_lang_data = collections.OrderedDict() fields = ['description', 'extra', 'name'] for card_tag in cards: lang_card = lang_data.get(card_tag) if not lang_card or lang == LANGUAGE_XX: lang_card = {} if lang == LANGUAGE_DEFAULT: lang_card['extra'] = '' lang_card['name'] = card lang_card['description'] = '' lang_card['untranslated'] = fields lang_default = lang_data else: lang_card['extra'] = lang_default[card_tag]['extra'] lang_card['name'] = lang_default[card_tag]['name'] lang_card['description'] = lang_default[card_tag][ 'description'] lang_card['untranslated'] = fields elif lang != LANGUAGE_DEFAULT: if 'untranslated' in lang_card: if not lang_card['untranslated']: del lang_card['untranslated'] else: for field in fields: if field in lang_card['untranslated']: lang_card[field] = lang_default[card_tag][field ] else: untranslated = [] for field in fields: if field not in lang_data[card_tag]: lang_card[field] = lang_default[card_tag][field] untranslated.append(field) if untranslated: lang_card['untranslated'] = untranslated lang_card['used'] = True sorted_lang_data[card_tag] = lang_card unused = [c for c in lang_data.values() if 'used' not in c] print( f"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}" ) print([c['name'] for c in unused]) for card_tag in lang_data: lang_card = lang_data.get(card_tag) if 'used' not in lang_card: if lang != LANGUAGE_XX: lang_card['untranslated'] = [ 'Note: This card is currently not used.'] sorted_lang_data[card_tag] = lang_card 
else: del lang_card['used'] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_default = lang_data lang_file = 'sets_db.json' set_data = get_json_data(os.path.join(args.card_db_dir, lang_file)) with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding= 'utf-8') as lang_out: json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False) print('Sets:') print(set(set_data)) print() for lang in languages: lang_file = 'sets_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_set_data = get_json_data(fname) else: lang_set_data = {} for s in sorted(set_data): if s not in lang_set_data: lang_set_data[s] = {} if lang == LANGUAGE_DEFAULT: lang_set_data[s]['set_name'] = s.title() lang_set_data[s]['text_icon'] = set_data[s]['text_icon'] if 'short_name' in set_data[s]: lang_set_data[s]['short_name'] = set_data[s][ 'short_name'] if 'set_text' in set_data[s]: lang_set_data[s]['set_text'] = set_data[s]['set_text'] else: lang_set_data[s]['set_name'] = lang_default[s]['set_name'] lang_set_data[s]['text_icon'] = lang_default[s]['text_icon' ] if 'short_name' in lang_default[s]: lang_set_data[s]['short_name'] = lang_default[s][ 'short_name'] if 'set_text' in lang_default[s]: lang_set_data[s]['set_text'] = lang_default[s][ 'set_text'] elif lang != LANGUAGE_DEFAULT: for x in lang_default[s]: if x not in lang_set_data[s] and x != 'used': lang_set_data[s][x] = lang_default[s][x] if lang == LANGUAGE_DEFAULT: lang_default = lang_set_data with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4) for lang in languages: fromLanguage = lang if lang == LANGUAGE_XX: fromLanguage = LANGUAGE_DEFAULT copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' + fromLanguage + '.json'), os.path.join(args.output_dir, lang, 'bonuses_' + lang + '.json')) copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join (args.output_dir, 'translation.md')) copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'), os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt')) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--card_db_dir', default=os.path.join(os.path. dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv', 'card_db'), help='directory of card data') parser.add_argument('--output_dir', default=os.path.join(os.path. 
dirname(os.path.abspath(__file__)), '.', 'card_db'), help= 'directory for output data') args = parser.parse_args() main(args) <|reserved_special_token_1|> import os import os.path import io import codecs import json from shutil import copyfile import argparse import collections LANGUAGE_DEFAULT = 'en_us' LANGUAGE_XX = 'xx' def get_lang_dirs(path): languages = [] for name in os.listdir(path): dir_path = os.path.join(path, name) if os.path.isdir(dir_path): cards_file = os.path.join(dir_path, 'cards_' + name + '.json') sets_file = os.path.join(dir_path, 'sets_' + name + '.json') if os.path.isfile(cards_file) and os.path.isfile(sets_file): languages.append(name) return languages def get_json_data(json_file_path): print('reading {}'.format(json_file_path)) with codecs.open(json_file_path, 'r', 'utf-8') as json_file: data = json.load(json_file) assert data, "Could not load json at: '%r' " % json_file_path return data def json_dict_entry(entry, separator=''): json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True) json_data = json_data.strip('{}').rstrip() return separator + json_data def multikeysort(items, columns): from operator import itemgetter for c in columns[::-1]: items = sorted(items, key=itemgetter(c)) return items def main(args): languages = get_lang_dirs(args.card_db_dir) languages.remove(LANGUAGE_DEFAULT) languages.insert(0, LANGUAGE_DEFAULT) if LANGUAGE_XX not in languages: languages.append(LANGUAGE_XX) print('Languages:') print(languages) print() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) for lang in languages: lang_dir = os.path.join(args.output_dir, lang) if not os.path.exists(lang_dir): os.makedirs(lang_dir) type_parts = set() type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json')) sorted_type_data = multikeysort(type_data, ['card_type']) with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w', encoding='utf-8') as f: json.dump(sorted_type_data, f, indent=4, ensure_ascii=False) type_parts = list(set().union(*[set(t['card_type']) for t in sorted_type_data])) type_parts.sort() print('Unique Types:') print(type_parts) print() all_labels = [] label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json') ) all_labels = list(set().union(*[set(label['names']) for label in label_data])) with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w', encoding='utf-8') as f: json.dump(label_data, f, indent=4, ensure_ascii=False) all_labels.sort() print('Labels: ') print(all_labels) print() for lang in languages: lang_file = 'types_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_type_data = get_json_data(fname) else: lang_type_data = {} for t in sorted(type_parts): if t not in lang_type_data: if lang == LANGUAGE_DEFAULT: lang_type_data[t] = t lang_type_default = lang_type_data else: lang_type_data[t] = lang_type_default[t] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as f: json.dump(lang_type_data, f, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_type_default = lang_type_data card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json')) cards = set(card['card_tag'] for card in card_data) groups = set(card['group_tag'] for card in card_data if 'group_tag' in card ) super_groups = set(['events', 'landmarks']) for card in card_data: card['cardset_tags'].sort() if 'base' in card['cardset_tags']: card['cardset_tags'].remove('base') card['cardset_tags'].insert(0, 'base') 
sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag']) with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w', encoding='utf-8') as lang_out: json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False) cards = [c['card_tag'] for c in sorted_card_data] cards.extend(sorted(groups)) cards.extend(sorted(super_groups)) print('Cards:') print(cards) print() for lang in languages: lang_file = 'cards_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_data = get_json_data(fname) else: lang_data = {} sorted_lang_data = collections.OrderedDict() fields = ['description', 'extra', 'name'] for card_tag in cards: lang_card = lang_data.get(card_tag) if not lang_card or lang == LANGUAGE_XX: lang_card = {} if lang == LANGUAGE_DEFAULT: lang_card['extra'] = '' lang_card['name'] = card lang_card['description'] = '' lang_card['untranslated'] = fields lang_default = lang_data else: lang_card['extra'] = lang_default[card_tag]['extra'] lang_card['name'] = lang_default[card_tag]['name'] lang_card['description'] = lang_default[card_tag][ 'description'] lang_card['untranslated'] = fields elif lang != LANGUAGE_DEFAULT: if 'untranslated' in lang_card: if not lang_card['untranslated']: del lang_card['untranslated'] else: for field in fields: if field in lang_card['untranslated']: lang_card[field] = lang_default[card_tag][field ] else: untranslated = [] for field in fields: if field not in lang_data[card_tag]: lang_card[field] = lang_default[card_tag][field] untranslated.append(field) if untranslated: lang_card['untranslated'] = untranslated lang_card['used'] = True sorted_lang_data[card_tag] = lang_card unused = [c for c in lang_data.values() if 'used' not in c] print( f"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}" ) print([c['name'] for c in unused]) for card_tag in lang_data: lang_card = lang_data.get(card_tag) if 'used' not in lang_card: if lang != LANGUAGE_XX: lang_card['untranslated'] = [ 'Note: This card is currently not used.'] sorted_lang_data[card_tag] = lang_card else: del lang_card['used'] with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_default = lang_data lang_file = 'sets_db.json' set_data = get_json_data(os.path.join(args.card_db_dir, lang_file)) with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding= 'utf-8') as lang_out: json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False) print('Sets:') print(set(set_data)) print() for lang in languages: lang_file = 'sets_' + lang + '.json' fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_set_data = get_json_data(fname) else: lang_set_data = {} for s in sorted(set_data): if s not in lang_set_data: lang_set_data[s] = {} if lang == LANGUAGE_DEFAULT: lang_set_data[s]['set_name'] = s.title() lang_set_data[s]['text_icon'] = set_data[s]['text_icon'] if 'short_name' in set_data[s]: lang_set_data[s]['short_name'] = set_data[s][ 'short_name'] if 'set_text' in set_data[s]: lang_set_data[s]['set_text'] = set_data[s]['set_text'] else: lang_set_data[s]['set_name'] = lang_default[s]['set_name'] lang_set_data[s]['text_icon'] = lang_default[s]['text_icon' ] if 'short_name' in lang_default[s]: lang_set_data[s]['short_name'] = lang_default[s][ 'short_name'] if 'set_text' in lang_default[s]: lang_set_data[s]['set_text'] = 
lang_default[s][ 'set_text'] elif lang != LANGUAGE_DEFAULT: for x in lang_default[s]: if x not in lang_set_data[s] and x != 'used': lang_set_data[s][x] = lang_default[s][x] if lang == LANGUAGE_DEFAULT: lang_default = lang_set_data with io.open(os.path.join(args.output_dir, lang, lang_file), 'w', encoding='utf-8') as lang_out: json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4) for lang in languages: fromLanguage = lang if lang == LANGUAGE_XX: fromLanguage = LANGUAGE_DEFAULT copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' + fromLanguage + '.json'), os.path.join(args.output_dir, lang, 'bonuses_' + lang + '.json')) copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join (args.output_dir, 'translation.md')) copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'), os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt')) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--card_db_dir', default=os.path.join(os.path. dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv', 'card_db'), help='directory of card data') parser.add_argument('--output_dir', default=os.path.join(os.path. dirname(os.path.abspath(__file__)), '.', 'card_db'), help= 'directory for output data') args = parser.parse_args() main(args) <|reserved_special_token_1|> ########################################################################### # This file provides maintenance on the various language files # 1. Create new "xx/cards_xx.json" files that have entries ordered as: # a. the card_tag entries in "cards_db.json" # b. the group_tag entries as found in "cards_db.json" # c. the super group entries (grouping across all expansions" # d. any unused entries existing in the file (assumed to be work in progress) # # 2. Create new "sets_db.json" and "xx/cards_xx.json" with entries sorted alphabetically # # All output is in the designated output directory. Original files are not overwritten. ########################################################################### import os import os.path import io import codecs import json from shutil import copyfile import argparse import collections LANGUAGE_DEFAULT = "en_us" # default language, which takes priority LANGUAGE_XX = "xx" # language for starting a translation def get_lang_dirs(path): # Find all valid languages. languages = [] for name in os.listdir(path): dir_path = os.path.join(path, name) if os.path.isdir(dir_path): cards_file = os.path.join(dir_path, "cards_" + name + ".json") sets_file = os.path.join(dir_path, "sets_" + name + ".json") if os.path.isfile(cards_file) and os.path.isfile(sets_file): languages.append(name) return languages def get_json_data(json_file_path): print(("reading {}".format(json_file_path))) # Read in the json from the specified file with codecs.open(json_file_path, "r", "utf-8") as json_file: data = json.load(json_file) assert data, "Could not load json at: '%r' " % json_file_path return data def json_dict_entry(entry, separator=""): # Return a nicely formated json dict entry. 
# It does not include the enclosing {} and removes trailing white space json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True) json_data = json_data.strip( "{}" ).rstrip() # Remove outer{} and then trailing whitespace return separator + json_data # Multikey sort # see: http://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys def multikeysort(items, columns): from operator import itemgetter for c in columns[::-1]: items = sorted(items, key=itemgetter(c)) return items def main(args): ########################################################################### # Get all the languages, and place the default language first in the list ########################################################################### languages = get_lang_dirs(args.card_db_dir) languages.remove(LANGUAGE_DEFAULT) languages.insert(0, LANGUAGE_DEFAULT) if LANGUAGE_XX not in languages: languages.append(LANGUAGE_XX) print("Languages:") print(languages) print() ########################################################################### # Make sure the directories exist to hold the output ########################################################################### # main output directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # each language directory for lang in languages: # Make sure the directory is there to hold the file lang_dir = os.path.join(args.output_dir, lang) if not os.path.exists(lang_dir): os.makedirs(lang_dir) ########################################################################### # Get the types_db information # Store in a list in the order found in types[]. Ordered by card_type # 1. card_tags, 2. group_tags, 3. super groups ########################################################################### type_parts = set() # Get the card data type_data = get_json_data(os.path.join(args.card_db_dir, "types_db.json")) # Sort the cards by cardset_tags, then card_tag sorted_type_data = multikeysort(type_data, ["card_type"]) with io.open( os.path.join(args.output_dir, "types_db.json"), "w", encoding="utf-8" ) as f: json.dump(sorted_type_data, f, indent=4, ensure_ascii=False) type_parts = list(set().union(*[set(t["card_type"]) for t in sorted_type_data])) type_parts.sort() print("Unique Types:") print(type_parts) print() ########################################################################### # Get the labels_db information # Store in a list in the order found. ########################################################################### all_labels = [] # Get the card data label_data = get_json_data(os.path.join(args.card_db_dir, "labels_db.json")) all_labels = list(set().union(*[set(label["names"]) for label in label_data])) with io.open( os.path.join(args.output_dir, "labels_db.json"), "w", encoding="utf-8" ) as f: json.dump(label_data, f, indent=4, ensure_ascii=False) all_labels.sort() print("Labels: ") print(all_labels) print() ########################################################################### # Fix up all the xx/types_xx.json files # Place entries in alphabetical order # If entries don't exist: # If the default language, set from information in the "types_db.json" file, # If not the default language, set based on information from the default language. # Lastly, keep any extra entries that are not currently used, just in case needed # in the future or is a work in progress. 
########################################################################### for lang in languages: lang_file = "types_" + lang + ".json" fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_type_data = get_json_data(fname) else: lang_type_data = {} for t in sorted(type_parts): if t not in lang_type_data: if lang == LANGUAGE_DEFAULT: lang_type_data[t] = t lang_type_default = lang_type_data else: lang_type_data[t] = lang_type_default[t] with io.open( os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8" ) as f: json.dump(lang_type_data, f, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_type_default = lang_type_data # Keep for later languages ########################################################################### # Get the cards_db information # Store in a list in the order found in cards[]. Ordered as follows: # 1. card_tags, 2. group_tags, 3. super groups ########################################################################### # Get the card data card_data = get_json_data(os.path.join(args.card_db_dir, "cards_db.json")) cards = set(card["card_tag"] for card in card_data) groups = set(card["group_tag"] for card in card_data if "group_tag" in card) super_groups = set(["events", "landmarks"]) # Sort the cardset_tags for card in card_data: card["cardset_tags"].sort() # But put all the base cards together by moving to front of the list if "base" in card["cardset_tags"]: card["cardset_tags"].remove("base") card["cardset_tags"].insert(0, "base") # Sort the cards by cardset_tags, then card_tag sorted_card_data = multikeysort(card_data, ["cardset_tags", "card_tag"]) with io.open( os.path.join(args.output_dir, "cards_db.json"), "w", encoding="utf-8" ) as lang_out: json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False) # maintain the sorted order, but expand with groups and super_groups cards = [c["card_tag"] for c in sorted_card_data] cards.extend(sorted(groups)) cards.extend(sorted(super_groups)) print("Cards:") print(cards) print() ########################################################################### # Fix up all the cards_xx.json files # Place entries in the same order as given in "cards_db.json". # If entries don't exist: # If the default language, set base on information in the "cards_db.json" file, # If not the default language, set based on information from the default language. # Lastly, keep any extra entries that are not currently used, just in case needed # in the future or is a work in progress. ########################################################################### for lang in languages: # contruct the cards json file name lang_file = "cards_" + lang + ".json" fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_data = get_json_data(fname) else: lang_data = {} sorted_lang_data = collections.OrderedDict() fields = ["description", "extra", "name"] for card_tag in cards: lang_card = lang_data.get(card_tag) # print(f'looking at {card_tag}: {lang_card}') if not lang_card or lang == LANGUAGE_XX: # Card is missing, need to add it lang_card = {} if lang == LANGUAGE_DEFAULT: # Default language gets bare minimum. Really need to add by hand. 
lang_card["extra"] = "" lang_card["name"] = card lang_card["description"] = "" lang_card["untranslated"] = fields lang_default = lang_data else: # All other languages should get the default languages' text lang_card["extra"] = lang_default[card_tag]["extra"] lang_card["name"] = lang_default[card_tag]["name"] lang_card["description"] = lang_default[card_tag]["description"] lang_card["untranslated"] = fields else: # Card exists, figure out what needs updating (don't update default language) if lang != LANGUAGE_DEFAULT: if "untranslated" in lang_card: # Has an 'untranslated' field. Process accordingly if not lang_card["untranslated"]: # It is empty, so just remove it del lang_card["untranslated"] else: # If a field remains untranslated, then replace with the default languages copy for field in fields: if field in lang_card["untranslated"]: lang_card[field] = lang_default[card_tag][field] else: # Need to create the 'untranslated' field and update based upon existing fields untranslated = [] for field in fields: if field not in lang_data[card_tag]: lang_card[field] = lang_default[card_tag][field] untranslated.append(field) if untranslated: # only add if something is still needing translation lang_card["untranslated"] = untranslated lang_card["used"] = True sorted_lang_data[card_tag] = lang_card unused = [c for c in lang_data.values() if "used" not in c] print( f'unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if "used" in c])}' ) print([c["name"] for c in unused]) # Now keep any unused values just in case needed in the future for card_tag in lang_data: lang_card = lang_data.get(card_tag) if "used" not in lang_card: if lang != LANGUAGE_XX: lang_card["untranslated"] = [ "Note: This card is currently not used." ] sorted_lang_data[card_tag] = lang_card else: del lang_card["used"] # Process the file with io.open( os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8" ) as lang_out: json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False) if lang == LANGUAGE_DEFAULT: lang_default = lang_data # Keep for later languages ########################################################################### # Fix up the sets_db.json file # Place entries in alphabetical order ########################################################################### lang_file = "sets_db.json" set_data = get_json_data(os.path.join(args.card_db_dir, lang_file)) with io.open( os.path.join(args.output_dir, lang_file), "w", encoding="utf-8" ) as lang_out: json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False) print("Sets:") print(set(set_data)) print() ########################################################################### # Fix up all the xx/sets_xx.json files # Place entries in alphabetical order # If entries don't exist: # If the default language, set from information in the "sets_db.json" file, # If not the default language, set based on information from the default language. 
########################################################################### for lang in languages: lang_file = "sets_" + lang + ".json" fname = os.path.join(args.card_db_dir, lang, lang_file) if os.path.isfile(fname): lang_set_data = get_json_data(fname) else: lang_set_data = {} for s in sorted(set_data): if s not in lang_set_data: lang_set_data[s] = {} if lang == LANGUAGE_DEFAULT: lang_set_data[s]["set_name"] = s.title() lang_set_data[s]["text_icon"] = set_data[s]["text_icon"] if "short_name" in set_data[s]: lang_set_data[s]["short_name"] = set_data[s]["short_name"] if "set_text" in set_data[s]: lang_set_data[s]["set_text"] = set_data[s]["set_text"] else: lang_set_data[s]["set_name"] = lang_default[s]["set_name"] lang_set_data[s]["text_icon"] = lang_default[s]["text_icon"] if "short_name" in lang_default[s]: lang_set_data[s]["short_name"] = lang_default[s]["short_name"] if "set_text" in lang_default[s]: lang_set_data[s]["set_text"] = lang_default[s]["set_text"] else: if lang != LANGUAGE_DEFAULT: for x in lang_default[s]: if x not in lang_set_data[s] and x != "used": lang_set_data[s][x] = lang_default[s][x] if lang == LANGUAGE_DEFAULT: lang_default = lang_set_data # Keep for later languages with io.open( os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8" ) as lang_out: json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4) ########################################################################### # bonuses_xx files ########################################################################### for lang in languages: # Special case for xx. Reseed from default language fromLanguage = lang if lang == LANGUAGE_XX: fromLanguage = LANGUAGE_DEFAULT copyfile( os.path.join( args.card_db_dir, fromLanguage, "bonuses_" + fromLanguage + ".json" ), os.path.join(args.output_dir, lang, "bonuses_" + lang + ".json"), ) ########################################################################### # translation.txt ########################################################################### copyfile( os.path.join(args.card_db_dir, "translation.md"), os.path.join(args.output_dir, "translation.md"), ) # Since xx is the starting point for new translations, # make sure xx has the latest copy of translation.txt copyfile( os.path.join(args.card_db_dir, LANGUAGE_XX, "translation.txt"), os.path.join(args.output_dir, LANGUAGE_XX, "translation.txt"), ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--card_db_dir", default=os.path.join( os.path.dirname(os.path.abspath(__file__)), "..", "src", "domdiv", "card_db" ), help="directory of card data", ) parser.add_argument( "--output_dir", default=os.path.join( os.path.dirname(os.path.abspath(__file__)), ".", "card_db" ), help="directory for output data", ) args = parser.parse_args() main(args)
flexible
{ "blob_id": "cc1b3c3c65e8832316f72cbf48737b21ee4a7799", "index": 3887, "step-1": "<mask token>\n\n\ndef get_lang_dirs(path):\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, 'cards_' + name + '.json')\n sets_file = os.path.join(dir_path, 'sets_' + name + '.json')\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print('reading {}'.format(json_file_path))\n with codecs.open(json_file_path, 'r', 'utf-8') as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=''):\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip('{}').rstrip()\n return separator + json_data\n\n\ndef multikeysort(items, columns):\n from operator import itemgetter\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print('Languages:')\n print(languages)\n print()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for lang in languages:\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n type_parts = set()\n type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json'))\n sorted_type_data = multikeysort(type_data, ['card_type'])\n with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n type_parts = list(set().union(*[set(t['card_type']) for t in\n sorted_type_data]))\n type_parts.sort()\n print('Unique Types:')\n print(type_parts)\n print()\n all_labels = []\n label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json')\n )\n all_labels = list(set().union(*[set(label['names']) for label in\n label_data]))\n with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n all_labels.sort()\n print('Labels: ')\n print(all_labels)\n print()\n for lang in languages:\n lang_file = 'types_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data\n card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json'))\n cards = set(card['card_tag'] for card in card_data)\n groups = set(card['group_tag'] for card in card_data if 'group_tag' in card\n )\n super_groups = set(['events', 'landmarks'])\n for card in card_data:\n card['cardset_tags'].sort()\n if 'base' in card['cardset_tags']:\n card['cardset_tags'].remove('base')\n card['cardset_tags'].insert(0, 'base')\n sorted_card_data = 
multikeysort(card_data, ['cardset_tags', 'card_tag'])\n with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n cards = [c['card_tag'] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n print('Cards:')\n print(cards)\n print()\n for lang in languages:\n lang_file = 'cards_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n sorted_lang_data = collections.OrderedDict()\n fields = ['description', 'extra', 'name']\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n if not lang_card or lang == LANGUAGE_XX:\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n lang_card['extra'] = ''\n lang_card['name'] = card\n lang_card['description'] = ''\n lang_card['untranslated'] = fields\n lang_default = lang_data\n else:\n lang_card['extra'] = lang_default[card_tag]['extra']\n lang_card['name'] = lang_default[card_tag]['name']\n lang_card['description'] = lang_default[card_tag][\n 'description']\n lang_card['untranslated'] = fields\n elif lang != LANGUAGE_DEFAULT:\n if 'untranslated' in lang_card:\n if not lang_card['untranslated']:\n del lang_card['untranslated']\n else:\n for field in fields:\n if field in lang_card['untranslated']:\n lang_card[field] = lang_default[card_tag][field\n ]\n else:\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n lang_card['untranslated'] = untranslated\n lang_card['used'] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if 'used' not in c]\n print(\n f\"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}\"\n )\n print([c['name'] for c in unused])\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if 'used' not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card['untranslated'] = [\n 'Note: This card is currently not used.']\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card['used']\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_data\n lang_file = 'sets_db.json'\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding=\n 'utf-8') as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4,\n ensure_ascii=False)\n print('Sets:')\n print(set(set_data))\n print()\n for lang in languages:\n lang_file = 'sets_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s]['set_name'] = s.title()\n lang_set_data[s]['text_icon'] = set_data[s]['text_icon']\n if 'short_name' in set_data[s]:\n lang_set_data[s]['short_name'] = set_data[s][\n 'short_name']\n if 'set_text' in set_data[s]:\n lang_set_data[s]['set_text'] = set_data[s]['set_text']\n else:\n lang_set_data[s]['set_name'] = lang_default[s]['set_name']\n lang_set_data[s]['text_icon'] = 
lang_default[s]['text_icon'\n ]\n if 'short_name' in lang_default[s]:\n lang_set_data[s]['short_name'] = lang_default[s][\n 'short_name']\n if 'set_text' in lang_default[s]:\n lang_set_data[s]['set_text'] = lang_default[s][\n 'set_text']\n elif lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != 'used':\n lang_set_data[s][x] = lang_default[s][x]\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n for lang in languages:\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' +\n fromLanguage + '.json'), os.path.join(args.output_dir, lang, \n 'bonuses_' + lang + '.json'))\n copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join\n (args.output_dir, 'translation.md'))\n copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'),\n os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt'))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_lang_dirs(path):\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, 'cards_' + name + '.json')\n sets_file = os.path.join(dir_path, 'sets_' + name + '.json')\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print('reading {}'.format(json_file_path))\n with codecs.open(json_file_path, 'r', 'utf-8') as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=''):\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip('{}').rstrip()\n return separator + json_data\n\n\ndef multikeysort(items, columns):\n from operator import itemgetter\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print('Languages:')\n print(languages)\n print()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for lang in languages:\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n type_parts = set()\n type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json'))\n sorted_type_data = multikeysort(type_data, ['card_type'])\n with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n type_parts = list(set().union(*[set(t['card_type']) for t in\n sorted_type_data]))\n type_parts.sort()\n print('Unique Types:')\n print(type_parts)\n print()\n all_labels = []\n label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json')\n )\n all_labels = list(set().union(*[set(label['names']) for label in\n label_data]))\n with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n all_labels.sort()\n print('Labels: ')\n print(all_labels)\n print()\n for lang in 
languages:\n lang_file = 'types_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data\n card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json'))\n cards = set(card['card_tag'] for card in card_data)\n groups = set(card['group_tag'] for card in card_data if 'group_tag' in card\n )\n super_groups = set(['events', 'landmarks'])\n for card in card_data:\n card['cardset_tags'].sort()\n if 'base' in card['cardset_tags']:\n card['cardset_tags'].remove('base')\n card['cardset_tags'].insert(0, 'base')\n sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag'])\n with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n cards = [c['card_tag'] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n print('Cards:')\n print(cards)\n print()\n for lang in languages:\n lang_file = 'cards_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n sorted_lang_data = collections.OrderedDict()\n fields = ['description', 'extra', 'name']\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n if not lang_card or lang == LANGUAGE_XX:\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n lang_card['extra'] = ''\n lang_card['name'] = card\n lang_card['description'] = ''\n lang_card['untranslated'] = fields\n lang_default = lang_data\n else:\n lang_card['extra'] = lang_default[card_tag]['extra']\n lang_card['name'] = lang_default[card_tag]['name']\n lang_card['description'] = lang_default[card_tag][\n 'description']\n lang_card['untranslated'] = fields\n elif lang != LANGUAGE_DEFAULT:\n if 'untranslated' in lang_card:\n if not lang_card['untranslated']:\n del lang_card['untranslated']\n else:\n for field in fields:\n if field in lang_card['untranslated']:\n lang_card[field] = lang_default[card_tag][field\n ]\n else:\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n lang_card['untranslated'] = untranslated\n lang_card['used'] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if 'used' not in c]\n print(\n f\"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}\"\n )\n print([c['name'] for c in unused])\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if 'used' not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card['untranslated'] = [\n 'Note: This card is currently not used.']\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card['used']\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n 
lang_default = lang_data\n lang_file = 'sets_db.json'\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding=\n 'utf-8') as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4,\n ensure_ascii=False)\n print('Sets:')\n print(set(set_data))\n print()\n for lang in languages:\n lang_file = 'sets_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s]['set_name'] = s.title()\n lang_set_data[s]['text_icon'] = set_data[s]['text_icon']\n if 'short_name' in set_data[s]:\n lang_set_data[s]['short_name'] = set_data[s][\n 'short_name']\n if 'set_text' in set_data[s]:\n lang_set_data[s]['set_text'] = set_data[s]['set_text']\n else:\n lang_set_data[s]['set_name'] = lang_default[s]['set_name']\n lang_set_data[s]['text_icon'] = lang_default[s]['text_icon'\n ]\n if 'short_name' in lang_default[s]:\n lang_set_data[s]['short_name'] = lang_default[s][\n 'short_name']\n if 'set_text' in lang_default[s]:\n lang_set_data[s]['set_text'] = lang_default[s][\n 'set_text']\n elif lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != 'used':\n lang_set_data[s][x] = lang_default[s][x]\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n for lang in languages:\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' +\n fromLanguage + '.json'), os.path.join(args.output_dir, lang, \n 'bonuses_' + lang + '.json'))\n copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join\n (args.output_dir, 'translation.md'))\n copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'),\n os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--card_db_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv',\n 'card_db'), help='directory of card data')\n parser.add_argument('--output_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '.', 'card_db'), help=\n 'directory for output data')\n args = parser.parse_args()\n main(args)\n", "step-3": "<mask token>\nLANGUAGE_DEFAULT = 'en_us'\nLANGUAGE_XX = 'xx'\n\n\ndef get_lang_dirs(path):\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, 'cards_' + name + '.json')\n sets_file = os.path.join(dir_path, 'sets_' + name + '.json')\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print('reading {}'.format(json_file_path))\n with codecs.open(json_file_path, 'r', 'utf-8') as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=''):\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip('{}').rstrip()\n 
return separator + json_data\n\n\ndef multikeysort(items, columns):\n from operator import itemgetter\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print('Languages:')\n print(languages)\n print()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for lang in languages:\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n type_parts = set()\n type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json'))\n sorted_type_data = multikeysort(type_data, ['card_type'])\n with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n type_parts = list(set().union(*[set(t['card_type']) for t in\n sorted_type_data]))\n type_parts.sort()\n print('Unique Types:')\n print(type_parts)\n print()\n all_labels = []\n label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json')\n )\n all_labels = list(set().union(*[set(label['names']) for label in\n label_data]))\n with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n all_labels.sort()\n print('Labels: ')\n print(all_labels)\n print()\n for lang in languages:\n lang_file = 'types_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data\n card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json'))\n cards = set(card['card_tag'] for card in card_data)\n groups = set(card['group_tag'] for card in card_data if 'group_tag' in card\n )\n super_groups = set(['events', 'landmarks'])\n for card in card_data:\n card['cardset_tags'].sort()\n if 'base' in card['cardset_tags']:\n card['cardset_tags'].remove('base')\n card['cardset_tags'].insert(0, 'base')\n sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag'])\n with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n cards = [c['card_tag'] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n print('Cards:')\n print(cards)\n print()\n for lang in languages:\n lang_file = 'cards_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n sorted_lang_data = collections.OrderedDict()\n fields = ['description', 'extra', 'name']\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n if not lang_card or lang == LANGUAGE_XX:\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n lang_card['extra'] = ''\n lang_card['name'] = card\n 
lang_card['description'] = ''\n lang_card['untranslated'] = fields\n lang_default = lang_data\n else:\n lang_card['extra'] = lang_default[card_tag]['extra']\n lang_card['name'] = lang_default[card_tag]['name']\n lang_card['description'] = lang_default[card_tag][\n 'description']\n lang_card['untranslated'] = fields\n elif lang != LANGUAGE_DEFAULT:\n if 'untranslated' in lang_card:\n if not lang_card['untranslated']:\n del lang_card['untranslated']\n else:\n for field in fields:\n if field in lang_card['untranslated']:\n lang_card[field] = lang_default[card_tag][field\n ]\n else:\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n lang_card['untranslated'] = untranslated\n lang_card['used'] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if 'used' not in c]\n print(\n f\"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}\"\n )\n print([c['name'] for c in unused])\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if 'used' not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card['untranslated'] = [\n 'Note: This card is currently not used.']\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card['used']\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_data\n lang_file = 'sets_db.json'\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding=\n 'utf-8') as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4,\n ensure_ascii=False)\n print('Sets:')\n print(set(set_data))\n print()\n for lang in languages:\n lang_file = 'sets_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s]['set_name'] = s.title()\n lang_set_data[s]['text_icon'] = set_data[s]['text_icon']\n if 'short_name' in set_data[s]:\n lang_set_data[s]['short_name'] = set_data[s][\n 'short_name']\n if 'set_text' in set_data[s]:\n lang_set_data[s]['set_text'] = set_data[s]['set_text']\n else:\n lang_set_data[s]['set_name'] = lang_default[s]['set_name']\n lang_set_data[s]['text_icon'] = lang_default[s]['text_icon'\n ]\n if 'short_name' in lang_default[s]:\n lang_set_data[s]['short_name'] = lang_default[s][\n 'short_name']\n if 'set_text' in lang_default[s]:\n lang_set_data[s]['set_text'] = lang_default[s][\n 'set_text']\n elif lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != 'used':\n lang_set_data[s][x] = lang_default[s][x]\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n for lang in languages:\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' +\n fromLanguage + '.json'), os.path.join(args.output_dir, lang, \n 'bonuses_' + lang + '.json'))\n 
copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join\n (args.output_dir, 'translation.md'))\n copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'),\n os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--card_db_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv',\n 'card_db'), help='directory of card data')\n parser.add_argument('--output_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '.', 'card_db'), help=\n 'directory for output data')\n args = parser.parse_args()\n main(args)\n", "step-4": "import os\nimport os.path\nimport io\nimport codecs\nimport json\nfrom shutil import copyfile\nimport argparse\nimport collections\nLANGUAGE_DEFAULT = 'en_us'\nLANGUAGE_XX = 'xx'\n\n\ndef get_lang_dirs(path):\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, 'cards_' + name + '.json')\n sets_file = os.path.join(dir_path, 'sets_' + name + '.json')\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print('reading {}'.format(json_file_path))\n with codecs.open(json_file_path, 'r', 'utf-8') as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=''):\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip('{}').rstrip()\n return separator + json_data\n\n\ndef multikeysort(items, columns):\n from operator import itemgetter\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print('Languages:')\n print(languages)\n print()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for lang in languages:\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n type_parts = set()\n type_data = get_json_data(os.path.join(args.card_db_dir, 'types_db.json'))\n sorted_type_data = multikeysort(type_data, ['card_type'])\n with io.open(os.path.join(args.output_dir, 'types_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n type_parts = list(set().union(*[set(t['card_type']) for t in\n sorted_type_data]))\n type_parts.sort()\n print('Unique Types:')\n print(type_parts)\n print()\n all_labels = []\n label_data = get_json_data(os.path.join(args.card_db_dir, 'labels_db.json')\n )\n all_labels = list(set().union(*[set(label['names']) for label in\n label_data]))\n with io.open(os.path.join(args.output_dir, 'labels_db.json'), 'w',\n encoding='utf-8') as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n all_labels.sort()\n print('Labels: ')\n print(all_labels)\n print()\n for lang in languages:\n lang_file = 'types_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n 
lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data\n card_data = get_json_data(os.path.join(args.card_db_dir, 'cards_db.json'))\n cards = set(card['card_tag'] for card in card_data)\n groups = set(card['group_tag'] for card in card_data if 'group_tag' in card\n )\n super_groups = set(['events', 'landmarks'])\n for card in card_data:\n card['cardset_tags'].sort()\n if 'base' in card['cardset_tags']:\n card['cardset_tags'].remove('base')\n card['cardset_tags'].insert(0, 'base')\n sorted_card_data = multikeysort(card_data, ['cardset_tags', 'card_tag'])\n with io.open(os.path.join(args.output_dir, 'cards_db.json'), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n cards = [c['card_tag'] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n print('Cards:')\n print(cards)\n print()\n for lang in languages:\n lang_file = 'cards_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n sorted_lang_data = collections.OrderedDict()\n fields = ['description', 'extra', 'name']\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n if not lang_card or lang == LANGUAGE_XX:\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n lang_card['extra'] = ''\n lang_card['name'] = card\n lang_card['description'] = ''\n lang_card['untranslated'] = fields\n lang_default = lang_data\n else:\n lang_card['extra'] = lang_default[card_tag]['extra']\n lang_card['name'] = lang_default[card_tag]['name']\n lang_card['description'] = lang_default[card_tag][\n 'description']\n lang_card['untranslated'] = fields\n elif lang != LANGUAGE_DEFAULT:\n if 'untranslated' in lang_card:\n if not lang_card['untranslated']:\n del lang_card['untranslated']\n else:\n for field in fields:\n if field in lang_card['untranslated']:\n lang_card[field] = lang_default[card_tag][field\n ]\n else:\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n lang_card['untranslated'] = untranslated\n lang_card['used'] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if 'used' not in c]\n print(\n f\"unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if 'used' in c])}\"\n )\n print([c['name'] for c in unused])\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if 'used' not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card['untranslated'] = [\n 'Note: This card is currently not used.']\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card['used']\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_data\n lang_file = 'sets_db.json'\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n with io.open(os.path.join(args.output_dir, lang_file), 'w', encoding=\n 'utf-8') as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4,\n ensure_ascii=False)\n 
print('Sets:')\n print(set(set_data))\n print()\n for lang in languages:\n lang_file = 'sets_' + lang + '.json'\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s]['set_name'] = s.title()\n lang_set_data[s]['text_icon'] = set_data[s]['text_icon']\n if 'short_name' in set_data[s]:\n lang_set_data[s]['short_name'] = set_data[s][\n 'short_name']\n if 'set_text' in set_data[s]:\n lang_set_data[s]['set_text'] = set_data[s]['set_text']\n else:\n lang_set_data[s]['set_name'] = lang_default[s]['set_name']\n lang_set_data[s]['text_icon'] = lang_default[s]['text_icon'\n ]\n if 'short_name' in lang_default[s]:\n lang_set_data[s]['short_name'] = lang_default[s][\n 'short_name']\n if 'set_text' in lang_default[s]:\n lang_set_data[s]['set_text'] = lang_default[s][\n 'set_text']\n elif lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != 'used':\n lang_set_data[s][x] = lang_default[s][x]\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data\n with io.open(os.path.join(args.output_dir, lang, lang_file), 'w',\n encoding='utf-8') as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n for lang in languages:\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n copyfile(os.path.join(args.card_db_dir, fromLanguage, 'bonuses_' +\n fromLanguage + '.json'), os.path.join(args.output_dir, lang, \n 'bonuses_' + lang + '.json'))\n copyfile(os.path.join(args.card_db_dir, 'translation.md'), os.path.join\n (args.output_dir, 'translation.md'))\n copyfile(os.path.join(args.card_db_dir, LANGUAGE_XX, 'translation.txt'),\n os.path.join(args.output_dir, LANGUAGE_XX, 'translation.txt'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--card_db_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '..', 'src', 'domdiv',\n 'card_db'), help='directory of card data')\n parser.add_argument('--output_dir', default=os.path.join(os.path.\n dirname(os.path.abspath(__file__)), '.', 'card_db'), help=\n 'directory for output data')\n args = parser.parse_args()\n main(args)\n", "step-5": "###########################################################################\n# This file provides maintenance on the various language files\n# 1. Create new \"xx/cards_xx.json\" files that have entries ordered as:\n# a. the card_tag entries in \"cards_db.json\"\n# b. the group_tag entries as found in \"cards_db.json\"\n# c. the super group entries (grouping across all expansions\"\n# d. any unused entries existing in the file (assumed to be work in progress)\n#\n# 2. Create new \"sets_db.json\" and \"xx/cards_xx.json\" with entries sorted alphabetically\n#\n# All output is in the designated output directory. 
Original files are not overwritten.\n###########################################################################\n\nimport os\nimport os.path\nimport io\nimport codecs\nimport json\nfrom shutil import copyfile\nimport argparse\nimport collections\n\nLANGUAGE_DEFAULT = \"en_us\" # default language, which takes priority\nLANGUAGE_XX = \"xx\" # language for starting a translation\n\n\ndef get_lang_dirs(path):\n # Find all valid languages.\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, \"cards_\" + name + \".json\")\n sets_file = os.path.join(dir_path, \"sets_\" + name + \".json\")\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print((\"reading {}\".format(json_file_path)))\n # Read in the json from the specified file\n with codecs.open(json_file_path, \"r\", \"utf-8\") as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=\"\"):\n # Return a nicely formated json dict entry.\n # It does not include the enclosing {} and removes trailing white space\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip(\n \"{}\"\n ).rstrip() # Remove outer{} and then trailing whitespace\n return separator + json_data\n\n\n# Multikey sort\n# see: http://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys\ndef multikeysort(items, columns):\n from operator import itemgetter\n\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n ###########################################################################\n # Get all the languages, and place the default language first in the list\n ###########################################################################\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print(\"Languages:\")\n print(languages)\n print()\n\n ###########################################################################\n # Make sure the directories exist to hold the output\n ###########################################################################\n\n # main output directory\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # each language directory\n for lang in languages:\n # Make sure the directory is there to hold the file\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n\n ###########################################################################\n # Get the types_db information\n # Store in a list in the order found in types[]. Ordered by card_type\n # 1. card_tags, 2. group_tags, 3. 
super groups\n ###########################################################################\n type_parts = set()\n\n # Get the card data\n type_data = get_json_data(os.path.join(args.card_db_dir, \"types_db.json\"))\n\n # Sort the cards by cardset_tags, then card_tag\n sorted_type_data = multikeysort(type_data, [\"card_type\"])\n\n with io.open(\n os.path.join(args.output_dir, \"types_db.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n\n type_parts = list(set().union(*[set(t[\"card_type\"]) for t in sorted_type_data]))\n type_parts.sort()\n print(\"Unique Types:\")\n print(type_parts)\n print()\n\n ###########################################################################\n # Get the labels_db information\n # Store in a list in the order found.\n ###########################################################################\n all_labels = []\n\n # Get the card data\n label_data = get_json_data(os.path.join(args.card_db_dir, \"labels_db.json\"))\n\n all_labels = list(set().union(*[set(label[\"names\"]) for label in label_data]))\n\n with io.open(\n os.path.join(args.output_dir, \"labels_db.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n\n all_labels.sort()\n print(\"Labels: \")\n print(all_labels)\n print()\n ###########################################################################\n # Fix up all the xx/types_xx.json files\n # Place entries in alphabetical order\n # If entries don't exist:\n # If the default language, set from information in the \"types_db.json\" file,\n # If not the default language, set based on information from the default language.\n # Lastly, keep any extra entries that are not currently used, just in case needed\n # in the future or is a work in progress.\n ###########################################################################\n for lang in languages:\n lang_file = \"types_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data # Keep for later languages\n\n ###########################################################################\n # Get the cards_db information\n # Store in a list in the order found in cards[]. Ordered as follows:\n # 1. card_tags, 2. group_tags, 3. 
super groups\n ###########################################################################\n\n # Get the card data\n card_data = get_json_data(os.path.join(args.card_db_dir, \"cards_db.json\"))\n\n cards = set(card[\"card_tag\"] for card in card_data)\n groups = set(card[\"group_tag\"] for card in card_data if \"group_tag\" in card)\n super_groups = set([\"events\", \"landmarks\"])\n\n # Sort the cardset_tags\n for card in card_data:\n card[\"cardset_tags\"].sort()\n # But put all the base cards together by moving to front of the list\n if \"base\" in card[\"cardset_tags\"]:\n card[\"cardset_tags\"].remove(\"base\")\n card[\"cardset_tags\"].insert(0, \"base\")\n\n # Sort the cards by cardset_tags, then card_tag\n sorted_card_data = multikeysort(card_data, [\"cardset_tags\", \"card_tag\"])\n\n with io.open(\n os.path.join(args.output_dir, \"cards_db.json\"), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n\n # maintain the sorted order, but expand with groups and super_groups\n cards = [c[\"card_tag\"] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n\n print(\"Cards:\")\n print(cards)\n print()\n\n ###########################################################################\n # Fix up all the cards_xx.json files\n # Place entries in the same order as given in \"cards_db.json\".\n # If entries don't exist:\n # If the default language, set base on information in the \"cards_db.json\" file,\n # If not the default language, set based on information from the default language.\n # Lastly, keep any extra entries that are not currently used, just in case needed\n # in the future or is a work in progress.\n ###########################################################################\n for lang in languages:\n\n # contruct the cards json file name\n lang_file = \"cards_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n\n sorted_lang_data = collections.OrderedDict()\n fields = [\"description\", \"extra\", \"name\"]\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n # print(f'looking at {card_tag}: {lang_card}')\n if not lang_card or lang == LANGUAGE_XX:\n # Card is missing, need to add it\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n # Default language gets bare minimum. Really need to add by hand.\n lang_card[\"extra\"] = \"\"\n lang_card[\"name\"] = card\n lang_card[\"description\"] = \"\"\n lang_card[\"untranslated\"] = fields\n lang_default = lang_data\n else:\n # All other languages should get the default languages' text\n lang_card[\"extra\"] = lang_default[card_tag][\"extra\"]\n lang_card[\"name\"] = lang_default[card_tag][\"name\"]\n lang_card[\"description\"] = lang_default[card_tag][\"description\"]\n lang_card[\"untranslated\"] = fields\n else:\n # Card exists, figure out what needs updating (don't update default language)\n if lang != LANGUAGE_DEFAULT:\n if \"untranslated\" in lang_card:\n # Has an 'untranslated' field. 
Process accordingly\n if not lang_card[\"untranslated\"]:\n # It is empty, so just remove it\n del lang_card[\"untranslated\"]\n else:\n # If a field remains untranslated, then replace with the default languages copy\n for field in fields:\n if field in lang_card[\"untranslated\"]:\n lang_card[field] = lang_default[card_tag][field]\n else:\n # Need to create the 'untranslated' field and update based upon existing fields\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n # only add if something is still needing translation\n lang_card[\"untranslated\"] = untranslated\n lang_card[\"used\"] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if \"used\" not in c]\n print(\n f'unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if \"used\" in c])}'\n )\n print([c[\"name\"] for c in unused])\n # Now keep any unused values just in case needed in the future\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if \"used\" not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card[\"untranslated\"] = [\n \"Note: This card is currently not used.\"\n ]\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card[\"used\"]\n\n # Process the file\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_data # Keep for later languages\n\n ###########################################################################\n # Fix up the sets_db.json file\n # Place entries in alphabetical order\n ###########################################################################\n lang_file = \"sets_db.json\"\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n\n with io.open(\n os.path.join(args.output_dir, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False)\n\n print(\"Sets:\")\n print(set(set_data))\n print()\n\n ###########################################################################\n # Fix up all the xx/sets_xx.json files\n # Place entries in alphabetical order\n # If entries don't exist:\n # If the default language, set from information in the \"sets_db.json\" file,\n # If not the default language, set based on information from the default language.\n ###########################################################################\n for lang in languages:\n lang_file = \"sets_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s][\"set_name\"] = s.title()\n lang_set_data[s][\"text_icon\"] = set_data[s][\"text_icon\"]\n if \"short_name\" in set_data[s]:\n lang_set_data[s][\"short_name\"] = set_data[s][\"short_name\"]\n if \"set_text\" in set_data[s]:\n lang_set_data[s][\"set_text\"] = set_data[s][\"set_text\"]\n else:\n lang_set_data[s][\"set_name\"] = lang_default[s][\"set_name\"]\n lang_set_data[s][\"text_icon\"] = lang_default[s][\"text_icon\"]\n if \"short_name\" in lang_default[s]:\n lang_set_data[s][\"short_name\"] = lang_default[s][\"short_name\"]\n if \"set_text\" in 
lang_default[s]:\n lang_set_data[s][\"set_text\"] = lang_default[s][\"set_text\"]\n else:\n if lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != \"used\":\n lang_set_data[s][x] = lang_default[s][x]\n\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data # Keep for later languages\n\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n\n ###########################################################################\n # bonuses_xx files\n ###########################################################################\n for lang in languages:\n # Special case for xx. Reseed from default language\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n\n copyfile(\n os.path.join(\n args.card_db_dir, fromLanguage, \"bonuses_\" + fromLanguage + \".json\"\n ),\n os.path.join(args.output_dir, lang, \"bonuses_\" + lang + \".json\"),\n )\n\n ###########################################################################\n # translation.txt\n ###########################################################################\n copyfile(\n os.path.join(args.card_db_dir, \"translation.md\"),\n os.path.join(args.output_dir, \"translation.md\"),\n )\n\n # Since xx is the starting point for new translations,\n # make sure xx has the latest copy of translation.txt\n copyfile(\n os.path.join(args.card_db_dir, LANGUAGE_XX, \"translation.txt\"),\n os.path.join(args.output_dir, LANGUAGE_XX, \"translation.txt\"),\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--card_db_dir\",\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"..\", \"src\", \"domdiv\", \"card_db\"\n ),\n help=\"directory of card data\",\n )\n parser.add_argument(\n \"--output_dir\",\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \".\", \"card_db\"\n ),\n help=\"directory for output data\",\n )\n args = parser.parse_args()\n main(args)\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|> class RoRo(Monument): def set_adm_location(self): counties = self.data_files['counties'] self.set_from_dict_match(counties, 'iso_code', 'judetul_iso', 'located_adm') <|reserved_special_token_0|> def set_heritage_id(self): self.add_statement('romanian_monument_id', self.cod) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id('cod') self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image('imagine') self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class RoRo(Monument): def set_adm_location(self): counties = self.data_files['counties'] self.set_from_dict_match(counties, 'iso_code', 'judetul_iso', 'located_adm') <|reserved_special_token_0|> def set_heritage_id(self): self.add_statement('romanian_monument_id', self.cod) def update_descriptions(self): adm_code = self.judetul_iso counties = self.data_files['counties'] county_item = utils.get_item_from_dict_by_key(dict_name=counties, search_term=adm_code, return_content_of='itemLabel', search_in= 'iso_code') if len(county_item) == 1: place_name = '{}, Romania'.format(county_item[0]) else: place_name = 'Romania' desc = 'heritage site in {}'.format(place_name) self.add_description('en', desc) self.add_disambiguator(str(self.cod)) <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id('cod') self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image('imagine') self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class RoRo(Monument): def set_adm_location(self): counties = self.data_files['counties'] self.set_from_dict_match(counties, 'iso_code', 'judetul_iso', 'located_adm') def set_location(self): """ Set Location property from article linked in localitate. Run this after set_adm_location. localitate can contain several links (we take the 1st which seems to be the most granular one) and a mix of administrative types. Compare with admin location so that they're not the same. 
""" if self.has_non_empty_attribute('localitate'): loc_item = None if utils.count_wikilinks(self.localitate) > 0: loc_link = utils.get_wikilinks(self.localitate)[0] loc_item = utils.q_from_wikipedia('ro', loc_link.title) adm_item = self.get_statement_values('located_adm') if loc_item and loc_item != adm_item[0]: self.add_statement('location', loc_item) if not loc_item: self.add_to_report('localitate', self.localitate, 'location') def set_heritage_id(self): self.add_statement('romanian_monument_id', self.cod) def update_descriptions(self): adm_code = self.judetul_iso counties = self.data_files['counties'] county_item = utils.get_item_from_dict_by_key(dict_name=counties, search_term=adm_code, return_content_of='itemLabel', search_in= 'iso_code') if len(county_item) == 1: place_name = '{}, Romania'.format(county_item[0]) else: place_name = 'Romania' desc = 'heritage site in {}'.format(place_name) self.add_description('en', desc) self.add_disambiguator(str(self.cod)) def set_address(self): street_patterns = 'piața', 'str.', 'bd.' if self.has_non_empty_attribute('adresa'): adr_lower = self.adresa.lower() adr_nice = utils.remove_markup(self.adresa) if any(pattern in adr_lower for pattern in street_patterns): if self.has_non_empty_attribute('localitate'): town = utils.remove_markup(self.localitate) adr_nice = '{}, {}'.format(adr_nice, town) self.add_statement('located_street', adr_nice) else: directions = utils.package_monolingual(adr_nice, 'ro') self.add_statement('directions', directions) def update_labels(self): romanian = utils.remove_markup(self.denumire) self.add_label('ro', romanian) def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id('cod') self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image('imagine') self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class RoRo(Monument): def set_adm_location(self): counties = self.data_files['counties'] self.set_from_dict_match(counties, 'iso_code', 'judetul_iso', 'located_adm') def set_location(self): """ Set Location property from article linked in localitate. Run this after set_adm_location. localitate can contain several links (we take the 1st which seems to be the most granular one) and a mix of administrative types. Compare with admin location so that they're not the same. 
""" if self.has_non_empty_attribute('localitate'): loc_item = None if utils.count_wikilinks(self.localitate) > 0: loc_link = utils.get_wikilinks(self.localitate)[0] loc_item = utils.q_from_wikipedia('ro', loc_link.title) adm_item = self.get_statement_values('located_adm') if loc_item and loc_item != adm_item[0]: self.add_statement('location', loc_item) if not loc_item: self.add_to_report('localitate', self.localitate, 'location') def set_heritage_id(self): self.add_statement('romanian_monument_id', self.cod) def update_descriptions(self): adm_code = self.judetul_iso counties = self.data_files['counties'] county_item = utils.get_item_from_dict_by_key(dict_name=counties, search_term=adm_code, return_content_of='itemLabel', search_in= 'iso_code') if len(county_item) == 1: place_name = '{}, Romania'.format(county_item[0]) else: place_name = 'Romania' desc = 'heritage site in {}'.format(place_name) self.add_description('en', desc) self.add_disambiguator(str(self.cod)) def set_address(self): street_patterns = 'piața', 'str.', 'bd.' if self.has_non_empty_attribute('adresa'): adr_lower = self.adresa.lower() adr_nice = utils.remove_markup(self.adresa) if any(pattern in adr_lower for pattern in street_patterns): if self.has_non_empty_attribute('localitate'): town = utils.remove_markup(self.localitate) adr_nice = '{}, {}'.format(adr_nice, town) self.add_statement('located_street', adr_nice) else: directions = utils.package_monolingual(adr_nice, 'ro') self.add_statement('directions', directions) def update_labels(self): romanian = utils.remove_markup(self.denumire) self.add_label('ro', romanian) def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id('cod') self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image('imagine') self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) if __name__ == '__main__': """Command line entry point for importer.""" args = importer.handle_args() dataset = Dataset('ro', 'ro', RoRo) dataset.data_files = {'counties': 'romania_counties.json'} importer.main(args, dataset) <|reserved_special_token_1|> from Monument import Monument, Dataset import importer_utils as utils import importer as importer class RoRo(Monument): def set_adm_location(self): counties = self.data_files["counties"] self.set_from_dict_match(counties, "iso_code", "judetul_iso", "located_adm") def set_location(self): """ Set Location property from article linked in localitate. Run this after set_adm_location. localitate can contain several links (we take the 1st which seems to be the most granular one) and a mix of administrative types. Compare with admin location so that they're not the same. 
""" if self.has_non_empty_attribute("localitate"): loc_item = None if utils.count_wikilinks(self.localitate) > 0: loc_link = utils.get_wikilinks(self.localitate)[0] loc_item = utils.q_from_wikipedia("ro", loc_link.title) adm_item = self.get_statement_values("located_adm") if loc_item and loc_item != adm_item[0]: self.add_statement("location", loc_item) if not loc_item: self.add_to_report("localitate", self.localitate, "location") def set_heritage_id(self): self.add_statement("romanian_monument_id", self.cod) def update_descriptions(self): adm_code = self.judetul_iso counties = self.data_files["counties"] county_item = utils.get_item_from_dict_by_key(dict_name=counties, search_term=adm_code, return_content_of="itemLabel", search_in="iso_code") if len(county_item) == 1: place_name = "{}, Romania".format(county_item[0]) else: place_name = "Romania" desc = "heritage site in {}".format(place_name) self.add_description("en", desc) self.add_disambiguator(str(self.cod)) def set_address(self): street_patterns = ("piața", "str.", "bd.") if self.has_non_empty_attribute("adresa"): adr_lower = self.adresa.lower() adr_nice = utils.remove_markup(self.adresa) if any(pattern in adr_lower for pattern in street_patterns): if self.has_non_empty_attribute("localitate"): town = utils.remove_markup(self.localitate) adr_nice = "{}, {}".format(adr_nice, town) self.add_statement("located_street", adr_nice) else: directions = utils.package_monolingual(adr_nice, 'ro') self.add_statement("directions", directions) def update_labels(self): romanian = utils.remove_markup(self.denumire) self.add_label("ro", romanian) def __init__(self, db_row_dict, mapping, data_files, existing, repository): Monument.__init__(self, db_row_dict, mapping, data_files, existing, repository) self.set_monuments_all_id("cod") self.set_changed() self.set_wlm_source() self.set_heritage_id() self.set_heritage() self.set_country() self.set_adm_location() self.set_address() self.set_location() self.set_coords() self.set_commonscat() self.set_image("imagine") self.update_labels() self.update_descriptions() self.set_wd_item(self.find_matching_wikidata(mapping)) if __name__ == "__main__": """Command line entry point for importer.""" args = importer.handle_args() dataset = Dataset("ro", "ro", RoRo) dataset.data_files = {"counties": "romania_counties.json"} importer.main(args, dataset)
flexible
{ "blob_id": "5f8a9d82a3245671b438475d1fac7be4db769fbe", "index": 8493, "step-1": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == '__main__':\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset('ro', 'ro', RoRo)\n dataset.data_files = {'counties': 'romania_counties.json'}\n importer.main(args, dataset)\n", "step-5": "from Monument import Monument, Dataset\nimport importer_utils as utils\nimport importer as importer\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files[\"counties\"]\n self.set_from_dict_match(counties, \"iso_code\",\n \"judetul_iso\", \"located_adm\")\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. 
Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute(\"localitate\"):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia(\"ro\", loc_link.title)\n adm_item = self.get_statement_values(\"located_adm\")\n if loc_item and loc_item != adm_item[0]:\n self.add_statement(\"location\", loc_item)\n\n if not loc_item:\n self.add_to_report(\"localitate\", self.localitate, \"location\")\n\n def set_heritage_id(self):\n self.add_statement(\"romanian_monument_id\", self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files[\"counties\"]\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code,\n return_content_of=\"itemLabel\",\n search_in=\"iso_code\")\n if len(county_item) == 1:\n place_name = \"{}, Romania\".format(county_item[0])\n else:\n place_name = \"Romania\"\n desc = \"heritage site in {}\".format(place_name)\n self.add_description(\"en\", desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = (\"piața\", \"str.\", \"bd.\")\n if self.has_non_empty_attribute(\"adresa\"):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute(\"localitate\"):\n town = utils.remove_markup(self.localitate)\n adr_nice = \"{}, {}\".format(adr_nice, town)\n self.add_statement(\"located_street\", adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement(\"directions\", directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label(\"ro\", romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping,\n data_files, existing, repository)\n self.set_monuments_all_id(\"cod\")\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image(\"imagine\")\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == \"__main__\":\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset(\"ro\", \"ro\", RoRo)\n dataset.data_files = {\"counties\": \"romania_counties.json\"}\n importer.main(args, dataset)\n", "step-ids": [ 4, 5, 8, 9, 11 ] }
[ 4, 5, 8, 9, 11 ]
<|reserved_special_token_0|> class MultiSpeakerBRIR(SimpleFreeFieldHRIR): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def add_metadata(self, database): super().add_metadata(database) database.Data.Type = 'FIRE' database.Room.Type = 'reverberant' return <|reserved_special_token_1|> <|reserved_special_token_0|> class MultiSpeakerBRIR(SimpleFreeFieldHRIR): <|reserved_special_token_0|> <|reserved_special_token_0|> def __init__(self): super().__init__() self.default_objects['Receiver']['count'] = 2 self.conditions['must have 2 Receivers'] = (lambda name, fixed, variances, count: name != 'Receiver' or count == 2) self.conditions['must have Listener Up and View'] = (lambda name, fixed, variances, count: name != 'Listener' or 'Up' in fixed + variances and 'View' in fixed + variances) (self.conditions['must have both Emitter View and Up or neither']) = ( lambda name, fixed, variances, count: name != 'Emitter' or 'View' not in fixed + variances or 'Up' in fixed + variances and 'View' in fixed + variances) def add_metadata(self, database): super().add_metadata(database) database.Data.Type = 'FIRE' database.Room.Type = 'reverberant' return <|reserved_special_token_1|> <|reserved_special_token_0|> class MultiSpeakerBRIR(SimpleFreeFieldHRIR): name = 'MultiSpeakerBRIR' version = '0.3' def __init__(self): super().__init__() self.default_objects['Receiver']['count'] = 2 self.conditions['must have 2 Receivers'] = (lambda name, fixed, variances, count: name != 'Receiver' or count == 2) self.conditions['must have Listener Up and View'] = (lambda name, fixed, variances, count: name != 'Listener' or 'Up' in fixed + variances and 'View' in fixed + variances) (self.conditions['must have both Emitter View and Up or neither']) = ( lambda name, fixed, variances, count: name != 'Emitter' or 'View' not in fixed + variances or 'Up' in fixed + variances and 'View' in fixed + variances) def add_metadata(self, database): super().add_metadata(database) database.Data.Type = 'FIRE' database.Room.Type = 'reverberant' return <|reserved_special_token_1|> from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR class MultiSpeakerBRIR(SimpleFreeFieldHRIR): name = 'MultiSpeakerBRIR' version = '0.3' def __init__(self): super().__init__() self.default_objects['Receiver']['count'] = 2 self.conditions['must have 2 Receivers'] = (lambda name, fixed, variances, count: name != 'Receiver' or count == 2) self.conditions['must have Listener Up and View'] = (lambda name, fixed, variances, count: name != 'Listener' or 'Up' in fixed + variances and 'View' in fixed + variances) (self.conditions['must have both Emitter View and Up or neither']) = ( lambda name, fixed, variances, count: name != 'Emitter' or 'View' not in fixed + variances or 'Up' in fixed + variances and 'View' in fixed + variances) def add_metadata(self, database): super().add_metadata(database) database.Data.Type = 'FIRE' database.Room.Type = 'reverberant' return <|reserved_special_token_1|> # Copyright (c) 2019 Jannika Lossner # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be 
included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR class MultiSpeakerBRIR(SimpleFreeFieldHRIR): name = "MultiSpeakerBRIR" version = "0.3" def __init__(self): super().__init__() self.default_objects["Receiver"]["count"] = 2 #self.default_data["IR"] = 1 self.conditions["must have 2 Receivers"] = lambda name, fixed, variances, count: name != "Receiver" or count == 2 self.conditions["must have Listener Up and View"] = lambda name, fixed, variances, count: name != "Listener" or ("Up" in fixed + variances and "View" in fixed + variances) self.conditions["must have both Emitter View and Up or neither"] = lambda name, fixed, variances, count: name != "Emitter" or "View" not in fixed + variances or ("Up" in fixed + variances and "View" in fixed + variances) def add_metadata(self, database): super().add_metadata(database) database.Data.Type = "FIRE" database.Room.Type = "reverberant" return
flexible
{ "blob_id": "e30bd33ae18881307e7cf4f60d3c60eae91573bc", "index": 181, "step-1": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n <mask token>\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n", "step-2": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n", "step-3": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n", "step-4": "from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n", "step-5": "# Copyright (c) 2019 Jannika Lossner\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following 
conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = \"MultiSpeakerBRIR\"\n version = \"0.3\"\n def __init__(self):\n super().__init__()\n self.default_objects[\"Receiver\"][\"count\"] = 2\n\n #self.default_data[\"IR\"] = 1\n\n self.conditions[\"must have 2 Receivers\"] = lambda name, fixed, variances, count: name != \"Receiver\" or count == 2\n self.conditions[\"must have Listener Up and View\"] = lambda name, fixed, variances, count: name != \"Listener\" or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n self.conditions[\"must have both Emitter View and Up or neither\"] = lambda name, fixed, variances, count: name != \"Emitter\" or \"View\" not in fixed + variances or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n\n database.Data.Type = \"FIRE\"\n database.Room.Type = \"reverberant\"\n return\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from .base import BaseLevel from map_objects import DefinedMap from entity.monster import Daemon from entity.weapons import Axe class FinalLevel(BaseLevel): def __init__(self): lvl_map = DefinedMap('levels/demon_lair.xp') super().__init__(lvl_map.width, lvl_map.height) self.map = lvl_map self.set_entrance(50, 29) boss = Daemon(8, 27, 10) self.add_entity(boss) def add_player(self, player): super().add_player(player) self.player.fov = 100 self.player.weapon = Axe()
normal
{ "blob_id": "7ba8f0bd962413f6ff825df27330447b11360f10", "index": 6089, "step-1": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n\n def add_player(self, player):\n super().add_player(player)\n self.player.fov = 100\n self.player.weapon = Axe()\n", "step-4": "from .base import BaseLevel\nfrom map_objects import DefinedMap\nfrom entity.monster import Daemon\nfrom entity.weapons import Axe\n\n\nclass FinalLevel(BaseLevel):\n\n def __init__(self):\n lvl_map = DefinedMap('levels/demon_lair.xp')\n super().__init__(lvl_map.width, lvl_map.height)\n self.map = lvl_map\n self.set_entrance(50, 29)\n boss = Daemon(8, 27, 10)\n self.add_entity(boss)\n\n def add_player(self, player):\n super().add_player(player)\n self.player.fov = 100\n self.player.weapon = Axe()\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
# !/usr/bin/python # sudo mn --custom _mininet_topo.py --topo mytopo,5 # sudo mn --custom _mininet_topo.py --topo mytopo,3 --test simpletest # or just run this python file from mininet.topo import Topo from mininet.net import Mininet from mininet.util import dumpNodeConnections from mininet.log import setLogLevel from mininet.cli import CLI class SingleSwitchTopo(Topo): "Single switch connected to n hosts." def build(self): # switch = self.addSwitch('s1') # # Python's range(N) generates 0..N-1 # for h in range(n): # host = self.addHost('h%s' % (h + 1)) # self.addLink(host, switch) s1 = self.addSwitch('s1') h1 = self.addHost('h1') h2 = self.addHost('h2') h3 = self.addHost('h3') h4 = self.addHost('h4') h5 = self.addHost('h5') h6 = self.addHost('h6') self.addLink(h1, s1) self.addLink(h2, s1) self.addLink(h3, s1) self.addLink(h4, s1) self.addLink(h5, s1) self.addLink(h6, s1) # def simpleTest(): "Create and test a simple network" topo = SingleSwitchTopo() net = Mininet(topo) net.start() print "Dumping host connections" dumpNodeConnections(net.hosts) print "Testing network connectivity" net.pingAll() # net.stop() h1 = net.get('h1') h2 = net.get('h2') h3 = net.get('h3') h4 = net.get('h4') h5 = net.get('h5') h6 = net.get('h6') for host in [h1, h2, h3, h4, h5, h6]: host.cmdPrint('cd /media/sf_DHT-Torrent') h1.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 600 --ip ' + h1.IP() + ' \' > h1.sh') h2.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 500 --ip ' + h2.IP() + " --nextpeerid 600 --nextpeerip " + h1.IP() + ' \' > h2.sh') h3.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 400 --ip ' + h3.IP() + " --nextpeerid 500 --nextpeerip " + h2.IP() + ' \' > h3.sh') h4.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 300 --ip ' + h4.IP() + " --nextpeerid 400 --nextpeerip " + h3.IP() + ' \' > h4.sh') h5.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 200 --ip ' + h5.IP() + " --nextpeerid 300 --nextpeerip " + h4.IP() + ' \' > h5.sh') h6.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 100 --ip ' + h6.IP() + " --nextpeerid 200 --nextpeerip " + h5.IP() + ' \' > h6.sh') # h1.cmdPrint('ls') net.startTerms() CLI(net) # CLI(net).do_xterm(h1) net.stopXterms() net.stop() if __name__ == '__main__': # Tell mininet to print useful information setLogLevel('info') simpleTest() topos = { 'mytopo': SingleSwitchTopo } # tests = { 'mytest': simpleTest }
normal
{ "blob_id": "8fd74287fbc653ea3ed4aa76a272486aa29185cf", "index": 1032, "step-1": "# !/usr/bin/python\n\n# sudo mn --custom _mininet_topo.py --topo mytopo,5\n# sudo mn --custom _mininet_topo.py --topo mytopo,3 --test simpletest\n# or just run this python file\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.cli import CLI\n\n\nclass SingleSwitchTopo(Topo):\n \"Single switch connected to n hosts.\"\n\n def build(self):\n # switch = self.addSwitch('s1')\n # # Python's range(N) generates 0..N-1\n # for h in range(n):\n # host = self.addHost('h%s' % (h + 1))\n # self.addLink(host, switch)\n\n s1 = self.addSwitch('s1')\n\n h1 = self.addHost('h1')\n h2 = self.addHost('h2')\n h3 = self.addHost('h3')\n h4 = self.addHost('h4')\n h5 = self.addHost('h5')\n h6 = self.addHost('h6')\n\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s1)\n self.addLink(h5, s1)\n self.addLink(h6, s1)\n\n#\ndef simpleTest():\n \"Create and test a simple network\"\n topo = SingleSwitchTopo()\n net = Mininet(topo)\n net.start()\n print \"Dumping host connections\"\n dumpNodeConnections(net.hosts)\n print \"Testing network connectivity\"\n net.pingAll()\n # net.stop()\n\n h1 = net.get('h1')\n h2 = net.get('h2')\n h3 = net.get('h3')\n h4 = net.get('h4')\n h5 = net.get('h5')\n h6 = net.get('h6')\n\n\n for host in [h1, h2, h3, h4, h5, h6]:\n host.cmdPrint('cd /media/sf_DHT-Torrent')\n\n h1.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 600 --ip ' + h1.IP() + ' \\' > h1.sh')\n h2.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 500 --ip ' + h2.IP() + \" --nextpeerid 600 --nextpeerip \" + h1.IP() + ' \\' > h2.sh')\n h3.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 400 --ip ' + h3.IP() + \" --nextpeerid 500 --nextpeerip \" + h2.IP() + ' \\' > h3.sh')\n h4.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 300 --ip ' + h4.IP() + \" --nextpeerid 400 --nextpeerip \" + h3.IP() + ' \\' > h4.sh')\n h5.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 200 --ip ' + h5.IP() + \" --nextpeerid 300 --nextpeerip \" + h4.IP() + ' \\' > h5.sh')\n h6.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 100 --ip ' + h6.IP() + \" --nextpeerid 200 --nextpeerip \" + h5.IP() + ' \\' > h6.sh')\n\n # h1.cmdPrint('ls')\n\n net.startTerms()\n CLI(net)\n # CLI(net).do_xterm(h1)\n\n net.stopXterms()\n net.stop()\n\nif __name__ == '__main__':\n # Tell mininet to print useful information\n setLogLevel('info')\n simpleTest()\n\ntopos = { 'mytopo': SingleSwitchTopo }\n# tests = { 'mytest': simpleTest }", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> class AEalLayerColorTemplate(alShadersTemplate): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class AEalLayerColorTemplate(alShadersTemplate): <|reserved_special_token_0|> <|reserved_special_token_0|> def setup(self): self.params.clear() self.params['layer1'] = Param('layer1', 'Layer 1', 'The background layer (will be blended over black if its alpha is not 1.' , 'rgb', presets=None) self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha', 'The alpha of the background layer', 'float', presets=None) self.params['layer1blend'] = Param('layer1blend', 'Mode', 'Blend mode for the background layer.', 'enum', presets=None) self.params['layer2'] = Param('layer2', 'Layer 2', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer2blend'] = Param('layer2blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer3'] = Param('layer3', 'Layer 3', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer3blend'] = Param('layer3blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer4'] = Param('layer4', 'Layer 4', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer4blend'] = Param('layer4blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer5'] = Param('layer5', 'Layer 5', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer5blend'] = Param('layer5blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer6'] = Param('layer6', 'Layer 6', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer6blend'] = Param('layer6blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer7'] = Param('layer7', 'Layer 7', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' 
, 'rgb', presets=None) self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer7blend'] = Param('layer7blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer8'] = Param('layer8', 'Layer 8', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer8blend'] = Param('layer8blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.addSwatch() self.beginScrollLayout() self.addCustomRgb('layer1') self.addCustomFlt('layer1a') self.addControl('layer1blend', label='Mode', annotation= 'Blend mode for the background layer.') self.addCustomRgb('layer2') self.addCustomFlt('layer2a') self.addControl('layer2blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer3') self.addCustomFlt('layer3a') self.addControl('layer3blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer4') self.addCustomFlt('layer4a') self.addControl('layer4blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer5') self.addCustomFlt('layer5a') self.addControl('layer5blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer6') self.addCustomFlt('layer6a') self.addControl('layer6blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer7') self.addCustomFlt('layer7a') self.addControl('layer7blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer8') self.addCustomFlt('layer8a') self.addControl('layer8blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') pm.mel.AEdependNodeTemplate(self.nodeName) self.addExtraControls() self.endScrollLayout() <|reserved_special_token_1|> <|reserved_special_token_0|> class AEalLayerColorTemplate(alShadersTemplate): controls = {} params = {} def setup(self): self.params.clear() self.params['layer1'] = Param('layer1', 'Layer 1', 'The background layer (will be blended over black if its alpha is not 1.' , 'rgb', presets=None) self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha', 'The alpha of the background layer', 'float', presets=None) self.params['layer1blend'] = Param('layer1blend', 'Mode', 'Blend mode for the background layer.', 'enum', presets=None) self.params['layer2'] = Param('layer2', 'Layer 2', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer2blend'] = Param('layer2blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer3'] = Param('layer3', 'Layer 3', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' 
, 'rgb', presets=None) self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer3blend'] = Param('layer3blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer4'] = Param('layer4', 'Layer 4', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer4blend'] = Param('layer4blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer5'] = Param('layer5', 'Layer 5', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer5blend'] = Param('layer5blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer6'] = Param('layer6', 'Layer 6', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer6blend'] = Param('layer6blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer7'] = Param('layer7', 'Layer 7', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer7blend'] = Param('layer7blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer8'] = Param('layer8', 'Layer 8', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' 
, 'rgb', presets=None) self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer8blend'] = Param('layer8blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.addSwatch() self.beginScrollLayout() self.addCustomRgb('layer1') self.addCustomFlt('layer1a') self.addControl('layer1blend', label='Mode', annotation= 'Blend mode for the background layer.') self.addCustomRgb('layer2') self.addCustomFlt('layer2a') self.addControl('layer2blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer3') self.addCustomFlt('layer3a') self.addControl('layer3blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer4') self.addCustomFlt('layer4a') self.addControl('layer4blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer5') self.addCustomFlt('layer5a') self.addControl('layer5blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer6') self.addCustomFlt('layer6a') self.addControl('layer6blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer7') self.addCustomFlt('layer7a') self.addControl('layer7blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer8') self.addCustomFlt('layer8a') self.addControl('layer8blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') pm.mel.AEdependNodeTemplate(self.nodeName) self.addExtraControls() self.endScrollLayout() <|reserved_special_token_1|> import pymel.core as pm from alShaders import * class AEalLayerColorTemplate(alShadersTemplate): controls = {} params = {} def setup(self): self.params.clear() self.params['layer1'] = Param('layer1', 'Layer 1', 'The background layer (will be blended over black if its alpha is not 1.' , 'rgb', presets=None) self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha', 'The alpha of the background layer', 'float', presets=None) self.params['layer1blend'] = Param('layer1blend', 'Mode', 'Blend mode for the background layer.', 'enum', presets=None) self.params['layer2'] = Param('layer2', 'Layer 2', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer2blend'] = Param('layer2blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer3'] = Param('layer3', 'Layer 3', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer3blend'] = Param('layer3blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer4'] = Param('layer4', 'Layer 4', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' 
, 'rgb', presets=None) self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer4blend'] = Param('layer4blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer5'] = Param('layer5', 'Layer 5', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer5blend'] = Param('layer5blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer6'] = Param('layer6', 'Layer 6', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer6blend'] = Param('layer6blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer7'] = Param('layer7', 'Layer 7', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' , 'rgb', presets=None) self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer7blend'] = Param('layer7blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.params['layer8'] = Param('layer8', 'Layer 8', 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.' 
, 'rgb', presets=None) self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha', 'The alpha used to blend this layer over the layers below.', 'float', presets=None) self.params['layer8blend'] = Param('layer8blend', 'Mode', 'The blend mode used to blend this layer over the layers below.', 'enum', presets=None) self.addSwatch() self.beginScrollLayout() self.addCustomRgb('layer1') self.addCustomFlt('layer1a') self.addControl('layer1blend', label='Mode', annotation= 'Blend mode for the background layer.') self.addCustomRgb('layer2') self.addCustomFlt('layer2a') self.addControl('layer2blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer3') self.addCustomFlt('layer3a') self.addControl('layer3blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer4') self.addCustomFlt('layer4a') self.addControl('layer4blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer5') self.addCustomFlt('layer5a') self.addControl('layer5blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer6') self.addCustomFlt('layer6a') self.addControl('layer6blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer7') self.addCustomFlt('layer7a') self.addControl('layer7blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') self.addCustomRgb('layer8') self.addCustomFlt('layer8a') self.addControl('layer8blend', label='Mode', annotation= 'The blend mode used to blend this layer over the layers below.') pm.mel.AEdependNodeTemplate(self.nodeName) self.addExtraControls() self.endScrollLayout() <|reserved_special_token_1|> import pymel.core as pm from alShaders import * class AEalLayerColorTemplate(alShadersTemplate): controls = {} params = {} def setup(self): self.params.clear() self.params["layer1"] = Param("layer1", "Layer 1", "The background layer (will be blended over black if its alpha is not 1.", "rgb", presets=None) self.params["layer1a"] = Param("layer1a", "Layer 1 Alpha", "The alpha of the background layer", "float", presets=None) self.params["layer1blend"] = Param("layer1blend", "Mode", "Blend mode for the background layer.", "enum", presets=None) self.params["layer2"] = Param("layer2", "Layer 2", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer2a"] = Param("layer2a", "Layer 2 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer2blend"] = Param("layer2blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer3"] = Param("layer3", "Layer 3", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer3a"] = Param("layer3a", "Layer 3 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer3blend"] = Param("layer3blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer4"] = Param("layer4", "Layer 4", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) 
self.params["layer4a"] = Param("layer4a", "Layer 4 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer4blend"] = Param("layer4blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer5"] = Param("layer5", "Layer 5", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer5a"] = Param("layer5a", "Layer 5 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer5blend"] = Param("layer5blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer6"] = Param("layer6", "Layer 6", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer6a"] = Param("layer6a", "Layer 6 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer6blend"] = Param("layer6blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer7"] = Param("layer7", "Layer 7", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer7a"] = Param("layer7a", "Layer 7 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer7blend"] = Param("layer7blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.params["layer8"] = Param("layer8", "Layer 8", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None) self.params["layer8a"] = Param("layer8a", "Layer 8 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None) self.params["layer8blend"] = Param("layer8blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None) self.addSwatch() self.beginScrollLayout() self.addCustomRgb("layer1") self.addCustomFlt("layer1a") self.addControl("layer1blend", label="Mode", annotation="Blend mode for the background layer.") self.addCustomRgb("layer2") self.addCustomFlt("layer2a") self.addControl("layer2blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer3") self.addCustomFlt("layer3a") self.addControl("layer3blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer4") self.addCustomFlt("layer4a") self.addControl("layer4blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer5") self.addCustomFlt("layer5a") self.addControl("layer5blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer6") self.addCustomFlt("layer6a") self.addControl("layer6blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer7") self.addCustomFlt("layer7a") self.addControl("layer7blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.") self.addCustomRgb("layer8") self.addCustomFlt("layer8a") self.addControl("layer8blend", label="Mode", annotation="The blend mode 
used to blend this layer over the layers below.") pm.mel.AEdependNodeTemplate(self.nodeName) self.addExtraControls() self.endScrollLayout()
flexible
{ "blob_id": "c847e7abe36b62c4518bb535789064e22b5f1db7", "index": 5750, "step-1": "<mask token>\n\n\nclass AEalLayerColorTemplate(alShadersTemplate):\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass AEalLayerColorTemplate(alShadersTemplate):\n <mask token>\n <mask token>\n\n def setup(self):\n self.params.clear()\n self.params['layer1'] = Param('layer1', 'Layer 1',\n 'The background layer (will be blended over black if its alpha is not 1.'\n , 'rgb', presets=None)\n self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha',\n 'The alpha of the background layer', 'float', presets=None)\n self.params['layer1blend'] = Param('layer1blend', 'Mode',\n 'Blend mode for the background layer.', 'enum', presets=None)\n self.params['layer2'] = Param('layer2', 'Layer 2',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer2blend'] = Param('layer2blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer3'] = Param('layer3', 'Layer 3',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer3blend'] = Param('layer3blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer4'] = Param('layer4', 'Layer 4',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer4blend'] = Param('layer4blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer5'] = Param('layer5', 'Layer 5',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer5blend'] = Param('layer5blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer6'] = Param('layer6', 'Layer 6',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer6blend'] = Param('layer6blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer7'] = Param('layer7', 'Layer 7',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer7blend'] = 
Param('layer7blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer8'] = Param('layer8', 'Layer 8',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer8blend'] = Param('layer8blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.addSwatch()\n self.beginScrollLayout()\n self.addCustomRgb('layer1')\n self.addCustomFlt('layer1a')\n self.addControl('layer1blend', label='Mode', annotation=\n 'Blend mode for the background layer.')\n self.addCustomRgb('layer2')\n self.addCustomFlt('layer2a')\n self.addControl('layer2blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer3')\n self.addCustomFlt('layer3a')\n self.addControl('layer3blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer4')\n self.addCustomFlt('layer4a')\n self.addControl('layer4blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer5')\n self.addCustomFlt('layer5a')\n self.addControl('layer5blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer6')\n self.addCustomFlt('layer6a')\n self.addControl('layer6blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer7')\n self.addCustomFlt('layer7a')\n self.addControl('layer7blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer8')\n self.addCustomFlt('layer8a')\n self.addControl('layer8blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n pm.mel.AEdependNodeTemplate(self.nodeName)\n self.addExtraControls()\n self.endScrollLayout()\n", "step-3": "<mask token>\n\n\nclass AEalLayerColorTemplate(alShadersTemplate):\n controls = {}\n params = {}\n\n def setup(self):\n self.params.clear()\n self.params['layer1'] = Param('layer1', 'Layer 1',\n 'The background layer (will be blended over black if its alpha is not 1.'\n , 'rgb', presets=None)\n self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha',\n 'The alpha of the background layer', 'float', presets=None)\n self.params['layer1blend'] = Param('layer1blend', 'Mode',\n 'Blend mode for the background layer.', 'enum', presets=None)\n self.params['layer2'] = Param('layer2', 'Layer 2',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer2blend'] = Param('layer2blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer3'] = Param('layer3', 'Layer 3',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha',\n 'The alpha used to blend this layer 
over the layers below.',\n 'float', presets=None)\n self.params['layer3blend'] = Param('layer3blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer4'] = Param('layer4', 'Layer 4',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer4blend'] = Param('layer4blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer5'] = Param('layer5', 'Layer 5',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer5blend'] = Param('layer5blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer6'] = Param('layer6', 'Layer 6',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer6blend'] = Param('layer6blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer7'] = Param('layer7', 'Layer 7',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer7blend'] = Param('layer7blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer8'] = Param('layer8', 'Layer 8',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer8blend'] = Param('layer8blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.addSwatch()\n self.beginScrollLayout()\n self.addCustomRgb('layer1')\n self.addCustomFlt('layer1a')\n self.addControl('layer1blend', label='Mode', annotation=\n 'Blend mode for the background layer.')\n self.addCustomRgb('layer2')\n self.addCustomFlt('layer2a')\n self.addControl('layer2blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer3')\n self.addCustomFlt('layer3a')\n self.addControl('layer3blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer4')\n self.addCustomFlt('layer4a')\n self.addControl('layer4blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer5')\n self.addCustomFlt('layer5a')\n self.addControl('layer5blend', label='Mode', annotation=\n 'The blend mode used to 
blend this layer over the layers below.')\n self.addCustomRgb('layer6')\n self.addCustomFlt('layer6a')\n self.addControl('layer6blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer7')\n self.addCustomFlt('layer7a')\n self.addControl('layer7blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer8')\n self.addCustomFlt('layer8a')\n self.addControl('layer8blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n pm.mel.AEdependNodeTemplate(self.nodeName)\n self.addExtraControls()\n self.endScrollLayout()\n", "step-4": "import pymel.core as pm\nfrom alShaders import *\n\n\nclass AEalLayerColorTemplate(alShadersTemplate):\n controls = {}\n params = {}\n\n def setup(self):\n self.params.clear()\n self.params['layer1'] = Param('layer1', 'Layer 1',\n 'The background layer (will be blended over black if its alpha is not 1.'\n , 'rgb', presets=None)\n self.params['layer1a'] = Param('layer1a', 'Layer 1 Alpha',\n 'The alpha of the background layer', 'float', presets=None)\n self.params['layer1blend'] = Param('layer1blend', 'Mode',\n 'Blend mode for the background layer.', 'enum', presets=None)\n self.params['layer2'] = Param('layer2', 'Layer 2',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer2a'] = Param('layer2a', 'Layer 2 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer2blend'] = Param('layer2blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer3'] = Param('layer3', 'Layer 3',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer3a'] = Param('layer3a', 'Layer 3 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer3blend'] = Param('layer3blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer4'] = Param('layer4', 'Layer 4',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer4a'] = Param('layer4a', 'Layer 4 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer4blend'] = Param('layer4blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer5'] = Param('layer5', 'Layer 5',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer5a'] = Param('layer5a', 'Layer 5 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer5blend'] = Param('layer5blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer6'] = Param('layer6', 'Layer 6',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer6a'] = Param('layer6a', 'Layer 6 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 
'float', presets=None)\n self.params['layer6blend'] = Param('layer6blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer7'] = Param('layer7', 'Layer 7',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer7a'] = Param('layer7a', 'Layer 7 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer7blend'] = Param('layer7blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.params['layer8'] = Param('layer8', 'Layer 8',\n 'The color plugged in here will be blended over the layers below according to its alpha and blend mode.'\n , 'rgb', presets=None)\n self.params['layer8a'] = Param('layer8a', 'Layer 8 Alpha',\n 'The alpha used to blend this layer over the layers below.',\n 'float', presets=None)\n self.params['layer8blend'] = Param('layer8blend', 'Mode',\n 'The blend mode used to blend this layer over the layers below.',\n 'enum', presets=None)\n self.addSwatch()\n self.beginScrollLayout()\n self.addCustomRgb('layer1')\n self.addCustomFlt('layer1a')\n self.addControl('layer1blend', label='Mode', annotation=\n 'Blend mode for the background layer.')\n self.addCustomRgb('layer2')\n self.addCustomFlt('layer2a')\n self.addControl('layer2blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer3')\n self.addCustomFlt('layer3a')\n self.addControl('layer3blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer4')\n self.addCustomFlt('layer4a')\n self.addControl('layer4blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer5')\n self.addCustomFlt('layer5a')\n self.addControl('layer5blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer6')\n self.addCustomFlt('layer6a')\n self.addControl('layer6blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer7')\n self.addCustomFlt('layer7a')\n self.addControl('layer7blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n self.addCustomRgb('layer8')\n self.addCustomFlt('layer8a')\n self.addControl('layer8blend', label='Mode', annotation=\n 'The blend mode used to blend this layer over the layers below.')\n pm.mel.AEdependNodeTemplate(self.nodeName)\n self.addExtraControls()\n self.endScrollLayout()\n", "step-5": "import pymel.core as pm\nfrom alShaders import *\n\nclass AEalLayerColorTemplate(alShadersTemplate):\n\tcontrols = {}\n\tparams = {}\n\tdef setup(self):\n\t\tself.params.clear()\n\t\tself.params[\"layer1\"] = Param(\"layer1\", \"Layer 1\", \"The background layer (will be blended over black if its alpha is not 1.\", \"rgb\", presets=None)\n\t\tself.params[\"layer1a\"] = Param(\"layer1a\", \"Layer 1 Alpha\", \"The alpha of the background layer\", \"float\", presets=None)\n\t\tself.params[\"layer1blend\"] = Param(\"layer1blend\", \"Mode\", \"Blend mode for the background layer.\", \"enum\", presets=None)\n\t\tself.params[\"layer2\"] = Param(\"layer2\", \"Layer 2\", \"The color plugged in here will be blended over the layers below according to its alpha and blend 
mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer2a\"] = Param(\"layer2a\", \"Layer 2 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer2blend\"] = Param(\"layer2blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer3\"] = Param(\"layer3\", \"Layer 3\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer3a\"] = Param(\"layer3a\", \"Layer 3 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer3blend\"] = Param(\"layer3blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer4\"] = Param(\"layer4\", \"Layer 4\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer4a\"] = Param(\"layer4a\", \"Layer 4 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer4blend\"] = Param(\"layer4blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer5\"] = Param(\"layer5\", \"Layer 5\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer5a\"] = Param(\"layer5a\", \"Layer 5 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer5blend\"] = Param(\"layer5blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer6\"] = Param(\"layer6\", \"Layer 6\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer6a\"] = Param(\"layer6a\", \"Layer 6 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer6blend\"] = Param(\"layer6blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer7\"] = Param(\"layer7\", \"Layer 7\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer7a\"] = Param(\"layer7a\", \"Layer 7 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer7blend\"] = Param(\"layer7blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", presets=None)\n\t\tself.params[\"layer8\"] = Param(\"layer8\", \"Layer 8\", \"The color plugged in here will be blended over the layers below according to its alpha and blend mode.\", \"rgb\", presets=None)\n\t\tself.params[\"layer8a\"] = Param(\"layer8a\", \"Layer 8 Alpha\", \"The alpha used to blend this layer over the layers below.\", \"float\", presets=None)\n\t\tself.params[\"layer8blend\"] = Param(\"layer8blend\", \"Mode\", \"The blend mode used to blend this layer over the layers below.\", \"enum\", 
presets=None)\n\n\t\tself.addSwatch()\n\t\tself.beginScrollLayout()\n\n\t\tself.addCustomRgb(\"layer1\")\n\t\tself.addCustomFlt(\"layer1a\")\n\t\tself.addControl(\"layer1blend\", label=\"Mode\", annotation=\"Blend mode for the background layer.\")\n\t\tself.addCustomRgb(\"layer2\")\n\t\tself.addCustomFlt(\"layer2a\")\n\t\tself.addControl(\"layer2blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer3\")\n\t\tself.addCustomFlt(\"layer3a\")\n\t\tself.addControl(\"layer3blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer4\")\n\t\tself.addCustomFlt(\"layer4a\")\n\t\tself.addControl(\"layer4blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer5\")\n\t\tself.addCustomFlt(\"layer5a\")\n\t\tself.addControl(\"layer5blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer6\")\n\t\tself.addCustomFlt(\"layer6a\")\n\t\tself.addControl(\"layer6blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer7\")\n\t\tself.addCustomFlt(\"layer7a\")\n\t\tself.addControl(\"layer7blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\t\tself.addCustomRgb(\"layer8\")\n\t\tself.addCustomFlt(\"layer8a\")\n\t\tself.addControl(\"layer8blend\", label=\"Mode\", annotation=\"The blend mode used to blend this layer over the layers below.\")\n\n\t\tpm.mel.AEdependNodeTemplate(self.nodeName)\n\t\tself.addExtraControls()\n\n\t\tself.endScrollLayout()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> @conf def load_debug_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'debug' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_profile_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'profile' configuration """ v = conf.env load_linux_x64_common_settings(v) <|reserved_special_token_0|> @conf def load_release_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'release' configuration """ v = conf.env load_linux_x64_common_settings(v) <|reserved_special_token_1|> <|reserved_special_token_0|> @conf def load_debug_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'debug' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_profile_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'profile' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_performance_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'performance' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_release_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'release' configuration """ v = conf.env load_linux_x64_common_settings(v) <|reserved_special_token_1|> <|reserved_special_token_0|> def load_linux_x64_common_settings(v): """ Setup all compiler and linker settings shared over all linux_x64 configurations """ v['DEFINES'] += ['LINUX64'] @conf def load_debug_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'debug' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_profile_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'profile' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_performance_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'performance' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_release_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'release' configuration """ v = conf.env load_linux_x64_common_settings(v) <|reserved_special_token_1|> from waflib.Configure import conf def load_linux_x64_common_settings(v): """ Setup all compiler and linker settings shared over all linux_x64 configurations """ v['DEFINES'] += ['LINUX64'] @conf def load_debug_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'debug' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_profile_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'profile' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_performance_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'performance' configuration """ v = conf.env 
load_linux_x64_common_settings(v) @conf def load_release_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'release' configuration """ v = conf.env load_linux_x64_common_settings(v) <|reserved_special_token_1|> ############################################################################# ## Crytek Source File ## Copyright (C) 2013, Crytek Studios ## ## Creator: Christopher Bolte ## Date: Oct 31, 2013 ## Description: WAF based build system ############################################################################# from waflib.Configure import conf def load_linux_x64_common_settings(v): """ Setup all compiler and linker settings shared over all linux_x64 configurations """ # Add common linux x64 defines v['DEFINES'] += [ 'LINUX64' ] @conf def load_debug_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'debug' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_profile_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'profile' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_performance_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'performance' configuration """ v = conf.env load_linux_x64_common_settings(v) @conf def load_release_linux_x64_settings(conf): """ Setup all compiler and linker settings shared over all linux_x64 configurations for the 'release' configuration """ v = conf.env load_linux_x64_common_settings(v)
flexible
{ "blob_id": "5848273a76995825f01df53d6beed534e6f9f9fe", "index": 8730, "step-1": "<mask token>\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n<mask token>\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n", "step-2": "<mask token>\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n", "step-3": "<mask token>\n\n\ndef load_linux_x64_common_settings(v):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n v['DEFINES'] += ['LINUX64']\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n", "step-4": "from waflib.Configure import conf\n\n\ndef load_linux_x64_common_settings(v):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n v['DEFINES'] += ['LINUX64']\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n 
\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n", "step-5": "#############################################################################\n## Crytek Source File\n## Copyright (C) 2013, Crytek Studios\n##\n## Creator: Christopher Bolte\n## Date: Oct 31, 2013\n## Description: WAF based build system\n#############################################################################\nfrom waflib.Configure import conf\n\ndef load_linux_x64_common_settings(v):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n\t\n\t# Add common linux x64 defines\n\tv['DEFINES'] += [ 'LINUX64' ]\t\n\t\n@conf\ndef load_debug_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_profile_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_performance_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_release_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in range(amount_of_bullets): print(i) baraban[i] = 1 print('Посмотрите на барабан', baraban) <|reserved_special_token_0|> for i in range(how_much): random.shuffle(baraban) if baraban[0] == 1: print('Бабах') exit() else: print('щелк') <|reserved_special_token_1|> <|reserved_special_token_0|> amount_of_bullets = int(input('Сколько вы хотите вставить патронов?')) baraban = [0, 0, 0, 0, 0, 0] for i in range(amount_of_bullets): print(i) baraban[i] = 1 print('Посмотрите на барабан', baraban) how_much = int(input('сколько раз вы собираетесь нажать на курок? ')) for i in range(how_much): random.shuffle(baraban) if baraban[0] == 1: print('Бабах') exit() else: print('щелк') <|reserved_special_token_1|> import random amount_of_bullets = int(input('Сколько вы хотите вставить патронов?')) baraban = [0, 0, 0, 0, 0, 0] for i in range(amount_of_bullets): print(i) baraban[i] = 1 print('Посмотрите на барабан', baraban) how_much = int(input('сколько раз вы собираетесь нажать на курок? ')) for i in range(how_much): random.shuffle(baraban) if baraban[0] == 1: print('Бабах') exit() else: print('щелк') <|reserved_special_token_1|> #давайте напишем программу русской рулетки import random amount_of_bullets = int(input("Сколько вы хотите вставить патронов?")) baraban = [0, 0, 0, 0, 0, 0] # 0 -аналогия пустого гнезда # 1 - аналогия гнезда с патроном for i in range(amount_of_bullets): print(i) baraban[i] = 1 print("Посмотрите на барабан", baraban) how_much = int(input("сколько раз вы собираетесь нажать на курок? ")) for i in range(how_much): random.shuffle(baraban) if baraban[0] == 1: print("Бабах") exit() else: print('щелк')
flexible
{ "blob_id": "6c0080aa62579b4cbdaf3a55102924bfe31ffb40", "index": 8107, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(amount_of_bullets):\n print(i)\n baraban[i] = 1\nprint('Посмотрите на барабан', baraban)\n<mask token>\nfor i in range(how_much):\n random.shuffle(baraban)\n if baraban[0] == 1:\n print('Бабах')\n exit()\n else:\n print('щелк')\n", "step-3": "<mask token>\namount_of_bullets = int(input('Сколько вы хотите вставить патронов?'))\nbaraban = [0, 0, 0, 0, 0, 0]\nfor i in range(amount_of_bullets):\n print(i)\n baraban[i] = 1\nprint('Посмотрите на барабан', baraban)\nhow_much = int(input('сколько раз вы собираетесь нажать на курок? '))\nfor i in range(how_much):\n random.shuffle(baraban)\n if baraban[0] == 1:\n print('Бабах')\n exit()\n else:\n print('щелк')\n", "step-4": "import random\namount_of_bullets = int(input('Сколько вы хотите вставить патронов?'))\nbaraban = [0, 0, 0, 0, 0, 0]\nfor i in range(amount_of_bullets):\n print(i)\n baraban[i] = 1\nprint('Посмотрите на барабан', baraban)\nhow_much = int(input('сколько раз вы собираетесь нажать на курок? '))\nfor i in range(how_much):\n random.shuffle(baraban)\n if baraban[0] == 1:\n print('Бабах')\n exit()\n else:\n print('щелк')\n", "step-5": "#давайте напишем программу русской рулетки\n\nimport random\n\namount_of_bullets = int(input(\"Сколько вы хотите вставить патронов?\"))\n\nbaraban = [0, 0, 0, 0, 0, 0]\n# 0 -аналогия пустого гнезда\n# 1 - аналогия гнезда с патроном\n\nfor i in range(amount_of_bullets):\n print(i)\n baraban[i] = 1\n\nprint(\"Посмотрите на барабан\", baraban)\n\nhow_much = int(input(\"сколько раз вы собираетесь нажать на курок? \"))\nfor i in range(how_much):\n random.shuffle(baraban)\n if baraban[0] == 1:\n print(\"Бабах\")\n exit()\n else:\n print('щелк')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> pathlib.Path(DIR).mkdir(parents=True, exist_ok=True) print('--------Query Topshot GraphQL Endpoint--------') for setsId in setsIdList: for setId in setsId: count += 1 query = gql( """ { getSet (input: {setID: "%s"}) { set { id flowId flowName flowSeriesNumber plays { id description flowID stats { playerName playCategory primaryPosition } } } } } """ % setId) result = client.execute(query) setName = result['getSet']['set']['flowName'] setSeries = result['getSet']['set']['flowSeriesNumber'] setName += ' S' + str(setSeries) + '.json' path = os.path.join(DIR, setName) with open(path, 'w') as outfile: json.dump(result, outfile, indent=4) print(f'Finished writing file: {setName}') print() print(f'Total query: {count}') print('--------Querying COMPLETED.--------') print() <|reserved_special_token_1|> <|reserved_special_token_0|> transport = AIOHTTPTransport(url='https://public-api.nbatopshot.com/graphql') client = Client(transport=transport, fetch_schema_from_transport=True) DIR = 'graphqlData' count = 0 setsId_s1 = ['28eddc41-6a11-4ff8-8ec6-15041aa01e80', 'c561f66b-5bd8-451c-8686-156073c3fb69', 'a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3', '7b797690-5b53-45a7-b972-bd2d5152654a', '12a8288a-addc-4e5c-8af7-b3ba6e5161d4', 'a494c64e-9e93-418c-8934-f331ee47a39b', 'feead270-621c-4cde-baac-2f6834e9e594', 'd2378dc1-1168-410b-893d-e084170a402e', 'a156f083-e902-49d3-a113-bd61702c336a', 'd4712d31-9030-40de-b1a6-1fb9964348f3', '5f85e04f-798f-434c-89d4-2b0a575bd652', '252e83ac-b3a4-407e-82d2-138beb60b5b9', '9c8202c7-698b-4f44-b029-b70ddc49e9dc', 'dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa', '3a0ae6ce-f22e-4d98-b1fe-906f859df983', '4e166b27-3099-44c3-9de3-cac2b9751692', '18b2d80e-d38d-4678-9b7f-c2bfb223259e', '2dbc545a-25a5-4208-8e89-bbb6c3e3a364', '2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e', '320cae53-d585-4e74-8a66-404fa3543c19', '814c5183-596f-41d7-9135-c6b29faa9c6d', 'b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc', '827f9328-03aa-4cb5-97cd-7b5f2c2386fd'] setsId_s2 = ['757f23fd-f7ae-465c-a006-f09dcfd5dbd5', '496d52b8-8d6c-4071-8672-d18551f86a3e', '208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9', '122b048d-585e-4c63-8275-c23949576fd6', '708a6f60-5c93-406e-854f-50dd6734c0dd', 'f493db4a-a775-4d6e-be8a-56fae509a92d', '0a528e81-5bb0-4bf8-a7f9-6dbd183528ce', '737f9997-8817-4a74-9c13-88b99c37d118', 'b2605806-0d47-439f-ba72-784897470bb0', '33a4a3a3-a32c-4925-a4e8-7d24e56b105e', '54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e', 'ad8e85a4-2240-4604-95f6-be826966d988'] setsIdList = [setsId_s1, setsId_s2] pathlib.Path(DIR).mkdir(parents=True, exist_ok=True) print('--------Query Topshot GraphQL Endpoint--------') for setsId in setsIdList: for setId in setsId: count += 1 query = gql( """ { getSet (input: {setID: "%s"}) { set { id flowId flowName flowSeriesNumber plays { id description flowID stats { playerName playCategory primaryPosition } } } } } """ % setId) result = client.execute(query) setName = result['getSet']['set']['flowName'] setSeries = result['getSet']['set']['flowSeriesNumber'] setName += ' S' + str(setSeries) + '.json' path = os.path.join(DIR, setName) with open(path, 'w') as outfile: json.dump(result, outfile, indent=4) print(f'Finished writing file: {setName}') print() print(f'Total query: {count}') print('--------Querying COMPLETED.--------') print() <|reserved_special_token_1|> import os import json import pathlib from gql import gql, Client from gql.transport.aiohttp import AIOHTTPTransport transport = 
AIOHTTPTransport(url='https://public-api.nbatopshot.com/graphql') client = Client(transport=transport, fetch_schema_from_transport=True) DIR = 'graphqlData' count = 0 setsId_s1 = ['28eddc41-6a11-4ff8-8ec6-15041aa01e80', 'c561f66b-5bd8-451c-8686-156073c3fb69', 'a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3', '7b797690-5b53-45a7-b972-bd2d5152654a', '12a8288a-addc-4e5c-8af7-b3ba6e5161d4', 'a494c64e-9e93-418c-8934-f331ee47a39b', 'feead270-621c-4cde-baac-2f6834e9e594', 'd2378dc1-1168-410b-893d-e084170a402e', 'a156f083-e902-49d3-a113-bd61702c336a', 'd4712d31-9030-40de-b1a6-1fb9964348f3', '5f85e04f-798f-434c-89d4-2b0a575bd652', '252e83ac-b3a4-407e-82d2-138beb60b5b9', '9c8202c7-698b-4f44-b029-b70ddc49e9dc', 'dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa', '3a0ae6ce-f22e-4d98-b1fe-906f859df983', '4e166b27-3099-44c3-9de3-cac2b9751692', '18b2d80e-d38d-4678-9b7f-c2bfb223259e', '2dbc545a-25a5-4208-8e89-bbb6c3e3a364', '2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e', '320cae53-d585-4e74-8a66-404fa3543c19', '814c5183-596f-41d7-9135-c6b29faa9c6d', 'b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc', '827f9328-03aa-4cb5-97cd-7b5f2c2386fd'] setsId_s2 = ['757f23fd-f7ae-465c-a006-f09dcfd5dbd5', '496d52b8-8d6c-4071-8672-d18551f86a3e', '208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9', '122b048d-585e-4c63-8275-c23949576fd6', '708a6f60-5c93-406e-854f-50dd6734c0dd', 'f493db4a-a775-4d6e-be8a-56fae509a92d', '0a528e81-5bb0-4bf8-a7f9-6dbd183528ce', '737f9997-8817-4a74-9c13-88b99c37d118', 'b2605806-0d47-439f-ba72-784897470bb0', '33a4a3a3-a32c-4925-a4e8-7d24e56b105e', '54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e', 'ad8e85a4-2240-4604-95f6-be826966d988'] setsIdList = [setsId_s1, setsId_s2] pathlib.Path(DIR).mkdir(parents=True, exist_ok=True) print('--------Query Topshot GraphQL Endpoint--------') for setsId in setsIdList: for setId in setsId: count += 1 query = gql( """ { getSet (input: {setID: "%s"}) { set { id flowId flowName flowSeriesNumber plays { id description flowID stats { playerName playCategory primaryPosition } } } } } """ % setId) result = client.execute(query) setName = result['getSet']['set']['flowName'] setSeries = result['getSet']['set']['flowSeriesNumber'] setName += ' S' + str(setSeries) + '.json' path = os.path.join(DIR, setName) with open(path, 'w') as outfile: json.dump(result, outfile, indent=4) print(f'Finished writing file: {setName}') print() print(f'Total query: {count}') print('--------Querying COMPLETED.--------') print() <|reserved_special_token_1|> import os import json import pathlib from gql import gql, Client from gql.transport.aiohttp import AIOHTTPTransport # Select your transport with a defined url endpoint transport = AIOHTTPTransport(url="https://public-api.nbatopshot.com/graphql") # Create a GraphQL client using the defined transport client = Client(transport=transport, fetch_schema_from_transport=True) # Set the directory name DIR = "graphqlData" # Set Query counter count = 0 # set ids setsId_s1 = [ "28eddc41-6a11-4ff8-8ec6-15041aa01e80", "c561f66b-5bd8-451c-8686-156073c3fb69", "a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3", "7b797690-5b53-45a7-b972-bd2d5152654a", "12a8288a-addc-4e5c-8af7-b3ba6e5161d4", "a494c64e-9e93-418c-8934-f331ee47a39b", "feead270-621c-4cde-baac-2f6834e9e594", "d2378dc1-1168-410b-893d-e084170a402e", "a156f083-e902-49d3-a113-bd61702c336a", "d4712d31-9030-40de-b1a6-1fb9964348f3", "5f85e04f-798f-434c-89d4-2b0a575bd652", "252e83ac-b3a4-407e-82d2-138beb60b5b9", "9c8202c7-698b-4f44-b029-b70ddc49e9dc", "dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa", "3a0ae6ce-f22e-4d98-b1fe-906f859df983", "4e166b27-3099-44c3-9de3-cac2b9751692", 
"18b2d80e-d38d-4678-9b7f-c2bfb223259e", "2dbc545a-25a5-4208-8e89-bbb6c3e3a364", "2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e", "320cae53-d585-4e74-8a66-404fa3543c19", "814c5183-596f-41d7-9135-c6b29faa9c6d", "b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc", "827f9328-03aa-4cb5-97cd-7b5f2c2386fd" ] setsId_s2 = [ "757f23fd-f7ae-465c-a006-f09dcfd5dbd5", "496d52b8-8d6c-4071-8672-d18551f86a3e", "208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9", "122b048d-585e-4c63-8275-c23949576fd6", "708a6f60-5c93-406e-854f-50dd6734c0dd", "f493db4a-a775-4d6e-be8a-56fae509a92d", "0a528e81-5bb0-4bf8-a7f9-6dbd183528ce", "737f9997-8817-4a74-9c13-88b99c37d118", "b2605806-0d47-439f-ba72-784897470bb0", "33a4a3a3-a32c-4925-a4e8-7d24e56b105e", "54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e", "ad8e85a4-2240-4604-95f6-be826966d988" ] setsIdList = [setsId_s1, setsId_s2] # Make a directory if not exist pathlib.Path(DIR).mkdir(parents=True, exist_ok=True) print("--------Query Topshot GraphQL Endpoint--------") # Provide a GraphQL query for setsId in setsIdList: for setId in setsId: count += 1 query = gql( """ { getSet (input: {setID: "%s"}) { set { id flowId flowName flowSeriesNumber plays { id description flowID stats { playerName playCategory primaryPosition } } } } } """ % setId ) # Execute the query on the transport result = client.execute(query) # Configure json filename and save path setName = result["getSet"]["set"]["flowName"] setSeries = result["getSet"]["set"]["flowSeriesNumber"] setName += " S" + str(setSeries) + ".json" path = os.path.join(DIR, setName) # Write files to save path with open(path, 'w') as outfile: json.dump(result, outfile, indent=4) print(f"Finished writing file: {setName}") print() print(f"Total query: {count}") print("--------Querying COMPLETED.--------") print()
flexible
{ "blob_id": "df518fd719b7eafffd8fee92c926d4d24b65ce18", "index": 7877, "step-1": "<mask token>\n", "step-2": "<mask token>\npathlib.Path(DIR).mkdir(parents=True, exist_ok=True)\nprint('--------Query Topshot GraphQL Endpoint--------')\nfor setsId in setsIdList:\n for setId in setsId:\n count += 1\n query = gql(\n \"\"\"\n {\n getSet (input: {setID: \"%s\"}) {\n set {\n id\n flowId\n flowName\n flowSeriesNumber\n plays {\n id\n description\n flowID\n stats {\n playerName\n playCategory\n primaryPosition\n }\n }\n }\n }\n }\n \"\"\"\n % setId)\n result = client.execute(query)\n setName = result['getSet']['set']['flowName']\n setSeries = result['getSet']['set']['flowSeriesNumber']\n setName += ' S' + str(setSeries) + '.json'\n path = os.path.join(DIR, setName)\n with open(path, 'w') as outfile:\n json.dump(result, outfile, indent=4)\n print(f'Finished writing file: {setName}')\nprint()\nprint(f'Total query: {count}')\nprint('--------Querying COMPLETED.--------')\nprint()\n", "step-3": "<mask token>\ntransport = AIOHTTPTransport(url='https://public-api.nbatopshot.com/graphql')\nclient = Client(transport=transport, fetch_schema_from_transport=True)\nDIR = 'graphqlData'\ncount = 0\nsetsId_s1 = ['28eddc41-6a11-4ff8-8ec6-15041aa01e80',\n 'c561f66b-5bd8-451c-8686-156073c3fb69',\n 'a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3',\n '7b797690-5b53-45a7-b972-bd2d5152654a',\n '12a8288a-addc-4e5c-8af7-b3ba6e5161d4',\n 'a494c64e-9e93-418c-8934-f331ee47a39b',\n 'feead270-621c-4cde-baac-2f6834e9e594',\n 'd2378dc1-1168-410b-893d-e084170a402e',\n 'a156f083-e902-49d3-a113-bd61702c336a',\n 'd4712d31-9030-40de-b1a6-1fb9964348f3',\n '5f85e04f-798f-434c-89d4-2b0a575bd652',\n '252e83ac-b3a4-407e-82d2-138beb60b5b9',\n '9c8202c7-698b-4f44-b029-b70ddc49e9dc',\n 'dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa',\n '3a0ae6ce-f22e-4d98-b1fe-906f859df983',\n '4e166b27-3099-44c3-9de3-cac2b9751692',\n '18b2d80e-d38d-4678-9b7f-c2bfb223259e',\n '2dbc545a-25a5-4208-8e89-bbb6c3e3a364',\n '2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e',\n '320cae53-d585-4e74-8a66-404fa3543c19',\n '814c5183-596f-41d7-9135-c6b29faa9c6d',\n 'b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc',\n '827f9328-03aa-4cb5-97cd-7b5f2c2386fd']\nsetsId_s2 = ['757f23fd-f7ae-465c-a006-f09dcfd5dbd5',\n '496d52b8-8d6c-4071-8672-d18551f86a3e',\n '208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9',\n '122b048d-585e-4c63-8275-c23949576fd6',\n '708a6f60-5c93-406e-854f-50dd6734c0dd',\n 'f493db4a-a775-4d6e-be8a-56fae509a92d',\n '0a528e81-5bb0-4bf8-a7f9-6dbd183528ce',\n '737f9997-8817-4a74-9c13-88b99c37d118',\n 'b2605806-0d47-439f-ba72-784897470bb0',\n '33a4a3a3-a32c-4925-a4e8-7d24e56b105e',\n '54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e',\n 'ad8e85a4-2240-4604-95f6-be826966d988']\nsetsIdList = [setsId_s1, setsId_s2]\npathlib.Path(DIR).mkdir(parents=True, exist_ok=True)\nprint('--------Query Topshot GraphQL Endpoint--------')\nfor setsId in setsIdList:\n for setId in setsId:\n count += 1\n query = gql(\n \"\"\"\n {\n getSet (input: {setID: \"%s\"}) {\n set {\n id\n flowId\n flowName\n flowSeriesNumber\n plays {\n id\n description\n flowID\n stats {\n playerName\n playCategory\n primaryPosition\n }\n }\n }\n }\n }\n \"\"\"\n % setId)\n result = client.execute(query)\n setName = result['getSet']['set']['flowName']\n setSeries = result['getSet']['set']['flowSeriesNumber']\n setName += ' S' + str(setSeries) + '.json'\n path = os.path.join(DIR, setName)\n with open(path, 'w') as outfile:\n json.dump(result, outfile, indent=4)\n print(f'Finished writing file: {setName}')\nprint()\nprint(f'Total query: {count}')\nprint('--------Querying 
COMPLETED.--------')\nprint()\n", "step-4": "import os\nimport json\nimport pathlib\nfrom gql import gql, Client\nfrom gql.transport.aiohttp import AIOHTTPTransport\ntransport = AIOHTTPTransport(url='https://public-api.nbatopshot.com/graphql')\nclient = Client(transport=transport, fetch_schema_from_transport=True)\nDIR = 'graphqlData'\ncount = 0\nsetsId_s1 = ['28eddc41-6a11-4ff8-8ec6-15041aa01e80',\n 'c561f66b-5bd8-451c-8686-156073c3fb69',\n 'a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3',\n '7b797690-5b53-45a7-b972-bd2d5152654a',\n '12a8288a-addc-4e5c-8af7-b3ba6e5161d4',\n 'a494c64e-9e93-418c-8934-f331ee47a39b',\n 'feead270-621c-4cde-baac-2f6834e9e594',\n 'd2378dc1-1168-410b-893d-e084170a402e',\n 'a156f083-e902-49d3-a113-bd61702c336a',\n 'd4712d31-9030-40de-b1a6-1fb9964348f3',\n '5f85e04f-798f-434c-89d4-2b0a575bd652',\n '252e83ac-b3a4-407e-82d2-138beb60b5b9',\n '9c8202c7-698b-4f44-b029-b70ddc49e9dc',\n 'dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa',\n '3a0ae6ce-f22e-4d98-b1fe-906f859df983',\n '4e166b27-3099-44c3-9de3-cac2b9751692',\n '18b2d80e-d38d-4678-9b7f-c2bfb223259e',\n '2dbc545a-25a5-4208-8e89-bbb6c3e3a364',\n '2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e',\n '320cae53-d585-4e74-8a66-404fa3543c19',\n '814c5183-596f-41d7-9135-c6b29faa9c6d',\n 'b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc',\n '827f9328-03aa-4cb5-97cd-7b5f2c2386fd']\nsetsId_s2 = ['757f23fd-f7ae-465c-a006-f09dcfd5dbd5',\n '496d52b8-8d6c-4071-8672-d18551f86a3e',\n '208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9',\n '122b048d-585e-4c63-8275-c23949576fd6',\n '708a6f60-5c93-406e-854f-50dd6734c0dd',\n 'f493db4a-a775-4d6e-be8a-56fae509a92d',\n '0a528e81-5bb0-4bf8-a7f9-6dbd183528ce',\n '737f9997-8817-4a74-9c13-88b99c37d118',\n 'b2605806-0d47-439f-ba72-784897470bb0',\n '33a4a3a3-a32c-4925-a4e8-7d24e56b105e',\n '54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e',\n 'ad8e85a4-2240-4604-95f6-be826966d988']\nsetsIdList = [setsId_s1, setsId_s2]\npathlib.Path(DIR).mkdir(parents=True, exist_ok=True)\nprint('--------Query Topshot GraphQL Endpoint--------')\nfor setsId in setsIdList:\n for setId in setsId:\n count += 1\n query = gql(\n \"\"\"\n {\n getSet (input: {setID: \"%s\"}) {\n set {\n id\n flowId\n flowName\n flowSeriesNumber\n plays {\n id\n description\n flowID\n stats {\n playerName\n playCategory\n primaryPosition\n }\n }\n }\n }\n }\n \"\"\"\n % setId)\n result = client.execute(query)\n setName = result['getSet']['set']['flowName']\n setSeries = result['getSet']['set']['flowSeriesNumber']\n setName += ' S' + str(setSeries) + '.json'\n path = os.path.join(DIR, setName)\n with open(path, 'w') as outfile:\n json.dump(result, outfile, indent=4)\n print(f'Finished writing file: {setName}')\nprint()\nprint(f'Total query: {count}')\nprint('--------Querying COMPLETED.--------')\nprint()\n", "step-5": "import os\nimport json\nimport pathlib\nfrom gql import gql, Client\nfrom gql.transport.aiohttp import AIOHTTPTransport\n\n# Select your transport with a defined url endpoint\ntransport = AIOHTTPTransport(url=\"https://public-api.nbatopshot.com/graphql\")\n\n# Create a GraphQL client using the defined transport\nclient = Client(transport=transport, fetch_schema_from_transport=True)\n\n# Set the directory name\nDIR = \"graphqlData\"\n\n# Set Query counter\ncount = 0\n\n# set ids\nsetsId_s1 = [\n \"28eddc41-6a11-4ff8-8ec6-15041aa01e80\", \n \"c561f66b-5bd8-451c-8686-156073c3fb69\",\n \"a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3\",\n \"7b797690-5b53-45a7-b972-bd2d5152654a\",\n \"12a8288a-addc-4e5c-8af7-b3ba6e5161d4\",\n \"a494c64e-9e93-418c-8934-f331ee47a39b\",\n 
\"feead270-621c-4cde-baac-2f6834e9e594\",\n \"d2378dc1-1168-410b-893d-e084170a402e\",\n \"a156f083-e902-49d3-a113-bd61702c336a\",\n \"d4712d31-9030-40de-b1a6-1fb9964348f3\",\n \"5f85e04f-798f-434c-89d4-2b0a575bd652\",\n \"252e83ac-b3a4-407e-82d2-138beb60b5b9\",\n \"9c8202c7-698b-4f44-b029-b70ddc49e9dc\",\n \"dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa\",\n \"3a0ae6ce-f22e-4d98-b1fe-906f859df983\",\n \"4e166b27-3099-44c3-9de3-cac2b9751692\",\n \"18b2d80e-d38d-4678-9b7f-c2bfb223259e\",\n \"2dbc545a-25a5-4208-8e89-bbb6c3e3a364\",\n \"2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e\",\n \"320cae53-d585-4e74-8a66-404fa3543c19\",\n \"814c5183-596f-41d7-9135-c6b29faa9c6d\",\n \"b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc\",\n \"827f9328-03aa-4cb5-97cd-7b5f2c2386fd\"\n]\n\nsetsId_s2 = [\n \"757f23fd-f7ae-465c-a006-f09dcfd5dbd5\",\n \"496d52b8-8d6c-4071-8672-d18551f86a3e\",\n \"208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9\",\n \"122b048d-585e-4c63-8275-c23949576fd6\",\n \"708a6f60-5c93-406e-854f-50dd6734c0dd\",\n \"f493db4a-a775-4d6e-be8a-56fae509a92d\",\n \"0a528e81-5bb0-4bf8-a7f9-6dbd183528ce\",\n \"737f9997-8817-4a74-9c13-88b99c37d118\",\n \"b2605806-0d47-439f-ba72-784897470bb0\",\n \"33a4a3a3-a32c-4925-a4e8-7d24e56b105e\",\n \"54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e\",\n \"ad8e85a4-2240-4604-95f6-be826966d988\"\n]\n\nsetsIdList = [setsId_s1, setsId_s2]\n\n# Make a directory if not exist\npathlib.Path(DIR).mkdir(parents=True, exist_ok=True)\n\nprint(\"--------Query Topshot GraphQL Endpoint--------\")\n\n# Provide a GraphQL query\nfor setsId in setsIdList:\n for setId in setsId:\n count += 1\n query = gql(\n \"\"\"\n {\n getSet (input: {setID: \"%s\"}) {\n set {\n id\n flowId\n flowName\n flowSeriesNumber\n plays {\n id\n description\n flowID\n stats {\n playerName\n playCategory\n primaryPosition\n }\n }\n }\n }\n }\n \"\"\" % setId\n )\n # Execute the query on the transport\n result = client.execute(query)\n\n # Configure json filename and save path\n setName = result[\"getSet\"][\"set\"][\"flowName\"]\n setSeries = result[\"getSet\"][\"set\"][\"flowSeriesNumber\"]\n setName += \" S\" + str(setSeries) + \".json\"\n path = os.path.join(DIR, setName)\n\n # Write files to save path\n with open(path, 'w') as outfile:\n json.dump(result, outfile, indent=4)\n print(f\"Finished writing file: {setName}\")\n\nprint()\nprint(f\"Total query: {count}\")\nprint(\"--------Querying COMPLETED.--------\")\nprint()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if v % 4 == 0: print('Yeah!') else: print('End of the program') <|reserved_special_token_1|> v = 426 if v % 4 == 0: print('Yeah!') else: print('End of the program') <|reserved_special_token_1|> v = 426 # print 'Yeah!' if dividable by 4 but print 'End of program' after regardless if (v%4) == 0: print ("Yeah!") else: print ("End of the program")
flexible
{ "blob_id": "ceca1be15aded0a842c5f2c6183e4f54aba4fd24", "index": 6752, "step-1": "<mask token>\n", "step-2": "<mask token>\nif v % 4 == 0:\n print('Yeah!')\nelse:\n print('End of the program')\n", "step-3": "v = 426\nif v % 4 == 0:\n print('Yeah!')\nelse:\n print('End of the program')\n", "step-4": "v = 426\n# print 'Yeah!' if dividable by 4 but print 'End of program' after regardless\n\nif (v%4) == 0:\n print (\"Yeah!\")\nelse:\n print (\"End of the program\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class TeamWordBinding(ResourceBinding): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> @classmethod def group_names(self, instance, action): return [str(instance.user.group.team)] def has_permission(self, user, action, pk): logger.debug('TW has_permission {} {} {}'.format(user, action, pk)) if action in ['update', 'delete']: return False if action == 'create': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'word' not in payload['data']: logger.debug('Possibly malicious malformed TeamWord from {}' .format(self.user.username)) return False word = payload['data']['word'] word_letters = set(word.lower()) if len(word_letters) == 0: return False user = self.user user_letters = set() for letter in UserLetter.objects.filter(user=user): user_letters.add(letter.letter.lower()) for letter in LetterTransaction.objects.filter(borrower=user, approved=True): user_letters.add(letter.letter.lower()) if not word_letters.issubset(user_letters): return False team_words = set() for tword in self.get_queryset(): team_words.add(tword.word) if word in team_words: return False try: wordObj = Dictionary.objects.get(word=word) except Exception as e: return False return True return True class UserLetterBinding(ResourceBinding): model = UserLetter stream = 'userletters' serializer_class = UserLetterSerializer def get_queryset(self): queries = Q(user=self.user) for profile in self.message.user.group.profile_set.all(): queries |= Q(user=profile.user) return UserLetter.objects.filter(queries) @classmethod def group_names(self, instance, action): logger.debug(str(instance)) return [instance.user.username + 'solo'] def has_permission(self, user, action, pk): logger.debug('UL has_permission {} {} {}'.format(user, action, pk)) if action in ['create', 'update', 'delete']: return False return True class LetterTransactionBinding(ResourceBinding): model = LetterTransaction stream = 'lettertransactions' serializer_class = LetterTransactionSerializer def get_queryset(self): return LetterTransaction.objects.filter(Q(borrower=self.user) | Q( letter__user=self.user)) @classmethod def group_names(self, instance, action): return [instance.borrower.username + 'solo', instance.letter.user. username + 'solo'] def has_permission(self, user, action, pk): logger.debug('TR has_permission {} {} {}'.format(user, action, self .message.content['text'])) if action == 'delete': return False if action == 'create' or action == 'update': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'letter' not in payload['data']: logger.debug( 'Possibly malicious malformed LetterTransaction from {}' .format(self.user.username)) return False ul = UserLetter.objects.get(pk=payload['data']['letter']) if ul.user.profile not in self.user.group.profile_set.all(): logger.debug( 'Malicious LetterTransaction creation suspected by {}'. 
format(self.user.username)) return False return True <|reserved_special_token_1|> <|reserved_special_token_0|> class TeamWordBinding(ResourceBinding): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_queryset(self): return TeamWord.objects.filter(user__group__team=self.user.group.team) @classmethod def group_names(self, instance, action): return [str(instance.user.group.team)] def has_permission(self, user, action, pk): logger.debug('TW has_permission {} {} {}'.format(user, action, pk)) if action in ['update', 'delete']: return False if action == 'create': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'word' not in payload['data']: logger.debug('Possibly malicious malformed TeamWord from {}' .format(self.user.username)) return False word = payload['data']['word'] word_letters = set(word.lower()) if len(word_letters) == 0: return False user = self.user user_letters = set() for letter in UserLetter.objects.filter(user=user): user_letters.add(letter.letter.lower()) for letter in LetterTransaction.objects.filter(borrower=user, approved=True): user_letters.add(letter.letter.lower()) if not word_letters.issubset(user_letters): return False team_words = set() for tword in self.get_queryset(): team_words.add(tword.word) if word in team_words: return False try: wordObj = Dictionary.objects.get(word=word) except Exception as e: return False return True return True class UserLetterBinding(ResourceBinding): model = UserLetter stream = 'userletters' serializer_class = UserLetterSerializer def get_queryset(self): queries = Q(user=self.user) for profile in self.message.user.group.profile_set.all(): queries |= Q(user=profile.user) return UserLetter.objects.filter(queries) @classmethod def group_names(self, instance, action): logger.debug(str(instance)) return [instance.user.username + 'solo'] def has_permission(self, user, action, pk): logger.debug('UL has_permission {} {} {}'.format(user, action, pk)) if action in ['create', 'update', 'delete']: return False return True class LetterTransactionBinding(ResourceBinding): model = LetterTransaction stream = 'lettertransactions' serializer_class = LetterTransactionSerializer def get_queryset(self): return LetterTransaction.objects.filter(Q(borrower=self.user) | Q( letter__user=self.user)) @classmethod def group_names(self, instance, action): return [instance.borrower.username + 'solo', instance.letter.user. username + 'solo'] def has_permission(self, user, action, pk): logger.debug('TR has_permission {} {} {}'.format(user, action, self .message.content['text'])) if action == 'delete': return False if action == 'create' or action == 'update': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'letter' not in payload['data']: logger.debug( 'Possibly malicious malformed LetterTransaction from {}' .format(self.user.username)) return False ul = UserLetter.objects.get(pk=payload['data']['letter']) if ul.user.profile not in self.user.group.profile_set.all(): logger.debug( 'Malicious LetterTransaction creation suspected by {}'. 
format(self.user.username)) return False return True <|reserved_special_token_1|> <|reserved_special_token_0|> class TeamWordBinding(ResourceBinding): model = TeamWord stream = 'teamwords' serializer_class = TeamWordSerializer def get_queryset(self): return TeamWord.objects.filter(user__group__team=self.user.group.team) @classmethod def group_names(self, instance, action): return [str(instance.user.group.team)] def has_permission(self, user, action, pk): logger.debug('TW has_permission {} {} {}'.format(user, action, pk)) if action in ['update', 'delete']: return False if action == 'create': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'word' not in payload['data']: logger.debug('Possibly malicious malformed TeamWord from {}' .format(self.user.username)) return False word = payload['data']['word'] word_letters = set(word.lower()) if len(word_letters) == 0: return False user = self.user user_letters = set() for letter in UserLetter.objects.filter(user=user): user_letters.add(letter.letter.lower()) for letter in LetterTransaction.objects.filter(borrower=user, approved=True): user_letters.add(letter.letter.lower()) if not word_letters.issubset(user_letters): return False team_words = set() for tword in self.get_queryset(): team_words.add(tword.word) if word in team_words: return False try: wordObj = Dictionary.objects.get(word=word) except Exception as e: return False return True return True class UserLetterBinding(ResourceBinding): model = UserLetter stream = 'userletters' serializer_class = UserLetterSerializer def get_queryset(self): queries = Q(user=self.user) for profile in self.message.user.group.profile_set.all(): queries |= Q(user=profile.user) return UserLetter.objects.filter(queries) @classmethod def group_names(self, instance, action): logger.debug(str(instance)) return [instance.user.username + 'solo'] def has_permission(self, user, action, pk): logger.debug('UL has_permission {} {} {}'.format(user, action, pk)) if action in ['create', 'update', 'delete']: return False return True class LetterTransactionBinding(ResourceBinding): model = LetterTransaction stream = 'lettertransactions' serializer_class = LetterTransactionSerializer def get_queryset(self): return LetterTransaction.objects.filter(Q(borrower=self.user) | Q( letter__user=self.user)) @classmethod def group_names(self, instance, action): return [instance.borrower.username + 'solo', instance.letter.user. username + 'solo'] def has_permission(self, user, action, pk): logger.debug('TR has_permission {} {} {}'.format(user, action, self .message.content['text'])) if action == 'delete': return False if action == 'create' or action == 'update': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'letter' not in payload['data']: logger.debug( 'Possibly malicious malformed LetterTransaction from {}' .format(self.user.username)) return False ul = UserLetter.objects.get(pk=payload['data']['letter']) if ul.user.profile not in self.user.group.profile_set.all(): logger.debug( 'Malicious LetterTransaction creation suspected by {}'. 
format(self.user.username)) return False return True <|reserved_special_token_1|> import json import logging logger = logging.getLogger(__name__) from django.db.models import Q from channels_api.bindings import ResourceBinding from .models import LetterTransaction, UserLetter, TeamWord, Dictionary from .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer class TeamWordBinding(ResourceBinding): model = TeamWord stream = 'teamwords' serializer_class = TeamWordSerializer def get_queryset(self): return TeamWord.objects.filter(user__group__team=self.user.group.team) @classmethod def group_names(self, instance, action): return [str(instance.user.group.team)] def has_permission(self, user, action, pk): logger.debug('TW has_permission {} {} {}'.format(user, action, pk)) if action in ['update', 'delete']: return False if action == 'create': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'word' not in payload['data']: logger.debug('Possibly malicious malformed TeamWord from {}' .format(self.user.username)) return False word = payload['data']['word'] word_letters = set(word.lower()) if len(word_letters) == 0: return False user = self.user user_letters = set() for letter in UserLetter.objects.filter(user=user): user_letters.add(letter.letter.lower()) for letter in LetterTransaction.objects.filter(borrower=user, approved=True): user_letters.add(letter.letter.lower()) if not word_letters.issubset(user_letters): return False team_words = set() for tword in self.get_queryset(): team_words.add(tword.word) if word in team_words: return False try: wordObj = Dictionary.objects.get(word=word) except Exception as e: return False return True return True class UserLetterBinding(ResourceBinding): model = UserLetter stream = 'userletters' serializer_class = UserLetterSerializer def get_queryset(self): queries = Q(user=self.user) for profile in self.message.user.group.profile_set.all(): queries |= Q(user=profile.user) return UserLetter.objects.filter(queries) @classmethod def group_names(self, instance, action): logger.debug(str(instance)) return [instance.user.username + 'solo'] def has_permission(self, user, action, pk): logger.debug('UL has_permission {} {} {}'.format(user, action, pk)) if action in ['create', 'update', 'delete']: return False return True class LetterTransactionBinding(ResourceBinding): model = LetterTransaction stream = 'lettertransactions' serializer_class = LetterTransactionSerializer def get_queryset(self): return LetterTransaction.objects.filter(Q(borrower=self.user) | Q( letter__user=self.user)) @classmethod def group_names(self, instance, action): return [instance.borrower.username + 'solo', instance.letter.user. username + 'solo'] def has_permission(self, user, action, pk): logger.debug('TR has_permission {} {} {}'.format(user, action, self .message.content['text'])) if action == 'delete': return False if action == 'create' or action == 'update': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'letter' not in payload['data']: logger.debug( 'Possibly malicious malformed LetterTransaction from {}' .format(self.user.username)) return False ul = UserLetter.objects.get(pk=payload['data']['letter']) if ul.user.profile not in self.user.group.profile_set.all(): logger.debug( 'Malicious LetterTransaction creation suspected by {}'. 
format(self.user.username)) return False return True <|reserved_special_token_1|> import json import logging logger = logging.getLogger(__name__) from django.db.models import Q from channels_api.bindings import ResourceBinding from .models import LetterTransaction, UserLetter, TeamWord, Dictionary from .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer class TeamWordBinding(ResourceBinding): model = TeamWord stream = "teamwords" serializer_class = TeamWordSerializer def get_queryset(self): return TeamWord.objects.filter(user__group__team=self.user.group.team) @classmethod def group_names(self, instance, action): return [str(instance.user.group.team)] def has_permission(self, user, action, pk): logger.debug("TW has_permission {} {} {}".format(user, action, pk)) if action in ['update', 'delete']: return False if action == 'create': payload = json.loads(self.message.content['text']) if 'data' not in payload or 'word' not in payload['data']: logger.debug("Possibly malicious malformed TeamWord from {}".format(self.user.username)) return False word = payload['data']['word'] word_letters = set(word.lower()) if len(word_letters) == 0: return False user = self.user user_letters = set() for letter in UserLetter.objects.filter(user=user): user_letters.add(letter.letter.lower()) for letter in LetterTransaction.objects.filter(borrower=user, approved=True): user_letters.add(letter.letter.lower()) if not word_letters.issubset(user_letters): return False team_words = set() for tword in self.get_queryset(): team_words.add(tword.word) if word in team_words: return False try: wordObj = Dictionary.objects.get(word=word) except Exception as e: return False return True # allow list, retrieve, subscribe return True class UserLetterBinding(ResourceBinding): model = UserLetter stream = "userletters" serializer_class = UserLetterSerializer def get_queryset(self): queries = Q(user=self.user) for profile in self.message.user.group.profile_set.all(): queries |= Q(user=profile.user) return UserLetter.objects.filter(queries) @classmethod def group_names(self, instance, action): logger.debug(str(instance)) return [instance.user.username + "solo"] def has_permission(self, user, action, pk): logger.debug("UL has_permission {} {} {}".format(user, action, pk)) if action in ['create', 'update', 'delete']: return False # allow list, retrieve, subscribe return True class LetterTransactionBinding(ResourceBinding): model = LetterTransaction stream = "lettertransactions" serializer_class = LetterTransactionSerializer def get_queryset(self): return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user)) @classmethod def group_names(self, instance, action): # Send this to only the borrower and lender return [instance.borrower.username + "solo", instance.letter.user.username + "solo"] def has_permission(self, user, action, pk): logger.debug("TR has_permission {} {} {}".format(user, action, self.message.content['text'])) if action == "delete": return False if action == "create" or action == "update": payload = json.loads(self.message.content['text']) if 'data' not in payload or 'letter' not in payload['data']: logger.debug("Possibly malicious malformed LetterTransaction from {}".format(self.user.username)) return False ul = UserLetter.objects.get(pk=payload['data']['letter']) # If this UserLetter is not owned by a friend, permission denied if ul.user.profile not in self.user.group.profile_set.all(): logger.debug("Malicious LetterTransaction creation suspected by 
{}".format(self.user.username)) return False # allow list, retrieve, subscribe, and legitimate create return True
flexible
{ "blob_id": "c2e0f2eda6ef44a52ee4e192b8eb71bde0a69bff", "index": 8954, "step-1": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n", "step-2": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def 
has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n", "step-3": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 
'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n", "step-4": "import json\nimport logging\nlogger = logging.getLogger(__name__)\nfrom django.db.models import Q\nfrom channels_api.bindings import ResourceBinding\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = 
json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n", "step-5": "import json\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.db.models import Q\n\nfrom channels_api.bindings import ResourceBinding\n\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n\n model = TeamWord\n stream = \"teamwords\"\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TW has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['update', 'delete']:\n return 
False\n\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug(\"Possibly malicious malformed TeamWord from {}\".format(self.user.username))\n return False\n\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user, approved=True):\n user_letters.add(letter.letter.lower())\n\n if not word_letters.issubset(user_letters):\n return False\n\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n\n if word in team_words:\n return False\n\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n\n return True\n\n # allow list, retrieve, subscribe\n return True\n\n \nclass UserLetterBinding(ResourceBinding):\n\n model = UserLetter\n stream = \"userletters\"\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"UL has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['create', 'update', 'delete']:\n return False\n\n # allow list, retrieve, subscribe\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n\n model = LetterTransaction\n stream = \"lettertransactions\"\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n # Send this to only the borrower and lender\n return [instance.borrower.username + \"solo\", instance.letter.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TR has_permission {} {} {}\".format(user, action, self.message.content['text']))\n\n if action == \"delete\":\n return False\n\n if action == \"create\" or action == \"update\":\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\"Possibly malicious malformed LetterTransaction from {}\".format(self.user.username))\n return False\n\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n\n # If this UserLetter is not owned by a friend, permission denied\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\"Malicious LetterTransaction creation suspected by {}\".format(self.user.username))\n return False\n\n # allow list, retrieve, subscribe, and legitimate create\n return True\n", "step-ids": [ 13, 14, 15, 17, 18 ] }
[ 13, 14, 15, 17, 18 ]
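The TeamWord permission check in the record above reduces to a set test: the submitted word must be non-empty, spellable from the user's own letters plus approved borrowed ones, new to the team, and present in the dictionary. A minimal standalone sketch of that validation with plain Python sets (the function and argument names here are illustrative, not taken from the record):

def can_create_word(word, owned_letters, borrowed_letters, team_words, dictionary):
    # Mirror of the record's has_permission checks, without the Django models.
    word_letters = set(word.lower())
    if not word_letters:
        return False
    available = {c.lower() for c in owned_letters} | {c.lower() for c in borrowed_letters}
    if not word_letters.issubset(available):
        return False
    if word in team_words:
        return False
    return word in dictionary

# Quick check with made-up data: 'cab' is spellable from 'abc', 'dab' is not.
assert can_create_word('cab', 'abc', '', set(), {'cab'})
assert not can_create_word('dab', 'abc', '', set(), {'dab'})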
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> curforth.execute(sql) <|reserved_special_token_0|> for record in result: print(record) <|reserved_special_token_1|> <|reserved_special_token_0|> forth = sqlite3.connect('databaserupin.db') sql = 'SELECT * from rupin;' curforth = forth.cursor() curforth.execute(sql) result = curforth.fetchall() for record in result: print(record) <|reserved_special_token_1|> import sqlite3 forth = sqlite3.connect('databaserupin.db') sql = 'SELECT * from rupin;' curforth = forth.cursor() curforth.execute(sql) result = curforth.fetchall() for record in result: print(record) <|reserved_special_token_1|> import sqlite3 forth = sqlite3.connect('databaserupin.db') sql = "SELECT * from rupin;" curforth = forth.cursor() curforth.execute(sql) result = curforth.fetchall() for record in result: print(record)
flexible
{ "blob_id": "a7f082737bf476a4bc6a40c962764c05bed9ee14", "index": 9247, "step-1": "<mask token>\n", "step-2": "<mask token>\ncurforth.execute(sql)\n<mask token>\nfor record in result:\n print(record)\n", "step-3": "<mask token>\nforth = sqlite3.connect('databaserupin.db')\nsql = 'SELECT * from rupin;'\ncurforth = forth.cursor()\ncurforth.execute(sql)\nresult = curforth.fetchall()\nfor record in result:\n print(record)\n", "step-4": "import sqlite3\nforth = sqlite3.connect('databaserupin.db')\nsql = 'SELECT * from rupin;'\ncurforth = forth.cursor()\ncurforth.execute(sql)\nresult = curforth.fetchall()\nfor record in result:\n print(record)\n", "step-5": "import sqlite3\n\nforth = sqlite3.connect('databaserupin.db')\n\nsql = \"SELECT * from rupin;\"\n\ncurforth = forth.cursor()\ncurforth.execute(sql)\n\nresult = curforth.fetchall()\nfor record in result:\n print(record)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
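The sqlite3 record above opens the connection and cursor by hand and never closes them. The same query written so that both are closed deterministically (a sketch, reusing the record's 'databaserupin.db' file and 'rupin' table):

import sqlite3
from contextlib import closing

with closing(sqlite3.connect('databaserupin.db')) as conn:
    with closing(conn.cursor()) as cur:
        cur.execute('SELECT * from rupin;')
        for record in cur.fetchall():
            print(record)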
# https://leetcode-cn.com/problems/zigzag-conversion/ # 6. Z 字形变换 class Solution: def convert(self, s: str, numRows: int) -> str: res = '' for i in range(numRows): pass return res
normal
{ "blob_id": "aa952e8f9a1855b5578cb26d6e5aca42605ee585", "index": 5454, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def convert(self, s: str, numRows: int) ->str:\n res = ''\n for i in range(numRows):\n pass\n return res\n", "step-4": "# https://leetcode-cn.com/problems/zigzag-conversion/\n# 6. Z 字形变换\n\n\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n res = ''\n for i in range(numRows):\n pass\n return res\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
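The zigzag-conversion record above stops at a stub (the loop body is pass). One standard way to complete it, shown as a sketch rather than as the record's intended solution, is to append each character to its row and bounce the row index at the top and bottom rows:

class Solution:
    def convert(self, s: str, numRows: int) -> str:
        # One row (or more rows than characters) leaves the order unchanged.
        if numRows == 1 or numRows >= len(s):
            return s
        rows = [''] * numRows
        row, step = 0, 1
        for ch in s:
            rows[row] += ch
            if row == 0:
                step = 1            # head back down at the top row
            elif row == numRows - 1:
                step = -1           # head back up at the bottom row
            row += step
        return ''.join(rows)

# Worked example from the original problem statement:
assert Solution().convert('PAYPALISHIRING', 3) == 'PAHNAPLSIIGYIR'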
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def main(): n = int(input()) a_list = list(map(int, input().split())) a_list_reversed, num_reverse = bubble_sort(a_list, n) print(' '.join(map(str, a_list_reversed))) print(num_reverse) <|reserved_special_token_0|> <|reserved_special_token_1|> def bubble_sort(a_list, n): num_reverse = 0 for i in range(n): for j in range(n - i - 1): if a_list[-j - 2] > a_list[-j - 1]: tmp_elem = a_list[-j - 1] a_list[-j - 1] = a_list[-j - 2] a_list[-j - 2] = tmp_elem num_reverse += 1 return a_list, num_reverse def main(): n = int(input()) a_list = list(map(int, input().split())) a_list_reversed, num_reverse = bubble_sort(a_list, n) print(' '.join(map(str, a_list_reversed))) print(num_reverse) <|reserved_special_token_0|> <|reserved_special_token_1|> def bubble_sort(a_list, n): num_reverse = 0 for i in range(n): for j in range(n - i - 1): if a_list[-j - 2] > a_list[-j - 1]: tmp_elem = a_list[-j - 1] a_list[-j - 1] = a_list[-j - 2] a_list[-j - 2] = tmp_elem num_reverse += 1 return a_list, num_reverse def main(): n = int(input()) a_list = list(map(int, input().split())) a_list_reversed, num_reverse = bubble_sort(a_list, n) print(' '.join(map(str, a_list_reversed))) print(num_reverse) if __name__ == '__main__': main() <|reserved_special_token_1|> # Accepted def bubble_sort(a_list, n): num_reverse = 0 for i in range(n): for j in range(n - i - 1): # With a for roop (reversed order), # index starts -1, -2 ,..., # NOT -0, -1, ... if a_list[-j - 2] > a_list[-j - 1]: tmp_elem = a_list[-j - 1] a_list[-j - 1] = a_list[-j - 2] a_list[-j - 2] = tmp_elem num_reverse += 1 return a_list, num_reverse def main(): # Input n = int(input()) a_list = list(map(int, input().split())) # Sort a_list_reversed, num_reverse = bubble_sort(a_list, n) # Output print(" ".join(map(str, a_list_reversed))) print(num_reverse) if __name__ == '__main__': main()
flexible
{ "blob_id": "fef1273552350bfaf075d90279c9f10a965cae25", "index": 2939, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n n = int(input())\n a_list = list(map(int, input().split()))\n a_list_reversed, num_reverse = bubble_sort(a_list, n)\n print(' '.join(map(str, a_list_reversed)))\n print(num_reverse)\n\n\n<mask token>\n", "step-3": "def bubble_sort(a_list, n):\n num_reverse = 0\n for i in range(n):\n for j in range(n - i - 1):\n if a_list[-j - 2] > a_list[-j - 1]:\n tmp_elem = a_list[-j - 1]\n a_list[-j - 1] = a_list[-j - 2]\n a_list[-j - 2] = tmp_elem\n num_reverse += 1\n return a_list, num_reverse\n\n\ndef main():\n n = int(input())\n a_list = list(map(int, input().split()))\n a_list_reversed, num_reverse = bubble_sort(a_list, n)\n print(' '.join(map(str, a_list_reversed)))\n print(num_reverse)\n\n\n<mask token>\n", "step-4": "def bubble_sort(a_list, n):\n num_reverse = 0\n for i in range(n):\n for j in range(n - i - 1):\n if a_list[-j - 2] > a_list[-j - 1]:\n tmp_elem = a_list[-j - 1]\n a_list[-j - 1] = a_list[-j - 2]\n a_list[-j - 2] = tmp_elem\n num_reverse += 1\n return a_list, num_reverse\n\n\ndef main():\n n = int(input())\n a_list = list(map(int, input().split()))\n a_list_reversed, num_reverse = bubble_sort(a_list, n)\n print(' '.join(map(str, a_list_reversed)))\n print(num_reverse)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# Accepted\ndef bubble_sort(a_list, n):\n num_reverse = 0\n for i in range(n):\n for j in range(n - i - 1):\n # With a for roop (reversed order), \n # index starts -1, -2 ,...,\n # NOT -0, -1, ...\n if a_list[-j - 2] > a_list[-j - 1]:\n tmp_elem = a_list[-j - 1]\n a_list[-j - 1] = a_list[-j - 2]\n a_list[-j - 2] = tmp_elem\n num_reverse += 1\n return a_list, num_reverse\n\n\ndef main():\n # Input\n n = int(input())\n a_list = list(map(int, input().split()))\n # Sort\n a_list_reversed, num_reverse = bubble_sort(a_list, n)\n # Output\n print(\" \".join(map(str, a_list_reversed)))\n print(num_reverse)\n\n\nif __name__ == '__main__':\n main()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
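One property of the bubble-sort record above worth spelling out: the swap counter it prints is exactly the number of inversions in the input (pairs that appear out of order), so it can be cross-checked against a brute-force count. A small self-contained check (helper names below are illustrative):

from itertools import combinations

def bubble_swaps(values):
    # Count the swaps a plain bubble sort performs on a copy of the input.
    a = list(values)
    swaps = 0
    for i in range(len(a)):
        for j in range(len(a) - i - 1):
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
                swaps += 1
    return swaps

def inversions(values):
    # Brute-force count of pairs (i, j) with i < j and values[i] > values[j].
    return sum(1 for i, j in combinations(range(len(values)), 2) if values[i] > values[j])

sample = [5, 3, 2, 4, 1]
assert bubble_swaps(sample) == inversions(sample) == 8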
import os # must pip install sox # type sudo apt install sox into cmd duration = .2 # seconds freq = 550 # Hz os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
normal
{ "blob_id": "8397dcdcb9ec2f35dac0c26b8878a23f9149512b", "index": 3113, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n", "step-3": "<mask token>\nduration = 0.2\nfreq = 550\nos.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n", "step-4": "import os\nduration = 0.2\nfreq = 550\nos.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n", "step-5": "\nimport os\n# must pip install sox\n# type sudo apt install sox into cmd\nduration = .2 # seconds\nfreq = 550 # Hz\nos.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
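The beep record above shells out to SoX's play command line tool; the play binary itself comes from the system SoX package rather than from pip. A slightly more defensive variant of the same call (an illustration, not part of the record) checks for the binary first and passes the arguments as a list instead of formatting a shell string:

import shutil
import subprocess

duration = 0.2  # seconds, as in the record
freq = 550      # Hz, as in the record

if shutil.which('play') is None:
    raise SystemExit('SoX "play" binary not found on PATH (e.g. install the sox package)')

subprocess.run(['play', '-nq', '-t', 'alsa', 'synth', str(duration), 'sine', str(freq)], check=True)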
<|reserved_special_token_0|> @pytest.fixture() def test_data(): """Return data used for tests in this file.""" x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float) y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float) z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364], dtype=float) return x, y, z @pytest.fixture() def test_grid(): """Return grid locations used for tests in this file.""" with get_test_data('interpolation_test_grid.npz') as fobj: data = np.load(fobj) return data['xg'], data['yg'] def test_natural_neighbor(test_data, test_grid): """Test natural neighbor interpolation function.""" xp, yp, z = test_data xg, yg = test_grid img = natural_neighbor(xp, yp, z, xg, yg) with get_test_data('nn_bbox0to100.npz') as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) <|reserved_special_token_0|> @pytest.mark.parametrize('method', interp_methods) def test_inverse_distance(method, test_data, test_grid): """Test inverse distance interpolation function.""" xp, yp, z = test_data xg, yg = test_grid extra_kw = {} if method == 'cressman': extra_kw['r'] = 20 extra_kw['min_neighbors'] = 1 test_file = 'cressman_r20_mn1.npz' elif method == 'barnes': extra_kw['r'] = 40 extra_kw['kappa'] = 100 test_file = 'barnes_r40_k100.npz' img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw) with get_test_data(test_file) as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) <|reserved_special_token_0|> def test_barnes_weights(): """Test Barnes weights function.""" kappa = 1000000 gamma = 0.5 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = barnes_weights(dist, kappa, gamma) * 10000000 truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, 1.26641655e-07] assert_array_almost_equal(truth, weights) <|reserved_special_token_0|> def test_cressman_point(test_data): """Test Cressman interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([30, 30], r=r) dists = dist_2(30, 30, xp[indices], yp[indices]) values = z[indices] truth = 1.05499444404 value = cressman_point(dists, values, r) assert_almost_equal(truth, value) def test_barnes_point(test_data): """Test Barnes interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([60, 60], r=r) dists = dist_2(60, 60, xp[indices], yp[indices]) values = z[indices] truth = 4.08718241061 ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp)))) kappa = calc_kappa(ave_spacing) value = barnes_point(dists, values, kappa) assert_almost_equal(truth, value) <|reserved_special_token_1|> <|reserved_special_token_0|> @pytest.fixture() def test_data(): """Return data used for tests in this file.""" x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float) y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float) z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364], dtype=float) return x, y, z @pytest.fixture() def test_grid(): """Return grid locations used for tests in this file.""" with get_test_data('interpolation_test_grid.npz') as fobj: data = np.load(fobj) return data['xg'], data['yg'] def test_natural_neighbor(test_data, test_grid): """Test natural neighbor interpolation function.""" xp, yp, z = test_data xg, yg = test_grid img = natural_neighbor(xp, yp, z, xg, yg) with get_test_data('nn_bbox0to100.npz') as fobj: truth = 
np.load(fobj)['img'] assert_array_almost_equal(truth, img) <|reserved_special_token_0|> @pytest.mark.parametrize('method', interp_methods) def test_inverse_distance(method, test_data, test_grid): """Test inverse distance interpolation function.""" xp, yp, z = test_data xg, yg = test_grid extra_kw = {} if method == 'cressman': extra_kw['r'] = 20 extra_kw['min_neighbors'] = 1 test_file = 'cressman_r20_mn1.npz' elif method == 'barnes': extra_kw['r'] = 40 extra_kw['kappa'] = 100 test_file = 'barnes_r40_k100.npz' img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw) with get_test_data(test_file) as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) def test_nn_point(test_data): """Test find natural neighbors for a point interpolation function.""" xp, yp, z = test_data tri = Delaunay(list(zip(xp, yp))) sim_gridx = [30] sim_gridy = [30] members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy))) val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0], tri_info) truth = 1.009 assert_almost_equal(truth, val, 3) def test_barnes_weights(): """Test Barnes weights function.""" kappa = 1000000 gamma = 0.5 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = barnes_weights(dist, kappa, gamma) * 10000000 truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, 1.26641655e-07] assert_array_almost_equal(truth, weights) def test_cressman_weights(): """Test Cressman weights function.""" r = 5000 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = cressman_weights(dist, r) truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, 0.219512195121951] assert_array_almost_equal(truth, weights) def test_cressman_point(test_data): """Test Cressman interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([30, 30], r=r) dists = dist_2(30, 30, xp[indices], yp[indices]) values = z[indices] truth = 1.05499444404 value = cressman_point(dists, values, r) assert_almost_equal(truth, value) def test_barnes_point(test_data): """Test Barnes interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([60, 60], r=r) dists = dist_2(60, 60, xp[indices], yp[indices]) values = z[indices] truth = 4.08718241061 ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp)))) kappa = calc_kappa(ave_spacing) value = barnes_point(dists, values, kappa) assert_almost_equal(truth, value) <|reserved_special_token_1|> <|reserved_special_token_0|> logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR) @pytest.fixture() def test_data(): """Return data used for tests in this file.""" x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float) y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float) z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364], dtype=float) return x, y, z @pytest.fixture() def test_grid(): """Return grid locations used for tests in this file.""" with get_test_data('interpolation_test_grid.npz') as fobj: data = np.load(fobj) return data['xg'], data['yg'] def test_natural_neighbor(test_data, test_grid): """Test natural neighbor interpolation function.""" xp, yp, z = test_data xg, yg = test_grid img = natural_neighbor(xp, yp, z, xg, yg) with get_test_data('nn_bbox0to100.npz') as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) <|reserved_special_token_0|> 
@pytest.mark.parametrize('method', interp_methods) def test_inverse_distance(method, test_data, test_grid): """Test inverse distance interpolation function.""" xp, yp, z = test_data xg, yg = test_grid extra_kw = {} if method == 'cressman': extra_kw['r'] = 20 extra_kw['min_neighbors'] = 1 test_file = 'cressman_r20_mn1.npz' elif method == 'barnes': extra_kw['r'] = 40 extra_kw['kappa'] = 100 test_file = 'barnes_r40_k100.npz' img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw) with get_test_data(test_file) as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) def test_nn_point(test_data): """Test find natural neighbors for a point interpolation function.""" xp, yp, z = test_data tri = Delaunay(list(zip(xp, yp))) sim_gridx = [30] sim_gridy = [30] members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy))) val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0], tri_info) truth = 1.009 assert_almost_equal(truth, val, 3) def test_barnes_weights(): """Test Barnes weights function.""" kappa = 1000000 gamma = 0.5 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = barnes_weights(dist, kappa, gamma) * 10000000 truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, 1.26641655e-07] assert_array_almost_equal(truth, weights) def test_cressman_weights(): """Test Cressman weights function.""" r = 5000 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = cressman_weights(dist, r) truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, 0.219512195121951] assert_array_almost_equal(truth, weights) def test_cressman_point(test_data): """Test Cressman interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([30, 30], r=r) dists = dist_2(30, 30, xp[indices], yp[indices]) values = z[indices] truth = 1.05499444404 value = cressman_point(dists, values, r) assert_almost_equal(truth, value) def test_barnes_point(test_data): """Test Barnes interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([60, 60], r=r) dists = dist_2(60, 60, xp[indices], yp[indices]) values = z[indices] truth = 4.08718241061 ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp)))) kappa = calc_kappa(ave_spacing) value = barnes_point(dists, values, kappa) assert_almost_equal(truth, value) <|reserved_special_token_1|> <|reserved_special_token_0|> logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR) @pytest.fixture() def test_data(): """Return data used for tests in this file.""" x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float) y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float) z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364], dtype=float) return x, y, z @pytest.fixture() def test_grid(): """Return grid locations used for tests in this file.""" with get_test_data('interpolation_test_grid.npz') as fobj: data = np.load(fobj) return data['xg'], data['yg'] def test_natural_neighbor(test_data, test_grid): """Test natural neighbor interpolation function.""" xp, yp, z = test_data xg, yg = test_grid img = natural_neighbor(xp, yp, z, xg, yg) with get_test_data('nn_bbox0to100.npz') as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) interp_methods = ['cressman', 'barnes'] @pytest.mark.parametrize('method', interp_methods) def test_inverse_distance(method, test_data, 
test_grid): """Test inverse distance interpolation function.""" xp, yp, z = test_data xg, yg = test_grid extra_kw = {} if method == 'cressman': extra_kw['r'] = 20 extra_kw['min_neighbors'] = 1 test_file = 'cressman_r20_mn1.npz' elif method == 'barnes': extra_kw['r'] = 40 extra_kw['kappa'] = 100 test_file = 'barnes_r40_k100.npz' img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw) with get_test_data(test_file) as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) def test_nn_point(test_data): """Test find natural neighbors for a point interpolation function.""" xp, yp, z = test_data tri = Delaunay(list(zip(xp, yp))) sim_gridx = [30] sim_gridy = [30] members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy))) val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0], tri_info) truth = 1.009 assert_almost_equal(truth, val, 3) def test_barnes_weights(): """Test Barnes weights function.""" kappa = 1000000 gamma = 0.5 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = barnes_weights(dist, kappa, gamma) * 10000000 truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, 1.26641655e-07] assert_array_almost_equal(truth, weights) def test_cressman_weights(): """Test Cressman weights function.""" r = 5000 dist = np.array([1000, 2000, 3000, 4000]) ** 2 weights = cressman_weights(dist, r) truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, 0.219512195121951] assert_array_almost_equal(truth, weights) def test_cressman_point(test_data): """Test Cressman interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([30, 30], r=r) dists = dist_2(30, 30, xp[indices], yp[indices]) values = z[indices] truth = 1.05499444404 value = cressman_point(dists, values, r) assert_almost_equal(truth, value) def test_barnes_point(test_data): """Test Barnes interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([60, 60], r=r) dists = dist_2(60, 60, xp[indices], yp[indices]) values = z[indices] truth = 4.08718241061 ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp)))) kappa = calc_kappa(ave_spacing) value = barnes_point(dists, values, kappa) assert_almost_equal(truth, value) <|reserved_special_token_1|> # Copyright (c) 2008-2016 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. 
# SPDX-License-Identifier: BSD-3-Clause """Test the `interpolation` module.""" from __future__ import division import logging import numpy as np from numpy.testing import assert_almost_equal, assert_array_almost_equal import pytest from scipy.spatial import cKDTree, Delaunay from scipy.spatial.distance import cdist from metpy.cbook import get_test_data from metpy.gridding.gridding_functions import calc_kappa from metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point, cressman_weights, inverse_distance, natural_neighbor, nn_point) from metpy.gridding.triangles import dist_2, find_natural_neighbors logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR) @pytest.fixture() def test_data(): r"""Return data used for tests in this file.""" x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float) y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float) z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364], dtype=float) return x, y, z @pytest.fixture() def test_grid(): r"""Return grid locations used for tests in this file.""" with get_test_data('interpolation_test_grid.npz') as fobj: data = np.load(fobj) return data['xg'], data['yg'] def test_natural_neighbor(test_data, test_grid): r"""Test natural neighbor interpolation function.""" xp, yp, z = test_data xg, yg = test_grid img = natural_neighbor(xp, yp, z, xg, yg) with get_test_data('nn_bbox0to100.npz') as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) interp_methods = ['cressman', 'barnes'] @pytest.mark.parametrize('method', interp_methods) def test_inverse_distance(method, test_data, test_grid): r"""Test inverse distance interpolation function.""" xp, yp, z = test_data xg, yg = test_grid extra_kw = {} if method == 'cressman': extra_kw['r'] = 20 extra_kw['min_neighbors'] = 1 test_file = 'cressman_r20_mn1.npz' elif method == 'barnes': extra_kw['r'] = 40 extra_kw['kappa'] = 100 test_file = 'barnes_r40_k100.npz' img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw) with get_test_data(test_file) as fobj: truth = np.load(fobj)['img'] assert_array_almost_equal(truth, img) def test_nn_point(test_data): r"""Test find natural neighbors for a point interpolation function.""" xp, yp, z = test_data tri = Delaunay(list(zip(xp, yp))) sim_gridx = [30] sim_gridy = [30] members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy))) val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0], tri_info) truth = 1.009 assert_almost_equal(truth, val, 3) def test_barnes_weights(): r"""Test Barnes weights function.""" kappa = 1000000 gamma = 0.5 dist = np.array([1000, 2000, 3000, 4000])**2 weights = barnes_weights(dist, kappa, gamma) * 10000000 truth = [1353352.832366126918939, 3354.626279025118388, .152299797447126, .000000126641655] assert_array_almost_equal(truth, weights) def test_cressman_weights(): r"""Test Cressman weights function.""" r = 5000 dist = np.array([1000, 2000, 3000, 4000])**2 weights = cressman_weights(dist, r) truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, 0.219512195121951] assert_array_almost_equal(truth, weights) def test_cressman_point(test_data): r"""Test Cressman interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([30, 30], r=r) dists = dist_2(30, 30, xp[indices], yp[indices]) values = z[indices] truth = 1.05499444404 value = cressman_point(dists, values, r) 
assert_almost_equal(truth, value) def test_barnes_point(test_data): r"""Test Barnes interpolation for a point function.""" xp, yp, z = test_data r = 40 obs_tree = cKDTree(list(zip(xp, yp))) indices = obs_tree.query_ball_point([60, 60], r=r) dists = dist_2(60, 60, xp[indices], yp[indices]) values = z[indices] truth = 4.08718241061 ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp))))) kappa = calc_kappa(ave_spacing) value = barnes_point(dists, values, kappa) assert_almost_equal(truth, value)
flexible
{ "blob_id": "9e987e057ee5322765415b84e84ef3c4d2827742", "index": 5466, "step-1": "<mask token>\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\n<mask token>\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n", "step-2": "<mask token>\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], 
data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n", "step-3": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this 
file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n", "step-4": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], 
dtype=float)\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n", "step-5": "# Copyright (c) 2008-2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `interpolation` module.\"\"\"\n\nfrom __future__ import division\n\nimport logging\n\nimport numpy as 
np\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nimport pytest\nfrom scipy.spatial import cKDTree, Delaunay\nfrom scipy.spatial.distance import cdist\n\nfrom metpy.cbook import get_test_data\nfrom metpy.gridding.gridding_functions import calc_kappa\nfrom metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,\n cressman_weights, inverse_distance,\n natural_neighbor, nn_point)\nfrom metpy.gridding.triangles import dist_2, find_natural_neighbors\n\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\[email protected]()\ndef test_data():\n r\"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z\n\n\[email protected]()\ndef test_grid():\n r\"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n r\"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n\n img = natural_neighbor(xp, yp, z, xg, yg)\n\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\[email protected]('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n r\"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n r\"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n\n tri = Delaunay(list(zip(xp, yp)))\n\n sim_gridx = [30]\n sim_gridy = [30]\n\n members, tri_info = find_natural_neighbors(tri,\n list(zip(sim_gridx, sim_gridy)))\n\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],\n tri, members[0], tri_info)\n\n truth = 1.009\n\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n r\"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n\n gamma = 0.5\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n\n truth = [1353352.832366126918939,\n 3354.626279025118388,\n .152299797447126,\n .000000126641655]\n\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n r\"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = cressman_weights(dist, r)\n\n truth = [0.923076923076923,\n 0.724137931034482,\n 0.470588235294117,\n 0.219512195121951]\n\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n r\"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([30, 30], 
r=r)\n\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 1.05499444404\n\n value = cressman_point(dists, values, r)\n\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n r\"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([60, 60], r=r)\n\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 4.08718241061\n\n ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))\n\n kappa = calc_kappa(ave_spacing)\n\n value = barnes_point(dists, values, kappa)\n\n assert_almost_equal(truth, value)\n", "step-ids": [ 7, 9, 10, 11, 13 ] }
[ 7, 9, 10, 11, 13 ]
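The truth arrays in the interpolation-test record above are consistent with the textbook weight forms, Cressman w = (r² − d²) / (r² + d²) and Barnes w = exp(−d² / (κ·γ)), both applied to squared distances. A small standalone check against the values listed in the record (an illustration of the formulas, not MetPy's own implementation):

import numpy as np

def cressman_w(dist_sq, r):
    # Cressman weight on squared distances: (r^2 - d^2) / (r^2 + d^2)
    return (r * r - dist_sq) / (r * r + dist_sq)

def barnes_w(dist_sq, kappa, gamma):
    # Barnes (Gaussian) weight on squared distances: exp(-d^2 / (kappa * gamma))
    return np.exp(-dist_sq / (kappa * gamma))

dist_sq = np.array([1000, 2000, 3000, 4000], dtype=float) ** 2

print(cressman_w(dist_sq, 5000))          # ~ [0.9231, 0.7241, 0.4706, 0.2195]
print(barnes_w(dist_sq, 1e6, 0.5) * 1e7)  # ~ [1353352.83, 3354.63, 0.1523, 1.27e-07]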
<|reserved_special_token_0|> def walkDockerfiles(path, splitFirt=True): """ 遍历目录中的所有dockerfile Arguments: path {string} -- 目录路径 Keyword Arguments: splitFirt {bool} -- 去除文件开头的path (default: {True}) Returns: array -- dockerfile文件列表 """ files_list = [] if not os.path.exists(path): return -1 for root, sub_dirs, files in os.walk(path): for filename in files: if isDockerfile(filename): fullFileName = os.path.join(root, filename) if splitFirt: fullFileName = fullFileName.replace(path, '') files_list.append(fullFileName) return files_list <|reserved_special_token_0|> def printMsg(level='I', msg=''): print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg) def main(): parser = _parse_args() dfs = parser.dfs registry = parser.registry push = parser.push action = parser.action if action == 'auto': dfs = gitLastDockerFiles() if len(dfs) < 1: printMsg('I', '最近1次无Dockerfile修改') elif action == 'all': dfs = walkDockerfiles('./') elif action == 'dfs': pass else: printMsg('E', '-a 错误,输入的参数,未定义') if len(dfs) > 0: for df in dfs: dockerDo(df, 'build', registry) if True == push: dockerDo(df, 'push', registry) else: printMsg('E', 'Dockerfile未找到') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def walkDockerfiles(path, splitFirt=True): """ 遍历目录中的所有dockerfile Arguments: path {string} -- 目录路径 Keyword Arguments: splitFirt {bool} -- 去除文件开头的path (default: {True}) Returns: array -- dockerfile文件列表 """ files_list = [] if not os.path.exists(path): return -1 for root, sub_dirs, files in os.walk(path): for filename in files: if isDockerfile(filename): fullFileName = os.path.join(root, filename) if splitFirt: fullFileName = fullFileName.replace(path, '') files_list.append(fullFileName) return files_list def isDockerfile(filename): dockerfileStr = 'Dockerfile' if dockerfileStr in filename: return True return False def gitLastDockerFiles(): """ git最近一次修改的Dockerfile文件 Returns: array -- 最近一次修改的Dockerfile """ gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1' os.chdir(os.path.dirname(os.path.realpath(__file__))) process = os.popen(gitlastcmd) gitlastOut = process.read() process.close() lines = gitlastOut.split('\n') last_files = [] for line in lines: line = line.strip('\n') if isDockerfile(line): last_files.append(line) return last_files def dockerDo(df='', action='build', registry=''): if df == '' or registry == '': printMsg('E', '输入的参数不完整') """tag生成策略 nginx/Dockerfile >> registry/nginx:latest nginx/alpine/Dockerfile >> registry/nginx:alpine php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine 目前只支持两级目录 """ dfpath = df.replace('/Dockerfile', '') tagArr = dfpath.split('/') tagArrLen = len(tagArr) if 1 == tagArrLen: tag = registry + '/' + tagArr[0] + ':latest' elif 2 <= tagArrLen: tag = registry + '/' + tagArr[0] + ':' + tagArr[1] cmd = 'docker info' if action == 'build': cmd = 'docker build -t ' + tag + ' ./' + dfpath elif action == 'push': cmd = 'docker push ' + tag os.system(cmd) def scan_files(directory, prefix=None, postfix=None): files_list = [] for root, sub_dirs, files in os.walk(directory): for special_file in files: if postfix: if special_file.endswith(postfix): files_list.append(os.path.join(root, special_file)) elif prefix: if special_file.startswith(prefix): files_list.append(os.path.join(root, special_file)) else: files_list.append(os.path.join(root, special_file)) return files_list def _parse_args(): parser = argparse.ArgumentParser() parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割', metavar='dfs') 
parser.add_argument('-a', '--action', default='auto', help= '设置build Dockerfile的范围 auto(默认)为自动模式取git最后一次修改的Dockerfile all全部的Dockerfile dfs指定的Dockerfile' , metavar='action') parser.add_argument('-r', '--registry', default='index.docker.io', help ='定义docker仓库地址', metavar='registry') parser.add_argument('-p', '--push', default=True, help= 'build完成是否\x08运行docker push', metavar='push') parser.add_argument('-v', '--version', action='version', version= '%(prog)s 1.0.0') return parser.parse_args() def printMsg(level='I', msg=''): print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg) def main(): parser = _parse_args() dfs = parser.dfs registry = parser.registry push = parser.push action = parser.action if action == 'auto': dfs = gitLastDockerFiles() if len(dfs) < 1: printMsg('I', '最近1次无Dockerfile修改') elif action == 'all': dfs = walkDockerfiles('./') elif action == 'dfs': pass else: printMsg('E', '-a 错误,输入的参数,未定义') if len(dfs) > 0: for df in dfs: dockerDo(df, 'build', registry) if True == push: dockerDo(df, 'push', registry) else: printMsg('E', 'Dockerfile未找到') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def walkDockerfiles(path, splitFirt=True): """ 遍历目录中的所有dockerfile Arguments: path {string} -- 目录路径 Keyword Arguments: splitFirt {bool} -- 去除文件开头的path (default: {True}) Returns: array -- dockerfile文件列表 """ files_list = [] if not os.path.exists(path): return -1 for root, sub_dirs, files in os.walk(path): for filename in files: if isDockerfile(filename): fullFileName = os.path.join(root, filename) if splitFirt: fullFileName = fullFileName.replace(path, '') files_list.append(fullFileName) return files_list def isDockerfile(filename): dockerfileStr = 'Dockerfile' if dockerfileStr in filename: return True return False def gitLastDockerFiles(): """ git最近一次修改的Dockerfile文件 Returns: array -- 最近一次修改的Dockerfile """ gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1' os.chdir(os.path.dirname(os.path.realpath(__file__))) process = os.popen(gitlastcmd) gitlastOut = process.read() process.close() lines = gitlastOut.split('\n') last_files = [] for line in lines: line = line.strip('\n') if isDockerfile(line): last_files.append(line) return last_files def dockerDo(df='', action='build', registry=''): if df == '' or registry == '': printMsg('E', '输入的参数不完整') """tag生成策略 nginx/Dockerfile >> registry/nginx:latest nginx/alpine/Dockerfile >> registry/nginx:alpine php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine 目前只支持两级目录 """ dfpath = df.replace('/Dockerfile', '') tagArr = dfpath.split('/') tagArrLen = len(tagArr) if 1 == tagArrLen: tag = registry + '/' + tagArr[0] + ':latest' elif 2 <= tagArrLen: tag = registry + '/' + tagArr[0] + ':' + tagArr[1] cmd = 'docker info' if action == 'build': cmd = 'docker build -t ' + tag + ' ./' + dfpath elif action == 'push': cmd = 'docker push ' + tag os.system(cmd) def scan_files(directory, prefix=None, postfix=None): files_list = [] for root, sub_dirs, files in os.walk(directory): for special_file in files: if postfix: if special_file.endswith(postfix): files_list.append(os.path.join(root, special_file)) elif prefix: if special_file.startswith(prefix): files_list.append(os.path.join(root, special_file)) else: files_list.append(os.path.join(root, special_file)) return files_list def _parse_args(): parser = argparse.ArgumentParser() parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割', metavar='dfs') parser.add_argument('-a', '--action', default='auto', help= '设置build Dockerfile的范围 
auto(默认)为自动模式取git最后一次修改的Dockerfile all全部的Dockerfile dfs指定的Dockerfile' , metavar='action') parser.add_argument('-r', '--registry', default='index.docker.io', help ='定义docker仓库地址', metavar='registry') parser.add_argument('-p', '--push', default=True, help= 'build完成是否\x08运行docker push', metavar='push') parser.add_argument('-v', '--version', action='version', version= '%(prog)s 1.0.0') return parser.parse_args() def printMsg(level='I', msg=''): print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg) def main(): parser = _parse_args() dfs = parser.dfs registry = parser.registry push = parser.push action = parser.action if action == 'auto': dfs = gitLastDockerFiles() if len(dfs) < 1: printMsg('I', '最近1次无Dockerfile修改') elif action == 'all': dfs = walkDockerfiles('./') elif action == 'dfs': pass else: printMsg('E', '-a 错误,输入的参数,未定义') if len(dfs) > 0: for df in dfs: dockerDo(df, 'build', registry) if True == push: dockerDo(df, 'push', registry) else: printMsg('E', 'Dockerfile未找到') if __name__ == '__main__': main() <|reserved_special_token_1|> <|reserved_special_token_0|> import os import argparse import datetime def walkDockerfiles(path, splitFirt=True): """ 遍历目录中的所有dockerfile Arguments: path {string} -- 目录路径 Keyword Arguments: splitFirt {bool} -- 去除文件开头的path (default: {True}) Returns: array -- dockerfile文件列表 """ files_list = [] if not os.path.exists(path): return -1 for root, sub_dirs, files in os.walk(path): for filename in files: if isDockerfile(filename): fullFileName = os.path.join(root, filename) if splitFirt: fullFileName = fullFileName.replace(path, '') files_list.append(fullFileName) return files_list def isDockerfile(filename): dockerfileStr = 'Dockerfile' if dockerfileStr in filename: return True return False def gitLastDockerFiles(): """ git最近一次修改的Dockerfile文件 Returns: array -- 最近一次修改的Dockerfile """ gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1' os.chdir(os.path.dirname(os.path.realpath(__file__))) process = os.popen(gitlastcmd) gitlastOut = process.read() process.close() lines = gitlastOut.split('\n') last_files = [] for line in lines: line = line.strip('\n') if isDockerfile(line): last_files.append(line) return last_files def dockerDo(df='', action='build', registry=''): if df == '' or registry == '': printMsg('E', '输入的参数不完整') """tag生成策略 nginx/Dockerfile >> registry/nginx:latest nginx/alpine/Dockerfile >> registry/nginx:alpine php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine 目前只支持两级目录 """ dfpath = df.replace('/Dockerfile', '') tagArr = dfpath.split('/') tagArrLen = len(tagArr) if 1 == tagArrLen: tag = registry + '/' + tagArr[0] + ':latest' elif 2 <= tagArrLen: tag = registry + '/' + tagArr[0] + ':' + tagArr[1] cmd = 'docker info' if action == 'build': cmd = 'docker build -t ' + tag + ' ./' + dfpath elif action == 'push': cmd = 'docker push ' + tag os.system(cmd) def scan_files(directory, prefix=None, postfix=None): files_list = [] for root, sub_dirs, files in os.walk(directory): for special_file in files: if postfix: if special_file.endswith(postfix): files_list.append(os.path.join(root, special_file)) elif prefix: if special_file.startswith(prefix): files_list.append(os.path.join(root, special_file)) else: files_list.append(os.path.join(root, special_file)) return files_list def _parse_args(): parser = argparse.ArgumentParser() parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割', metavar='dfs') parser.add_argument('-a', '--action', default='auto', help= '设置build Dockerfile的范围 auto(默认)为自动模式取git最后一次修改的Dockerfile 
all全部的Dockerfile dfs指定的Dockerfile' , metavar='action') parser.add_argument('-r', '--registry', default='index.docker.io', help ='定义docker仓库地址', metavar='registry') parser.add_argument('-p', '--push', default=True, help= 'build完成是否\x08运行docker push', metavar='push') parser.add_argument('-v', '--version', action='version', version= '%(prog)s 1.0.0') return parser.parse_args() def printMsg(level='I', msg=''): print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg) def main(): parser = _parse_args() dfs = parser.dfs registry = parser.registry push = parser.push action = parser.action if action == 'auto': dfs = gitLastDockerFiles() if len(dfs) < 1: printMsg('I', '最近1次无Dockerfile修改') elif action == 'all': dfs = walkDockerfiles('./') elif action == 'dfs': pass else: printMsg('E', '-a 错误,输入的参数,未定义') if len(dfs) > 0: for df in dfs: dockerDo(df, 'build', registry) if True == push: dockerDo(df, 'push', registry) else: printMsg('E', 'Dockerfile未找到') if __name__ == '__main__': main() <|reserved_special_token_1|> #!/usr/bin/env python3 #coding=utf-8 """ dfsbuild.py 单Git仓库多Dockerfile构建工具,提高了构建效率 快速使用: chmod +x ./dfsbuild.py 只构建Git最近一次修改的Dockerfile ./dfsbuild.py -a auto -r registry.cn-shanghai.aliyuncs.com/userename 构建所有的Dockerfile ./dfsbuild.py -a all -r registry.cn-shanghai.aliyuncs.com/userename 构建特定的Dockerfile ./dfsbuild.py -a dfs -r registry.cn-shanghai.aliyuncs.com/userename nginx 解决的问题: 通常我们用大量的基础Dockerfile需要维护 很多时候这些大量的Dockerfile会放在同一个Git仓库当中 当Git push时Git server的webhook功能去触发CI(Jenkins等)系统 CI系统会去自动docker build镜像 产生的问题是每次都会docker build全部的Dockerfile文件 构建的过程中虽然会使用缓存,但实际的构建时间还是不能接受的 本工具可以自动处理只构建Git最近一次修改的Dockerfile 从而大大提高了单Git仓库多Dockerfile的docker build构建速度 关键点: git最近一次修改的Dockerfile git --no-pager whatchanged --name-only --oneline -1 参看gitLastDockerFiles函数实现 """ import os import argparse import datetime def walkDockerfiles(path,splitFirt=True): """ 遍历目录中的所有dockerfile Arguments: path {string} -- 目录路径 Keyword Arguments: splitFirt {bool} -- 去除文件开头的path (default: {True}) Returns: array -- dockerfile文件列表 """ files_list = [] if not os.path.exists(path): return -1 for root, sub_dirs, files in os.walk(path): for filename in files: if isDockerfile(filename): fullFileName = os.path.join(root, filename) if splitFirt: fullFileName = fullFileName.replace(path,"") files_list.append(fullFileName) # 路径和文件名连接构成完整路径 return files_list def isDockerfile(filename): dockerfileStr = "Dockerfile" if dockerfileStr in filename: return True return False def gitLastDockerFiles(): """ git最近一次修改的Dockerfile文件 Returns: array -- 最近一次修改的Dockerfile """ gitlastcmd = "git --no-pager whatchanged --name-only --oneline -1" os.chdir(os.path.dirname(os.path.realpath(__file__))) process = os.popen(gitlastcmd) # return file gitlastOut = process.read() process.close() lines = gitlastOut.split('\n') last_files = [] for line in lines: line = line.strip('\n') if isDockerfile(line): last_files.append(line) return last_files def dockerDo(df="", action="build", registry=""): if df == "" or registry == "": printMsg("E","输入的参数不完整") """tag生成策略 nginx/Dockerfile >> registry/nginx:latest nginx/alpine/Dockerfile >> registry/nginx:alpine php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine 目前只支持两级目录 """ dfpath = df.replace('/Dockerfile','') tagArr = dfpath.split('/') tagArrLen = len(tagArr) if 1 == tagArrLen: tag = registry + "/" + tagArr[0] + ":latest" elif 2 <= tagArrLen: tag = registry + "/" + tagArr[0] + ":" + tagArr[1] cmd = "docker info" if action == "build": cmd = 'docker build -t ' + tag + ' ./' + dfpath elif action == "push": cmd 
= 'docker push ' + tag os.system(cmd) def scan_files(directory,prefix=None,postfix=None): files_list=[] for root, sub_dirs, files in os.walk(directory): for special_file in files: if postfix: if special_file.endswith(postfix): files_list.append(os.path.join(root,special_file)) elif prefix: if special_file.startswith(prefix): files_list.append(os.path.join(root,special_file)) else: files_list.append(os.path.join(root,special_file)) return files_list def _parse_args(): parser = argparse.ArgumentParser() parser.add_argument( 'dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割', metavar='dfs' ) parser.add_argument( '-a', '--action', default='auto', help="设置build Dockerfile的范围 \ auto(默认)为自动模式取git最后一次修改的Dockerfile \ all全部的Dockerfile \ dfs指定的Dockerfile", metavar='action', ) parser.add_argument( '-r', '--registry', default='index.docker.io', help="定义docker仓库地址", metavar='registry', ) parser.add_argument( '-p', '--push', default=True, help="build完成是否运行docker push", metavar='push', ) parser.add_argument( '-v', '--version', action='version', version='%(prog)s 1.0.0', ) return parser.parse_args() def printMsg(level="I",msg=""): print(datetime.datetime.now().isoformat() + " ["+level+"] "+msg) def main(): parser = _parse_args() dfs = parser.dfs registry = parser.registry push = parser.push action = parser.action if action == "auto": dfs = gitLastDockerFiles() if len(dfs) < 1: printMsg("I", "最近1次无Dockerfile修改") elif action == "all": dfs = walkDockerfiles("./") elif action == "dfs": pass else: printMsg("E","-a 错误,输入的参数,未定义") if len(dfs) > 0: for df in dfs: dockerDo(df, 'build', registry) if True == push: dockerDo(df, 'push', registry) else: printMsg("E", "Dockerfile未找到") if __name__ == '__main__': main()
flexible
{ "blob_id": "400f9b6fb0ab73a920e6b73373615b2f8d1103bb", "index": 2301, "step-1": "<mask token>\n\n\ndef walkDockerfiles(path, splitFirt=True):\n \"\"\" 遍历目录中的所有dockerfile\n \n Arguments:\n path {string} -- 目录路径\n \n Keyword Arguments:\n splitFirt {bool} -- 去除文件开头的path (default: {True})\n \n Returns:\n array -- dockerfile文件列表\n \"\"\"\n files_list = []\n if not os.path.exists(path):\n return -1\n for root, sub_dirs, files in os.walk(path):\n for filename in files:\n if isDockerfile(filename):\n fullFileName = os.path.join(root, filename)\n if splitFirt:\n fullFileName = fullFileName.replace(path, '')\n files_list.append(fullFileName)\n return files_list\n\n\n<mask token>\n\n\ndef printMsg(level='I', msg=''):\n print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg)\n\n\ndef main():\n parser = _parse_args()\n dfs = parser.dfs\n registry = parser.registry\n push = parser.push\n action = parser.action\n if action == 'auto':\n dfs = gitLastDockerFiles()\n if len(dfs) < 1:\n printMsg('I', '最近1次无Dockerfile修改')\n elif action == 'all':\n dfs = walkDockerfiles('./')\n elif action == 'dfs':\n pass\n else:\n printMsg('E', '-a 错误,输入的参数,未定义')\n if len(dfs) > 0:\n for df in dfs:\n dockerDo(df, 'build', registry)\n if True == push:\n dockerDo(df, 'push', registry)\n else:\n printMsg('E', 'Dockerfile未找到')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef walkDockerfiles(path, splitFirt=True):\n \"\"\" 遍历目录中的所有dockerfile\n \n Arguments:\n path {string} -- 目录路径\n \n Keyword Arguments:\n splitFirt {bool} -- 去除文件开头的path (default: {True})\n \n Returns:\n array -- dockerfile文件列表\n \"\"\"\n files_list = []\n if not os.path.exists(path):\n return -1\n for root, sub_dirs, files in os.walk(path):\n for filename in files:\n if isDockerfile(filename):\n fullFileName = os.path.join(root, filename)\n if splitFirt:\n fullFileName = fullFileName.replace(path, '')\n files_list.append(fullFileName)\n return files_list\n\n\ndef isDockerfile(filename):\n dockerfileStr = 'Dockerfile'\n if dockerfileStr in filename:\n return True\n return False\n\n\ndef gitLastDockerFiles():\n \"\"\" git最近一次修改的Dockerfile文件\n \n Returns:\n array -- 最近一次修改的Dockerfile\n \"\"\"\n gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1'\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n process = os.popen(gitlastcmd)\n gitlastOut = process.read()\n process.close()\n lines = gitlastOut.split('\\n')\n last_files = []\n for line in lines:\n line = line.strip('\\n')\n if isDockerfile(line):\n last_files.append(line)\n return last_files\n\n\ndef dockerDo(df='', action='build', registry=''):\n if df == '' or registry == '':\n printMsg('E', '输入的参数不完整')\n \"\"\"tag生成策略\n nginx/Dockerfile >> registry/nginx:latest\n nginx/alpine/Dockerfile >> registry/nginx:alpine\n php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine\n 目前只支持两级目录\n \"\"\"\n dfpath = df.replace('/Dockerfile', '')\n tagArr = dfpath.split('/')\n tagArrLen = len(tagArr)\n if 1 == tagArrLen:\n tag = registry + '/' + tagArr[0] + ':latest'\n elif 2 <= tagArrLen:\n tag = registry + '/' + tagArr[0] + ':' + tagArr[1]\n cmd = 'docker info'\n if action == 'build':\n cmd = 'docker build -t ' + tag + ' ./' + dfpath\n elif action == 'push':\n cmd = 'docker push ' + tag\n os.system(cmd)\n\n\ndef scan_files(directory, prefix=None, postfix=None):\n files_list = []\n for root, sub_dirs, files in os.walk(directory):\n for special_file in files:\n if postfix:\n if special_file.endswith(postfix):\n files_list.append(os.path.join(root, special_file))\n elif prefix:\n if 
special_file.startswith(prefix):\n files_list.append(os.path.join(root, special_file))\n else:\n files_list.append(os.path.join(root, special_file))\n return files_list\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割',\n metavar='dfs')\n parser.add_argument('-a', '--action', default='auto', help=\n '设置build Dockerfile的范围 auto(默认)为自动模式取git最后一次修改的Dockerfile all全部的Dockerfile dfs指定的Dockerfile'\n , metavar='action')\n parser.add_argument('-r', '--registry', default='index.docker.io', help\n ='定义docker仓库地址', metavar='registry')\n parser.add_argument('-p', '--push', default=True, help=\n 'build完成是否\\x08运行docker push', metavar='push')\n parser.add_argument('-v', '--version', action='version', version=\n '%(prog)s 1.0.0')\n return parser.parse_args()\n\n\ndef printMsg(level='I', msg=''):\n print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg)\n\n\ndef main():\n parser = _parse_args()\n dfs = parser.dfs\n registry = parser.registry\n push = parser.push\n action = parser.action\n if action == 'auto':\n dfs = gitLastDockerFiles()\n if len(dfs) < 1:\n printMsg('I', '最近1次无Dockerfile修改')\n elif action == 'all':\n dfs = walkDockerfiles('./')\n elif action == 'dfs':\n pass\n else:\n printMsg('E', '-a 错误,输入的参数,未定义')\n if len(dfs) > 0:\n for df in dfs:\n dockerDo(df, 'build', registry)\n if True == push:\n dockerDo(df, 'push', registry)\n else:\n printMsg('E', 'Dockerfile未找到')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef walkDockerfiles(path, splitFirt=True):\n \"\"\" 遍历目录中的所有dockerfile\n \n Arguments:\n path {string} -- 目录路径\n \n Keyword Arguments:\n splitFirt {bool} -- 去除文件开头的path (default: {True})\n \n Returns:\n array -- dockerfile文件列表\n \"\"\"\n files_list = []\n if not os.path.exists(path):\n return -1\n for root, sub_dirs, files in os.walk(path):\n for filename in files:\n if isDockerfile(filename):\n fullFileName = os.path.join(root, filename)\n if splitFirt:\n fullFileName = fullFileName.replace(path, '')\n files_list.append(fullFileName)\n return files_list\n\n\ndef isDockerfile(filename):\n dockerfileStr = 'Dockerfile'\n if dockerfileStr in filename:\n return True\n return False\n\n\ndef gitLastDockerFiles():\n \"\"\" git最近一次修改的Dockerfile文件\n \n Returns:\n array -- 最近一次修改的Dockerfile\n \"\"\"\n gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1'\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n process = os.popen(gitlastcmd)\n gitlastOut = process.read()\n process.close()\n lines = gitlastOut.split('\\n')\n last_files = []\n for line in lines:\n line = line.strip('\\n')\n if isDockerfile(line):\n last_files.append(line)\n return last_files\n\n\ndef dockerDo(df='', action='build', registry=''):\n if df == '' or registry == '':\n printMsg('E', '输入的参数不完整')\n \"\"\"tag生成策略\n nginx/Dockerfile >> registry/nginx:latest\n nginx/alpine/Dockerfile >> registry/nginx:alpine\n php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine\n 目前只支持两级目录\n \"\"\"\n dfpath = df.replace('/Dockerfile', '')\n tagArr = dfpath.split('/')\n tagArrLen = len(tagArr)\n if 1 == tagArrLen:\n tag = registry + '/' + tagArr[0] + ':latest'\n elif 2 <= tagArrLen:\n tag = registry + '/' + tagArr[0] + ':' + tagArr[1]\n cmd = 'docker info'\n if action == 'build':\n cmd = 'docker build -t ' + tag + ' ./' + dfpath\n elif action == 'push':\n cmd = 'docker push ' + tag\n os.system(cmd)\n\n\ndef scan_files(directory, prefix=None, postfix=None):\n files_list = []\n for root, sub_dirs, files in 
os.walk(directory):\n for special_file in files:\n if postfix:\n if special_file.endswith(postfix):\n files_list.append(os.path.join(root, special_file))\n elif prefix:\n if special_file.startswith(prefix):\n files_list.append(os.path.join(root, special_file))\n else:\n files_list.append(os.path.join(root, special_file))\n return files_list\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割',\n metavar='dfs')\n parser.add_argument('-a', '--action', default='auto', help=\n '设置build Dockerfile的范围 auto(默认)为自动模式取git最后一次修改的Dockerfile all全部的Dockerfile dfs指定的Dockerfile'\n , metavar='action')\n parser.add_argument('-r', '--registry', default='index.docker.io', help\n ='定义docker仓库地址', metavar='registry')\n parser.add_argument('-p', '--push', default=True, help=\n 'build完成是否\\x08运行docker push', metavar='push')\n parser.add_argument('-v', '--version', action='version', version=\n '%(prog)s 1.0.0')\n return parser.parse_args()\n\n\ndef printMsg(level='I', msg=''):\n print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg)\n\n\ndef main():\n parser = _parse_args()\n dfs = parser.dfs\n registry = parser.registry\n push = parser.push\n action = parser.action\n if action == 'auto':\n dfs = gitLastDockerFiles()\n if len(dfs) < 1:\n printMsg('I', '最近1次无Dockerfile修改')\n elif action == 'all':\n dfs = walkDockerfiles('./')\n elif action == 'dfs':\n pass\n else:\n printMsg('E', '-a 错误,输入的参数,未定义')\n if len(dfs) > 0:\n for df in dfs:\n dockerDo(df, 'build', registry)\n if True == push:\n dockerDo(df, 'push', registry)\n else:\n printMsg('E', 'Dockerfile未找到')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nimport os\nimport argparse\nimport datetime\n\n\ndef walkDockerfiles(path, splitFirt=True):\n \"\"\" 遍历目录中的所有dockerfile\n \n Arguments:\n path {string} -- 目录路径\n \n Keyword Arguments:\n splitFirt {bool} -- 去除文件开头的path (default: {True})\n \n Returns:\n array -- dockerfile文件列表\n \"\"\"\n files_list = []\n if not os.path.exists(path):\n return -1\n for root, sub_dirs, files in os.walk(path):\n for filename in files:\n if isDockerfile(filename):\n fullFileName = os.path.join(root, filename)\n if splitFirt:\n fullFileName = fullFileName.replace(path, '')\n files_list.append(fullFileName)\n return files_list\n\n\ndef isDockerfile(filename):\n dockerfileStr = 'Dockerfile'\n if dockerfileStr in filename:\n return True\n return False\n\n\ndef gitLastDockerFiles():\n \"\"\" git最近一次修改的Dockerfile文件\n \n Returns:\n array -- 最近一次修改的Dockerfile\n \"\"\"\n gitlastcmd = 'git --no-pager whatchanged --name-only --oneline -1'\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n process = os.popen(gitlastcmd)\n gitlastOut = process.read()\n process.close()\n lines = gitlastOut.split('\\n')\n last_files = []\n for line in lines:\n line = line.strip('\\n')\n if isDockerfile(line):\n last_files.append(line)\n return last_files\n\n\ndef dockerDo(df='', action='build', registry=''):\n if df == '' or registry == '':\n printMsg('E', '输入的参数不完整')\n \"\"\"tag生成策略\n nginx/Dockerfile >> registry/nginx:latest\n nginx/alpine/Dockerfile >> registry/nginx:alpine\n php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine\n 目前只支持两级目录\n \"\"\"\n dfpath = df.replace('/Dockerfile', '')\n tagArr = dfpath.split('/')\n tagArrLen = len(tagArr)\n if 1 == tagArrLen:\n tag = registry + '/' + tagArr[0] + ':latest'\n elif 2 <= tagArrLen:\n tag = registry + '/' + tagArr[0] + ':' + tagArr[1]\n cmd = 'docker info'\n if action == 'build':\n 
cmd = 'docker build -t ' + tag + ' ./' + dfpath\n elif action == 'push':\n cmd = 'docker push ' + tag\n os.system(cmd)\n\n\ndef scan_files(directory, prefix=None, postfix=None):\n files_list = []\n for root, sub_dirs, files in os.walk(directory):\n for special_file in files:\n if postfix:\n if special_file.endswith(postfix):\n files_list.append(os.path.join(root, special_file))\n elif prefix:\n if special_file.startswith(prefix):\n files_list.append(os.path.join(root, special_file))\n else:\n files_list.append(os.path.join(root, special_file))\n return files_list\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('dfs', nargs='*', help='Dockerfile文件相对路径支持多个,用空格分割',\n metavar='dfs')\n parser.add_argument('-a', '--action', default='auto', help=\n '设置build Dockerfile的范围 auto(默认)为自动模式取git最后一次修改的Dockerfile all全部的Dockerfile dfs指定的Dockerfile'\n , metavar='action')\n parser.add_argument('-r', '--registry', default='index.docker.io', help\n ='定义docker仓库地址', metavar='registry')\n parser.add_argument('-p', '--push', default=True, help=\n 'build完成是否\\x08运行docker push', metavar='push')\n parser.add_argument('-v', '--version', action='version', version=\n '%(prog)s 1.0.0')\n return parser.parse_args()\n\n\ndef printMsg(level='I', msg=''):\n print(datetime.datetime.now().isoformat() + ' [' + level + '] ' + msg)\n\n\ndef main():\n parser = _parse_args()\n dfs = parser.dfs\n registry = parser.registry\n push = parser.push\n action = parser.action\n if action == 'auto':\n dfs = gitLastDockerFiles()\n if len(dfs) < 1:\n printMsg('I', '最近1次无Dockerfile修改')\n elif action == 'all':\n dfs = walkDockerfiles('./')\n elif action == 'dfs':\n pass\n else:\n printMsg('E', '-a 错误,输入的参数,未定义')\n if len(dfs) > 0:\n for df in dfs:\n dockerDo(df, 'build', registry)\n if True == push:\n dockerDo(df, 'push', registry)\n else:\n printMsg('E', 'Dockerfile未找到')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python3\n#coding=utf-8\n\n\"\"\"\ndfsbuild.py\n单Git仓库多Dockerfile构建工具,提高了构建效率\n\n快速使用:\nchmod +x ./dfsbuild.py\n只构建Git最近一次修改的Dockerfile\n./dfsbuild.py -a auto -r registry.cn-shanghai.aliyuncs.com/userename\n\n构建所有的Dockerfile\n./dfsbuild.py -a all -r registry.cn-shanghai.aliyuncs.com/userename\n\n构建特定的Dockerfile\n./dfsbuild.py -a dfs -r registry.cn-shanghai.aliyuncs.com/userename nginx\n\n解决的问题:\n通常我们用大量的基础Dockerfile需要维护\n很多时候这些大量的Dockerfile会放在同一个Git仓库当中\n当Git push时Git server的webhook功能去触发CI(Jenkins等)系统\nCI系统会去自动docker build镜像\n产生的问题是每次都会docker build全部的Dockerfile文件\n构建的过程中虽然会使用缓存,但实际的构建时间还是不能接受的\n本工具可以自动处理只构建Git最近一次修改的Dockerfile\n从而大大提高了单Git仓库多Dockerfile的docker build构建速度\n\n关键点:\ngit最近一次修改的Dockerfile\ngit --no-pager whatchanged --name-only --oneline -1\n参看gitLastDockerFiles函数实现\n\"\"\"\n\n\nimport os\nimport argparse\nimport datetime\n\ndef walkDockerfiles(path,splitFirt=True):\n \"\"\" 遍历目录中的所有dockerfile\n \n Arguments:\n path {string} -- 目录路径\n \n Keyword Arguments:\n splitFirt {bool} -- 去除文件开头的path (default: {True})\n \n Returns:\n array -- dockerfile文件列表\n \"\"\"\n\n\n files_list = []\n if not os.path.exists(path):\n return -1\n for root, sub_dirs, files in os.walk(path):\n for filename in files:\n if isDockerfile(filename):\n fullFileName = os.path.join(root, filename)\n if splitFirt:\n fullFileName = fullFileName.replace(path,\"\")\n files_list.append(fullFileName) # 路径和文件名连接构成完整路径\n return files_list\n\ndef isDockerfile(filename):\n dockerfileStr = \"Dockerfile\"\n if dockerfileStr in filename:\n return True\n return False\n\ndef gitLastDockerFiles():\n \"\"\" 
git最近一次修改的Dockerfile文件\n \n Returns:\n array -- 最近一次修改的Dockerfile\n \"\"\"\n\n gitlastcmd = \"git --no-pager whatchanged --name-only --oneline -1\"\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n process = os.popen(gitlastcmd) # return file\n gitlastOut = process.read()\n process.close()\n lines = gitlastOut.split('\\n')\n last_files = []\n for line in lines:\n line = line.strip('\\n')\n if isDockerfile(line):\n last_files.append(line)\n return last_files\n \n\ndef dockerDo(df=\"\", action=\"build\", registry=\"\"):\n if df == \"\" or registry == \"\":\n printMsg(\"E\",\"输入的参数不完整\")\n \n \"\"\"tag生成策略\n nginx/Dockerfile >> registry/nginx:latest\n nginx/alpine/Dockerfile >> registry/nginx:alpine\n php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine\n 目前只支持两级目录\n \"\"\"\n dfpath = df.replace('/Dockerfile','')\n tagArr = dfpath.split('/')\n tagArrLen = len(tagArr)\n if 1 == tagArrLen:\n tag = registry + \"/\" + tagArr[0] + \":latest\"\n elif 2 <= tagArrLen:\n tag = registry + \"/\" + tagArr[0] + \":\" + tagArr[1]\n cmd = \"docker info\"\n if action == \"build\":\n cmd = 'docker build -t ' + tag + ' ./' + dfpath\n elif action == \"push\":\n cmd = 'docker push ' + tag\n os.system(cmd)\n\ndef scan_files(directory,prefix=None,postfix=None):\n files_list=[]\n \n for root, sub_dirs, files in os.walk(directory):\n for special_file in files:\n if postfix:\n if special_file.endswith(postfix):\n files_list.append(os.path.join(root,special_file))\n elif prefix:\n if special_file.startswith(prefix):\n files_list.append(os.path.join(root,special_file))\n else:\n files_list.append(os.path.join(root,special_file))\n \n return files_list\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'dfs',\n nargs='*',\n help='Dockerfile文件相对路径支持多个,用空格分割',\n metavar='dfs'\n )\n parser.add_argument(\n '-a', '--action',\n default='auto',\n help=\"设置build Dockerfile的范围 \\\n auto(默认)为自动模式取git最后一次修改的Dockerfile \\\n all全部的Dockerfile \\\n dfs指定的Dockerfile\",\n metavar='action',\n )\n parser.add_argument(\n '-r', '--registry',\n default='index.docker.io',\n help=\"定义docker仓库地址\",\n metavar='registry',\n )\n parser.add_argument(\n '-p', '--push',\n default=True,\n help=\"build完成是否\b运行docker push\",\n metavar='push',\n )\n parser.add_argument(\n '-v', '--version',\n action='version',\n version='%(prog)s 1.0.0',\n )\n return parser.parse_args()\n\ndef printMsg(level=\"I\",msg=\"\"):\n print(datetime.datetime.now().isoformat() + \" [\"+level+\"] \"+msg)\n\ndef main():\n\n parser = _parse_args()\n dfs = parser.dfs\n registry = parser.registry\n push = parser.push\n action = parser.action\n if action == \"auto\":\n dfs = gitLastDockerFiles()\n if len(dfs) < 1:\n printMsg(\"I\", \"最近1次无Dockerfile修改\")\n \n elif action == \"all\":\n dfs = walkDockerfiles(\"./\")\n elif action == \"dfs\":\n pass\n else:\n printMsg(\"E\",\"-a 错误,输入的参数,未定义\")\n\n if len(dfs) > 0:\n for df in dfs:\n dockerDo(df, 'build', registry)\n if True == push:\n dockerDo(df, 'push', registry)\n else:\n printMsg(\"E\", \"Dockerfile未找到\")\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 3, 8, 9, 10, 11 ] }
[ 3, 8, 9, 10, 11 ]
<|reserved_special_token_0|> class MTCNN: def __init__(self, device=None, model=None): if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt' if model is None: model = torch.hub.load_state_dict_from_url(url) else: model = torch.load(model, map_location=device) self.pnet = PNet().to(device) self.rnet = RNet().to(device) self.onet = ONet().to(device) self.pnet.load_state_dict(model['pnet']) self.rnet.load_state_dict(model['rnet']) self.onet.load_state_dict(model['onet']) def detect(self, imgs, minsize=None): if len(imgs) == 0: return [] if isinstance(imgs[0], np.ndarray): h, w = imgs[0].shape[:2] else: w, h = imgs[0].size if minsize is None: minsize = max(96 * min(w, h) / 1080, 40) boxes, points = [], [] with torch.no_grad(): batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)] for batch in batches: batch_boxes, batch_points = detect_face(batch, minsize, self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709, self.device) boxes += list(batch_boxes) points += list(batch_points) result = [] for box, point in zip(boxes, points): box = np.array(box) point = np.array(point) if len(box) == 0: result.append(None) else: result.append((box[:, :4], box[:, 4], point)) return result <|reserved_special_token_0|> class PNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.prelu1 = nn.PReLU(10) self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv2 = nn.Conv2d(10, 16, kernel_size=3) self.prelu2 = nn.PReLU(16) self.conv3 = nn.Conv2d(16, 32, kernel_size=3) self.prelu3 = nn.PReLU(32) self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1) self.softmax4_1 = nn.Softmax(dim=1) self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.conv3(x) x = self.prelu3(x) a = self.conv4_1(x) a = self.softmax4_1(a) b = self.conv4_2(x) return b, a class RNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 28, kernel_size=3) self.prelu1 = nn.PReLU(28) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(28, 48, kernel_size=3) self.prelu2 = nn.PReLU(48) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(48, 64, kernel_size=2) self.prelu3 = nn.PReLU(64) self.dense4 = nn.Linear(576, 128) self.prelu4 = nn.PReLU(128) self.dense5_1 = nn.Linear(128, 2) self.softmax5_1 = nn.Softmax(dim=1) self.dense5_2 = nn.Linear(128, 4) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense4(x.view(x.shape[0], -1)) x = self.prelu4(x) a = self.dense5_1(x) a = self.softmax5_1(a) b = self.dense5_2(x) return b, a class ONet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3) self.prelu1 = nn.PReLU(32) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3) self.prelu2 = nn.PReLU(64) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(64, 64, kernel_size=3) self.prelu3 = nn.PReLU(64) self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv4 = nn.Conv2d(64, 128, kernel_size=2) self.prelu4 = nn.PReLU(128) self.dense5 = nn.Linear(1152, 256) self.prelu5 = nn.PReLU(256) self.dense6_1 = nn.Linear(256, 2) self.softmax6_1 = nn.Softmax(dim=1) self.dense6_2 = 
nn.Linear(256, 4) self.dense6_3 = nn.Linear(256, 10) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = self.pool3(x) x = self.conv4(x) x = self.prelu4(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense5(x.view(x.shape[0], -1)) x = self.prelu5(x) a = self.dense6_1(x) a = self.softmax6_1(a) b = self.dense6_2(x) c = self.dense6_3(x) return b, c, a <|reserved_special_token_0|> def bbreg(boundingbox, reg): if reg.shape[1] == 1: reg = torch.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0) return boundingbox def generateBoundingBox(reg, probs, scale, thresh): stride = 2 cellsize = 12 reg = reg.permute(1, 0, 2, 3) mask = probs >= thresh mask_inds = mask.nonzero(as_tuple=False) image_inds = mask_inds[:, 0] score = probs[mask] reg = reg[:, mask].permute(1, 0) bb = mask_inds[:, 1:].type(reg.dtype).flip(1) q1 = ((stride * bb + 1) / scale).floor() q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor() boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1) return boundingbox, image_inds def nms_numpy(boxes, scores, threshold, method): if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0].copy() y1 = boxes[:, 1].copy() x2 = boxes[:, 2].copy() y2 = boxes[:, 3].copy() s = scores area = (x2 - x1 + 1) * (y2 - y1 + 1) I = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while I.size > 0: i = I[-1] pick[counter] = i counter += 1 idx = I[0:-1] xx1 = np.maximum(x1[i], x1[idx]).copy() yy1 = np.maximum(y1[i], y1[idx]).copy() xx2 = np.minimum(x2[i], x2[idx]).copy() yy2 = np.minimum(y2[i], y2[idx]).copy() w = np.maximum(0.0, xx2 - xx1 + 1).copy() h = np.maximum(0.0, yy2 - yy1 + 1).copy() inter = w * h if method == 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) I = I[np.where(o <= threshold)] pick = pick[:counter].copy() return pick def batched_nms_numpy(boxes, scores, idxs, threshold, method): device = boxes.device if boxes.numel() == 0: return torch.empty((0,), dtype=torch.int64, device=device) max_coordinate = boxes.max() offsets = idxs.to(boxes) * (max_coordinate + 1) boxes_for_nms = boxes + offsets[:, None] boxes_for_nms = boxes_for_nms.cpu().numpy() scores = scores.cpu().numpy() keep = nms_numpy(boxes_for_nms, scores, threshold, method) return torch.as_tensor(keep, dtype=torch.long, device=device) <|reserved_special_token_0|> def imresample(img, sz): im_data = interpolate(img, size=sz, mode='area') return im_data <|reserved_special_token_1|> <|reserved_special_token_0|> class MTCNN: def __init__(self, device=None, model=None): if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt' if model is None: model = torch.hub.load_state_dict_from_url(url) else: model = torch.load(model, map_location=device) self.pnet = PNet().to(device) self.rnet = RNet().to(device) self.onet = ONet().to(device) self.pnet.load_state_dict(model['pnet']) self.rnet.load_state_dict(model['rnet']) self.onet.load_state_dict(model['onet']) def detect(self, imgs, minsize=None): if len(imgs) == 0: return [] if 
isinstance(imgs[0], np.ndarray): h, w = imgs[0].shape[:2] else: w, h = imgs[0].size if minsize is None: minsize = max(96 * min(w, h) / 1080, 40) boxes, points = [], [] with torch.no_grad(): batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)] for batch in batches: batch_boxes, batch_points = detect_face(batch, minsize, self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709, self.device) boxes += list(batch_boxes) points += list(batch_points) result = [] for box, point in zip(boxes, points): box = np.array(box) point = np.array(point) if len(box) == 0: result.append(None) else: result.append((box[:, :4], box[:, 4], point)) return result <|reserved_special_token_0|> class PNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.prelu1 = nn.PReLU(10) self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv2 = nn.Conv2d(10, 16, kernel_size=3) self.prelu2 = nn.PReLU(16) self.conv3 = nn.Conv2d(16, 32, kernel_size=3) self.prelu3 = nn.PReLU(32) self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1) self.softmax4_1 = nn.Softmax(dim=1) self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.conv3(x) x = self.prelu3(x) a = self.conv4_1(x) a = self.softmax4_1(a) b = self.conv4_2(x) return b, a class RNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 28, kernel_size=3) self.prelu1 = nn.PReLU(28) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(28, 48, kernel_size=3) self.prelu2 = nn.PReLU(48) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(48, 64, kernel_size=2) self.prelu3 = nn.PReLU(64) self.dense4 = nn.Linear(576, 128) self.prelu4 = nn.PReLU(128) self.dense5_1 = nn.Linear(128, 2) self.softmax5_1 = nn.Softmax(dim=1) self.dense5_2 = nn.Linear(128, 4) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense4(x.view(x.shape[0], -1)) x = self.prelu4(x) a = self.dense5_1(x) a = self.softmax5_1(a) b = self.dense5_2(x) return b, a class ONet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3) self.prelu1 = nn.PReLU(32) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3) self.prelu2 = nn.PReLU(64) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(64, 64, kernel_size=3) self.prelu3 = nn.PReLU(64) self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv4 = nn.Conv2d(64, 128, kernel_size=2) self.prelu4 = nn.PReLU(128) self.dense5 = nn.Linear(1152, 256) self.prelu5 = nn.PReLU(256) self.dense6_1 = nn.Linear(256, 2) self.softmax6_1 = nn.Softmax(dim=1) self.dense6_2 = nn.Linear(256, 4) self.dense6_3 = nn.Linear(256, 10) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = self.pool3(x) x = self.conv4(x) x = self.prelu4(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense5(x.view(x.shape[0], -1)) x = self.prelu5(x) a = self.dense6_1(x) a = self.softmax6_1(a) b = self.dense6_2(x) c = self.dense6_3(x) return b, c, a <|reserved_special_token_0|> def bbreg(boundingbox, reg): if reg.shape[1] == 1: reg = torch.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 
1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0) return boundingbox def generateBoundingBox(reg, probs, scale, thresh): stride = 2 cellsize = 12 reg = reg.permute(1, 0, 2, 3) mask = probs >= thresh mask_inds = mask.nonzero(as_tuple=False) image_inds = mask_inds[:, 0] score = probs[mask] reg = reg[:, mask].permute(1, 0) bb = mask_inds[:, 1:].type(reg.dtype).flip(1) q1 = ((stride * bb + 1) / scale).floor() q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor() boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1) return boundingbox, image_inds def nms_numpy(boxes, scores, threshold, method): if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0].copy() y1 = boxes[:, 1].copy() x2 = boxes[:, 2].copy() y2 = boxes[:, 3].copy() s = scores area = (x2 - x1 + 1) * (y2 - y1 + 1) I = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while I.size > 0: i = I[-1] pick[counter] = i counter += 1 idx = I[0:-1] xx1 = np.maximum(x1[i], x1[idx]).copy() yy1 = np.maximum(y1[i], y1[idx]).copy() xx2 = np.minimum(x2[i], x2[idx]).copy() yy2 = np.minimum(y2[i], y2[idx]).copy() w = np.maximum(0.0, xx2 - xx1 + 1).copy() h = np.maximum(0.0, yy2 - yy1 + 1).copy() inter = w * h if method == 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) I = I[np.where(o <= threshold)] pick = pick[:counter].copy() return pick def batched_nms_numpy(boxes, scores, idxs, threshold, method): device = boxes.device if boxes.numel() == 0: return torch.empty((0,), dtype=torch.int64, device=device) max_coordinate = boxes.max() offsets = idxs.to(boxes) * (max_coordinate + 1) boxes_for_nms = boxes + offsets[:, None] boxes_for_nms = boxes_for_nms.cpu().numpy() scores = scores.cpu().numpy() keep = nms_numpy(boxes_for_nms, scores, threshold, method) return torch.as_tensor(keep, dtype=torch.long, device=device) def pad(boxes, w, h): boxes = boxes.trunc().int().cpu().numpy() x = boxes[:, 0] y = boxes[:, 1] ex = boxes[:, 2] ey = boxes[:, 3] x[x < 1] = 1 y[y < 1] = 1 ex[ex > w] = w ey[ey > h] = h return y, ey, x, ex <|reserved_special_token_0|> def imresample(img, sz): im_data = interpolate(img, size=sz, mode='area') return im_data <|reserved_special_token_1|> <|reserved_special_token_0|> class MTCNN: def __init__(self, device=None, model=None): if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt' if model is None: model = torch.hub.load_state_dict_from_url(url) else: model = torch.load(model, map_location=device) self.pnet = PNet().to(device) self.rnet = RNet().to(device) self.onet = ONet().to(device) self.pnet.load_state_dict(model['pnet']) self.rnet.load_state_dict(model['rnet']) self.onet.load_state_dict(model['onet']) def detect(self, imgs, minsize=None): if len(imgs) == 0: return [] if isinstance(imgs[0], np.ndarray): h, w = imgs[0].shape[:2] else: w, h = imgs[0].size if minsize is None: minsize = max(96 * min(w, h) / 1080, 40) boxes, points = [], [] with torch.no_grad(): batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)] for batch in batches: batch_boxes, batch_points = detect_face(batch, minsize, self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709, self.device) boxes += list(batch_boxes) points += list(batch_points) result = 
[] for box, point in zip(boxes, points): box = np.array(box) point = np.array(point) if len(box) == 0: result.append(None) else: result.append((box[:, :4], box[:, 4], point)) return result def empty_cache(device): if 'cuda' in device: with torch.cuda.device(device): torch.cuda.empty_cache() class PNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.prelu1 = nn.PReLU(10) self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv2 = nn.Conv2d(10, 16, kernel_size=3) self.prelu2 = nn.PReLU(16) self.conv3 = nn.Conv2d(16, 32, kernel_size=3) self.prelu3 = nn.PReLU(32) self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1) self.softmax4_1 = nn.Softmax(dim=1) self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.conv3(x) x = self.prelu3(x) a = self.conv4_1(x) a = self.softmax4_1(a) b = self.conv4_2(x) return b, a class RNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 28, kernel_size=3) self.prelu1 = nn.PReLU(28) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(28, 48, kernel_size=3) self.prelu2 = nn.PReLU(48) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(48, 64, kernel_size=2) self.prelu3 = nn.PReLU(64) self.dense4 = nn.Linear(576, 128) self.prelu4 = nn.PReLU(128) self.dense5_1 = nn.Linear(128, 2) self.softmax5_1 = nn.Softmax(dim=1) self.dense5_2 = nn.Linear(128, 4) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense4(x.view(x.shape[0], -1)) x = self.prelu4(x) a = self.dense5_1(x) a = self.softmax5_1(a) b = self.dense5_2(x) return b, a class ONet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3) self.prelu1 = nn.PReLU(32) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3) self.prelu2 = nn.PReLU(64) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(64, 64, kernel_size=3) self.prelu3 = nn.PReLU(64) self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv4 = nn.Conv2d(64, 128, kernel_size=2) self.prelu4 = nn.PReLU(128) self.dense5 = nn.Linear(1152, 256) self.prelu5 = nn.PReLU(256) self.dense6_1 = nn.Linear(256, 2) self.softmax6_1 = nn.Softmax(dim=1) self.dense6_2 = nn.Linear(256, 4) self.dense6_3 = nn.Linear(256, 10) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = self.pool3(x) x = self.conv4(x) x = self.prelu4(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense5(x.view(x.shape[0], -1)) x = self.prelu5(x) a = self.dense6_1(x) a = self.softmax6_1(a) b = self.dense6_2(x) c = self.dense6_3(x) return b, c, a <|reserved_special_token_0|> def bbreg(boundingbox, reg): if reg.shape[1] == 1: reg = torch.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0) return boundingbox def generateBoundingBox(reg, probs, scale, thresh): stride = 2 cellsize = 12 reg = reg.permute(1, 0, 
2, 3) mask = probs >= thresh mask_inds = mask.nonzero(as_tuple=False) image_inds = mask_inds[:, 0] score = probs[mask] reg = reg[:, mask].permute(1, 0) bb = mask_inds[:, 1:].type(reg.dtype).flip(1) q1 = ((stride * bb + 1) / scale).floor() q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor() boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1) return boundingbox, image_inds def nms_numpy(boxes, scores, threshold, method): if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0].copy() y1 = boxes[:, 1].copy() x2 = boxes[:, 2].copy() y2 = boxes[:, 3].copy() s = scores area = (x2 - x1 + 1) * (y2 - y1 + 1) I = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while I.size > 0: i = I[-1] pick[counter] = i counter += 1 idx = I[0:-1] xx1 = np.maximum(x1[i], x1[idx]).copy() yy1 = np.maximum(y1[i], y1[idx]).copy() xx2 = np.minimum(x2[i], x2[idx]).copy() yy2 = np.minimum(y2[i], y2[idx]).copy() w = np.maximum(0.0, xx2 - xx1 + 1).copy() h = np.maximum(0.0, yy2 - yy1 + 1).copy() inter = w * h if method == 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) I = I[np.where(o <= threshold)] pick = pick[:counter].copy() return pick def batched_nms_numpy(boxes, scores, idxs, threshold, method): device = boxes.device if boxes.numel() == 0: return torch.empty((0,), dtype=torch.int64, device=device) max_coordinate = boxes.max() offsets = idxs.to(boxes) * (max_coordinate + 1) boxes_for_nms = boxes + offsets[:, None] boxes_for_nms = boxes_for_nms.cpu().numpy() scores = scores.cpu().numpy() keep = nms_numpy(boxes_for_nms, scores, threshold, method) return torch.as_tensor(keep, dtype=torch.long, device=device) def pad(boxes, w, h): boxes = boxes.trunc().int().cpu().numpy() x = boxes[:, 0] y = boxes[:, 1] ex = boxes[:, 2] ey = boxes[:, 3] x[x < 1] = 1 y[y < 1] = 1 ex[ex > w] = w ey[ey > h] = h return y, ey, x, ex <|reserved_special_token_0|> def imresample(img, sz): im_data = interpolate(img, size=sz, mode='area') return im_data <|reserved_special_token_1|> <|reserved_special_token_0|> class MTCNN: def __init__(self, device=None, model=None): if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt' if model is None: model = torch.hub.load_state_dict_from_url(url) else: model = torch.load(model, map_location=device) self.pnet = PNet().to(device) self.rnet = RNet().to(device) self.onet = ONet().to(device) self.pnet.load_state_dict(model['pnet']) self.rnet.load_state_dict(model['rnet']) self.onet.load_state_dict(model['onet']) def detect(self, imgs, minsize=None): if len(imgs) == 0: return [] if isinstance(imgs[0], np.ndarray): h, w = imgs[0].shape[:2] else: w, h = imgs[0].size if minsize is None: minsize = max(96 * min(w, h) / 1080, 40) boxes, points = [], [] with torch.no_grad(): batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)] for batch in batches: batch_boxes, batch_points = detect_face(batch, minsize, self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709, self.device) boxes += list(batch_boxes) points += list(batch_points) result = [] for box, point in zip(boxes, points): box = np.array(box) point = np.array(point) if len(box) == 0: result.append(None) else: result.append((box[:, :4], box[:, 4], point)) return result def empty_cache(device): if 'cuda' in device: with torch.cuda.device(device): torch.cuda.empty_cache() class PNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 10, 
kernel_size=3) self.prelu1 = nn.PReLU(10) self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv2 = nn.Conv2d(10, 16, kernel_size=3) self.prelu2 = nn.PReLU(16) self.conv3 = nn.Conv2d(16, 32, kernel_size=3) self.prelu3 = nn.PReLU(32) self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1) self.softmax4_1 = nn.Softmax(dim=1) self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.conv3(x) x = self.prelu3(x) a = self.conv4_1(x) a = self.softmax4_1(a) b = self.conv4_2(x) return b, a class RNet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 28, kernel_size=3) self.prelu1 = nn.PReLU(28) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(28, 48, kernel_size=3) self.prelu2 = nn.PReLU(48) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(48, 64, kernel_size=2) self.prelu3 = nn.PReLU(64) self.dense4 = nn.Linear(576, 128) self.prelu4 = nn.PReLU(128) self.dense5_1 = nn.Linear(128, 2) self.softmax5_1 = nn.Softmax(dim=1) self.dense5_2 = nn.Linear(128, 4) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense4(x.view(x.shape[0], -1)) x = self.prelu4(x) a = self.dense5_1(x) a = self.softmax5_1(a) b = self.dense5_2(x) return b, a class ONet(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3) self.prelu1 = nn.PReLU(32) self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3) self.prelu2 = nn.PReLU(64) self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True) self.conv3 = nn.Conv2d(64, 64, kernel_size=3) self.prelu3 = nn.PReLU(64) self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True) self.conv4 = nn.Conv2d(64, 128, kernel_size=2) self.prelu4 = nn.PReLU(128) self.dense5 = nn.Linear(1152, 256) self.prelu5 = nn.PReLU(256) self.dense6_1 = nn.Linear(256, 2) self.softmax6_1 = nn.Softmax(dim=1) self.dense6_2 = nn.Linear(256, 4) self.dense6_3 = nn.Linear(256, 10) def forward(self, x): x = self.conv1(x) x = self.prelu1(x) x = self.pool1(x) x = self.conv2(x) x = self.prelu2(x) x = self.pool2(x) x = self.conv3(x) x = self.prelu3(x) x = self.pool3(x) x = self.conv4(x) x = self.prelu4(x) x = x.permute(0, 3, 2, 1).contiguous() x = self.dense5(x.view(x.shape[0], -1)) x = self.prelu5(x) a = self.dense6_1(x) a = self.softmax6_1(a) b = self.dense6_2(x) c = self.dense6_3(x) return b, c, a def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device): if isinstance(imgs, (np.ndarray, torch.Tensor)): imgs = torch.as_tensor(imgs, device=device) if len(imgs.shape) == 3: imgs = imgs.unsqueeze(0) else: if not isinstance(imgs, (list, tuple)): imgs = [imgs] if any(img.size != imgs[0].size for img in imgs): raise Exception( 'MTCNN batch processing only compatible with equal-dimension images.' 
) imgs = np.stack([np.uint8(img) for img in imgs]) imgs = torch.as_tensor(imgs, device=device) model_dtype = next(pnet.parameters()).dtype imgs = imgs.permute(0, 3, 1, 2).type(model_dtype) batch_size = len(imgs) h, w = imgs.shape[2:4] m = 12.0 / minsize minl = min(h, w) minl = minl * m scale_i = m scales = [] while minl >= 12: scales.append(scale_i) scale_i = scale_i * factor minl = minl * factor boxes = [] image_inds = [] all_inds = [] all_i = 0 for scale in scales: im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1))) im_data = (im_data - 127.5) * 0.0078125 reg, probs = pnet(im_data) empty_cache(device) boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1 ], scale, threshold[0]) boxes.append(boxes_scale) image_inds.append(image_inds_scale) all_inds.append(all_i + image_inds_scale) all_i += batch_size boxes = torch.cat(boxes, dim=0) image_inds = torch.cat(image_inds, dim=0).cpu() all_inds = torch.cat(all_inds, dim=0) pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5) boxes, image_inds = boxes[pick], image_inds[pick] pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7) boxes, image_inds = boxes[pick], image_inds[pick] regw = boxes[:, 2] - boxes[:, 0] regh = boxes[:, 3] - boxes[:, 1] qq1 = boxes[:, 0] + boxes[:, 5] * regw qq2 = boxes[:, 1] + boxes[:, 6] * regh qq3 = boxes[:, 2] + boxes[:, 7] * regw qq4 = boxes[:, 3] + boxes[:, 8] * regh boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0) boxes = rerec(boxes) y, ey, x, ex = pad(boxes, w, h) if len(boxes) > 0: im_data = [] for k in range(len(y)): if ey[k] > y[k] - 1 and ex[k] > x[k] - 1: img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k] ].unsqueeze(0) im_data.append(imresample(img_k, (24, 24))) im_data = torch.cat(im_data, dim=0) im_data = (im_data - 127.5) * 0.0078125 out = [] for batch in im_data.split(2000): out += [rnet(batch)] z = list(zip(*out)) out = torch.cat(z[0]), torch.cat(z[1]) empty_cache(device) out0 = out[0].permute(1, 0) out1 = out[1].permute(1, 0) score = out1[1, :] ipass = score > threshold[1] boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1) image_inds = image_inds[ipass] mv = out0[:, ipass].permute(1, 0) pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7) boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick] boxes = bbreg(boxes, mv) boxes = rerec(boxes) points = torch.zeros(0, 5, 2, device=device) if len(boxes) > 0: y, ey, x, ex = pad(boxes, w, h) im_data = [] for k in range(len(y)): if ey[k] > y[k] - 1 and ex[k] > x[k] - 1: img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k] ].unsqueeze(0) im_data.append(imresample(img_k, (48, 48))) im_data = torch.cat(im_data, dim=0) im_data = (im_data - 127.5) * 0.0078125 out = [] for batch in im_data.split(500): out += [onet(batch)] z = list(zip(*out)) out = torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]) empty_cache(device) out0 = out[0].permute(1, 0) out1 = out[1].permute(1, 0) out2 = out[2].permute(1, 0) score = out2[1, :] points = out1 ipass = score > threshold[2] points = points[:, ipass] boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1) image_inds = image_inds[ipass] mv = out0[:, ipass].permute(1, 0) w_i = boxes[:, 2] - boxes[:, 0] + 1 h_i = boxes[:, 3] - boxes[:, 1] + 1 points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1 ) - 1 points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1 ) - 1 points = torch.stack((points_x, points_y)).permute(2, 1, 0) boxes = bbreg(boxes, mv) pick = 
        batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
        boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
    boxes = boxes.cpu().numpy()
    points = points.cpu().numpy()
    batch_boxes = []
    batch_points = []
    for b_i in range(batch_size):
        b_i_inds = np.where(image_inds == b_i)
        batch_boxes.append(boxes[b_i_inds].copy())
        batch_points.append(points[b_i_inds].copy())
    batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
    empty_cache(device)
    return batch_boxes, batch_points


def bbreg(boundingbox, reg):
    if reg.shape[1] == 1:
        reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
    w = boundingbox[:, 2] - boundingbox[:, 0] + 1
    h = boundingbox[:, 3] - boundingbox[:, 1] + 1
    b1 = boundingbox[:, 0] + reg[:, 0] * w
    b2 = boundingbox[:, 1] + reg[:, 1] * h
    b3 = boundingbox[:, 2] + reg[:, 2] * w
    b4 = boundingbox[:, 3] + reg[:, 3] * h
    boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
    return boundingbox


def generateBoundingBox(reg, probs, scale, thresh):
    stride = 2
    cellsize = 12
    reg = reg.permute(1, 0, 2, 3)
    mask = probs >= thresh
    mask_inds = mask.nonzero(as_tuple=False)
    image_inds = mask_inds[:, 0]
    score = probs[mask]
    reg = reg[:, mask].permute(1, 0)
    bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
    q1 = ((stride * bb + 1) / scale).floor()
    q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
    boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
    return boundingbox, image_inds


def nms_numpy(boxes, scores, threshold, method):
    if boxes.size == 0:
        return np.empty((0, 3))
    x1 = boxes[:, 0].copy()
    y1 = boxes[:, 1].copy()
    x2 = boxes[:, 2].copy()
    y2 = boxes[:, 3].copy()
    s = scores
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size > 0:
        i = I[-1]
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        xx1 = np.maximum(x1[i], x1[idx]).copy()
        yy1 = np.maximum(y1[i], y1[idx]).copy()
        xx2 = np.minimum(x2[i], x2[idx]).copy()
        yy2 = np.minimum(y2[i], y2[idx]).copy()
        w = np.maximum(0.0, xx2 - xx1 + 1).copy()
        h = np.maximum(0.0, yy2 - yy1 + 1).copy()
        inter = w * h
        if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]
    pick = pick[:counter].copy()
    return pick


def batched_nms_numpy(boxes, scores, idxs, threshold, method):
    device = boxes.device
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=device)
    max_coordinate = boxes.max()
    offsets = idxs.to(boxes) * (max_coordinate + 1)
    boxes_for_nms = boxes + offsets[:, None]
    boxes_for_nms = boxes_for_nms.cpu().numpy()
    scores = scores.cpu().numpy()
    keep = nms_numpy(boxes_for_nms, scores, threshold, method)
    return torch.as_tensor(keep, dtype=torch.long, device=device)


def pad(boxes, w, h):
    boxes = boxes.trunc().int().cpu().numpy()
    x = boxes[:, 0]
    y = boxes[:, 1]
    ex = boxes[:, 2]
    ey = boxes[:, 3]
    x[x < 1] = 1
    y[y < 1] = 1
    ex[ex > w] = w
    ey[ey > h] = h
    return y, ey, x, ex


def rerec(bboxA):
    h = bboxA[:, 3] - bboxA[:, 1]
    w = bboxA[:, 2] - bboxA[:, 0]
    l = torch.max(w, h)
    bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
    bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
    bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
    return bboxA


def imresample(img, sz):
    im_data = interpolate(img, size=sz, mode='area')
    return im_data
<|reserved_special_token_1|>
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms


class MTCNN():
    def __init__(self, device=None, model=None):
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device

        url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
        if model is None:
            model = torch.hub.load_state_dict_from_url(url)
        else:
            model = torch.load(model, map_location=device)

        self.pnet = PNet().to(device)
        self.rnet = RNet().to(device)
        self.onet = ONet().to(device)

        self.pnet.load_state_dict(model['pnet'])
        self.rnet.load_state_dict(model['rnet'])
        self.onet.load_state_dict(model['onet'])

    def detect(self, imgs, minsize=None):
        if len(imgs) == 0:
            return []

        if isinstance(imgs[0], np.ndarray):
            h, w = imgs[0].shape[:2]
        else:
            w, h = imgs[0].size

        if minsize is None:
            minsize = max(96 * min(w, h)/1080, 40)

        boxes, points = [], []

        with torch.no_grad():
            batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
            for batch in batches:
                batch_boxes, batch_points = detect_face(
                    batch, minsize, self.pnet, self.rnet, self.onet,
                    [0.7, 0.8, 0.9], 0.709, self.device)
                boxes += list(batch_boxes)
                points += list(batch_points)

        result = []
        for box, point in zip(boxes, points):
            box = np.array(box)
            point = np.array(point)
            if len(box) == 0:
                result.append(None)
            else:
                result.append((box[:, :4], box[:, 4], point))
        return result


def empty_cache(device):
    if 'cuda' in device:
        with torch.cuda.device(device):
            torch.cuda.empty_cache()


class PNet(nn.Module):

    def __init__(self):
        super().__init__()

        self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
        self.prelu1 = nn.PReLU(10)
        self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
        self.prelu2 = nn.PReLU(16)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
        self.prelu3 = nn.PReLU(32)
        self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
        self.softmax4_1 = nn.Softmax(dim=1)
        self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        a = self.conv4_1(x)
        a = self.softmax4_1(a)
        b = self.conv4_2(x)
        return b, a


class RNet(nn.Module):

    def __init__(self):
        super().__init__()

        self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
        self.prelu1 = nn.PReLU(28)
        self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
        self.prelu2 = nn.PReLU(48)
        self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
        self.prelu3 = nn.PReLU(64)
        self.dense4 = nn.Linear(576, 128)
        self.prelu4 = nn.PReLU(128)
        self.dense5_1 = nn.Linear(128, 2)
        self.softmax5_1 = nn.Softmax(dim=1)
        self.dense5_2 = nn.Linear(128, 4)

    def forward(self, x):
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        x = x.permute(0, 3, 2, 1).contiguous()
        x = self.dense4(x.view(x.shape[0], -1))
        x = self.prelu4(x)
        a = self.dense5_1(x)
        a = self.softmax5_1(a)
        b = self.dense5_2(x)
        return b, a


class ONet(nn.Module):

    def __init__(self):
        super().__init__()

        self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
        self.prelu1 = nn.PReLU(32)
        self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.prelu2 = nn.PReLU(64)
        self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
        self.prelu3 = nn.PReLU(64)
        self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
        self.prelu4 = nn.PReLU(128)
        self.dense5 = nn.Linear(1152, 256)
        self.prelu5 = nn.PReLU(256)
        self.dense6_1 = nn.Linear(256, 2)
        self.softmax6_1 = nn.Softmax(dim=1)
        self.dense6_2 = nn.Linear(256, 4)
        self.dense6_3 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.prelu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.prelu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.prelu3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.prelu4(x)
        x = x.permute(0, 3, 2, 1).contiguous()
        x = self.dense5(x.view(x.shape[0], -1))
        x = self.prelu5(x)
        a = self.dense6_1(x)
        a = self.softmax6_1(a)
        b = self.dense6_2(x)
        c = self.dense6_3(x)
        return b, c, a


def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
    if isinstance(imgs, (np.ndarray, torch.Tensor)):
        imgs = torch.as_tensor(imgs, device=device)
        if len(imgs.shape) == 3:
            imgs = imgs.unsqueeze(0)
    else:
        if not isinstance(imgs, (list, tuple)):
            imgs = [imgs]
        if any(img.size != imgs[0].size for img in imgs):
            raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
        imgs = np.stack([np.uint8(img) for img in imgs])

    imgs = torch.as_tensor(imgs, device=device)

    model_dtype = next(pnet.parameters()).dtype
    imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)

    batch_size = len(imgs)
    h, w = imgs.shape[2:4]
    m = 12.0 / minsize
    minl = min(h, w)
    minl = minl * m

    # Create scale pyramid
    scale_i = m
    scales = []
    while minl >= 12:
        scales.append(scale_i)
        scale_i = scale_i * factor
        minl = minl * factor

    # First stage
    boxes = []
    image_inds = []
    all_inds = []
    all_i = 0
    for scale in scales:
        im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
        im_data = (im_data - 127.5) * 0.0078125
        reg, probs = pnet(im_data)
        empty_cache(device)
        boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
        boxes.append(boxes_scale)
        image_inds.append(image_inds_scale)
        all_inds.append(all_i + image_inds_scale)
        all_i += batch_size

    boxes = torch.cat(boxes, dim=0)
    image_inds = torch.cat(image_inds, dim=0).cpu()
    all_inds = torch.cat(all_inds, dim=0)

    # NMS within each scale + image
    pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
    boxes, image_inds = boxes[pick], image_inds[pick]

    # NMS within each image
    pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
    boxes, image_inds = boxes[pick], image_inds[pick]

    regw = boxes[:, 2] - boxes[:, 0]
    regh = boxes[:, 3] - boxes[:, 1]
    qq1 = boxes[:, 0] + boxes[:, 5] * regw
    qq2 = boxes[:, 1] + boxes[:, 6] * regh
    qq3 = boxes[:, 2] + boxes[:, 7] * regw
    qq4 = boxes[:, 3] + boxes[:, 8] * regh
    boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
    boxes = rerec(boxes)
    y, ey, x, ex = pad(boxes, w, h)

    # Second stage
    if len(boxes) > 0:
        im_data = []
        for k in range(len(y)):
            if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
                img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
                im_data.append(imresample(img_k, (24, 24)))
        im_data = torch.cat(im_data, dim=0)
        im_data = (im_data - 127.5) * 0.0078125

        out = []
        for batch in im_data.split(2000):
            out += [rnet(batch)]
        z = list(zip(*out))
        out = (torch.cat(z[0]), torch.cat(z[1]))
        empty_cache(device)

        out0 = out[0].permute(1, 0)
        out1 = out[1].permute(1, 0)
        score = out1[1, :]
        ipass = score > threshold[1]
        boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
        image_inds = image_inds[ipass]
        mv = out0[:, ipass].permute(1, 0)

        # NMS within each image
        pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
        boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
        boxes = bbreg(boxes, mv)
        boxes = rerec(boxes)

    # Third stage
    points = torch.zeros(0, 5, 2, device=device)
    if len(boxes) > 0:
        y, ey, x, ex = pad(boxes, w, h)
        im_data = []
        for k in range(len(y)):
            if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
                img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
                im_data.append(imresample(img_k, (48, 48)))
        im_data = torch.cat(im_data, dim=0)
        im_data = (im_data - 127.5) * 0.0078125

        out = []
        for batch in im_data.split(500):
            out += [onet(batch)]
        z = list(zip(*out))
        out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
        empty_cache(device)

        out0 = out[0].permute(1, 0)
        out1 = out[1].permute(1, 0)
        out2 = out[2].permute(1, 0)
        score = out2[1, :]
        points = out1
        ipass = score > threshold[2]
        points = points[:, ipass]
        boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
        image_inds = image_inds[ipass]
        mv = out0[:, ipass].permute(1, 0)

        w_i = boxes[:, 2] - boxes[:, 0] + 1
        h_i = boxes[:, 3] - boxes[:, 1] + 1
        points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
        points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
        points = torch.stack((points_x, points_y)).permute(2, 1, 0)
        boxes = bbreg(boxes, mv)

        # NMS within each image using "Min" strategy
        # pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
        pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
        boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]

    boxes = boxes.cpu().numpy()
    points = points.cpu().numpy()

    batch_boxes = []
    batch_points = []
    for b_i in range(batch_size):
        b_i_inds = np.where(image_inds == b_i)
        batch_boxes.append(boxes[b_i_inds].copy())
        batch_points.append(points[b_i_inds].copy())

    batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
    empty_cache(device)

    return batch_boxes, batch_points


def bbreg(boundingbox, reg):
    if reg.shape[1] == 1:
        reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))

    w = boundingbox[:, 2] - boundingbox[:, 0] + 1
    h = boundingbox[:, 3] - boundingbox[:, 1] + 1
    b1 = boundingbox[:, 0] + reg[:, 0] * w
    b2 = boundingbox[:, 1] + reg[:, 1] * h
    b3 = boundingbox[:, 2] + reg[:, 2] * w
    b4 = boundingbox[:, 3] + reg[:, 3] * h
    boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)

    return boundingbox


def generateBoundingBox(reg, probs, scale, thresh):
    stride = 2
    cellsize = 12

    reg = reg.permute(1, 0, 2, 3)

    mask = probs >= thresh
    mask_inds = mask.nonzero(as_tuple=False)
    image_inds = mask_inds[:, 0]
    score = probs[mask]
    reg = reg[:, mask].permute(1, 0)
    bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
    q1 = ((stride * bb + 1) / scale).floor()
    q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
    boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
    return boundingbox, image_inds


def nms_numpy(boxes, scores, threshold, method):
    if boxes.size == 0:
        return np.empty((0, 3))

    x1 = boxes[:, 0].copy()
    y1 = boxes[:, 1].copy()
    x2 = boxes[:, 2].copy()
    y2 = boxes[:, 3].copy()
    s = scores
    area = (x2 - x1 + 1) * (y2 - y1 + 1)

    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size > 0:
        i = I[-1]
        pick[counter] = i
        counter += 1
        idx = I[0:-1]

        xx1 = np.maximum(x1[i], x1[idx]).copy()
        yy1 = np.maximum(y1[i], y1[idx]).copy()
        xx2 = np.minimum(x2[i], x2[idx]).copy()
        yy2 = np.minimum(y2[i], y2[idx]).copy()

        w = np.maximum(0.0, xx2 - xx1 + 1).copy()
        h = np.maximum(0.0, yy2 - yy1 + 1).copy()

        inter = w * h
        if method == "Min":
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]

    pick = pick[:counter].copy()
    return pick


def batched_nms_numpy(boxes, scores, idxs, threshold, method):
    device = boxes.device
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=device)
    # strategy: in order to perform NMS independently per class.
    # we add an offset to all the boxes. The offset is dependent
    # only on the class idx, and is large enough so that boxes
    # from different classes do not overlap
    max_coordinate = boxes.max()
    offsets = idxs.to(boxes) * (max_coordinate + 1)
    boxes_for_nms = boxes + offsets[:, None]
    boxes_for_nms = boxes_for_nms.cpu().numpy()
    scores = scores.cpu().numpy()
    keep = nms_numpy(boxes_for_nms, scores, threshold, method)
    return torch.as_tensor(keep, dtype=torch.long, device=device)


def pad(boxes, w, h):
    boxes = boxes.trunc().int().cpu().numpy()
    x = boxes[:, 0]
    y = boxes[:, 1]
    ex = boxes[:, 2]
    ey = boxes[:, 3]

    x[x < 1] = 1
    y[y < 1] = 1
    ex[ex > w] = w
    ey[ey > h] = h

    return y, ey, x, ex


def rerec(bboxA):
    h = bboxA[:, 3] - bboxA[:, 1]
    w = bboxA[:, 2] - bboxA[:, 0]

    l = torch.max(w, h)
    bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
    bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
    bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)

    return bboxA


def imresample(img, sz):
    im_data = interpolate(img, size=sz, mode="area")
    return im_data
flexible
{ "blob_id": "865121e7eb5f9c70adf44d33d21f30c22f13ec56", "index": 7012, "step-1": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\n<mask token>\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, 
kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n", "step-2": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = 
device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\n<mask token>\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = 
nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n", "step-3": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 
'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\ndef empty_cache(device):\n if 'cuda' in device:\n with torch.cuda.device(device):\n torch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = 
nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n", "step-4": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if 
torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\ndef empty_cache(device):\n if 'cuda' in device:\n with torch.cuda.device(device):\n torch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 
128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\ndef detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):\n if isinstance(imgs, (np.ndarray, torch.Tensor)):\n imgs = torch.as_tensor(imgs, device=device)\n if len(imgs.shape) == 3:\n imgs = imgs.unsqueeze(0)\n else:\n if not isinstance(imgs, (list, tuple)):\n imgs = [imgs]\n if any(img.size != imgs[0].size for img in imgs):\n raise Exception(\n 'MTCNN batch processing only compatible with equal-dimension images.'\n )\n imgs = np.stack([np.uint8(img) for img in imgs])\n imgs = torch.as_tensor(imgs, device=device)\n model_dtype = next(pnet.parameters()).dtype\n imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)\n batch_size = len(imgs)\n h, w = imgs.shape[2:4]\n m = 12.0 / minsize\n minl = min(h, w)\n minl = minl * m\n scale_i = m\n scales = []\n while minl >= 12:\n scales.append(scale_i)\n scale_i = scale_i * factor\n minl = minl * factor\n boxes = []\n image_inds = []\n all_inds = []\n all_i = 0\n for scale in scales:\n im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))\n im_data = (im_data - 127.5) * 0.0078125\n reg, probs = pnet(im_data)\n empty_cache(device)\n boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1\n ], scale, threshold[0])\n boxes.append(boxes_scale)\n image_inds.append(image_inds_scale)\n all_inds.append(all_i + image_inds_scale)\n all_i += batch_size\n boxes = torch.cat(boxes, dim=0)\n image_inds = torch.cat(image_inds, dim=0).cpu()\n all_inds = torch.cat(all_inds, dim=0)\n pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)\n boxes, image_inds = boxes[pick], image_inds[pick]\n pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n boxes, image_inds = boxes[pick], image_inds[pick]\n regw = boxes[:, 2] - boxes[:, 0]\n regh = boxes[:, 3] - boxes[:, 1]\n qq1 = boxes[:, 0] + boxes[:, 5] * regw\n qq2 = boxes[:, 1] + boxes[:, 6] * regh\n qq3 = boxes[:, 2] + boxes[:, 7] * regw\n qq4 = boxes[:, 3] + boxes[:, 8] * regh\n boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)\n boxes = rerec(boxes)\n y, ey, x, ex = pad(boxes, w, h)\n if len(boxes) > 0:\n im_data = []\n for k in range(len(y)):\n if ey[k] > y[k] - 1 and ex[k] > x[k] - 1:\n img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k]\n ].unsqueeze(0)\n im_data.append(imresample(img_k, (24, 24)))\n im_data = torch.cat(im_data, dim=0)\n im_data = (im_data - 127.5) * 0.0078125\n out = []\n for batch in im_data.split(2000):\n out += [rnet(batch)]\n z = list(zip(*out))\n out = torch.cat(z[0]), torch.cat(z[1])\n empty_cache(device)\n out0 = out[0].permute(1, 0)\n out1 = out[1].permute(1, 0)\n score = out1[1, :]\n ipass = score > threshold[1]\n boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n image_inds = image_inds[ipass]\n mv = out0[:, ipass].permute(1, 0)\n pick = batched_nms(boxes[:, :4], 
boxes[:, 4], image_inds, 0.7)\n boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]\n boxes = bbreg(boxes, mv)\n boxes = rerec(boxes)\n points = torch.zeros(0, 5, 2, device=device)\n if len(boxes) > 0:\n y, ey, x, ex = pad(boxes, w, h)\n im_data = []\n for k in range(len(y)):\n if ey[k] > y[k] - 1 and ex[k] > x[k] - 1:\n img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k]\n ].unsqueeze(0)\n im_data.append(imresample(img_k, (48, 48)))\n im_data = torch.cat(im_data, dim=0)\n im_data = (im_data - 127.5) * 0.0078125\n out = []\n for batch in im_data.split(500):\n out += [onet(batch)]\n z = list(zip(*out))\n out = torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2])\n empty_cache(device)\n out0 = out[0].permute(1, 0)\n out1 = out[1].permute(1, 0)\n out2 = out[2].permute(1, 0)\n score = out2[1, :]\n points = out1\n ipass = score > threshold[2]\n points = points[:, ipass]\n boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n image_inds = image_inds[ipass]\n mv = out0[:, ipass].permute(1, 0)\n w_i = boxes[:, 2] - boxes[:, 0] + 1\n h_i = boxes[:, 3] - boxes[:, 1] + 1\n points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1\n ) - 1\n points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1\n ) - 1\n points = torch.stack((points_x, points_y)).permute(2, 1, 0)\n boxes = bbreg(boxes, mv)\n pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7,\n 'Min')\n boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]\n boxes = boxes.cpu().numpy()\n points = points.cpu().numpy()\n batch_boxes = []\n batch_points = []\n for b_i in range(batch_size):\n b_i_inds = np.where(image_inds == b_i)\n batch_boxes.append(boxes[b_i_inds].copy())\n batch_points.append(points[b_i_inds].copy())\n batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)\n empty_cache(device)\n return batch_boxes, batch_points\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = 
np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\ndef rerec(bboxA):\n h = bboxA[:, 3] - bboxA[:, 1]\n w = bboxA[:, 2] - bboxA[:, 0]\n l = torch.max(w, h)\n bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5\n bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5\n bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)\n return bboxA\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n", "step-5": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import interpolate\nfrom torchvision.ops.boxes import batched_nms\n\n\nclass MTCNN():\n\tdef __init__(self, device=None, model=None):\n\t\tif device is None:\n\t\t\tdevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\t\tself.device = device\n\n\t\turl = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n\t\tif model is None:\n\t\t\tmodel = torch.hub.load_state_dict_from_url(url)\n\t\telse:\n\t\t\tmodel = torch.load(model, map_location=device)\n\n\t\tself.pnet = PNet().to(device)\n\t\tself.rnet = RNet().to(device)\n\t\tself.onet = ONet().to(device)\n\n\t\tself.pnet.load_state_dict(model['pnet'])\n\t\tself.rnet.load_state_dict(model['rnet'])\n\t\tself.onet.load_state_dict(model['onet'])\n\n\n\tdef detect(self, imgs, minsize=None):\n\t\tif len(imgs) == 0:\n\t\t\treturn []\n\n\t\tif isinstance(imgs[0], np.ndarray):\n\t\t\th, w = imgs[0].shape[:2]\n\t\telse:\n\t\t\tw, h = imgs[0].size\n\n\t\tif minsize is None:\n\t\t\tminsize = max(96 * min(w, h)/1080, 40)\n\n\t\tboxes, points = [], []\n\n\t\twith torch.no_grad():\n\t\t\tbatches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]\n\t\t\tfor batch in batches:\n\t\t\t\tbatch_boxes, batch_points = detect_face(\n\t\t\t\t\tbatch, minsize, self.pnet, self.rnet, self.onet,\n\t\t\t\t\t[0.7, 0.8, 0.9], 0.709, self.device)\n\t\t\t\tboxes += list(batch_boxes)\n\t\t\t\tpoints += list(batch_points)\n\n\t\tresult = []\n\t\tfor box, point in zip(boxes, points):\n\t\t\tbox = np.array(box)\n\t\t\tpoint = np.array(point)\n\t\t\tif len(box) == 0:\n\t\t\t\tresult.append(None)\n\t\t\telse:\n\t\t\t\tresult.append((box[:, :4], box[:, 4], point))\n\t\treturn result\n\n\ndef empty_cache(device):\n\tif 'cuda' in device:\n\t\twith torch.cuda.device(device):\n\t\t\ttorch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(10)\n\t\tself.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(16)\n\t\tself.conv3 = nn.Conv2d(16, 32, 
kernel_size=3)\n\t\tself.prelu3 = nn.PReLU(32)\n\t\tself.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n\t\tself.softmax4_1 = nn.Softmax(dim=1)\n\t\tself.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\ta = self.conv4_1(x)\n\t\ta = self.softmax4_1(a)\n\t\tb = self.conv4_2(x)\n\t\treturn b, a\n\n\nclass RNet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(28)\n\t\tself.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(48)\n\t\tself.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n\t\tself.prelu3 = nn.PReLU(64)\n\t\tself.dense4 = nn.Linear(576, 128)\n\t\tself.prelu4 = nn.PReLU(128)\n\t\tself.dense5_1 = nn.Linear(128, 2)\n\t\tself.softmax5_1 = nn.Softmax(dim=1)\n\t\tself.dense5_2 = nn.Linear(128, 4)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.pool2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\tx = x.permute(0, 3, 2, 1).contiguous()\n\t\tx = self.dense4(x.view(x.shape[0], -1))\n\t\tx = self.prelu4(x)\n\t\ta = self.dense5_1(x)\n\t\ta = self.softmax5_1(a)\n\t\tb = self.dense5_2(x)\n\t\treturn b, a\n\n\nclass ONet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(32)\n\t\tself.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(64)\n\t\tself.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n\t\tself.prelu3 = nn.PReLU(64)\n\t\tself.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n\t\tself.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n\t\tself.prelu4 = nn.PReLU(128)\n\t\tself.dense5 = nn.Linear(1152, 256)\n\t\tself.prelu5 = nn.PReLU(256)\n\t\tself.dense6_1 = nn.Linear(256, 2)\n\t\tself.softmax6_1 = nn.Softmax(dim=1)\n\t\tself.dense6_2 = nn.Linear(256, 4)\n\t\tself.dense6_3 = nn.Linear(256, 10)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.pool2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\tx = self.pool3(x)\n\t\tx = self.conv4(x)\n\t\tx = self.prelu4(x)\n\t\tx = x.permute(0, 3, 2, 1).contiguous()\n\t\tx = self.dense5(x.view(x.shape[0], -1))\n\t\tx = self.prelu5(x)\n\t\ta = self.dense6_1(x)\n\t\ta = self.softmax6_1(a)\n\t\tb = self.dense6_2(x)\n\t\tc = self.dense6_3(x)\n\t\treturn b, c, a\n\n\ndef detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):\n\tif isinstance(imgs, (np.ndarray, torch.Tensor)):\n\t\timgs = torch.as_tensor(imgs, device=device)\n\t\tif len(imgs.shape) == 3:\n\t\t\timgs = imgs.unsqueeze(0)\n\telse:\n\t\tif not isinstance(imgs, (list, tuple)):\n\t\t\timgs = [imgs]\n\t\tif any(img.size != imgs[0].size for img in imgs):\n\t\t\traise Exception(\"MTCNN batch processing only compatible with equal-dimension images.\")\n\t\timgs = np.stack([np.uint8(img) for img in imgs])\n\n\timgs = torch.as_tensor(imgs, device=device)\n\n\tmodel_dtype = next(pnet.parameters()).dtype\n\timgs = imgs.permute(0, 3, 1, 2).type(model_dtype)\n\n\tbatch_size = 
len(imgs)\n\th, w = imgs.shape[2:4]\n\tm = 12.0 / minsize\n\tminl = min(h, w)\n\tminl = minl * m\n\n\t# Create scale pyramid\n\tscale_i = m\n\tscales = []\n\twhile minl >= 12:\n\t\tscales.append(scale_i)\n\t\tscale_i = scale_i * factor\n\t\tminl = minl * factor\n\n\t# First stage\n\tboxes = []\n\timage_inds = []\n\tall_inds = []\n\tall_i = 0\n\tfor scale in scales:\n\t\tim_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\t\treg, probs = pnet(im_data)\n\t\tempty_cache(device)\n\t\tboxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])\n\t\tboxes.append(boxes_scale)\n\t\timage_inds.append(image_inds_scale)\n\t\tall_inds.append(all_i + image_inds_scale)\n\t\tall_i += batch_size\n\n\tboxes = torch.cat(boxes, dim=0)\n\timage_inds = torch.cat(image_inds, dim=0).cpu()\n\tall_inds = torch.cat(all_inds, dim=0)\n\n\t# NMS within each scale + image\n\tpick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)\n\tboxes, image_inds = boxes[pick], image_inds[pick]\n\n\t# NMS within each image\n\tpick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\tboxes, image_inds = boxes[pick], image_inds[pick]\n\n\tregw = boxes[:, 2] - boxes[:, 0]\n\tregh = boxes[:, 3] - boxes[:, 1]\n\tqq1 = boxes[:, 0] + boxes[:, 5] * regw\n\tqq2 = boxes[:, 1] + boxes[:, 6] * regh\n\tqq3 = boxes[:, 2] + boxes[:, 7] * regw\n\tqq4 = boxes[:, 3] + boxes[:, 8] * regh\n\tboxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)\n\tboxes = rerec(boxes)\n\ty, ey, x, ex = pad(boxes, w, h)\n\n\t# Second stage\n\tif len(boxes) > 0:\n\t\tim_data = []\n\t\tfor k in range(len(y)):\n\t\t\tif ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):\n\t\t\t\timg_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)\n\t\t\t\tim_data.append(imresample(img_k, (24, 24)))\n\t\tim_data = torch.cat(im_data, dim=0)\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\n\t\tout = []\n\t\tfor batch in im_data.split(2000):\n\t\t\tout += [rnet(batch)]\n\t\tz = list(zip(*out))\n\t\tout = (torch.cat(z[0]), torch.cat(z[1]))\n\t\tempty_cache(device)\n\n\t\tout0 = out[0].permute(1, 0)\n\t\tout1 = out[1].permute(1, 0)\n\t\tscore = out1[1, :]\n\t\tipass = score > threshold[1]\n\t\tboxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n\t\timage_inds = image_inds[ipass]\n\t\tmv = out0[:, ipass].permute(1, 0)\n\n\t\t# NMS within each image\n\t\tpick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\t\tboxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]\n\t\tboxes = bbreg(boxes, mv)\n\t\tboxes = rerec(boxes)\n\n\t# Third stage\n\tpoints = torch.zeros(0, 5, 2, device=device)\n\tif len(boxes) > 0:\n\t\ty, ey, x, ex = pad(boxes, w, h)\n\t\tim_data = []\n\t\tfor k in range(len(y)):\n\t\t\tif ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):\n\t\t\t\timg_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)\n\t\t\t\tim_data.append(imresample(img_k, (48, 48)))\n\t\tim_data = torch.cat(im_data, dim=0)\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\n\t\tout = []\n\t\tfor batch in im_data.split(500):\n\t\t\tout += [onet(batch)]\n\t\tz = list(zip(*out))\n\t\tout = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))\n\t\tempty_cache(device)\n\n\t\tout0 = out[0].permute(1, 0)\n\t\tout1 = out[1].permute(1, 0)\n\t\tout2 = out[2].permute(1, 0)\n\t\tscore = out2[1, :]\n\t\tpoints = out1\n\t\tipass = score > threshold[2]\n\t\tpoints = points[:, ipass]\n\t\tboxes = torch.cat((boxes[ipass, :4], 
score[ipass].unsqueeze(1)), dim=1)\n\t\timage_inds = image_inds[ipass]\n\t\tmv = out0[:, ipass].permute(1, 0)\n\n\t\tw_i = boxes[:, 2] - boxes[:, 0] + 1\n\t\th_i = boxes[:, 3] - boxes[:, 1] + 1\n\t\tpoints_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1\n\t\tpoints_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1\n\t\tpoints = torch.stack((points_x, points_y)).permute(2, 1, 0)\n\t\tboxes = bbreg(boxes, mv)\n\n\t\t# NMS within each image using \"Min\" strategy\n\t\t# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\t\tpick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')\n\t\tboxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]\n\n\tboxes = boxes.cpu().numpy()\n\tpoints = points.cpu().numpy()\n\n\tbatch_boxes = []\n\tbatch_points = []\n\tfor b_i in range(batch_size):\n\t\tb_i_inds = np.where(image_inds == b_i)\n\t\tbatch_boxes.append(boxes[b_i_inds].copy())\n\t\tbatch_points.append(points[b_i_inds].copy())\n\n\tbatch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)\n\tempty_cache(device)\n\n\treturn batch_boxes, batch_points\n\n\ndef bbreg(boundingbox, reg):\n\tif reg.shape[1] == 1:\n\t\treg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n\n\tw = boundingbox[:, 2] - boundingbox[:, 0] + 1\n\th = boundingbox[:, 3] - boundingbox[:, 1] + 1\n\tb1 = boundingbox[:, 0] + reg[:, 0] * w\n\tb2 = boundingbox[:, 1] + reg[:, 1] * h\n\tb3 = boundingbox[:, 2] + reg[:, 2] * w\n\tb4 = boundingbox[:, 3] + reg[:, 3] * h\n\tboundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n\n\treturn boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n\tstride = 2\n\tcellsize = 12\n\n\treg = reg.permute(1, 0, 2, 3)\n\n\tmask = probs >= thresh\n\tmask_inds = mask.nonzero(as_tuple=False)\n\timage_inds = mask_inds[:, 0]\n\tscore = probs[mask]\n\treg = reg[:, mask].permute(1, 0)\n\tbb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n\tq1 = ((stride * bb + 1) / scale).floor()\n\tq2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n\tboundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n\treturn boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n\tif boxes.size == 0:\n\t\treturn np.empty((0, 3))\n\n\tx1 = boxes[:, 0].copy()\n\ty1 = boxes[:, 1].copy()\n\tx2 = boxes[:, 2].copy()\n\ty2 = boxes[:, 3].copy()\n\ts = scores\n\tarea = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n\tI = np.argsort(s)\n\tpick = np.zeros_like(s, dtype=np.int16)\n\tcounter = 0\n\twhile I.size > 0:\n\t\ti = I[-1]\n\t\tpick[counter] = i\n\t\tcounter += 1\n\t\tidx = I[0:-1]\n\n\t\txx1 = np.maximum(x1[i], x1[idx]).copy()\n\t\tyy1 = np.maximum(y1[i], y1[idx]).copy()\n\t\txx2 = np.minimum(x2[i], x2[idx]).copy()\n\t\tyy2 = np.minimum(y2[i], y2[idx]).copy()\n\n\t\tw = np.maximum(0.0, xx2 - xx1 + 1).copy()\n\t\th = np.maximum(0.0, yy2 - yy1 + 1).copy()\n\n\t\tinter = w * h\n\t\tif method == \"Min\":\n\t\t\to = inter / np.minimum(area[i], area[idx])\n\t\telse:\n\t\t\to = inter / (area[i] + area[idx] - inter)\n\t\tI = I[np.where(o <= threshold)]\n\n\tpick = pick[:counter].copy()\n\treturn pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n\tdevice = boxes.device\n\tif boxes.numel() == 0:\n\t\treturn torch.empty((0,), dtype=torch.int64, device=device)\n\t# strategy: in order to perform NMS independently per class.\n\t# we add an offset to all the boxes. 
The offset is dependent\n\t# only on the class idx, and is large enough so that boxes\n\t# from different classes do not overlap\n\tmax_coordinate = boxes.max()\n\toffsets = idxs.to(boxes) * (max_coordinate + 1)\n\tboxes_for_nms = boxes + offsets[:, None]\n\tboxes_for_nms = boxes_for_nms.cpu().numpy()\n\tscores = scores.cpu().numpy()\n\tkeep = nms_numpy(boxes_for_nms, scores, threshold, method)\n\treturn torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n\tboxes = boxes.trunc().int().cpu().numpy()\n\tx = boxes[:, 0]\n\ty = boxes[:, 1]\n\tex = boxes[:, 2]\n\tey = boxes[:, 3]\n\n\tx[x < 1] = 1\n\ty[y < 1] = 1\n\tex[ex > w] = w\n\tey[ey > h] = h\n\n\treturn y, ey, x, ex\n\n\ndef rerec(bboxA):\n\th = bboxA[:, 3] - bboxA[:, 1]\n\tw = bboxA[:, 2] - bboxA[:, 0]\n\n\tl = torch.max(w, h)\n\tbboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5\n\tbboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5\n\tbboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)\n\n\treturn bboxA\n\n\ndef imresample(img, sz):\n\tim_data = interpolate(img, size=sz, mode=\"area\")\n\treturn im_data", "step-ids": [ 17, 18, 19, 21, 23 ] }
[ 17, 18, 19, 21, 23 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Book(models.Model):
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Book(models.Model):
    title = models.TextField(max_length=32, blank=False, null=False)
<|reserved_special_token_1|>
from django.db import models


class Book(models.Model):
    title = models.TextField(max_length=32, blank=False, null=False)
<|reserved_special_token_1|>
from django.db import models


class Book(models.Model):
    title = models.TextField(max_length=32, blank=False, null=False)

# from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
#
#
# class UserAccountManager(BaseUserManager):
#     def create_user(self, email, firstname,lastname, phonenumber, password=None,):
#
#         if not email:
#             raise ValueError('Users must have an email address')
#         email = self.normalize_email(email)
#         user = self.model(email=email, name=firstname)
#         user.set_password(password)
#         user.save()
#
# class UserAccount(AbstractBaseUser, PermissionsMixin):
#     email = models.EmailField(max_length=255, unique=True)
#     firstname = models.CharField(max_length=255)
#     lastname = models.CharField(max_length=255)
#     is_active = models.BooleanField(default=True)
#     is_staff = models.BooleanField(default=True)
#
#     objects = UserAccountManager()
#
#     USERNAME_FILED = 'email'
#     REQUIRED_FIELDS = ['firstname','lastname','phonenumber']
#
#     def get_full_name(self):
#         return self.firstname + " " + self.lastname
#
#     def get_short_name(self):
#         return self.firstname
#
#     def __str__(self):
#         return self.email
flexible
{ "blob_id": "8286407987301ace7af97d6acdcf6299ce3d8525", "index": 5440, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Book(models.Model):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Book(models.Model):\n title = models.TextField(max_length=32, blank=False, null=False)\n", "step-4": "from django.db import models\n\n\nclass Book(models.Model):\n title = models.TextField(max_length=32, blank=False, null=False)\n", "step-5": "from django.db import models\n\n\nclass Book(models.Model):\n title = models.TextField(max_length=32, blank=False, null=False)\n\n# from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager\n#\n#\n# class UserAccountManager(BaseUserManager):\n# def create_user(self, email, firstname,lastname, phonenumber, password=None,):\n#\n# if not email:\n# raise ValueError('Users must have an email address')\n# email = self.normalize_email(email)\n# user = self.model(email=email, name=firstname)\n# user.set_password(password)\n# user.save()\n#\n# class UserAccount(AbstractBaseUser, PermissionsMixin):\n# email = models.EmailField(max_length=255, unique=True)\n# firstname = models.CharField(max_length=255)\n# lastname = models.CharField(max_length=255)\n# is_active = models.BooleanField(default=True)\n# is_staff = models.BooleanField(default=True)\n#\n# objects = UserAccountManager()\n#\n# USERNAME_FILED = 'email'\n# REQUIRED_FIELDS = ['firstname','lastname','phonenumber']\n#\n# def get_full_name(self):\n# return self.firstname + \" \" + self.lastname\n#\n# def get_short_name(self):\n# return self.firstname\n#\n# def __str__(self):\n# return self.email\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from pos.service.sumup import API_URL, create_checkout
from pos.models.sumup import SumUpAPIKey, SumUpOnline

from pos.forms import RemotePayForm
from pos.models.user import User


class RemotePayView(TemplateView):
    template_name = 'remotepay/pay.djhtml'


def pay(request):
    if request.method == 'POST':
        form = RemotePayForm(request.POST)

        if form.is_valid():
            phone = form.cleaned_data['phone']
            amount = form.cleaned_data['amount']
            # Check if user exists
            try:
                user = User.objects.get(phone=phone, is_crew=False)
            except User.DoesNotExist:
                return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})

            # Assuming the user exists, we proceed
            t = SumUpOnline.objects.create(user=user, amount=amount)

            try:
                txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)
                t.transaction_id = txid
                t.status = 1
                t.save()
                return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})
            except:
                return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})

    else:
        form = RemotePayForm

    return render(request, 'remotepay/pay.djhtml', {'form': form})


def pay_callback(request, checkoutid):
    # Get the status of the transaction for the user
    t = SumUpOnline.objects.get(transaction_id=checkoutid)

    if (t.status == 0 or t.status == 3):
        return HttpResponseRedirect('/pay/error/')
    elif (t.status == 4):
        return HttpResponseRedirect('/pay/success/')
    elif (t.status == 1) or (t.status == 2):
        return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})


def pay_success(request):
    return render(request, 'remotepay/success.djhtml')


def pay_error(request):
    return render(request, 'remotepay/error.djhtml')


def pay_hold(request):
    return render(request, 'remotepay/hold.djhtml')
normal
{ "blob_id": "731d2891bbc29879fd8900a11077c93550e4e88d", "index": 4251, "step-1": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n", "step-4": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = 
SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n", "step-5": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\n\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n # Check if user exists\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})\n\n # Assuming the user exists, we proceed\n t = SumUpOnline.objects.create(user=user, amount=amount)\n\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})\n\n else:\n form = RemotePayForm\n\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\ndef pay_callback(request, checkoutid):\n # Get the status of the transaction for the user\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n\n if (t.status == 0 or t.status == 3):\n return HttpResponseRedirect('/pay/error/')\n elif (t.status == 4):\n return HttpResponseRedirect('/pay/success/')\n elif (t.status == 1) or (t.status == 2):\n return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n", "step-ids": [ 3, 4, 7, 8, 9 ] }
[ 3, 4, 7, 8, 9 ]
import os
from apps.app_base.app_utils.cryp_key import decrypt, get_secret_key

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = get_secret_key

DEBUG = True

ALLOWED_HOSTS = ['.localhost', '127.0.0.1', '[::1]']


# Application definition

INSTALLED_APPS = [
    'corsheaders',
    'django.contrib.sessions',
]

MIDDLEWARE = [
    # CORS
    'corsheaders.middleware.CorsMiddleware',
    # Session
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Cache
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
]

ROOT_URLCONF = 'apps.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'apps.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'luck',
        'USER': 'postgres',
        'PASSWORD': decrypt(b'gAAAAABfesT5OW3keTFXv6sUP_4NWJfG6U_ZEInkmCvJGdVSNA74VPJeG3lZLky8ZWEsjLsdxe_k_vgVCSIVCoTx1hOQsTb1kw=='),
        'HOST': '127.0.0.1',
        'PORT': '5432'
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


CACHES = {
    # Local Memory Cache https://docs.djangoproject.com/en/3.1/topics/cache/
    "default": {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'local-memory-lru',
    },
    "redis": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/0",  # db0
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            "CONNECTION_POOL_KWARGS": {"max_connections": 100}
        }
    }
}

# Use Redis for session
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "redis"
SESSION_COOKIE_AGE = 3600 * 24  # In seconds


STATIC_URL = '/static/'

CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
normal
{ "blob_id": "027a049ffced721f2cd697bc928bfdf718630623", "index": 4692, "step-1": "<mask token>\n", "step-2": "<mask token>\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = get_secret_key\nDEBUG = True\nALLOWED_HOSTS = ['.localhost', '127.0.0.1', '[::1]']\nINSTALLED_APPS = ['corsheaders', 'django.contrib.sessions']\nMIDDLEWARE = ['corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware']\nROOT_URLCONF = 'apps.urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages']}}]\nWSGI_APPLICATION = 'apps.wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'luck', 'USER': 'postgres', 'PASSWORD': decrypt(\n b'gAAAAABfesT5OW3keTFXv6sUP_4NWJfG6U_ZEInkmCvJGdVSNA74VPJeG3lZLky8ZWEsjLsdxe_k_vgVCSIVCoTx1hOQsTb1kw=='\n ), 'HOST': '127.0.0.1', 'PORT': '5432'}}\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION':\n 'local-memory-lru'}, 'redis': {'BACKEND':\n 'django_redis.cache.RedisCache', 'LOCATION': 'redis://127.0.0.1:6379/0',\n 'OPTIONS': {'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n 'CONNECTION_POOL_KWARGS': {'max_connections': 100}}}}\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\nSESSION_CACHE_ALIAS = 'redis'\nSESSION_COOKIE_AGE = 3600 * 24\nSTATIC_URL = '/static/'\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True\n", "step-3": "import os\nfrom apps.app_base.app_utils.cryp_key import decrypt, get_secret_key\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = get_secret_key\nDEBUG = True\nALLOWED_HOSTS = ['.localhost', '127.0.0.1', '[::1]']\nINSTALLED_APPS = ['corsheaders', 'django.contrib.sessions']\nMIDDLEWARE = ['corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware']\nROOT_URLCONF = 'apps.urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages']}}]\nWSGI_APPLICATION = 'apps.wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'luck', 'USER': 'postgres', 'PASSWORD': decrypt(\n 
b'gAAAAABfesT5OW3keTFXv6sUP_4NWJfG6U_ZEInkmCvJGdVSNA74VPJeG3lZLky8ZWEsjLsdxe_k_vgVCSIVCoTx1hOQsTb1kw=='\n ), 'HOST': '127.0.0.1', 'PORT': '5432'}}\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION':\n 'local-memory-lru'}, 'redis': {'BACKEND':\n 'django_redis.cache.RedisCache', 'LOCATION': 'redis://127.0.0.1:6379/0',\n 'OPTIONS': {'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n 'CONNECTION_POOL_KWARGS': {'max_connections': 100}}}}\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\nSESSION_CACHE_ALIAS = 'redis'\nSESSION_COOKIE_AGE = 3600 * 24\nSTATIC_URL = '/static/'\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True\n", "step-4": "import os\nfrom apps.app_base.app_utils.cryp_key import decrypt, get_secret_key\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = get_secret_key\n\nDEBUG = True\n\nALLOWED_HOSTS = ['.localhost', '127.0.0.1', '[::1]']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'corsheaders',\n 'django.contrib.sessions',\n]\n\nMIDDLEWARE = [\n # CORS\n 'corsheaders.middleware.CorsMiddleware',\n # Session\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # Cache\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n]\n\nROOT_URLCONF = 'apps.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'apps.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'luck',\n 'USER': 'postgres',\n 'PASSWORD': decrypt(b'gAAAAABfesT5OW3keTFXv6sUP_4NWJfG6U_ZEInkmCvJGdVSNA74VPJeG3lZLky8ZWEsjLsdxe_k_vgVCSIVCoTx1hOQsTb1kw=='),\n 'HOST': '127.0.0.1',\n 'PORT': '5432'\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nCACHES = {\n # Local Memory Cache https://docs.djangoproject.com/en/3.1/topics/cache/\n \"default\": {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'local-memory-lru',\n },\n 
\"redis\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": \"redis://127.0.0.1:6379/0\", # db0\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n \"CONNECTION_POOL_KWARGS\": {\"max_connections\": 100}\n }\n }\n}\n\n# Use Redis for session\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_CACHE_ALIAS = \"redis\"\nSESSION_COOKIE_AGE = 3600 * 24 # In seconds\n\n\nSTATIC_URL = '/static/'\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from . import *
from rest_framework import permissions

from core.serializers import CategorySerializer
from core.models.category_model import Category


class CategoryViewSet(viewsets.ModelViewSet):
    serializer_class = CategorySerializer
    queryset = Category.objects.all()

    def get_permissions(self):
        permission_classes = (permissions.AllowAny,)
        return [permission() for permission in permission_classes]
normal
{ "blob_id": "5723e7889663142832a8131bb5f4c35d29692a49", "index": 6325, "step-1": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n", "step-3": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n", "step-4": "from . import *\nfrom rest_framework import permissions\nfrom core.serializers import CategorySerializer\nfrom core.models.category_model import Category\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n", "step-5": "from . import *\nfrom rest_framework import permissions\n\nfrom core.serializers import CategorySerializer\nfrom core.models.category_model import Category\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = (permissions.AllowAny,)\n return [permission() for permission in permission_classes]\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def evaluate(model, test_features, test_labels): predictions = model.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * np.mean(errors / test_labels) accuracy = 100 - mape print('平均气温误差.', np.mean(errors)) print('Accuracy = {:0.2f}%.'.format(accuracy)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print('Training Features Shape:', train_features.shape) print('Training Labels Shape:', train_labels.shape) print('Testing Features Shape:', test_features.shape) print('Testing Labels Shape:', test_labels.shape) <|reserved_special_token_0|> print('Important train features shape:', important_train_features.shape) print('Important test features shape:', important_test_features.shape) <|reserved_special_token_0|> pprint(rf.get_params()) <|reserved_special_token_0|> max_depth.append(None) <|reserved_special_token_0|> def evaluate(model, test_features, test_labels): predictions = model.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * np.mean(errors / test_labels) accuracy = 100 - mape print('平均气温误差.', np.mean(errors)) print('Accuracy = {:0.2f}%.'.format(accuracy)) <|reserved_special_token_0|> base_model.fit(train_features, train_labels) print('默认参数') evaluate(base_model, test_features, test_labels) <|reserved_special_token_0|> best_random.fit(train_features, train_labels) print('局部最好') evaluate(best_random, test_features, test_labels) <|reserved_special_token_0|> grid_search.fit(train_features, train_labels) <|reserved_special_token_0|> evaluate(best_grid, test_features, test_labels) <|reserved_special_token_1|> <|reserved_special_token_0|> features = pd.read_csv('data/temps_extended.csv') features = pd.get_dummies(features) labels = features['actual'] features = features.drop('actual', axis=1) feature_list = list(features.columns) <|reserved_special_token_0|> features = np.array(features) labels = np.array(labels) <|reserved_special_token_0|> train_features, test_features, train_labels, test_labels = train_test_split( features, labels, test_size=0.25, random_state=42) print('Training Features Shape:', train_features.shape) print('Training Labels Shape:', train_labels.shape) print('Testing Features Shape:', test_features.shape) print('Testing Labels Shape:', test_labels.shape) important_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year'] important_indices = [feature_list.index(feature) for feature in important_feature_names] important_train_features = train_features[:, important_indices] important_test_features = test_features[:, important_indices] print('Important train features shape:', important_train_features.shape) print('Important test features shape:', important_test_features.shape) train_features = important_train_features[:] test_features = important_test_features[:] feature_list = important_feature_names[:] <|reserved_special_token_0|> rf = RandomForestRegressor(random_state=42) <|reserved_special_token_0|> pprint(rf.get_params()) <|reserved_special_token_0|> n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)] max_features = ['auto', 'sqrt'] max_depth = [int(x) for x in np.linspace(10, 20, num=2)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} rf = 
RandomForestRegressor() rf_random = RandomizedSearchCV(estimator=rf, param_distributions= random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3, verbose=2, random_state=42, n_jobs=-1) best_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True} def evaluate(model, test_features, test_labels): predictions = model.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * np.mean(errors / test_labels) accuracy = 100 - mape print('平均气温误差.', np.mean(errors)) print('Accuracy = {:0.2f}%.'.format(accuracy)) base_model = RandomForestRegressor(random_state=42) base_model.fit(train_features, train_labels) print('默认参数') evaluate(base_model, test_features, test_labels) best_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10, random_state=42, min_samples_leaf=4, max_features='auto', max_depth= None, bootstrap=True) best_random.fit(train_features, train_labels) print('局部最好') evaluate(best_random, test_features, test_labels) <|reserved_special_token_0|> param_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split': [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto' ], 'max_depth': [None], 'bootstrap': [True]} rf = RandomForestRegressor() grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring= 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2) grid_search.fit(train_features, train_labels) best_grid = grid_search.best_estimator_ evaluate(best_grid, test_features, test_labels) <|reserved_special_token_1|> import pandas as pd features = pd.read_csv('data/temps_extended.csv') features = pd.get_dummies(features) labels = features['actual'] features = features.drop('actual', axis=1) feature_list = list(features.columns) import numpy as np features = np.array(features) labels = np.array(labels) from sklearn.model_selection import train_test_split train_features, test_features, train_labels, test_labels = train_test_split( features, labels, test_size=0.25, random_state=42) print('Training Features Shape:', train_features.shape) print('Training Labels Shape:', train_labels.shape) print('Testing Features Shape:', test_features.shape) print('Testing Labels Shape:', test_labels.shape) important_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year'] important_indices = [feature_list.index(feature) for feature in important_feature_names] important_train_features = train_features[:, important_indices] important_test_features = test_features[:, important_indices] print('Important train features shape:', important_train_features.shape) print('Important test features shape:', important_test_features.shape) train_features = important_train_features[:] test_features = important_test_features[:] feature_list = important_feature_names[:] from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor(random_state=42) from pprint import pprint pprint(rf.get_params()) from sklearn.model_selection import RandomizedSearchCV n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)] max_features = ['auto', 'sqrt'] max_depth = [int(x) for x in np.linspace(10, 20, num=2)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} rf = 
RandomForestRegressor() rf_random = RandomizedSearchCV(estimator=rf, param_distributions= random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3, verbose=2, random_state=42, n_jobs=-1) best_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True} def evaluate(model, test_features, test_labels): predictions = model.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * np.mean(errors / test_labels) accuracy = 100 - mape print('平均气温误差.', np.mean(errors)) print('Accuracy = {:0.2f}%.'.format(accuracy)) base_model = RandomForestRegressor(random_state=42) base_model.fit(train_features, train_labels) print('默认参数') evaluate(base_model, test_features, test_labels) best_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10, random_state=42, min_samples_leaf=4, max_features='auto', max_depth= None, bootstrap=True) best_random.fit(train_features, train_labels) print('局部最好') evaluate(best_random, test_features, test_labels) from sklearn.model_selection import GridSearchCV param_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split': [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto' ], 'max_depth': [None], 'bootstrap': [True]} rf = RandomForestRegressor() grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring= 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2) grid_search.fit(train_features, train_labels) best_grid = grid_search.best_estimator_ evaluate(best_grid, test_features, test_labels) <|reserved_special_token_1|> # -*- coding:utf-8 -*- #随机森林调参 #RandomizedSearchCV 随机最佳 #GridSearchCV 地毯式最佳 import pandas as pd features = pd.read_csv('data/temps_extended.csv') features = pd.get_dummies(features) labels = features['actual'] features = features.drop('actual', axis = 1) feature_list = list(features.columns) import numpy as np features = np.array(features) labels = np.array(labels) from sklearn.model_selection import train_test_split train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.25, random_state = 42) print('Training Features Shape:', train_features.shape) print('Training Labels Shape:', train_labels.shape) print('Testing Features Shape:', test_features.shape) print('Testing Labels Shape:', test_labels.shape) #################选择6个比较重要的参数当做训练集,重新创建训练集############################## important_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year'] important_indices = [feature_list.index(feature) for feature in important_feature_names] important_train_features = train_features[:, important_indices] important_test_features = test_features[:, important_indices] print('Important train features shape:', important_train_features.shape) print('Important test features shape:', important_test_features.shape) train_features = important_train_features[:] test_features = important_test_features[:] feature_list = important_feature_names[:] #################选择6个比较重要的参数当做训练集,重新创建训练集############################## ########创建随机森林模型################### from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor(random_state = 42) from pprint import pprint # 打印所有参数 pprint(rf.get_params()) # {'bootstrap': True,#是否随机采样 # 'criterion': 'mse',#指定目标方程 损失的计算方法 熵值 回归 mse计算误差 # 'max_depth': None,# 树的最大深度 重要 # 'max_features': 'auto', # 'max_leaf_nodes': None, 最大叶子节点 重要 # 'min_impurity_decrease': 0.0, # 'min_impurity_split': None, # 'min_samples_leaf': 1, 
信息增益 重要 # 'min_samples_split': 2, 最小分裂次数 重要 # 'min_weight_fraction_leaf': 0.0, # 'n_estimators': 'warn', # 'n_jobs': None, #多少核CPU 去跑 # 'oob_score': False, # 'random_state': 42, # 'verbose': 0, # 'warm_start': False} from sklearn.model_selection import RandomizedSearchCV# 随机最好 # 建立树的个数 n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)] # 最大特征的选择方式 max_features = ['auto', 'sqrt'] # 树的最大深度 10 20 none max_depth = [int(x) for x in np.linspace(10, 20, num = 2)] max_depth.append(None) # 节点最小分裂所需样本个数 min_samples_split = [2, 5, 10] # 叶子节点最小样本数,任何分裂不能让其子节点样本数少于此值 min_samples_leaf = [1, 2, 4] # 样本采样方法 bootstrap = [True, False] # Random grid random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} rf = RandomForestRegressor()# 创建模型 #随机寻找参数 cv:交叉验证 , n_iter 随机100次,scoring:评估方法,verbose:打印信息,n_jobs:所以cpu去跑 rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter = 100, scoring='neg_mean_absolute_error', cv = 3, verbose=2, random_state=42, n_jobs=-1) # 执行寻找操作 # rf_random.fit(train_features, train_labels) # print(rf_random.best_params_) best_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True} def evaluate(model, test_features, test_labels): #评估 predictions = model.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * np.mean(errors / test_labels) accuracy = 100 - mape print('平均气温误差.',np.mean(errors)) print('Accuracy = {:0.2f}%.'.format(accuracy)) #################使用默认参数########################## # 平均气温误差. 3.91697080292 # Accuracy = 93.36%. base_model = RandomForestRegressor( random_state = 42) #使用默认的参数 base_model.fit(train_features, train_labels) print('默认参数') evaluate(base_model, test_features, test_labels) #################使用默认参数########################## #################使用最好参数########################## # 平均气温误差. 3.7141472957 # Accuracy = 93.73%. best_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True) best_random.fit(train_features, train_labels) print('局部最好') evaluate(best_random, test_features, test_labels) #################使用最好参数########################## ################在随机最好的参数进行微调###################### # 平均气温误差. 3.69222090145 # Accuracy = 93.77%. from sklearn.model_selection import GridSearchCV# 地毯式搜索 param_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split': [3, 5, 7], 'min_samples_leaf': [2,3, 4, 5,6], 'max_features': ['auto'], 'max_depth': [None], 'bootstrap': [True]} rf = RandomForestRegressor() # 网络搜索 grid_search = GridSearchCV(estimator = rf, param_grid = param_grid, scoring = 'neg_mean_absolute_error', cv = 3, n_jobs = -1, verbose = 2) grid_search.fit(train_features, train_labels) best_grid = grid_search.best_estimator_ evaluate(best_grid, test_features, test_labels) ################在随机最好的参数进行微调###################### ########创建随机森林模型###################
flexible
{ "blob_id": "de4e14a4fa8520c1aae60805084224337dd9620c", "index": 9009, "step-1": "<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\n", "step-2": "<mask token>\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n<mask token>\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n<mask token>\npprint(rf.get_params())\n<mask token>\nmax_depth.append(None)\n<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n<mask token>\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\ngrid_search.fit(train_features, train_labels)\n<mask token>\nevaluate(best_grid, test_features, test_labels)\n", "step-3": "<mask token>\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\n<mask token>\nfeatures = np.array(features)\nlabels = np.array(labels)\n<mask token>\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\n<mask token>\nrf = RandomForestRegressor(random_state=42)\n<mask token>\npprint(rf.get_params())\n<mask token>\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, 
scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n", "step-4": "import pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\nimport numpy as np\nfeatures = np.array(features)\nlabels = np.array(labels)\nfrom sklearn.model_selection import train_test_split\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\nfrom sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor(random_state=42)\nfrom pprint import pprint\npprint(rf.get_params())\nfrom sklearn.model_selection import RandomizedSearchCV\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = 
RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n", "step-5": "# -*- coding:utf-8 -*-\n\n#随机森林调参\n#RandomizedSearchCV 随机最佳\n#GridSearchCV 地毯式最佳\n\n\nimport pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\n\n\nfeatures = pd.get_dummies(features)\n\nlabels = features['actual']\nfeatures = features.drop('actual', axis = 1)\n\nfeature_list = list(features.columns)\n\nimport numpy as np\n\nfeatures = np.array(features)\nlabels = np.array(labels)\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels,\n test_size = 0.25, random_state = 42)\n\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year']\n\nimportant_indices = [feature_list.index(feature) for feature in important_feature_names]\n\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\n\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\n\nfeature_list = important_feature_names[:]\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\n\n########创建随机森林模型###################\nfrom sklearn.ensemble import RandomForestRegressor\n\nrf = RandomForestRegressor(random_state = 42)\n\nfrom pprint import pprint\n\n# 打印所有参数\npprint(rf.get_params())\n\n# {'bootstrap': True,#是否随机采样\n# 'criterion': 'mse',#指定目标方程 损失的计算方法 熵值 回归 mse计算误差\n# 'max_depth': None,# 树的最大深度 重要\n# 'max_features': 'auto',\n# 'max_leaf_nodes': None, 
最大叶子节点 重要\n# 'min_impurity_decrease': 0.0,\n# 'min_impurity_split': None,\n# 'min_samples_leaf': 1, 信息增益 重要\n# 'min_samples_split': 2, 最小分裂次数 重要\n# 'min_weight_fraction_leaf': 0.0,\n# 'n_estimators': 'warn',\n# 'n_jobs': None, #多少核CPU 去跑\n# 'oob_score': False,\n# 'random_state': 42,\n# 'verbose': 0,\n# 'warm_start': False}\n\nfrom sklearn.model_selection import RandomizedSearchCV# 随机最好\n# 建立树的个数\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# 最大特征的选择方式\nmax_features = ['auto', 'sqrt']\n# 树的最大深度 10 20 none\nmax_depth = [int(x) for x in np.linspace(10, 20, num = 2)]\nmax_depth.append(None)\n# 节点最小分裂所需样本个数\nmin_samples_split = [2, 5, 10]\n# 叶子节点最小样本数,任何分裂不能让其子节点样本数少于此值\nmin_samples_leaf = [1, 2, 4]\n# 样本采样方法\nbootstrap = [True, False]\n\n# Random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n\nrf = RandomForestRegressor()# 创建模型\n#随机寻找参数 cv:交叉验证 , n_iter 随机100次,scoring:评估方法,verbose:打印信息,n_jobs:所以cpu去跑\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,\n n_iter = 100, scoring='neg_mean_absolute_error',\n cv = 3, verbose=2, random_state=42, n_jobs=-1)\n\n\n\n\n# 执行寻找操作\n# rf_random.fit(train_features, train_labels)\n# print(rf_random.best_params_)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels): #评估\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n\n print('平均气温误差.',np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n\n#################使用默认参数##########################\n# 平均气温误差. 3.91697080292\n# Accuracy = 93.36%.\nbase_model = RandomForestRegressor( random_state = 42) #使用默认的参数\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n#################使用默认参数##########################\n\n\n#################使用最好参数##########################\n# 平均气温误差. 3.7141472957\n# Accuracy = 93.73%.\nbest_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n#################使用最好参数##########################\n\n################在随机最好的参数进行微调######################\n# 平均气温误差. 3.69222090145\n# Accuracy = 93.77%.\nfrom sklearn.model_selection import GridSearchCV# 地毯式搜索\n\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600],\n 'min_samples_split': [3, 5, 7],\n 'min_samples_leaf': [2,3, 4, 5,6],\n 'max_features': ['auto'],\n 'max_depth': [None],\n 'bootstrap': [True]}\n\n\n\nrf = RandomForestRegressor()\n\n# 网络搜索\ngrid_search = GridSearchCV(estimator = rf, param_grid = param_grid,\n scoring = 'neg_mean_absolute_error', cv = 3,\n n_jobs = -1, verbose = 2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n################在随机最好的参数进行微调######################\n\n\n########创建随机森林模型###################", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class FundOperationCreateView(CreateView): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() return super().form_valid(form) <|reserved_special_token_0|> class FundOperationUpdateView(UpdateView): model = FundOperation template_name = 'forms/fund_operation/update.html' form_class = FundOperationForm success_url = None def _get_initial_data(self): if self.object.lines.all(): return None initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, { 'body': 'स्थानीय तह'}, {'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}] return initial def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) initial = self._get_initial_data() if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST, instance=self.object, initial=initial) else: data['lines'] = FundOperationFormSet(instance=self.object, initial=initial) data['lines'].extra = len(initial) if initial else 1 return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() else: return self.form_invalid(form, lines) return super().form_valid(form) def form_invalid(self, form, lines=None): return self.render_to_response(self.get_context_data(form=form, lines=lines)) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_update', kwargs= {'pk': self.object.pk}) <|reserved_special_token_1|> <|reserved_special_token_0|> class FundOperationCreateView(CreateView): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST) else: data['lines'] = FundOperationFormSet() return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() return super().form_valid(form) <|reserved_special_token_0|> class FundOperationUpdateView(UpdateView): model = FundOperation template_name = 'forms/fund_operation/update.html' form_class = FundOperationForm success_url = None def _get_initial_data(self): if self.object.lines.all(): return None initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, { 'body': 'स्थानीय तह'}, {'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}] return initial def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) initial = self._get_initial_data() if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST, instance=self.object, initial=initial) else: data['lines'] = FundOperationFormSet(instance=self.object, initial=initial) data['lines'].extra = len(initial) if initial else 1 return data def form_valid(self, form): context = 
self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() else: return self.form_invalid(form, lines) return super().form_valid(form) def form_invalid(self, form, lines=None): return self.render_to_response(self.get_context_data(form=form, lines=lines)) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_update', kwargs= {'pk': self.object.pk}) <|reserved_special_token_1|> <|reserved_special_token_0|> class FundOperationCreateView(CreateView): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST) else: data['lines'] = FundOperationFormSet() return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() return super().form_valid(form) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_create') class FundOperationUpdateView(UpdateView): model = FundOperation template_name = 'forms/fund_operation/update.html' form_class = FundOperationForm success_url = None def _get_initial_data(self): if self.object.lines.all(): return None initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, { 'body': 'स्थानीय तह'}, {'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}] return initial def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) initial = self._get_initial_data() if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST, instance=self.object, initial=initial) else: data['lines'] = FundOperationFormSet(instance=self.object, initial=initial) data['lines'].extra = len(initial) if initial else 1 return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() else: return self.form_invalid(form, lines) return super().form_valid(form) def form_invalid(self, form, lines=None): return self.render_to_response(self.get_context_data(form=form, lines=lines)) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_update', kwargs= {'pk': self.object.pk}) <|reserved_special_token_1|> <|reserved_special_token_0|> class FundOperationCreateView(CreateView): model = FundOperation template_name = 'forms/fund_operation/create.html' form_class = FundOperationForm success_url = None def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST) else: data['lines'] = FundOperationFormSet() return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() return super().form_valid(form) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_create') class 
FundOperationUpdateView(UpdateView): model = FundOperation template_name = 'forms/fund_operation/update.html' form_class = FundOperationForm success_url = None def _get_initial_data(self): if self.object.lines.all(): return None initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, { 'body': 'स्थानीय तह'}, {'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}] return initial def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) initial = self._get_initial_data() if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST, instance=self.object, initial=initial) else: data['lines'] = FundOperationFormSet(instance=self.object, initial=initial) data['lines'].extra = len(initial) if initial else 1 return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() else: return self.form_invalid(form, lines) return super().form_valid(form) def form_invalid(self, form, lines=None): return self.render_to_response(self.get_context_data(form=form, lines=lines)) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_update', kwargs= {'pk': self.object.pk}) <|reserved_special_token_1|> from django.db import transaction from django.forms import inlineformset_factory from django.shortcuts import render from django.urls import reverse_lazy from django.views.generic import CreateView, UpdateView from forms.models.fund_operation import FundOperation from forms.forms.fund_operation_forms import FundOperationForm, FundOperationLineForm, FundOperationFormSet class FundOperationCreateView(CreateView): model = FundOperation template_name = "forms/fund_operation/create.html" form_class = FundOperationForm success_url = None def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) if self.request.POST: data['lines'] = FundOperationFormSet(self.request.POST) else: data['lines'] = FundOperationFormSet() return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() return super().form_valid(form) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_create') class FundOperationUpdateView(UpdateView): model =FundOperation template_name = "forms/fund_operation/update.html" form_class = FundOperationForm success_url = None def _get_initial_data(self): if self.object.lines.all(): return None initial = [ { 'body': 'प्रदेश सरकार', }, { 'body': 'संघीय सरकार', }, { 'body': 'स्थानीय तह', }, { 'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी', }, { 'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था', }, { 'body': 'गैरसरकारी संस्था', }, ] return initial def get_context_data(self, **kwargs): data = super().get_context_data(**kwargs) initial = self._get_initial_data() if self.request.POST: data['lines'] = FundOperationFormSet( self.request.POST, instance=self.object, initial=initial ) else: data['lines'] = FundOperationFormSet( instance=self.object, initial=initial ) data['lines'].extra = len(initial) if initial else 1 return data def form_valid(self, form): context = self.get_context_data() lines = context['lines'] with transaction.atomic(): form.instance.create_user = 
self.request.user self.object = form.save() if lines.is_valid(): lines.instance = self.object lines.save() else: return self.form_invalid(form, lines) return super().form_valid(form) def form_invalid(self, form, lines=None): return self.render_to_response(self.get_context_data(form=form, lines=lines)) def get_success_url(self): return reverse_lazy('fund_operation:fund_operation_update', kwargs={'pk': self.object.pk})
flexible
{ "blob_id": "3c2fb3d09edab92da08ac8850f650a2fa22fad92", "index": 8806, "step-1": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n <mask token>\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n", "step-2": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n <mask token>\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n 
data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n", "step-3": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n", "step-4": "<mask token>\n\n\nclass FundOperationCreateView(CreateView):\n model = FundOperation\n template_name = 'forms/fund_operation/create.html'\n form_class = FundOperationForm\n success_url = None\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n 
self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model = FundOperation\n template_name = 'forms/fund_operation/update.html'\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n initial = [{'body': 'प्रदेश सरकार'}, {'body': 'संघीय सरकार'}, {\n 'body': 'स्थानीय तह'}, {'body':\n 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी'}, {'body':\n 'अन्तरराष्ट्रिय गैर सरकारी संस्था'}, {'body': 'गैरसरकारी संस्था'}]\n return initial\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST,\n instance=self.object, initial=initial)\n else:\n data['lines'] = FundOperationFormSet(instance=self.object,\n initial=initial)\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form,\n lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs=\n {'pk': self.object.pk})\n", "step-5": "from django.db import transaction\nfrom django.forms import inlineformset_factory\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView, UpdateView\nfrom forms.models.fund_operation import FundOperation\nfrom forms.forms.fund_operation_forms import FundOperationForm, FundOperationLineForm, FundOperationFormSet\n\n\nclass FundOperationCreateView(CreateView):\n model = FundOperation\n template_name = \"forms/fund_operation/create.html\"\n form_class = FundOperationForm\n success_url = None\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if self.request.POST:\n data['lines'] = FundOperationFormSet(self.request.POST)\n else:\n data['lines'] = FundOperationFormSet()\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_create')\n\n\nclass FundOperationUpdateView(UpdateView):\n model =FundOperation\n template_name = \"forms/fund_operation/update.html\"\n form_class = FundOperationForm\n success_url = None\n\n def _get_initial_data(self):\n if self.object.lines.all():\n return None\n\n initial = [\n {\n 'body': 'प्रदेश सरकार',\n },\n {\n 'body': 'संघीय सरकार',\n },\n {\n 'body': 'स्थानीय तह',\n },\n {\n 'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी',\n },\n {\n 'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था',\n },\n {\n 'body': 'गैरसरकारी संस्था',\n },\n ]\n return initial\n\n def get_context_data(self, **kwargs):\n data = 
super().get_context_data(**kwargs)\n\n initial = self._get_initial_data()\n if self.request.POST:\n data['lines'] = FundOperationFormSet(\n self.request.POST,\n instance=self.object,\n initial=initial\n )\n else:\n data['lines'] = FundOperationFormSet(\n instance=self.object,\n initial=initial\n )\n data['lines'].extra = len(initial) if initial else 1\n return data\n\n def form_valid(self, form):\n context = self.get_context_data()\n lines = context['lines']\n with transaction.atomic():\n form.instance.create_user = self.request.user\n self.object = form.save()\n if lines.is_valid():\n lines.instance = self.object\n lines.save()\n else:\n return self.form_invalid(form, lines)\n\n return super().form_valid(form)\n\n def form_invalid(self, form, lines=None):\n return self.render_to_response(self.get_context_data(form=form, lines=lines))\n\n def get_success_url(self):\n return reverse_lazy('fund_operation:fund_operation_update', kwargs={'pk': self.object.pk})\n", "step-ids": [ 9, 10, 11, 12, 14 ] }
[ 9, 10, 11, 12, 14 ]
class MyClass:
    name = "alice"

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def say_hello(self):
        self.greet = "Hello"

    def say_hi(self):
        print("HI~~~~~")


p1 = MyClass()
p2 = MyClass()

print(p1.name)
p1.set_name("bob")
print(p1.name)

print(p2.name)

# after an instance member has been set, that instance member can be accessed
p1.say_hello()
print(p1.greet)

# because the method is called through the class itself (MyClass.), one argument must be passed for the self parameter
MyClass.say_hi("gg")
normal
{ "blob_id": "babb5ac680c74e19db5c86c2c3323e8285d169ff", "index": 9939, "step-1": "class MyClass:\n <mask token>\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n", "step-2": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n", "step-3": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n", "step-4": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\np1 = MyClass()\np2 = MyClass()\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n", "step-5": "class MyClass:\n name = \"alice\"\n \n def set_name(self, name):\n self.name = name\n \n def get_name(self):\n return self.name\n \n def say_hello(self):\n self.greet = \"Hello\"\n \n def say_hi(self):\n print(\"HI~~~~~\")\n \n\n\np1 = MyClass()\np2 = MyClass()\n\nprint(p1.name)\np1.set_name(\"bob\")\nprint(p1.name)\n\nprint(p2.name)\n\n# 인스턴스 멤버를 적용한후에 그 인스턴스 멤버에 접근 할 수 있다\np1.say_hello()\nprint(p1.greet)\n\n#클래스 메서드를 클래스. 으로 호출 했기 떄문에 self 파라미터를 하나 넘겨 줘야 한다 \nMyClass.say_hi(\"gg\")\n\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> LOGIN_USERNAME = 'YOUR_USERNAME' LOGIN_PASSWORD = 'YOUR_PASSWORD'
flexible
{ "blob_id": "5a092150896e4082431849828793f86adcd2211c", "index": 8202, "step-1": "<mask token>\n", "step-2": "LOGIN_USERNAME = 'YOUR_USERNAME'\nLOGIN_PASSWORD = 'YOUR_PASSWORD'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# -*- coding: utf-8 -*- import datetime from unittest.mock import patch from odoo.tests import common import odoo from .common import RunbotCase class TestSchedule(RunbotCase): def setUp(self): # entering test mode to avoid that the _schedule method commits records registry = odoo.registry() super(TestSchedule, self).setUp() self.repo = self.Repo.create({'name': '[email protected]:foo/bar'}) self.branch = self.Branch.create({ 'repo_id': self.repo.id, 'name': 'refs/heads/master' }) @patch('odoo.addons.runbot.models.build.os.path.getmtime') @patch('odoo.addons.runbot.models.build.docker_state') def test_schedule_mark_done(self, mock_docker_state, mock_getmtime): """ Test that results are set even when job_30_run is skipped """ job_end_time = datetime.datetime.now() mock_getmtime.return_value = job_end_time.timestamp() build = self.Build.create({ 'local_state': 'testing', 'branch_id': self.branch.id, 'name': 'd0d0caca0000ffffffffffffffffffffffffffff', 'port': '1234', 'host': 'runbotxx', 'job_start': datetime.datetime.now(), 'config_id': self.env.ref('runbot.runbot_build_config_default').id, 'active_step': self.env.ref('runbot.runbot_build_config_step_run').id, }) domain = [('repo_id', 'in', (self.repo.id, ))] domain_host = domain + [('host', '=', 'runbotxx')] build_ids = self.Build.search(domain_host + [('local_state', 'in', ['testing', 'running'])]) mock_docker_state.return_value = 'UNKNOWN' self.assertEqual(build.local_state, 'testing') build_ids._schedule() # too fast, docker not started self.assertEqual(build.local_state, 'testing') build_ids.write({'job_start': datetime.datetime.now() - datetime.timedelta(seconds=70)}) # docker never started build_ids._schedule() self.assertEqual(build.local_state, 'done') self.assertEqual(build.local_result, 'ok')
normal
{ "blob_id": "aa515b1b919eb557cd8c7e5f4d22773980b5af96", "index": 8213, "step-1": "<mask token>\n\n\nclass TestSchedule(RunbotCase):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TestSchedule(RunbotCase):\n <mask token>\n\n @patch('odoo.addons.runbot.models.build.os.path.getmtime')\n @patch('odoo.addons.runbot.models.build.docker_state')\n def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):\n \"\"\" Test that results are set even when job_30_run is skipped \"\"\"\n job_end_time = datetime.datetime.now()\n mock_getmtime.return_value = job_end_time.timestamp()\n build = self.Build.create({'local_state': 'testing', 'branch_id':\n self.branch.id, 'name':\n 'd0d0caca0000ffffffffffffffffffffffffffff', 'port': '1234',\n 'host': 'runbotxx', 'job_start': datetime.datetime.now(),\n 'config_id': self.env.ref('runbot.runbot_build_config_default')\n .id, 'active_step': self.env.ref(\n 'runbot.runbot_build_config_step_run').id})\n domain = [('repo_id', 'in', (self.repo.id,))]\n domain_host = domain + [('host', '=', 'runbotxx')]\n build_ids = self.Build.search(domain_host + [('local_state', 'in',\n ['testing', 'running'])])\n mock_docker_state.return_value = 'UNKNOWN'\n self.assertEqual(build.local_state, 'testing')\n build_ids._schedule()\n self.assertEqual(build.local_state, 'testing')\n build_ids.write({'job_start': datetime.datetime.now() - datetime.\n timedelta(seconds=70)})\n build_ids._schedule()\n self.assertEqual(build.local_state, 'done')\n self.assertEqual(build.local_result, 'ok')\n", "step-3": "<mask token>\n\n\nclass TestSchedule(RunbotCase):\n\n def setUp(self):\n registry = odoo.registry()\n super(TestSchedule, self).setUp()\n self.repo = self.Repo.create({'name': '[email protected]:foo/bar'})\n self.branch = self.Branch.create({'repo_id': self.repo.id, 'name':\n 'refs/heads/master'})\n\n @patch('odoo.addons.runbot.models.build.os.path.getmtime')\n @patch('odoo.addons.runbot.models.build.docker_state')\n def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):\n \"\"\" Test that results are set even when job_30_run is skipped \"\"\"\n job_end_time = datetime.datetime.now()\n mock_getmtime.return_value = job_end_time.timestamp()\n build = self.Build.create({'local_state': 'testing', 'branch_id':\n self.branch.id, 'name':\n 'd0d0caca0000ffffffffffffffffffffffffffff', 'port': '1234',\n 'host': 'runbotxx', 'job_start': datetime.datetime.now(),\n 'config_id': self.env.ref('runbot.runbot_build_config_default')\n .id, 'active_step': self.env.ref(\n 'runbot.runbot_build_config_step_run').id})\n domain = [('repo_id', 'in', (self.repo.id,))]\n domain_host = domain + [('host', '=', 'runbotxx')]\n build_ids = self.Build.search(domain_host + [('local_state', 'in',\n ['testing', 'running'])])\n mock_docker_state.return_value = 'UNKNOWN'\n self.assertEqual(build.local_state, 'testing')\n build_ids._schedule()\n self.assertEqual(build.local_state, 'testing')\n build_ids.write({'job_start': datetime.datetime.now() - datetime.\n timedelta(seconds=70)})\n build_ids._schedule()\n self.assertEqual(build.local_state, 'done')\n self.assertEqual(build.local_result, 'ok')\n", "step-4": "import datetime\nfrom unittest.mock import patch\nfrom odoo.tests import common\nimport odoo\nfrom .common import RunbotCase\n\n\nclass TestSchedule(RunbotCase):\n\n def setUp(self):\n registry = odoo.registry()\n super(TestSchedule, self).setUp()\n self.repo = self.Repo.create({'name': '[email protected]:foo/bar'})\n self.branch = self.Branch.create({'repo_id': self.repo.id, 
'name':\n 'refs/heads/master'})\n\n @patch('odoo.addons.runbot.models.build.os.path.getmtime')\n @patch('odoo.addons.runbot.models.build.docker_state')\n def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):\n \"\"\" Test that results are set even when job_30_run is skipped \"\"\"\n job_end_time = datetime.datetime.now()\n mock_getmtime.return_value = job_end_time.timestamp()\n build = self.Build.create({'local_state': 'testing', 'branch_id':\n self.branch.id, 'name':\n 'd0d0caca0000ffffffffffffffffffffffffffff', 'port': '1234',\n 'host': 'runbotxx', 'job_start': datetime.datetime.now(),\n 'config_id': self.env.ref('runbot.runbot_build_config_default')\n .id, 'active_step': self.env.ref(\n 'runbot.runbot_build_config_step_run').id})\n domain = [('repo_id', 'in', (self.repo.id,))]\n domain_host = domain + [('host', '=', 'runbotxx')]\n build_ids = self.Build.search(domain_host + [('local_state', 'in',\n ['testing', 'running'])])\n mock_docker_state.return_value = 'UNKNOWN'\n self.assertEqual(build.local_state, 'testing')\n build_ids._schedule()\n self.assertEqual(build.local_state, 'testing')\n build_ids.write({'job_start': datetime.datetime.now() - datetime.\n timedelta(seconds=70)})\n build_ids._schedule()\n self.assertEqual(build.local_state, 'done')\n self.assertEqual(build.local_result, 'ok')\n", "step-5": "# -*- coding: utf-8 -*-\nimport datetime\nfrom unittest.mock import patch\nfrom odoo.tests import common\nimport odoo\nfrom .common import RunbotCase\n\n\nclass TestSchedule(RunbotCase):\n\n def setUp(self):\n # entering test mode to avoid that the _schedule method commits records\n registry = odoo.registry()\n super(TestSchedule, self).setUp()\n\n self.repo = self.Repo.create({'name': '[email protected]:foo/bar'})\n self.branch = self.Branch.create({\n 'repo_id': self.repo.id,\n 'name': 'refs/heads/master'\n })\n\n @patch('odoo.addons.runbot.models.build.os.path.getmtime')\n @patch('odoo.addons.runbot.models.build.docker_state')\n def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):\n \"\"\" Test that results are set even when job_30_run is skipped \"\"\"\n job_end_time = datetime.datetime.now()\n mock_getmtime.return_value = job_end_time.timestamp()\n\n build = self.Build.create({\n 'local_state': 'testing',\n 'branch_id': self.branch.id,\n 'name': 'd0d0caca0000ffffffffffffffffffffffffffff',\n 'port': '1234',\n 'host': 'runbotxx',\n 'job_start': datetime.datetime.now(),\n 'config_id': self.env.ref('runbot.runbot_build_config_default').id,\n 'active_step': self.env.ref('runbot.runbot_build_config_step_run').id,\n })\n domain = [('repo_id', 'in', (self.repo.id, ))]\n domain_host = domain + [('host', '=', 'runbotxx')]\n build_ids = self.Build.search(domain_host + [('local_state', 'in', ['testing', 'running'])])\n mock_docker_state.return_value = 'UNKNOWN'\n self.assertEqual(build.local_state, 'testing')\n build_ids._schedule() # too fast, docker not started\n self.assertEqual(build.local_state, 'testing')\n\n build_ids.write({'job_start': datetime.datetime.now() - datetime.timedelta(seconds=70)}) # docker never started\n build_ids._schedule()\n self.assertEqual(build.local_state, 'done')\n self.assertEqual(build.local_result, 'ok')\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import subprocess import glob import os import time import sys import xml.etree.ElementTree as ET import getpass import psutil if len(sys.argv)==1: photoscanname = r"C:\Program Files\Agisoft\PhotoScan Pro\photoscan.exe" scriptname = r"C:\Users\slocumr\github\SimUAS\batchphotoscan\agiproc.py" #xmlnames = r"P:\Slocum\USVI_project\01_DATA\20180319_USVI_UAS_BATHY\02_PROCDATA\06_PROCIMAGES\*\06_QUICKPROC\*2.xml" xmlnames = r"C:\Users\slocumr\github\SimUAS\data\testagiproc\06_QUICKPROC\*.xml" nprocesses = 1 else: photoscanname = sys.argv[1] scriptname = sys.argv[2] xmlnames = sys.argv[3] nprocesses = 1 SLEEPTIME = 10 DODEBUG = True # get xmlfiles xmlfiles = glob.glob(xmlnames) nfiles = len(xmlfiles) # empty lists processes = [] procname = [] procind = [] logname = [] currentloghandles = [] currentind = [] proclog = open("simUASagiproc_log.log",'at') try: # detect already processed or processing folders nexist = 0 for i,fname in enumerate(xmlfiles): rootdir,f = os.path.split(fname) rootoutput = ET.parse(fname).getroot().find('export').get('rootname') logname.append( rootdir + "/" + rootoutput + "/autoproc.log" ) procind.append(i) if os.path.exists(rootdir + "/" + rootoutput + "/autoproc.log"): nexist = nexist+1 print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles)) proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\n') for fname,i,logfile in zip(xmlfiles,procind,logname): i = i+1 if not os.path.exists(logfile): currentind.append(i) print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname + '\n') foldername,foo = os.path.split(logfile) if not os.path.exists(foldername): os.makedirs(foldername) iloghandle = open(logfile,'wt') iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n") iloghandle.write(getpass.getuser() + "\n") iloghandle.flush() currentloghandles.append(iloghandle) processes.append(subprocess.Popen([photoscanname,"-r",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle)) procname.append(fname) while len(processes)>=nprocesses: time.sleep(SLEEPTIME) if DODEBUG: cpu_percent = psutil.cpu_percent() ram_percent = psutil.virtual_memory().percent print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent)) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n') for p, ind, name, log in zip(processes, currentind, procname, currentloghandles): if p.poll() is not None: print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n') iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n") iloghandle.flush() iloghandle.close() procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None] currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None] currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None] processes[:] = [p for p in processes if p.poll() is None] # Wait for everything to finish while len(processes)>0: time.sleep(SLEEPTIME) if DODEBUG: 
cpu_percent = psutil.cpu_percent() ram_percent = psutil.virtual_memory().percent print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent)) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n') for p, ind, name, log in zip(processes, currentind, procname, currentloghandles): if p.poll() is not None: print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n') iloghandle= log iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n") iloghandle.flush() iloghandle.close() procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None] currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None] currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None] processes[:] = [p for p in processes if p.poll() is None] except KeyboardInterrupt: for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles): print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name) proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name + '\n') p.kill() iloghandle.flush() iloghandle.close() time.sleep(0.1) os.remove(logname[ind-1]) proclog.flush() proclog.close() print("Done")
normal
{ "blob_id": "00f95733505b3e853a76bbdd65439bcb230fa262", "index": 3345, "step-1": "<mask token>\n", "step-2": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n<mask token>\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: 
{:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n", "step-3": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + 
'\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n", "step-4": "import subprocess\nimport glob\nimport os\nimport time\nimport 
sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n 
cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n", "step-5": "import subprocess\nimport glob\nimport os\nimport time\nimport sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\n\nif len(sys.argv)==1:\n photoscanname = r\"C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe\"\n scriptname = r\"C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py\"\n #xmlnames = r\"P:\\Slocum\\USVI_project\\01_DATA\\20180319_USVI_UAS_BATHY\\02_PROCDATA\\06_PROCIMAGES\\*\\06_QUICKPROC\\*2.xml\"\n xmlnames = r\"C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml\"\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n\nSLEEPTIME = 10\nDODEBUG = True\n\n# get xmlfiles\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\n\n# empty lists\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\n\nproclog = open(\"simUASagiproc_log.log\",'at')\ntry:\n # detect already processed or processing folders\n nexist = 0\n for i,fname in enumerate(xmlfiles):\n rootdir,f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append( rootdir + \"/\" + rootoutput + \"/autoproc.log\" )\n procind.append(i)\n if os.path.exists(rootdir + \"/\" + rootoutput + \"/autoproc.log\"):\n nexist = nexist+1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\\n')\n for fname,i,logfile in zip(xmlfiles,procind,logname):\n i = i+1\n if not os.path.exists(logfile):\n\n currentind.append(i)\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + 
'{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + '{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname + '\\n')\n foldername,foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile,'wt')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.write(getpass.getuser() + \"\\n\")\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname,\"-r\",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))\n procname.append(fname)\n while len(processes)>=nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n \n # Wait for everything to finish\n while len(processes)>0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle= log\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + 
\" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind-1])\nproclog.flush()\nproclog.close()\nprint(\"Done\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('presentes', '0015_caso_lugar_del_hecho')] operations = [migrations.AddField(model_name='organizacion', name= 'descripcion', field=models.TextField(default='')), migrations. AddField(model_name='organizacion', name='email', field=models. CharField(default='', max_length=200))] <|reserved_special_token_1|> from django.db import migrations, models class Migration(migrations.Migration): dependencies = [('presentes', '0015_caso_lugar_del_hecho')] operations = [migrations.AddField(model_name='organizacion', name= 'descripcion', field=models.TextField(default='')), migrations. AddField(model_name='organizacion', name='email', field=models. CharField(default='', max_length=200))] <|reserved_special_token_1|> # Generated by Django 2.2.1 on 2019-05-23 14:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('presentes', '0015_caso_lugar_del_hecho'), ] operations = [ migrations.AddField( model_name='organizacion', name='descripcion', field=models.TextField(default=''), ), migrations.AddField( model_name='organizacion', name='email', field=models.CharField(default='', max_length=200), ), ]
flexible
{ "blob_id": "5cd767564e8a261561e141abeebb5221cb3ef2c2", "index": 6919, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n", "step-5": "# Generated by Django 2.2.1 on 2019-05-23 14:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('presentes', '0015_caso_lugar_del_hecho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='organizacion',\n name='descripcion',\n field=models.TextField(default=''),\n ),\n migrations.AddField(\n model_name='organizacion',\n name='email',\n field=models.CharField(default='', max_length=200),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
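The two AddField operations in the migration record above imply an Organizacion model with a free-text descripcion and a 200-character email. A model definition consistent with that migration might look like the sketch below; the app layout and any other fields on the model are assumptions, since only the migration itself is shown.

from django.db import models


class Organizacion(models.Model):
    # Fields implied by the AddField operations above; other columns are not visible in the record.
    descripcion = models.TextField(default='')
    email = models.CharField(default='', max_length=200)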
import random #quicksort a list of objects based on keys, which can be any of 3 values # done in O(n) time in one pass, and O(1) additional space complexity def quicksort(x, pivot_index): key1_idx, key2_idx, key3_idx = 0, 0, len(x) key1_val, key2_val= 'key1', 'key2' while key2_idx < key3_idx: if x[key2_idx]['key'] == key1_val: x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx] key1_idx, key2_idx = key1_idx + 1, key2_idx + 1 elif x[key2_idx]['key'] == key2_val: key2_idx += 1 else: key3_idx -= 1 x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx] return x if __name__ == '__main__': keys = ['key1', 'key2', 'key3'] values = [0, 1, 2, 3, 4] key_values = [{'key': key, 'value': value} for key in keys for value in values] random.shuffle(key_values) print(quicksort(key_values, 7))
normal
{ "blob_id": "f193094c551df2a32860948b1a8710b53ca0dfb6", "index": 2413, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n", "step-4": "import random\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n", "step-5": "import random\n\n#quicksort a list of objects based on keys, which can be any of 3 values\n# done in O(n) time in one pass, and O(1) additional space complexity\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val= 'key1', 'key2'\n\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n\n return x\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n\n key_values = [{'key': key, 'value': value} for key in keys for value in values]\n random.shuffle(key_values)\n\n print(quicksort(key_values, 7))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
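A note on the record above: despite its name, the function is a single-pass three-way partition (the Dutch national flag scheme) over the three possible key values, and the pivot_index argument is never used. A stripped-down sketch of the same partition over plain labels, with arbitrary label names, is:

def three_way_partition(items, low, mid):
    # Rearrange items in place: all `low` labels first, then `mid`, then everything else.
    lo, i, hi = 0, 0, len(items)
    while i < hi:
        if items[i] == low:
            items[lo], items[i] = items[i], items[lo]
            lo, i = lo + 1, i + 1
        elif items[i] == mid:
            i += 1
        else:
            hi -= 1
            items[i], items[hi] = items[hi], items[i]
    return items


print(three_way_partition(['b', 'c', 'a', 'b', 'a', 'c'], 'a', 'b'))
# -> ['a', 'a', 'b', 'b', 'c', 'c']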
#!/usr/bin/env python from svg_ros.srv import * import rospy from std_msgs.msg import String from geometry_msgs.msg import Twist from math import * import roslib from nav_msgs.msg import Odometry #Global variables base_distance_x0=0 base_distance_y0=0 base_angle_0=0 base_distance_x1=0 base_distance_y1=0 base_angle_1=0 flag=0 def update_value(msg): global base_distance_x1 global base_distance_y1 global base_angle_1 base_distance_x1=msg.pose.pose.position.x base_distance_y1=msg.pose.pose.position.y base_angle_1=msg.pose.pose.orientation.w print "x: "+str(base_distance_x1) print "y: "+str(base_distance_y1) print "Ang: "+str( acos(base_angle_1)*2 ) #print msg.pose.pose def move_robot(req): print req.param global flag flag=0 global base_distance_x1 global base_distance_y1 global base_angle_1 global base_distance_x0 global base_distance_y0 global base_angle_0 r = rospy.Rate(10) angle=req.param.split() angle=(float)(angle[2]) distance=req.param.split() distance=(float)(distance[1]) # print angle # print distance angle=(int)((angle*57.2958)/.9) distance=(int)(distance/.04) # print angle # print distance cmd_vel = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10) move_cmd = Twist() if distance<0: move_cmd.linear.x =-0.2 else: move_cmd.linear.x =0.2#0.4 turn_cmd = Twist() turn_cmd.linear.x = 0 r = rospy.Rate(100) if angle<0: turn_cmd.angular.z =radians(-90) angle*=-1 else : turn_cmd.angular.z =radians(90) if angle!=0: rospy.loginfo("Turning") for x in range(0,angle): cmd_vel.publish(turn_cmd) r.sleep() turn_cmd.angular.z =0 cmd_vel.publish(move_cmd) base_distance_x0=base_distance_x1 base_distance_y0=base_distance_y1 base_angle_0=base_angle_1 r = rospy.Rate(5) rospy.loginfo("Going Straight") if distance<=0: for x in range(0,abs(distance)): #Resolucion .02m cmd_vel.publish(move_cmd) r.sleep() flag=0 elif distance<=.15: for x in range(0,abs(distance)): #Resolucion .02m cmd_vel.publish(move_cmd) r.sleep() flag=0 else: print 'siiiiiiiiiiiiiiiiiiiisisisissi' flag=1 if distance<=0: move_cmd.linear.x =0 cmd_vel.publish(move_cmd) rospy.loginfo("Finished") print 'Distancia: ' print ( ((base_distance_x1-base_distance_x0)**2) + ((base_distance_y1-base_distance_y0)**2) )**1/2 #print 'Angulo: ' #print base_angle_1 return req.param def mv_turtle(): rospy.init_node('AUX') rospy.Subscriber('odom',Odometry,update_value) print "Ready to move turtle bot." rospy.Rate(2) rospy.spin() if __name__ == '__main__': try: mv_turtle() except rospy.ROSInterruptException: pass
normal
{ "blob_id": "402acaa263ee620fbd9bf7d271dce2e5de4eeae0", "index": 2005, "step-1": "#!/usr/bin/env python\nfrom svg_ros.srv import *\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom math import *\nimport roslib\nfrom nav_msgs.msg import Odometry\n\n\n#Global variables\nbase_distance_x0=0\nbase_distance_y0=0\nbase_angle_0=0\nbase_distance_x1=0\nbase_distance_y1=0\nbase_angle_1=0\nflag=0\n \ndef update_value(msg):\n global base_distance_x1\n global base_distance_y1\n global base_angle_1\n base_distance_x1=msg.pose.pose.position.x \n base_distance_y1=msg.pose.pose.position.y \n base_angle_1=msg.pose.pose.orientation.w \n\n print \"x: \"+str(base_distance_x1)\n print \"y: \"+str(base_distance_y1)\n print \"Ang: \"+str( acos(base_angle_1)*2 )\n #print msg.pose.pose\n\ndef move_robot(req):\n print req.param\n global flag\n flag=0 \n global base_distance_x1\n global base_distance_y1\n global base_angle_1\n\n global base_distance_x0\n global base_distance_y0\n global base_angle_0\n\n r = rospy.Rate(10)\n angle=req.param.split()\n angle=(float)(angle[2])\n distance=req.param.split()\n distance=(float)(distance[1])\n\n# print angle\n# print distance\n angle=(int)((angle*57.2958)/.9)\n distance=(int)(distance/.04)\n# print angle\n# print distance\n\n cmd_vel = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)\n \n move_cmd = Twist()\n if distance<0:\n move_cmd.linear.x =-0.2\n else:\n move_cmd.linear.x =0.2#0.4 \n\n turn_cmd = Twist()\n turn_cmd.linear.x = 0\n\n\n r = rospy.Rate(100)\n if angle<0:\n turn_cmd.angular.z =radians(-90)\n angle*=-1\n else :\n turn_cmd.angular.z =radians(90)\n\n if angle!=0:\n rospy.loginfo(\"Turning\")\n for x in range(0,angle):\n cmd_vel.publish(turn_cmd)\n r.sleep()\n\n turn_cmd.angular.z =0\n cmd_vel.publish(move_cmd)\n \n base_distance_x0=base_distance_x1\n base_distance_y0=base_distance_y1\n base_angle_0=base_angle_1\n\n r = rospy.Rate(5)\n \n rospy.loginfo(\"Going Straight\")\n if distance<=0:\n for x in range(0,abs(distance)): #Resolucion .02m\n cmd_vel.publish(move_cmd)\n r.sleep()\n flag=0\n elif distance<=.15:\n for x in range(0,abs(distance)): #Resolucion .02m\n cmd_vel.publish(move_cmd)\n r.sleep()\n flag=0\n else:\n print 'siiiiiiiiiiiiiiiiiiiisisisissi'\n flag=1\n \n\n if distance<=0:\n move_cmd.linear.x =0\n cmd_vel.publish(move_cmd)\n \n rospy.loginfo(\"Finished\")\n\n print 'Distancia: '\n print ( ((base_distance_x1-base_distance_x0)**2) + ((base_distance_y1-base_distance_y0)**2) )**1/2\n #print 'Angulo: '\n #print base_angle_1\n\n \n return req.param\n\n\n\ndef mv_turtle():\n \n \n\n rospy.init_node('AUX')\n\n rospy.Subscriber('odom',Odometry,update_value)\n print \"Ready to move turtle bot.\"\n rospy.Rate(2)\n rospy.spin()\n\n\n\nif __name__ == '__main__':\n try:\n mv_turtle()\n except rospy.ROSInterruptException:\n pass", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
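For the teleoperation record above, the core pattern is publishing geometry_msgs/Twist messages at a fixed rate on the cmd_vel_mux/input/navi topic. Assuming a standard ROS 1 rospy environment (which the record itself does not establish), a minimal constant-velocity publisher is sketched below; the node name, speed, and duration are placeholders.

import rospy
from geometry_msgs.msg import Twist

rospy.init_node('forward_demo')                                    # placeholder node name
pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)

cmd = Twist()
cmd.linear.x = 0.2        # placeholder forward speed in m/s

rate = rospy.Rate(10)     # publish at 10 Hz
for _ in range(50):       # roughly five seconds of motion
    if rospy.is_shutdown():
        break
    pub.publish(cmd)
    rate.sleep()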
from django.urls import path from . import apiviews from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [path('contacts', apiviews.ContactsView.as_view(), name= 'contacts'), path('contact/<int:pk>', apiviews.ContactView.as_view(), name='contact'), path('signup', apiviews.create_user_with_token, name= 'signup'), path('signin', apiviews.signin, name='signin'), path( 'signout', apiviews.sign_out, name='signout'), path('api-token-auth/', obtain_auth_token, name='api_token_auth')]
normal
{ "blob_id": "5f56838ad0717c4f7a2da6b53f586a88b0166113", "index": 8629, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('contacts', apiviews.ContactsView.as_view(), name=\n 'contacts'), path('contact/<int:pk>', apiviews.ContactView.as_view(),\n name='contact'), path('signup', apiviews.create_user_with_token, name=\n 'signup'), path('signin', apiviews.signin, name='signin'), path(\n 'signout', apiviews.sign_out, name='signout'), path('api-token-auth/',\n obtain_auth_token, name='api_token_auth')]\n", "step-3": "from django.urls import path\nfrom . import apiviews\nfrom rest_framework.authtoken.views import obtain_auth_token\nurlpatterns = [path('contacts', apiviews.ContactsView.as_view(), name=\n 'contacts'), path('contact/<int:pk>', apiviews.ContactView.as_view(),\n name='contact'), path('signup', apiviews.create_user_with_token, name=\n 'signup'), path('signin', apiviews.signin, name='signin'), path(\n 'signout', apiviews.sign_out, name='signout'), path('api-token-auth/',\n obtain_auth_token, name='api_token_auth')]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
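In the URL configuration above, the api-token-auth/ route is Django REST Framework's stock obtain_auth_token view, which takes a POSTed username and password and returns a token for use in an Authorization header. A client-side sketch is below; the host, the credentials, and the use of the requests library are illustrative assumptions, and whether the contacts endpoint actually requires token authentication is not visible in the record.

import requests

BASE = 'http://localhost:8000'   # placeholder host and port

# Exchange credentials for a token at the api-token-auth/ endpoint.
resp = requests.post(BASE + '/api-token-auth/',
                     data={'username': 'alice', 'password': 'secret'})
token = resp.json()['token']

# Send the token on later requests, e.g. the contacts listing.
contacts = requests.get(BASE + '/contacts',
                        headers={'Authorization': 'Token ' + token})
print(contacts.status_code)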
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open('faculty.csv') as facultycsv: emails = list() for line in facultycsv: line = line.split(',') if line[0] == 'name': continue try: email = line[3].rstrip() emails.append(email) except: continue with open('emails.csv', 'w') as emailcsv: writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL) for email in emails: writer.writerow([email]) <|reserved_special_token_1|> import csv with open('faculty.csv') as facultycsv: emails = list() for line in facultycsv: line = line.split(',') if line[0] == 'name': continue try: email = line[3].rstrip() emails.append(email) except: continue with open('emails.csv', 'w') as emailcsv: writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL) for email in emails: writer.writerow([email]) <|reserved_special_token_1|> import csv with open('faculty.csv') as facultycsv: emails = list() #all email addresses for line in facultycsv: line = line.split(',') if line[0] == 'name' : continue try: email = line[3].rstrip() emails.append(email) except: continue with open('emails.csv', 'w') as emailcsv: writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL) for email in emails: writer.writerow([email])
flexible
{ "blob_id": "5af5c10c149c7b0e2a969be7895780d26a4294d0", "index": 7326, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('faculty.csv') as facultycsv:\n emails = list()\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name':\n continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n", "step-3": "import csv\nwith open('faculty.csv') as facultycsv:\n emails = list()\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name':\n continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n", "step-4": "import csv\n\nwith open('faculty.csv') as facultycsv:\n emails = list() #all email addresses\n\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name' : continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\n\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
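The record above extracts the fourth comma-separated field from faculty.csv by hand, which misparses any value containing a quoted comma. An alternative sketch that leans on the csv module's own parsing follows; the 'email' header name is an assumption, since the record selects the column by position rather than by name.

import csv

emails = []
with open('faculty.csv', newline='') as src:
    for row in csv.DictReader(src):
        email = (row.get('email') or '').strip()   # assumed header name
        if email:
            emails.append(email)

with open('emails.csv', 'w', newline='') as dst:
    writer = csv.writer(dst)
    for email in emails:
        writer.writerow([email])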
# Copyright (c) 2011-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. Nilsson, 8/2/11 import pickle import numpy as np from scipy import io as sio from scipy import linalg from polytope import solvers class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. 
The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( "projection_esp error:" " Equality set projection requires `cvxopt.glpk` to run.") # Remove zero columns and rows nonzerorows = np.nonzero( np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() # Make sure origo is inside polytope if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print("Projecting from dim " + str(d + k) + " to " + str(d)) if k == 0: # Not projecting return C, bb, [] if d == 1: # Projection to 1D c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " "LP returned status " + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " + "LP returned status " + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() # min, max x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): # Min case, relax constraint a little to avoid infeasibility E_min = unique_equalityset( C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): # Max case, relax constraint a little to avoid infeasibility E_max = unique_equalityset( C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.], [-1.]]) g = np.array([x_max, -x_min]) # Relocate if trans: g = g + np.dot(G, xc0) # Return zero cols/rows E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print( "Returning projection from dim " + str(d + k) + " to dim 1 \n") return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print("\nStarting eq set " + str(E_0) + "\nStarting ridges ") for rr in L: print(str(rr.E_r)) E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print("\nLooking for neighbors to " + str(rid_fac1.E_0) + " and " + str(rid_fac1.E_r) + " ..") E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print("found neighbor " + str(E_adj) + ". 
\n\nLooking for ridges of neighbor..") ridge_list = ridge( C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print("found " + str(len(ridge_list)) + " ridges\n") found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print("Ridge " + str(E_r) + " already visited, removing from L..") if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print("Adding ridge-facet " + str(E_adj) + " " + str(E_r) + "") L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print("Expected ridge " + str(rid_fac1.E_r)) print("but got ridges ") for rid in ridge_list: print(rid.E_r) raise Exception( "esp: ridge did not return neighboring ridge as expected") G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) # Restore center if trans: g = g + np.dot(G, xc0) # Return zero rows for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-7): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. @param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception( "shoot: could not find starting equality set") gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero( np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception("shoot: wrong dimension of affine hull") return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) # E slices C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] # E_c slices C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] # dots S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print("Doing recursive ESP call") u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] # Correct sign V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp( Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if (len(E_t[0]) == 0) or (len(E_t[1]) == 0): raise Exception( "ridge: recursive call did not return any equality sets") for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm # Restore center br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception("ridge: wrong length of new ridge!") Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print("Doing direct calculation of ridges") X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space( np.vstack([ np.hstack([af, bf]), np.hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) # Have Q_i Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm # accumulate Er_list.append( Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-7): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf # E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br # shape d = C.shape[1] k = D.shape[1] # E_r slices C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] # stack c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != "optimal": print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data["C"] = C data["D"] = D data["b"] = b sio.savemat("matlabdata", data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception( "adjacent: Lp returned status " + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate( c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): # If degenerate, compute affine hull and take preimage E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff( C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data["C"] = C data["D"] = D data["b"] = b data["Er"] = E_r + 1 data["ar"] = ar data["br"] = br data["Ef"] = E + 1 data["af"] = af data["bf"] = bf sio.savemat("matlabdata", data) raise Exception( "adjacent: equality set computation returned empty set") else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7): """Affine projection. Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ # Remove zero columns ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception( "proj_aff: wrong dimension calculated in 1") return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception("proj_aff: wrong dimension calculated in 2") return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7): """Return `True` if pair of dual problems is dual degenerate. Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. 
""" D = - G d = - h.flatten() mu = - z_opt.flatten() # mu >= 0 # Active constraints I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] # Positive elements in dual opt J = np.nonzero(mu > abs_tol)[0] # i, j i = mu < abs_tol # Zero elements in dual opt i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 # 1 if active # Indices where active constraints have 0 dual opt L = np.nonzero(i + j == 2)[0] # sizes nI = len(I) nJ = len(J) nL = len(L) # constraints DI = D[I, :] # Active constraints DJ = D[J, :] # Constraints with positive lagrange mult DL = D[L, :] # Active constraints with zero dual opt dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True else: if len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = - DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt( c= - np.sum(DL, axis=0), G=Ai, h=bi, A=Ae, b=be) if sol['status'] == "dual infeasible": # Dual infeasible -> primal unbounded -> value>epsilon return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. """ if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt( c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) # stack ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) # shape m = G.shape[0] n = G.shape[1] # ht e = 1e-3 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) # stack H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) # Check that they define the same projection at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception("unique_equalityset2: affine hulls not the same") return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. 
""" d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = - np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == "optimal": opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def normalize(AA, bb, abs_tol=1e-7): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) # Remove zero lines keepind = np.nonzero( np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0] A = A[keepind, :] b = b[keepind] # Normalize anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] # Remove duplicate rows keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = (np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0])) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] # Return flat A if only one row if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and (len(N_space) == 0): N_space = v[range(np.amax(n - 1, 1), n), :] return N_space
normal
{ "blob_id": "707c83bc83f606b570af973094574e6675cfc83f", "index": 8793, "step-1": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. 
P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n", 
"step-3": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, 
b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. \\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a 
facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n 
\"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\n<mask token>\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: 
empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\n<mask token>\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-4": "<mask token>\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. 
P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n 'projection_esp error: Equality set projection requires `cvxopt.glpk` to run.'\n )\n nonzerorows = np.nonzero(np.sum(np.abs(np.hstack([CC, DD])), axis=1) >\n abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print('Projecting from dim ' + str(d + k) + ' to ' + str(d))\n if k == 0:\n return C, bb, []\n if d == 1:\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, LP returned status '\n + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != 'optimal':\n raise Exception(\n 'esp: projection to 1D is not full-dimensional, ' +\n 'LP returned status ' + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n E_min = unique_equalityset(C, D, b, np.array([1.0]), x_min + \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n E_max = unique_equalityset(C, D, b, np.array([1.0]), x_max - \n abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.0], [-1.0]])\n g = np.array([x_max, -x_min])\n if trans:\n g = g + np.dot(G, xc0)\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print('Returning projection from dim ' + str(d + k) +\n ' to dim 1 \\n')\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n 
L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print('\\nStarting eq set ' + str(E_0) + '\\nStarting ridges ')\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print('\\nLooking for neighbors to ' + str(rid_fac1.E_0) +\n ' and ' + str(rid_fac1.E_r) + ' ..')\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print('found neighbor ' + str(E_adj) +\n '. \\n\\nLooking for ridges of neighbor..')\n ridge_list = ridge(C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol,\n verbose=verbose)\n if verbose > 0:\n print('found ' + str(len(ridge_list)) + ' ridges\\n')\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print('Ridge ' + str(E_r) +\n ' already visited, removing from L..')\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print('Adding ridge-facet ' + str(E_adj) + ' ' + str(\n E_r) + '')\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print('Expected ridge ' + str(rid_fac1.E_r))\n print('but got ridges ')\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n 'esp: ridge did not return neighboring ridge as expected')\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n if trans:\n g = g + np.dot(G, xc0)\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-07):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception('shoot: could not find starting equality set')\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) <\n abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=\n abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception('shoot: wrong dimension of affine hull')\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n 
`ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print('Doing recursive ESP call')\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0]\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol,\n verbose=0)\n if len(E_t[0]) == 0 or len(E_t[1]) == 0:\n raise Exception(\n 'ridge: recursive call did not return any equality sets')\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception('ridge: wrong length of new ridge!')\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print('Doing direct calculation of ridges')\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(np.vstack([np.hstack([af, bf]), np.\n hstack([S[i, :], t[i]])]), nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n Er_list.append(Ridge(np.sort(np.hstack([E, E_c[Q]])\n ), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-07):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n d = C.shape[1]\n k = D.shape[1]\n 
C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != 'optimal':\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n sio.savemat('matlabdata', data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception('adjacent: Lp returned status ' + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol,\n dual_opt_sol, abs_tol=abs_tol):\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(C_er[E_temp, :], D_er[E_temp, :], b_er[\n E_temp], expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data['C'] = C\n data['D'] = D\n data['b'] = b\n data['Er'] = E_r + 1\n data['ar'] = ar\n data['br'] = br\n data['Ef'] = E + 1\n data['af'] = af\n data['bf'] = bf\n sio.savemat('matlabdata', data)\n raise Exception(\n 'adjacent: equality set computation returned empty set')\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-07):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 1')\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception('proj_aff: wrong dimension calculated in 2')\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-07):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = -G\n d = -h.flatten()\n mu = -z_opt.flatten()\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n J = np.nonzero(mu > abs_tol)[0]\n i = mu < abs_tol\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1\n L = np.nonzero(i + j == 2)[0]\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n DI = D[I, :]\n DJ = D[J, :]\n DL = D[L, :]\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n elif len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = -DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(c=-np.sum(DL, axis=0), G=Ai, h\n =bi, A=Ae, b=be)\n if sol['status'] == 'dual infeasible':\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-07, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(c=A_i, G=A, h=b, A=a.T, b=bf)\n if sol['status'] != 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str\n (sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n E.append(i)\n if len(E) == 0:\n raise Exception('unique_equalityset: empty E')\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-07):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n m = G.shape[0]\n n = G.shape[1]\n e = 0.001\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == 'optimal':\n raise Exception('unique_equalityset: LP returned status ' + str(sol\n ['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception('unique_equalityset2: affine hulls not the same')\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope 
parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = -np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == 'optimal':\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-07):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n keepind = np.nonzero(np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0\n ]\n A = A[keepind, :]\n b = b[keepind]\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0]\n )\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and len(N_space) == 0:\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-5": "# Copyright (c) 2011-2014 by California Institute of Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the California Institute of Technology nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CALTECH\n# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\nr\"\"\"Equality Set Projection (ESP).\n\nNon-vertex polytope projection method from\n- https://web.archive.org/web/20150103142532/\n https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html\n- https://infoscience.epfl.ch/record/169768\n\nVery unstable, can not handle complex polytopes.\n\n\nReference\n=========\n\n\\cite{Jones04}\n\"\"\"\n# Created by P. Nilsson, 8/2/11\nimport pickle\n\nimport numpy as np\nfrom scipy import io as sio\nfrom scipy import linalg\n\nfrom polytope import solvers\n\n\nclass Ridge(object):\n \"\"\"A ridge.\n\n Attributes:\n\n - `E_r`: Equality set of a facet\n\n - `ar, br`: Affine hull of the facet\n s.t. P_{E_0} = P intersection {x | ar x = br}.\n \"\"\"\n\n def __init__(self, E, a, b):\n self.E_r = E\n self.ar = a\n self.br = b\n\n\nclass Ridge_Facet(object):\n \"\"\"A ridge facet.\n\n Attributes:\n\n - `E_r`: Equality set of a ridge\n\n - `ar,br`: Affine hull of the ridge s.t.\n P_{E_f} intersection {x | ar x = br}\n defines the ridge, where E_f is the\n equality set of the facet.\n\n - `E_0`: Equality set of a facet\n\n - `af,bf`: Affine hull of the facet.\n \"\"\"\n\n def __init__(self, E_r, ar, br, E_0, af, bf):\n self.E_r = E_r\n self.ar = ar\n self.br = br\n self.E_0 = E_0\n self.af = af\n self.bf = bf\n\n\ndef esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n \"\"\"Project polytope [C D] x <= b onto C coordinates.\n\n Projects the polytope [C D] x <= b onto the\n coordinates that correspond to C. 
The projection of the polytope\n P = {[C D]x <= b} where C is M x D and D is M x K is\n defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}\n \"\"\"\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n \"projection_esp error:\"\n \" Equality set projection requires `cvxopt.glpk` to run.\")\n # Remove zero columns and rows\n nonzerorows = np.nonzero(\n np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n # Make sure origo is inside polytope\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print(\"Projecting from dim \" + str(d + k) + \" to \" + str(d))\n if k == 0:\n # Not projecting\n return C, bb, []\n if d == 1:\n # Projection to 1D\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \"\n \"LP returned status \" + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \" +\n \"LP returned status \" + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n # min, max\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n # Min case, relax constraint a little to avoid infeasibility\n E_min = unique_equalityset(\n C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n # Max case, relax constraint a little to avoid infeasibility\n E_max = unique_equalityset(\n C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.], [-1.]])\n g = np.array([x_max, -x_min])\n # Relocate\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero cols/rows\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print(\n \"Returning projection from dim \" +\n str(d + k) + \" to dim 1 \\n\")\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print(\"\\nStarting eq set \" + str(E_0) + \"\\nStarting ridges \")\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print(\"\\nLooking for neighbors to \" + str(rid_fac1.E_0) +\n \" and \" + str(rid_fac1.E_r) + \" ..\")\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print(\"found neighbor \" + str(E_adj) +\n \". 
\\n\\nLooking for ridges of neighbor..\")\n ridge_list = ridge(\n C, D, b, E_adj, a_adj, b_adj,\n abs_tol=abs_tol, verbose=verbose)\n if verbose > 0:\n print(\"found \" + str(len(ridge_list)) + \" ridges\\n\")\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print(\"Ridge \" + str(E_r) +\n \" already visited, removing from L..\")\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print(\"Adding ridge-facet \" + str(E_adj) +\n \" \" + str(E_r) + \"\")\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print(\"Expected ridge \" + str(rid_fac1.E_r))\n print(\"but got ridges \")\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n \"esp: ridge did not return neighboring ridge as expected\")\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n # Restore center\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero rows\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E\n\n\ndef shoot(C, D, b, maxiter=1000, abs_tol=1e-7):\n \"\"\"Return random equality set of P that projects on a projection facet.\n\n Returns randomly selected equality set E_0 of P such\n that the projection of the equality set is a facet of the projection.\n\n @param C: Matrix defining the polytope Cx+Dy <= b\n @param D: Matrix defining the polytope Cx+Dy <= b\n @param b: Vector defining the polytope Cx+Dy <= b\n\n @return: `E_0,af,bf`: Equality set and affine hull\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n iter = 0\n while True:\n if iter > maxiter:\n raise Exception(\n \"shoot: could not find starting equality set\")\n gamma = np.random.rand(d) - 0.5\n c = np.zeros(k + 1)\n c[0] = -1\n G = np.hstack([np.array([np.dot(C, gamma)]).T, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n opt_sol = np.array(sol['x']).flatten()\n opt_dual = np.array(sol['z']).flatten()\n r_opt = opt_sol[0]\n y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()\n x_opt = r_opt * gamma\n E_0 = np.nonzero(\n np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]\n DE0 = D[E_0, :]\n CE0 = C[E_0, :]\n b0 = b[E_0]\n if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:\n break\n iter += 1\n af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)\n if is_dual_degenerate(c, G, b, None, None, opt_sol,\n opt_dual, abs_tol=abs_tol):\n E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)\n af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])\n if len(bf) > 1:\n raise Exception(\"shoot: wrong dimension of affine hull\")\n return E_0, af.flatten(), bf\n\n\ndef ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Compute all ridges of a facet in the projection.\n\n Input:\n `C,D,b`: Original polytope data\n `E,af,bf`: Equality set and affine hull of a facet in the projection\n\n Output:\n `ridge_list`: A list containing all the ridges of\n the facet as Ridge objects\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n Er_list = []\n q = C.shape[0]\n E_c = np.setdiff1d(range(q), E)\n # E slices\n C_E = C[E, :]\n D_E = D[E, :]\n b_E = b[E, :]\n # E_c slices\n C_Ec = C[E_c, :]\n D_Ec = D[E_c, :]\n b_Ec = b[E_c]\n # dots\n S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)\n L = np.dot(D_Ec, null_space(D_E))\n t = b_Ec - 
np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))\n if rank(np.hstack([C_E, D_E])) < k + 1:\n if verbose > 1:\n print(\"Doing recursive ESP call\")\n u, s, v = linalg.svd(np.array([af]), full_matrices=1)\n sigma = s[0]\n v = v.T * u[0, 0] # Correct sign\n V_hat = v[:, [0]]\n V_tilde = v[:, range(1, v.shape[1])]\n Cnew = np.dot(S, V_tilde)\n Dnew = L\n bnew = t - np.dot(S, V_hat).flatten() * bf / sigma\n Anew = np.hstack([Cnew, Dnew])\n xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)\n bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()\n Gt, gt, E_t = esp(\n Cnew, Dnew, bnew,\n centered=True, abs_tol=abs_tol, verbose=0)\n if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):\n raise Exception(\n \"ridge: recursive call did not return any equality sets\")\n for i in range(len(E_t)):\n E_f = E_t[i]\n er = np.sort(np.hstack([E, E_c[E_f]]))\n ar = np.dot(Gt[i, :], V_tilde.T).flatten()\n br0 = gt[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br0 - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar * np.sign(br) / norm\n br = br * np.sign(br) / norm\n # Restore center\n br = br + np.dot(Gt[i, :], xc2) / norm\n if len(ar) > d:\n raise Exception(\"ridge: wrong length of new ridge!\")\n Er_list.append(Ridge(er, ar, br))\n else:\n if verbose > 0:\n print(\"Doing direct calculation of ridges\")\n X = np.arange(S.shape[0])\n while len(X) > 0:\n i = X[0]\n X = np.setdiff1d(X, i)\n if np.linalg.norm(S[i, :]) < abs_tol:\n continue\n Si = S[i, :]\n Si = Si / np.linalg.norm(Si)\n if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:\n test1 = null_space(\n np.vstack([\n np.hstack([af, bf]),\n np.hstack([S[i, :], t[i]])]),\n nonempty=True)\n test2 = np.hstack([S, np.array([t]).T])\n test = np.dot(test1.T, test2.T)\n test = np.sum(np.abs(test), 0)\n Q_i = np.nonzero(test > abs_tol)[0]\n Q = np.nonzero(test < abs_tol)[0]\n X = np.setdiff1d(X, Q)\n # Have Q_i\n Sq = S[Q_i, :]\n tq = t[Q_i]\n c = np.zeros(d + 1)\n c[0] = 1\n Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])\n Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])\n G = np.vstack([Gup, Gdo])\n h = np.hstack([tq, 1])\n Al = np.zeros([2, 1])\n Ar = np.vstack([af, S[i, :]])\n A = np.hstack([Al, Ar])\n bb = np.hstack([bf, t[i]])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A, b=bb)\n if sol['status'] == 'optimal':\n tau = sol['x'][0]\n if tau < -abs_tol:\n ar = np.array([S[i, :]]).flatten()\n br = t[i].flatten()\n # Make orthogonal to facet\n ar = ar - af * np.dot(af.flatten(), ar.flatten())\n br = br - bf * np.dot(af.flatten(), ar.flatten())\n # Normalize and make ridge equation point outwards\n norm = np.sqrt(np.sum(ar * ar))\n ar = ar / norm\n br = br / norm\n # accumulate\n Er_list.append(\n Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))\n return Er_list\n\n\ndef adjacent(C, D, b, rid_fac, abs_tol=1e-7):\n \"\"\"Compute the (unique) adjacent facet.\n\n @param rid_fac: A Ridge_Facet object containing the parameters for\n a facet and one of its ridges.\n\n @return: (E_adj,a_adj,b_adj): The equality set and parameters for\n the adjacent facet such that::\n\n P_{E_adj} = P intersection {x | a_adj x = b_adj}\n \"\"\"\n E = rid_fac.E_0\n af = rid_fac.af\n bf = rid_fac.bf\n #\n E_r = rid_fac.E_r\n ar = rid_fac.ar\n br = rid_fac.br\n # shape\n d = C.shape[1]\n k = D.shape[1]\n # E_r slices\n C_er = C[E_r, :]\n D_er = D[E_r, :]\n b_er = b[E_r]\n # stack\n c = -np.hstack([ar, np.zeros(k)])\n G = np.hstack([C_er, 
D_er])\n h = b_er\n A = np.hstack([af, np.zeros(k)])\n sol = solvers._solve_lp_using_cvxopt(\n c, G, h, A=A.T, b=bf * (1 - 0.01))\n if sol['status'] != \"optimal\":\n print(G)\n print(h)\n print(af)\n print(bf)\n print(ar)\n print(br)\n print(np.dot(af, ar))\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n sio.savemat(\"matlabdata\", data)\n with open('polytope.p', 'wb') as f:\n pickle.dump(data, f)\n raise Exception(\n \"adjacent: Lp returned status \" + str(sol['status']))\n opt_sol = np.array(sol['x']).flatten()\n dual_opt_sol = np.array(sol['z']).flatten()\n x_opt = opt_sol[range(d)]\n y_opt = opt_sol[range(d, d + k)]\n if is_dual_degenerate(\n c.flatten(), G, h, A, bf * (1 - 0.01),\n opt_sol, dual_opt_sol, abs_tol=abs_tol):\n # If degenerate, compute affine hull and take preimage\n E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]\n a_temp, b_temp = proj_aff(\n C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],\n expected_dim=1, abs_tol=abs_tol)\n E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)\n if len(E_adj) == 0:\n data = {}\n data[\"C\"] = C\n data[\"D\"] = D\n data[\"b\"] = b\n data[\"Er\"] = E_r + 1\n data[\"ar\"] = ar\n data[\"br\"] = br\n data[\"Ef\"] = E + 1\n data[\"af\"] = af\n data[\"bf\"] = bf\n sio.savemat(\"matlabdata\", data)\n raise Exception(\n \"adjacent: equality set computation returned empty set\")\n else:\n r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol\n E_adj = np.nonzero(r)[0]\n C_eadj = C[E_adj, :]\n D_eadj = D[E_adj, :]\n b_eadj = b[E_adj]\n af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)\n return E_adj, af_adj, bf_adj\n\n\ndef proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):\n \"\"\"Affine projection.\n\n Compute the set aff = {x | Ce x + De y = be} on the form\n aff = ({x | a x = b} intersection {Ce x + De y < be}).\n\n Input: Polytope parameters Ce, De and be\n\n Output: Constants a and b\n \"\"\"\n # Remove zero columns\n ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]\n D = De[:, ind]\n if D.shape[1] == 0:\n a = Ce\n b = be\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\n \"proj_aff: wrong dimension calculated in 1\")\n return a_n.flatten(), b_n\n sh = np.shape(D.T)\n m = sh[0]\n n = sh[1]\n nDe = null_space(D.T)\n a = np.dot(nDe.T, Ce)\n b = np.dot(nDe.T, be)\n a_n, b_n = normalize(a, b)\n if expected_dim is not None:\n if expected_dim != b_n.size:\n raise Exception(\"proj_aff: wrong dimension calculated in 2\")\n return a_n, b_n\n\n\ndef is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):\n \"\"\"Return `True` if pair of dual problems is dual degenerate.\n\n Checks if the pair of dual problems::\n\n (P): min c'x (D): max h'z + b'y\n s.t Gx <= h s.t G'z + A'y = c\n Ax = b z <= 0\n\n is dual degenerate, i.e. 
if (P) has several optimal solutions.\n Optimal solutions x* and z* are required.\n\n Input:\n\n `G,h,A,b`: Parameters of (P)\n `x_opt`: One optimal solution to (P)\n `z_opt`: The optimal solution to (D) corresponding to\n _inequality constraints_ in (P)\n\n Output:\n `dual`: Boolean indicating whether (P) has many optimal solutions.\n \"\"\"\n D = - G\n d = - h.flatten()\n mu = - z_opt.flatten() # mu >= 0\n # Active constraints\n I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]\n # Positive elements in dual opt\n J = np.nonzero(mu > abs_tol)[0]\n # i, j\n i = mu < abs_tol # Zero elements in dual opt\n i = i.astype(int)\n j = np.zeros(len(mu), dtype=int)\n j[I] = 1 # 1 if active\n # Indices where active constraints have 0 dual opt\n L = np.nonzero(i + j == 2)[0]\n # sizes\n nI = len(I)\n nJ = len(J)\n nL = len(L)\n # constraints\n DI = D[I, :] # Active constraints\n DJ = D[J, :] # Constraints with positive lagrange mult\n DL = D[L, :] # Active constraints with zero dual opt\n dual = 0\n if A is None:\n test = DI\n else:\n test = np.vstack([DI, A])\n if rank(test) < np.amin(DI.shape):\n return True\n else:\n if len(L) > 0:\n if A is None:\n Ae = DJ\n else:\n Ae = np.vstack([DJ, A])\n be = np.zeros(Ae.shape[0])\n Ai = - DL\n bi = np.zeros(nL)\n sol = solvers._solve_lp_using_cvxopt(\n c= - np.sum(DL, axis=0), G=Ai,\n h=bi, A=Ae, b=be)\n if sol['status'] == \"dual infeasible\":\n # Dual infeasible -> primal unbounded -> value>epsilon\n return True\n if sol['primal objective'] > abs_tol:\n return True\n return False\n\n\ndef unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):\n \"\"\"Return equality set E with the following property:\n\n P_E = {x | af x = bf} intersection P\n\n where P is the polytope C x + D y < b\n\n The inequalities have to be satisfied with equality everywhere on\n the face defined by af and bf.\n \"\"\"\n if D is not None:\n A = np.hstack([C, D])\n a = np.hstack([af, np.zeros(D.shape[1])])\n else:\n A = C\n a = af\n E = []\n for i in range(A.shape[0]):\n A_i = np.array(A[i, :])\n b_i = b[i]\n sol = solvers._solve_lp_using_cvxopt(\n c=A_i, G=A, h=b,\n A=a.T, b=bf)\n if sol['status'] != \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n if np.abs(sol['primal objective'] - b_i) < abs_tol:\n # Constraint is active everywhere\n E.append(i)\n if len(E) == 0:\n raise Exception(\"unique_equalityset: empty E\")\n return np.array(E)\n\n\ndef unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):\n A = np.hstack([C, D])\n E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]\n af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)\n # stack\n ineq = np.hstack([af, np.zeros(D.shape[1])])\n G = np.vstack([A, np.vstack([ineq, -ineq])])\n h = np.hstack([b, np.hstack([bf, -bf])])\n # shape\n m = G.shape[0]\n n = G.shape[1]\n # ht\n e = 1e-3\n v = np.vstack([np.zeros([1, n]), np.eye(n)]).T\n v = v - np.array([np.mean(v, axis=1)]).T\n v = v * e\n ht = h + np.amin(-np.dot(G, v), axis=1)\n # stack\n H1 = np.hstack([G, -np.eye(m)])\n H2 = np.hstack([G, np.zeros([m, m])])\n H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])\n H = np.vstack([H1, np.vstack([H2, H3])])\n h = np.hstack([ht, np.hstack([h, np.zeros(m)])])\n c = np.hstack([np.zeros(n), np.ones(m)])\n sol = solvers.lpsolve(c, H, h, solver='glpk')\n if not sol['status'] == \"optimal\":\n raise Exception(\n \"unique_equalityset: LP returned status \" +\n str(sol['status']))\n opt_sol2 = np.array(sol['x']).flatten()\n x = opt_sol2[range(n)]\n s = 
opt_sol2[range(n, len(opt_sol2))]\n E = np.nonzero(s > abs_tol)[0]\n print(E)\n E = np.sort(E[np.nonzero(E < C.shape[0])])\n # Check that they define the same projection\n at, bt = proj_aff(C[E, :], D[E, :], b[E])\n if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:\n raise Exception(\"unique_equalityset2: affine hulls not the same\")\n return E\n\n\ndef cheby_center(C, D, b):\n \"\"\"Calculate Chebyshev center for the polytope `C x + D y <= b`.\n\n Input:\n `C, D, b`: Polytope parameters\n\n Output:\n `x_0, y_0`: The chebyshev centra\n `boolean`: True if a point could be found, False otherwise.\n \"\"\"\n d = C.shape[1]\n k = D.shape[1]\n A = np.hstack([C, D])\n dim = np.shape(A)[1]\n c = - np.r_[np.zeros(dim), 1]\n norm2 = np.sqrt(np.sum(A * A, axis=1))\n G = np.c_[A, norm2]\n sol = solvers.lpsolve(c, G, h=b, solver='glpk')\n if sol['status'] == \"optimal\":\n opt = np.array(sol['x'][0:-1]).flatten()\n return opt[range(d)], opt[range(d, d + k)], True\n else:\n return np.zeros(d), np.zeros(k), False\n\n\ndef normalize(AA, bb, abs_tol=1e-7):\n \"\"\"Normalize `A x = b` such that `A'A = 1` and `b > 0`.\n\n Also, remove duplicate lines.\n \"\"\"\n if AA.size == 0:\n return AA, bb\n dim = AA.size / bb.size\n A = AA.copy().reshape(bb.size, dim)\n b = bb.copy().reshape(bb.size, 1)\n # Remove zero lines\n keepind = np.nonzero(\n np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]\n A = A[keepind, :]\n b = b[keepind]\n # Normalize\n anorm = np.sqrt(np.sum(A * A, axis=1))\n for i in range(len(anorm)):\n A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]\n b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]\n # Remove duplicate rows\n keep_row = []\n for i in range(len(anorm)):\n unique = True\n for j in range(i + 1, len(anorm)):\n test = (np.sum(np.abs(A[i, :] - A[j, :])) +\n np.abs(b[i, 0] - b[j, 0]))\n if test < abs_tol:\n unique = False\n break\n if unique:\n keep_row.append(i)\n A_n = A[keep_row, :]\n b_n = b[keep_row, 0]\n # Return flat A if only one row\n if A_n.size == dim:\n A_n = A_n.flatten()\n return A_n, b_n.flatten()\n\n\ndef rank(A, eps=1e-15):\n u, s, vh = linalg.svd(A)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n return np.sum(s > tol)\n\n\ndef null_space(A, eps=1e-15, nonempty=False):\n \"\"\"Returns the null space N_A to matrix A such that A N_A = 0.\"\"\"\n u, s, v = linalg.svd(A, full_matrices=1)\n m = A.shape[0]\n n = A.shape[1]\n tol = np.amax([m, n]) * np.amax(s) * eps\n rank = np.sum(s > tol)\n N_space = v[range(rank, n), :].T\n if nonempty and (len(N_space) == 0):\n N_space = v[range(np.amax(n - 1, 1), n), :]\n return N_space\n", "step-ids": [ 7, 9, 16, 18, 20 ] }
[ 7, 9, 16, 18, 20 ]
# coding: utf-8 import sys #from operator import itemgetter sysread = sys.stdin.readline read = sys.stdin.read from heapq import heappop, heappush from collections import defaultdict sys.setrecursionlimit(10**7) import math #from itertools import product#accumulate, combinations, product #import bisect# lower_bound etc #import numpy as np #from copy import deepcopy def run(): mod = 1000000007 N, *AB = map(int, read().split()) A_B = [] INF = float('inf') zerozero = 0 for i in range(N): a = AB[2*i] b = AB[2*i+1] if a== 0 and b == 0: zerozero += 1 elif b == 0: A_B.append((INF, 0)) elif a == 0: A_B.append((0, INF)) else: tmp = math.gcd(a,b) if a / b > 0 :v = 1 else: v = -1 A_B.append((abs(a//tmp), v * abs(b//tmp))) comb_dict = defaultdict(lambda:[0,0]) for ai, bi in A_B: if ai == INF: comb_dict[0][1] += 1 elif bi == INF: comb_dict[0][0] += 1 elif bi < 0: comb_dict[(ai,bi)][0] += 1 else: comb_dict[(bi, -ai)][1] += 1 ret = 1 for _, val_list in comb_dict.items(): a,b = val_list if a == 0 or b == 0: ret *= pow(2, max(a,b), mod) else: ret *= pow(2, a, mod) + pow(2, b, mod) - 1 ret %= mod ret += zerozero-1 print(ret%mod) if __name__ == "__main__": run()
normal
{ "blob_id": "f73a3bd7665ac9cc90085fcac2530c93bef69d3d", "index": 6705, "step-1": "<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\n<mask token>\n", "step-2": "<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n", "step-3": "<mask token>\nsysread = sys.stdin.readline\nread = sys.stdin.read\n<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n", "step-4": "import sys\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10 ** 7)\nimport math\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n 
else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n", "step-5": "# coding: utf-8\nimport sys\n#from operator import itemgetter\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10**7)\nimport math\n#from itertools import product#accumulate, combinations, product\n#import bisect# lower_bound etc\n#import numpy as np\n#from copy import deepcopy\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2*i]\n b = AB[2*i+1]\n if a== 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a,b)\n if a / b > 0 :v = 1\n else: v = -1\n A_B.append((abs(a//tmp), v * abs(b//tmp)))\n\n comb_dict = defaultdict(lambda:[0,0])\n\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[(ai,bi)][0] += 1\n else:\n comb_dict[(bi, -ai)][1] += 1\n\n ret = 1\n for _, val_list in comb_dict.items():\n a,b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a,b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero-1\n print(ret%mod)\n\n\n\nif __name__ == \"__main__\":\n run()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class Conexion: def __init__(self, direccion, destino): self.set_direccion(direccion) self.set_destino(destino) <|reserved_special_token_0|> <|reserved_special_token_0|> def set_direccion(self, direccion): self._direccion = direccion <|reserved_special_token_0|> <|reserved_special_token_0|> class GrupoConexiones(grupo.Grupo): def conexiones(self): return self.coleccion() def conecta_al(self, direccion): for conexion in self.conexiones(): if conexion.direccion() == direccion: return conexion.destino() return localidad_nula class Localidad: def __init__(self, nombre, descripcion, conexiones=None, contiene=None): self.set_nombre(nombre) self.set_descripcion(descripcion) self._conexiones = GrupoConexiones(conexiones) self._grupo_items = items.GrupoItems(contiene) def __repr__(self): return self.nombre() def nombre(self): return self._nombre def set_nombre(self, nombre): self._nombre = nombre def descripcion(self): return self._descripcion def set_descripcion(self, descripcion): self._descripcion = descripcion def conexiones(self): return self._conexiones def items(self): return self._grupo_items def describir(self): print(self.nombre()) print(self.descripcion()) if not self.items().esta_vacio(): print('También puedes ver:') for item in self.items(): print('-', item.nombre()) def conecta_con(self, iterable): self.conexiones().meter_masivo(iterable) def conecta_al(self, direccion): return self.conexiones().conecta_al(direccion) def meter_conexion(self, conexion): self.conexiones().meter(conexion) def contiene_token(self, token): return self.items().contiene_token(token) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Conexion: def __init__(self, direccion, destino): self.set_direccion(direccion) self.set_destino(destino) <|reserved_special_token_0|> def direccion(self): return self._direccion def set_direccion(self, direccion): self._direccion = direccion <|reserved_special_token_0|> def set_destino(self, destino): self._destino = destino class GrupoConexiones(grupo.Grupo): def conexiones(self): return self.coleccion() def conecta_al(self, direccion): for conexion in self.conexiones(): if conexion.direccion() == direccion: return conexion.destino() return localidad_nula class Localidad: def __init__(self, nombre, descripcion, conexiones=None, contiene=None): self.set_nombre(nombre) self.set_descripcion(descripcion) self._conexiones = GrupoConexiones(conexiones) self._grupo_items = items.GrupoItems(contiene) def __repr__(self): return self.nombre() def nombre(self): return self._nombre def set_nombre(self, nombre): self._nombre = nombre def descripcion(self): return self._descripcion def set_descripcion(self, descripcion): self._descripcion = descripcion def conexiones(self): return self._conexiones def items(self): return self._grupo_items def describir(self): print(self.nombre()) print(self.descripcion()) if not self.items().esta_vacio(): print('También puedes ver:') for item in self.items(): print('-', item.nombre()) def conecta_con(self, iterable): self.conexiones().meter_masivo(iterable) def conecta_al(self, direccion): return self.conexiones().conecta_al(direccion) def meter_conexion(self, conexion): self.conexiones().meter(conexion) def contiene_token(self, token): return self.items().contiene_token(token) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Conexion: def __init__(self, direccion, destino): self.set_direccion(direccion) self.set_destino(destino) def 
__repr__(self): return str(self.direccion()) + ' => ' + str(self.destino()) def direccion(self): return self._direccion def set_direccion(self, direccion): self._direccion = direccion def destino(self): return self._destino def set_destino(self, destino): self._destino = destino class GrupoConexiones(grupo.Grupo): def conexiones(self): return self.coleccion() def conecta_al(self, direccion): for conexion in self.conexiones(): if conexion.direccion() == direccion: return conexion.destino() return localidad_nula class Localidad: def __init__(self, nombre, descripcion, conexiones=None, contiene=None): self.set_nombre(nombre) self.set_descripcion(descripcion) self._conexiones = GrupoConexiones(conexiones) self._grupo_items = items.GrupoItems(contiene) def __repr__(self): return self.nombre() def nombre(self): return self._nombre def set_nombre(self, nombre): self._nombre = nombre def descripcion(self): return self._descripcion def set_descripcion(self, descripcion): self._descripcion = descripcion def conexiones(self): return self._conexiones def items(self): return self._grupo_items def describir(self): print(self.nombre()) print(self.descripcion()) if not self.items().esta_vacio(): print('También puedes ver:') for item in self.items(): print('-', item.nombre()) def conecta_con(self, iterable): self.conexiones().meter_masivo(iterable) def conecta_al(self, direccion): return self.conexiones().conecta_al(direccion) def meter_conexion(self, conexion): self.conexiones().meter(conexion) def contiene_token(self, token): return self.items().contiene_token(token) localidad_nula = Localidad('NULA', 'Localidad nula.') <|reserved_special_token_1|> import items import grupo class Conexion: def __init__(self, direccion, destino): self.set_direccion(direccion) self.set_destino(destino) def __repr__(self): return str(self.direccion()) + ' => ' + str(self.destino()) def direccion(self): return self._direccion def set_direccion(self, direccion): self._direccion = direccion def destino(self): return self._destino def set_destino(self, destino): self._destino = destino class GrupoConexiones(grupo.Grupo): def conexiones(self): return self.coleccion() def conecta_al(self, direccion): for conexion in self.conexiones(): if conexion.direccion() == direccion: return conexion.destino() return localidad_nula class Localidad: def __init__(self, nombre, descripcion, conexiones=None, contiene=None): self.set_nombre(nombre) self.set_descripcion(descripcion) self._conexiones = GrupoConexiones(conexiones) self._grupo_items = items.GrupoItems(contiene) def __repr__(self): return self.nombre() def nombre(self): return self._nombre def set_nombre(self, nombre): self._nombre = nombre def descripcion(self): return self._descripcion def set_descripcion(self, descripcion): self._descripcion = descripcion def conexiones(self): return self._conexiones def items(self): return self._grupo_items def describir(self): print(self.nombre()) print(self.descripcion()) if not self.items().esta_vacio(): print('También puedes ver:') for item in self.items(): print('-', item.nombre()) def conecta_con(self, iterable): self.conexiones().meter_masivo(iterable) def conecta_al(self, direccion): return self.conexiones().conecta_al(direccion) def meter_conexion(self, conexion): self.conexiones().meter(conexion) def contiene_token(self, token): return self.items().contiene_token(token) localidad_nula = Localidad('NULA', 'Localidad nula.') <|reserved_special_token_1|> import items import grupo class Conexion: def __init__(self, direccion, destino): 
self.set_direccion(direccion) self.set_destino(destino) def __repr__(self): return str(self.direccion()) + ' => ' + str(self.destino()) def direccion(self): return self._direccion def set_direccion(self, direccion): self._direccion = direccion def destino(self): return self._destino def set_destino(self, destino): self._destino = destino class GrupoConexiones(grupo.Grupo): def conexiones(self): return self.coleccion() def conecta_al(self, direccion): for conexion in self.conexiones(): if conexion.direccion() == direccion: return conexion.destino() return localidad_nula class Localidad: def __init__(self, nombre, descripcion, conexiones=None, contiene=None): self.set_nombre(nombre) self.set_descripcion(descripcion) self._conexiones = GrupoConexiones(conexiones) self._grupo_items = items.GrupoItems(contiene) def __repr__(self): return self.nombre() def nombre(self): return self._nombre def set_nombre(self, nombre): self._nombre = nombre def descripcion(self): return self._descripcion def set_descripcion(self, descripcion): self._descripcion = descripcion def conexiones(self): return self._conexiones def items(self): return self._grupo_items def describir(self): print(self.nombre()) print(self.descripcion()) if not self.items().esta_vacio(): print('También puedes ver:') for item in self.items(): print('-', item.nombre()) def conecta_con(self, iterable): self.conexiones().meter_masivo(iterable) def conecta_al(self, direccion): return self.conexiones().conecta_al(direccion) def meter_conexion(self, conexion): self.conexiones().meter(conexion) def contiene_token(self, token): return self.items().contiene_token(token) # def meter_item(self, item): # self._grupo_items.meter(item) # def sacar_item(self, item): # self._grupo_items.sacar(item) # def contiene_item(self, item): # return self._grupo_items.contiene(item) # def tiene_items(self): # return self._grupo_items.esta_vacio() localidad_nula = Localidad('NULA', 'Localidad nula.')
flexible
{ "blob_id": "f59e61977f7c72ab191aadccbd72d23f831b3a1c", "index": 7050, "step-1": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n <mask token>\n <mask token>\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n <mask token>\n <mask token>\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n <mask token>\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n <mask token>\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return 
self.items().contiene_token(token)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n", "step-4": "import items\nimport grupo\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n 
print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n", "step-5": "import items\nimport grupo\n\nclass Conexion:\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\nclass GrupoConexiones(grupo.Grupo):\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\nclass Localidad:\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n # def meter_item(self, item):\n # self._grupo_items.meter(item)\n\n # def sacar_item(self, item):\n # self._grupo_items.sacar(item)\n\n # def contiene_item(self, item):\n # return self._grupo_items.contiene(item)\n\n # def tiene_items(self):\n # return self._grupo_items.esta_vacio()\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n", "step-ids": [ 20, 22, 25, 26, 27 ] }
[ 20, 22, 25, 26, 27 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = patterns('', url('^$', 'analyze.views.analyze', name='analyze')) <|reserved_special_token_1|> from django.conf.urls import patterns, include, url from django.contrib.auth.decorators import login_required from django.views.generic import TemplateView from analyze import views urlpatterns = patterns('', url('^$', 'analyze.views.analyze', name='analyze')) <|reserved_special_token_1|> from django.conf.urls import patterns, include, url from django.contrib.auth.decorators import login_required from django.views.generic import TemplateView from analyze import views #from lecture import views urlpatterns = patterns('', url(r'^$', 'analyze.views.analyze', name='analyze'), )
flexible
{ "blob_id": "035de226c2d2ee85cb7e319de35fb09b21bc523d", "index": 9061, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = patterns('', url('^$', 'analyze.views.analyze', name='analyze'))\n", "step-3": "from django.conf.urls import patterns, include, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom analyze import views\nurlpatterns = patterns('', url('^$', 'analyze.views.analyze', name='analyze'))\n", "step-4": "from django.conf.urls import patterns, include, url\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\n\nfrom analyze import views\n\n#from lecture import views\nurlpatterns = patterns('',\n\turl(r'^$', 'analyze.views.analyze', name='analyze'),\n)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import logging, os, zc.buildout, sys, shutil class ZipEggs: def __init__(self, buildout, name, options): self.name, self.options = name, options if options['target'] is None: raise zc.buildout.UserError('Invalid Target') if options['source'] is None: raise zc.buildout.UserError('Invalid Source') def zipit(self): target = self.options['target'] if not os.path.exists(target): os.mkdir(target) path = self.options['source'] for dirs in os.listdir(path): try: source = os.path.join(path, dirs) dist = "%s/%s" % (target, dirs) print "%s > %s" % (source, dist) shutil.make_archive(dist, "zip", source) os.rename(dist+".zip", dist) except OSError: print "ignore %s" % dirs return [] def install(self): return self.zipit() def update(self): return self.zipit()
normal
{ "blob_id": "e7bec9018f25ba9e3c3ae8a5bbe11f8bc4b54a04", "index": 5714, "step-1": "import logging, os, zc.buildout, sys, shutil\n\nclass ZipEggs:\n def __init__(self, buildout, name, options):\n self.name, self.options = name, options\n if options['target'] is None:\n raise zc.buildout.UserError('Invalid Target')\n if options['source'] is None:\n raise zc.buildout.UserError('Invalid Source')\n\n\n def zipit(self):\n target = self.options['target']\n if not os.path.exists(target):\n os.mkdir(target)\n path = self.options['source']\n for dirs in os.listdir(path):\n try:\n source = os.path.join(path, dirs)\n dist = \"%s/%s\" % (target, dirs)\n print \"%s > %s\" % (source, dist)\n shutil.make_archive(dist, \"zip\", source)\n os.rename(dist+\".zip\", dist)\n except OSError:\n print \"ignore %s\" % dirs\n return []\n\n def install(self):\n return self.zipit()\n\n def update(self):\n return self.zipit()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Example solution for HW 5 # %% # Import the modules we will use import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # %% # ** MODIFY ** # Set the file name and path to where you have stored the data filename = 'streamflow_week5.txt' #modified filename filepath = os.path.join('../data', filename) #modified path to look one directory up print(os.getcwd()) print(filepath) #filepath = '../Assignments/Solutions/data/streamflow_week5.txt' # %% #Read the data into a pandas dataframe data=pd.read_table(filepath, sep = '\t', skiprows=30, names=['agency_cd', 'site_no', 'datetime', 'flow', 'code'] ) # Expand the dates to year month day data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True) data['year'] = data['year'].astype(int) data['month'] = data['month'].astype(int) data['day'] = data['day'].astype(int) # %% # Sorry no more helpers past here this week, you are on your own now :) # Hints - you will need the functions: describe, info, groupby, sort, head and tail. # %% Start of Mekha's code # 1 and 2 week forecast # Look at most recent 2 weeks of data ending 9/26 print(data.tail(14)) # Calculate avg of last two week's flow print(data.tail(14).describe()) # Calculate avg of last week's flow print(data.tail(7).describe()) # Look at stats for 2019 because from my previous analysis, I know it is a smiliarly dry year data_2019 = data[data['year']==2019] print(data_2019['flow'].describe()) # Look at stats for 2019 by month print(data_2019.groupby(['month'])[['flow']].describe()) # %% 1. Provide a summary of the data frames properties. # What are the column names? # What is its index? # What data types do each of the columns have? print(data.info()) # %% 2.Provide a summary of the flow column including the min, mean, max, standard # deviation and quartiles. print(data['flow'].describe()) # %% 3.Provide the same information but on a monthly basis. (Note: you should be # able to do this with one or two lines of code) print(data.groupby(['month'])[['flow']].describe()) # %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period # of record. Include the date, month and flow values in your summary. # 5 highest print(data.sort_values(by="flow",ascending=True).tail()) # 5 lowest print(data.sort_values(by="flow",ascending=True).head()) # %% 5.Find the highest and lowest flow values for every month of the year (i.e. you # will find 12 maxes and 12 mins) and report back what year these occurred in. # highest value for each month for i in range(1,13): month_data = data[data['month']==i] print(month_data.nlargest(1,['flow'])) # lowest value for each month for i in range(1,13): month_data = data[data['month']==i] print(month_data.nsmallest(1,['flow'])) # %% 6.Provide a list of historical dates with flows that are within 10% of your week 1 # forecast value. If there are none than increase the %10 window until you have at # least one other value and report the date and the new window you used forecast = 58.4 data_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))] pd.set_option('display.max_rows', None) print(data_10percent['datetime']) # %%
normal
{ "blob_id": "5024db0538f0022b84c203882df9c35979ba978a", "index": 4571, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(os.getcwd())\nprint(filepath)\n<mask token>\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\n<mask token>\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\n<mask token>\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n", "step-3": "<mask token>\nfilename = 'streamflow_week5.txt'\nfilepath = os.path.join('../data', filename)\nprint(os.getcwd())\nprint(filepath)\ndata = pd.read_table(filepath, sep='\\t', skiprows=30, names=['agency_cd',\n 'site_no', 'datetime', 'flow', 'code'])\ndata[['year', 'month', 'day']] = data['datetime'].str.split('-', expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\ndata_2019 = data[data['year'] == 2019]\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= 0.9 * forecast) & (data['flow'] <= \n 1.1 * forecast)]\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n", "step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfilename = 'streamflow_week5.txt'\nfilepath = os.path.join('../data', filename)\nprint(os.getcwd())\nprint(filepath)\ndata = pd.read_table(filepath, sep='\\t', skiprows=30, names=['agency_cd',\n 'site_no', 'datetime', 'flow', 'code'])\ndata[['year', 'month', 'day']] = data['datetime'].str.split('-', expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\nprint(data.tail(14))\nprint(data.tail(14).describe())\nprint(data.tail(7).describe())\ndata_2019 = data[data['year'] == 2019]\nprint(data_2019['flow'].describe())\nprint(data_2019.groupby(['month'])[['flow']].describe())\nprint(data.info())\nprint(data['flow'].describe())\nprint(data.groupby(['month'])[['flow']].describe())\nprint(data.sort_values(by='flow', ascending=True).tail())\nprint(data.sort_values(by='flow', ascending=True).head())\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nlargest(1, ['flow']))\nfor i in range(1, 13):\n month_data = data[data['month'] == i]\n print(month_data.nsmallest(1, ['flow']))\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= 0.9 * forecast) & (data['flow'] <= \n 1.1 * forecast)]\npd.set_option('display.max_rows', 
None)\nprint(data_10percent['datetime'])\n", "step-5": "# Example solution for HW 5\n\n# %%\n# Import the modules we will use\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %%\n# ** MODIFY **\n# Set the file name and path to where you have stored the data\nfilename = 'streamflow_week5.txt' #modified filename\nfilepath = os.path.join('../data', filename) #modified path to look one directory up\nprint(os.getcwd())\nprint(filepath)\n\n#filepath = '../Assignments/Solutions/data/streamflow_week5.txt'\n\n# %%\n#Read the data into a pandas dataframe\ndata=pd.read_table(filepath, sep = '\\t', skiprows=30,\n names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']\n )\n\n# Expand the dates to year month day\ndata[[\"year\", \"month\", \"day\"]] =data[\"datetime\"].str.split(\"-\", expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\n\n# %%\n# Sorry no more helpers past here this week, you are on your own now :) \n# Hints - you will need the functions: describe, info, groupby, sort, head and tail.\n\n# %% Start of Mekha's code\n\n# 1 and 2 week forecast\n\n# Look at most recent 2 weeks of data ending 9/26\nprint(data.tail(14))\n\n# Calculate avg of last two week's flow\nprint(data.tail(14).describe())\n\n# Calculate avg of last week's flow\nprint(data.tail(7).describe())\n\n# Look at stats for 2019 because from my previous analysis, I know it is a smiliarly dry year\ndata_2019 = data[data['year']==2019]\nprint(data_2019['flow'].describe())\n\n# Look at stats for 2019 by month\nprint(data_2019.groupby(['month'])[['flow']].describe())\n\n# %% 1. Provide a summary of the data frames properties.\n# What are the column names?\n# What is its index?\n# What data types do each of the columns have?\nprint(data.info())\n\n# %% 2.Provide a summary of the flow column including the min, mean, max, standard \n# deviation and quartiles.\nprint(data['flow'].describe())\n\n# %% 3.Provide the same information but on a monthly basis. (Note: you should be \n# able to do this with one or two lines of code)\nprint(data.groupby(['month'])[['flow']].describe())\n\n# %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period \n# of record. Include the date, month and flow values in your summary.\n\n# 5 highest\nprint(data.sort_values(by=\"flow\",ascending=True).tail())\n\n# 5 lowest\nprint(data.sort_values(by=\"flow\",ascending=True).head())\n\n\n# %% 5.Find the highest and lowest flow values for every month of the year (i.e. you \n# will find 12 maxes and 12 mins) and report back what year these occurred in.\n\n# highest value for each month\nfor i in range(1,13):\n month_data = data[data['month']==i]\n print(month_data.nlargest(1,['flow']))\n\n# lowest value for each month\nfor i in range(1,13):\n month_data = data[data['month']==i]\n print(month_data.nsmallest(1,['flow']))\n\n# %% 6.Provide a list of historical dates with flows that are within 10% of your week 1 \n# forecast value. If there are none than increase the %10 window until you have at \n# least one other value and report the date and the new window you used\n\nforecast = 58.4\ndata_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))]\npd.set_option('display.max_rows', None)\nprint(data_10percent['datetime'])\n\n# %%\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('story1', '0006_visitor')] operations = [migrations.RenameField(model_name='visitor', old_name= 'identitiy_number', new_name='identity_number')] <|reserved_special_token_1|> from django.db import migrations class Migration(migrations.Migration): dependencies = [('story1', '0006_visitor')] operations = [migrations.RenameField(model_name='visitor', old_name= 'identitiy_number', new_name='identity_number')] <|reserved_special_token_1|> # Generated by Django 3.1.2 on 2020-10-17 15:46 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('story1', '0006_visitor'), ] operations = [ migrations.RenameField( model_name='visitor', old_name='identitiy_number', new_name='identity_number', ), ]
flexible
{ "blob_id": "1aaace83af0235341d10b8ac3b47d00a944dac37", "index": 1422, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('story1', '0006_visitor')]\n operations = [migrations.RenameField(model_name='visitor', old_name=\n 'identitiy_number', new_name='identity_number')]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('story1', '0006_visitor')]\n operations = [migrations.RenameField(model_name='visitor', old_name=\n 'identitiy_number', new_name='identity_number')]\n", "step-5": "# Generated by Django 3.1.2 on 2020-10-17 15:46\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('story1', '0006_visitor'),\r\n ]\r\n\r\n operations = [\r\n migrations.RenameField(\r\n model_name='visitor',\r\n old_name='identitiy_number',\r\n new_name='identity_number',\r\n ),\r\n ]\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from nltk.tokenize import RegexpTokenizer from stop_words import get_stop_words from nltk.stem.porter import PorterStemmer from gensim import corpora, models import gensim tokenizer = RegexpTokenizer(r'\w+') # create English stop words list en_stop = get_stop_words('en') # Create p_stemmer of class PorterStemmer p_stemmer = PorterStemmer() # create sample documents doc_a = "Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother." doc_b = "My mother spends a lot of time driving my brother around to baseball practice." doc_c = "Some health experts suggest that driving may cause increased tension and blood pressure." doc_d = "I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better." doc_e = "Health professionals say that brocolli is good for your health." rev1 = "I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great" rev2 = "UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. 
You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits." rev3 ="The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly." # compile sample documents into a list #doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e] doc_set = [rev1,rev2,rev3] # list for tokenized documents in loop texts = [] # loop through document list for i in doc_set: # clean and tokenize document string raw = i.lower() tokens = tokenizer.tokenize(raw) # remove stop words from tokens stopped_tokens = [i for i in tokens if not i in en_stop] # stem tokens stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens] # add tokens to list texts.append(stemmed_tokens) # turn our tokenized documents into a id <-> term dictionary dictionary = corpora.Dictionary(texts) # convert tokenized documents into a document-term matrix corpus = [dictionary.doc2bow(text) for text in texts] # generate LDA model ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=dictionary, passes=20) print("LDA............") topics = ldamodel.print_topics(num_topics=3, num_words=5) for topic in topics: print(type(topic)) print(topic) print("LSA.................") #id2word = gensim.corpora.Dictionary.load_from_text("c:\lda_test.txt") lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary) from nltk.corpus import sentiwordnet as swn topics = lsi.print_topics(5) for topic in topics: print(topic[1]) print(swn.senti_synsets(topic[1])) print("----------------------------------------") #print(list(swn.senti_synsets('slow'))) happy = swn.senti_synsets('happy') print(happy.neg_score()) all = swn.all_senti_synsets() #print(all)
normal
{ "blob_id": "3035ac8044b5629d0b5de7934e46890ad36ed551", "index": 7798, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\n<mask token>\nprint('LDA............')\n<mask token>\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\n<mask token>\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\n<mask token>\nprint(happy.neg_score())\n<mask token>\n", "step-3": "<mask token>\ntokenizer = RegexpTokenizer('\\\\w+')\nen_stop = get_stop_words('en')\np_stemmer = PorterStemmer()\ndoc_a = (\n 'Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.'\n )\ndoc_b = (\n 'My mother spends a lot of time driving my brother around to baseball practice.'\n )\ndoc_c = (\n 'Some health experts suggest that driving may cause increased tension and blood pressure.'\n )\ndoc_d = (\n 'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'\n )\ndoc_e = 'Health professionals say that brocolli is good for your health.'\nrev1 = (\n \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\n )\nrev2 = (\n \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. 
Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits.\"\n )\nrev3 = (\n \"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n )\ndoc_set = [rev1, rev2, rev3]\ntexts = []\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\ndictionary = corpora.Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=\n dictionary, passes=20)\nprint('LDA............')\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\n<mask token>\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\nhappy = swn.senti_synsets('happy')\nprint(happy.neg_score())\nall = swn.all_senti_synsets()\n", "step-4": "from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\ntokenizer = RegexpTokenizer('\\\\w+')\nen_stop = get_stop_words('en')\np_stemmer = PorterStemmer()\ndoc_a = (\n 'Brocolli is good to eat. 
My brother likes to eat good brocolli, but not my mother.'\n )\ndoc_b = (\n 'My mother spends a lot of time driving my brother around to baseball practice.'\n )\ndoc_c = (\n 'Some health experts suggest that driving may cause increased tension and blood pressure.'\n )\ndoc_d = (\n 'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'\n )\ndoc_e = 'Health professionals say that brocolli is good for your health.'\nrev1 = (\n \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\n )\nrev2 = (\n \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits.\"\n )\nrev3 = (\n \"The short/summed up version: it's the new budget king in the 6-8 size. 
It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n )\ndoc_set = [rev1, rev2, rev3]\ntexts = []\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\ndictionary = corpora.Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=\n dictionary, passes=20)\nprint('LDA............')\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\nfrom nltk.corpus import sentiwordnet as swn\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\nhappy = swn.senti_synsets('happy')\nprint(happy.neg_score())\nall = swn.all_senti_synsets()\n", "step-5": "from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\n\ntokenizer = RegexpTokenizer(r'\\w+')\n\n# create English stop words list\nen_stop = get_stop_words('en')\n\n# Create p_stemmer of class PorterStemmer\np_stemmer = PorterStemmer()\n\n# create sample documents\ndoc_a = \"Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.\"\ndoc_b = \"My mother spends a lot of time driving my brother around to baseball practice.\"\ndoc_c = \"Some health experts suggest that driving may cause increased tension and blood pressure.\"\ndoc_d = \"I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.\"\ndoc_e = \"Health professionals say that brocolli is good for your health.\"\n\nrev1 = \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. 
PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\nrev2 = \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits.\"\nrev3 =\"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. 
Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n\n# compile sample documents into a list\n#doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e]\ndoc_set = [rev1,rev2,rev3]\n# list for tokenized documents in loop\ntexts = []\n\n# loop through document list\nfor i in doc_set:\n # clean and tokenize document string\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # stem tokens\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n\n # add tokens to list\n texts.append(stemmed_tokens)\n\n# turn our tokenized documents into a id <-> term dictionary\ndictionary = corpora.Dictionary(texts)\n\n# convert tokenized documents into a document-term matrix\ncorpus = [dictionary.doc2bow(text) for text in texts]\n\n# generate LDA model\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=dictionary, passes=20)\nprint(\"LDA............\")\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\n\nprint(\"LSA.................\")\n#id2word = gensim.corpora.Dictionary.load_from_text(\"c:\\lda_test.txt\")\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\n\nfrom nltk.corpus import sentiwordnet as swn\n\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print(\"----------------------------------------\")\n\n\n\n#print(list(swn.senti_synsets('slow')))\n\nhappy = swn.senti_synsets('happy')\n\nprint(happy.neg_score())\n\nall = swn.all_senti_synsets()\n#print(all)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def readadc(adcnum, clockpin, mosipin, misopin, cspin): if adcnum > 7 or adcnum < 0: return -1 GPIO.output(cspin, True) GPIO.output(clockpin, False) GPIO.output(cspin, False) commandout = adcnum commandout |= 24 commandout <<= 3 for i in range(5): if commandout & 128: GPIO.output(mosipin, True) else: GPIO.output(mosipin, False) commandout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout = 0 for i in range(12): GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout <<= 1 if GPIO.input(misopin): adcout |= 1 GPIO.output(cspin, True) adcout >>= 1 return adcout def spi_setup(): GPIO.setup(SPIMOSI, GPIO.OUT) GPIO.setup(SPIMISO, GPIO.IN) GPIO.setup(SPICLK, GPIO.OUT) GPIO.setup(SPICS, GPIO.OUT) GPIO.setup(POWER_PIN, GPIO.OUT) def spi_readout(adc_pin): return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS) <|reserved_special_token_0|> def adc_to_temp(readout): millivolts = readout * (3300.0 / 1024.0) temp_c = (millivolts - 100.0) / 10.0 - 40.0 return temp_c <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def readadc(adcnum, clockpin, mosipin, misopin, cspin): if adcnum > 7 or adcnum < 0: return -1 GPIO.output(cspin, True) GPIO.output(clockpin, False) GPIO.output(cspin, False) commandout = adcnum commandout |= 24 commandout <<= 3 for i in range(5): if commandout & 128: GPIO.output(mosipin, True) else: GPIO.output(mosipin, False) commandout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout = 0 for i in range(12): GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout <<= 1 if GPIO.input(misopin): adcout |= 1 GPIO.output(cspin, True) adcout >>= 1 return adcout def spi_setup(): GPIO.setup(SPIMOSI, GPIO.OUT) GPIO.setup(SPIMISO, GPIO.IN) GPIO.setup(SPICLK, GPIO.OUT) GPIO.setup(SPICS, GPIO.OUT) GPIO.setup(POWER_PIN, GPIO.OUT) def spi_readout(adc_pin): return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS) <|reserved_special_token_0|> def power_off(): GPIO.output(POWER_PIN, False) def adc_to_temp(readout): millivolts = readout * (3300.0 / 1024.0) temp_c = (millivolts - 100.0) / 10.0 - 40.0 return temp_c <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def readadc(adcnum, clockpin, mosipin, misopin, cspin): if adcnum > 7 or adcnum < 0: return -1 GPIO.output(cspin, True) GPIO.output(clockpin, False) GPIO.output(cspin, False) commandout = adcnum commandout |= 24 commandout <<= 3 for i in range(5): if commandout & 128: GPIO.output(mosipin, True) else: GPIO.output(mosipin, False) commandout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout = 0 for i in range(12): GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout <<= 1 if GPIO.input(misopin): adcout |= 1 GPIO.output(cspin, True) adcout >>= 1 return adcout def spi_setup(): GPIO.setup(SPIMOSI, GPIO.OUT) GPIO.setup(SPIMISO, GPIO.IN) GPIO.setup(SPICLK, GPIO.OUT) GPIO.setup(SPICS, GPIO.OUT) GPIO.setup(POWER_PIN, GPIO.OUT) def spi_readout(adc_pin): return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS) def power_on(): GPIO.output(POWER_PIN, True) def power_off(): GPIO.output(POWER_PIN, False) def adc_to_temp(readout): millivolts = readout * (3300.0 / 1024.0) temp_c = (millivolts - 100.0) / 10.0 - 40.0 return temp_c <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> GPIO.setmode(GPIO.BCM) <|reserved_special_token_0|> def readadc(adcnum, clockpin, mosipin, misopin, cspin): if adcnum > 7 or adcnum < 0: return -1 GPIO.output(cspin, 
True) GPIO.output(clockpin, False) GPIO.output(cspin, False) commandout = adcnum commandout |= 24 commandout <<= 3 for i in range(5): if commandout & 128: GPIO.output(mosipin, True) else: GPIO.output(mosipin, False) commandout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout = 0 for i in range(12): GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout <<= 1 if GPIO.input(misopin): adcout |= 1 GPIO.output(cspin, True) adcout >>= 1 return adcout def spi_setup(): GPIO.setup(SPIMOSI, GPIO.OUT) GPIO.setup(SPIMISO, GPIO.IN) GPIO.setup(SPICLK, GPIO.OUT) GPIO.setup(SPICS, GPIO.OUT) GPIO.setup(POWER_PIN, GPIO.OUT) def spi_readout(adc_pin): return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS) def power_on(): GPIO.output(POWER_PIN, True) def power_off(): GPIO.output(POWER_PIN, False) def adc_to_temp(readout): millivolts = readout * (3300.0 / 1024.0) temp_c = (millivolts - 100.0) / 10.0 - 40.0 return temp_c if __name__ == '__main__': HYGROMETER = 0 TEMP = 1 LIGHT = 2 spi_setup() power_on() time.sleep(PAUSE) print('Hygrometer value %d' % spi_readout(HYGROMETER)) power_off() time.sleep(PAUSE) temp = adc_to_temp(spi_readout(TEMP)) print('Temp sensor: %.1f C' % temp) time.sleep(PAUSE) light_level = float(spi_readout(LIGHT)) / 1024.0 * 100.0 print('Light level {}% '.format(light_level)) GPIO.cleanup() <|reserved_special_token_1|> import time import RPi.GPIO as GPIO GPIO.setmode(GPIO.BCM) POWER_PIN = 21 SPICLK = 18 SPIMISO = 23 SPIMOSI = 24 SPICS = 25 PAUSE = 0.1 # read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7) def readadc(adcnum, clockpin, mosipin, misopin, cspin): if ((adcnum > 7) or (adcnum < 0)): return -1 GPIO.output(cspin, True) GPIO.output(clockpin, False) # start clock low GPIO.output(cspin, False) # bring CS low commandout = adcnum commandout |= 0x18 # start bit + single-ended bit commandout <<= 3 # we only need to send 5 bits here for i in range(5): if (commandout & 0x80): GPIO.output(mosipin, True) else: GPIO.output(mosipin, False) commandout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout = 0 # read in one empty bit, one null bit and 10 ADC bits for i in range(12): GPIO.output(clockpin, True) GPIO.output(clockpin, False) adcout <<= 1 if (GPIO.input(misopin)): adcout |= 0x1 GPIO.output(cspin, True) adcout >>= 1 # first bit is 'null' so drop it return adcout def spi_setup(): GPIO.setup(SPIMOSI, GPIO.OUT) GPIO.setup(SPIMISO, GPIO.IN) GPIO.setup(SPICLK, GPIO.OUT) GPIO.setup(SPICS, GPIO.OUT) GPIO.setup(POWER_PIN, GPIO.OUT) def spi_readout(adc_pin): # read the analog pin return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS) def power_on(): GPIO.output(POWER_PIN, True) def power_off(): GPIO.output(POWER_PIN, False) def adc_to_temp(readout): millivolts = readout * (3300.0 / 1024.0) temp_c = ((millivolts - 100.0) / 10.0) - 40.0 return temp_c if __name__ == "__main__": HYGROMETER = 0 TEMP = 1 LIGHT = 2 spi_setup() power_on() time.sleep(PAUSE) print("Hygrometer value %d" % spi_readout(HYGROMETER)) power_off() time.sleep(PAUSE) temp = adc_to_temp(spi_readout(TEMP)) print("Temp sensor: %.1f C" % temp) time.sleep(PAUSE) light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0 print("Light level {}% ".format(light_level)) GPIO.cleanup()
flexible
{ "blob_id": "fcdb43e36a4610ca0201a27d82b1a583f1482878", "index": 8924, "step-1": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\n<mask token>\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return 
temp_c\n\n\n<mask token>\n", "step-4": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if adcnum > 7 or adcnum < 0:\n return -1\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n commandout = adcnum\n commandout |= 24\n commandout <<= 3\n for i in range(5):\n if commandout & 128:\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if GPIO.input(misopin):\n adcout |= 1\n GPIO.output(cspin, True)\n adcout >>= 1\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = (millivolts - 100.0) / 10.0 - 40.0\n return temp_c\n\n\nif __name__ == '__main__':\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print('Hygrometer value %d' % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = adc_to_temp(spi_readout(TEMP))\n print('Temp sensor: %.1f C' % temp)\n time.sleep(PAUSE)\n light_level = float(spi_readout(LIGHT)) / 1024.0 * 100.0\n print('Light level {}% '.format(light_level))\n GPIO.cleanup()\n", "step-5": "import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nPOWER_PIN = 21\nSPICLK = 18\nSPIMISO = 23\nSPIMOSI = 24\nSPICS = 25\n\nPAUSE = 0.1\n\n# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n if ((adcnum > 7) or (adcnum < 0)):\n return -1\n GPIO.output(cspin, True)\n\n GPIO.output(clockpin, False) # start clock low\n GPIO.output(cspin, False) # bring CS low\n\n commandout = adcnum\n commandout |= 0x18 # start bit + single-ended bit\n commandout <<= 3 # we only need to send 5 bits here\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n\n adcout = 0\n # read in one empty bit, one null bit and 10 ADC bits\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n\n adcout >>= 1 # first bit is 'null' so drop it\n return adcout\n\n\ndef spi_setup():\n GPIO.setup(SPIMOSI, GPIO.OUT)\n GPIO.setup(SPIMISO, GPIO.IN)\n GPIO.setup(SPICLK, GPIO.OUT)\n GPIO.setup(SPICS, GPIO.OUT)\n GPIO.setup(POWER_PIN, GPIO.OUT)\n\n\ndef spi_readout(adc_pin):\n # read the analog pin\n return readadc(adc_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n\ndef power_on():\n\n GPIO.output(POWER_PIN, True)\n\n\ndef power_off():\n GPIO.output(POWER_PIN, False)\n\n\ndef adc_to_temp(readout):\n millivolts = readout * (3300.0 / 1024.0)\n temp_c = ((millivolts - 100.0) / 10.0) - 40.0\n return temp_c\n\nif __name__ == \"__main__\":\n HYGROMETER = 0\n TEMP = 1\n LIGHT = 2\n spi_setup()\n power_on()\n time.sleep(PAUSE)\n print(\"Hygrometer value %d\" % spi_readout(HYGROMETER))\n power_off()\n time.sleep(PAUSE)\n temp = 
adc_to_temp(spi_readout(TEMP))\n print(\"Temp sensor: %.1f C\" % temp)\n time.sleep(PAUSE)\n light_level = (float(spi_readout(LIGHT))/1024.0) * 100.0\n print(\"Light level {}% \".format(light_level))\n GPIO.cleanup()\n", "step-ids": [ 4, 5, 6, 7, 10 ] }
[ 4, 5, 6, 7, 10 ]
<|reserved_special_token_0|> def rotate(files, dst, value=90): for file_ in files: img = Image.open(file_) img = img.rotate(value) name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_)) img.save(name) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def rotate(files, dst, value=90): for file_ in files: img = Image.open(file_) img = img.rotate(value) name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_)) img.save(name) <|reserved_special_token_0|> rotate(common, dst) <|reserved_special_token_1|> <|reserved_special_token_0|> def rotate(files, dst, value=90): for file_ in files: img = Image.open(file_) img = img.rotate(value) name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_)) img.save(name) src = '/home/andrew/code/tmp_photos' dst = '/home/andrew/code/tmp_photos2' common = glob.glob('{}{}*.*'.format(src, os.sep)) rotate(common, dst) <|reserved_special_token_1|> import glob import os from PIL import Image def rotate(files, dst, value=90): for file_ in files: img = Image.open(file_) img = img.rotate(value) name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_)) img.save(name) src = '/home/andrew/code/tmp_photos' dst = '/home/andrew/code/tmp_photos2' common = glob.glob('{}{}*.*'.format(src, os.sep)) rotate(common, dst) <|reserved_special_token_1|> #!/usr/bin/python3 # -*- coding: utf-8 -*- # Author: xurongzhong#126.com 技术支持qq群:6089740 # CreateDate: 2018-3-27 # pillow_rotate.py import glob import os from PIL import Image def rotate(files, dst, value=90): for file_ in files: img = Image.open(file_) img = img.rotate(value) name = "{}{}{}".format(dst, os.sep, os.path.basename(file_)) img.save(name) src = r'/home/andrew/code/tmp_photos' dst = r'/home/andrew/code/tmp_photos2' common = glob.glob('{}{}*.*'.format(src, os.sep)) rotate(common, dst)
flexible
{ "blob_id": "cd104eec21be8a59e8fb3bd8ab061dd357fc126a", "index": 667, "step-1": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\n<mask token>\nrotate(common, dst)\n", "step-3": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\nsrc = '/home/andrew/code/tmp_photos'\ndst = '/home/andrew/code/tmp_photos2'\ncommon = glob.glob('{}{}*.*'.format(src, os.sep))\nrotate(common, dst)\n", "step-4": "import glob\nimport os\nfrom PIL import Image\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\nsrc = '/home/andrew/code/tmp_photos'\ndst = '/home/andrew/code/tmp_photos2'\ncommon = glob.glob('{}{}*.*'.format(src, os.sep))\nrotate(common, dst)\n", "step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: xurongzhong#126.com 技术支持qq群:6089740\n# CreateDate: 2018-3-27\n# pillow_rotate.py\nimport glob\nimport os \nfrom PIL import Image\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = \"{}{}{}\".format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\nsrc = r'/home/andrew/code/tmp_photos'\ndst = r'/home/andrew/code/tmp_photos2'\n\ncommon = glob.glob('{}{}*.*'.format(src, os.sep)) \nrotate(common, dst)\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import os, argparse,collections defaults ={'color':'red','user':'guest'} parser=argparse.ArgumentParser() parser.add_argument('-u','--user') parser.add_argument('-c','--color') #a simple Namespace object will be built up from attributes parsed out of the command lin namespace= parser.parse_args() command_line_args= {k: v for k , v in vars(namespace).items()if v is not None} combined= collections.ChainMap(command_line_args,os.environ,defaults) print(combined['color']) print(combined['user'])
normal
{ "blob_id": "3c31e3f2a6f320bc5ae33f0ba1d234a089371899", "index": 9199, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\n<mask token>\nprint(combined['color'])\nprint(combined['user'])\n", "step-3": "<mask token>\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n", "step-4": "import os, argparse, collections\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n", "step-5": "import os, argparse,collections\n\ndefaults ={'color':'red','user':'guest'}\nparser=argparse.ArgumentParser()\nparser.add_argument('-u','--user')\nparser.add_argument('-c','--color')\n\n#a simple Namespace object will be built up from attributes parsed out of the command lin\n\nnamespace= parser.parse_args()\ncommand_line_args= {k: v for k , v in vars(namespace).items()if v is not None}\n\ncombined= collections.ChainMap(command_line_args,os.environ,defaults)\n\nprint(combined['color'])\nprint(combined['user'])", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def hash_string(input_string: str) ->str: return hashlib.sha256(input_string.encode('utf-8')).hexdigest() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def hash_string(input_string: str) ->str: return hashlib.sha256(input_string.encode('utf-8')).hexdigest() def main(mytimer: func.TimerRequest) ->None: utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime. timezone.utc).isoformat() if mytimer.past_due: logging.info('The timer is past due!') logging.info('Python timer trigger function ran at %s', utc_timestamp) url = os.environ['TargetUrl'] search_term = os.environ['SearchTerm'] reqs = requests.get(url) soup = BeautifulSoup(reqs.text, 'html.parser') token = telebot.TeleBot(os.environ['TelebotToken']) chat_id = os.environ['TelebotChatId'] urls = [] for link in soup.find_all('a'): link_url = link.get('href') if search_term in link_url: urls.append(link_url) logging.info(f'Looking for: {search_term}') logging.info(f'Urls conatining the pattern: {urls}') lst_to_str = ';'.join([str(i) for i in urls]) new_hash = hash_string(lst_to_str) now = datetime.datetime.now() file_suffix = now.strftime('%Y%m%d%I%M%S') year = now.year month = now.month day = now.day blob = BlobClient.from_connection_string(conn_str=os.environ[ 'AzureWebJobsStorage'], container_name='hashstore', blob_name= f'urls/{year}/{month}/{day}/html-{file_suffix}.html') blob.upload_blob(lst_to_str, blob_type='BlockBlob') logging.info(new_hash) blob = BlobClient.from_connection_string(conn_str=os.environ[ 'AzureWebJobsStorage'], container_name='hashstore', blob_name= 'hash.tmp') blob_hash = '' if blob.exists(): blob_hash = str(blob.download_blob().readall()) if blob_hash != new_hash: message = f'Hash of this page: {url} has changed' bot = telebot.TeleBot(token) bot.config['api_key'] = token bot.send_message(chat_id, message) blob.delete_blob() blob.upload_blob(new_hash, blob_type='BlockBlob') logging.info(f'Old hash >>>> {blob_hash}') logging.info(f'New hash >>>> {new_hash}') <|reserved_special_token_1|> import datetime import logging import os import requests from bs4 import BeautifulSoup import telebot from azure.storage.blob import BlobClient import hashlib import azure.functions as func def hash_string(input_string: str) ->str: return hashlib.sha256(input_string.encode('utf-8')).hexdigest() def main(mytimer: func.TimerRequest) ->None: utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime. 
timezone.utc).isoformat() if mytimer.past_due: logging.info('The timer is past due!') logging.info('Python timer trigger function ran at %s', utc_timestamp) url = os.environ['TargetUrl'] search_term = os.environ['SearchTerm'] reqs = requests.get(url) soup = BeautifulSoup(reqs.text, 'html.parser') token = telebot.TeleBot(os.environ['TelebotToken']) chat_id = os.environ['TelebotChatId'] urls = [] for link in soup.find_all('a'): link_url = link.get('href') if search_term in link_url: urls.append(link_url) logging.info(f'Looking for: {search_term}') logging.info(f'Urls conatining the pattern: {urls}') lst_to_str = ';'.join([str(i) for i in urls]) new_hash = hash_string(lst_to_str) now = datetime.datetime.now() file_suffix = now.strftime('%Y%m%d%I%M%S') year = now.year month = now.month day = now.day blob = BlobClient.from_connection_string(conn_str=os.environ[ 'AzureWebJobsStorage'], container_name='hashstore', blob_name= f'urls/{year}/{month}/{day}/html-{file_suffix}.html') blob.upload_blob(lst_to_str, blob_type='BlockBlob') logging.info(new_hash) blob = BlobClient.from_connection_string(conn_str=os.environ[ 'AzureWebJobsStorage'], container_name='hashstore', blob_name= 'hash.tmp') blob_hash = '' if blob.exists(): blob_hash = str(blob.download_blob().readall()) if blob_hash != new_hash: message = f'Hash of this page: {url} has changed' bot = telebot.TeleBot(token) bot.config['api_key'] = token bot.send_message(chat_id, message) blob.delete_blob() blob.upload_blob(new_hash, blob_type='BlockBlob') logging.info(f'Old hash >>>> {blob_hash}') logging.info(f'New hash >>>> {new_hash}') <|reserved_special_token_1|> import datetime import logging import os import requests from bs4 import BeautifulSoup import telebot from azure.storage.blob import BlobClient import hashlib import azure.functions as func def hash_string(input_string: str) -> str: return hashlib.sha256(input_string.encode("utf-8")).hexdigest() def main(mytimer: func.TimerRequest) -> None: utc_timestamp = datetime.datetime.utcnow().replace( tzinfo=datetime.timezone.utc).isoformat() if mytimer.past_due: logging.info('The timer is past due!') logging.info('Python timer trigger function ran at %s', utc_timestamp) url = os.environ['TargetUrl'] search_term = os.environ['SearchTerm'] reqs = requests.get(url) soup = BeautifulSoup(reqs.text, 'html.parser') token = telebot.TeleBot(os.environ['TelebotToken']) chat_id = os.environ['TelebotChatId'] urls = [] for link in soup.find_all('a'): link_url = link.get('href') # Add only links that contain the search term if search_term in link_url: urls.append(link_url) logging.info(f"Looking for: {search_term}") logging.info(f"Urls conatining the pattern: {urls}") lst_to_str = ';'.join([str(i) for i in urls]) new_hash = hash_string(lst_to_str) now = datetime.datetime.now() file_suffix = now.strftime("%Y%m%d%I%M%S") year = now.year month = now.month day = now.day blob = BlobClient.from_connection_string( conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html') blob.upload_blob(lst_to_str, blob_type='BlockBlob') logging.info(new_hash) blob = BlobClient.from_connection_string( conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name='hash.tmp') blob_hash = '' if blob.exists(): blob_hash = str(blob.download_blob().readall()) if blob_hash != new_hash: message = f'Hash of this page: {url} has changed' bot = telebot.TeleBot(token) bot.config['api_key'] = token bot.send_message(chat_id, message) blob.delete_blob() 
blob.upload_blob(new_hash, blob_type='BlockBlob') logging.info(f'Old hash >>>> {blob_hash}') logging.info(f'New hash >>>> {new_hash}')
flexible
{ "blob_id": "670a23aa910a6709735281b7e64e5254a19277c6", "index": 7924, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n", "step-4": "import datetime\nimport logging\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport telebot\nfrom azure.storage.blob import BlobClient\nimport hashlib\nimport azure.functions as func\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = 
BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n", "step-5": "import datetime\r\nimport logging\r\nimport os\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport telebot\r\nfrom azure.storage.blob import BlobClient\r\nimport hashlib\r\n\r\nimport azure.functions as func\r\n\r\n\r\ndef hash_string(input_string: str) -> str:\r\n return hashlib.sha256(input_string.encode(\"utf-8\")).hexdigest()\r\n\r\n\r\ndef main(mytimer: func.TimerRequest) -> None:\r\n utc_timestamp = datetime.datetime.utcnow().replace(\r\n tzinfo=datetime.timezone.utc).isoformat()\r\n\r\n if mytimer.past_due:\r\n logging.info('The timer is past due!')\r\n\r\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\r\n\r\n url = os.environ['TargetUrl']\r\n search_term = os.environ['SearchTerm']\r\n reqs = requests.get(url)\r\n soup = BeautifulSoup(reqs.text, 'html.parser')\r\n token = telebot.TeleBot(os.environ['TelebotToken'])\r\n chat_id = os.environ['TelebotChatId']\r\n\r\n urls = []\r\n for link in soup.find_all('a'):\r\n link_url = link.get('href')\r\n # Add only links that contain the search term\r\n if search_term in link_url:\r\n urls.append(link_url)\r\n\r\n logging.info(f\"Looking for: {search_term}\")\r\n logging.info(f\"Urls conatining the pattern: {urls}\")\r\n\r\n lst_to_str = ';'.join([str(i) for i in urls])\r\n new_hash = hash_string(lst_to_str)\r\n now = datetime.datetime.now()\r\n file_suffix = now.strftime(\"%Y%m%d%I%M%S\")\r\n year = now.year\r\n month = now.month\r\n day = now.day\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\r\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\r\n\r\n logging.info(new_hash)\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name='hash.tmp')\r\n blob_hash = ''\r\n if blob.exists():\r\n blob_hash = str(blob.download_blob().readall())\r\n if blob_hash != new_hash:\r\n message = f'Hash of this page: {url} has changed'\r\n bot = telebot.TeleBot(token)\r\n bot.config['api_key'] = token\r\n bot.send_message(chat_id, message)\r\n blob.delete_blob()\r\n\r\n blob.upload_blob(new_hash, blob_type='BlockBlob')\r\n\r\n logging.info(f'Old hash >>>> {blob_hash}')\r\n logging.info(f'New hash >>>> {new_hash}')\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def func(i): if i % 2 != 0: return False visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] temp = i while i: x = i % 10 if visited[x] == 1 or x == 0: break visited[x] = 1 i = int(i / 10) if i == 0: for y in str(temp): if temp % int(y) != 0: return False else: return False return True <|reserved_special_token_0|> <|reserved_special_token_1|> def func(i): if i % 2 != 0: return False visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] temp = i while i: x = i % 10 if visited[x] == 1 or x == 0: break visited[x] = 1 i = int(i / 10) if i == 0: for y in str(temp): if temp % int(y) != 0: return False else: return False return True <|reserved_special_token_0|> print(sum([(1) for i in range(n, m) if func(i)])) <|reserved_special_token_1|> def func(i): if i % 2 != 0: return False visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] temp = i while i: x = i % 10 if visited[x] == 1 or x == 0: break visited[x] = 1 i = int(i / 10) if i == 0: for y in str(temp): if temp % int(y) != 0: return False else: return False return True n, m = map(int, input().split()) print(sum([(1) for i in range(n, m) if func(i)])) <|reserved_special_token_1|> def func(i): if(i % 2 != 0): return False visited = [0,0,0,0,0,0,0,0,0,0] temp = i while(i): x = i%10 if (visited[x] == 1) or (x == 0): break visited[x] = 1; i = (int)(i / 10); if(i == 0): for y in str(temp): if(temp % int(y) != 0): return False else: return False return True n,m = map(int, input().split()) print(sum([1 for i in range(n,m) if func(i)]))
flexible
{ "blob_id": "1a8c9be389aad37a36630a962c20a0a36c449bdd", "index": 3809, "step-1": "<mask token>\n", "step-2": "def func(i):\n if i % 2 != 0:\n return False\n visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n temp = i\n while i:\n x = i % 10\n if visited[x] == 1 or x == 0:\n break\n visited[x] = 1\n i = int(i / 10)\n if i == 0:\n for y in str(temp):\n if temp % int(y) != 0:\n return False\n else:\n return False\n return True\n\n\n<mask token>\n", "step-3": "def func(i):\n if i % 2 != 0:\n return False\n visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n temp = i\n while i:\n x = i % 10\n if visited[x] == 1 or x == 0:\n break\n visited[x] = 1\n i = int(i / 10)\n if i == 0:\n for y in str(temp):\n if temp % int(y) != 0:\n return False\n else:\n return False\n return True\n\n\n<mask token>\nprint(sum([(1) for i in range(n, m) if func(i)]))\n", "step-4": "def func(i):\n if i % 2 != 0:\n return False\n visited = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n temp = i\n while i:\n x = i % 10\n if visited[x] == 1 or x == 0:\n break\n visited[x] = 1\n i = int(i / 10)\n if i == 0:\n for y in str(temp):\n if temp % int(y) != 0:\n return False\n else:\n return False\n return True\n\n\nn, m = map(int, input().split())\nprint(sum([(1) for i in range(n, m) if func(i)]))\n", "step-5": "def func(i):\r\n if(i % 2 != 0): return False\r\n visited = [0,0,0,0,0,0,0,0,0,0]\r\n temp = i\r\n while(i):\r\n x = i%10\r\n if (visited[x] == 1) or (x == 0): break\r\n visited[x] = 1; \r\n i = (int)(i / 10); \r\n\r\n if(i == 0):\r\n for y in str(temp):\r\n if(temp % int(y) != 0): return False\r\n\r\n else: return False\r\n return True\r\n\r\nn,m = map(int, input().split())\r\n\r\nprint(sum([1 for i in range(n,m) if func(i)]))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def isPentagon(item): num = math.floor(math.sqrt(item * 2 // 3)) + 1 if num * (3 * num - 1) // 2 == item: return True return False <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def isPentagon(item): num = math.floor(math.sqrt(item * 2 // 3)) + 1 if num * (3 * num - 1) // 2 == item: return True return False def isHexagon(item): num = math.floor(math.sqrt(item // 2)) + 1 if num * (2 * num - 1) == item: return True return False <|reserved_special_token_0|> while t == 0: i += 1 n = i * (i + 1) // 2 if isPentagon(n) and isHexagon(n): t = 1 print(n) print('time:', time.time() - t1) <|reserved_special_token_1|> <|reserved_special_token_0|> t1 = time.time() def isPentagon(item): num = math.floor(math.sqrt(item * 2 // 3)) + 1 if num * (3 * num - 1) // 2 == item: return True return False def isHexagon(item): num = math.floor(math.sqrt(item // 2)) + 1 if num * (2 * num - 1) == item: return True return False i = 285 t = 0 while t == 0: i += 1 n = i * (i + 1) // 2 if isPentagon(n) and isHexagon(n): t = 1 print(n) print('time:', time.time() - t1) <|reserved_special_token_1|> import math import time t1 = time.time() def isPentagon(item): num = math.floor(math.sqrt(item * 2 // 3)) + 1 if num * (3 * num - 1) // 2 == item: return True return False def isHexagon(item): num = math.floor(math.sqrt(item // 2)) + 1 if num * (2 * num - 1) == item: return True return False i = 285 t = 0 while t == 0: i += 1 n = i * (i + 1) // 2 if isPentagon(n) and isHexagon(n): t = 1 print(n) print('time:', time.time() - t1) <|reserved_special_token_1|> import math import time t1 = time.time() # n(3n-1)/2 def isPentagon(item): num = math.floor(math.sqrt(item*2//3))+1 if num*(3*num-1)//2 == item: return True return False # n(2n-1) def isHexagon(item): num = math.floor(math.sqrt(item//2))+1 if num*(2*num-1) == item: return True return False i = 285 t = 0 while t == 0: i += 1 n = i*(i+1)//2 if isPentagon(n) and isHexagon(n): t = 1 print (n) print("time:",time.time()-t1)
flexible
{ "blob_id": "0aec3fbc9f4b9f33aee021fa417c43f0feb0e3d1", "index": 3296, "step-1": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\n<mask token>\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n", "step-3": "<mask token>\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n", "step-4": "import math\nimport time\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n", "step-5": "import math\nimport time\n\nt1 = time.time()\n\n# n(3n-1)/2\ndef isPentagon(item):\n num = math.floor(math.sqrt(item*2//3))+1\n if num*(3*num-1)//2 == item:\n return True\n return False\n\n# n(2n-1)\ndef isHexagon(item):\n num = math.floor(math.sqrt(item//2))+1\n if num*(2*num-1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i*(i+1)//2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print (n)\n\nprint(\"time:\",time.time()-t1)\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
""""Module for miscellaneous behavior stuff For example, stuff like extracting lick times or choice times. TrialSpeak shouldn't depend on stuff like that. # Also get the pldf and use that to get lick times ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename']) # Get the lick times lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH') # Group them by trial number and lick type and extract times tt2licks = lick_times.groupby(['trial', 'arg0']).groups for (trial, lick_type) in tt2licks: tt2licks[(trial, lick_type)] = \ ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000. # Get response window time as first transition into response window state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines( ldf, 'ST_CHG2') rwin_open_times = my.pick_rows(state_change_df, arg1=state_name2num['RESPONSE_WINDOW']) rwin_open_times_by_trial = rwin_open_times.groupby( 'trial').first()['time'] / 1000. # Get choice time as first transition out of response window state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines( ldf, 'ST_CHG2') rwin_close_times = my.pick_rows(state_change_df, arg0=state_name2num['RESPONSE_WINDOW']) rwin_close_times_by_trial = rwin_close_times.groupby( 'trial').first()['time'] / 1000. """ import MCwatch import ArduFSM import numpy as np def get_choice_times(behavior_filename, verbose=False): """Calculates the choice time for each trial in the logfile""" # Find the state number for response window state_num2names = MCwatch.behavior.db.get_state_num2names() resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[ 'RESPONSE_WINDOW'] # Get the lines lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename) parsed_df_by_trial = \ ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines, verbose=verbose) # Identify times of state change out of response window # No sense in warning because there's also multiple state changes on # rewarded trials choice_times = ArduFSM.TrialSpeak.identify_state_change_times( parsed_df_by_trial, state0=resp_win_num, show_warnings=False) return choice_times def get_included_trials(trial_times, data_range, t_start=0, t_stop=0): """Identify the trials included in a temporal range. trial_times : Series of trial times (e.g., rwin times) indexed by trial labels data_range : 2-tuple (start, stop) specifying interval to include t_start, t_stop : amount of time before (after) each trial time that must be within data_range in order for that trial to be included. Returns: trial_labels that are included Ex: ## Get the trial matrix tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True) # Include all random trials tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error']) # Identify range of trials to include video_range_bbase = extras.get_video_range_bbase(vs) included_trials = extras.get_included_trials(tm['rwin_time'], data_range=video_range_bbase, t_start=-2, t_stop=0) tm = tm.loc[included_trials] """ return trial_times[ (trial_times + t_start >= data_range[0]) & (trial_times + t_stop < data_range[1]) ].index
normal
{ "blob_id": "78761eda403ad8f54187e5858a23c23d3dd79b09", "index": 8821, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[(trial_times + t_start >= data_range[0]) & (\n trial_times + t_stop < data_range[1])].index\n", "step-4": "<mask token>\nimport MCwatch\nimport ArduFSM\nimport numpy as np\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial 
to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[(trial_times + t_start >= data_range[0]) & (\n trial_times + t_stop < data_range[1])].index\n", "step-5": "\"\"\"\"Module for miscellaneous behavior stuff\n\nFor example, stuff like extracting lick times or choice times.\nTrialSpeak shouldn't depend on stuff like that.\n\n\n # Also get the pldf and use that to get lick times\n ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename']) \n \n # Get the lick times\n lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH')\n \n # Group them by trial number and lick type and extract times\n tt2licks = lick_times.groupby(['trial', 'arg0']).groups\n for (trial, lick_type) in tt2licks:\n tt2licks[(trial, lick_type)] = \\\n ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000.\n \n # Get response window time as first transition into response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n rwin_open_times = my.pick_rows(state_change_df, \n arg1=state_name2num['RESPONSE_WINDOW'])\n rwin_open_times_by_trial = rwin_open_times.groupby(\n 'trial').first()['time'] / 1000.\n \n # Get choice time as first transition out of response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n rwin_close_times = my.pick_rows(state_change_df, \n arg0=state_name2num['RESPONSE_WINDOW'])\n rwin_close_times_by_trial = rwin_close_times.groupby(\n 'trial').first()['time'] / 1000.\n\"\"\"\nimport MCwatch\nimport ArduFSM\nimport numpy as np\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n # Find the state number for response window\n state_num2names = MCwatch.behavior.db.get_state_num2names() \n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n \n # Get the lines\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = \\\n ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines, \n verbose=verbose)\n \n # Identify times of state change out of response window\n # No sense in warning because there's also multiple state changes on\n # rewarded trials\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n \n return choice_times \n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 
'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[\n (trial_times + t_start >= data_range[0]) &\n (trial_times + t_stop < data_range[1])\n ].index\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import re import ngram import smoothedNgram def split_into_sentences(text): text = text.lower() sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text) getSentences(sentences,text) return sentences def getTextWithoutSpaces(text): withoutLineBreaks = text.replace("\n", "") withoutSpaces = re.sub(' +', ' ', withoutLineBreaks) return withoutSpaces def getSentences(sentences,text): data = re.findall(r'\b[a-zA-Z]+|[.!?]', text) unique_words = set(data) sentenceCounter=0 wordCounter=0 for i in sentences: sentenceCounter += 1 i = i.lower() words = i.split() wordCounter += len(words) print('Total sentence in the text : ' + str(sentenceCounter-1)) print('Total word in the text : ' + str(wordCounter)) print('Unique word number : ' + str(len(unique_words)-1)) def getText(): file = open("hw01_FireFairies.txt") data = file.read() return data def listResults(): print('') split_into_sentences(getText()) print('') words,listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(getTextWithoutSpaces(getText())) listOfProbBigram, listOfBigrams, listOfProbUnigram, words = ngram.calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts) words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams, listOfProbUnigram, words) ngram.findLeastValues(words, flipped) if __name__ == '__main__': listResults()
normal
{ "blob_id": "6d7db5b9a64ec25763f5af6ceec1a46d629d549c", "index": 472, "step-1": "<mask token>\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\n<mask token>\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = 
file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n", "step-4": "import re\nimport ngram\nimport smoothedNgram\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n", "step-5": "import re\nimport ngram\nimport smoothedNgram\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split(r'(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s', text)\n getSentences(sentences,text)\n return sentences\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace(\"\\n\", \"\")\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\ndef getSentences(sentences,text):\n data = re.findall(r'\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter=0\n wordCounter=0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter-1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words)-1))\n\n\ndef getText():\n file = open(\"hw01_FireFairies.txt\")\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words,listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = ngram.calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts)\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams, listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
# Copyright 2021-2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ Data operations, will be used in run_pretrain.py """ import os import math import numpy as np import mindspore.common.dtype as mstype import mindspore.dataset as de import mindspore.dataset.transforms as C from mindspore import log as logger class BucketDatasetGenerator: """ Provide data distribution of different gears for the bert network. Args: data_set (Dataset): The training dataset. batch_size (Int): The training batchsize. bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None. """ def __init__(self, data_set, batch_size, bucket_list=None): self.dataset = data_set self.batch_size = batch_size self.bucket_list = bucket_list self.data_bucket = {bucket: [] for bucket in bucket_list} bucket_size = len(bucket_list) self.random_list = np.random.binomial(n=(bucket_size - 1), p=0.5, size=self.__len__()) self.random_list = (self.random_list + 2) % bucket_size self.random_list = [bucket_list[i] for i in self.random_list] self.iter = 0 def __next__(self): for item in self.iterator: for seq_length in self.bucket_list: if np.sum(item[1]) <= seq_length: self.data_bucket[seq_length].append(item) break for key in self.data_bucket.keys(): data = self.data_bucket[key] if len(data) >= self.batch_size and self.random_list[self.iter] == key: self.data_bucket[key] = self.data_bucket[key][self.batch_size:] arr = data[0] for i in range(1, self.batch_size): current_data = data[i] for j in range(len(current_data)): arr[j] = np.concatenate((arr[j], current_data[j])) res = () for label in arr: newlabel = np.reshape(label, (self.batch_size, -1)) res += (newlabel,) res += (np.array(key, np.int32),) self.iter += 1 return res raise StopIteration def __iter__(self): self.iterator = self.dataset.create_tuple_iterator(output_numpy=True) return self def __len__(self): return (self.dataset.get_dataset_size() // self.batch_size) - 1 def create_albert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None, batch_size=32, bucket_list=None): """create train dataset""" # apply repeat operations files = os.listdir(data_dir) data_files = [] for file_name in files: if "tfrecord" in file_name: data_files.append(os.path.join(data_dir, file_name)) data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"], shuffle=de.Shuffle.FILES if do_shuffle == "true" else False, num_shards=device_num, shard_id=rank, shard_equal_rows=True) if bucket_list: bucket_dataset = BucketDatasetGenerator(data_set, batch_size, bucket_list=bucket_list) data_set = de.GeneratorDataset(bucket_dataset, column_names=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights", "sentence_flag"], 
shuffle=False) else: data_set = data_set.batch(batch_size, drop_remainder=True) ori_dataset_size = data_set.get_dataset_size() print('origin dataset size: ', ori_dataset_size) type_cast_op = C.TypeCast(mstype.int32) data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids") data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions") data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels") data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids") data_set = data_set.map(operations=type_cast_op, input_columns="input_mask") data_set = data_set.map(operations=type_cast_op, input_columns="input_ids") # apply batch operations logger.info("data size: {}".format(data_set.get_dataset_size())) logger.info("repeat count: {}".format(data_set.get_repeat_count())) return data_set def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy", data_file_path=None, schema_file_path=None, do_shuffle=True, rank_size=1, rank_id=0): """create finetune or evaluation dataset""" type_cast_op = C.TypeCast(mstype.int32) ds = de.MindDataset([data_file_path], columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], shuffle=do_shuffle, num_shards=rank_size, shard_id=rank_id) if assessment_method == "Spearman_correlation": type_cast_op_float = C.TypeCast(mstype.float32) ds = ds.map(operations=type_cast_op_float, input_columns="label_ids") else: ds = ds.map(operations=type_cast_op, input_columns="label_ids") ds = ds.map(operations=type_cast_op, input_columns="segment_ids") ds = ds.map(operations=type_cast_op, input_columns="input_mask") ds = ds.map(operations=type_cast_op, input_columns="input_ids") ds = ds.repeat(repeat_count) # apply batch operations ds = ds.batch(batch_size, drop_remainder=True) return ds def generator_squad(data_features): for feature in data_features: yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id) def generator_squad_train(data_features): for feature in data_features: yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position, feature.unique_id, feature.is_impossible) def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None, is_training=True, do_shuffle=True, rank_size=1, rank_id=0): """create finetune or evaluation dataset""" type_cast_op = C.TypeCast(mstype.int32) if is_training: print("data_file_path: ", data_file_path) print("rank_id: ", rank_id) ds = de.MindDataset([data_file_path], columns_list=["input_ids", "input_mask", "segment_ids", "start_positions", "end_positions", "unique_ids", "is_impossible"], shuffle=do_shuffle, num_shards=rank_size, shard_id=rank_id) ds = ds.map(operations=type_cast_op, input_columns="start_positions") ds = ds.map(operations=type_cast_op, input_columns="end_positions") else: ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle, column_names=["input_ids", "input_mask", "segment_ids", "unique_ids"]) ds = ds.map(operations=type_cast_op, input_columns="input_ids") ds = ds.map(operations=type_cast_op, input_columns="input_mask") ds = ds.map(operations=type_cast_op, input_columns="segment_ids") ds = ds.map(operations=type_cast_op, input_columns="unique_ids") ds = ds.repeat(repeat_count) # apply batch operations ds = ds.batch(batch_size, drop_remainder=True) return ds def create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None, schema_dir=None): """create 
evaluation dataset""" data_files = [] if os.path.isdir(data_dir): files = os.listdir(data_dir) for file_name in files: if "tfrecord" in file_name: data_files.append(os.path.join(data_dir, file_name)) else: data_files.append(data_dir) data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"], shard_equal_rows=True) ori_dataset_size = data_set.get_dataset_size() print("origin eval size: ", ori_dataset_size) dtypes = data_set.output_types() shapes = data_set.output_shapes() output_batches = math.ceil(ori_dataset_size / device_num / batchsize) padded_num = output_batches * device_num * batchsize - ori_dataset_size print("padded num: ", padded_num) if padded_num > 0: item = {"input_ids": np.zeros(shapes[0], dtypes[0]), "input_mask": np.zeros(shapes[1], dtypes[1]), "segment_ids": np.zeros(shapes[2], dtypes[2]), "next_sentence_labels": np.zeros(shapes[3], dtypes[3]), "masked_lm_positions": np.zeros(shapes[4], dtypes[4]), "masked_lm_ids": np.zeros(shapes[5], dtypes[5]), "masked_lm_weights": np.zeros(shapes[6], dtypes[6])} padded_samples = [item for x in range(padded_num)] padded_ds = de.PaddedDataset(padded_samples) eval_ds = data_set + padded_ds sampler = de.DistributedSampler(num_shards=device_num, shard_id=rank, shuffle=False) eval_ds.use_sampler(sampler) else: eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"], num_shards=device_num, shard_id=rank, shard_equal_rows=True) type_cast_op = C.TypeCast(mstype.int32) eval_ds = eval_ds.map(input_columns="masked_lm_ids", operations=type_cast_op) eval_ds = eval_ds.map(input_columns="masked_lm_positions", operations=type_cast_op) eval_ds = eval_ds.map(input_columns="next_sentence_labels", operations=type_cast_op) eval_ds = eval_ds.map(input_columns="segment_ids", operations=type_cast_op) eval_ds = eval_ds.map(input_columns="input_mask", operations=type_cast_op) eval_ds = eval_ds.map(input_columns="input_ids", operations=type_cast_op) eval_ds = eval_ds.batch(batchsize, drop_remainder=True) print("eval data size: {}".format(eval_ds.get_dataset_size())) print("eval repeat count: {}".format(eval_ds.get_repeat_count())) return eval_ds
normal
{ "blob_id": "8ae10aada79b0a687732e341d275eb3823ec0e4a", "index": 9475, "step-1": "<mask token>\n\n\nclass BucketDatasetGenerator:\n \"\"\"\n Provide data distribution of different gears for the bert network.\n\n Args:\n data_set (Dataset): The training dataset.\n batch_size (Int): The training batchsize.\n bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.\n \"\"\"\n\n def __init__(self, data_set, batch_size, bucket_list=None):\n self.dataset = data_set\n self.batch_size = batch_size\n self.bucket_list = bucket_list\n self.data_bucket = {bucket: [] for bucket in bucket_list}\n bucket_size = len(bucket_list)\n self.random_list = np.random.binomial(n=bucket_size - 1, p=0.5,\n size=self.__len__())\n self.random_list = (self.random_list + 2) % bucket_size\n self.random_list = [bucket_list[i] for i in self.random_list]\n self.iter = 0\n\n def __next__(self):\n for item in self.iterator:\n for seq_length in self.bucket_list:\n if np.sum(item[1]) <= seq_length:\n self.data_bucket[seq_length].append(item)\n break\n for key in self.data_bucket.keys():\n data = self.data_bucket[key]\n if len(data) >= self.batch_size and self.random_list[self.iter\n ] == key:\n self.data_bucket[key] = self.data_bucket[key][self.\n batch_size:]\n arr = data[0]\n for i in range(1, self.batch_size):\n current_data = data[i]\n for j in range(len(current_data)):\n arr[j] = np.concatenate((arr[j], current_data[j]))\n res = ()\n for label in arr:\n newlabel = np.reshape(label, (self.batch_size, -1))\n res += newlabel,\n res += np.array(key, np.int32),\n self.iter += 1\n return res\n raise StopIteration\n\n def __iter__(self):\n self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)\n return self\n\n def __len__(self):\n return self.dataset.get_dataset_size() // self.batch_size - 1\n\n\n<mask token>\n\n\ndef create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None,\n schema_file_path=None, is_training=True, do_shuffle=True, rank_size=1,\n rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n if is_training:\n print('data_file_path: ', data_file_path)\n print('rank_id: ', rank_id)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'start_positions', 'end_positions',\n 'unique_ids', 'is_impossible'], shuffle=do_shuffle, num_shards=\n rank_size, shard_id=rank_id)\n ds = ds.map(operations=type_cast_op, input_columns='start_positions')\n ds = ds.map(operations=type_cast_op, input_columns='end_positions')\n else:\n ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=\n do_shuffle, column_names=['input_ids', 'input_mask',\n 'segment_ids', 'unique_ids'])\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='unique_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None,\n schema_dir=None):\n \"\"\"create evaluation dataset\"\"\"\n data_files = []\n if os.path.isdir(data_dir):\n files = os.listdir(data_dir)\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n else:\n data_files.append(data_dir)\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' 
else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shard_equal_rows=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin eval size: ', ori_dataset_size)\n dtypes = data_set.output_types()\n shapes = data_set.output_shapes()\n output_batches = math.ceil(ori_dataset_size / device_num / batchsize)\n padded_num = output_batches * device_num * batchsize - ori_dataset_size\n print('padded num: ', padded_num)\n if padded_num > 0:\n item = {'input_ids': np.zeros(shapes[0], dtypes[0]), 'input_mask':\n np.zeros(shapes[1], dtypes[1]), 'segment_ids': np.zeros(shapes[\n 2], dtypes[2]), 'next_sentence_labels': np.zeros(shapes[3],\n dtypes[3]), 'masked_lm_positions': np.zeros(shapes[4], dtypes[4\n ]), 'masked_lm_ids': np.zeros(shapes[5], dtypes[5]),\n 'masked_lm_weights': np.zeros(shapes[6], dtypes[6])}\n padded_samples = [item for x in range(padded_num)]\n padded_ds = de.PaddedDataset(padded_samples)\n eval_ds = data_set + padded_ds\n sampler = de.DistributedSampler(num_shards=device_num, shard_id=\n rank, shuffle=False)\n eval_ds.use_sampler(sampler)\n else:\n eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], num_shards=device_num,\n shard_id=rank, shard_equal_rows=True)\n type_cast_op = C.TypeCast(mstype.int32)\n eval_ds = eval_ds.map(input_columns='masked_lm_ids', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='masked_lm_positions', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='next_sentence_labels', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='segment_ids', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_mask', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_ids', operations=type_cast_op)\n eval_ds = eval_ds.batch(batchsize, drop_remainder=True)\n print('eval data size: {}'.format(eval_ds.get_dataset_size()))\n print('eval repeat count: {}'.format(eval_ds.get_repeat_count()))\n return eval_ds\n", "step-2": "<mask token>\n\n\nclass BucketDatasetGenerator:\n \"\"\"\n Provide data distribution of different gears for the bert network.\n\n Args:\n data_set (Dataset): The training dataset.\n batch_size (Int): The training batchsize.\n bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. 
Default: None.\n \"\"\"\n\n def __init__(self, data_set, batch_size, bucket_list=None):\n self.dataset = data_set\n self.batch_size = batch_size\n self.bucket_list = bucket_list\n self.data_bucket = {bucket: [] for bucket in bucket_list}\n bucket_size = len(bucket_list)\n self.random_list = np.random.binomial(n=bucket_size - 1, p=0.5,\n size=self.__len__())\n self.random_list = (self.random_list + 2) % bucket_size\n self.random_list = [bucket_list[i] for i in self.random_list]\n self.iter = 0\n\n def __next__(self):\n for item in self.iterator:\n for seq_length in self.bucket_list:\n if np.sum(item[1]) <= seq_length:\n self.data_bucket[seq_length].append(item)\n break\n for key in self.data_bucket.keys():\n data = self.data_bucket[key]\n if len(data) >= self.batch_size and self.random_list[self.iter\n ] == key:\n self.data_bucket[key] = self.data_bucket[key][self.\n batch_size:]\n arr = data[0]\n for i in range(1, self.batch_size):\n current_data = data[i]\n for j in range(len(current_data)):\n arr[j] = np.concatenate((arr[j], current_data[j]))\n res = ()\n for label in arr:\n newlabel = np.reshape(label, (self.batch_size, -1))\n res += newlabel,\n res += np.array(key, np.int32),\n self.iter += 1\n return res\n raise StopIteration\n\n def __iter__(self):\n self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)\n return self\n\n def __len__(self):\n return self.dataset.get_dataset_size() // self.batch_size - 1\n\n\ndef create_albert_dataset(device_num=1, rank=0, do_shuffle='true', data_dir\n =None, schema_dir=None, batch_size=32, bucket_list=None):\n \"\"\"create train dataset\"\"\"\n files = os.listdir(data_dir)\n data_files = []\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shuffle=de.Shuffle.FILES if \n do_shuffle == 'true' else False, num_shards=device_num, shard_id=\n rank, shard_equal_rows=True)\n if bucket_list:\n bucket_dataset = BucketDatasetGenerator(data_set, batch_size,\n bucket_list=bucket_list)\n data_set = de.GeneratorDataset(bucket_dataset, column_names=[\n 'input_ids', 'input_mask', 'segment_ids',\n 'next_sentence_labels', 'masked_lm_positions', 'masked_lm_ids',\n 'masked_lm_weights', 'sentence_flag'], shuffle=False)\n else:\n data_set = data_set.batch(batch_size, drop_remainder=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin dataset size: ', ori_dataset_size)\n type_cast_op = C.TypeCast(mstype.int32)\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_positions')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'next_sentence_labels')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'segment_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns='input_mask'\n )\n data_set = data_set.map(operations=type_cast_op, input_columns='input_ids')\n logger.info('data size: {}'.format(data_set.get_dataset_size()))\n logger.info('repeat count: {}'.format(data_set.get_repeat_count()))\n return data_set\n\n\ndef create_classification_dataset(batch_size=1, repeat_count=1,\n assessment_method='accuracy', data_file_path=None, schema_file_path=\n None, do_shuffle=True, rank_size=1, rank_id=0):\n 
\"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'label_ids'], shuffle=do_shuffle,\n num_shards=rank_size, shard_id=rank_id)\n if assessment_method == 'Spearman_correlation':\n type_cast_op_float = C.TypeCast(mstype.float32)\n ds = ds.map(operations=type_cast_op_float, input_columns='label_ids')\n else:\n ds = ds.map(operations=type_cast_op, input_columns='label_ids')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\n<mask token>\n\n\ndef generator_squad_train(data_features):\n for feature in data_features:\n yield feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position, feature.unique_id, feature.is_impossible\n\n\ndef create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None,\n schema_file_path=None, is_training=True, do_shuffle=True, rank_size=1,\n rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n if is_training:\n print('data_file_path: ', data_file_path)\n print('rank_id: ', rank_id)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'start_positions', 'end_positions',\n 'unique_ids', 'is_impossible'], shuffle=do_shuffle, num_shards=\n rank_size, shard_id=rank_id)\n ds = ds.map(operations=type_cast_op, input_columns='start_positions')\n ds = ds.map(operations=type_cast_op, input_columns='end_positions')\n else:\n ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=\n do_shuffle, column_names=['input_ids', 'input_mask',\n 'segment_ids', 'unique_ids'])\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='unique_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None,\n schema_dir=None):\n \"\"\"create evaluation dataset\"\"\"\n data_files = []\n if os.path.isdir(data_dir):\n files = os.listdir(data_dir)\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n else:\n data_files.append(data_dir)\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shard_equal_rows=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin eval size: ', ori_dataset_size)\n dtypes = data_set.output_types()\n shapes = data_set.output_shapes()\n output_batches = math.ceil(ori_dataset_size / device_num / batchsize)\n padded_num = output_batches * device_num * batchsize - ori_dataset_size\n print('padded num: ', padded_num)\n if padded_num > 0:\n item = {'input_ids': np.zeros(shapes[0], dtypes[0]), 'input_mask':\n np.zeros(shapes[1], dtypes[1]), 'segment_ids': np.zeros(shapes[\n 2], dtypes[2]), 'next_sentence_labels': np.zeros(shapes[3],\n dtypes[3]), 'masked_lm_positions': 
np.zeros(shapes[4], dtypes[4\n ]), 'masked_lm_ids': np.zeros(shapes[5], dtypes[5]),\n 'masked_lm_weights': np.zeros(shapes[6], dtypes[6])}\n padded_samples = [item for x in range(padded_num)]\n padded_ds = de.PaddedDataset(padded_samples)\n eval_ds = data_set + padded_ds\n sampler = de.DistributedSampler(num_shards=device_num, shard_id=\n rank, shuffle=False)\n eval_ds.use_sampler(sampler)\n else:\n eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], num_shards=device_num,\n shard_id=rank, shard_equal_rows=True)\n type_cast_op = C.TypeCast(mstype.int32)\n eval_ds = eval_ds.map(input_columns='masked_lm_ids', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='masked_lm_positions', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='next_sentence_labels', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='segment_ids', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_mask', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_ids', operations=type_cast_op)\n eval_ds = eval_ds.batch(batchsize, drop_remainder=True)\n print('eval data size: {}'.format(eval_ds.get_dataset_size()))\n print('eval repeat count: {}'.format(eval_ds.get_repeat_count()))\n return eval_ds\n", "step-3": "<mask token>\n\n\nclass BucketDatasetGenerator:\n \"\"\"\n Provide data distribution of different gears for the bert network.\n\n Args:\n data_set (Dataset): The training dataset.\n batch_size (Int): The training batchsize.\n bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.\n \"\"\"\n\n def __init__(self, data_set, batch_size, bucket_list=None):\n self.dataset = data_set\n self.batch_size = batch_size\n self.bucket_list = bucket_list\n self.data_bucket = {bucket: [] for bucket in bucket_list}\n bucket_size = len(bucket_list)\n self.random_list = np.random.binomial(n=bucket_size - 1, p=0.5,\n size=self.__len__())\n self.random_list = (self.random_list + 2) % bucket_size\n self.random_list = [bucket_list[i] for i in self.random_list]\n self.iter = 0\n\n def __next__(self):\n for item in self.iterator:\n for seq_length in self.bucket_list:\n if np.sum(item[1]) <= seq_length:\n self.data_bucket[seq_length].append(item)\n break\n for key in self.data_bucket.keys():\n data = self.data_bucket[key]\n if len(data) >= self.batch_size and self.random_list[self.iter\n ] == key:\n self.data_bucket[key] = self.data_bucket[key][self.\n batch_size:]\n arr = data[0]\n for i in range(1, self.batch_size):\n current_data = data[i]\n for j in range(len(current_data)):\n arr[j] = np.concatenate((arr[j], current_data[j]))\n res = ()\n for label in arr:\n newlabel = np.reshape(label, (self.batch_size, -1))\n res += newlabel,\n res += np.array(key, np.int32),\n self.iter += 1\n return res\n raise StopIteration\n\n def __iter__(self):\n self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)\n return self\n\n def __len__(self):\n return self.dataset.get_dataset_size() // self.batch_size - 1\n\n\ndef create_albert_dataset(device_num=1, rank=0, do_shuffle='true', data_dir\n =None, schema_dir=None, batch_size=32, bucket_list=None):\n \"\"\"create train dataset\"\"\"\n files = os.listdir(data_dir)\n data_files = []\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n 
data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shuffle=de.Shuffle.FILES if \n do_shuffle == 'true' else False, num_shards=device_num, shard_id=\n rank, shard_equal_rows=True)\n if bucket_list:\n bucket_dataset = BucketDatasetGenerator(data_set, batch_size,\n bucket_list=bucket_list)\n data_set = de.GeneratorDataset(bucket_dataset, column_names=[\n 'input_ids', 'input_mask', 'segment_ids',\n 'next_sentence_labels', 'masked_lm_positions', 'masked_lm_ids',\n 'masked_lm_weights', 'sentence_flag'], shuffle=False)\n else:\n data_set = data_set.batch(batch_size, drop_remainder=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin dataset size: ', ori_dataset_size)\n type_cast_op = C.TypeCast(mstype.int32)\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_positions')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'next_sentence_labels')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'segment_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns='input_mask'\n )\n data_set = data_set.map(operations=type_cast_op, input_columns='input_ids')\n logger.info('data size: {}'.format(data_set.get_dataset_size()))\n logger.info('repeat count: {}'.format(data_set.get_repeat_count()))\n return data_set\n\n\ndef create_classification_dataset(batch_size=1, repeat_count=1,\n assessment_method='accuracy', data_file_path=None, schema_file_path=\n None, do_shuffle=True, rank_size=1, rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'label_ids'], shuffle=do_shuffle,\n num_shards=rank_size, shard_id=rank_id)\n if assessment_method == 'Spearman_correlation':\n type_cast_op_float = C.TypeCast(mstype.float32)\n ds = ds.map(operations=type_cast_op_float, input_columns='label_ids')\n else:\n ds = ds.map(operations=type_cast_op, input_columns='label_ids')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef generator_squad(data_features):\n for feature in data_features:\n yield feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id\n\n\ndef generator_squad_train(data_features):\n for feature in data_features:\n yield feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position, feature.unique_id, feature.is_impossible\n\n\ndef create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None,\n schema_file_path=None, is_training=True, do_shuffle=True, rank_size=1,\n rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n if is_training:\n print('data_file_path: ', data_file_path)\n print('rank_id: ', rank_id)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'start_positions', 'end_positions',\n 'unique_ids', 'is_impossible'], shuffle=do_shuffle, num_shards=\n rank_size, shard_id=rank_id)\n 
ds = ds.map(operations=type_cast_op, input_columns='start_positions')\n ds = ds.map(operations=type_cast_op, input_columns='end_positions')\n else:\n ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=\n do_shuffle, column_names=['input_ids', 'input_mask',\n 'segment_ids', 'unique_ids'])\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='unique_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None,\n schema_dir=None):\n \"\"\"create evaluation dataset\"\"\"\n data_files = []\n if os.path.isdir(data_dir):\n files = os.listdir(data_dir)\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n else:\n data_files.append(data_dir)\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shard_equal_rows=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin eval size: ', ori_dataset_size)\n dtypes = data_set.output_types()\n shapes = data_set.output_shapes()\n output_batches = math.ceil(ori_dataset_size / device_num / batchsize)\n padded_num = output_batches * device_num * batchsize - ori_dataset_size\n print('padded num: ', padded_num)\n if padded_num > 0:\n item = {'input_ids': np.zeros(shapes[0], dtypes[0]), 'input_mask':\n np.zeros(shapes[1], dtypes[1]), 'segment_ids': np.zeros(shapes[\n 2], dtypes[2]), 'next_sentence_labels': np.zeros(shapes[3],\n dtypes[3]), 'masked_lm_positions': np.zeros(shapes[4], dtypes[4\n ]), 'masked_lm_ids': np.zeros(shapes[5], dtypes[5]),\n 'masked_lm_weights': np.zeros(shapes[6], dtypes[6])}\n padded_samples = [item for x in range(padded_num)]\n padded_ds = de.PaddedDataset(padded_samples)\n eval_ds = data_set + padded_ds\n sampler = de.DistributedSampler(num_shards=device_num, shard_id=\n rank, shuffle=False)\n eval_ds.use_sampler(sampler)\n else:\n eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], num_shards=device_num,\n shard_id=rank, shard_equal_rows=True)\n type_cast_op = C.TypeCast(mstype.int32)\n eval_ds = eval_ds.map(input_columns='masked_lm_ids', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='masked_lm_positions', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='next_sentence_labels', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='segment_ids', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_mask', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_ids', operations=type_cast_op)\n eval_ds = eval_ds.batch(batchsize, drop_remainder=True)\n print('eval data size: {}'.format(eval_ds.get_dataset_size()))\n print('eval repeat count: {}'.format(eval_ds.get_repeat_count()))\n return eval_ds\n", "step-4": "<mask token>\nimport os\nimport math\nimport numpy as np\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as de\nimport mindspore.dataset.transforms as C\nfrom mindspore import log as 
logger\n\n\nclass BucketDatasetGenerator:\n \"\"\"\n Provide data distribution of different gears for the bert network.\n\n Args:\n data_set (Dataset): The training dataset.\n batch_size (Int): The training batchsize.\n bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.\n \"\"\"\n\n def __init__(self, data_set, batch_size, bucket_list=None):\n self.dataset = data_set\n self.batch_size = batch_size\n self.bucket_list = bucket_list\n self.data_bucket = {bucket: [] for bucket in bucket_list}\n bucket_size = len(bucket_list)\n self.random_list = np.random.binomial(n=bucket_size - 1, p=0.5,\n size=self.__len__())\n self.random_list = (self.random_list + 2) % bucket_size\n self.random_list = [bucket_list[i] for i in self.random_list]\n self.iter = 0\n\n def __next__(self):\n for item in self.iterator:\n for seq_length in self.bucket_list:\n if np.sum(item[1]) <= seq_length:\n self.data_bucket[seq_length].append(item)\n break\n for key in self.data_bucket.keys():\n data = self.data_bucket[key]\n if len(data) >= self.batch_size and self.random_list[self.iter\n ] == key:\n self.data_bucket[key] = self.data_bucket[key][self.\n batch_size:]\n arr = data[0]\n for i in range(1, self.batch_size):\n current_data = data[i]\n for j in range(len(current_data)):\n arr[j] = np.concatenate((arr[j], current_data[j]))\n res = ()\n for label in arr:\n newlabel = np.reshape(label, (self.batch_size, -1))\n res += newlabel,\n res += np.array(key, np.int32),\n self.iter += 1\n return res\n raise StopIteration\n\n def __iter__(self):\n self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)\n return self\n\n def __len__(self):\n return self.dataset.get_dataset_size() // self.batch_size - 1\n\n\ndef create_albert_dataset(device_num=1, rank=0, do_shuffle='true', data_dir\n =None, schema_dir=None, batch_size=32, bucket_list=None):\n \"\"\"create train dataset\"\"\"\n files = os.listdir(data_dir)\n data_files = []\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shuffle=de.Shuffle.FILES if \n do_shuffle == 'true' else False, num_shards=device_num, shard_id=\n rank, shard_equal_rows=True)\n if bucket_list:\n bucket_dataset = BucketDatasetGenerator(data_set, batch_size,\n bucket_list=bucket_list)\n data_set = de.GeneratorDataset(bucket_dataset, column_names=[\n 'input_ids', 'input_mask', 'segment_ids',\n 'next_sentence_labels', 'masked_lm_positions', 'masked_lm_ids',\n 'masked_lm_weights', 'sentence_flag'], shuffle=False)\n else:\n data_set = data_set.batch(batch_size, drop_remainder=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin dataset size: ', ori_dataset_size)\n type_cast_op = C.TypeCast(mstype.int32)\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'masked_lm_positions')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'next_sentence_labels')\n data_set = data_set.map(operations=type_cast_op, input_columns=\n 'segment_ids')\n data_set = data_set.map(operations=type_cast_op, input_columns='input_mask'\n )\n data_set = data_set.map(operations=type_cast_op, input_columns='input_ids')\n logger.info('data size: 
{}'.format(data_set.get_dataset_size()))\n logger.info('repeat count: {}'.format(data_set.get_repeat_count()))\n return data_set\n\n\ndef create_classification_dataset(batch_size=1, repeat_count=1,\n assessment_method='accuracy', data_file_path=None, schema_file_path=\n None, do_shuffle=True, rank_size=1, rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'label_ids'], shuffle=do_shuffle,\n num_shards=rank_size, shard_id=rank_id)\n if assessment_method == 'Spearman_correlation':\n type_cast_op_float = C.TypeCast(mstype.float32)\n ds = ds.map(operations=type_cast_op_float, input_columns='label_ids')\n else:\n ds = ds.map(operations=type_cast_op, input_columns='label_ids')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef generator_squad(data_features):\n for feature in data_features:\n yield feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id\n\n\ndef generator_squad_train(data_features):\n for feature in data_features:\n yield feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position, feature.unique_id, feature.is_impossible\n\n\ndef create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None,\n schema_file_path=None, is_training=True, do_shuffle=True, rank_size=1,\n rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n if is_training:\n print('data_file_path: ', data_file_path)\n print('rank_id: ', rank_id)\n ds = de.MindDataset([data_file_path], columns_list=['input_ids',\n 'input_mask', 'segment_ids', 'start_positions', 'end_positions',\n 'unique_ids', 'is_impossible'], shuffle=do_shuffle, num_shards=\n rank_size, shard_id=rank_id)\n ds = ds.map(operations=type_cast_op, input_columns='start_positions')\n ds = ds.map(operations=type_cast_op, input_columns='end_positions')\n else:\n ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=\n do_shuffle, column_names=['input_ids', 'input_mask',\n 'segment_ids', 'unique_ids'])\n ds = ds.map(operations=type_cast_op, input_columns='input_ids')\n ds = ds.map(operations=type_cast_op, input_columns='input_mask')\n ds = ds.map(operations=type_cast_op, input_columns='segment_ids')\n ds = ds.map(operations=type_cast_op, input_columns='unique_ids')\n ds = ds.repeat(repeat_count)\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None,\n schema_dir=None):\n \"\"\"create evaluation dataset\"\"\"\n data_files = []\n if os.path.isdir(data_dir):\n files = os.listdir(data_dir)\n for file_name in files:\n if 'tfrecord' in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n else:\n data_files.append(data_dir)\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], shard_equal_rows=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin eval size: ', ori_dataset_size)\n dtypes = data_set.output_types()\n shapes = 
data_set.output_shapes()\n output_batches = math.ceil(ori_dataset_size / device_num / batchsize)\n padded_num = output_batches * device_num * batchsize - ori_dataset_size\n print('padded num: ', padded_num)\n if padded_num > 0:\n item = {'input_ids': np.zeros(shapes[0], dtypes[0]), 'input_mask':\n np.zeros(shapes[1], dtypes[1]), 'segment_ids': np.zeros(shapes[\n 2], dtypes[2]), 'next_sentence_labels': np.zeros(shapes[3],\n dtypes[3]), 'masked_lm_positions': np.zeros(shapes[4], dtypes[4\n ]), 'masked_lm_ids': np.zeros(shapes[5], dtypes[5]),\n 'masked_lm_weights': np.zeros(shapes[6], dtypes[6])}\n padded_samples = [item for x in range(padded_num)]\n padded_ds = de.PaddedDataset(padded_samples)\n eval_ds = data_set + padded_ds\n sampler = de.DistributedSampler(num_shards=device_num, shard_id=\n rank, shuffle=False)\n eval_ds.use_sampler(sampler)\n else:\n eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir !=\n '' else None, columns_list=['input_ids', 'input_mask',\n 'segment_ids', 'next_sentence_labels', 'masked_lm_positions',\n 'masked_lm_ids', 'masked_lm_weights'], num_shards=device_num,\n shard_id=rank, shard_equal_rows=True)\n type_cast_op = C.TypeCast(mstype.int32)\n eval_ds = eval_ds.map(input_columns='masked_lm_ids', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='masked_lm_positions', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='next_sentence_labels', operations=\n type_cast_op)\n eval_ds = eval_ds.map(input_columns='segment_ids', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_mask', operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns='input_ids', operations=type_cast_op)\n eval_ds = eval_ds.batch(batchsize, drop_remainder=True)\n print('eval data size: {}'.format(eval_ds.get_dataset_size()))\n print('eval repeat count: {}'.format(eval_ds.get_repeat_count()))\n return eval_ds\n", "step-5": "# Copyright 2021-2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nData operations, will be used in run_pretrain.py\n\"\"\"\nimport os\nimport math\nimport numpy as np\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as de\nimport mindspore.dataset.transforms as C\nfrom mindspore import log as logger\n\n\nclass BucketDatasetGenerator:\n \"\"\"\n Provide data distribution of different gears for the bert network.\n\n Args:\n data_set (Dataset): The training dataset.\n batch_size (Int): The training batchsize.\n bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. 
Default: None.\n \"\"\"\n\n def __init__(self, data_set, batch_size, bucket_list=None):\n self.dataset = data_set\n self.batch_size = batch_size\n self.bucket_list = bucket_list\n self.data_bucket = {bucket: [] for bucket in bucket_list}\n bucket_size = len(bucket_list)\n self.random_list = np.random.binomial(n=(bucket_size - 1), p=0.5, size=self.__len__())\n self.random_list = (self.random_list + 2) % bucket_size\n self.random_list = [bucket_list[i] for i in self.random_list]\n self.iter = 0\n\n def __next__(self):\n for item in self.iterator:\n for seq_length in self.bucket_list:\n if np.sum(item[1]) <= seq_length:\n self.data_bucket[seq_length].append(item)\n break\n for key in self.data_bucket.keys():\n data = self.data_bucket[key]\n if len(data) >= self.batch_size and self.random_list[self.iter] == key:\n self.data_bucket[key] = self.data_bucket[key][self.batch_size:]\n arr = data[0]\n for i in range(1, self.batch_size):\n current_data = data[i]\n for j in range(len(current_data)):\n arr[j] = np.concatenate((arr[j], current_data[j]))\n res = ()\n for label in arr:\n newlabel = np.reshape(label, (self.batch_size, -1))\n res += (newlabel,)\n res += (np.array(key, np.int32),)\n self.iter += 1\n return res\n raise StopIteration\n\n def __iter__(self):\n self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)\n return self\n\n def __len__(self):\n return (self.dataset.get_dataset_size() // self.batch_size) - 1\n\n\ndef create_albert_dataset(device_num=1, rank=0, do_shuffle=\"true\", data_dir=None, schema_dir=None, batch_size=32,\n bucket_list=None):\n \"\"\"create train dataset\"\"\"\n # apply repeat operations\n files = os.listdir(data_dir)\n data_files = []\n for file_name in files:\n if \"tfrecord\" in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != \"\" else None,\n columns_list=[\"input_ids\", \"input_mask\", \"segment_ids\", \"next_sentence_labels\",\n \"masked_lm_positions\", \"masked_lm_ids\", \"masked_lm_weights\"],\n shuffle=de.Shuffle.FILES if do_shuffle == \"true\" else False,\n num_shards=device_num, shard_id=rank, shard_equal_rows=True)\n if bucket_list:\n bucket_dataset = BucketDatasetGenerator(data_set, batch_size, bucket_list=bucket_list)\n data_set = de.GeneratorDataset(bucket_dataset,\n column_names=[\"input_ids\", \"input_mask\", \"segment_ids\",\n \"next_sentence_labels\",\n \"masked_lm_positions\", \"masked_lm_ids\", \"masked_lm_weights\",\n \"sentence_flag\"],\n shuffle=False)\n else:\n data_set = data_set.batch(batch_size, drop_remainder=True)\n ori_dataset_size = data_set.get_dataset_size()\n print('origin dataset size: ', ori_dataset_size)\n type_cast_op = C.TypeCast(mstype.int32)\n data_set = data_set.map(operations=type_cast_op, input_columns=\"masked_lm_ids\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"masked_lm_positions\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"next_sentence_labels\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"segment_ids\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"input_mask\")\n data_set = data_set.map(operations=type_cast_op, input_columns=\"input_ids\")\n # apply batch operations\n logger.info(\"data size: {}\".format(data_set.get_dataset_size()))\n logger.info(\"repeat count: {}\".format(data_set.get_repeat_count()))\n return data_set\n\n\ndef create_classification_dataset(batch_size=1, repeat_count=1, assessment_method=\"accuracy\",\n 
data_file_path=None, schema_file_path=None, do_shuffle=True,\n rank_size=1, rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n ds = de.MindDataset([data_file_path],\n columns_list=[\"input_ids\", \"input_mask\", \"segment_ids\", \"label_ids\"], shuffle=do_shuffle,\n num_shards=rank_size, shard_id=rank_id)\n if assessment_method == \"Spearman_correlation\":\n type_cast_op_float = C.TypeCast(mstype.float32)\n ds = ds.map(operations=type_cast_op_float, input_columns=\"label_ids\")\n else:\n ds = ds.map(operations=type_cast_op, input_columns=\"label_ids\")\n ds = ds.map(operations=type_cast_op, input_columns=\"segment_ids\")\n ds = ds.map(operations=type_cast_op, input_columns=\"input_mask\")\n ds = ds.map(operations=type_cast_op, input_columns=\"input_ids\")\n ds = ds.repeat(repeat_count)\n # apply batch operations\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef generator_squad(data_features):\n for feature in data_features:\n yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id)\n\n\ndef generator_squad_train(data_features):\n for feature in data_features:\n yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.start_position, feature.end_position,\n feature.unique_id, feature.is_impossible)\n\n\ndef create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None,\n is_training=True, do_shuffle=True, rank_size=1,\n rank_id=0):\n \"\"\"create finetune or evaluation dataset\"\"\"\n type_cast_op = C.TypeCast(mstype.int32)\n if is_training:\n print(\"data_file_path: \", data_file_path)\n print(\"rank_id: \", rank_id)\n ds = de.MindDataset([data_file_path],\n columns_list=[\"input_ids\", \"input_mask\", \"segment_ids\", \"start_positions\",\n \"end_positions\", \"unique_ids\", \"is_impossible\"],\n shuffle=do_shuffle, num_shards=rank_size, shard_id=rank_id)\n ds = ds.map(operations=type_cast_op, input_columns=\"start_positions\")\n ds = ds.map(operations=type_cast_op, input_columns=\"end_positions\")\n else:\n ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle,\n column_names=[\"input_ids\", \"input_mask\", \"segment_ids\", \"unique_ids\"])\n\n ds = ds.map(operations=type_cast_op, input_columns=\"input_ids\")\n ds = ds.map(operations=type_cast_op, input_columns=\"input_mask\")\n ds = ds.map(operations=type_cast_op, input_columns=\"segment_ids\")\n ds = ds.map(operations=type_cast_op, input_columns=\"unique_ids\")\n ds = ds.repeat(repeat_count)\n # apply batch operations\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None, schema_dir=None):\n \"\"\"create evaluation dataset\"\"\"\n data_files = []\n if os.path.isdir(data_dir):\n files = os.listdir(data_dir)\n for file_name in files:\n if \"tfrecord\" in file_name:\n data_files.append(os.path.join(data_dir, file_name))\n else:\n data_files.append(data_dir)\n data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != \"\" else None,\n columns_list=[\"input_ids\", \"input_mask\", \"segment_ids\", \"next_sentence_labels\",\n \"masked_lm_positions\", \"masked_lm_ids\", \"masked_lm_weights\"],\n shard_equal_rows=True)\n ori_dataset_size = data_set.get_dataset_size()\n print(\"origin eval size: \", ori_dataset_size)\n dtypes = data_set.output_types()\n shapes = data_set.output_shapes()\n output_batches = math.ceil(ori_dataset_size / device_num / batchsize)\n padded_num = 
output_batches * device_num * batchsize - ori_dataset_size\n print(\"padded num: \", padded_num)\n if padded_num > 0:\n item = {\"input_ids\": np.zeros(shapes[0], dtypes[0]),\n \"input_mask\": np.zeros(shapes[1], dtypes[1]),\n \"segment_ids\": np.zeros(shapes[2], dtypes[2]),\n \"next_sentence_labels\": np.zeros(shapes[3], dtypes[3]),\n \"masked_lm_positions\": np.zeros(shapes[4], dtypes[4]),\n \"masked_lm_ids\": np.zeros(shapes[5], dtypes[5]),\n \"masked_lm_weights\": np.zeros(shapes[6], dtypes[6])}\n padded_samples = [item for x in range(padded_num)]\n padded_ds = de.PaddedDataset(padded_samples)\n eval_ds = data_set + padded_ds\n sampler = de.DistributedSampler(num_shards=device_num, shard_id=rank, shuffle=False)\n eval_ds.use_sampler(sampler)\n else:\n eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir != \"\" else None,\n columns_list=[\"input_ids\", \"input_mask\", \"segment_ids\",\n \"next_sentence_labels\",\n \"masked_lm_positions\", \"masked_lm_ids\", \"masked_lm_weights\"],\n num_shards=device_num, shard_id=rank, shard_equal_rows=True)\n\n type_cast_op = C.TypeCast(mstype.int32)\n eval_ds = eval_ds.map(input_columns=\"masked_lm_ids\", operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns=\"masked_lm_positions\", operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns=\"next_sentence_labels\", operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns=\"segment_ids\", operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns=\"input_mask\", operations=type_cast_op)\n eval_ds = eval_ds.map(input_columns=\"input_ids\", operations=type_cast_op)\n eval_ds = eval_ds.batch(batchsize, drop_remainder=True)\n print(\"eval data size: {}\".format(eval_ds.get_dataset_size()))\n print(\"eval repeat count: {}\".format(eval_ds.get_repeat_count()))\n return eval_ds\n", "step-ids": [ 8, 11, 12, 13, 14 ] }
[ 8, 11, 12, 13, 14 ]
import random


def multi():
    scc = [6, 5, 4]
    sc = [6, 5]
    cc = [5, 4]
    crew = [4]
    captain = [5]
    ship = [6]
    n = 0
    while n <= 2:
        inp = input("Hit enter to roll")
        if inp == "":
            roll5 = random.choices(range(1, 7), k=5)
            print(roll5)
            if set(scc).issubset(roll5):
                result_scc = [i for i in roll5 if not i in scc or scc.remove(i)]
                total_scc = sum(result_scc)
                inp_scc = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                if inp_scc == "both":
                    print("Total score: " + str(total_scc) + ".")
                if inp_scc == "neither":
                    roll2_scc = random.choices(range(1, 7), k=2)
                    print(roll2_scc)
                    inp_scc_none = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                    if inp_scc_none == "both":
                        total_scc_none = sum(roll2_scc)
                        print("Total score: " + str(total_scc_none) + ".")
                    if inp_scc_none == "neither":
                        roll2_scc_none = random.choices(range(1, 7), k=2)
                        total_scc_none2 = sum(roll2_scc_none)
                        print(roll2_scc_none)
                        print("Your total score is: " + str(total_scc_none2) + ".")
                    if inp_scc_none == "one":
                        inp_scc_none_one = input("Which die do you want to keep? ")
                        roll1_scc_none_one = random.randint(1, 6)
                        total_scc_none_one = roll1_scc_none_one + int(inp_scc_none_one)
                        print(roll1_scc_none_one)
                        print("Your total score is: " + str(total_scc_none_one) + ".")
                if inp_scc == "one":
                    inp_scc_one = input("Which die do you want to keep? ")
                    roll1_scc_one = random.randint(1, 6)
                    print(roll1_scc_one)
                    total_scc_one = roll1_scc_one + int(inp_scc_one)
                    inp_scc_one2 = input("Hit enter to roll again or type 'pass' to keep your score ")
                    if inp_scc_one2 == "pass":
                        print("Your total score is: " + str(total_scc_one) + ".")
                    if inp_scc_one2 == "":
                        roll1_scc_one2 = random.randint(1, 6)
                        print(roll1_scc_one2)
                        total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)
                        print("Your total score is: " + str(total_scc_one2) + ".")
            if set(sc).issubset(roll5):
                inp_sc = input("Now you need a 4(the Crew). Hit enter to roll the remaining dice")
                if inp_sc == "":
                    roll3 = random.choices(range(1, 7), k=3)
                    print(roll3)
                    if set(crew).issubset(roll3):
                        result_crew = [i for i in roll3 if not i in crew or crew.remove(i)]
                        total_crew = sum(result_crew)
                        inp_crew = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                        if inp_crew == "both":
                            print("Total score: " + str(total_crew) + ".")
                        if inp_crew == "neither":
                            roll2_crew = random.choices(range(1, 7), k=2)
                            print(roll2_crew)
                            total_crew_none = sum(roll2_crew)
                            print("Your total score is: " + str(total_crew_none) + ".")
                        if inp_crew == "one":
                            inp_crew_one = input("Which die do you want to keep? ")
                            roll1_crew_one = random.randint(1, 6)
                            print(roll1_crew_one)
                            total_crew_one = roll1_crew_one + int(inp_crew_one)
                            print("Your total score is: " + str(total_crew_one) + ".")
                    else:
                        inp_sc3 = input("Still no 4. Hit enter to roll again")
                        if inp_sc3 == "":
                            roll3_sc3 = random.choices(range(1, 7), k=3)
                            print(roll3_sc3)
                            if set(crew).issubset(roll3_sc3):
                                result_crew_sc3 = [i for i in roll3_sc3 if not i in crew or crew.remove(i)]
                                total_crew_sc3 = sum(result_crew_sc3)
                                print("Your total score is: " + str(total_crew_sc3) + ".")
                            else:
                                print("Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.")
            if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:
                inp_ship = input(
                    "Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice ")
                if inp_ship == "":
                    roll4_ship = random.choices(range(1, 7), k=4)
                    print(roll4_ship)
                    if set(cc).issubset(roll4_ship):
                        result_ship_cc = [i for i in roll4_ship if not i in cc or cc.remove(i)]
                        total_ship_cc = sum(result_ship_cc)
                        inp_ship_cc = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                        if inp_ship_cc == "both":
                            print("Your total is: " + str(total_ship_cc) + ".")
                        if inp_ship_cc == "neither":
                            roll2_cc = random.choices(range(1, 7), k=2)
                            print(roll2_cc)
                            total_ship_cc_none = sum(roll2_cc)
                            print("Your total score is: " + str(total_ship_cc_none) + ".")
                        if inp_ship_cc == "one":
                            inp_ship_cc_one = input("Which die do you want to keep? ")
                            roll1_ship_cc_one = random.randint(1, 6)
                            print(roll1_ship_cc_one)
                            total_ship_cc_one = roll1_ship_cc_one + int(inp_ship_cc_one)
                            print("Your total score is: " + str(total_ship_cc_one) + ".")
                    if set(captain).issubset(roll4_ship):
                        roll3_captain = random.choices(range(1, 7), k=3)
                        print(roll3_captain)
                        if set(crew).issubset(roll3_captain):
                            result_ship_captain = [i for i in roll3_captain if not i in crew or crew.remove(i)]
                            total_ship_captain = sum(result_ship_captain)
                            print("Your total score is: " + str(total_ship_captain) + ".")
                        else:
                            print("Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.")
        else:
            n = n + 1


inp = input("How many players are there? ")
players = range(int(inp))
roll_dict = dict()
for i in players:
    multi()
normal
{ "blob_id": "bb540ba4cd96e2485e77ba099f0a1a9ea03e1120", "index": 8144, "step-1": "<mask token>\n\n\ndef multi():\n scc = [6, 5, 4]\n sc = [6, 5]\n cc = [5, 4]\n crew = [4]\n captain = [5]\n ship = [6]\n n = 0\n while n <= 2:\n inp = input('Hit enter to roll')\n if inp == '':\n roll5 = random.choices(range(1, 7), k=5)\n print(roll5)\n if set(scc).issubset(roll5):\n result_scc = [i for i in roll5 if not i in scc or scc.remove(i)\n ]\n total_scc = sum(result_scc)\n inp_scc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc == 'both':\n print('Total score: ' + str(total_scc) + '.')\n if inp_scc == 'neither':\n roll2_scc = random.choices(range(1, 7), k=2)\n print(roll2_scc)\n inp_scc_none = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc_none == 'both':\n total_scc_none = sum(roll2_scc)\n print('Total score: ' + str(total_scc_none) + '.')\n if inp_scc_none == 'neither':\n roll2_scc_none = random.choices(range(1, 7), k=2)\n total_scc_none2 = sum(roll2_scc_none)\n print(roll2_scc_none)\n print('Your total score is: ' + str(total_scc_none2\n ) + '.')\n if inp_scc_none == 'one':\n inp_scc_none_one = input(\n 'Which die do you want to keep? ')\n roll1_scc_none_one = random.randint(1, 6)\n total_scc_none_one = roll1_scc_none_one + int(\n inp_scc_none_one)\n print(roll1_scc_none_one)\n print('Your total score is: ' + str(\n total_scc_none_one) + '.')\n if inp_scc == 'one':\n inp_scc_one = input('Which die do you want to keep? ')\n roll1_scc_one = random.randint(1, 6)\n print(roll1_scc_one)\n total_scc_one = roll1_scc_one + int(inp_scc_one)\n inp_scc_one2 = input(\n \"Hit enter to roll again or type 'pass' to keep your score \"\n )\n if inp_scc_one2 == 'pass':\n print('Your total score is: ' + str(total_scc_one) +\n '.')\n if inp_scc_one2 == '':\n roll1_scc_one2 = random.randint(1, 6)\n print(roll1_scc_one2)\n total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)\n print('Your total score is: ' + str(total_scc_one2) +\n '.')\n if set(sc).issubset(roll5):\n inp_sc = input(\n 'Now you need a 4(the Crew). Hit enter to roll the remaining dice'\n )\n if inp_sc == '':\n roll3 = random.choices(range(1, 7), k=3)\n print(roll3)\n if set(crew).issubset(roll3):\n result_crew = [i for i in roll3 if not i in crew or\n crew.remove(i)]\n total_crew = sum(result_crew)\n inp_crew = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_crew == 'both':\n print('Total score: ' + str(total_crew) + '.')\n if inp_crew == 'neither':\n roll2_crew = random.choices(range(1, 7), k=2)\n print(roll2_crew)\n total_crew_none = sum(roll2_crew)\n print('Your total score is: ' + str(\n total_crew_none) + '.')\n if inp_crew == 'one':\n inp_crew_one = input(\n 'Which die do you want to keep? ')\n roll1_crew_one = random.randint(1, 6)\n print(roll1_crew_one)\n total_crew_one = roll1_crew_one + int(inp_crew_one)\n print('Your total score is: ' + str(\n total_crew_one) + '.')\n else:\n inp_sc3 = input('Still no 4. 
Hit enter to roll again')\n if inp_sc3 == '':\n roll3_sc3 = random.choices(range(1, 7), k=3)\n print(roll3_sc3)\n if set(crew).issubset(roll3_sc3):\n result_crew_sc3 = [i for i in roll3_sc3 if \n not i in crew or crew.remove(i)]\n total_crew_sc3 = sum(result_crew_sc3)\n print('Your total score is: ' + str(\n total_crew_sc3) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:\n inp_ship = input(\n 'Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice '\n )\n if inp_ship == '':\n roll4_ship = random.choices(range(1, 7), k=4)\n print(roll4_ship)\n if set(cc).issubset(roll4_ship):\n result_ship_cc = [i for i in roll4_ship if not i in\n cc or cc.remove(i)]\n total_ship_cc = sum(result_ship_cc)\n inp_ship_cc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_ship_cc == 'both':\n print('Your total is: ' + str(total_ship_cc) + '.')\n if inp_ship_cc == 'neither':\n roll2_cc = random.choices(range(1, 7), k=2)\n print(roll2_cc)\n total_ship_cc_none = sum(roll2_cc)\n print('Your total score is: ' + str(\n total_ship_cc_none) + '.')\n if inp_ship_cc == 'one':\n inp_ship_cc_one = input(\n 'Which die do you want to keep? ')\n roll1_ship_cc_one = random.randint(1, 6)\n print(roll1_ship_cc_one)\n total_ship_cc_one = roll1_ship_cc_one + int(\n inp_ship_cc_one)\n print('Your total score is: ' + str(\n total_ship_cc_one) + '.')\n if set(captain).issubset(roll4_ship):\n roll3_captain = random.choices(range(1, 7), k=3)\n print(roll3_captain)\n if set(crew).issubset(roll3_captain):\n result_ship_captain = [i for i in roll3_captain if\n not i in crew or crew.remove(i)]\n total_ship_captain = sum(result_ship_captain)\n print('Your total score is: ' + str(\n total_ship_captain) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n else:\n n = n + 1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef multi():\n scc = [6, 5, 4]\n sc = [6, 5]\n cc = [5, 4]\n crew = [4]\n captain = [5]\n ship = [6]\n n = 0\n while n <= 2:\n inp = input('Hit enter to roll')\n if inp == '':\n roll5 = random.choices(range(1, 7), k=5)\n print(roll5)\n if set(scc).issubset(roll5):\n result_scc = [i for i in roll5 if not i in scc or scc.remove(i)\n ]\n total_scc = sum(result_scc)\n inp_scc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc == 'both':\n print('Total score: ' + str(total_scc) + '.')\n if inp_scc == 'neither':\n roll2_scc = random.choices(range(1, 7), k=2)\n print(roll2_scc)\n inp_scc_none = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc_none == 'both':\n total_scc_none = sum(roll2_scc)\n print('Total score: ' + str(total_scc_none) + '.')\n if inp_scc_none == 'neither':\n roll2_scc_none = random.choices(range(1, 7), k=2)\n total_scc_none2 = sum(roll2_scc_none)\n print(roll2_scc_none)\n print('Your total score is: ' + str(total_scc_none2\n ) + '.')\n if inp_scc_none == 'one':\n inp_scc_none_one = input(\n 'Which die do you want to keep? ')\n roll1_scc_none_one = random.randint(1, 6)\n total_scc_none_one = roll1_scc_none_one + int(\n inp_scc_none_one)\n print(roll1_scc_none_one)\n print('Your total score is: ' + str(\n total_scc_none_one) + '.')\n if inp_scc == 'one':\n inp_scc_one = input('Which die do you want to keep? 
')\n roll1_scc_one = random.randint(1, 6)\n print(roll1_scc_one)\n total_scc_one = roll1_scc_one + int(inp_scc_one)\n inp_scc_one2 = input(\n \"Hit enter to roll again or type 'pass' to keep your score \"\n )\n if inp_scc_one2 == 'pass':\n print('Your total score is: ' + str(total_scc_one) +\n '.')\n if inp_scc_one2 == '':\n roll1_scc_one2 = random.randint(1, 6)\n print(roll1_scc_one2)\n total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)\n print('Your total score is: ' + str(total_scc_one2) +\n '.')\n if set(sc).issubset(roll5):\n inp_sc = input(\n 'Now you need a 4(the Crew). Hit enter to roll the remaining dice'\n )\n if inp_sc == '':\n roll3 = random.choices(range(1, 7), k=3)\n print(roll3)\n if set(crew).issubset(roll3):\n result_crew = [i for i in roll3 if not i in crew or\n crew.remove(i)]\n total_crew = sum(result_crew)\n inp_crew = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_crew == 'both':\n print('Total score: ' + str(total_crew) + '.')\n if inp_crew == 'neither':\n roll2_crew = random.choices(range(1, 7), k=2)\n print(roll2_crew)\n total_crew_none = sum(roll2_crew)\n print('Your total score is: ' + str(\n total_crew_none) + '.')\n if inp_crew == 'one':\n inp_crew_one = input(\n 'Which die do you want to keep? ')\n roll1_crew_one = random.randint(1, 6)\n print(roll1_crew_one)\n total_crew_one = roll1_crew_one + int(inp_crew_one)\n print('Your total score is: ' + str(\n total_crew_one) + '.')\n else:\n inp_sc3 = input('Still no 4. Hit enter to roll again')\n if inp_sc3 == '':\n roll3_sc3 = random.choices(range(1, 7), k=3)\n print(roll3_sc3)\n if set(crew).issubset(roll3_sc3):\n result_crew_sc3 = [i for i in roll3_sc3 if \n not i in crew or crew.remove(i)]\n total_crew_sc3 = sum(result_crew_sc3)\n print('Your total score is: ' + str(\n total_crew_sc3) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:\n inp_ship = input(\n 'Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice '\n )\n if inp_ship == '':\n roll4_ship = random.choices(range(1, 7), k=4)\n print(roll4_ship)\n if set(cc).issubset(roll4_ship):\n result_ship_cc = [i for i in roll4_ship if not i in\n cc or cc.remove(i)]\n total_ship_cc = sum(result_ship_cc)\n inp_ship_cc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_ship_cc == 'both':\n print('Your total is: ' + str(total_ship_cc) + '.')\n if inp_ship_cc == 'neither':\n roll2_cc = random.choices(range(1, 7), k=2)\n print(roll2_cc)\n total_ship_cc_none = sum(roll2_cc)\n print('Your total score is: ' + str(\n total_ship_cc_none) + '.')\n if inp_ship_cc == 'one':\n inp_ship_cc_one = input(\n 'Which die do you want to keep? 
')\n roll1_ship_cc_one = random.randint(1, 6)\n print(roll1_ship_cc_one)\n total_ship_cc_one = roll1_ship_cc_one + int(\n inp_ship_cc_one)\n print('Your total score is: ' + str(\n total_ship_cc_one) + '.')\n if set(captain).issubset(roll4_ship):\n roll3_captain = random.choices(range(1, 7), k=3)\n print(roll3_captain)\n if set(crew).issubset(roll3_captain):\n result_ship_captain = [i for i in roll3_captain if\n not i in crew or crew.remove(i)]\n total_ship_captain = sum(result_ship_captain)\n print('Your total score is: ' + str(\n total_ship_captain) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n else:\n n = n + 1\n\n\n<mask token>\nfor i in players:\n multi()\n", "step-3": "<mask token>\n\n\ndef multi():\n scc = [6, 5, 4]\n sc = [6, 5]\n cc = [5, 4]\n crew = [4]\n captain = [5]\n ship = [6]\n n = 0\n while n <= 2:\n inp = input('Hit enter to roll')\n if inp == '':\n roll5 = random.choices(range(1, 7), k=5)\n print(roll5)\n if set(scc).issubset(roll5):\n result_scc = [i for i in roll5 if not i in scc or scc.remove(i)\n ]\n total_scc = sum(result_scc)\n inp_scc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc == 'both':\n print('Total score: ' + str(total_scc) + '.')\n if inp_scc == 'neither':\n roll2_scc = random.choices(range(1, 7), k=2)\n print(roll2_scc)\n inp_scc_none = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc_none == 'both':\n total_scc_none = sum(roll2_scc)\n print('Total score: ' + str(total_scc_none) + '.')\n if inp_scc_none == 'neither':\n roll2_scc_none = random.choices(range(1, 7), k=2)\n total_scc_none2 = sum(roll2_scc_none)\n print(roll2_scc_none)\n print('Your total score is: ' + str(total_scc_none2\n ) + '.')\n if inp_scc_none == 'one':\n inp_scc_none_one = input(\n 'Which die do you want to keep? ')\n roll1_scc_none_one = random.randint(1, 6)\n total_scc_none_one = roll1_scc_none_one + int(\n inp_scc_none_one)\n print(roll1_scc_none_one)\n print('Your total score is: ' + str(\n total_scc_none_one) + '.')\n if inp_scc == 'one':\n inp_scc_one = input('Which die do you want to keep? ')\n roll1_scc_one = random.randint(1, 6)\n print(roll1_scc_one)\n total_scc_one = roll1_scc_one + int(inp_scc_one)\n inp_scc_one2 = input(\n \"Hit enter to roll again or type 'pass' to keep your score \"\n )\n if inp_scc_one2 == 'pass':\n print('Your total score is: ' + str(total_scc_one) +\n '.')\n if inp_scc_one2 == '':\n roll1_scc_one2 = random.randint(1, 6)\n print(roll1_scc_one2)\n total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)\n print('Your total score is: ' + str(total_scc_one2) +\n '.')\n if set(sc).issubset(roll5):\n inp_sc = input(\n 'Now you need a 4(the Crew). Hit enter to roll the remaining dice'\n )\n if inp_sc == '':\n roll3 = random.choices(range(1, 7), k=3)\n print(roll3)\n if set(crew).issubset(roll3):\n result_crew = [i for i in roll3 if not i in crew or\n crew.remove(i)]\n total_crew = sum(result_crew)\n inp_crew = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_crew == 'both':\n print('Total score: ' + str(total_crew) + '.')\n if inp_crew == 'neither':\n roll2_crew = random.choices(range(1, 7), k=2)\n print(roll2_crew)\n total_crew_none = sum(roll2_crew)\n print('Your total score is: ' + str(\n total_crew_none) + '.')\n if inp_crew == 'one':\n inp_crew_one = input(\n 'Which die do you want to keep? 
')\n roll1_crew_one = random.randint(1, 6)\n print(roll1_crew_one)\n total_crew_one = roll1_crew_one + int(inp_crew_one)\n print('Your total score is: ' + str(\n total_crew_one) + '.')\n else:\n inp_sc3 = input('Still no 4. Hit enter to roll again')\n if inp_sc3 == '':\n roll3_sc3 = random.choices(range(1, 7), k=3)\n print(roll3_sc3)\n if set(crew).issubset(roll3_sc3):\n result_crew_sc3 = [i for i in roll3_sc3 if \n not i in crew or crew.remove(i)]\n total_crew_sc3 = sum(result_crew_sc3)\n print('Your total score is: ' + str(\n total_crew_sc3) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:\n inp_ship = input(\n 'Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice '\n )\n if inp_ship == '':\n roll4_ship = random.choices(range(1, 7), k=4)\n print(roll4_ship)\n if set(cc).issubset(roll4_ship):\n result_ship_cc = [i for i in roll4_ship if not i in\n cc or cc.remove(i)]\n total_ship_cc = sum(result_ship_cc)\n inp_ship_cc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_ship_cc == 'both':\n print('Your total is: ' + str(total_ship_cc) + '.')\n if inp_ship_cc == 'neither':\n roll2_cc = random.choices(range(1, 7), k=2)\n print(roll2_cc)\n total_ship_cc_none = sum(roll2_cc)\n print('Your total score is: ' + str(\n total_ship_cc_none) + '.')\n if inp_ship_cc == 'one':\n inp_ship_cc_one = input(\n 'Which die do you want to keep? ')\n roll1_ship_cc_one = random.randint(1, 6)\n print(roll1_ship_cc_one)\n total_ship_cc_one = roll1_ship_cc_one + int(\n inp_ship_cc_one)\n print('Your total score is: ' + str(\n total_ship_cc_one) + '.')\n if set(captain).issubset(roll4_ship):\n roll3_captain = random.choices(range(1, 7), k=3)\n print(roll3_captain)\n if set(crew).issubset(roll3_captain):\n result_ship_captain = [i for i in roll3_captain if\n not i in crew or crew.remove(i)]\n total_ship_captain = sum(result_ship_captain)\n print('Your total score is: ' + str(\n total_ship_captain) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n else:\n n = n + 1\n\n\ninp = input('How many players are there? ')\nplayers = range(int(inp))\nroll_dict = dict()\nfor i in players:\n multi()\n", "step-4": "import random\n\n\ndef multi():\n scc = [6, 5, 4]\n sc = [6, 5]\n cc = [5, 4]\n crew = [4]\n captain = [5]\n ship = [6]\n n = 0\n while n <= 2:\n inp = input('Hit enter to roll')\n if inp == '':\n roll5 = random.choices(range(1, 7), k=5)\n print(roll5)\n if set(scc).issubset(roll5):\n result_scc = [i for i in roll5 if not i in scc or scc.remove(i)\n ]\n total_scc = sum(result_scc)\n inp_scc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc == 'both':\n print('Total score: ' + str(total_scc) + '.')\n if inp_scc == 'neither':\n roll2_scc = random.choices(range(1, 7), k=2)\n print(roll2_scc)\n inp_scc_none = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_scc_none == 'both':\n total_scc_none = sum(roll2_scc)\n print('Total score: ' + str(total_scc_none) + '.')\n if inp_scc_none == 'neither':\n roll2_scc_none = random.choices(range(1, 7), k=2)\n total_scc_none2 = sum(roll2_scc_none)\n print(roll2_scc_none)\n print('Your total score is: ' + str(total_scc_none2\n ) + '.')\n if inp_scc_none == 'one':\n inp_scc_none_one = input(\n 'Which die do you want to keep? 
')\n roll1_scc_none_one = random.randint(1, 6)\n total_scc_none_one = roll1_scc_none_one + int(\n inp_scc_none_one)\n print(roll1_scc_none_one)\n print('Your total score is: ' + str(\n total_scc_none_one) + '.')\n if inp_scc == 'one':\n inp_scc_one = input('Which die do you want to keep? ')\n roll1_scc_one = random.randint(1, 6)\n print(roll1_scc_one)\n total_scc_one = roll1_scc_one + int(inp_scc_one)\n inp_scc_one2 = input(\n \"Hit enter to roll again or type 'pass' to keep your score \"\n )\n if inp_scc_one2 == 'pass':\n print('Your total score is: ' + str(total_scc_one) +\n '.')\n if inp_scc_one2 == '':\n roll1_scc_one2 = random.randint(1, 6)\n print(roll1_scc_one2)\n total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)\n print('Your total score is: ' + str(total_scc_one2) +\n '.')\n if set(sc).issubset(roll5):\n inp_sc = input(\n 'Now you need a 4(the Crew). Hit enter to roll the remaining dice'\n )\n if inp_sc == '':\n roll3 = random.choices(range(1, 7), k=3)\n print(roll3)\n if set(crew).issubset(roll3):\n result_crew = [i for i in roll3 if not i in crew or\n crew.remove(i)]\n total_crew = sum(result_crew)\n inp_crew = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_crew == 'both':\n print('Total score: ' + str(total_crew) + '.')\n if inp_crew == 'neither':\n roll2_crew = random.choices(range(1, 7), k=2)\n print(roll2_crew)\n total_crew_none = sum(roll2_crew)\n print('Your total score is: ' + str(\n total_crew_none) + '.')\n if inp_crew == 'one':\n inp_crew_one = input(\n 'Which die do you want to keep? ')\n roll1_crew_one = random.randint(1, 6)\n print(roll1_crew_one)\n total_crew_one = roll1_crew_one + int(inp_crew_one)\n print('Your total score is: ' + str(\n total_crew_one) + '.')\n else:\n inp_sc3 = input('Still no 4. Hit enter to roll again')\n if inp_sc3 == '':\n roll3_sc3 = random.choices(range(1, 7), k=3)\n print(roll3_sc3)\n if set(crew).issubset(roll3_sc3):\n result_crew_sc3 = [i for i in roll3_sc3 if \n not i in crew or crew.remove(i)]\n total_crew_sc3 = sum(result_crew_sc3)\n print('Your total score is: ' + str(\n total_crew_sc3) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:\n inp_ship = input(\n 'Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice '\n )\n if inp_ship == '':\n roll4_ship = random.choices(range(1, 7), k=4)\n print(roll4_ship)\n if set(cc).issubset(roll4_ship):\n result_ship_cc = [i for i in roll4_ship if not i in\n cc or cc.remove(i)]\n total_ship_cc = sum(result_ship_cc)\n inp_ship_cc = input(\n 'Do you wish to keep one, both, or neither of the remaining dice? '\n )\n if inp_ship_cc == 'both':\n print('Your total is: ' + str(total_ship_cc) + '.')\n if inp_ship_cc == 'neither':\n roll2_cc = random.choices(range(1, 7), k=2)\n print(roll2_cc)\n total_ship_cc_none = sum(roll2_cc)\n print('Your total score is: ' + str(\n total_ship_cc_none) + '.')\n if inp_ship_cc == 'one':\n inp_ship_cc_one = input(\n 'Which die do you want to keep? 
')\n roll1_ship_cc_one = random.randint(1, 6)\n print(roll1_ship_cc_one)\n total_ship_cc_one = roll1_ship_cc_one + int(\n inp_ship_cc_one)\n print('Your total score is: ' + str(\n total_ship_cc_one) + '.')\n if set(captain).issubset(roll4_ship):\n roll3_captain = random.choices(range(1, 7), k=3)\n print(roll3_captain)\n if set(crew).issubset(roll3_captain):\n result_ship_captain = [i for i in roll3_captain if\n not i in crew or crew.remove(i)]\n total_ship_captain = sum(result_ship_captain)\n print('Your total score is: ' + str(\n total_ship_captain) + '.')\n else:\n print(\n \"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\"\n )\n else:\n n = n + 1\n\n\ninp = input('How many players are there? ')\nplayers = range(int(inp))\nroll_dict = dict()\nfor i in players:\n multi()\n", "step-5": "import random\ndef multi():\n\n scc = [6, 5, 4]\n sc = [6, 5]\n cc = [5, 4]\n crew = [4]\n captain = [5]\n ship = [6]\n n = 0\n while n <= 2:\n inp = input(\"Hit enter to roll\")\n if inp == \"\":\n roll5 = random.choices(range(1, 7), k=5)\n print(roll5)\n if set(scc).issubset(roll5):\n result_scc = [i for i in roll5 if not i in scc or scc.remove(i)]\n total_scc = sum(result_scc)\n inp_scc = input(\"Do you wish to keep one, both, or neither of the remaining dice? \")\n if inp_scc == \"both\":\n print(\"Total score: \" + str(total_scc) + \".\")\n if inp_scc == \"neither\":\n roll2_scc = random.choices(range(1, 7), k=2)\n print(roll2_scc)\n inp_scc_none = input(\"Do you wish to keep one, both, or neither of the remaining dice? \")\n if inp_scc_none == \"both\":\n total_scc_none = sum(roll2_scc)\n print(\"Total score: \" + str(total_scc_none) + \".\")\n if inp_scc_none == \"neither\":\n roll2_scc_none = random.choices(range(1, 7), k=2)\n total_scc_none2 = sum(roll2_scc_none)\n print(roll2_scc_none)\n print(\"Your total score is: \" + str(total_scc_none2) + \".\")\n if inp_scc_none == \"one\":\n inp_scc_none_one = input(\"Which die do you want to keep? \")\n roll1_scc_none_one = random.randint(1, 6)\n total_scc_none_one = roll1_scc_none_one + int(inp_scc_none_one)\n print(roll1_scc_none_one)\n print(\"Your total score is: \" + str(total_scc_none_one) + \".\")\n if inp_scc == \"one\":\n inp_scc_one = input(\"Which die do you want to keep? \")\n roll1_scc_one = random.randint(1, 6)\n print(roll1_scc_one)\n total_scc_one = roll1_scc_one + int(inp_scc_one)\n inp_scc_one2 = input(\"Hit enter to roll again or type 'pass' to keep your score \")\n if inp_scc_one2 == \"pass\":\n print(\"Your total score is: \" + str(total_scc_one) + \".\")\n if inp_scc_one2 == \"\":\n roll1_scc_one2 = random.randint(1, 6)\n print(roll1_scc_one2)\n total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)\n print(\"Your total score is: \" + str(total_scc_one2) + \".\")\n if set(sc).issubset(roll5):\n inp_sc = input(\"Now you need a 4(the Crew). Hit enter to roll the remaining dice\")\n if inp_sc == \"\":\n roll3 = random.choices(range(1, 7), k=3)\n print(roll3)\n if set(crew).issubset(roll3):\n result_crew = [i for i in roll3 if not i in crew or crew.remove(i)]\n total_crew = sum(result_crew)\n inp_crew = input(\"Do you wish to keep one, both, or neither of the remaining dice? 
\")\n if inp_crew == \"both\":\n print(\"Total score: \" + str(total_crew) + \".\")\n if inp_crew == \"neither\":\n roll2_crew = random.choices(range(1, 7), k=2)\n print(roll2_crew)\n total_crew_none = sum(roll2_crew)\n print(\"Your total score is: \" + str(total_crew_none) + \".\")\n if inp_crew == \"one\":\n inp_crew_one = input(\"Which die do you want to keep? \")\n roll1_crew_one = random.randint(1, 6)\n print(roll1_crew_one)\n total_crew_one = roll1_crew_one + int(inp_crew_one)\n print(\"Your total score is: \" + str(total_crew_one) + \".\")\n else:\n inp_sc3 = input(\"Still no 4. Hit enter to roll again\")\n if inp_sc3 == \"\":\n roll3_sc3 = random.choices(range(1, 7), k=3)\n print(roll3_sc3)\n if set(crew).issubset(roll3_sc3):\n result_crew_sc3 = [i for i in roll3_sc3 if not i in crew or crew.remove(i)]\n total_crew_sc3 = sum(result_crew_sc3)\n print(\"Your total score is: \" + str(total_crew_sc3) + \".\")\n else:\n print(\"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\")\n if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:\n inp_ship = input(\n \"Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice \")\n if inp_ship == \"\":\n roll4_ship = random.choices(range(1, 7), k=4)\n print(roll4_ship)\n if set(cc).issubset(roll4_ship):\n result_ship_cc = [i for i in roll4_ship if not i in cc or cc.remove(i)]\n total_ship_cc = sum(result_ship_cc)\n inp_ship_cc = input(\"Do you wish to keep one, both, or neither of the remaining dice? \")\n if inp_ship_cc == \"both\":\n print(\"Your total is: \" + str(total_ship_cc) + \".\")\n if inp_ship_cc == \"neither\":\n roll2_cc = random.choices(range(1, 7), k=2)\n print(roll2_cc)\n total_ship_cc_none = sum(roll2_cc)\n print(\"Your total score is: \" + str(total_ship_cc_none) + \".\")\n if inp_ship_cc == \"one\":\n inp_ship_cc_one = input(\"Which die do you want to keep? \")\n roll1_ship_cc_one = random.randint(1, 6)\n print(roll1_ship_cc_one)\n total_ship_cc_one = roll1_ship_cc_one + int(inp_ship_cc_one)\n print(\"Your total score is: \" + str(total_ship_cc_one) + \".\")\n if set(captain).issubset(roll4_ship):\n roll3_captain = random.choices(range(1, 7), k=3)\n print(roll3_captain)\n if set(crew).issubset(roll3_captain):\n result_ship_captain = [i for i in roll3_captain if not i in crew or crew.remove(i)]\n total_ship_captain = sum(result_ship_captain)\n print(\"Your total score is: \" + str(total_ship_captain) + \".\")\n else:\n print(\"Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.\")\n else:\n n = n + 1\n\n\ninp = input(\"How many players are there? \")\nplayers = range(int(inp))\nroll_dict = dict()\nfor i in players:\n multi()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#----------------------------
#						 |
# Instagram Bot- Devesh Kr. Verma
# instagram- @felon_tpf
#							|
#----------------------------

from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
import random
import string
from time import sleep
from selenium import webdriver
#Change this list to your wanted comments (what you wnat to comment on posts)
comments = ['Please Visite on my page take a look if you like please follow ', 'Nice post- just follow me @eyetunities ', 'loool very nice!-want to earn money just follow me @eyetunities ', 'I like it!-follow me for daily motivational post on your wall', 'Super ;)-follow me guys @eyetunities ', 'hmmm,interesting-follow me for daily money earning tips ', ' wow- follow me for online money earning tips ', 'amazing post dude-also check out my profile , for Online money earning tips ', 'learn something new - follow me @eyetunities ', 'Mind blowing - follow for money earning tips Online money ', 'I like it , great post- follow my page please -daily money earning tips ', ]

#This variables to keep tracking of the posts
posts=0

#Chromedriver path. Make sure to have the same Chromedriver version as your Google Chrome browser
browser = webdriver.Chrome(executable_path= r"D:\pythonlearn\python_projects\chromedriver.exe") # <----- ENTER PATH HERE

browser.get(('https://www.instagram.com/accounts/login/?source=auth_switcher'))
sleep(2)


def likeAndComm(): # Likes and Comments the first 9 posts
	global posts
	for y in range (1,4):
		for x in range(1,4):
			post = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/div/div['+str(y)+']/div['+str(x)+']')
			browser.implicitly_wait(1)
			post.click()
			sleep(2)
			postLike = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]').click()
			#postLike.click()
			print("Post liked")
			sleep(2)
			#comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click()
			print("click1")
			sleep(3)
			comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click()
			print("click2")
			comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea').send_keys(random.choice(comments))
			print("send1-Writing comment")
			sleep(3)
			sendComment = browser.find_element_by_xpath("//button[@type='submit']")
			sendComment.click()
			print("click3-Comment-posted")
			print("searching for new post, searching...")
			sleep(4)
			posts+=1
			closePost=browser.find_element_by_xpath('/html/body/div[4]/div[3]/button/div')
			closePost.click()
			sleep(3)
		print ('No. of posts: ' +str(posts))

	sleep(5)
	browser.get('https://www.instagram.com/explore/')
	sleep(6)
	likeAndComm()


def start():

	username = browser.find_element_by_name('username')
	username.send_keys('Username') # <- INSERT YOUR INSTAGRAM USERNAME HERE
	password = browser.find_element_by_name('password')
	password.send_keys('Password') # <- INSERT YOUR INSTAGRAM PASSWORD HERE
	nextButton = browser.find_element_by_xpath("//button[@type='submit']")
	nextButton.click()
	sleep(4)
	notification = browser.find_element_by_xpath("//button[contains(text(), 'Not Now')]")
	notification.click()
	browser.get('https://www.instagram.com/explore/')
	sleep(6)
	likeAndComm() # likeAndComm function
	sleep(5)


#Start the programm
start()
normal
{ "blob_id": "6d18aa585c656b244d1e4272caa8419c04b20b6c", "index": 2363, "step-1": "<mask token>\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\n<mask token>\n", "step-2": "<mask token>\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n '/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. 
of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n", "step-3": "<mask token>\ncomments = ['Please Visite on my page take a look if you like please follow ',\n 'Nice post- just follow me @eyetunities ',\n 'loool very nice!-want to earn money just follow me @eyetunities ',\n 'I like it!-follow me for daily motivational post on your wall',\n 'Super ;)-follow me guys @eyetunities ',\n 'hmmm,interesting-follow me for daily money earning tips ',\n ' wow- follow me for online money earning tips ',\n 'amazing post dude-also check out my profile , for Online money earning tips '\n , 'learn something new - follow me @eyetunities ',\n 'Mind blowing - follow for money earning tips Online money ',\n 'I like it , great post- follow my page please -daily money earning tips ']\nposts = 0\nbrowser = webdriver.Chrome(executable_path=\n 'D:\\\\pythonlearn\\\\python_projects\\\\chromedriver.exe')\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n '/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. 
of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n", "step-4": "from selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nimport random\nimport string\nfrom time import sleep\nfrom selenium import webdriver\ncomments = ['Please Visite on my page take a look if you like please follow ',\n 'Nice post- just follow me @eyetunities ',\n 'loool very nice!-want to earn money just follow me @eyetunities ',\n 'I like it!-follow me for daily motivational post on your wall',\n 'Super ;)-follow me guys @eyetunities ',\n 'hmmm,interesting-follow me for daily money earning tips ',\n ' wow- follow me for online money earning tips ',\n 'amazing post dude-also check out my profile , for Online money earning tips '\n , 'learn something new - follow me @eyetunities ',\n 'Mind blowing - follow for money earning tips Online money ',\n 'I like it , great post- follow my page please -daily money earning tips ']\nposts = 0\nbrowser = webdriver.Chrome(executable_path=\n 'D:\\\\pythonlearn\\\\python_projects\\\\chromedriver.exe')\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n '/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. 
of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n", "step-5": "#----------------------------\n#\t\t\t\t\t\t |\n# Instagram Bot- Devesh Kr. Verma \n# instagram- @felon_tpf\t\n#\t\t\t\t\t\t\t|\n#----------------------------\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nimport random\nimport string\nfrom time import sleep\nfrom selenium import webdriver\n#Change this list to your wanted comments (what you wnat to comment on posts)\ncomments = ['Please Visite on my page take a look if you like please follow ', 'Nice post- just follow me @eyetunities ', 'loool very nice!-want to earn money just follow me @eyetunities ', 'I like it!-follow me for daily motivational post on your wall', 'Super ;)-follow me guys @eyetunities ', 'hmmm,interesting-follow me for daily money earning tips ', ' wow- follow me for online money earning tips ', 'amazing post dude-also check out my profile , for Online money earning tips ', 'learn something new - follow me @eyetunities ', 'Mind blowing - follow for money earning tips Online money ', 'I like it , great post- follow my page please -daily money earning tips ', ]\n\n#This variables to keep tracking of the posts \nposts=0\n\n#Chromedriver path. 
Make sure to have the same Chromedriver version as your Google Chrome browser\nbrowser = webdriver.Chrome(executable_path= r\"D:\\pythonlearn\\python_projects\\chromedriver.exe\") # <----- ENTER PATH HERE \n\nbrowser.get(('https://www.instagram.com/accounts/login/?source=auth_switcher'))\nsleep(2) \n\t\n\ndef likeAndComm(): # Likes and Comments the first 9 posts\n\tglobal posts\n\tfor y in range (1,4):\n\t\tfor x in range(1,4):\n\t\t\tpost = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/div/div['+str(y)+']/div['+str(x)+']') \n\t\t\tbrowser.implicitly_wait(1) \n\t\t\tpost.click()\n\t\t\tsleep(2)\n\t\t\tpostLike = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]').click()\n\t\t\t#postLike.click()\n\t\t\tprint(\"Post liked\") \n\t\t\tsleep(2)\n\t\t\t#comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click() \n\t\t\tprint(\"click1\")\n\t\t\tsleep(3)\n\t\t\tcomment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click() \n\t\t\tprint(\"click2\")\n\t\t\tcomment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea').send_keys(random.choice(comments))\t\n\t\t\tprint(\"send1-Writing comment\")\n\t\t\tsleep(3)\n\t\t\tsendComment = browser.find_element_by_xpath(\"//button[@type='submit']\") \n\t\t\tsendComment.click()\n\t\t\tprint(\"click3-Comment-posted\")\n\t\t\tprint(\"searching for new post, searching...\")\n\t\t\tsleep(4)\n\t\t\tposts+=1\n\t\t\tclosePost=browser.find_element_by_xpath('/html/body/div[4]/div[3]/button/div')\n\t\t\tclosePost.click()\n\t\t\tsleep(3)\n\t\tprint ('No. of posts: ' +str(posts))\n\t\n\tsleep(5)\n\tbrowser.get('https://www.instagram.com/explore/')\n\tsleep(6)\n\tlikeAndComm()\n\t\n\t\t\ndef start():\n\t\n\tusername = browser.find_element_by_name('username')\n\tusername.send_keys('Username') # <- INSERT YOUR INSTAGRAM USERNAME HERE\n\tpassword = browser.find_element_by_name('password')\n\tpassword.send_keys('Password') # <- INSERT YOUR INSTAGRAM PASSWORD HERE\n\tnextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n\tnextButton.click()\n\tsleep(4)\n\tnotification = browser.find_element_by_xpath(\"//button[contains(text(), 'Not Now')]\")\n\tnotification.click()\n\tbrowser.get('https://www.instagram.com/explore/')\n\tsleep(6)\n\tlikeAndComm() # likeAndComm function \n\tsleep(5)\n\t\n\t\n#Start the programm\nstart()\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
from dataclasses import dataclass
from datetime import date


@dataclass
class Book:
    id: int
    title: str
    author: str
    genre: str
    published: date
    status: str = 'Available'

    def __str__(self):
        return f'{self.id}: {self.title} by {self.author}'

    def get_more_information(self):
        return f"Gatunek: {self.genre}\nData publikacji: {self.published}\nStatus: {self.status}"
normal
{ "blob_id": "dc13ca17bff8e2a5254c7758bd7274926bafd454", "index": 5312, "step-1": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n <mask token>\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n", "step-3": "<mask token>\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n", "step-4": "from dataclasses import dataclass\nfrom datetime import date\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return (\n f'Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}'\n )\n", "step-5": "from dataclasses import dataclass\nfrom datetime import date\n\n\n@dataclass\nclass Book:\n id: int\n title: str\n author: str\n genre: str\n published: date\n status: str = 'Available'\n\n def __str__(self):\n return f'{self.id}: {self.title} by {self.author}'\n\n def get_more_information(self):\n return f\"Gatunek: {self.genre}\\nData publikacji: {self.published}\\nStatus: {self.status}\"\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import torch
from training import PointNetTrain, PointAugmentTrain, Model
#from PointAugment.Augment.config import opts
from data_utils.dataloader import DataLoaderClass
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import yaml

def visualize_batch(pointclouds, pred_labels, labels, categories):
    batch_size = len(pointclouds)
    fig = plt.figure(figsize=(8, batch_size / 2))

    ncols = 5
    nrows = max(1, batch_size // 5)
    for idx, pc in enumerate(pointclouds):
        label = categories[int(labels[idx].item())]
        pred = categories[int(pred_labels[idx])]
        colour = 'g' if label == pred else 'r'
        pc = pc.cpu().numpy()
        ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')
        ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)
        ax.axis('off')
        ax.set_title('GT: {0}\nPred: {1}'.format(label, pred))

    plt.show()


if __name__ == '__main__':
    with open("config.yaml", "r") as yamlfile:
        config = yaml.load(yamlfile, Loader=yaml.FullLoader)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # PointNet
    training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)
    modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
    #training_instance_2.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
    training_instance_2.test(modelnet10_dataloader.validloader)

    # Point Augment
    #training_instance_1 = PointAugmentTrain(config['MODEL']['POINT_AUGMENT'], device)
    #modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
    #training_instance_1.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
    #training_instance_1.test(modelnet10_dataloader.validloader)
normal
{ "blob_id": "0ced42c8bfaad32fc2b397326150e6c7bc5cedab", "index": 4991, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\nif __name__ == '__main__':\n with open('config.yaml', 'r') as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],\n config['MODEL']['POINTNET']['TRAINING'])\n training_instance_2.test(modelnet10_dataloader.validloader)\n", "step-4": "import torch\nfrom training import PointNetTrain, PointAugmentTrain, Model\nfrom data_utils.dataloader import DataLoaderClass\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\nif __name__ == '__main__':\n with open('config.yaml', 'r') as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],\n config['MODEL']['POINTNET']['TRAINING'])\n training_instance_2.test(modelnet10_dataloader.validloader)\n", "step-5": "import torch\nfrom training import PointNetTrain, PointAugmentTrain, Model\n#from PointAugment.Augment.config import opts\nfrom data_utils.dataloader import DataLoaderClass\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, 
batch_size / 2))\n\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n\n plt.show()\n\n\nif __name__ == '__main__':\n with open(\"config.yaml\", \"r\") as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # PointNet\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING']) \n #training_instance_2.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)\n training_instance_2.test(modelnet10_dataloader.validloader)\n\n # Point Augment\n #training_instance_1 = PointAugmentTrain(config['MODEL']['POINT_AUGMENT'], device)\n #modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING']) \n #training_instance_1.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)\n #training_instance_1.test(modelnet10_dataloader.validloader)\n\n\n \n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import RPi.GPIO as GPIO
import numpy as np
import array
import time
import json

import LED_GPIO as led
import BUTTON_GPIO as btn
import parseJson as gjs

rndBtnState = False
interval = .1

rndbtn = gjs.getJsonRnd()

gpioValues = gjs.getJsonData()

strArray = gpioValues[0]
btnArray = gpioValues[1]
ledArray = gpioValues[2]
clrArray = gpioValues[3]

arraySize = len(btnArray)
frequencyArray = [0.0] * arraySize
btnStateArray = [False] * arraySize

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(True)

led.init_LED_pins(ledArray, clrArray)
btn.init_BTN_pins(btnArray, rndbtn)

try:
    while True:

        time.sleep(interval)

        btnStateArray = btn.checkButtons(btnArray, btnStateArray, ledArray)
        rndBtnState = btn.checkRndButton(rndBtnState, rndbtn, btnStateArray)

        if(rndBtnState):
            frequencyArray = led.randomLights(ledArray, frequencyArray)
            led.rnd_RGB_ON(clrArray)
        else:
            led.rnd_RGB_OFF(clrArray)
            led.setLEDs(strArray, btnStateArray, ledArray)

        led.getfrequency(btnStateArray, frequencyArray)

except KeyboardInterrupt:

    print "\n"
    print '%-7s %-7s %-10s' % ('color','occurrences','percent')
    print '--------------------------------------------'
    #testing just how random python's random module is
    index = 0
    total = sum(frequencyArray)

#    print tabulate([strArray, frequencyArray], 'color', ' occurrences')
    for index, occurrences in enumerate(frequencyArray):
        s = strArray[index]
        print '%-7s %12d %-0.2f' % (strArray[index], occurrences, occurrences/total * 100), "%"
        index+=1
    print "\n"
    print "Total : ", total


finally:
    GPIO.cleanup()
normal
{ "blob_id": "1b741b34649193b64479724670244d258cfbbdfc", "index": 5055, "step-1": "import RPi.GPIO as GPIO\nimport numpy as np\nimport array\nimport time\nimport json\n\nimport LED_GPIO as led \nimport BUTTON_GPIO as btn\nimport parseJson as gjs\n\nrndBtnState = False\ninterval = .1\n\nrndbtn = gjs.getJsonRnd()\n\ngpioValues = gjs.getJsonData()\n\nstrArray = gpioValues[0]\nbtnArray = gpioValues[1]\nledArray = gpioValues[2]\nclrArray = gpioValues[3]\n\narraySize = len(btnArray)\nfrequencyArray = [0.0] * arraySize\nbtnStateArray = [False] * arraySize\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(True)\n\nled.init_LED_pins(ledArray, clrArray)\nbtn.init_BTN_pins(btnArray, rndbtn)\n\ntry:\n while True:\n\n time.sleep(interval)\n \n btnStateArray = btn.checkButtons(btnArray, btnStateArray, ledArray)\n rndBtnState = btn.checkRndButton(rndBtnState, rndbtn, btnStateArray)\n\n if(rndBtnState):\n frequencyArray = led.randomLights(ledArray, frequencyArray)\n led.rnd_RGB_ON(clrArray)\n else:\n led.rnd_RGB_OFF(clrArray)\n led.setLEDs(strArray, btnStateArray, ledArray)\n\n led.getfrequency(btnStateArray, frequencyArray)\n\nexcept KeyboardInterrupt:\n\n print \"\\n\"\n print '%-7s %-7s %-10s' % ('color','occurrences','percent')\n print '--------------------------------------------'\n #testing just how random python's random module is\n index = 0\n total = sum(frequencyArray)\n\n# print tabulate([strArray, frequencyArray], 'color', ' occurrences')\n for index, occurrences in enumerate(frequencyArray):\n s = strArray[index]\n print '%-7s %12d %-0.2f' % (strArray[index], occurrences, occurrences/total * 100), \"%\"\n index+=1\n print \"\\n\"\n print \"Total : \", total\n\n\nfinally: \n GPIO.cleanup()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> setuptools.setup(name='cppersist', install_requires=['Eve']) <|reserved_special_token_1|> import setuptools setuptools.setup(name='cppersist', install_requires=['Eve'])
flexible
{ "blob_id": "4f1956b34ac3b55b2d40220b79816c139b4a2f5c", "index": 9574, "step-1": "<mask token>\n", "step-2": "<mask token>\nsetuptools.setup(name='cppersist', install_requires=['Eve'])\n", "step-3": "import setuptools\nsetuptools.setup(name='cppersist', install_requires=['Eve'])\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import socket

END = bytearray()
END.append(255)
print(END[0])


def recvall(sock): # Odbiór danych
    BUFF_SIZE = 4096 # 4 KiB
    data = b''
    while True: # odbieramy dane, pakiety 4KiB
        part = sock.recv(BUFF_SIZE)
        data += part
        if len(part) < BUFF_SIZE:
            # 0 lub koniec danych
            break
    return data


def create_dict(data): # Odczytuje otrzymany słownik
    dict = {}
    i = 0
    while True:
        dict[chr(data[i])] = ''
        j = 1
        while data[i + j] != END[0]: # Dopóki nie znajdzie FF, uznaje bajty za 'kod' slowa
            dict[chr(data[i])] += str(chr(data[i + j]))
            j += 1

        i += 1 + j
        if data[i] == END[0] and data[i + 1] == END[0]: # Gdy znajdzie 3x FF, kończy słownik
            break
    return dict


def extract_start(data): # Poszukuje pącztka segmentu danych
    i = 0
    while True:
        if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0]:
            return i + 3
        i += 1


def bytes_to_bits(data, begin): # Zamienia bajty na znakowy odpowiednik w bitach
    bits = ''
    for i in range(begin, len(data)):
        bits += format(data[i], "08b")
    return bits


def data_to_extract(data, dict): # Otrzymane dane na podstawie slownika odczytuje do tekstu
    begin = extract_start(data) # Szukamy początku tekstu
    print(begin)
    data = bytes_to_bits(data, begin)
    dict = {y: x for x, y in dict.items()} # Zamiana kluczy z wartością w słowniku
    text = ''
    temp_code = ''
    for i in range(len(data)): # Dla kazdego bitu
        temp_code += data[i]
        if temp_code in dict: # Szukamy czy utworzona tymczasowo zmienna nie zawiera się
            # w słowniku
            text += dict[temp_code]
            temp_code = ''
    return text


def recieve_data(codedpath, decodedpath, ip, port):
    port = int(port) #Segment odpowiedzialny za utworzenie połaczenia przy użyciu gniazda
    sock = socket.socket()
    sock.bind((ip, int(port)))
    sock.listen()
    conn, addr = sock.accept()
    print('Połączono:', addr)
    rec_data = recvall(conn) #Odbierz dane
    rec_dict = create_dict(rec_data) #Utwórz słownik z danych
    extracted = data_to_extract(rec_data, rec_dict) #Na podstawie słownika, odkoduj tekst

    print("ODEBRANY SLOWNIK\n")
    print(rec_dict)
    print(extracted)

    f = open(codedpath, "wb") #Zapis otrzymanych danych
    f.write(rec_data)
    f.close()
    f = open(decodedpath, "w")
    f.write(extracted)
    f.close()
    return 0
normal
{ "blob_id": "aa13278a4686e9bab7948c2f212f87f9bd6eee00", "index": 969, "step-1": "<mask token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\n<mask token>\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n", "step-2": "<mask token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n", "step-3": "<mask token>\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n 
if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n", "step-4": "import socket\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n", "step-5": "import socket\n\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock): # Odbiór danych\n BUFF_SIZE = 4096 # 4 KiB\n data = b''\n while True: # odbieramy dane, pakiety 4KiB\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n # 0 lub koniec danych\n break\n return data\n\n\ndef create_dict(data): # Odczytuje otrzymany słownik\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]: # Dopóki nie znajdzie FF, uznaje bajty za 'kod' slowa\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]: # Gdy znajdzie 3x FF, kończy słownik\n break\n return dict\n\n\ndef extract_start(data): # Poszukuje pącztka segmentu danych\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == 
END[0] and data[i + 2] == END[0]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin): # Zamienia bajty na znakowy odpowiednik w bitach\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], \"08b\")\n return bits\n\n\ndef data_to_extract(data, dict): # Otrzymane dane na podstawie slownika odczytuje do tekstu\n begin = extract_start(data) # Szukamy początku tekstu\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()} # Zamiana kluczy z wartością w słowniku\n text = ''\n temp_code = ''\n for i in range(len(data)): # Dla kazdego bitu\n temp_code += data[i]\n if temp_code in dict: # Szukamy czy utworzona tymczasowo zmienna nie zawiera się\n # w słowniku\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port) #Segment odpowiedzialny za utworzenie połaczenia przy użyciu gniazda\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn) #Odbierz dane\n rec_dict = create_dict(rec_data) #Utwórz słownik z danych\n extracted = data_to_extract(rec_data, rec_dict) #Na podstawie słownika, odkoduj tekst\n\n print(\"ODEBRANY SLOWNIK\\n\")\n print(rec_dict)\n print(extracted)\n\n f = open(codedpath, \"wb\") #Zapis otrzymanych danych\n f.write(rec_data)\n f.close()\n f = open(decodedpath, \"w\")\n f.write(extracted)\n f.close()\n return 0\n", "step-ids": [ 5, 6, 8, 9, 10 ] }
[ 5, 6, 8, 9, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> import FWCore.ParameterSet.Config as cms from RecoTracker.MeasurementDet.UpdaterService_cfi import * from RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *
flexible
{ "blob_id": "e79505e802a06f091bbb12708c45e04c4e80da60", "index": 7618, "step-1": "<mask token>\n", "step-2": "import FWCore.ParameterSet.Config as cms\nfrom RecoTracker.MeasurementDet.UpdaterService_cfi import *\nfrom RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> class Autoencoder(function.Function): <|reserved_special_token_0|> def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' <|reserved_special_token_0|> def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def check_type_backward(self, in_types, out_types): type_check.expect(in_types.size() == 1, out_types.size() == 1) x_type, = in_types y_type, = out_types type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]')) <|reserved_special_token_0|> def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode = h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx, <|reserved_special_token_1|> <|reserved_special_token_0|> class Autoencoder(function.Function): def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1, bias=0, initialW=None, initial_bias1=None, initial_bias2=None): self.W = None self.gW = None self.b1 = None self.b2 = None self.gb1 = None self.gb2 = None self.activation = None if initialW is not None: assert initialW.shape == (hidden_size, in_size) self.W = initialW else: self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 / in_size), (hidden_size, in_size)).astype(numpy.float32) xp = cuda.get_array_module(self.W) self.gW = xp.full_like(self.W, numpy.nan) if initial_bias1 is not None: assert initial_bias1.shape == (hidden_size,) self.b1 = initial_bias1 else: self.b1 = numpy.repeat(numpy.float32(bias), hidden_size) if initial_bias2 is not None: assert initial_bias2.shape == (in_size,) self.b2 = initial_bias2 else: self.b2 = numpy.repeat(numpy.float32(bias), in_size) self.gb1 = xp.empty_like(self.b1) self.gb2 = xp.empty_like(self.b2) if activation is not None: if activation == Sigmoid: self.activation = activation() else: self.activation = activation def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' @property def gradient_names(self): return 'gW', 'gb1', 'gb2' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types 
type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def check_type_backward(self, in_types, out_types): type_check.expect(in_types.size() == 1, out_types.size() == 1) x_type, = in_types y_type, = out_types type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]')) <|reserved_special_token_0|> def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode = h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx, <|reserved_special_token_1|> <|reserved_special_token_0|> class Autoencoder(function.Function): def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1, bias=0, initialW=None, initial_bias1=None, initial_bias2=None): self.W = None self.gW = None self.b1 = None self.b2 = None self.gb1 = None self.gb2 = None self.activation = None if initialW is not None: assert initialW.shape == (hidden_size, in_size) self.W = initialW else: self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 / in_size), (hidden_size, in_size)).astype(numpy.float32) xp = cuda.get_array_module(self.W) self.gW = xp.full_like(self.W, numpy.nan) if initial_bias1 is not None: assert initial_bias1.shape == (hidden_size,) self.b1 = initial_bias1 else: self.b1 = numpy.repeat(numpy.float32(bias), hidden_size) if initial_bias2 is not None: assert initial_bias2.shape == (in_size,) self.b2 = initial_bias2 else: self.b2 = numpy.repeat(numpy.float32(bias), in_size) self.gb1 = xp.empty_like(self.b1) self.gb2 = xp.empty_like(self.b2) if activation is not None: if activation == Sigmoid: self.activation = activation() else: self.activation = activation def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' @property def gradient_names(self): return 'gW', 'gb1', 'gb2' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def check_type_backward(self, in_types, out_types): type_check.expect(in_types.size() == 1, out_types.size() == 1) x_type, = in_types y_type, = out_types type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], 
y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def zero_grads(self): self.gW.fill(0) self.gb1.fill(0) self.gb2.fill(0) def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode = h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx, <|reserved_special_token_1|> <|reserved_special_token_0|> def _as_mat(x): if x.ndim == 2: return x return x.reshape(len(x), -1) class Autoencoder(function.Function): def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1, bias=0, initialW=None, initial_bias1=None, initial_bias2=None): self.W = None self.gW = None self.b1 = None self.b2 = None self.gb1 = None self.gb2 = None self.activation = None if initialW is not None: assert initialW.shape == (hidden_size, in_size) self.W = initialW else: self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 / in_size), (hidden_size, in_size)).astype(numpy.float32) xp = cuda.get_array_module(self.W) self.gW = xp.full_like(self.W, numpy.nan) if initial_bias1 is not None: assert initial_bias1.shape == (hidden_size,) self.b1 = initial_bias1 else: self.b1 = numpy.repeat(numpy.float32(bias), hidden_size) if initial_bias2 is not None: assert initial_bias2.shape == (in_size,) self.b2 = initial_bias2 else: self.b2 = numpy.repeat(numpy.float32(bias), in_size) self.gb1 = xp.empty_like(self.b1) self.gb2 = xp.empty_like(self.b2) if activation is not None: if activation == Sigmoid: self.activation = activation() else: self.activation = activation def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' @property def gradient_names(self): return 'gW', 'gb1', 'gb2' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def check_type_backward(self, in_types, out_types): type_check.expect(in_types.size() == 1, out_types.size() == 1) x_type, = in_types y_type, = out_types type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def zero_grads(self): self.gW.fill(0) self.gb1.fill(0) self.gb2.fill(0) def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode 
= h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx, <|reserved_special_token_1|> import math from chainer import cuda from chainer import function from chainer.functions import Sigmoid from chainer.utils import type_check import numpy def _as_mat(x): if x.ndim == 2: return x return x.reshape(len(x), -1) class Autoencoder(function.Function): def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1, bias=0, initialW=None, initial_bias1=None, initial_bias2=None): self.W = None self.gW = None self.b1 = None self.b2 = None self.gb1 = None self.gb2 = None self.activation = None if initialW is not None: assert initialW.shape == (hidden_size, in_size) self.W = initialW else: self.W = numpy.random.normal( 0, wscale * math.sqrt(1. / in_size), (hidden_size, in_size)).astype(numpy.float32) xp = cuda.get_array_module(self.W) self.gW = xp.full_like(self.W, numpy.nan) if initial_bias1 is not None: assert initial_bias1.shape == (hidden_size,) self.b1 = initial_bias1 else: self.b1 = numpy.repeat(numpy.float32(bias), hidden_size) if initial_bias2 is not None: assert initial_bias2.shape == (in_size,) self.b2 = initial_bias2 else: self.b2 = numpy.repeat(numpy.float32(bias), in_size) self.gb1 = xp.empty_like(self.b1) self.gb2 = xp.empty_like(self.b2) if activation is not None: if activation == Sigmoid: self.activation = activation() else: self.activation = activation def hidden(self, x): h = _Encoder(self.W, self.b1)(x) if self.activation is not None: h = self.activation(h) h.unchain_backward() return h @property def parameter_names(self): return 'W', 'b1', 'b2' @property def gradient_names(self): return 'gW', 'gb1', 'gb2' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect( x_type.dtype == numpy.float32, x_type.ndim >= 2, (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')), ) def check_type_backward(self, in_types, out_types): type_check.expect( in_types.size() == 1, out_types.size() == 1, ) x_type, = in_types y_type, = out_types type_check.expect( y_type.dtype == numpy.float32, y_type.ndim == 2, y_type.shape[0] == x_type.shape[0], y_type.shape[1] == type_check.Variable(self.W.shape[1], 'W.shape[1]'), ) def zero_grads(self): self.gW.fill(0) self.gb1.fill(0) self.gb2.fill(0) def forward(self, x): _x = _as_mat(x[0]) Wx = _x.dot(self.W.T) Wx += self.b1 self.x_activation = Wx if self.activation is not None: h, = self.activation.forward([Wx]) else: h = Wx self.x_decode = h y = h.dot(self.W) y += self.b2 return y, def backward(self, x, gy): _x = self.x_decode _gy = gy[0] self.gW += _x.T.dot(_gy) self.gb2 += _gy.sum(0) _gy = _gy.dot(self.W.T).reshape(_x.shape) if 
self.activation is not None: _gy, = self.activation.backward([self.x_activation], [_gy]) _x = _as_mat(x[0]) self.gW += _gy.T.dot(_x) self.gb1 += _gy.sum(0) return _gy.dot(self.W).reshape(x[0].shape), # undifferentiable Linear function class _Encoder(function.Function): def __init__(self, initialW, initial_Bias): self.W = initialW self.b = initial_Bias def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) x_type, = in_types type_check.expect( x_type.dtype == numpy.float32, x_type.ndim >= 2, (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) == type_check.Variable(self.W.shape[1], 'W.shape[1]')), ) def forward(self, x): x = _as_mat(x[0]) Wx = x.dot(self.W.T) Wx += self.b return Wx,
flexible
{ "blob_id": "97eb599ae8bf726d827d6f8313b7cf2838f9c125", "index": 4098, "step-1": "<mask token>\n\n\nclass Autoencoder(function.Function):\n <mask token>\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n <mask token>\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n <mask token>\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-2": "<mask token>\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n 
@property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n <mask token>\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-3": "<mask token>\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n 
type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-4": "<mask token>\n\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid, wscale=1,\n bias=0, initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(0, wscale * math.sqrt(1.0 /\n in_size), (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 
'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(in_types.size() == 1, out_types.size() == 1)\n x_type, = in_types\n y_type, = out_types\n type_check.expect(y_type.dtype == numpy.float32, y_type.ndim == 2, \n y_type.shape[0] == x_type.shape[0], y_type.shape[1] ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n return _gy.dot(self.W).reshape(x[0].shape),\n\n\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype == numpy.float32, x_type.ndim >= 2, \n type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]'))\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-5": "import math\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions import Sigmoid\nfrom chainer.utils import type_check\n\nimport numpy\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\nclass Autoencoder(function.Function):\n\n def __init__(self, in_size, hidden_size, activation=Sigmoid,\n wscale=1, bias=0,\n initialW=None, initial_bias1=None, initial_bias2=None):\n self.W = None\n self.gW = None\n self.b1 = None\n self.b2 = None\n self.gb1 = None\n self.gb2 = None\n self.activation = None\n\n if initialW is not None:\n assert initialW.shape == (hidden_size, in_size)\n self.W = initialW\n else:\n self.W = numpy.random.normal(\n 0, wscale * math.sqrt(1. 
/ in_size),\n (hidden_size, in_size)).astype(numpy.float32)\n xp = cuda.get_array_module(self.W)\n self.gW = xp.full_like(self.W, numpy.nan)\n\n if initial_bias1 is not None:\n assert initial_bias1.shape == (hidden_size,)\n self.b1 = initial_bias1\n else:\n self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)\n\n if initial_bias2 is not None:\n assert initial_bias2.shape == (in_size,)\n self.b2 = initial_bias2\n else:\n self.b2 = numpy.repeat(numpy.float32(bias), in_size)\n\n self.gb1 = xp.empty_like(self.b1)\n self.gb2 = xp.empty_like(self.b2)\n\n if activation is not None:\n if activation == Sigmoid:\n self.activation = activation()\n else:\n self.activation = activation\n\n def hidden(self, x):\n h = _Encoder(self.W, self.b1)(x)\n if self.activation is not None:\n h = self.activation(h)\n h.unchain_backward()\n return h\n\n @property\n def parameter_names(self):\n return 'W', 'b1', 'b2'\n\n @property\n def gradient_names(self):\n return 'gW', 'gb1', 'gb2'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim >= 2,\n (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]')),\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() == 1,\n out_types.size() == 1,\n )\n x_type, = in_types\n y_type, = out_types\n\n type_check.expect(\n y_type.dtype == numpy.float32,\n y_type.ndim == 2,\n y_type.shape[0] == x_type.shape[0],\n y_type.shape[1] == type_check.Variable(self.W.shape[1],\n 'W.shape[1]'),\n )\n\n def zero_grads(self):\n self.gW.fill(0)\n self.gb1.fill(0)\n self.gb2.fill(0)\n\n def forward(self, x):\n _x = _as_mat(x[0])\n Wx = _x.dot(self.W.T)\n Wx += self.b1\n\n self.x_activation = Wx\n if self.activation is not None:\n h, = self.activation.forward([Wx])\n else:\n h = Wx\n self.x_decode = h\n y = h.dot(self.W)\n y += self.b2\n\n return y,\n\n def backward(self, x, gy):\n _x = self.x_decode\n _gy = gy[0]\n self.gW += _x.T.dot(_gy)\n self.gb2 += _gy.sum(0)\n _gy = _gy.dot(self.W.T).reshape(_x.shape)\n\n if self.activation is not None:\n _gy, = self.activation.backward([self.x_activation], [_gy])\n\n _x = _as_mat(x[0])\n self.gW += _gy.T.dot(_x)\n self.gb1 += _gy.sum(0)\n\n return _gy.dot(self.W).reshape(x[0].shape),\n\n# undifferentiable Linear function\nclass _Encoder(function.Function):\n\n def __init__(self, initialW, initial_Bias):\n self.W = initialW\n self.b = initial_Bias\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim >= 2,\n (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==\n type_check.Variable(self.W.shape[1], 'W.shape[1]')),\n )\n\n def forward(self, x):\n x = _as_mat(x[0])\n Wx = x.dot(self.W.T)\n Wx += self.b\n return Wx,\n", "step-ids": [ 11, 13, 14, 15, 17 ] }
[ 11, 13, 14, 15, 17 ]
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nemo_text_processing.text_normalization.en.graph_utils import ( NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order, ) from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope from nemo_text_processing.text_normalization.es.utils import get_abs_path try: import pynini from pynini.lib import pynutil fem = pynini.string_file((get_abs_path("data/money/currency_plural_fem.tsv"))) masc = pynini.string_file((get_abs_path("data/money/currency_plural_masc.tsv"))) fem_singular = pynini.project(fem, "input") masc_singular = pynini.project(masc, "input") fem_plural = pynini.project(fem, "output") masc_plural = pynini.project(masc, "output") PYNINI_AVAILABLE = True except (ModuleNotFoundError, ImportError): fem_plural = None masc_plural = None fem_singular = None masc_singular = None PYNINI_AVAILABLE = False class MoneyFst(GraphFst): """ Finite state transducer for verbalizing money, e.g. money { currency_maj: "euro" integer_part: "un"} -> "un euro" money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques" money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique" Args: decimal: GraphFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, decimal: GraphFst, deterministic: bool = True): super().__init__(name="money", kind="verbalize", deterministic=deterministic) maj_singular_masc = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular) + pynutil.delete("\"") ) maj_singular_fem = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular) + pynutil.delete("\"") ) maj_plural_masc = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural) + pynutil.delete("\"") ) maj_plural_fem = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural) + pynutil.delete("\"") ) maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular) + pynutil.delete("\"") ) min_singular_fem = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular) + pynutil.delete("\"") ) min_plural_masc = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural) + pynutil.delete("\"") ) min_plural_fem = ( 
pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural) + pynutil.delete("\"") ) min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = ( pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") ) integer_part = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") optional_add_and = pynini.closure(pynutil.insert(pynini.union("con ", "y ")), 0, 1) # *** currency_maj graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc # *** currency_maj + (***) | ((con) *** current_min) graph_integer_with_minor_masc = ( graph_integer_masc + NEMO_SPACE + pynini.union( optional_add_and + strip_cardinal_apocope(fractional_part), (optional_add_and + fractional_part + NEMO_SPACE + min_masc), (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem), ) # Could be minor currency that is different gender + delete_preserve_order ) graph_integer_with_minor_fem = ( graph_integer_fem + NEMO_SPACE + pynini.union( optional_add_and + shift_cardinal_gender(fractional_part), (optional_add_and + fractional_part + NEMO_SPACE + min_masc), (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem), ) # Could be minor currency that is different gender + delete_preserve_order ) graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc ## *** coma *** currency_maj graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity # can still have "x billions" with fem currency graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = ( pynini.cdrewrite( pynutil.insert(" de"), "quantity: \"" + pynini.closure(NEMO_NOT_QUOTE, 1), "\"", NEMO_SIGMA ) @ graph_decimal ) # formally it's millones/billones de *** # *** current_min graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize()
normal
{ "blob_id": "dccdca65cce2959b07657636e23e7c9ab8a4f96c", "index": 1382, "step-1": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n <mask token>\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = 
self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-2": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n 
graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-3": "<mask token>\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n 
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-4": "from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno 
coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = 
pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-5": "# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom nemo_text_processing.text_normalization.en.graph_utils import (\n NEMO_NOT_QUOTE,\n NEMO_SIGMA,\n NEMO_SPACE,\n GraphFst,\n delete_preserve_order,\n)\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\n\ntry:\n import pynini\n from pynini.lib import pynutil\n\n fem = pynini.string_file((get_abs_path(\"data/money/currency_plural_fem.tsv\")))\n masc = pynini.string_file((get_abs_path(\"data/money/currency_plural_masc.tsv\")))\n\n fem_singular = pynini.project(fem, \"input\")\n masc_singular = pynini.project(masc, \"input\")\n\n fem_plural = pynini.project(fem, \"output\")\n masc_plural = pynini.project(masc, \"output\")\n\n PYNINI_AVAILABLE = True\n\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n\n fem_singular = None\n masc_singular = None\n\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool = True):\n super().__init__(name=\"money\", kind=\"verbalize\", deterministic=deterministic)\n\n maj_singular_masc = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n maj_singular_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_plural_masc = (\n 
pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n maj_plural_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable\n maj_fem = maj_plural_fem | maj_singular_fem\n\n min_singular_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n min_singular_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n min_plural_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n min_plural_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + pynutil.delete(\"\\\"\")\n )\n\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n\n fractional_part = (\n pynutil.delete(\"fractional_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n )\n\n integer_part = pynutil.delete(\"integer_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\"con \", \"y \")), 0, 1)\n\n # *** currency_maj\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem\n\n graph_integer = graph_integer_fem | graph_integer_masc\n\n # *** currency_maj + (***) | ((con) *** current_min)\n graph_integer_with_minor_masc = (\n graph_integer_masc\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + strip_cardinal_apocope(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor_fem = (\n graph_integer_fem\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + shift_cardinal_gender(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc\n\n ## *** coma *** currency_maj\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity # can still have \"x billions\" with fem currency\n graph_decimal_fem += NEMO_SPACE + maj_fem\n\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = (\n pynini.cdrewrite(\n pynutil.insert(\" de\"), \"quantity: \\\"\" + pynini.closure(NEMO_NOT_QUOTE, 1), \"\\\"\", NEMO_SIGMA\n )\n @ graph_decimal\n ) # formally it's millones/billones de ***\n\n # *** current_min\n graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order\n graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order\n\n graph_minor = graph_minor_fem | graph_minor_masc\n\n graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor\n\n delete_tokens = 
self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def n_gram_hash(hash_bits=16, ngram_length=1, skip_length=0, all_lengths= True, seed=314489979, ordered=True, invert_hash=0, **params): """ **Description** Extracts NGrams from text and convert them to vector using hashing trick. :param hash_bits: Number of bits to hash into. Must be between 1 and 30, inclusive. (settings). :param ngram_length: Ngram length (settings). :param skip_length: Maximum number of tokens to skip when constructing an ngram (settings). :param all_lengths: Whether to include all ngram lengths up to ngramLength or only ngramLength (settings). :param seed: Hashing seed (settings). :param ordered: Whether the position of each source column should be included in the hash (when there are multiple source columns). (settings). :param invert_hash: Limit the number of keys used to generate the slot name to this many. 0 means no invert hashing, -1 means no limit. (settings). """ entrypoint_name = 'NGramHash' settings = {} if hash_bits is not None: settings['HashBits'] = try_set(obj=hash_bits, none_acceptable=True, is_of_type=numbers.Real) if ngram_length is not None: settings['NgramLength'] = try_set(obj=ngram_length, none_acceptable =True, is_of_type=numbers.Real) if skip_length is not None: settings['SkipLength'] = try_set(obj=skip_length, none_acceptable= True, is_of_type=numbers.Real) if all_lengths is not None: settings['AllLengths'] = try_set(obj=all_lengths, none_acceptable= True, is_of_type=bool) if seed is not None: settings['Seed'] = try_set(obj=seed, none_acceptable=True, is_of_type=numbers.Real) if ordered is not None: settings['Ordered'] = try_set(obj=ordered, none_acceptable=True, is_of_type=bool) if invert_hash is not None: settings['InvertHash'] = try_set(obj=invert_hash, none_acceptable= True, is_of_type=numbers.Real) component = Component(name=entrypoint_name, settings=settings, kind= 'NgramExtractor') return component <|reserved_special_token_1|> <|reserved_special_token_0|> import numbers from ..utils.entrypoints import Component from ..utils.utils import try_set def n_gram_hash(hash_bits=16, ngram_length=1, skip_length=0, all_lengths= True, seed=314489979, ordered=True, invert_hash=0, **params): """ **Description** Extracts NGrams from text and convert them to vector using hashing trick. :param hash_bits: Number of bits to hash into. Must be between 1 and 30, inclusive. (settings). :param ngram_length: Ngram length (settings). :param skip_length: Maximum number of tokens to skip when constructing an ngram (settings). :param all_lengths: Whether to include all ngram lengths up to ngramLength or only ngramLength (settings). :param seed: Hashing seed (settings). :param ordered: Whether the position of each source column should be included in the hash (when there are multiple source columns). (settings). :param invert_hash: Limit the number of keys used to generate the slot name to this many. 0 means no invert hashing, -1 means no limit. (settings). 
""" entrypoint_name = 'NGramHash' settings = {} if hash_bits is not None: settings['HashBits'] = try_set(obj=hash_bits, none_acceptable=True, is_of_type=numbers.Real) if ngram_length is not None: settings['NgramLength'] = try_set(obj=ngram_length, none_acceptable =True, is_of_type=numbers.Real) if skip_length is not None: settings['SkipLength'] = try_set(obj=skip_length, none_acceptable= True, is_of_type=numbers.Real) if all_lengths is not None: settings['AllLengths'] = try_set(obj=all_lengths, none_acceptable= True, is_of_type=bool) if seed is not None: settings['Seed'] = try_set(obj=seed, none_acceptable=True, is_of_type=numbers.Real) if ordered is not None: settings['Ordered'] = try_set(obj=ordered, none_acceptable=True, is_of_type=bool) if invert_hash is not None: settings['InvertHash'] = try_set(obj=invert_hash, none_acceptable= True, is_of_type=numbers.Real) component = Component(name=entrypoint_name, settings=settings, kind= 'NgramExtractor') return component <|reserved_special_token_1|> # - Generated by tools/entrypoint_compiler.py: do not edit by hand """ NGramHash """ import numbers from ..utils.entrypoints import Component from ..utils.utils import try_set def n_gram_hash( hash_bits=16, ngram_length=1, skip_length=0, all_lengths=True, seed=314489979, ordered=True, invert_hash=0, **params): """ **Description** Extracts NGrams from text and convert them to vector using hashing trick. :param hash_bits: Number of bits to hash into. Must be between 1 and 30, inclusive. (settings). :param ngram_length: Ngram length (settings). :param skip_length: Maximum number of tokens to skip when constructing an ngram (settings). :param all_lengths: Whether to include all ngram lengths up to ngramLength or only ngramLength (settings). :param seed: Hashing seed (settings). :param ordered: Whether the position of each source column should be included in the hash (when there are multiple source columns). (settings). :param invert_hash: Limit the number of keys used to generate the slot name to this many. 0 means no invert hashing, -1 means no limit. (settings). """ entrypoint_name = 'NGramHash' settings = {} if hash_bits is not None: settings['HashBits'] = try_set( obj=hash_bits, none_acceptable=True, is_of_type=numbers.Real) if ngram_length is not None: settings['NgramLength'] = try_set( obj=ngram_length, none_acceptable=True, is_of_type=numbers.Real) if skip_length is not None: settings['SkipLength'] = try_set( obj=skip_length, none_acceptable=True, is_of_type=numbers.Real) if all_lengths is not None: settings['AllLengths'] = try_set( obj=all_lengths, none_acceptable=True, is_of_type=bool) if seed is not None: settings['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if ordered is not None: settings['Ordered'] = try_set( obj=ordered, none_acceptable=True, is_of_type=bool) if invert_hash is not None: settings['InvertHash'] = try_set( obj=invert_hash, none_acceptable=True, is_of_type=numbers.Real) component = Component( name=entrypoint_name, settings=settings, kind='NgramExtractor') return component
flexible
{ "blob_id": "fb1974ad7ac9ae54344812814cb95a7fccfefc66", "index": 5880, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef n_gram_hash(hash_bits=16, ngram_length=1, skip_length=0, all_lengths=\n True, seed=314489979, ordered=True, invert_hash=0, **params):\n \"\"\"\n **Description**\n Extracts NGrams from text and convert them to vector using hashing\n trick.\n\n :param hash_bits: Number of bits to hash into. Must be between 1\n and 30, inclusive. (settings).\n :param ngram_length: Ngram length (settings).\n :param skip_length: Maximum number of tokens to skip when\n constructing an ngram (settings).\n :param all_lengths: Whether to include all ngram lengths up to\n ngramLength or only ngramLength (settings).\n :param seed: Hashing seed (settings).\n :param ordered: Whether the position of each source column should\n be included in the hash (when there are multiple source\n columns). (settings).\n :param invert_hash: Limit the number of keys used to generate the\n slot name to this many. 0 means no invert hashing, -1 means\n no limit. (settings).\n \"\"\"\n entrypoint_name = 'NGramHash'\n settings = {}\n if hash_bits is not None:\n settings['HashBits'] = try_set(obj=hash_bits, none_acceptable=True,\n is_of_type=numbers.Real)\n if ngram_length is not None:\n settings['NgramLength'] = try_set(obj=ngram_length, none_acceptable\n =True, is_of_type=numbers.Real)\n if skip_length is not None:\n settings['SkipLength'] = try_set(obj=skip_length, none_acceptable=\n True, is_of_type=numbers.Real)\n if all_lengths is not None:\n settings['AllLengths'] = try_set(obj=all_lengths, none_acceptable=\n True, is_of_type=bool)\n if seed is not None:\n settings['Seed'] = try_set(obj=seed, none_acceptable=True,\n is_of_type=numbers.Real)\n if ordered is not None:\n settings['Ordered'] = try_set(obj=ordered, none_acceptable=True,\n is_of_type=bool)\n if invert_hash is not None:\n settings['InvertHash'] = try_set(obj=invert_hash, none_acceptable=\n True, is_of_type=numbers.Real)\n component = Component(name=entrypoint_name, settings=settings, kind=\n 'NgramExtractor')\n return component\n", "step-3": "<mask token>\nimport numbers\nfrom ..utils.entrypoints import Component\nfrom ..utils.utils import try_set\n\n\ndef n_gram_hash(hash_bits=16, ngram_length=1, skip_length=0, all_lengths=\n True, seed=314489979, ordered=True, invert_hash=0, **params):\n \"\"\"\n **Description**\n Extracts NGrams from text and convert them to vector using hashing\n trick.\n\n :param hash_bits: Number of bits to hash into. Must be between 1\n and 30, inclusive. (settings).\n :param ngram_length: Ngram length (settings).\n :param skip_length: Maximum number of tokens to skip when\n constructing an ngram (settings).\n :param all_lengths: Whether to include all ngram lengths up to\n ngramLength or only ngramLength (settings).\n :param seed: Hashing seed (settings).\n :param ordered: Whether the position of each source column should\n be included in the hash (when there are multiple source\n columns). (settings).\n :param invert_hash: Limit the number of keys used to generate the\n slot name to this many. 0 means no invert hashing, -1 means\n no limit. 
(settings).\n \"\"\"\n entrypoint_name = 'NGramHash'\n settings = {}\n if hash_bits is not None:\n settings['HashBits'] = try_set(obj=hash_bits, none_acceptable=True,\n is_of_type=numbers.Real)\n if ngram_length is not None:\n settings['NgramLength'] = try_set(obj=ngram_length, none_acceptable\n =True, is_of_type=numbers.Real)\n if skip_length is not None:\n settings['SkipLength'] = try_set(obj=skip_length, none_acceptable=\n True, is_of_type=numbers.Real)\n if all_lengths is not None:\n settings['AllLengths'] = try_set(obj=all_lengths, none_acceptable=\n True, is_of_type=bool)\n if seed is not None:\n settings['Seed'] = try_set(obj=seed, none_acceptable=True,\n is_of_type=numbers.Real)\n if ordered is not None:\n settings['Ordered'] = try_set(obj=ordered, none_acceptable=True,\n is_of_type=bool)\n if invert_hash is not None:\n settings['InvertHash'] = try_set(obj=invert_hash, none_acceptable=\n True, is_of_type=numbers.Real)\n component = Component(name=entrypoint_name, settings=settings, kind=\n 'NgramExtractor')\n return component\n", "step-4": "# - Generated by tools/entrypoint_compiler.py: do not edit by hand\n\"\"\"\nNGramHash\n\"\"\"\n\nimport numbers\n\nfrom ..utils.entrypoints import Component\nfrom ..utils.utils import try_set\n\n\ndef n_gram_hash(\n hash_bits=16,\n ngram_length=1,\n skip_length=0,\n all_lengths=True,\n seed=314489979,\n ordered=True,\n invert_hash=0,\n **params):\n \"\"\"\n **Description**\n Extracts NGrams from text and convert them to vector using hashing\n trick.\n\n :param hash_bits: Number of bits to hash into. Must be between 1\n and 30, inclusive. (settings).\n :param ngram_length: Ngram length (settings).\n :param skip_length: Maximum number of tokens to skip when\n constructing an ngram (settings).\n :param all_lengths: Whether to include all ngram lengths up to\n ngramLength or only ngramLength (settings).\n :param seed: Hashing seed (settings).\n :param ordered: Whether the position of each source column should\n be included in the hash (when there are multiple source\n columns). (settings).\n :param invert_hash: Limit the number of keys used to generate the\n slot name to this many. 0 means no invert hashing, -1 means\n no limit. (settings).\n \"\"\"\n\n entrypoint_name = 'NGramHash'\n settings = {}\n\n if hash_bits is not None:\n settings['HashBits'] = try_set(\n obj=hash_bits,\n none_acceptable=True,\n is_of_type=numbers.Real)\n if ngram_length is not None:\n settings['NgramLength'] = try_set(\n obj=ngram_length,\n none_acceptable=True,\n is_of_type=numbers.Real)\n if skip_length is not None:\n settings['SkipLength'] = try_set(\n obj=skip_length,\n none_acceptable=True,\n is_of_type=numbers.Real)\n if all_lengths is not None:\n settings['AllLengths'] = try_set(\n obj=all_lengths,\n none_acceptable=True,\n is_of_type=bool)\n if seed is not None:\n settings['Seed'] = try_set(\n obj=seed,\n none_acceptable=True,\n is_of_type=numbers.Real)\n if ordered is not None:\n settings['Ordered'] = try_set(\n obj=ordered, none_acceptable=True, is_of_type=bool)\n if invert_hash is not None:\n settings['InvertHash'] = try_set(\n obj=invert_hash,\n none_acceptable=True,\n is_of_type=numbers.Real)\n\n component = Component(\n name=entrypoint_name,\n settings=settings,\n kind='NgramExtractor')\n return component\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
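A minimal usage sketch for the n_gram_hash factory in the record above, assuming the nimbusml-style Component and try_set helpers it imports resolve normally; the argument values are illustrative only.

# Build an NgramExtractor component description: 2-grams hashed into 2**20 slots,
# every other setting left at the defaults shown in the signature.
ngram_component = n_gram_hash(hash_bits=20, ngram_length=2)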
'''
Given a binary tree, find its maximum depth.

The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.

Note: A leaf is a node with no children.
'''


# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):

    def maxDepth_helper(self, root, cur_d):
        # cur_d counts the nodes on the path so far, including the current node.
        if root.left is None and root.right is None:
            self.depth.append(cur_d)
            return
        elif root.left is not None and root.right is None:
            self.maxDepth_helper(root.left, cur_d + 1)
        elif root.right is not None and root.left is None:
            self.maxDepth_helper(root.right, cur_d + 1)
        else:
            self.maxDepth_helper(root.left, cur_d + 1)
            self.maxDepth_helper(root.right, cur_d + 1)

    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        self.depth = []  # reset per call instead of sharing a class-level list
        self.maxDepth_helper(root, 1)
        return max(self.depth)
normal
{ "blob_id": "fa081ccd8081f5c3319f482b7d8abd7415d8e757", "index": 1273, "step-1": "'''\nGiven a binary tree, find its maximum depth.\n\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\nNote: A leaf is a node with no children.\n\n'''\n\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n\n depth = []\n\n def maxDepth_helper(self, root, cur_d):\n\n if root.left is None and root.right is None:\n self.depth.append(cur_d)\n return\n\n elif root.left is not None and root.right is None:\n self.maxDepth_helper(root.left, cur_d += 1)\n\n elif root.right is not None and root.left is None:\n self.maxDepth_helper(root.right, cur_d += 1)\n\n else:\n self.maxDepth_helper(root.left, cur_d += 1)\n self.maxDepth_helper(root.right, cur_d += 1)\n\n\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.maxDepth_helper(root, 0)\n\n return max(depth)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
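A quick illustrative check of the corrected Solution shown in the code column above; the three-node tree is hypothetical and not part of the original record.

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
print(Solution().maxDepth(root))  # 2: the root plus one child on the longest path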
# Generated by Django 3.0.4 on 2020-03-29 19:51

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('index', '0003_auto_20200330_0444'),
    ]

    operations = [
        migrations.AlterField(
            model_name='information',
            name='comment',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='information',
            name='picture',
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
        migrations.AlterField(
            model_name='myclass',
            name='day',
            field=models.CharField(blank=True, max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='myclass',
            name='period',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='myclass',
            name='place',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
normal
{ "blob_id": "72c1226d40b3cdce29ef28493344c3cf68892149", "index": 6001, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('index', '0003_auto_20200330_0444')]\n operations = [migrations.AlterField(model_name='information', name=\n 'comment', field=models.CharField(blank=True, max_length=200, null=\n True)), migrations.AlterField(model_name='information', name=\n 'picture', field=models.ImageField(blank=True, null=True, upload_to\n ='images/')), migrations.AlterField(model_name='myclass', name=\n 'day', field=models.CharField(blank=True, max_length=1, null=True)),\n migrations.AlterField(model_name='myclass', name='period', field=\n models.CharField(blank=True, max_length=10, null=True)), migrations\n .AlterField(model_name='myclass', name='place', field=models.\n CharField(blank=True, max_length=50, null=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('index', '0003_auto_20200330_0444')]\n operations = [migrations.AlterField(model_name='information', name=\n 'comment', field=models.CharField(blank=True, max_length=200, null=\n True)), migrations.AlterField(model_name='information', name=\n 'picture', field=models.ImageField(blank=True, null=True, upload_to\n ='images/')), migrations.AlterField(model_name='myclass', name=\n 'day', field=models.CharField(blank=True, max_length=1, null=True)),\n migrations.AlterField(model_name='myclass', name='period', field=\n models.CharField(blank=True, max_length=10, null=True)), migrations\n .AlterField(model_name='myclass', name='place', field=models.\n CharField(blank=True, max_length=50, null=True))]\n", "step-5": "# Generated by Django 3.0.4 on 2020-03-29 19:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('index', '0003_auto_20200330_0444'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='information',\n name='comment',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='information',\n name='picture',\n field=models.ImageField(blank=True, null=True, upload_to='images/'),\n ),\n migrations.AlterField(\n model_name='myclass',\n name='day',\n field=models.CharField(blank=True, max_length=1, null=True),\n ),\n migrations.AlterField(\n model_name='myclass',\n name='period',\n field=models.CharField(blank=True, max_length=10, null=True),\n ),\n migrations.AlterField(\n model_name='myclass',\n name='place',\n field=models.CharField(blank=True, max_length=50, null=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file('../tg.glade') self.window = builder.get_object('window1') self.text_area = builder.get_object('text_entry') self.window.show() self.opcao = '' builder.connect_signals({'gtk_main_quit': gtk.main_quit, 'on_button_analisar_clicked': self.analisar_frase, 'on_button_clear_clicked': self.clear_text, 'on_button_dilma_clicked': self.opcao_dilma, 'on_button_copa_clicked': self.opcao_copa, 'on_button_palmeiras_clicked': self.opcao_palmeiras, 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show': self.sad_show}) <|reserved_special_token_0|> def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text('') def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao = 'dilma' def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao = 'copa' <|reserved_special_token_0|> def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao = 'fatec' <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file('../tg.glade') self.window = builder.get_object('window1') self.text_area = builder.get_object('text_entry') self.window.show() self.opcao = '' builder.connect_signals({'gtk_main_quit': gtk.main_quit, 'on_button_analisar_clicked': self.analisar_frase, 'on_button_clear_clicked': self.clear_text, 'on_button_dilma_clicked': self.opcao_dilma, 'on_button_copa_clicked': self.opcao_copa, 'on_button_palmeiras_clicked': self.opcao_palmeiras, 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show': self.sad_show}) <|reserved_special_token_0|> def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text('') def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao = 'dilma' def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao = 'copa' def opcao_palmeiras(self, widget): """Função: para definir a opcao Palmeiras""" self.opcao = 'palmeiras' def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao = 'fatec' def sad_show(self, widget): """Função: para definir se imagem Sad ira aparecer""" self.visible = True <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file('../tg.glade') self.window = builder.get_object('window1') self.text_area = builder.get_object('text_entry') self.window.show() self.opcao = '' builder.connect_signals({'gtk_main_quit': gtk.main_quit, 'on_button_analisar_clicked': self.analisar_frase, 'on_button_clear_clicked': self.clear_text, 'on_button_dilma_clicked': self.opcao_dilma, 'on_button_copa_clicked': self.opcao_copa, 'on_button_palmeiras_clicked': self.opcao_palmeiras, 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show': self.sad_show}) def analisar_frase(self, widget): """Função: analisar a frase que o usuário""" frase = self.text_area.get_text() if frase != '': frase_proc = normalizar(frase) self.text_area.set_text(frase) if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'): print('Opcao: %s ' % self.opcao) featureList = gera_lista_features(self.opcao) lista_feature_fell = get_lista_feature_fell() features_msg = 
getFeatureVector(frase_proc) training_set = apply_features(extract_features, lista_feature_fell) fell = avaliar_Sentimento(features_msg, training_set) print('Sentimento: %s ' % fell) def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text('') def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao = 'dilma' def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao = 'copa' def opcao_palmeiras(self, widget): """Função: para definir a opcao Palmeiras""" self.opcao = 'palmeiras' def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao = 'fatec' def sad_show(self, widget): """Função: para definir se imagem Sad ira aparecer""" self.visible = True <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> pygtk.require('2.0') <|reserved_special_token_0|> class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file('../tg.glade') self.window = builder.get_object('window1') self.text_area = builder.get_object('text_entry') self.window.show() self.opcao = '' builder.connect_signals({'gtk_main_quit': gtk.main_quit, 'on_button_analisar_clicked': self.analisar_frase, 'on_button_clear_clicked': self.clear_text, 'on_button_dilma_clicked': self.opcao_dilma, 'on_button_copa_clicked': self.opcao_copa, 'on_button_palmeiras_clicked': self.opcao_palmeiras, 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show': self.sad_show}) def analisar_frase(self, widget): """Função: analisar a frase que o usuário""" frase = self.text_area.get_text() if frase != '': frase_proc = normalizar(frase) self.text_area.set_text(frase) if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'): print('Opcao: %s ' % self.opcao) featureList = gera_lista_features(self.opcao) lista_feature_fell = get_lista_feature_fell() features_msg = getFeatureVector(frase_proc) training_set = apply_features(extract_features, lista_feature_fell) fell = avaliar_Sentimento(features_msg, training_set) print('Sentimento: %s ' % fell) def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text('') def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao = 'dilma' def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao = 'copa' def opcao_palmeiras(self, widget): """Função: para definir a opcao Palmeiras""" self.opcao = 'palmeiras' def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao = 'fatec' def sad_show(self, widget): """Função: para definir se imagem Sad ira aparecer""" self.visible = True if __name__ == '__main__': app = tgApp() gtk.main() <|reserved_special_token_1|> #!/usr/bin/env python #-*- coding: utf-8 -*- import pygtk pygtk.require("2.0") import gtk from testarMsg import * class tgApp(object): def __init__(self): builder = gtk.Builder() builder.add_from_file("../tg.glade") self.window = builder.get_object("window1") self.text_area = builder.get_object("text_entry") self.window.show() self.opcao = "" builder.connect_signals({"gtk_main_quit": gtk.main_quit, "on_button_analisar_clicked": self.analisar_frase, "on_button_clear_clicked": self.clear_text, "on_button_dilma_clicked": self.opcao_dilma, "on_button_copa_clicked": self.opcao_copa, "on_button_palmeiras_clicked": self.opcao_palmeiras, "on_button_fatec_clicked": self.opcao_fatec, "on_sad_show": self.sad_show, }) def analisar_frase(self, widget): 
"""Função: analisar a frase que o usuário""" frase = self.text_area.get_text() if ( frase != ""): frase_proc= normalizar(frase) self.text_area.set_text(frase) if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'): print("Opcao: %s "%self.opcao) featureList = gera_lista_features(self.opcao) lista_feature_fell = get_lista_feature_fell() features_msg = getFeatureVector(frase_proc) training_set = apply_features(extract_features,lista_feature_fell) fell = avaliar_Sentimento(features_msg,training_set) print ("Sentimento: %s "%fell) def clear_text(self, widget): """Função: para apagar o texto na área de texto""" self.text_area.set_text("") def opcao_dilma(self, widget): """Função: para definir a opcao Dilma""" self.opcao="dilma" def opcao_copa(self, widget): """Função: para definir a opcao Copa""" self.opcao="copa" def opcao_palmeiras(self, widget): """Função: para definir a opcao Palmeiras""" self.opcao="palmeiras" def opcao_fatec(self, widget): """Função: para definir a opcao Fatec""" self.opcao="fatec" def sad_show(self,widget): """Função: para definir se imagem Sad ira aparecer""" self.visible=True if __name__ == "__main__": app = tgApp() gtk.main()
flexible
{ "blob_id": "6b6fac3bfb1b1478dd491fc4dd9c45a19aeb7bd8", "index": 6102, "step-1": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n <mask token>\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n <mask token>\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n <mask token>\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n\n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if frase != '':\n frase_proc = normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao 
==\n 'palmeiras' or self.opcao == 'fatec'):\n print('Opcao: %s ' % self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,\n lista_feature_fell)\n fell = avaliar_Sentimento(features_msg, training_set)\n print('Sentimento: %s ' % fell)\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\n<mask token>\n", "step-4": "<mask token>\npygtk.require('2.0')\n<mask token>\n\n\nclass tgApp(object):\n\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file('../tg.glade')\n self.window = builder.get_object('window1')\n self.text_area = builder.get_object('text_entry')\n self.window.show()\n self.opcao = ''\n builder.connect_signals({'gtk_main_quit': gtk.main_quit,\n 'on_button_analisar_clicked': self.analisar_frase,\n 'on_button_clear_clicked': self.clear_text,\n 'on_button_dilma_clicked': self.opcao_dilma,\n 'on_button_copa_clicked': self.opcao_copa,\n 'on_button_palmeiras_clicked': self.opcao_palmeiras,\n 'on_button_fatec_clicked': self.opcao_fatec, 'on_sad_show':\n self.sad_show})\n\n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if frase != '':\n frase_proc = normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao ==\n 'palmeiras' or self.opcao == 'fatec'):\n print('Opcao: %s ' % self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,\n lista_feature_fell)\n fell = avaliar_Sentimento(features_msg, training_set)\n print('Sentimento: %s ' % fell)\n\n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text('')\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao = 'dilma'\n\n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao = 'copa'\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao = 'palmeiras'\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao = 'fatec'\n\n def sad_show(self, widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible = True\n\n\nif __name__ == '__main__':\n app = tgApp()\n gtk.main()\n", "step-5": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport pygtk\npygtk.require(\"2.0\")\nimport gtk\nfrom testarMsg import *\n\n\nclass tgApp(object):\n def __init__(self):\n builder = gtk.Builder()\n builder.add_from_file(\"../tg.glade\")\n self.window = builder.get_object(\"window1\")\n self.text_area = builder.get_object(\"text_entry\")\n self.window.show()\n 
self.opcao = \"\"\n builder.connect_signals({\"gtk_main_quit\": gtk.main_quit,\n \"on_button_analisar_clicked\": self.analisar_frase,\n \"on_button_clear_clicked\": self.clear_text,\n \"on_button_dilma_clicked\": self.opcao_dilma,\n \"on_button_copa_clicked\": self.opcao_copa,\n \"on_button_palmeiras_clicked\": self.opcao_palmeiras,\n \"on_button_fatec_clicked\": self.opcao_fatec,\n \"on_sad_show\": self.sad_show,\n })\n \n def analisar_frase(self, widget):\n \"\"\"Função: analisar a frase que o usuário\"\"\"\n frase = self.text_area.get_text()\n if ( frase != \"\"):\n frase_proc= normalizar(frase)\n self.text_area.set_text(frase)\n if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'):\n print(\"Opcao: %s \"%self.opcao)\n featureList = gera_lista_features(self.opcao)\n lista_feature_fell = get_lista_feature_fell()\n features_msg = getFeatureVector(frase_proc)\n training_set = apply_features(extract_features,lista_feature_fell)\n fell = avaliar_Sentimento(features_msg,training_set)\n print (\"Sentimento: %s \"%fell)\n \n \n def clear_text(self, widget):\n \"\"\"Função: para apagar o texto na área de texto\"\"\"\n self.text_area.set_text(\"\")\n\n def opcao_dilma(self, widget):\n \"\"\"Função: para definir a opcao Dilma\"\"\"\n self.opcao=\"dilma\"\n \n def opcao_copa(self, widget):\n \"\"\"Função: para definir a opcao Copa\"\"\"\n self.opcao=\"copa\"\n\n def opcao_palmeiras(self, widget):\n \"\"\"Função: para definir a opcao Palmeiras\"\"\"\n self.opcao=\"palmeiras\"\n\n def opcao_fatec(self, widget):\n \"\"\"Função: para definir a opcao Fatec\"\"\"\n self.opcao=\"fatec\"\n \n def sad_show(self,widget):\n \"\"\"Função: para definir se imagem Sad ira aparecer\"\"\"\n self.visible=True\n\n \nif __name__ == \"__main__\":\n \n app = tgApp()\n gtk.main()\n \n \n", "step-ids": [ 6, 8, 9, 10, 12 ] }
[ 6, 8, 9, 10, 12 ]
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import time

driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(10)
driver.maximize_window()
driver.get("http://demo.automationtesting.in/Register.html")
interactions = driver.find_element_by_xpath("//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]")
drag = driver.find_element_by_xpath("//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]")
static = driver.find_element_by_xpath("//ul[@class='childmenu ']//a[contains(text(),'Static ')]")
actions = ActionChains(driver)
actions.move_to_element(interactions).move_to_element(drag).move_to_element(static).click().perform()
time.sleep(5)
driver.get("http://testautomationpractice.blogspot.com/")
ele = driver.find_element_by_xpath("//*[@id='HTML10']/div[1]/button")
actions.double_click(ele).perform()
time.sleep(5)
driver.close()
normal
{ "blob_id": "1a1a217b382f3c58c6c4cd3c1c3f556ae945f5a7", "index": 7850, "step-1": "<mask token>\n", "step-2": "<mask token>\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get('http://demo.automationtesting.in/Register.html')\n<mask token>\nactions.move_to_element(interactions).move_to_element(drag).move_to_element(\n static).click().perform()\ntime.sleep(5)\ndriver.get('http://testautomationpractice.blogspot.com/')\n<mask token>\nactions.double_click(ele).perform()\ntime.sleep(5)\ndriver.close()\n", "step-3": "<mask token>\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get('http://demo.automationtesting.in/Register.html')\ninteractions = driver.find_element_by_xpath(\n \"//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]\")\ndrag = driver.find_element_by_xpath(\n \"//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]\")\nstatic = driver.find_element_by_xpath(\n \"//ul[@class='childmenu ']//a[contains(text(),'Static ')]\")\nactions = ActionChains(driver)\nactions.move_to_element(interactions).move_to_element(drag).move_to_element(\n static).click().perform()\ntime.sleep(5)\ndriver.get('http://testautomationpractice.blogspot.com/')\nele = driver.find_element_by_xpath(\"//*[@id='HTML10']/div[1]/button\")\nactions.double_click(ele).perform()\ntime.sleep(5)\ndriver.close()\n", "step-4": "from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\nimport time\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get('http://demo.automationtesting.in/Register.html')\ninteractions = driver.find_element_by_xpath(\n \"//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]\")\ndrag = driver.find_element_by_xpath(\n \"//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]\")\nstatic = driver.find_element_by_xpath(\n \"//ul[@class='childmenu ']//a[contains(text(),'Static ')]\")\nactions = ActionChains(driver)\nactions.move_to_element(interactions).move_to_element(drag).move_to_element(\n static).click().perform()\ntime.sleep(5)\ndriver.get('http://testautomationpractice.blogspot.com/')\nele = driver.find_element_by_xpath(\"//*[@id='HTML10']/div[1]/button\")\nactions.double_click(ele).perform()\ntime.sleep(5)\ndriver.close()\n", "step-5": "from selenium import webdriver;\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get(\"http://demo.automationtesting.in/Register.html\")\ninteractions = driver.find_element_by_xpath(\"//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]\")\ndrag = driver.find_element_by_xpath(\"//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]\")\nstatic = driver.find_element_by_xpath(\"//ul[@class='childmenu ']//a[contains(text(),'Static ')]\")\nactions = 
ActionChains(driver)\nactions.move_to_element(interactions).move_to_element(drag).move_to_element(static).click().perform()\ntime.sleep(5)\ndriver.get(\"http://testautomationpractice.blogspot.com/\")\nele = driver.find_element_by_xpath(\"//*[@id='HTML10']/div[1]/button\")\nactions.double_click(ele).perform()\ntime.sleep(5)\ndriver.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
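The script above relies on the Selenium 3 find_element_by_xpath helpers, which were removed in Selenium 4. A sketch of the equivalent lookups using the By locators the script already imports (same XPaths, only the call style changes):

from selenium.webdriver.common.by import By

interactions = driver.find_element(By.XPATH, "//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]")
ele = driver.find_element(By.XPATH, "//*[@id='HTML10']/div[1]/button")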
import sys, string, math

s = input()
print(ord(s))
normal
{ "blob_id": "ade300f2921ca860bbe92aa351df2c88238b7996", "index": 6039, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(ord(s))\n", "step-3": "<mask token>\ns = input()\nprint(ord(s))\n", "step-4": "import sys, string, math\ns = input()\nprint(ord(s))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from setuptools import setup, find_packages


def find_version():
    with open('pytest_defer.py') as fp:
        for line in fp:
            if '__version__' in line:
                version = line.split('=')[-1].strip()
                return version[1:-1]  # trim ''


with open('README.md') as fp:
    long_desc = fp.read()


setup(
    version=find_version(),
    name='pytest-defer',
    license='MIT',
    long_description=long_desc,
    long_description_content_type='text/markdown',
    author='Miki Tebeka',
    author_email='[email protected]',
    url='https://github.com/tebeka/pytest-defer',
    packages=find_packages(),
    entry_points={
        'pytest11': [
            'defer = pytest_defer',
        ],
    },
    install_requires=[
        'pytest>=6.2',
    ],
)
normal
{ "blob_id": "7903484b4a36d4b6ea03b9eaf3bf2b2e056baad8", "index": 8148, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef find_version():\n with open('pytest_defer.py') as fp:\n for line in fp:\n if '__version__' in line:\n version = line.split('=')[-1].strip()\n return version[1:-1]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef find_version():\n with open('pytest_defer.py') as fp:\n for line in fp:\n if '__version__' in line:\n version = line.split('=')[-1].strip()\n return version[1:-1]\n\n\nwith open('README.md') as fp:\n long_desc = fp.read()\nsetup(version=find_version(), name='pytest-defer', license='MIT',\n long_description=long_desc, long_description_content_type=\n 'text/markdown', author='Miki Tebeka', author_email=\n '[email protected]', url='https://github.com/tebeka/pytest-defer',\n packages=find_packages(), entry_points={'pytest11': [\n 'defer = pytest_defer']}, install_requires=['pytest>=6.2'])\n", "step-4": "from setuptools import setup, find_packages\n\n\ndef find_version():\n with open('pytest_defer.py') as fp:\n for line in fp:\n if '__version__' in line:\n version = line.split('=')[-1].strip()\n return version[1:-1]\n\n\nwith open('README.md') as fp:\n long_desc = fp.read()\nsetup(version=find_version(), name='pytest-defer', license='MIT',\n long_description=long_desc, long_description_content_type=\n 'text/markdown', author='Miki Tebeka', author_email=\n '[email protected]', url='https://github.com/tebeka/pytest-defer',\n packages=find_packages(), entry_points={'pytest11': [\n 'defer = pytest_defer']}, install_requires=['pytest>=6.2'])\n", "step-5": "from setuptools import setup, find_packages\n\n\ndef find_version():\n with open('pytest_defer.py') as fp:\n for line in fp:\n if '__version__' in line:\n version = line.split('=')[-1].strip()\n return version[1:-1] # trim ''\n\n\nwith open('README.md') as fp:\n long_desc = fp.read()\n\n\nsetup(\n version=find_version(),\n name='pytest-defer',\n license='MIT',\n long_description=long_desc,\n long_description_content_type='text/markdown',\n author='Miki Tebeka',\n author_email='[email protected]',\n url='https://github.com/tebeka/pytest-defer',\n packages=find_packages(),\n entry_points={\n 'pytest11': [\n 'defer = pytest_defer',\n ],\n },\n install_requires=[\n 'pytest>=6.2',\n ],\n)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
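For clarity, a small sketch of the string handling that find_version() above performs; the version value is made up for illustration.

line = "__version__ = '1.2.3'"
version = line.split('=')[-1].strip()
print(version[1:-1])  # 1.2.3 — the slice trims the surrounding quotes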
<|reserved_special_token_0|> class NeuralNetworkClassifier: <|reserved_special_token_0|> def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self. _hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self. _num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self. _backward_propagation(x, y, z_1, h_1, y_hat)) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % ( epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T <|reserved_special_token_0|> <|reserved_special_token_0|> def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g <|reserved_special_token_0|> def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1 ) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1 ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class NeuralNetworkClassifier: <|reserved_special_token_0|> def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self. _hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self. 
_num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self. _backward_propagation(x, y, z_1, h_1, y_hat)) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % ( epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) def _forward_propagation(self, x): z_1 = self._W_1.dot(x) + self._b_1 h_1 = self._relu(z_1) z_2 = self._W_2.dot(h_1) + self._b_2 y_hat = self._softmax(z_2) return z_1, h_1, y_hat def _backward_propagation(self, x, y, z_1, h_1, y_hat): df_dy = y_hat - y g = self._g(df_dy, self._W_2, z_1) W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1) W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self. _l_2_alpha_2, self._l_1_beta_2) b_1_prime = self._learning_rate * self._b_1_prime(g) b_2_prime = self._learning_rate * self._b_2_prime(df_dy) return W_1_prime, W_2_prime, b_1_prime, b_2_prime def predict(self, X): num_examples = X.shape[0] Y_hat = np.zeros((num_examples, self._num_classes)) for i in range(num_examples): x = np.vstack(X[i, :]) _, _, y_hat = self._forward_propagation(x) Y_hat[i, :] = y_hat[:, 0] return Y_hat <|reserved_special_token_0|> def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T <|reserved_special_token_0|> <|reserved_special_token_0|> def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g def _l_1_loss(self, W): return np.sum(np.absolute(W)) def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1 ) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1 ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization def _toClassIndices(self, probabilities): return np.argmax(probabilities, axis=1) def loss(self, testing_labels, predicted_labels): return 0 <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class NeuralNetworkClassifier: <|reserved_special_token_0|> def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] 
self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self. _hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self. _num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self. _backward_propagation(x, y, z_1, h_1, y_hat)) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % ( epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) def _forward_propagation(self, x): z_1 = self._W_1.dot(x) + self._b_1 h_1 = self._relu(z_1) z_2 = self._W_2.dot(h_1) + self._b_2 y_hat = self._softmax(z_2) return z_1, h_1, y_hat def _backward_propagation(self, x, y, z_1, h_1, y_hat): df_dy = y_hat - y g = self._g(df_dy, self._W_2, z_1) W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1) W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self. 
_l_2_alpha_2, self._l_1_beta_2) b_1_prime = self._learning_rate * self._b_1_prime(g) b_2_prime = self._learning_rate * self._b_2_prime(df_dy) return W_1_prime, W_2_prime, b_1_prime, b_2_prime def predict(self, X): num_examples = X.shape[0] Y_hat = np.zeros((num_examples, self._num_classes)) for i in range(num_examples): x = np.vstack(X[i, :]) _, _, y_hat = self._forward_propagation(x) Y_hat[i, :] = y_hat[:, 0] return Y_hat def _relu(self, x): return np.maximum(x, 0) def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2): return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) def _b_2_prime(self, df_dy): return df_dy def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g def _l_1_loss(self, W): return np.sum(np.absolute(W)) def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1 ) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1 ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization def _toClassIndices(self, probabilities): return np.argmax(probabilities, axis=1) def loss(self, testing_labels, predicted_labels): return 0 def score(self, expected_labels, predicted_labels): return np.mean(self._toClassIndices(expected_labels) == self. _toClassIndices(predicted_labels)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class NeuralNetworkClassifier: def __init__(self, hidden_units, learning_rate, batch_size, epochs, l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2): self._hidden_units = hidden_units self._learning_rate = learning_rate self._batch_size = batch_size self._epochs = epochs self._l_1_beta_1 = l_1_beta_1 self._l_1_beta_2 = l_1_beta_2 self._l_2_alpha_1 = l_2_alpha_1 self._l_2_alpha_2 = l_2_alpha_2 def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self. _hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self. _num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self. 
_backward_propagation(x, y, z_1, h_1, y_hat)) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % ( epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) def _forward_propagation(self, x): z_1 = self._W_1.dot(x) + self._b_1 h_1 = self._relu(z_1) z_2 = self._W_2.dot(h_1) + self._b_2 y_hat = self._softmax(z_2) return z_1, h_1, y_hat def _backward_propagation(self, x, y, z_1, h_1, y_hat): df_dy = y_hat - y g = self._g(df_dy, self._W_2, z_1) W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1) W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self. _l_2_alpha_2, self._l_1_beta_2) b_1_prime = self._learning_rate * self._b_1_prime(g) b_2_prime = self._learning_rate * self._b_2_prime(df_dy) return W_1_prime, W_2_prime, b_1_prime, b_2_prime def predict(self, X): num_examples = X.shape[0] Y_hat = np.zeros((num_examples, self._num_classes)) for i in range(num_examples): x = np.vstack(X[i, :]) _, _, y_hat = self._forward_propagation(x) Y_hat[i, :] = y_hat[:, 0] return Y_hat def _relu(self, x): return np.maximum(x, 0) def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2): return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) def _b_2_prime(self, df_dy): return df_dy def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g def _l_1_loss(self, W): return np.sum(np.absolute(W)) def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1 ) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1 ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization def _toClassIndices(self, probabilities): return np.argmax(probabilities, axis=1) def loss(self, testing_labels, predicted_labels): return 0 def score(self, expected_labels, predicted_labels): return np.mean(self._toClassIndices(expected_labels) == self. 
_toClassIndices(predicted_labels)) <|reserved_special_token_0|> def main(): training_images = np.load('mnist_train_images.npy') training_labels = np.load('mnist_train_labels.npy') testing_images = np.load('mnist_test_images.npy') testing_labels = np.load('mnist_test_labels.npy') validation_images = np.load('mnist_validation_images.npy') validation_labels = np.load('mnist_validation_labels.npy') parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :], validation_images, validation_labels) clf = NeuralNetworkClassifier(hidden_units=parameters[0], learning_rate =parameters[1], batch_size=parameters[2], epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1= parameters[5], l_2_alpha_2=parameters[5]) clf.fit(training_images, training_labels) predicted_labels = clf.predict(testing_images) <|reserved_special_token_0|> <|reserved_special_token_1|> import numpy as np import sys class NeuralNetworkClassifier(): def __init__(self, hidden_units, learning_rate, batch_size, epochs, l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2): self._hidden_units = hidden_units self._learning_rate = learning_rate self._batch_size = batch_size self._epochs = epochs self._l_1_beta_1 = l_1_beta_1 self._l_1_beta_2 = l_1_beta_2 self._l_2_alpha_1 = l_2_alpha_1 self._l_2_alpha_2 = l_2_alpha_2 def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self._hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self._num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = self._backward_propagation(x, y, z_1, h_1, y_hat) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print("Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f" % (epoch + 1, self._epochs,self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) def _forward_propagation(self, x): z_1 = self._W_1.dot(x) + self._b_1 # print("_forward_propagation W_1=", self._W_1.shape) # print("_forward_propagation b_1=", self._b_1.shape) # print("_forward_propagation x=", x.shape) # print("_forward_propagation z=", z_1.shape) h_1 = self._relu(z_1) # print("_forward_propagation h_1=", h_1.shape) z_2 = self._W_2.dot(h_1) + self._b_2 # print("_forward_propagation z_2=", z_2.shape) y_hat = self._softmax(z_2) # print("_forward_propagation y_hat=", y_hat.shape) return z_1, h_1, y_hat def _backward_propagation(self, x, y, z_1, h_1, y_hat): df_dy = y_hat - y g 
= self._g(df_dy, self._W_2, z_1) W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1) W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self._l_2_alpha_2, self._l_1_beta_2) b_1_prime = self._learning_rate * self._b_1_prime(g) b_2_prime = self._learning_rate * self._b_2_prime(df_dy) return W_1_prime, W_2_prime, b_1_prime, b_2_prime def predict(self, X): num_examples = X.shape[0] Y_hat = np.zeros((num_examples, self._num_classes)) for i in range(num_examples): x = np.vstack(X[i, :]) _, _, y_hat = self._forward_propagation(x) Y_hat[i, :] = y_hat[:, 0] return Y_hat def _relu(self, x): return np.maximum(x, 0) def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2): return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) def _b_2_prime(self, df_dy): return df_dy def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g def _l_1_loss(self, W): return np.sum(np.absolute(W)) def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization def _toClassIndices(self, probabilities): return np.argmax(probabilities, axis=1) def loss(self, testing_labels, predicted_labels): return 0 def score(self, expected_labels, predicted_labels): return np.mean(self._toClassIndices(expected_labels) == self._toClassIndices(predicted_labels)) def describe_hyperparameters(hyperparameters): return "\nHidden Units: {0} Learning Rate: {1} Minibatch Size: {2} Epochs: {3} L1 Strength: {4} L2 Strength: {5}".format( hyperparameters[0], hyperparameters[1], hyperparameters[2], hyperparameters[3], hyperparameters[4], hyperparameters[5]) def findBestHyperparameters(training_images, training_labels, validation_images, validation_labels): print("Start training...") print() all_hidden_units = [20, 20, 30, 30, 40, 40, 50, 50, 60, 30] all_learning_rates = [0.0001, 0.001, 0.01, 0.01, 0.01, 0.02, 0.02, 0.1, 0.2, 0.007] all_minibatch_sizes = [2, 5, 10, 10, 20, 20, 100, 50, 50, 25] all_num_epochs = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3] all_l1_strengths = [0.0, 0.0, 0, 0.01, 0.0, 0.001, 0.01, 0.02, 0.01, 0.001] all_l2_strengths = [0.0, 0.01, 0.001, 0.0, 0.01, 0.001, 0.01, 0.02, 0.01, 0.001] best_accuracy = 0 best_hyperparamters = [] for i in range(10): hyperparameters = (all_hidden_units[slice_start+i], all_learning_rates[slice_start+i], all_minibatch_sizes[slice_start+i], all_num_epochs[slice_start+i], all_l1_strengths[slice_start+i], all_l2_strengths[slice_start+i]) print(describe_hyperparameters(hyperparameters)) clf = NeuralNetworkClassifier( hidden_units = hyperparameters[0], learning_rate = hyperparameters[1], batch_size = hyperparameters[2], epochs = hyperparameters[3], l_1_beta_1 = hyperparameters[4], l_1_beta_2 = hyperparameters[4], l_2_alpha_1 = hyperparameters[5], l_2_alpha_2 = hyperparameters[5]) clf.fit(training_images, training_labels) predicted_labels = 
clf.predict(validation_images) accuracy = clf.score(validation_labels, predicted_labels) print("Accuracy: %f" % accuracy) print("Cross Entropy Loss = %.2f" % (clf.loss(validation_labels, predicted_labels))) if(accuracy > best_accuracy): best_accuracy = accuracy best_hyperparamters = hyperparameters print("Found new best hyperparameters.") print("\n") print(describe_hyperparameters(best_hyperparamters)) return best_hyperparamters def main(): training_images = np.load("mnist_train_images.npy") training_labels = np.load("mnist_train_labels.npy") testing_images = np.load("mnist_test_images.npy") testing_labels = np.load("mnist_test_labels.npy") validation_images = np.load("mnist_validation_images.npy") validation_labels = np.load("mnist_validation_labels.npy") parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :], validation_images, validation_labels) clf = NeuralNetworkClassifier(hidden_units=parameters[0], learning_rate=parameters[1], batch_size=parameters[2], epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=parameters[5], l_2_alpha_2=parameters[5]) clf.fit(training_images, training_labels) predicted_labels = clf.predict(testing_images) if __name__ == "__main__": if len(sys.argv) != 1: print("Usage: python3 digit_recognizer.py") exit() main()
flexible
{ "blob_id": "6199a2ac12e80395f4a7a54877c5b639315e64aa", "index": 7702, "step-1": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n <mask token>\n <mask token>\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n <mask token>\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for 
batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n <mask token>\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n <mask token>\n <mask token>\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n 
self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n\n def _relu(self, x):\n return np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2)\n\n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * 
self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self.\n _toClassIndices(predicted_labels))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n\n def __init__(self, hidden_units, learning_rate, batch_size, epochs,\n l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2):\n self._hidden_units = hidden_units\n self._learning_rate = learning_rate\n self._batch_size = batch_size\n self._epochs = epochs\n self._l_1_beta_1 = l_1_beta_1\n self._l_1_beta_2 = l_1_beta_2\n self._l_2_alpha_1 = l_2_alpha_1\n self._l_2_alpha_2 = l_2_alpha_2\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n\n def _relu(self, x):\n return 
np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2)\n\n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self.\n _toClassIndices(predicted_labels))\n\n\n<mask token>\n\n\ndef main():\n training_images = np.load('mnist_train_images.npy')\n training_labels = np.load('mnist_train_labels.npy')\n testing_images = np.load('mnist_test_images.npy')\n testing_labels = np.load('mnist_test_labels.npy')\n validation_images = np.load('mnist_validation_images.npy')\n validation_labels = np.load('mnist_validation_labels.npy')\n parameters = findBestHyperparameters(training_images[0:16000, :],\n training_labels[0:16000, :], validation_images, validation_labels)\n clf = NeuralNetworkClassifier(hidden_units=parameters[0], learning_rate\n =parameters[1], batch_size=parameters[2], epochs=parameters[3],\n l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=\n parameters[5], l_2_alpha_2=parameters[5])\n clf.fit(training_images, training_labels)\n predicted_labels = clf.predict(testing_images)\n\n\n<mask token>\n", "step-5": "import numpy as np\nimport sys\n\n\nclass NeuralNetworkClassifier():\n def __init__(self, hidden_units, learning_rate, batch_size, epochs, l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2):\n self._hidden_units = hidden_units\n self._learning_rate = learning_rate\n self._batch_size = batch_size\n self._epochs = epochs\n self._l_1_beta_1 = l_1_beta_1\n self._l_1_beta_2 = l_1_beta_2\n self._l_2_alpha_1 = l_2_alpha_1\n self._l_2_alpha_2 = l_2_alpha_2\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self._hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self._num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n\n 
num_examples = X_batch.shape[0]\n\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = self._backward_propagation(x, y, z_1, h_1, y_hat)\n\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n \n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n \n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print(\"Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f\" % (epoch + 1, self._epochs,self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n\n # print(\"_forward_propagation W_1=\", self._W_1.shape)\n # print(\"_forward_propagation b_1=\", self._b_1.shape)\n # print(\"_forward_propagation x=\", x.shape)\n # print(\"_forward_propagation z=\", z_1.shape)\n h_1 = self._relu(z_1)\n\n # print(\"_forward_propagation h_1=\", h_1.shape)\n z_2 = self._W_2.dot(h_1) + self._b_2\n\n # print(\"_forward_propagation z_2=\", z_2.shape)\n y_hat = self._softmax(z_2)\n\n # print(\"_forward_propagation y_hat=\", y_hat.shape)\n return z_1, h_1, y_hat\n \n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self._l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n \n def _relu(self, x):\n return np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n \n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) \n \n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) \n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n \n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return 
np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self._toClassIndices(predicted_labels))\n\ndef describe_hyperparameters(hyperparameters):\n return \"\\nHidden Units: {0} Learning Rate: {1} Minibatch Size: {2} Epochs: {3} L1 Strength: {4} L2 Strength: {5}\".format(\n hyperparameters[0], hyperparameters[1], hyperparameters[2], hyperparameters[3], hyperparameters[4], hyperparameters[5])\n\n\ndef findBestHyperparameters(training_images, training_labels, validation_images, validation_labels):\n print(\"Start training...\")\n print()\n\n all_hidden_units = [20, 20, 30, 30, 40, 40, 50, 50, 60, 30]\n all_learning_rates = [0.0001, 0.001, 0.01, 0.01, 0.01, 0.02, 0.02, 0.1, 0.2, 0.007]\n all_minibatch_sizes = [2, 5, 10, 10, 20, 20, 100, 50, 50, 25]\n all_num_epochs = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3]\n all_l1_strengths = [0.0, 0.0, 0, 0.01, 0.0, 0.001, 0.01, 0.02, 0.01, 0.001]\n all_l2_strengths = [0.0, 0.01, 0.001, 0.0, 0.01, 0.001, 0.01, 0.02, 0.01, 0.001]\n\n best_accuracy = 0\n best_hyperparamters = [] \n\n for i in range(10):\n hyperparameters = (all_hidden_units[slice_start+i], \n all_learning_rates[slice_start+i],\n all_minibatch_sizes[slice_start+i],\n all_num_epochs[slice_start+i],\n all_l1_strengths[slice_start+i],\n all_l2_strengths[slice_start+i])\n\n print(describe_hyperparameters(hyperparameters))\n\n clf = NeuralNetworkClassifier(\n hidden_units = hyperparameters[0],\n learning_rate = hyperparameters[1], \n batch_size = hyperparameters[2], \n epochs = hyperparameters[3], \n l_1_beta_1 = hyperparameters[4], \n l_1_beta_2 = hyperparameters[4], \n l_2_alpha_1 = hyperparameters[5], \n l_2_alpha_2 = hyperparameters[5])\n\n clf.fit(training_images, training_labels)\n\n predicted_labels = clf.predict(validation_images)\n\n accuracy = clf.score(validation_labels, predicted_labels)\n\n print(\"Accuracy: %f\" % accuracy)\n print(\"Cross Entropy Loss = %.2f\" % (clf.loss(validation_labels, predicted_labels)))\n\n if(accuracy > best_accuracy):\n best_accuracy = accuracy\n best_hyperparamters = hyperparameters\n print(\"Found new best hyperparameters.\")\n \n print(\"\\n\")\n \n print(describe_hyperparameters(best_hyperparamters))\n return best_hyperparamters\n\ndef main():\n training_images = np.load(\"mnist_train_images.npy\")\n training_labels = np.load(\"mnist_train_labels.npy\")\n testing_images = np.load(\"mnist_test_images.npy\")\n testing_labels = np.load(\"mnist_test_labels.npy\")\n validation_images = np.load(\"mnist_validation_images.npy\")\n validation_labels = np.load(\"mnist_validation_labels.npy\")\n \n parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :], \n validation_images, validation_labels)\n\n clf = NeuralNetworkClassifier(hidden_units=parameters[0], \n learning_rate=parameters[1], \n batch_size=parameters[2], \n epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=parameters[5], l_2_alpha_2=parameters[5])\n\n clf.fit(training_images, training_labels)\n predicted_labels = clf.predict(testing_images)\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 1:\n print(\"Usage: python3 digit_recognizer.py\")\n exit()\n main()", "step-ids": [ 9, 15, 19, 21, 26 ] }
[ 9, 15, 19, 21, 26 ]
import logging from django.contrib.auth.models import User import json from django.http import HttpResponse from enumfields.fields import EnumFieldMixin from Api.models import Status logger = logging.getLogger() logger.setLevel(logging.INFO) def check_cookie(request): # Post.objects.all().delete() result = { "status": True } try: user_id = request.GET.get('user_id') user = User.objects.get(pk=user_id) cookie_status = user.profile.cookie_status if cookie_status is Status.DEACTIVATE: result['cookie_status'] = "0" elif cookie_status is Status.ACTIVATE: result['cookie_status'] = "1" elif cookie_status is Status.EMPTY: result['cookie_status'] = "2" elif cookie_status is Status.WARNING: result['cookie_status'] = "3" elif cookie_status is Status.ERROR: result['cookie_status'] = "4" except Exception as e: logger.info(e) result["status"] = False return HttpResponse(json.dumps(result), content_type="application/json")
normal
{ "blob_id": "2bc3b0df720788e43da3d9c28adb22b3b1be8c58", "index": 5002, "step-1": "<mask token>\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-2": "<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-3": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-4": "import logging\nfrom django.contrib.auth.models import User\nimport json\nfrom django.http import HttpResponse\nfrom enumfields.fields import EnumFieldMixin\nfrom Api.models import Status\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef check_cookie(request):\n result = {'status': True}\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = '0'\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = '1'\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = '2'\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = '3'\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = '4'\n except Exception as e:\n logger.info(e)\n result['status'] = False\n return HttpResponse(json.dumps(result), content_type='application/json')\n", "step-5": "import logging\nfrom django.contrib.auth.models import User\nimport json\nfrom django.http import HttpResponse\nfrom enumfields.fields import EnumFieldMixin\n\nfrom Api.models import Status\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef 
check_cookie(request):\n # Post.objects.all().delete()\n result = {\n \"status\": True\n }\n try:\n user_id = request.GET.get('user_id')\n user = User.objects.get(pk=user_id)\n cookie_status = user.profile.cookie_status\n\n if cookie_status is Status.DEACTIVATE:\n result['cookie_status'] = \"0\"\n elif cookie_status is Status.ACTIVATE:\n result['cookie_status'] = \"1\"\n elif cookie_status is Status.EMPTY:\n result['cookie_status'] = \"2\"\n elif cookie_status is Status.WARNING:\n result['cookie_status'] = \"3\"\n elif cookie_status is Status.ERROR:\n result['cookie_status'] = \"4\"\n except Exception as e:\n logger.info(e)\n result[\"status\"] = False\n return HttpResponse(json.dumps(result), content_type=\"application/json\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]