import os


def test_development_config(test_app):
    test_app.config.from_object('flask_ecom_api.config.DevelopmentConfig')
    assert not test_app.config['TESTING']
    assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')


def test_testing_config(test_app):
    test_app.config.from_object('flask_ecom_api.config.TestingConfig')
    assert test_app.config['TESTING']
    assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_TEST_URL')


def test_production_config(test_app):
    test_app.config.from_object('flask_ecom_api.config.ProductionConfig')
    assert not test_app.config['TESTING']
    assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
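
# Illustrative sketch (not taken from the project): the tests above rely on a pytest
# fixture named `test_app` that is defined elsewhere (typically in conftest.py). A
# minimal stand-in is shown below; using a bare Flask instance is an assumption, and
# the real fixture probably builds the app via the project's application factory.
import pytest
from flask import Flask


@pytest.fixture
def test_app():
    # A plain Flask app is enough for the config-loading assertions above.
    return Flask(__name__)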
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from leasing.enums import (
InfillDevelopmentCompensationState,
LeaseState,
TenantContactType,
)
from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality
from leasing.validators import validate_business_id
class CommaSeparatedChoiceField(forms.ChoiceField):
def to_python(self, value):
if value in validators.EMPTY_VALUES:
return []
value = [item.strip() for item in str(value).split(",") if item.strip()]
return value
def validate(self, value):
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
class LeaseSearchForm(forms.Form):
succinct = forms.BooleanField(label="Succinct", required=False)
identifier = forms.CharField(
label="Lease identifier", max_length=255, required=False, empty_value=None
)
tenant_name = forms.CharField(label="Tenant name", max_length=255, required=False)
tenantcontact_type = CommaSeparatedChoiceField(
label="Tenant role",
required=False,
choices=tuple((x.value, str(x)) for x in TenantContactType),
)
only_past_tenants = forms.BooleanField(label="Only past tenants", required=False)
tenant_activity = forms.ChoiceField(
label="Tenants",
required=False,
choices=(
("all", "All"),
("past", "Only past tenants"),
("active", "Only active tenants"),
),
)
lease_start_date_start = forms.DateField(required=False)
lease_start_date_end = forms.DateField(required=False)
lease_end_date_start = forms.DateField(required=False)
lease_end_date_end = forms.DateField(required=False)
only_active_leases = forms.BooleanField(label="Active", required=False)
only_expired_leases = forms.BooleanField(label="Expired", required=False)
has_geometry = forms.NullBooleanField(label="Has geometry", required=False)
property_identifier = forms.CharField(
label="Real property identifier",
max_length=255,
required=False,
empty_value=None,
)
address = forms.CharField(
label="Address", max_length=255, required=False, empty_value=None
)
lease_type = forms.ModelChoiceField(
label="Lease type", queryset=LeaseType.objects.all(), required=False
)
municipality = forms.ModelChoiceField(
label="Municipality", queryset=Municipality.objects.all(), required=False
)
district = forms.ModelChoiceField(
label="District", queryset=District.objects.all(), required=False
)
sequence = forms.IntegerField(label="Sequence", required=False)
lease_state = CommaSeparatedChoiceField(
label="Lease state",
required=False,
choices=tuple((x.value, str(x)) for x in LeaseState),
)
business_id = forms.CharField(
label="Business id",
max_length=255,
required=False,
empty_value=None,
validators=[validate_business_id],
)
national_identification_number = forms.CharField(
label="National identification number",
max_length=255,
required=False,
empty_value=None,
)
lessor = forms.ModelChoiceField(
label="Lessor", queryset=Contact.objects.filter(is_lessor=True), required=False
)
contract_number = forms.CharField(
label="Contract number", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
invoice_number = forms.CharField(
label="Invoice number", max_length=255, required=False, empty_value=None
)
class BasisOfRentSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class InfillDevelopmentCompensationSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
state = CommaSeparatedChoiceField(
label="State",
required=False,
choices=tuple((x.value, str(x)) for x in InfillDevelopmentCompensationState),
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class AuditLogSearchForm(forms.Form):
type = forms.ChoiceField(
label="Type",
required=True,
choices=(("lease", "Lease"), ("contact", "Contact")),
)
id = forms.IntegerField(label="Id", required=False)
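
# Illustrative sketch (not part of the original module): how CommaSeparatedChoiceField
# cleans its input. Because this module imports leasing.models, it is only importable
# inside a configured Django project; the helper below is meant to be called manually,
# e.g. from `manage.py shell`, and its name is an assumption.
def _demo_comma_separated_choice_field():
    field = CommaSeparatedChoiceField(
        required=False,
        choices=(("past", "Past tenant"), ("active", "Active tenant")),
    )
    # to_python() splits "past, active" on commas, then validate() checks each
    # item against the declared choices.
    return field.clean("past, active")  # -> ["past", "active"]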
# polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
class Command(BaseCommand):
help = 'Creates the initial admin user'
def handle(self, *args, **options):
if User.objects.filter(username="admin").exists():
print("admin exists")
else:
u = User(username='admin')
u.set_password('<PASSWORD>')
u.is_superuser = True
u.is_staff = True
u.save()
print("admin created")
sys.exit()
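
# Usage sketch (illustrative): saved at polls/management/commands/create_admin_user.py,
# the command is invoked as
#     python manage.py create_admin_user
# or programmatically with django.core.management.call_command("create_admin_user").
# Note that handle() ends with sys.exit(), so programmatic callers should expect a
# SystemExit to propagate.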
from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
PURPOSE_CHOICES = (
(1, "Speed Limit Violation"),
(2, "Stop Light/Sign Violation"),
(3, "Driving While Impaired"),
(4, "Safe Movement Violation"),
(5, "Vehicle Equipment Violation"),
(6, "Vehicle Regulatory Violation"),
(7, "Seat Belt Violation"),
(8, "Investigation"),
(9, "Other Motor Vehicle Violation"),
(10, "Checkpoint"),
)
ACTION_CHOICES = (
(1, "Verbal Warning"),
(2, "Written Warning"),
(3, "Citation Issued"),
(4, "On-View Arrest"),
(5, "No Action Taken"),
)
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))
GENDER_CHOICES = (("M", "Male"), ("F", "Female"))
ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))
RACE_CHOICES = (
("A", "Asian"),
("B", "Black"),
("I", "Native American"),
("U", "Other"),
("W", "White"),
)
SEARCH_TYPE_CHOICES = (
(1, "Consent"),
(2, "Search Warrant"),
(3, "Probable Cause"),
(4, "Search Incident to Arrest"),
(5, "Protective Frisk"),
)
SEARCH_BASIS_CHOICES = (
("ER", "Erratic/Suspicious Behavior"),
("OB", "Observation of Suspected Contraband"),
("OI", "Other Official Information"),
("SM", "Suspicious Movement"),
("TIP", "Informant Tip"),
("WTNS", "Witness Observation"),
)
class Stop(CachingMixin, models.Model):
stop_id = models.PositiveIntegerField(primary_key=True)
agency_description = models.CharField(max_length=100)
agency = models.ForeignKey("Agency", null=True, related_name="stops", on_delete=models.CASCADE)
date = models.DateTimeField(db_index=True)
purpose = models.PositiveSmallIntegerField(choices=PURPOSE_CHOICES)
action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
driver_arrest = models.BooleanField(default=False)
passenger_arrest = models.BooleanField(default=False)
encounter_force = models.BooleanField(default=False)
engage_force = models.BooleanField(default=False)
officer_injury = models.BooleanField(default=False)
driver_injury = models.BooleanField(default=False)
passenger_injury = models.BooleanField(default=False)
officer_id = models.CharField(max_length=15) # todo: keys
stop_location = models.CharField(max_length=15) # todo: keys
stop_city = models.CharField(max_length=20)
objects = CachingManager()
class Person(CachingMixin, models.Model):
person_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
type = models.CharField(max_length=2, choices=PERSON_TYPE_CHOICES)
age = models.PositiveSmallIntegerField()
gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
ethnicity = models.CharField(max_length=2, choices=ETHNICITY_CHOICES)
race = models.CharField(max_length=2, choices=RACE_CHOICES)
objects = CachingManager()
class Search(CachingMixin, models.Model):
search_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
type = models.PositiveSmallIntegerField(choices=SEARCH_TYPE_CHOICES)
vehicle_search = models.BooleanField(default=False)
driver_search = models.BooleanField(default=False)
passenger_search = models.BooleanField(default=False)
property_search = models.BooleanField(default=False)
vehicle_siezed = models.BooleanField(default=False)
personal_property_siezed = models.BooleanField(default=False)
other_property_sized = models.BooleanField(default=False)
objects = CachingManager()
class Contraband(CachingMixin, models.Model):
contraband_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
ounces = models.FloatField(default=0, null=True)
pounds = models.FloatField(default=0, null=True)
pints = models.FloatField(default=0, null=True)
gallons = models.FloatField(default=0, null=True)
dosages = models.FloatField(default=0, null=True)
grams = models.FloatField(default=0, null=True)
kilos = models.FloatField(default=0, null=True)
money = models.FloatField(default=0, null=True)
weapons = models.FloatField(default=0, null=True)
dollar_amount = models.FloatField(default=0, null=True)
objects = CachingManager()
class SearchBasis(CachingMixin, models.Model):
search_basis_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
basis = models.CharField(max_length=4, choices=SEARCH_BASIS_CHOICES)
objects = CachingManager()
class Agency(CachingMixin, models.Model):
name = models.CharField(max_length=255)
# link to CensusProfile (no cross-database foreign key)
census_profile_id = models.CharField(max_length=16, blank=True, default="")
last_reported_stop = models.DateField(null=True)
objects = CachingManager()
class Meta(object):
verbose_name_plural = "Agencies"
def __str__(self):
return self.name
@property
def census_profile(self):
if self.census_profile_id:
profile = CensusProfile.objects.get(id=self.census_profile_id)
return profile.get_census_dict()
else:
return dict()
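
# Illustrative query sketch (not part of the original module): with these models in a
# configured Django project, per-purpose stop counts for an agency could be computed as
# below. The helper name is an assumption, not existing project code.
def stops_by_purpose(agency):
    """Return a {purpose label: stop count} dict for a single agency."""
    from django.db.models import Count

    rows = (
        Stop.objects.filter(agency=agency)
        .values("purpose")
        .annotate(total=Count("stop_id"))
    )
    labels = dict(PURPOSE_CHOICES)
    return {labels[row["purpose"]]: row["total"] for row in rows}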
from django.urls import path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from .views import notification
schema_view = get_swagger_view(title='MAIL API')
urlpatterns = [
path('front/betsy/irish/embargo/admin/', admin.site.urls),
# Swagger API
path(
'api/',
schema_view,
name='api'
),
# notification
path(
'notification/',
notification.NotificationServicesRest.as_view(),
name=notification.NotificationServicesRest.name
),
]
# -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
lst = api.load_echo(False)[1:]
if request.query.n:
return '\n'.join([t[0] for t in lst])
else:
return '\n'.join(['%s:%s:%s' % t for t in lst])
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return api.ru('blacklist.txt')
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=iso-8859-1')
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = api.load_echo(False)
mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
        # better still, move this into api.toss
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_post():
return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
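
# Client-side sketch (illustrative): with the node running on 127.0.0.1:62220, the echo
# list served by /list.txt can be fetched with the standard library only, e.g.:
#     from urllib.request import urlopen
#     with urlopen("http://127.0.0.1:62220/list.txt") as resp:
#         echoareas = resp.read().decode("utf-8").splitlines()
# Each line carries three colon-separated fields (the first is the echoarea name);
# passing the `n` query parameter returns the names alone.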
from dataclasses import dataclass
import logging
from attributes import get_ability_modifier
from sourcetree.utils import (
get_feats_list,
get_feat_perks,
get_feat_proficiencies,
get_feat_requirements,
)
from stdio import prompt
log = logging.getLogger("thespian.tweaks")
class AbilityScoreImprovementError(Exception):
"""Handles ability score improvement errors."""
class FlagParserError(Exception):
"""Handles an invalid flag format error."""
class FeatOptionParser:
"""Generates and parses feat characteristic flags by feat.
FLAG OPTION PARSER SYSTEM
    PIPEBAR: Used to separate flags, e.g.: ability=Strength|proficiency=skills
    Two flag options are designated in the above example: 'ability' and 'proficiency'.
    ALLOWED FLAG OPTIONS:
    Designates certain instructions for applying feat-related "perks" to a character.
    - ability
    - proficiency
    - savingthrows
    - speed
    COMMA: Used to identify the number of occurrences of a flag, e.g.: languages,2
    The example above means that a player can choose two languages.
    EQUAL SIGN: Used to separate option parameters, e.g. ability=Strength,0
    The example above means Strength is a designated parameter for the ability option.
    In this case the character would get an enhancement to Strength.
    There is more to this, which is explained further below.
    DOUBLE AMPERSAND: Used to separate parameter options, e.g. ability=Strength&&Dexterity,1
    The example above means the player can choose a one-time enhancement to Strength or Dexterity.
    PLUS SIGN: Used to separate parameter options, e.g. ability=Strength+Dexterity
    The example above means the player can gain an enhancement in both Strength and Dexterity.
    (A worked example of this grammar appears after the separator constants below.)
"""
# Parser Option Separators
PARSER_OPTIONS = "|"
OPTION_INCREMENT = ","
OPTION_PARAMETER = "="
PARAM_SINGLE_SELECTION = "&&"
PARAM_MULTIPLE_SELECTION = "+"
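
    # Worked example of the grammar above (illustrative values, not taken from the real
    # feat data): the raw flag string
    #     "ability=Strength&&Dexterity,1|proficiency=skills,2"
    # is split on "|" into two flag pairs, and _parse_flags() returns
    #     {"ability": {"increment": "1", "options": ["Strength", "Dexterity"]},
    #      "proficiency": {"increment": "2", "options": "skills"}}
    # i.e. one Strength-or-Dexterity enhancement and two picks from the skills listing.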
def __init__(self, feat, prof):
self.feat = feat
self.profile = prof
self.perks = get_feat_perks(self.feat)
def _get_proficiency_options(self, prof_type: str) -> list:
"""Returns a list of bonus proficiencies for a feat by proficiency type."""
return get_feat_proficiencies(self.feat, prof_type)
def _get_sub_menu_options(self, available_options) -> dict | bool:
"""Creates a dictionary of sub menu options, if applicable."""
        if self._is_sub_menu(available_options):
sub_options = dict()
for option in available_options:
sub_options[option] = self._get_proficiency_options(option)
return sub_options
return False
@staticmethod
def _is_sub_menu(available_options) -> bool:
"""Returns True if sub menu options are available. False otherwise."""
for option in available_options:
if not option.islower():
return False
return True
def _parse_flags(self) -> dict:
"""Generates the characteristics for the specified feat."""
parsed_flags = dict()
raw_flags = self.perks.get("flags")
if raw_flags is None:
return parsed_flags
flag_pairs = raw_flags.split(self.PARSER_OPTIONS)
for flag_pair in flag_pairs:
if self.OPTION_INCREMENT not in flag_pair:
raise FlagParserError("Pairs must be formatted in 'name,value' pairs.")
attribute_name, increment = flag_pair.split(self.OPTION_INCREMENT)
if self.OPTION_PARAMETER not in attribute_name:
parsed_flags[attribute_name] = {"increment": increment}
else:
flag_options = attribute_name.split(self.OPTION_PARAMETER)
# Allowable flags: ability, proficiency, savingthrows, speed
attribute_name = flag_options[0]
try:
if attribute_name not in (
"ability",
"proficiency",
"savingthrows",
"speed",
):
raise FlagParserError(
f"Illegal flag name '{attribute_name}' specified."
)
except FlagParserError:
# pass
return parsed_flags
if self.PARAM_SINGLE_SELECTION in flag_options[1]:
options = flag_options[1].split(self.PARAM_SINGLE_SELECTION)
else:
options = flag_options[1]
parsed_flags[attribute_name] = {
"increment": increment,
"options": options,
}
return parsed_flags
def parse(self) -> dict:
"""Parses the generated flags for the chosen feat."""
final_flag = self._parse_flags()
if len(final_flag) == 0:
return
parsed_flag = dict()
for flag, options in final_flag.items():
if flag in ("ability", "proficiency"):
increment = int(options["increment"])
menu_options = options["options"]
if len(menu_options) < 1:
raise FlagParserError("Malformed parser instructions error.")
if flag == "ability":
if increment == 0:
raise FlagParserError(
"Flag attribute 'ability' requires a positive integer value."
)
# For feats that use the 'savingthrows' flag.
# Limits choices based on current saving throw proficiencies.
if "savingthrows" in final_flag:
menu_options = [
x for x in menu_options if x not in self.profile["savingthrows"]
]
if isinstance(menu_options, str):
my_ability = menu_options
elif isinstance(menu_options, list):
for _ in range(increment):
my_ability = prompt(
"Choose the ability you would like to apply a bonus to.",
menu_options,
)
menu_options.remove(my_ability)
log.info(f"You selected the ability '{my_ability}'.")
# If 'savingthrows' flag specified, add proficiency for ability saving throw.
if "savingthrows" in final_flag:
self.profile["savingthrows"].append(my_ability)
log.info(
f"You gained proficiency in the '{my_ability}' saving throw."
)
bonus_value = self.perks[flag][my_ability]
parsed_flag[flag] = (my_ability, bonus_value)
elif flag == "proficiency":
# Increment value of 0 means append ALL listed bonuses.
                    # Increment values other than 0 mean: add a number of bonuses equal to the increment.
chosen_options = dict()
submenu_options = None
if isinstance(menu_options, str) and increment == 0:
chosen_options[menu_options] = self._get_proficiency_options(
menu_options
)
elif isinstance(menu_options, list):
for _ in range(increment):
my_bonus = prompt(f"Choose your bonus: '{flag}'.", menu_options)
if not self._is_sub_menu(menu_options):
menu_options.remove(my_bonus)
else:
# Generate submenu options, if applicable.
if submenu_options is None:
submenu_options = self._get_sub_menu_options(
menu_options
)
submenu_options[my_bonus] = [
x
for x in submenu_options[my_bonus]
if x not in self.profile[my_bonus]
]
# Create storage handler for selections, if applicable.
if len(chosen_options) == 0:
for opt in submenu_options:
chosen_options[opt] = list()
submenu_choice = prompt(
f"Choose submenu option: '{my_bonus}'.",
submenu_options.get(my_bonus),
)
chosen_options[my_bonus].append(submenu_choice)
submenu_options[my_bonus].remove(submenu_choice)
# Reset the submenu options after use
submenu_options = None
log.info(
f"You selected the {flag} ({my_bonus}) bonus '{submenu_choice}'."
)
elif isinstance(menu_options, str):
for prof_type in menu_options.split(self.PARAM_MULTIPLE_SELECTION):
chosen_proficiencies = list()
# Pull full collection of bonus proficiencies,
proficiency_options = get_feat_proficiencies(
self.feat, prof_type
)
# If collection is dict, sort through sub categories,
# And choose only the unselected options in that category.
# Otherwise, simply sort out the unselected options
if isinstance(proficiency_options, dict):
temp = list()
for types in tuple(proficiency_options.keys()):
if types not in self.profile[prof_type]:
temp += proficiency_options[types]
proficiency_options = temp
else:
proficiency_options = [
x
for x in proficiency_options
if x not in self.profile[prof_type]
]
for _ in range(increment):
# Clear out the temporarily chosen options.
proficiency_options = [
x
for x in proficiency_options
if x not in chosen_proficiencies
]
my_bonus = prompt(
f"Choose your bonus: {flag}.", proficiency_options
)
chosen_proficiencies.append(my_bonus)
proficiency_options.remove(my_bonus)
log.info(
f"You selected the {flag} ({prof_type}) bonus '{my_bonus}'."
)
chosen_options[prof_type] = chosen_proficiencies
for k, v in chosen_options.items():
parsed_flag[k] = v
elif flag == "speed":
speed_value = self.perks[flag]
if speed_value != 0:
parsed_flag[flag] = speed_value
elif flag == "spells":
bonus_spells = self.perks[flag]
for index, spell in enumerate(bonus_spells):
if isinstance(spell, list):
spell_choice = prompt("Choose your bonus spell.", spell)
bonus_spells[index] = spell_choice
log.info(f"You selected the spell {spell_choice}.")
parsed_flag[flag] = bonus_spells
return parsed_flag
@dataclass
class AbilityScoreImprovement:
"""Used to apply ability and/or feat upgrades."""
character: dict
def _add_feat_perks(self, feat: str) -> None:
"""Applies feat related perks."""
parsed_attributes = FeatOptionParser(feat, self.character).parse()
if parsed_attributes is None:
return
for flag, options in parsed_attributes.items():
if flag == "ability":
ability, bonus = options
self._set_ability_score(ability, bonus)
else:
self.character[flag] += options
def _count_upgrades(self) -> int:
"""Returns the number of available upgrades."""
upgrade_count = 0
for x in range(1, self.character["level"] + 1):
if (x % 4) == 0 and x != 20:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 6:
upgrade_count += 1
if self.character["klass"] == "Rogue" and self.character["level"] >= 8:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 14:
upgrade_count += 1
if self.character["level"] >= 19:
upgrade_count += 1
return upgrade_count
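
    # Worked example of the count above (illustrative): a level-12 Fighter gets one
    # upgrade at each of levels 4, 8 and 12 plus the Fighter bonus at level 6, so
    # _count_upgrades() returns 4.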
def _has_requirements(self, feat: str) -> bool:
"""Checks if feat requirements have been met."""
# Character already has feat
if feat in self.character["feats"]:
return False
# If Heavily, Lightly, or Moderately Armored feat or a Monk.
# "Armor Related" or Weapon Master feat but already proficient.
if (
feat
in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
)
and self.character["klass"] == "Monk"
):
return False
elif feat in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
"Weapon Master",
):
# Heavily Armored: Character already has heavy armor proficiency.
# Lightly Armored: Character already has light armor proficiency.
# Moderately Armored: Character already has medium armor proficiency.
# Weapon Master: Character already has martial weapon proficiency.
if feat == "Heavily Armored" and "Heavy" in self.character["armors"]:
return False
elif feat == "Lightly Armored" and "Light" in self.character["armors"]:
return False
elif feat == "Moderately Armored" and "Medium" in self.character["armors"]:
return False
elif feat == "Weapon Master" and "Martial" in self.character["weapons"]:
return False
# Cycle through ALL prerequisites for the feat.
prerequisite = get_feat_requirements(feat)
for requirement, _ in prerequisite.items():
# Ignore requirements that are None
if prerequisite.get(requirement) is None:
continue
# Check ability requirements
if requirement == "ability":
for ability, required_score in prerequisite.get(requirement).items():
my_score = self.character["scores"][ability]
if my_score < required_score:
return False
# Check caster requirements
if requirement == "caster":
# If no spellcasting ability.
if prerequisite[requirement] and self.character["spellslots"] == "0":
return False
# Magic Initiative requirements check
if feat == "Magic Initiative" and self.character["klass"] not in (
"Bard",
"Cleric",
"Druid",
"Sorcerer",
"Warlock",
"Wizard",
):
return False
# Ritual Caster requirements check
if feat == "Ritual Caster":
                # Look these up on the character dict like the rest of this class does
                # (the original referenced self.ability / self.scores, which this
                # dataclass does not define); the "ability" key is assumed here.
                primary_ability = self.character["ability"][0]
                my_score = self.character["scores"][primary_ability]
required_score = prerequisite["ability"][primary_ability]
if my_score < required_score:
return False
# Check proficiency requirements
if requirement == "proficiency":
if feat in (
"Heavy Armor Master",
"Heavily Armored",
"Medium Armor Master",
"Moderately Armored",
):
armors = prerequisite.get(requirement).get("armors")
for armor in armors:
if armor not in self.character["armors"]:
return False
# Check race requirements
if requirement == "race":
if self.character["race"] not in prerequisite.get(requirement):
return False
# Check subrace requirements
if requirement == "subrace":
if self.character["subrace"] not in prerequisite.get(requirement):
return False
return True
def _is_adjustable(self, ability: str, bonus: int = 1) -> bool:
"""Checks if ability is adjustable < 20."""
if not isinstance(ability, str):
raise AbilityScoreImprovementError(
"Argument 'ability' must be of type 'str'."
)
if not isinstance(bonus, int):
raise AbilityScoreImprovementError(
"Argument 'bonus' must be of type 'int'."
)
if ability not in self.character["scores"]:
raise AbilityScoreImprovementError(
f"Invalid ability '{ability}' specified."
)
if (self.character["scores"][ability] + bonus) > 20:
return False
return True
def run(self) -> None:
"""Executes the ability score improvement class."""
# Determine actual hp.
modifier = get_ability_modifier("Constitution", self.character["scores"])
log.info(f"You have a Constitution modifier of {modifier}.")
bonus_hit_points = modifier * self.character["level"]
log.info(f"Your modifier*level provide {bonus_hit_points} bonus hit points.")
total_hit_points = self.character["hit_points"] + bonus_hit_points
self.character["hit_points"] = total_hit_points
log.info(f"You have {total_hit_points} total hit points.")
if self.character["level"] < 4:
return
num_of_upgrades = self._count_upgrades()
while num_of_upgrades > 0:
if num_of_upgrades > 1:
log.info(f"You have {num_of_upgrades} upgrades available.")
else:
log.info("You have 1 upgrade available.")
my_path = prompt(
"Follow which upgrade path?", ["Upgrade Ability", "Choose Feat"]
)
# Path #1: Upgrade an Ability.
if my_path == "Upgrade Ability":
my_bonus = prompt("Apply how many points?", ["1", "2"])
log.info(f"You chose an ability bonus of: +{my_bonus}.")
my_bonus = int(my_bonus)
ability_options = [
a
for a in (
"Strength",
"Dexterity",
"Constitution",
"Intelligence",
"Wisdom",
"Charisma",
)
if self._is_adjustable(a, my_bonus)
]
# Apply +2 bonus to one ability.
# Apply +1 bonus to two abilities.
if my_bonus == 1:
for _ in range(2):
my_ability = prompt(
"Which ability?",
ability_options,
)
ability_options.remove(my_ability)
self._set_ability_score(my_ability, my_bonus)
elif my_bonus == 2:
my_ability = prompt(
"Which ability?",
ability_options,
)
self._set_ability_score(my_ability, my_bonus)
# Path #2: Add a new Feat.
elif my_path == "Choose Feat":
feat_options = [
x for x in get_feats_list() if x not in self.character["feats"]
]
my_feat = prompt(
"Which feat do you want to acquire?",
feat_options,
)
log.info(f"Checking requirements for the requested feat {my_feat}...")
while not self._has_requirements(my_feat):
feat_options.remove(my_feat)
                    log.warning(
f"You don't meet the requirements for '{my_feat}'.",
)
my_feat = prompt(
f"Which feat do you want to acquire?",
feat_options,
)
else:
self._add_feat_perks(my_feat)
self.character["feats"].append(my_feat)
log.info(f"You selected the feat {my_feat}.")
num_of_upgrades -= 1
def _set_ability_score(self, ability, bonus=1) -> None:
"""Applies a bonus to a specified ability."""
if not self._is_adjustable(ability, bonus):
            log.warning(f"Ability '{ability}' is not adjustable.")
else:
new_score = self.character.get("scores").get(ability) + bonus
self.character["scores"][ability] = new_score
log.info(f"You applied a +{bonus} bonus to your {ability}.")
log.info(f"Your {ability} score is now a {new_score}.")
class ImpuritySpecies(object):
# For storing OpenADAS data related to a particular impurity species
	# Loosely based on the cfe316/atomic/atomic_data.py AtomicData class (although with much less code, since
	# all of the F77 importing is done in the separate <<make json_update>> code: the BOUT++ protocol
	# requires that Fortran code be isolated from the main operation)
def __init__(self,symbol,adas_files_dict={},rate_coefficients={},impurity_fraction=None):
# Searches for year, atomic_number, has_charge_exchange from user_input.json
#
# Default initialiser for class
# symbol : (str) | element symbol (e.g. 'C')
# name : (str) | full name of element (for printing only)
# year : (int) | year for which OpenADAS data was searched (1996)
# has_charge_exchange : (bool) | whether cx_power (prc) was found for this element-year combination (True)
# atomic_number : (int) | number of protons for impurity species (6)
# adas_files_dict : (str -> str) | dictionary of OpenADAS files, indexed by file-type ('ionisation': 'scd96_c', ...)
# rate_coefficients : (str -> RateCoefficient) | dictionary of RateCoefficient objects corresponding to adas files ('ionisation': <RateCoefficientObject>, ...)
import json
with open('user_input.json','r') as fp:
data_dict = json.load(fp)
element_dict = data_dict[symbol]
assert symbol == element_dict['symbol']
self.symbol = symbol
self.name = element_dict['name']
self.year = element_dict['year']
self.has_charge_exchange = element_dict['has_charge_exchange']
self.atomic_number = element_dict['atomic_number']
self.adas_files_dict = adas_files_dict
self.rate_coefficients = rate_coefficients
def __str__(self):
# Printing method, for easier inspection of object data
_print_adas_dict = ''
if len(self.adas_files_dict) == 0:
_print_adas_check = 'Not initialised'
else:
_print_adas_check = 'Initialised'
for key, value in self.adas_files_dict.items():
_print_adas_dict = _print_adas_dict + '{:>25} -> {}\n'.format(key,value)
if len(self.rate_coefficients) == 0:
_print_rate_check = 'Not initialised'
else:
_print_rate_check = 'Initialised'
_printing_string = 'ImpuritySpecies object with attributes'+\
'\n{:>25} = {}'.format('symbol', self.symbol)+\
'\n{:>25} = {}'.format('year', self.year)+\
'\n{:>25} = {}'.format('has_charge_exchange', self.has_charge_exchange)+\
'\n{:>25} = {}'.format('atomic_number', self.atomic_number)+\
'\n{:>25} = {}'.format('adas_files_dict', _print_adas_check)+\
'\n{:>25} = {}'.format('rate_coefficients', _print_rate_check)
if len(self.adas_files_dict) != 0:
_printing_string += '\n--------------------------------------------------\n'+_print_adas_dict
return _printing_string
def addJSONFiles(self,physics_process,filetype_code,JSON_database_path):
# 1. Make the filename string expected for the json adas file
# 2. Check that this file exists in the JSON_database_path/json_data directory
# 3. Add this file to the atomic data .adas_files_dict attribute
import os.path
filename = '{}{}_{}.json'.format(filetype_code,str(self.year)[-2:],self.symbol)
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
if not(os.path.isfile(full_path)):
raise FileNotFoundError('File {} not found in {}/json_data'.format(filename,JSON_database_path))
self.adas_files_dict[physics_process] = filename
def makeRateCoefficients(self,JSON_database_path):
# Calls the RateCoefficient.__init__ method for each entry in the .adas_files_dict
# Generates a dictionary of RateCoefficient objects as .rate_coefficients
from atomic1D import RateCoefficient
for physics_process, filename in self.adas_files_dict.items():
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
self.rate_coefficients[physics_process] = RateCoefficient(full_path)
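# --- hedged usage sketch (not part of the original module) -------------------
# Illustrates the intended call order for the class above. The element symbol
# 'C', the file-type codes and the 'json_database' path are assumptions for
# illustration only; they must match the local user_input.json and the OpenADAS
# json_data directory produced by <<make json_update>>.
if __name__ == '__main__':
	carbon = ImpuritySpecies('C')
	# register the OpenADAS json files this run needs
	carbon.addJSONFiles('ionisation', 'scd', 'json_database')
	carbon.addJSONFiles('recombination', 'acd', 'json_database')
	# build RateCoefficient objects from the registered files
	carbon.makeRateCoefficients('json_database')
	print(carbon)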
| [
"json.load",
"atomic1D.RateCoefficient"
]
| [((1472, 1485), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1481, 1485), False, 'import json\n'), ((4108, 4134), 'atomic1D.RateCoefficient', 'RateCoefficient', (['full_path'], {}), '(full_path)\n', (4123, 4134), False, 'from atomic1D import RateCoefficient\n')] |
"""
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
#   Thus, we will not use this feature, which is not acquired during the survey.
# - "education-num": it encodes the same information as "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline
# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is also affected by
# the class imbalance, although slightly less than the linear model. Now, we
# will present different approaches to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter affects the computation of the loss in the linear model or the
# criterion in the tree-based model, penalizing misclassifications of the
# minority and the majority class differently. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# the linear model and the tree-based model.
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly because its criterion is not well suited to
# fighting the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
lr_clf = make_pipeline_with_sampler(
preprocessor_linear,
RandomUnderSampler(random_state=42),
LogisticRegression(max_iter=1000),
)
# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf = make_pipeline_with_sampler(
preprocessor_tree,
RandomUnderSampler(random_state=42),
RandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or the
# random forest keeps the classifier from focusing on the majority class, at
# the cost of making more mistakes for samples in the majority class (i.e.
# decreased accuracy).
#
# We could apply any type of sampler and find which one works best
# on the current dataset.
#
# Instead, we will present another approach: classifiers which apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on a decision
# tree. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This is
# the basis of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier
rf_clf = make_pipeline(
preprocessor_tree,
BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling step. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bag_clf = make_pipeline(
preprocessor_tree,
BalancedBaggingClassifier(
base_estimator=HistGradientBoostingClassifier(random_state=42),
n_estimators=10,
random_state=42,
n_jobs=2,
),
)
index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-samplings bring
# some diversity for the different GBDTs to learn from, so they do not focus on
# a single portion of the majority class.
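# %% [markdown]
# As a complementary illustration (not part of the original example), we can
# sketch a single held-out evaluation of the best pipeline. The 80/20
# stratified split below is an assumption chosen for illustration only.
# %%
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    df_res, y_res, test_size=0.2, stratify=y_res, random_state=42
)
bag_clf.fit(X_train, y_train)
print(
    "Held-out balanced accuracy of the balanced bagging pipeline: "
    f"{balanced_accuracy_score(y_test, bag_clf.predict(X_test)):.3f}"
)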
| [
"sklearn.ensemble.HistGradientBoostingClassifier",
"sklearn.datasets.fetch_openml",
"pandas.DataFrame",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.cross_validate",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"imblearn.ensemble.BalancedRandomForestClassifier",
"sklearn.impute.SimpleImputer",
"sklearn.compose.make_column_selector",
"sklearn.preprocessing.OrdinalEncoder",
"sklearn.dummy.DummyClassifier",
"imblearn.under_sampling.RandomUnderSampler"
]
| [((872, 936), 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""adult"""'], {'version': '(2)', 'as_frame': '(True)', 'return_X_y': '(True)'}), "('adult', version=2, as_frame=True, return_X_y=True)\n", (884, 936), False, 'from sklearn.datasets import fetch_openml\n'), ((1838, 1879), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1853, 1879), False, 'from sklearn.dummy import DummyClassifier\n'), ((1936, 1993), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dummy_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(dummy_clf, df_res, y_res, scoring=scoring)\n', (1950, 1993), False, 'from sklearn.model_selection import cross_validate\n'), ((2913, 2970), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dummy_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(dummy_clf, df_res, y_res, scoring=scoring)\n', (2927, 2970), False, 'from sklearn.model_selection import cross_validate\n'), ((3124, 3157), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (3136, 3157), True, 'import pandas as pd\n'), ((4868, 4922), 'sklearn.model_selection.cross_validate', 'cross_validate', (['lr_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(lr_clf, df_res, y_res, scoring=scoring)\n', (4882, 4922), False, 'from sklearn.model_selection import cross_validate\n'), ((5076, 5109), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (5088, 5109), True, 'import pandas as pd\n'), ((5675, 5725), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""', 'add_indicator': '(True)'}), "(strategy='mean', add_indicator=True)\n", (5688, 5725), False, 'from sklearn.impute import SimpleImputer\n'), ((6199, 6253), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(rf_clf, df_res, y_res, scoring=scoring)\n', (6213, 6253), False, 'from sklearn.model_selection import cross_validate\n'), ((6407, 6440), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (6419, 6440), True, 'import pandas as pd\n'), ((7354, 7408), 'sklearn.model_selection.cross_validate', 'cross_validate', (['lr_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(lr_clf, df_res, y_res, scoring=scoring)\n', (7368, 7408), False, 'from sklearn.model_selection import cross_validate\n'), ((7562, 7595), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (7574, 7595), True, 'import pandas as pd\n'), ((7747, 7801), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(rf_clf, df_res, y_res, scoring=scoring)\n', (7761, 7801), False, 'from sklearn.model_selection import cross_validate\n'), ((7955, 7988), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (7967, 7988), True, 'import pandas as pd\n'), ((8967, 9021), 'sklearn.model_selection.cross_validate', 'cross_validate', (['lr_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(lr_clf, df_res, y_res, scoring=scoring)\n', (8981, 9021), False, 'from sklearn.model_selection import cross_validate\n'), ((9175, 9208), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (9187, 9208), True, 'import pandas as pd\n'), ((9445, 9499), 'sklearn.model_selection.cross_validate', 'cross_validate', 
(['rf_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(rf_clf, df_res, y_res, scoring=scoring)\n', (9459, 9499), False, 'from sklearn.model_selection import cross_validate\n'), ((9653, 9686), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (9665, 9686), True, 'import pandas as pd\n'), ((10878, 10932), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(rf_clf, df_res, y_res, scoring=scoring)\n', (10892, 10932), False, 'from sklearn.model_selection import cross_validate\n'), ((11086, 11119), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (11098, 11119), True, 'import pandas as pd\n'), ((11889, 11944), 'sklearn.model_selection.cross_validate', 'cross_validate', (['bag_clf', 'df_res', 'y_res'], {'scoring': 'scoring'}), '(bag_clf, df_res, y_res, scoring=scoring)\n', (11903, 11944), False, 'from sklearn.model_selection import cross_validate\n'), ((12098, 12131), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'index': 'index'}), '(scores, index=index)\n', (12110, 12131), True, 'import pandas as pd\n'), ((3855, 3871), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3869, 3871), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3873, 3923), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""', 'add_indicator': '(True)'}), "(strategy='mean', add_indicator=True)\n", (3886, 3923), False, 'from sklearn.impute import SimpleImputer\n'), ((3956, 4012), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (3969, 4012), False, 'from sklearn.impute import SimpleImputer\n'), ((4018, 4056), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (4031, 4056), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4782, 4815), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)'}), '(max_iter=1000)\n', (4800, 4815), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5756, 5812), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (5769, 5812), False, 'from sklearn.impute import SimpleImputer\n'), ((5818, 5886), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {'handle_unknown': '"""use_encoded_value"""', 'unknown_value': '(-1)'}), "(handle_unknown='use_encoded_value', unknown_value=-1)\n", (5832, 5886), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((6102, 6151), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(42)', 'n_jobs': '(2)'}), '(random_state=42, n_jobs=2)\n', (6124, 6151), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8821, 8856), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': '(42)'}), '(random_state=42)\n', (8839, 8856), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((8862, 8895), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)'}), '(max_iter=1000)\n', (8880, 8895), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9289, 9324), 'imblearn.under_sampling.RandomUnderSampler', 
'RandomUnderSampler', ([], {'random_state': '(42)'}), '(random_state=42)\n', (9307, 9324), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((9330, 9379), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(42)', 'n_jobs': '(2)'}), '(random_state=42, n_jobs=2)\n', (9352, 9379), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((10763, 10820), 'imblearn.ensemble.BalancedRandomForestClassifier', 'BalancedRandomForestClassifier', ([], {'random_state': '(42)', 'n_jobs': '(2)'}), '(random_state=42, n_jobs=2)\n', (10793, 10820), False, 'from imblearn.ensemble import BalancedRandomForestClassifier\n'), ((4424, 4456), 'sklearn.compose.make_column_selector', 'selector', ([], {'dtype_include': '"""number"""'}), "(dtype_include='number')\n", (4432, 4456), True, 'from sklearn.compose import make_column_selector as selector\n'), ((4474, 4508), 'sklearn.compose.make_column_selector', 'selector', ([], {'dtype_include': '"""category"""'}), "(dtype_include='category')\n", (4482, 4508), True, 'from sklearn.compose import make_column_selector as selector\n'), ((5951, 5983), 'sklearn.compose.make_column_selector', 'selector', ([], {'dtype_include': '"""number"""'}), "(dtype_include='number')\n", (5959, 5983), True, 'from sklearn.compose import make_column_selector as selector\n'), ((6001, 6035), 'sklearn.compose.make_column_selector', 'selector', ([], {'dtype_include': '"""category"""'}), "(dtype_include='category')\n", (6009, 6035), True, 'from sklearn.compose import make_column_selector as selector\n'), ((11693, 11740), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (11723, 11740), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n')] |
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from django.conf import settings
from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot
from .forms import MusicFocusForm
from datetime import date, datetime, time, timedelta
class ActivityFilter(admin.SimpleListFilter):
title = _("Activity")
def lookups(self, request, model_admin):
return (
('yes', _("active")),
('no', _("inactive"))
)
def queryset(self, request, queryset):
if self.parameter_name == 'has_timeslots': # active/inactive Schedules
if self.value() == 'yes':
return queryset.filter(until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_schedules_timeslots': # active/inactive Shows
if self.value() == 'yes':
return queryset.filter(schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(schedules__until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_shows_schedules_timeslots': # active/inactive Hosts
if self.value() == 'yes':
return queryset.filter(shows__schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(shows__schedules__until__lt=datetime.now()).distinct()
class ActiveSchedulesFilter(ActivityFilter):
parameter_name = 'has_timeslots'
class ActiveShowsFilter(ActivityFilter):
parameter_name = 'has_schedules_timeslots'
class ActiveHostsFilter(ActivityFilter):
parameter_name = 'has_shows_schedules_timeslots'
class TypeAdmin(admin.ModelAdmin):
list_display = ('type', 'admin_color', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('type',)}
class MusicFocusAdmin(admin.ModelAdmin):
form = MusicFocusForm
list_display = ('focus', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('focus',)}
class CategoryAdmin(admin.ModelAdmin):
list_display = ('category', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('category',)}
class LanguageAdmin(admin.ModelAdmin):
list_display = ('name', 'is_active')
list_filter = ('is_active',)
class TopicAdmin(admin.ModelAdmin):
list_display = ('topic', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('topic',)}
class RTRCategoryAdmin(admin.ModelAdmin):
list_display = ('rtrcategory', 'abbrev', 'is_active' )
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('rtrcategory',)}
class HostAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'is_active')
list_filter = (ActiveHostsFilter, 'is_active',)
def get_queryset(self, request):
if request.user.is_superuser:
return Host.objects.all()
# Common users only see hosts of shows they own
return Host.objects.filter(shows__in=request.user.shows.all()).distinct()
class NoteAdmin(admin.ModelAdmin):
date_hierarchy = 'start'
list_display = ('title', 'show', 'start', 'status', 'user')
fields = (( 'show', 'timeslot'), 'title', 'slug', 'summary', 'content', 'image', 'host', 'status', 'cba_id')
prepopulated_fields = {'slug': ('title',)}
list_filter = ('status',)
ordering = ('timeslot',)
save_as = True
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/note_change.js', ]
def get_queryset(self, request):
if request.user.is_superuser:
shows = Show.objects.all()
else:
            # Common users only see notes of shows they own
shows = request.user.shows.all()
return super(NoteAdmin, self).get_queryset(request).filter(show__in=shows)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
four_weeks_ago = datetime.now() - timedelta(weeks=4)
in_twelve_weeks = datetime.now() + timedelta(weeks=12)
if db_field.name == 'timeslot':
# Adding/Editing a note: load timeslots of the user's shows into the dropdown
# TODO: Don't show any timeslot in the select by default.
# User should first choose show, then timeslots are loaded into the select via ajax.
#
# How to do this while not constraining the queryset?
# Saving won't be possible otherwise, if queryset doesn't contain the selectable elements beforehand
#kwargs['queryset'] = TimeSlot.objects.filter(show=-1)
# Superusers see every timeslot for every show
if request.user.is_superuser:
kwargs['queryset'] = TimeSlot.objects.filter(start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
# Users see timeslots of shows they own
else:
kwargs['queryset'] = TimeSlot.objects.filter(show__in=request.user.shows.all(), start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
if db_field.name == 'show':
# Adding/Editing a note: load user's shows into the dropdown
# Common users only see shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Show.objects.filter(pk__in=request.user.shows.all(), is_active=True)
if db_field.name == 'host':
# Common users only see hosts of shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Host.objects.filter(shows__in=request.user.shows.all(), is_active=True).distinct()
return super(NoteAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def save_model(self, request, obj, form, change):
# Save the creator when adding a note
if not change:
obj.user = request.user
# Try to get direct audio URL from CBA
obj.audio_url = Note.get_audio_url(obj.cba_id)
obj.save()
class TimeSlotInline(admin.TabularInline):
model = TimeSlot
ordering = ('-end',)
class TimeSlotAdmin(admin.ModelAdmin):
model = TimeSlot
class ScheduleAdmin(admin.ModelAdmin):
actions = ('renew',)
inlines = (TimeSlotInline,)
fields = (('rrule', 'byweekday'), ('dstart', 'tstart', 'tend'), 'until', 'is_repetition', 'automation_id', 'fallback_id')
list_display = ('get_show_name', 'byweekday', 'rrule', 'tstart', 'tend', 'until')
list_filter = (ActiveSchedulesFilter, 'byweekday', 'rrule', 'is_repetition')
ordering = ('byweekday', 'dstart')
save_on_top = True
search_fields = ('show__name',)
def renew(self, request, queryset):
next_year = date.today().year + 1
until = date(next_year, 12, 31)
renewed = queryset.update(until=until)
if renewed == 1:
message = _("1 schedule was renewed until %s") % until
else:
message = _("%s schedule were renewed until %s") % (renewed, until)
self.message_user(request, message)
renew.short_description = _("Renew selected schedules")
def get_show_name(self, obj):
return obj.show.name
get_show_name.admin_order_field = 'show'
get_show_name.short_description = "Show"
class ScheduleInline(admin.TabularInline):
model = Schedule
ordering = ('pk', '-until', 'byweekday')
class ShowAdmin(admin.ModelAdmin):
filter_horizontal = ('hosts', 'owners', 'musicfocus', 'category', 'topic', 'language')
inlines = (ScheduleInline,)
list_display = ('name', 'short_description')
list_filter = (ActiveShowsFilter, 'type', 'category', 'topic', 'musicfocus', 'rtrcategory', 'language')
ordering = ('slug',)
prepopulated_fields = {'slug': ('name',)}
search_fields = ('name', 'short_description', 'description')
fields = (
'predecessor', 'type', 'name', 'slug', 'image', 'logo', 'short_description', 'description',
'email', 'website', 'hosts', 'owners', 'language', 'category', 'rtrcategory', 'topic',
'musicfocus', 'fallback_id', 'cba_series_id',
)
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/show_change.js', ]
css = { 'all': ('/program/styles.css',) }
def get_queryset(self, request):
if request.user.is_superuser:
# Superusers see all shows
shows = Show.objects.all()
else:
# Users only see shows they own
shows = request.user.shows.all()
return super(ShowAdmin, self).get_queryset(request).filter(pk__in=shows)
def get_readonly_fields(self, request, obj=None):
'''Limit field access for common users'''
if not request.user.is_superuser:
# TODO: how to set field 'name' readonly although it's required?
return ('predecessor', 'type', 'hosts', 'owners', 'language', 'category', 'topic', 'musicfocus', 'rtrcategory')
return list()
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
try:
show_id = int(request.get_full_path().split('/')[-2])
except ValueError:
show_id = None
print(db_field.name)
if db_field.name == 'predecessor' and show_id:
kwargs['queryset'] = Show.objects.exclude(pk=show_id)
if db_field.name == 'type':
kwargs['queryset'] = Type.objects.filter(is_active=True)
if db_field.name == 'rtrcategory':
kwargs['queryset'] = RTRCategory.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'hosts':
kwargs["queryset"] = Host.objects.filter(is_active=True)
if db_field.name == 'language':
kwargs["queryset"] = Language.objects.filter(is_active=True)
if db_field.name == 'category':
kwargs["queryset"] = Category.objects.filter(is_active=True)
if db_field.name == 'topic':
kwargs["queryset"] = Topic.objects.filter(is_active=True)
if db_field.name == 'musicfocus':
kwargs["queryset"] = MusicFocus.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def save_formset(self, request, form, formset, change):
"""
Is called after the "save show"-form or collision-form were submitted
Saves the show after first submit
If any changes in schedules happened
* added/changed schedules are used to generate new timeslots and
matched against existing ones, which will be displayed in the collision form
If a collision form was submitted
* save the current schedule
* delete/create timeslots and relink notes after confirmation
Each step passes on to response_add or response_change which will
* either display the collision form for the next step
* or redirect to the original show-form if the resolving process has been finished
(= if either max_steps was surpassed or end_reached was True)
"""
self.end_reached = False
schedule_instances = formset.save(commit=False)
# If there are no schedules to save, do nothing
if schedule_instances:
show_id = schedule_instances[0].show.id
else:
self.end_reached = True
schedule = []
timeslots = []
max_steps = int(len(schedule_instances)) if len(schedule_instances) > 0 else 1
step = 1
if request.POST.get('step') == None:
# First save-show submit
# Generate thumbnails
if form.instance.image.name and settings.THUMBNAIL_SIZES:
for size in settings.THUMBNAIL_SIZES:
thumbnail = form.instance.image.crop[size].name
# Save show data only
form.save();
# Delete schedules (as well as related timeslots and notes) if flagged as such
for obj in formset.deleted_objects:
obj.delete()
# If nothing else changed, do nothing and redirect to show-form
if not formset.changed_objects and not formset.new_objects:
self.end_reached = True
else:
# If a collision form was submitted
step = int(request.POST.get('step'))
if request.POST.get('num_inputs') != None and int(request.POST.get('num_inputs')) > 0:
print("Resolving conflicts...")
'''Declare and retrieve variables'''
# Either datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') to create
# or ints of colliding timeslots to keep otherwise
resolved_timeslots = []
# IDs of colliding timeslots found in the db. If there's no corresponding collision to the
# same index in create_timeslot, value will be None
collisions = []
# Datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') for timeslots to create
create_timeslots = []
# IDs of timeslots to delete
delete_timeslots = set()
# Number of timeslots to be generated
num_inputs = int(request.POST.get('num_inputs'))
# Numbers of notes to relink for existing timeslots and newly created ones
# each of them relating to one of these POST vars:
# POST.ntids[idx][id] and POST.ntids[idx][note_id] contain ids of existing timeslots and note_ids to link, while
# POST.ntind[idx][id] and POST.ntind[idx][note_id] contain indices of corresponding elements in create_timeslots
# and note_ids which will be linked after they're created and thus split into two lists beforehand
num_ntids = int(request.POST.get('num_ntids'))
num_ntind = int(request.POST.get('num_ntind'))
# Retrieve POST vars of current schedule
schedule_id = int(request.POST.get('ps_save_id')) if request.POST.get('ps_save_id') != 'None' else None
rrule = RRule.objects.get(pk=int(request.POST.get('ps_save_rrule_id')))
show = Show.objects.get(pk=show_id)
byweekday = int(request.POST.get('ps_save_byweekday'))
tstart = datetime.strptime(request.POST.get('ps_save_tstart'), '%H:%M').time()
tend = datetime.strptime(request.POST.get('ps_save_tend'), '%H:%M').time()
dstart = datetime.strptime(request.POST.get('ps_save_dstart'), '%Y-%m-%d').date()
if dstart < datetime.today().date(): # Create or delete upcoming timeslots only
dstart = datetime.today().date()
until = datetime.strptime(request.POST.get('ps_save_until'), '%Y-%m-%d').date()
is_repetition = request.POST.get('ps_save_is_repetition')
automation_id = int(request.POST.get('ps_save_automation_id')) if request.POST.get('ps_save_automation_id') != 'None' else 0
fallback_id = int(request.POST.get('ps_save_fallback_id')) if request.POST.get('ps_save_fallback_id') != 'None' else 0
# Put timeslot POST vars into lists with same indices
for i in range(num_inputs):
resolved_ts = request.POST.get('resolved_timeslots[' + str(i) + ']')
if resolved_ts != None:
resolved_timeslots.append( resolved_ts )
create_timeslots.append( request.POST.get('create_timeslots[' + str(i) + ']') ) # May contain None
collisions.append( request.POST.get('collisions[' + str(i) + ']') ) # May contain None
else:
num_inputs -= 1
'''Prepare resolved timeslots'''
# Separate timeslots to delete from those to create
keep_collisions = []
for x in range(num_inputs):
if resolved_timeslots[x] == None or resolved_timeslots[x].isdigit():
# If it's a digit, keep the existing timeslot by preventing the new one from being created
create_timeslots[x] = None
keep_collisions.append(int(collisions[x]))
else:
# Otherwise collect the timeslot ids to be deleted later
if len(collisions[x]) > 0:
delete_timeslots.add(int(collisions[x]))
# Collect IDs of upcoming timeslots of the same schedule to delete except those in keep_collision
if schedule_id != None:
for ts in TimeSlot.objects.filter(start__gte=dstart,end__lte=until,schedule_id=schedule_id).exclude(pk__in=keep_collisions).values_list('id', flat=True):
delete_timeslots.add(ts)
'''Save schedule'''
new_schedule = Schedule(pk=schedule_id,
rrule=rrule,
byweekday=byweekday,
show=show,
dstart=dstart,
tstart=tstart,
tend=tend,
until=until,
is_repetition=is_repetition,
automation_id=automation_id,
fallback_id=fallback_id)
# Only save schedule if any timeslots changed
if len(resolved_timeslots) > 0:
new_schedule.save()
'''Relink notes to existing timeslots and prepare those to be linked'''
# Relink notes with existing timeslot ids
for i in range(num_ntids):
try:
note = Note.objects.get(pk=int(request.POST.get('ntids[' + str(i) + '][note_id]')))
note.timeslot_id = int(request.POST.get('ntids[' + str(i) + '][id]'))
note.save(update_fields=["timeslot_id"])
print("Rewrote note " + str(note.id) + "...to timeslot_id " + str(note.timeslot_id))
except ObjectDoesNotExist:
pass
# Put list indices of yet to be created timeslots and note_ids in corresponding lists to relink them during creation
note_indices = []
note_ids = []
for i in range(num_ntind):
note_indices.append( int(request.POST.get('ntind[' + str(i) + '][id]')) )
note_ids.append( int(request.POST.get('ntind[' + str(i) + '][note_id]')) )
'''Database changes for resolved timeslots and relinked notes for newly created'''
for idx, ts in enumerate(create_timeslots):
if ts != None:
start_end = ts.split(' - ')
# Only create upcoming timeslots
if datetime.strptime(start_end[0], "%Y-%m-%d %H:%M:%S") > datetime.today():
timeslot_created = TimeSlot.objects.create(schedule=new_schedule, is_repetition=new_schedule.is_repetition, start=start_end[0], end=start_end[1])
# Link a note to the new timeslot
if idx in note_indices:
note_idx = note_indices.index( idx ) # Get the note_id's index...
note_id = note_ids[note_idx] # ...which contains the note_id to relate to
try:
note = Note.objects.get(pk=note_id)
note.timeslot_id = timeslot_created.id
note.save(update_fields=["timeslot_id"])
print("Timeslot " + str(timeslot_created.id) + " linked to note " + str(note_id))
except ObjectDoesNotExist:
pass
# Finally delete discarded timeslots
for timeslot_id in delete_timeslots:
TimeSlot.objects.filter(pk=timeslot_id).delete()
if step > max_steps:
self.end_reached = True
'''
Everything below here is called when a new collision is loaded before being handed over to the client
'''
# Generate timeslots from current schedule
k = 1
for instance in schedule_instances:
if isinstance(instance, Schedule):
if k == step:
timeslots = Schedule.generate_timeslots(instance)
schedule = instance
break
k += 1
# Get collisions for timeslots
collisions = Schedule.get_collisions(timeslots)
# Get notes of colliding timeslots
notes = []
for id in collisions:
try:
notes.append( Note.objects.get(timeslot_id=id) )
except ObjectDoesNotExist:
pass
self.schedule = schedule
self.timeslots = timeslots
self.collisions = collisions
self.num_collisions = len([ s for s in self.collisions if s != 'None']) # Number of real collisions displayed to the user
self.notes = notes
self.showform = form
self.schedulesform = formset
self.step = step + 1 # Becomes upcoming step
self.max_steps = max_steps
# Pass it on to response_add() or response_change()
return self
def response_add(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def response_change(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def respond(self, request, obj):
"""
        Redirects to the show-change-form if no schedules changed or resolving has been finished (or any other form validation error occurred)
Displays the collision form for the current schedule otherwise
"""
# Never check for collisions if not superuser
        # Common users can't edit the formset, so save_formset() is never called and end_reached hasn't been set yet
if not request.user.is_superuser:
self.end_reached = True
if self.end_reached:
return super(ShowAdmin, self).response_change(request, obj)
timeslots_to_collisions = list(zip(self.timeslots, self.collisions))
return render(request, 'collisions.html', {'self' : self, 'obj': obj, 'request': request,
'timeslots': self.timeslots,
'collisions': self.collisions,
'schedule': self.schedule,
'timeslots_to_collisions': timeslots_to_collisions,
'schedulesform': self.schedulesform,
'showform': self.showform,
'num_inputs': len(self.timeslots),
'step': self.step,
'max_steps': self.max_steps,
'now': datetime.now(),
'num_collisions': self.num_collisions})
admin.site.register(Language, LanguageAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(MusicFocus, MusicFocusAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(RTRCategory, RTRCategoryAdmin)
admin.site.register(Host, HostAdmin)
admin.site.register(Note, NoteAdmin)
#admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(Show, ShowAdmin) | [
"django.utils.translation.ugettext_lazy",
"datetime.datetime.strptime",
"django.contrib.admin.site.register",
"datetime.date.today",
"datetime.datetime.now",
"datetime.date",
"datetime.datetime.today",
"datetime.timedelta"
]
| [((24620, 24664), 'django.contrib.admin.site.register', 'admin.site.register', (['Language', 'LanguageAdmin'], {}), '(Language, LanguageAdmin)\n', (24639, 24664), False, 'from django.contrib import admin\n'), ((24665, 24701), 'django.contrib.admin.site.register', 'admin.site.register', (['Type', 'TypeAdmin'], {}), '(Type, TypeAdmin)\n', (24684, 24701), False, 'from django.contrib import admin\n'), ((24702, 24750), 'django.contrib.admin.site.register', 'admin.site.register', (['MusicFocus', 'MusicFocusAdmin'], {}), '(MusicFocus, MusicFocusAdmin)\n', (24721, 24750), False, 'from django.contrib import admin\n'), ((24751, 24795), 'django.contrib.admin.site.register', 'admin.site.register', (['Category', 'CategoryAdmin'], {}), '(Category, CategoryAdmin)\n', (24770, 24795), False, 'from django.contrib import admin\n'), ((24796, 24834), 'django.contrib.admin.site.register', 'admin.site.register', (['Topic', 'TopicAdmin'], {}), '(Topic, TopicAdmin)\n', (24815, 24834), False, 'from django.contrib import admin\n'), ((24835, 24885), 'django.contrib.admin.site.register', 'admin.site.register', (['RTRCategory', 'RTRCategoryAdmin'], {}), '(RTRCategory, RTRCategoryAdmin)\n', (24854, 24885), False, 'from django.contrib import admin\n'), ((24886, 24922), 'django.contrib.admin.site.register', 'admin.site.register', (['Host', 'HostAdmin'], {}), '(Host, HostAdmin)\n', (24905, 24922), False, 'from django.contrib import admin\n'), ((24923, 24959), 'django.contrib.admin.site.register', 'admin.site.register', (['Note', 'NoteAdmin'], {}), '(Note, NoteAdmin)\n', (24942, 24959), False, 'from django.contrib import admin\n'), ((25006, 25050), 'django.contrib.admin.site.register', 'admin.site.register', (['TimeSlot', 'TimeSlotAdmin'], {}), '(TimeSlot, TimeSlotAdmin)\n', (25025, 25050), False, 'from django.contrib import admin\n'), ((25051, 25087), 'django.contrib.admin.site.register', 'admin.site.register', (['Show', 'ShowAdmin'], {}), '(Show, ShowAdmin)\n', (25070, 25087), False, 'from django.contrib import admin\n'), ((482, 495), 'django.utils.translation.ugettext_lazy', '_', (['"""Activity"""'], {}), "('Activity')\n", (483, 495), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7643, 7672), 'django.utils.translation.ugettext_lazy', '_', (['"""Renew selected schedules"""'], {}), "('Renew selected schedules')\n", (7644, 7672), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7312, 7335), 'datetime.date', 'date', (['next_year', '(12)', '(31)'], {}), '(next_year, 12, 31)\n', (7316, 7335), False, 'from datetime import date, datetime, time, timedelta\n'), ((4355, 4369), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4367, 4369), False, 'from datetime import date, datetime, time, timedelta\n'), ((4372, 4390), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4)'}), '(weeks=4)\n', (4381, 4390), False, 'from datetime import date, datetime, time, timedelta\n'), ((4417, 4431), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4429, 4431), False, 'from datetime import date, datetime, time, timedelta\n'), ((4434, 4453), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(12)'}), '(weeks=12)\n', (4443, 4453), False, 'from datetime import date, datetime, time, timedelta\n'), ((579, 590), 'django.utils.translation.ugettext_lazy', '_', (['"""active"""'], {}), "('active')\n", (580, 590), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((612, 625), 'django.utils.translation.ugettext_lazy', '_', (['"""inactive"""'], {}), "('inactive')\n", (613, 
625), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7274, 7286), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7284, 7286), False, 'from datetime import date, datetime, time, timedelta\n'), ((7430, 7466), 'django.utils.translation.ugettext_lazy', '_', (['"""1 schedule was renewed until %s"""'], {}), "('1 schedule was renewed until %s')\n", (7431, 7466), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7511, 7549), 'django.utils.translation.ugettext_lazy', '_', (['"""%s schedule were renewed until %s"""'], {}), "('%s schedule were renewed until %s')\n", (7512, 7549), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((24511, 24525), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24523, 24525), False, 'from datetime import date, datetime, time, timedelta\n'), ((15476, 15492), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (15490, 15492), False, 'from datetime import date, datetime, time, timedelta\n'), ((15573, 15589), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (15587, 15589), False, 'from datetime import date, datetime, time, timedelta\n'), ((20106, 20158), 'datetime.datetime.strptime', 'datetime.strptime', (['start_end[0]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(start_end[0], '%Y-%m-%d %H:%M:%S')\n", (20123, 20158), False, 'from datetime import date, datetime, time, timedelta\n'), ((20161, 20177), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (20175, 20177), False, 'from datetime import date, datetime, time, timedelta\n'), ((848, 862), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (860, 862), False, 'from datetime import date, datetime, time, timedelta\n'), ((961, 975), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (973, 975), False, 'from datetime import date, datetime, time, timedelta\n'), ((1173, 1187), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1185, 1187), False, 'from datetime import date, datetime, time, timedelta\n'), ((1297, 1311), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1309, 1311), False, 'from datetime import date, datetime, time, timedelta\n'), ((1522, 1536), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1534, 1536), False, 'from datetime import date, datetime, time, timedelta\n'), ((1653, 1667), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1665, 1667), False, 'from datetime import date, datetime, time, timedelta\n')] |
import os
import glob
import json
from pathlib import Path
from flask_restful import Api, Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import Flask, request, escape, make_response, send_from_directory
import utils
# in case you can't install ansi2html, its absence won't break the api
try:
    from ansi2html import Ansi2HTMLConverter
except ImportError:
    pass
current_path = os.path.dirname(os.path.realpath(__file__))
'''
render stdout content
'''
class Wscdn(Resource):
def verify_file(self, filename):
option_files = glob.glob(
current_path + '/storages/**/options.json', recursive=True)
        # loop through all available options
for option in option_files:
json_option = utils.reading_json(option)
stdout_path = json_option.get('WORKSPACES') + "/" + filename
if utils.not_empty_file(stdout_path):
return json_option.get('WORKSPACES'), os.path.normpath(filename)
# get real path
p = Path(filename)
ws = p.parts[0]
if ws != utils.url_encode(ws):
# just replace the first one
filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
if utils.not_empty_file(stdout_path_encode):
return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)
return False, False
def get(self, filename):
ws_path, stdout_path = self.verify_file(filename)
if not stdout_path:
return 'Custom 404 here', 404
return send_from_directory(ws_path, stdout_path)
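# --- hedged wiring sketch (not part of the original module) ------------------
# Shows how this Resource is typically exposed with flask_restful. The route
# '/wscdn/<path:filename>' and the standalone app below are assumptions for
# illustration; the real project wires the resource into its own Flask app.
if __name__ == '__main__':
    _app = Flask(__name__)
    _api = Api(_app)
    # <path:filename> lets workspace-relative paths (with slashes) reach get()
    _api.add_resource(Wscdn, '/wscdn/<path:filename>')
    _app.run()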
| [
"flask.send_from_directory",
"pathlib.Path",
"utils.not_empty_file",
"os.path.realpath",
"os.path.normpath",
"glob.glob",
"utils.url_encode",
"utils.reading_json"
]
| [((405, 431), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (421, 431), False, 'import os\n'), ((551, 620), 'glob.glob', 'glob.glob', (["(current_path + '/storages/**/options.json')"], {'recursive': '(True)'}), "(current_path + '/storages/**/options.json', recursive=True)\n", (560, 620), False, 'import glob\n'), ((1679, 1720), 'flask.send_from_directory', 'send_from_directory', (['ws_path', 'stdout_path'], {}), '(ws_path, stdout_path)\n', (1698, 1720), False, 'from flask import Flask, request, escape, make_response, send_from_directory\n'), ((740, 766), 'utils.reading_json', 'utils.reading_json', (['option'], {}), '(option)\n', (758, 766), False, 'import utils\n'), ((856, 889), 'utils.not_empty_file', 'utils.not_empty_file', (['stdout_path'], {}), '(stdout_path)\n', (876, 889), False, 'import utils\n'), ((1018, 1032), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (1022, 1032), False, 'from pathlib import Path\n'), ((1082, 1102), 'utils.url_encode', 'utils.url_encode', (['ws'], {}), '(ws)\n', (1098, 1102), False, 'import utils\n'), ((1333, 1373), 'utils.not_empty_file', 'utils.not_empty_file', (['stdout_path_encode'], {}), '(stdout_path_encode)\n', (1353, 1373), False, 'import utils\n'), ((945, 971), 'os.path.normpath', 'os.path.normpath', (['filename'], {}), '(filename)\n', (961, 971), False, 'import os\n'), ((1204, 1224), 'utils.url_encode', 'utils.url_encode', (['ws'], {}), '(ws)\n', (1220, 1224), False, 'import utils\n'), ((1433, 1466), 'os.path.normpath', 'os.path.normpath', (['filename_encode'], {}), '(filename_encode)\n', (1449, 1466), False, 'import os\n')] |
"""Module with hahomematic services."""
from __future__ import annotations
from datetime import datetime
import logging
from hahomematic.const import (
ATTR_ADDRESS,
ATTR_INTERFACE_ID,
ATTR_NAME,
ATTR_PARAMETER,
ATTR_VALUE,
HmPlatform,
)
from hahomematic.device import HmDevice
from hahomematic.entity import BaseEntity, GenericEntity
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_VALUE_TYPE,
DOMAIN,
)
from .control_unit import ControlUnit, HaHub
from .helpers import get_device_address_at_interface_from_identifiers
_LOGGER = logging.getLogger(__name__)
ATTR_CHANNEL = "channel"
ATTR_DEVICE_ID = "device_id"
DEFAULT_CHANNEL = 1
SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition"
SERVICE_PUT_PARAMSET = "put_paramset"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
HAHM_SERVICES = [
SERVICE_EXPORT_DEVICE_DEFINITION,
SERVICE_PUT_PARAMSET,
SERVICE_SET_DEVICE_VALUE,
SERVICE_SET_INSTALL_MODE,
SERVICE_SET_VARIABLE_VALUE,
]
SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
}
)
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
}
)
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
{
vol.Required(ATTR_INTERFACE_ID): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_VALUE_TYPE): vol.In(
["boolean", "dateTime.iso8601", "double", "int", "string"]
),
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
async def async_setup_services(hass: HomeAssistant) -> None:
"""Create the hahomematic services."""
@verify_domain_control(hass, DOMAIN)
async def async_call_hahm_service(service: ServiceCall) -> None:
"""Call correct HomematicIP Cloud service."""
service_name = service.service
if service_name == SERVICE_EXPORT_DEVICE_DEFINITION:
await _async_service_export_device_definition(hass=hass, service=service)
elif service_name == SERVICE_PUT_PARAMSET:
await _async_service_put_paramset(hass=hass, service=service)
elif service_name == SERVICE_SET_INSTALL_MODE:
await _async_service_set_install_mode(hass=hass, service=service)
elif service_name == SERVICE_SET_DEVICE_VALUE:
await _async_service_set_device_value(hass=hass, service=service)
elif service_name == SERVICE_SET_VARIABLE_VALUE:
await _async_service_set_variable_value(hass=hass, service=service)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_EXPORT_DEVICE_DEFINITION,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_VARIABLE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_DEVICE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
)
async_register_admin_service(
hass=hass,
domain=DOMAIN,
service=SERVICE_SET_INSTALL_MODE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_PUT_PARAMSET,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_PUT_PARAMSET,
)
async def async_unload_services(hass: HomeAssistant) -> None:
"""Unload HAHM services."""
if hass.data[DOMAIN]:
return
for hahm_service in HAHM_SERVICES:
hass.services.async_remove(domain=DOMAIN, service=hahm_service)
async def _async_service_export_device_definition(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
if hm_device := _get_device(hass=hass, device_id=device_id):
await hm_device.export_device_definition()
_LOGGER.debug(
"Calling export_device_definition: %s, %s",
hm_device.name,
hm_device.device_address,
)
async def _async_service_set_variable_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic system variable."""
entity_id = service.data[ATTR_ENTITY_ID]
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if hub := _get_hub_by_entity_id(hass=hass, entity_id=entity_id):
await hub.async_set_variable(name=name, value=value)
async def _async_service_set_device_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
parameter = service.data[ATTR_PARAMETER]
value = service.data[ATTR_VALUE]
rx_mode = service.data.get(ATTR_RX_MODE)
# Convert value into correct XML-RPC Type.
# https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
if value_type := service.data.get(ATTR_VALUE_TYPE):
if value_type == "int":
value = int(value)
elif value_type == "double":
value = float(value)
elif value_type == "boolean":
value = bool(value)
elif value_type == "dateTime.iso8601":
value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
else:
# Default is 'string'
value = str(value)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling setValue: %s, %s, %s, %s, %s, %s",
interface_id,
channel_address,
parameter,
value,
value_type,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.set_value(
interface_id=interface_id,
channel_address=channel_address,
parameter=parameter,
value=value,
rx_mode=rx_mode,
)
async def _async_service_set_install_mode(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to set interface_id into install mode."""
interface_id = service.data[ATTR_INTERFACE_ID]
mode: int = service.data.get(ATTR_MODE, 1)
time: int = service.data.get(ATTR_TIME, 60)
device_address = service.data.get(ATTR_ADDRESS)
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
await control_unit.central.set_install_mode(
interface_id, t=time, mode=mode, device_address=device_address
)
async def _async_service_put_paramset(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call the putParamset method on a HomeMatic connection."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
paramset_key = service.data[ATTR_PARAMSET_KEY]
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data[ATTR_PARAMSET])
rx_mode = service.data.get(ATTR_RX_MODE)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s, %s",
interface_id,
channel_address,
paramset_key,
paramset,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.put_paramset(
interface_id=interface_id,
channel_address=channel_address,
paramset=paramset_key,
value=paramset,
rx_mode=rx_mode,
)
def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None:
"""Return the homematic device."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.hm_devices.get(device_address)
return None
def _get_interface_channel_address(
hass: HomeAssistant, device_id: str, channel: int
) -> tuple[str, str] | None:
"""Return interface and channel_address with given device_id and channel."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
channel_address = f"{device_address}:{channel}"
return interface_id, channel_address
def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None:
"""Return entity by given entity_id."""
control_unit: ControlUnit
for control_unit in hass.data[DOMAIN].values():
if hm_entity := control_unit.async_get_hm_entity(entity_id=entity_id):
if isinstance(hm_entity, BaseEntity):
return hm_entity
return None
def _get_entities_by_platform(
hass: HomeAssistant, platform: HmPlatform
) -> list[BaseEntity]:
"""Return entities by given platform."""
control_unit: ControlUnit
hm_entities: list[BaseEntity] = []
for control_unit in hass.data[DOMAIN].values():
hm_entities.extend(
control_unit.async_get_hm_entities_by_platform(platform=platform)
)
return hm_entities
def _get_hm_entity(
hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str
) -> GenericEntity | None:
"""Get homematic entity."""
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.get_hm_entity_by_parameter(
channel_address=channel_address, parameter=parameter
)
return None
def _get_cu_by_interface_id(
hass: HomeAssistant, interface_id: str
) -> ControlUnit | None:
"""Get ControlUnit by interface_id."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if control_unit and control_unit.central.clients.get(interface_id):
return control_unit
return None
def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None:
"""Get ControlUnit by device address."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if (
control_unit
and control_unit.hub
and control_unit.hub.entity_id == entity_id
):
return control_unit.hub
return None
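# --- Illustrative example (not part of the original module) ---
# A minimal sketch of service-call payloads that should validate against the
# voluptuous schemas defined above. The device_id and interface_id values are
# hypothetical placeholders, as is the "LEVEL" parameter name.
_EXAMPLE_SET_DEVICE_VALUE_DATA = {
    ATTR_DEVICE_ID: "0123456789abcdef0123456789abcdef",  # hypothetical registry id
    ATTR_CHANNEL: 1,
    ATTR_PARAMETER: "LEVEL",
    ATTR_VALUE: "0.75",
    ATTR_VALUE_TYPE: "double",  # converted to float before set_value() is called
}
_EXAMPLE_SET_INSTALL_MODE_DATA = {
    ATTR_INTERFACE_ID: "BidCos-RF",  # hypothetical interface id
    ATTR_TIME: 60,
    ATTR_MODE: 1,
}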
| [
"logging.getLogger",
"voluptuous.Required",
"homeassistant.helpers.service.verify_domain_control",
"datetime.datetime.strptime",
"homeassistant.helpers.device_registry.async_get",
"homeassistant.helpers.service.async_register_admin_service",
"voluptuous.Optional",
"voluptuous.Coerce",
"voluptuous.All",
"voluptuous.In"
]
| [((1103, 1130), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1120, 1130), False, 'import logging\n'), ((3261, 3296), 'homeassistant.helpers.service.verify_domain_control', 'verify_domain_control', (['hass', 'DOMAIN'], {}), '(hass, DOMAIN)\n', (3282, 3296), False, 'from homeassistant.helpers.service import async_register_admin_service, verify_domain_control\n'), ((4760, 4936), 'homeassistant.helpers.service.async_register_admin_service', 'async_register_admin_service', ([], {'hass': 'hass', 'domain': 'DOMAIN', 'service': 'SERVICE_SET_INSTALL_MODE', 'service_func': 'async_call_hahm_service', 'schema': 'SCHEMA_SERVICE_SET_INSTALL_MODE'}), '(hass=hass, domain=DOMAIN, service=\n SERVICE_SET_INSTALL_MODE, service_func=async_call_hahm_service, schema=\n SCHEMA_SERVICE_SET_INSTALL_MODE)\n', (4788, 4936), False, 'from homeassistant.helpers.service import async_register_admin_service, verify_domain_control\n'), ((10320, 10338), 'homeassistant.helpers.device_registry.async_get', 'dr.async_get', (['hass'], {}), '(hass)\n', (10332, 10338), True, 'from homeassistant.helpers import device_registry as dr\n'), ((11080, 11098), 'homeassistant.helpers.device_registry.async_get', 'dr.async_get', (['hass'], {}), '(hass)\n', (11092, 11098), True, 'from homeassistant.helpers import device_registry as dr\n'), ((1695, 1723), 'voluptuous.Required', 'vol.Required', (['ATTR_DEVICE_ID'], {}), '(ATTR_DEVICE_ID)\n', (1707, 1723), True, 'import voluptuous as vol\n'), ((1807, 1835), 'voluptuous.Required', 'vol.Required', (['ATTR_ENTITY_ID'], {}), '(ATTR_ENTITY_ID)\n', (1819, 1835), True, 'import voluptuous as vol\n'), ((1862, 1885), 'voluptuous.Required', 'vol.Required', (['ATTR_NAME'], {}), '(ATTR_NAME)\n', (1874, 1885), True, 'import voluptuous as vol\n'), ((1906, 1930), 'voluptuous.Required', 'vol.Required', (['ATTR_VALUE'], {}), '(ATTR_VALUE)\n', (1918, 1930), True, 'import voluptuous as vol\n'), ((2015, 2046), 'voluptuous.Required', 'vol.Required', (['ATTR_INTERFACE_ID'], {}), '(ATTR_INTERFACE_ID)\n', (2027, 2046), True, 'import voluptuous as vol\n'), ((2067, 2102), 'voluptuous.Optional', 'vol.Optional', (['ATTR_TIME'], {'default': '(60)'}), '(ATTR_TIME, default=60)\n', (2079, 2102), True, 'import voluptuous as vol\n'), ((2129, 2163), 'voluptuous.Optional', 'vol.Optional', (['ATTR_MODE'], {'default': '(1)'}), '(ATTR_MODE, default=1)\n', (2141, 2163), True, 'import voluptuous as vol\n'), ((2215, 2241), 'voluptuous.Optional', 'vol.Optional', (['ATTR_ADDRESS'], {}), '(ATTR_ADDRESS)\n', (2227, 2241), True, 'import voluptuous as vol\n'), ((2243, 2272), 'voluptuous.All', 'vol.All', (['cv.string', 'vol.Upper'], {}), '(cv.string, vol.Upper)\n', (2250, 2272), True, 'import voluptuous as vol\n'), ((2343, 2371), 'voluptuous.Required', 'vol.Required', (['ATTR_DEVICE_ID'], {}), '(ATTR_DEVICE_ID)\n', (2355, 2371), True, 'import voluptuous as vol\n'), ((2392, 2443), 'voluptuous.Required', 'vol.Required', (['ATTR_CHANNEL'], {'default': 'DEFAULT_CHANNEL'}), '(ATTR_CHANNEL, default=DEFAULT_CHANNEL)\n', (2404, 2443), True, 'import voluptuous as vol\n'), ((2470, 2498), 'voluptuous.Required', 'vol.Required', (['ATTR_PARAMETER'], {}), '(ATTR_PARAMETER)\n', (2482, 2498), True, 'import voluptuous as vol\n'), ((2539, 2563), 'voluptuous.Required', 'vol.Required', (['ATTR_VALUE'], {}), '(ATTR_VALUE)\n', (2551, 2563), True, 'import voluptuous as vol\n'), ((2587, 2616), 'voluptuous.Optional', 'vol.Optional', (['ATTR_VALUE_TYPE'], {}), '(ATTR_VALUE_TYPE)\n', (2599, 2616), True, 'import voluptuous as vol\n'), 
((2716, 2742), 'voluptuous.Optional', 'vol.Optional', (['ATTR_RX_MODE'], {}), '(ATTR_RX_MODE)\n', (2728, 2742), True, 'import voluptuous as vol\n'), ((2445, 2460), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (2455, 2460), True, 'import voluptuous as vol\n'), ((2500, 2529), 'voluptuous.All', 'vol.All', (['cv.string', 'vol.Upper'], {}), '(cv.string, vol.Upper)\n', (2507, 2529), True, 'import voluptuous as vol\n'), ((2618, 2684), 'voluptuous.In', 'vol.In', (["['boolean', 'dateTime.iso8601', 'double', 'int', 'string']"], {}), "(['boolean', 'dateTime.iso8601', 'double', 'int', 'string'])\n", (2624, 2684), True, 'import voluptuous as vol\n'), ((2744, 2773), 'voluptuous.All', 'vol.All', (['cv.string', 'vol.Upper'], {}), '(cv.string, vol.Upper)\n', (2751, 2773), True, 'import voluptuous as vol\n'), ((2840, 2868), 'voluptuous.Required', 'vol.Required', (['ATTR_DEVICE_ID'], {}), '(ATTR_DEVICE_ID)\n', (2852, 2868), True, 'import voluptuous as vol\n'), ((2889, 2940), 'voluptuous.Required', 'vol.Required', (['ATTR_CHANNEL'], {'default': 'DEFAULT_CHANNEL'}), '(ATTR_CHANNEL, default=DEFAULT_CHANNEL)\n', (2901, 2940), True, 'import voluptuous as vol\n'), ((2967, 2998), 'voluptuous.Required', 'vol.Required', (['ATTR_PARAMSET_KEY'], {}), '(ATTR_PARAMSET_KEY)\n', (2979, 2998), True, 'import voluptuous as vol\n'), ((3039, 3066), 'voluptuous.Required', 'vol.Required', (['ATTR_PARAMSET'], {}), '(ATTR_PARAMSET)\n', (3051, 3066), True, 'import voluptuous as vol\n'), ((3082, 3108), 'voluptuous.Optional', 'vol.Optional', (['ATTR_RX_MODE'], {}), '(ATTR_RX_MODE)\n', (3094, 3108), True, 'import voluptuous as vol\n'), ((2942, 2957), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (2952, 2957), True, 'import voluptuous as vol\n'), ((3000, 3029), 'voluptuous.All', 'vol.All', (['cv.string', 'vol.Upper'], {}), '(cv.string, vol.Upper)\n', (3007, 3029), True, 'import voluptuous as vol\n'), ((3110, 3139), 'voluptuous.All', 'vol.All', (['cv.string', 'vol.Upper'], {}), '(cv.string, vol.Upper)\n', (3117, 3139), True, 'import voluptuous as vol\n'), ((2173, 2188), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (2183, 2188), True, 'import voluptuous as vol\n'), ((2190, 2204), 'voluptuous.In', 'vol.In', (['[1, 2]'], {}), '([1, 2])\n', (2196, 2204), True, 'import voluptuous as vol\n'), ((7172, 7215), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y%m%dT%H:%M:%S"""'], {}), "(value, '%Y%m%dT%H:%M:%S')\n", (7189, 7215), False, 'from datetime import datetime\n')] |
# Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Livro',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagem', models.ImageField(upload_to='imagens')),
('titulo', models.CharField(max_length=150)),
('autor', models.CharField(max_length=50)),
('genero', models.CharField(max_length=50)),
('serieunico', models.CharField(max_length=50)),
('nota', models.CharField(max_length=2)),
('opiniao', models.CharField(max_length=300)),
],
),
]
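# --- Illustrative note (not part of the generated migration) ---
# The models.py definition that would produce the CreateModel operation above
# would look roughly like this (field names and options taken from the operation):
#
#     class Livro(models.Model):
#         imagem = models.ImageField(upload_to='imagens')
#         titulo = models.CharField(max_length=150)
#         autor = models.CharField(max_length=50)
#         genero = models.CharField(max_length=50)
#         serieunico = models.CharField(max_length=50)
#         nota = models.CharField(max_length=2)
#         opiniao = models.CharField(max_length=300)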
| [
"django.db.models.ImageField",
"django.db.models.CharField",
"django.db.models.BigAutoField"
]
| [((301, 397), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (320, 397), False, 'from django.db import migrations, models\n'), ((423, 461), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""imagens"""'}), "(upload_to='imagens')\n", (440, 461), False, 'from django.db import migrations, models\n'), ((491, 523), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (507, 523), False, 'from django.db import migrations, models\n'), ((552, 583), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (568, 583), False, 'from django.db import migrations, models\n'), ((613, 644), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (629, 644), False, 'from django.db import migrations, models\n'), ((678, 709), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (694, 709), False, 'from django.db import migrations, models\n'), ((737, 767), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)'}), '(max_length=2)\n', (753, 767), False, 'from django.db import migrations, models\n'), ((798, 830), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (814, 830), False, 'from django.db import migrations, models\n')] |
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by <NAME>
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")
def replace_suppress_warnings(line):
return PTRN_SUPPRESS_WARNINGS.sub('@SuppressWarnings("all")', line)
def replace_rulectx(line):
return line.replace("(RuleContext)_localctx", "_localctx")
def replace_localctx(line):
return re.sub(r'\(\((([a-zA-Z]*?_?)*[a-zA-Z]*)\)_localctx\)', '_localctx', line)
TRANSFORMS = [
replace_suppress_warnings,
replace_rulectx,
replace_localctx,
]
def postprocess(file):
lines = []
for line in file:
for transform in TRANSFORMS:
line = transform(line)
lines.append(line)
return ''.join(lines)
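# Illustrative helper (not part of the original script): shows what the
# transforms above do to typical ANTLR-generated Java lines; the context
# class name "ExprContext" is made up.
def _example_of_transforms():
    lines = [
        '@SuppressWarnings({"unchecked", "cast"})\n',
        'return ((ExprContext)_localctx);\n',
    ]
    # postprocess() applies all TRANSFORMS to each line and joins the result:
    # '@SuppressWarnings("all")\nreturn _localctx;\n'
    return postprocess(lines)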
if __name__ == '__main__':
fpath = sys.argv[1]
with open(fpath, 'r') as FILE:
content = COPYRIGHT_HEADER.format(postprocess(FILE))
with open(fpath, 'w+') as FILE:
FILE.write(content)
| [
"re.sub",
"re.compile"
]
| [((3344, 3377), 're.compile', 're.compile', (['"""@SuppressWarnings.*"""'], {}), "('@SuppressWarnings.*')\n", (3354, 3377), False, 'import re\n'), ((3634, 3710), 're.sub', 're.sub', (['"""\\\\(\\\\((([a-zA-Z]*?_?)*[a-zA-Z]*)\\\\)_localctx\\\\)"""', '"""_localctx"""', 'line'], {}), "('\\\\(\\\\((([a-zA-Z]*?_?)*[a-zA-Z]*)\\\\)_localctx\\\\)', '_localctx', line)\n", (3640, 3710), False, 'import re\n')] |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Commons module to manage AMQP connections on REANA."""
import json
import logging
from kombu import Connection, Exchange, Queue
from .config import (
MQ_CONNECTION_STRING,
MQ_DEFAULT_EXCHANGE,
MQ_DEFAULT_FORMAT,
MQ_DEFAULT_QUEUES,
MQ_PRODUCER_MAX_RETRIES,
)
class BasePublisher(object):
"""Base publisher to MQ."""
def __init__(
self,
queue,
routing_key,
connection=None,
exchange=None,
durable=False,
max_priority=None,
):
"""Initialise the BasePublisher class.
:param connection: A :class:`kombu.Connection`, if not provided a
:class:`kombu.Connection` with the default configuration will
be instantiated.
:param queue: String which represents the queue the messages will
be sent to.
:param routing_key: String which represents the routing key which
will be used to send the messages, if not provided default
routing key will be used.
:param exchange: A :class:`kombu.Exchange` where the messages will
be delivered to, if not provided, it will be instantiated with
the default configuration.
"""
self._routing_key = routing_key
self._exchange = (
exchange
if isinstance(exchange, Exchange)
else Exchange(name=exchange or MQ_DEFAULT_EXCHANGE, type="direct")
)
self._queue = (
queue
if isinstance(queue, Queue)
else Queue(
queue,
durable=durable,
exchange=self._exchange,
routing_key=self._routing_key,
max_priority=max_priority,
)
)
self._connection = connection or Connection(MQ_CONNECTION_STRING)
self.producer = self._build_producer()
def _build_producer(self):
"""Instantiate a :class:`kombu.Producer`."""
return self._connection.Producer(serializer=MQ_DEFAULT_FORMAT)
def __error_callback(self, exception, interval):
"""Execute when there is an error while sending a message.
:param exception: Exception which has been thrown while trying to send
the message.
:param interval: Interval in which the message delivery will be
retried.
"""
logging.error("Error while publishing {}".format(exception))
logging.info("Retry in %s seconds.", interval)
def _publish(self, msg, priority=None):
"""Publish, handling retries, a message in the queue.
:param msg: Object which represents the message to be sent in
the queue. Note that this object should be serializable in the
configured format (by default JSON).
:param priority: Message priority.
"""
connection = self._connection.clone()
publish = connection.ensure(
self.producer,
self.producer.publish,
errback=self.__error_callback,
max_retries=MQ_PRODUCER_MAX_RETRIES,
)
publish(
json.dumps(msg),
exchange=self._exchange,
routing_key=self._routing_key,
declare=[self._queue],
priority=priority,
)
logging.debug("Publisher: message sent: %s", msg)
def close(self):
"""Close connection."""
logging.debug("Publisher: closing queue connection")
self._connection.release()
class WorkflowStatusPublisher(BasePublisher):
"""Progress publisher to MQ."""
def __init__(self, **kwargs):
"""Initialise the WorkflowStatusPublisher class."""
queue = "jobs-status"
if "queue" not in kwargs:
kwargs["queue"] = "jobs-status"
if "routing_key" not in kwargs:
kwargs["routing_key"] = MQ_DEFAULT_QUEUES[queue]["routing_key"]
if "durable" not in kwargs:
kwargs["durable"] = MQ_DEFAULT_QUEUES[queue]["durable"]
super(WorkflowStatusPublisher, self).__init__(**kwargs)
def publish_workflow_status(self, workflow_uuid, status, logs="", message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message,
}
self._publish(msg)
class WorkflowSubmissionPublisher(BasePublisher):
"""Workflow submission publisher."""
def __init__(self, **kwargs):
"""Initialise the WorkflowSubmissionPublisher class."""
queue = "workflow-submission"
super(WorkflowSubmissionPublisher, self).__init__(
queue,
MQ_DEFAULT_QUEUES[queue]["routing_key"],
durable=MQ_DEFAULT_QUEUES[queue]["durable"],
max_priority=MQ_DEFAULT_QUEUES[queue]["max_priority"],
**kwargs
)
def publish_workflow_submission(
self, user_id, workflow_id_or_name, parameters, priority=0, min_job_memory=0,
):
"""Publish workflow submission parameters."""
msg = {
"user": user_id,
"workflow_id_or_name": workflow_id_or_name,
"parameters": parameters,
"priority": priority,
"min_job_memory": min_job_memory,
}
self._publish(msg, priority)
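def _example_usage():
    """Illustrative only (not part of the original module).

    Minimal sketch of publishing a status update, assuming the broker behind
    MQ_CONNECTION_STRING is reachable; the workflow UUID and status code are
    made-up placeholders.
    """
    publisher = WorkflowStatusPublisher()
    publisher.publish_workflow_status(
        workflow_uuid="11111111-2222-3333-4444-555555555555",
        status=2,  # placeholder integer status code from the reana-db Workflow model
        logs="workflow finished",
    )
    publisher.close()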
| [
"kombu.Queue",
"logging.debug",
"json.dumps",
"kombu.Connection",
"logging.info",
"kombu.Exchange"
]
| [((2684, 2730), 'logging.info', 'logging.info', (['"""Retry in %s seconds."""', 'interval'], {}), "('Retry in %s seconds.', interval)\n", (2696, 2730), False, 'import logging\n'), ((3545, 3594), 'logging.debug', 'logging.debug', (['"""Publisher: message sent: %s"""', 'msg'], {}), "('Publisher: message sent: %s', msg)\n", (3558, 3594), False, 'import logging\n'), ((3657, 3709), 'logging.debug', 'logging.debug', (['"""Publisher: closing queue connection"""'], {}), "('Publisher: closing queue connection')\n", (3670, 3709), False, 'import logging\n'), ((1610, 1671), 'kombu.Exchange', 'Exchange', ([], {'name': '(exchange or MQ_DEFAULT_EXCHANGE)', 'type': '"""direct"""'}), "(name=exchange or MQ_DEFAULT_EXCHANGE, type='direct')\n", (1618, 1671), False, 'from kombu import Connection, Exchange, Queue\n'), ((1781, 1898), 'kombu.Queue', 'Queue', (['queue'], {'durable': 'durable', 'exchange': 'self._exchange', 'routing_key': 'self._routing_key', 'max_priority': 'max_priority'}), '(queue, durable=durable, exchange=self._exchange, routing_key=self.\n _routing_key, max_priority=max_priority)\n', (1786, 1898), False, 'from kombu import Connection, Exchange, Queue\n'), ((2040, 2072), 'kombu.Connection', 'Connection', (['MQ_CONNECTION_STRING'], {}), '(MQ_CONNECTION_STRING)\n', (2050, 2072), False, 'from kombu import Connection, Exchange, Queue\n'), ((3364, 3379), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (3374, 3379), False, 'import json\n')] |
from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
class Commit(BaseObject):
__tablename__ = 'commits'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
sha = Column(String(40), unique=True, nullable=False)
message = Column(Text)
parent_a = Column(String(40))
parent_b = Column(String(40))
production_reported = Column(Integer)
class Issue(BaseObject):
__tablename__ = 'issues'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
number = Column(Integer, unique=True, nullable=False)
state = Column(String(10))
title = Column(Text)
body = Column(Text)
labels = Column(String(128))
assignee = Column(String(128))
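def _example_add_commit(session):
    """Illustrative only (not part of the original module).

    Minimal sketch of persisting a Commit row through an already-configured
    SQLAlchemy session, assuming BaseObject is a declarative base; the sha is
    a made-up placeholder.
    """
    session.add(Commit(sha="0" * 40, message="initial commit", production_reported=0))
    session.commit()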
| [
"sqlalchemy.String",
"sqlalchemy.sql.functions.current_timestamp",
"sqlalchemy.Column"
]
| [((262, 318), 'sqlalchemy.Column', 'Column', (['BigInteger'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(BigInteger, primary_key=True, autoincrement=True)\n', (268, 318), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((579, 591), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (585, 591), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((686, 701), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (692, 701), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((768, 824), 'sqlalchemy.Column', 'Column', (['BigInteger'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(BigInteger, primary_key=True, autoincrement=True)\n', (774, 824), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1026, 1070), 'sqlalchemy.Column', 'Column', (['Integer'], {'unique': '(True)', 'nullable': '(False)'}), '(Integer, unique=True, nullable=False)\n', (1032, 1070), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1114, 1126), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (1120, 1126), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1138, 1150), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (1144, 1150), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((524, 534), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (530, 534), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((614, 624), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (620, 624), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((648, 658), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (654, 658), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1090, 1100), 'sqlalchemy.String', 'String', (['(10)'], {}), '(10)\n', (1096, 1100), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1171, 1182), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (1177, 1182), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((1206, 1217), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (1212, 1217), False, 'from sqlalchemy import BigInteger, Column, DateTime, Text, String, Integer\n'), ((361, 380), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (378, 380), False, 'from sqlalchemy.sql.functions import current_timestamp\n'), ((440, 459), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (457, 459), False, 'from sqlalchemy.sql.functions import current_timestamp\n'), ((470, 489), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (487, 489), False, 'from sqlalchemy.sql.functions import current_timestamp\n'), ((867, 886), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (884, 886), False, 'from sqlalchemy.sql.functions import current_timestamp\n'), ((946, 965), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (963, 965), False, 'from sqlalchemy.sql.functions import current_timestamp\n'), ((976, 995), 'sqlalchemy.sql.functions.current_timestamp', 'current_timestamp', ([], {}), '()\n', (993, 995), False, 'from 
sqlalchemy.sql.functions import current_timestamp\n')] |
"""The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Backing input_select that represents the select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "<NAME>",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
| [
"homeassistant.setup.async_setup_component",
"homeassistant.core.Context",
"homeassistant.helpers.entity_registry.async_get",
"tests.common.async_mock_service",
"tests.common.assert_setup_component",
"tests.common.async_capture_events"
]
| [((1083, 1129), 'tests.common.async_mock_service', 'async_mock_service', (['hass', '"""test"""', '"""automation"""'], {}), "(hass, 'test', 'automation')\n", (1101, 1129), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((5971, 5986), 'homeassistant.helpers.entity_registry.async_get', 'async_get', (['hass'], {}), '(hass)\n', (5980, 5986), False, 'from homeassistant.helpers.entity_registry import async_get\n'), ((7072, 7119), 'tests.common.async_capture_events', 'async_capture_events', (['hass', '"""test_number_event"""'], {}), "(hass, 'test_number_event')\n", (7092, 7119), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((8267, 8276), 'homeassistant.core.Context', 'Context', ([], {}), '()\n', (8274, 8276), False, 'from homeassistant.core import Context\n'), ((1243, 1280), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""template"""'], {}), "(1, 'template')\n", (1265, 1280), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((1949, 1986), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""template"""'], {}), "(1, 'template')\n", (1971, 1986), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((3022, 3059), 'tests.common.assert_setup_component', 'assert_setup_component', (['(0)', '"""template"""'], {}), "(0, 'template')\n", (3044, 3059), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((3437, 3472), 'tests.common.assert_setup_component', 'assert_setup_component', (['(0)', '"""select"""'], {}), "(0, 'select')\n", (3459, 3472), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((3839, 3874), 'tests.common.assert_setup_component', 'assert_setup_component', (['(0)', '"""select"""'], {}), "(0, 'select')\n", (3861, 3874), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((4488, 4529), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""input_select"""'], {}), "(1, 'input_select')\n", (4510, 4529), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((4912, 4949), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""template"""'], {}), "(1, 'template')\n", (4934, 4949), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((7137, 7560), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', "{'template': [{'invalid': 'config'}, {'unique_id': 'listening-test-event',\n 'trigger': {'platform': 'event', 'event_type': 'test_event'}, 'select':\n [{'name': '<NAME>', 'unique_id': 'hello_name-id', 'state':\n '{{ trigger.event.data.beer }}', 'options':\n '{{ trigger.event.data.beers }}', 'select_option': {'event':\n 'test_number_event'}, 'optimistic': True}]}]}"], {}), "(hass, 'template', {'template': [{'invalid':\n 'config'}, {'unique_id': 'listening-test-event', 'trigger': {'platform':\n 'event', 'event_type': 'test_event'}, 'select': [{'name': '<NAME>',\n 'unique_id': 'hello_name-id', 'state': '{{ trigger.event.data.beer }}',\n 'options': '{{ trigger.event.data.beers }}', 'select_option': {'event':\n 'test_number_event'}, 'optimistic': True}]}]})\n", (7164, 7560), False, 'from 
homeassistant import setup\n'), ((9335, 9376), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""input_select"""'], {}), "(1, 'input_select')\n", (9357, 9376), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((9759, 9796), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""template"""'], {}), "(1, 'template')\n", (9781, 9796), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((11545, 11586), 'tests.common.assert_setup_component', 'assert_setup_component', (['(1)', '"""input_select"""'], {}), "(1, 'input_select')\n", (11567, 11586), False, 'from tests.common import assert_setup_component, async_capture_events, async_mock_service\n'), ((11977, 12557), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'trigger\': {\'platform\': \'state\', \'entity_id\':\n _OPTION_INPUT_SELECT}, \'select\': {\'unique_id\': \'b\', \'state\':\n \'{{ trigger.to_state.state }}\', \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\', \'data\': {\n \'entity_id\': _OPTION_INPUT_SELECT, \'option\': \'{{ option }}\'}},\n \'optimistic\': True, \'icon\':\n "{% if (trigger.to_state.state or \'\') == \'a\' %}mdi:greater{% else %}mdi:less{% endif %}"\n }}}'], {}), '(hass, \'template\', {\'template\': {\'trigger\': {\n \'platform\': \'state\', \'entity_id\': _OPTION_INPUT_SELECT}, \'select\': {\n \'unique_id\': \'b\', \'state\': \'{{ trigger.to_state.state }}\', \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\', \'data\': {\n \'entity_id\': _OPTION_INPUT_SELECT, \'option\': \'{{ option }}\'}},\n \'optimistic\': True, \'icon\':\n "{% if (trigger.to_state.state or \'\') == \'a\' %}mdi:greater{% else %}mdi:less{% endif %}"\n }}})\n', (12004, 12557), False, 'from homeassistant import setup\n'), ((1303, 1492), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'select\': {\'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}, \'options\': "{{ [\'a\', \'b\'] }}"}}}'], {}), '(hass, \'template\', {\'template\': {\'select\': {\n \'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}, \'options\': "{{ [\'a\', \'b\'] }}"}}})\n', (1330, 1492), False, 'from homeassistant import setup\n'), ((2009, 2317), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'select\': [{\'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}, \'options\': "{{ [\'a\', \'b\'] }}"}, {\'state\':\n "{{ \'a\' }}", \'select_option\': {\'service\': \'script.select_option\'},\n \'options\': "{{ [\'a\', \'b\'] }}"}]}}'], {}), '(hass, \'template\', {\'template\': {\'select\': [{\n \'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}, \'options\': "{{ [\'a\', \'b\'] }}"}, {\'state\':\n "{{ \'a\' }}", \'select_option\': {\'service\': \'script.select_option\'},\n \'options\': "{{ [\'a\', \'b\'] }}"}]}})\n', (2036, 2317), False, 'from homeassistant import setup\n'), ((3082, 3249), 'homeassistant.setup.async_setup_component', 
'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'select\': {\'select_option\': {\'service\':\n \'script.select_option\'}, \'options\': "{{ [\'a\', \'b\'] }}"}}}'], {}), '(hass, \'template\', {\'template\': {\'select\': {\n \'select_option\': {\'service\': \'script.select_option\'}, \'options\':\n "{{ [\'a\', \'b\'] }}"}}})\n', (3109, 3249), False, 'from homeassistant import setup\n'), ((3495, 3651), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""select"""', '{\'template\': {\'select\': {\'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}}}}'], {}), '(hass, \'select\', {\'template\': {\'select\': {\n \'state\': "{{ \'a\' }}", \'select_option\': {\'service\':\n \'script.select_option\'}}}})\n', (3522, 3651), False, 'from homeassistant import setup\n'), ((3897, 4026), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""select"""', '{\'template\': {\'select\': {\'state\': "{{ \'a\' }}", \'options\': "{{ [\'a\', \'b\'] }}"}}}'], {}), '(hass, \'select\', {\'template\': {\'select\': {\n \'state\': "{{ \'a\' }}", \'options\': "{{ [\'a\', \'b\'] }}"}}})\n', (3924, 4026), False, 'from homeassistant import setup\n'), ((4552, 4695), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""input_select"""', "{'input_select': {'option': {'options': ['a', 'b'], 'initial': 'a', 'name':\n 'Option'}}}"], {}), "(hass, 'input_select', {'input_select': {\n 'option': {'options': ['a', 'b'], 'initial': 'a', 'name': 'Option'}}})\n", (4579, 4695), False, 'from homeassistant import setup\n'), ((4972, 5415), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'unique_id\': \'b\', \'select\': {\'state\':\n f"{{{{ states(\'{_OPTION_INPUT_SELECT}\') }}}}", \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\',\n \'data_template\': {\'entity_id\': _OPTION_INPUT_SELECT, \'option\':\n \'{{ option }}\'}}, \'optimistic\': True, \'unique_id\': \'a\'}}}'], {}), '(hass, \'template\', {\'template\': {\'unique_id\':\n \'b\', \'select\': {\'state\': f"{{{{ states(\'{_OPTION_INPUT_SELECT}\') }}}}",\n \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\',\n \'data_template\': {\'entity_id\': _OPTION_INPUT_SELECT, \'option\':\n \'{{ option }}\'}}, \'optimistic\': True, \'unique_id\': \'a\'}}})\n', (4999, 5415), False, 'from homeassistant import setup\n'), ((9399, 9542), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""input_select"""', "{'input_select': {'option': {'options': ['a', 'b'], 'initial': 'a', 'name':\n 'Option'}}}"], {}), "(hass, 'input_select', {'input_select': {\n 'option': {'options': ['a', 'b'], 'initial': 'a', 'name': 'Option'}}})\n", (9426, 9542), False, 'from homeassistant import setup\n'), ((9819, 10372), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""template"""', '{\'template\': {\'unique_id\': \'b\', \'select\': {\'state\':\n f"{{{{ states(\'{_OPTION_INPUT_SELECT}\') }}}}", \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\', \'data\': {\n 
\'entity_id\': _OPTION_INPUT_SELECT, \'option\': \'{{ option }}\'}},\n \'optimistic\': True, \'unique_id\': \'a\', \'icon\':\n f"{{% if (states(\'{_OPTION_INPUT_SELECT}\') == \'a\') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}"\n }}}'], {}), '(hass, \'template\', {\'template\': {\'unique_id\':\n \'b\', \'select\': {\'state\': f"{{{{ states(\'{_OPTION_INPUT_SELECT}\') }}}}",\n \'options\':\n f"{{{{ state_attr(\'{_OPTION_INPUT_SELECT}\', \'{INPUT_SELECT_ATTR_OPTIONS}\') }}}}"\n , \'select_option\': {\'service\': \'input_select.select_option\', \'data\': {\n \'entity_id\': _OPTION_INPUT_SELECT, \'option\': \'{{ option }}\'}},\n \'optimistic\': True, \'unique_id\': \'a\', \'icon\':\n f"{{% if (states(\'{_OPTION_INPUT_SELECT}\') == \'a\') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}"\n }}})\n', (9846, 10372), False, 'from homeassistant import setup\n'), ((11609, 11752), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""input_select"""', "{'input_select': {'option': {'options': ['a', 'b'], 'initial': 'a', 'name':\n 'Option'}}}"], {}), "(hass, 'input_select', {'input_select': {\n 'option': {'options': ['a', 'b'], 'initial': 'a', 'name': 'Option'}}})\n", (11636, 11752), False, 'from homeassistant import setup\n')] |
'''
#;+
#; NAME:
#; sdss.qso
#; Version 1.1
#;
#; PURPOSE:
#; Class for SDSS QSO
#; 2015 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
import os
from astropy.table import QTable, Column
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
class SdssQso(object):
'''Class to handle a single SDSS Quasar
Parameters:
----------
coord: SkyCoord, optional
RA/Dec of the sightline
z: float, optional
Emission redshift
database: SdssQuasars class, optional
Required for grabbing data, etc.
'''
# Init
def __init__(self, coord=None, z=0., database=None, verbose=True):
# Init
if coord is None:
radec = (0.*u.deg, 0.*u.deg)
self.coord = SkyCoord(ra=radec[0], dec=radec[0])
else:
self.coord = coord
self.z = z
self.verbose = verbose
self.database = database
# None init
self._specfil = None
def get_specfil(self):
'''Parse the SDSS spectrum file
Requires a link to the database Class
'''
if self.database is None:
raise IOError('SdssQso: Need to be linked to an SDSS Database')
# Generate file name (DR4 is different)
pnm = '{0:04d}'.format(
self.database._data[self.database.index]['PLATE'])
#fnm = '{0:04d}'.format(
# self.database._data[self.database.index]['FIBERID'])
fnm = '{0:03d}'.format(
self.database._data[self.database.index]['FIBERID'])
mjd = str(self.database._data[self.database.index]['MJD'])
sfil = self.database._datdir+pnm+'/1d/'+'spSpec-'
# Finish
self._specfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit' # Is usually gzipped
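    # Illustrative example of the filename built above (plate/MJD/fiber values are
    # hypothetical): <_datdir>/0266/1d/spSpec-51630-0266-003.fit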
def load_spec(self):
'''Input the Spectrum
'''
from linetools.spectra.xspectrum1d import XSpectrum1D
if self._specfil is None:
self.get_specfil()
#
if self.verbose:
print('SdssQso: Loading spectrum from {:s}'.format(self._specfil))
self.spec = XSpectrum1D.from_file(self._specfil)
def __repr__(self):
''' For printing
'''
return '[{:s}: {:s} {:s}, z={:g}]'.format(self.__class__.__name__,
self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
self.coord.dec.to_string(sep=':',pad=True,alwayssign=True), self.z)
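# Minimal usage sketch (not part of the original module); the coordinate and redshift
# below are arbitrary example values. Grabbing data additionally requires linking the
# object to an SDSS database via the `database` keyword, as noted in the class docstring.
#
#   from astropy.coordinates import SkyCoord
#   from astropy import units as u
#   qso = SdssQso(coord=SkyCoord(ra=150.1*u.deg, dec=2.2*u.deg), z=2.5)
#   print(qso)  # formatted via __repr__ above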
| [
"linetools.spectra.xspectrum1d.XSpectrum1D.from_file",
"astropy.coordinates.SkyCoord"
]
| [((2306, 2342), 'linetools.spectra.xspectrum1d.XSpectrum1D.from_file', 'XSpectrum1D.from_file', (['self._specfil'], {}), '(self._specfil)\n', (2327, 2342), False, 'from linetools.spectra.xspectrum1d import XSpectrum1D\n'), ((967, 1002), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'radec[0]', 'dec': 'radec[0]'}), '(ra=radec[0], dec=radec[0])\n', (975, 1002), False, 'from astropy.coordinates import SkyCoord\n')] |
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"numpy.array",
"unittest.skipIf",
"wisdem.optimization_drivers.dakota_driver.DakotaOptimizer"
]
| [((243, 310), 'unittest.skipIf', 'unittest.skipIf', (['(dakota is None)', '"""only run if Dakota is installed."""'], {}), "(dakota is None, 'only run if Dakota is installed.')\n", (258, 310), False, 'import unittest\n'), ((3093, 3108), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3106, 3108), False, 'import unittest\n'), ((817, 846), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (832, 846), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((1468, 1497), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (1483, 1497), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((2146, 2175), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (2161, 2175), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((2797, 2826), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (2812, 2826), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((428, 462), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (436, 462), True, 'import numpy as np\n'), ((489, 510), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (497, 510), True, 'import numpy as np\n'), ((1069, 1103), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (1077, 1103), True, 'import numpy as np\n'), ((1130, 1151), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (1138, 1151), True, 'import numpy as np\n'), ((1731, 1765), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (1739, 1765), True, 'import numpy as np\n'), ((1809, 1830), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (1817, 1830), True, 'import numpy as np\n'), ((2397, 2431), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (2405, 2431), True, 'import numpy as np\n'), ((2458, 2479), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (2466, 2479), True, 'import numpy as np\n'), ((980, 1002), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (988, 1002), True, 'import numpy as np\n'), ((1631, 1653), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (1639, 1653), True, 'import numpy as np\n'), ((2309, 2331), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (2317, 2331), True, 'import numpy as np\n'), ((2960, 2982), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (2968, 2982), True, 'import numpy as np\n'), ((3024, 3048), 'numpy.array', 'np.array', (["results['con']"], {}), "(results['con'])\n", (3032, 3048), True, 'import numpy as np\n')] |
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example of regression in TensorFlow,
implemented as a curve-fitting problem (fitting a noisy cosine)
written by <NAME> @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
# which will be fed with training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define the relationship between instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize error using MSE function
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
## Performance evaluation model ====================================
# y : data output
# pred_y: prediction output of the model, c * cos(a*x + b) + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init) # this for variable use
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
            # feed training data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
            # feeding training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"tensorflow.cast",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"numpy.linspace",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"tensorflow.cos",
"tensorflow.Variable",
"numpy.cos",
"matplotlib.pyplot.title",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
| [((989, 1018), 'numpy.zeros', 'np.zeros', (['[xsize, total_size]'], {}), '([xsize, total_size])\n', (997, 1018), True, 'import numpy as np\n'), ((1028, 1057), 'numpy.zeros', 'np.zeros', (['[xsize, total_size]'], {}), '([xsize, total_size])\n', (1036, 1057), True, 'import numpy as np\n'), ((1197, 1234), 'numpy.cos', 'np.cos', (['(rad_freq * x_data + phase_rad)'], {}), '(rad_freq * x_data + phase_rad)\n', (1203, 1234), True, 'import numpy as np\n'), ((2265, 2306), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[xsize, None]'], {}), '(tf.float32, [xsize, None])\n', (2279, 2306), True, 'import tensorflow as tf\n'), ((2310, 2351), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[xsize, None]'], {}), '(tf.float32, [xsize, None])\n', (2324, 2351), True, 'import tensorflow as tf\n'), ((2412, 2428), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2423, 2428), True, 'import tensorflow as tf\n'), ((2454, 2470), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2465, 2470), True, 'import tensorflow as tf\n'), ((2474, 2490), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2485, 2490), True, 'import tensorflow as tf\n'), ((2494, 2510), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2505, 2510), True, 'import tensorflow as tf\n'), ((3721, 3746), 'numpy.zeros', 'np.zeros', (['training_epochs'], {}), '(training_epochs)\n', (3729, 3746), True, 'import numpy as np\n'), ((3771, 3796), 'numpy.zeros', 'np.zeros', (['training_epochs'], {}), '(training_epochs)\n', (3779, 3796), True, 'import numpy as np\n'), ((3906, 3939), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3937, 3939), True, 'import tensorflow as tf\n'), ((5983, 6014), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(10, 10)'}), '(1, figsize=(10, 10))\n', (5993, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6171), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_index', 'error_rate_training'], {'label': '"""Training data"""', 'color': '"""r"""', 'marker': '"""o"""'}), "(epoch_index, error_rate_training, label='Training data', color='r',\n marker='o')\n", (6087, 6171), True, 'import matplotlib.pyplot as plt\n'), ((6164, 6261), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_index', 'error_rate_validation'], {'label': '"""Validation data"""', 'color': '"""b"""', 'marker': '"""x"""'}), "(epoch_index, error_rate_validation, label='Validation data', color\n ='b', marker='x')\n", (6172, 6261), True, 'import matplotlib.pyplot as plt\n'), ((6253, 6265), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6263, 6265), True, 'import matplotlib.pyplot as plt\n'), ((6266, 6297), 'matplotlib.pyplot.title', 'plt.title', (['"""MSE of prediction:"""'], {}), "('MSE of prediction:')\n", (6275, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration epoch"""'], {}), "('Iteration epoch')\n", (6308, 6327), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6345), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE"""'], {}), "('MSE')\n", (6338, 6345), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6386), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(10, 10)'}), '(2, figsize=(10, 10))\n', (6365, 6386), True, 'import matplotlib.pyplot as plt\n'), ((6451, 6557), 'matplotlib.pyplot.plot', 'plt.plot', (['x_validation_data[:, 0]', 'y_validation_data[:, 0]'], {'label': '"""noisy data"""', 'color': 
'"""b"""', 'marker': '"""*"""'}), "(x_validation_data[:, 0], y_validation_data[:, 0], label=\n 'noisy data', color='b', marker='*')\n", (6459, 6557), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6619), 'matplotlib.pyplot.plot', 'plt.plot', (['x_validation_data[:, 0]', 'pred_y'], {'label': '"""prediction"""', 'color': '"""r"""'}), "(x_validation_data[:, 0], pred_y, label='prediction', color='r')\n", (6555, 6619), True, 'import matplotlib.pyplot as plt\n'), ((6617, 6629), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6627, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6630, 6666), 'matplotlib.pyplot.title', 'plt.title', (['"""A line fitting example:"""'], {}), "('A line fitting example:')\n", (6639, 6666), True, 'import matplotlib.pyplot as plt\n'), ((6667, 6687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X data"""'], {}), "('X data')\n", (6677, 6687), True, 'import matplotlib.pyplot as plt\n'), ((6688, 6708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y data"""'], {}), "('Y data')\n", (6698, 6708), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1189), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'xsize'], {}), '(-4, 4, xsize)\n', (1175, 1189), True, 'import numpy as np\n'), ((1269, 1287), 'numpy.sqrt', 'np.sqrt', (['noise_var'], {}), '(noise_var)\n', (1276, 1287), True, 'import numpy as np\n'), ((1290, 1324), 'numpy.random.randn', 'np.random.randn', (['xsize', 'total_size'], {}), '(xsize, total_size)\n', (1305, 1324), True, 'import numpy as np\n'), ((3657, 3696), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3664, 3696), True, 'import tensorflow as tf\n'), ((4016, 4028), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4026, 4028), True, 'import tensorflow as tf\n'), ((2784, 2801), 'tensorflow.cos', 'tf.cos', (['(a * x + b)'], {}), '(a * x + b)\n', (2790, 2801), True, 'import tensorflow as tf\n'), ((2874, 2895), 'tensorflow.square', 'tf.square', (['(y - pred_y)'], {}), '(y - pred_y)\n', (2883, 2895), True, 'import tensorflow as tf\n'), ((3042, 3079), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3064, 3079), True, 'import tensorflow as tf\n'), ((6404, 6442), 'numpy.cos', 'np.cos', (['(pred_a * x_data[:, 0] + pred_b)'], {}), '(pred_a * x_data[:, 0] + pred_b)\n', (6410, 6442), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
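# Illustrative example (not part of the original module) of the recursion above:
#   recursive_datetime_to_isoformat({'t': datetime.datetime(2019, 1, 1), 'n': [1, 2]})
#   returns {'t': '2019-01-01T00:00:00', 'n': [1, 2]}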
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
| [
"os.makedirs",
"re.compile",
"aiida.manage.configuration.get_profile",
"os.path.join",
"numpy.load",
"numpy.save",
"os.remove"
]
| [((903, 991), 're.compile', 're.compile', (['"""^\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+(\\\\+\\\\d{2}:\\\\d{2})?$"""'], {}), "(\n '^\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+(\\\\+\\\\d{2}:\\\\d{2})?$')\n", (913, 991), False, 'import re\n'), ((2693, 2766), 'os.path.join', 'os.path.join', (['repo_dirpath', '"""node"""', 'uuid[:2]', 'uuid[2:4]', 'uuid[4:]', '"""path"""'], {}), "(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')\n", (2705, 2766), False, 'import os\n'), ((4339, 4359), 'numpy.load', 'numpy.load', (['filepath'], {}), '(filepath)\n', (4349, 4359), False, 'import numpy\n'), ((1237, 1257), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (1248, 1257), False, 'import os\n'), ((3596, 3621), 'numpy.save', 'numpy.save', (['handle', 'array'], {}), '(handle, array)\n', (3606, 3621), False, 'import numpy\n'), ((3954, 3973), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (3963, 3973), False, 'import os\n'), ((2629, 2642), 'aiida.manage.configuration.get_profile', 'get_profile', ([], {}), '()\n', (2640, 2642), False, 'from aiida.manage.configuration import get_profile\n')] |
# -*- coding:utf-8 -*-
"""
Description:
Inventory Class
Usage:
from neo.Network.Inventory import Inventory
"""
from neo.IO.MemoryStream import MemoryStream
from neocore.IO.BinaryWriter import BinaryWriter
class Inventory(object):
"""docstring for Inventory"""
def __init__(self):
"""
Create an instance
"""
super(Inventory, self).__init__()
self.hash = None
def GetHashData(self):
"""
Get the hashable data.
Returns:
bytes:
"""
ms = MemoryStream()
w = BinaryWriter(ms)
self.SerializeUnsigned(w)
ms.flush()
return ms.ToArray()
def GetScriptHashesForVerifying(self):
pass
def Serialize(self, writer):
pass
def SerializeUnsigned(self, writer):
pass
def Deserialize(self, reader):
pass
def DeserializeUnsigned(self, reader):
pass
| [
"neocore.IO.BinaryWriter.BinaryWriter",
"neo.IO.MemoryStream.MemoryStream"
]
| [((551, 565), 'neo.IO.MemoryStream.MemoryStream', 'MemoryStream', ([], {}), '()\n', (563, 565), False, 'from neo.IO.MemoryStream import MemoryStream\n'), ((578, 594), 'neocore.IO.BinaryWriter.BinaryWriter', 'BinaryWriter', (['ms'], {}), '(ms)\n', (590, 594), False, 'from neocore.IO.BinaryWriter import BinaryWriter\n')] |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the OSEO DescribeResultAccess operation"""
from __future__ import absolute_import
import logging
import datetime as dt
from django.core.exceptions import ObjectDoesNotExist
import pytz
import pyxb
import pyxb.bundles.opengis.oseo_1_0 as oseo
from .. import errors
from .. import models
from ..models import Order
from .. import utilities
logger = logging.getLogger(__name__)
def describe_result_access(request, user):
"""Implements the OSEO DescribeResultAccess operation.
This operation returns the location of the order items that are
ready to be downloaded by the user.
The DescribeResultAccess operation only reports on the availability
of order items that specify onlineDataAccess as their delivery option.
Parameters
----------
request: oseo.DescribeResultAccess
The incoming request
user: django.contrib.auth.User
The django user that placed the request
Returns
-------
    response: oseo.DescribeResultAccessResponse
        The response DescribeResultAccessResponse instance
"""
try:
order = Order.objects.get(id=request.orderId)
except ObjectDoesNotExist:
raise errors.InvalidOrderIdentifierError()
if order.user != user:
raise errors.AuthorizationFailedError
completed_items = get_order_completed_items(order, request.subFunction)
logger.debug("completed_items: {}".format(completed_items))
order.last_describe_result_access_request = dt.datetime.now(pytz.utc)
order.save()
response = oseo.DescribeResultAccessResponse(status='success')
item_id = None
for item in completed_items:
iut = oseo.ItemURLType()
iut.itemId = item_id or item.item_specification.item_id
iut.productId = oseo.ProductIdType(
identifier=item.identifier,
)
iut.productId.collectionId = utilities.get_collection_identifier(
item.item_specification.collection)
iut.itemAddress = oseo.OnLineAccessAddressType()
iut.itemAddress.ResourceAddress = pyxb.BIND()
iut.itemAddress.ResourceAddress.URL = item.url
iut.expirationDate = item.expires_on
response.URLs.append(iut)
return response
def get_order_completed_items(order, behaviour):
"""Get the completed order items for product orders.
Parameters
----------
order: oseoserver.models.Order
The order for which completed items are to be returned
behaviour: str
Either 'allReady' or 'nextReady', as defined in the OSEO
specification
Returns
--------
list
The completed order items for this order
"""
batches = order.batches.all()
all_complete = []
for batch in batches:
complete_items = get_batch_completed_items(batch, behaviour)
all_complete.extend(complete_items)
return all_complete
def get_batch_completed_items(batch, behaviour):
last_time = batch.order.last_describe_result_access_request
list_all_items = last_time is None or behaviour == batch.ALL_READY
order_delivery = batch.order.selected_delivery_option.delivery_type
batch_complete_items = []
queryset = batch.order_items.filter(
status=batch.order.COMPLETED
).order_by("item_specification__id")
for item in queryset:
item_spec = item.item_specification
try:
delivery = (
item_spec.selected_delivery_option.delivery_type)
except models.ItemSpecificationDeliveryOption.DoesNotExist:
delivery = order_delivery
if delivery != models.BaseDeliveryOption.ONLINE_DATA_ACCESS:
# describeResultAccess only applies to items that specify
# 'onlinedataaccess' as delivery type
logger.debug(
"item {} does not specify onlinedataaccess as its "
"delivery type, skipping item...".format(item)
)
continue
completed_since_last = (item.completed_on is None or
last_time is None or
item.completed_on >= last_time)
list_this_item = (
behaviour == batch.NEXT_READY and completed_since_last)
if list_all_items or list_this_item:
batch_complete_items.append(item)
return batch_complete_items
| [
"logging.getLogger",
"pyxb.bundles.opengis.oseo_1_0.ProductIdType",
"pyxb.BIND",
"pyxb.bundles.opengis.oseo_1_0.ItemURLType",
"pyxb.bundles.opengis.oseo_1_0.DescribeResultAccessResponse",
"pyxb.bundles.opengis.oseo_1_0.OnLineAccessAddressType",
"datetime.datetime.now"
]
| [((951, 978), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (968, 978), False, 'import logging\n'), ((2032, 2057), 'datetime.datetime.now', 'dt.datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (2047, 2057), True, 'import datetime as dt\n'), ((2090, 2141), 'pyxb.bundles.opengis.oseo_1_0.DescribeResultAccessResponse', 'oseo.DescribeResultAccessResponse', ([], {'status': '"""success"""'}), "(status='success')\n", (2123, 2141), True, 'import pyxb.bundles.opengis.oseo_1_0 as oseo\n'), ((2209, 2227), 'pyxb.bundles.opengis.oseo_1_0.ItemURLType', 'oseo.ItemURLType', ([], {}), '()\n', (2225, 2227), True, 'import pyxb.bundles.opengis.oseo_1_0 as oseo\n'), ((2316, 2362), 'pyxb.bundles.opengis.oseo_1_0.ProductIdType', 'oseo.ProductIdType', ([], {'identifier': 'item.identifier'}), '(identifier=item.identifier)\n', (2334, 2362), True, 'import pyxb.bundles.opengis.oseo_1_0 as oseo\n'), ((2538, 2568), 'pyxb.bundles.opengis.oseo_1_0.OnLineAccessAddressType', 'oseo.OnLineAccessAddressType', ([], {}), '()\n', (2566, 2568), True, 'import pyxb.bundles.opengis.oseo_1_0 as oseo\n'), ((2611, 2622), 'pyxb.BIND', 'pyxb.BIND', ([], {}), '()\n', (2620, 2622), False, 'import pyxb\n')] |
"""Test combined function."""
from cmatools.combine.combine import combined
def test_combined():
"""Test of combined function"""
assert combined() == "this hello cma"
| [
"cmatools.combine.combine.combined"
]
| [((148, 158), 'cmatools.combine.combine.combined', 'combined', ([], {}), '()\n', (156, 158), False, 'from cmatools.combine.combine import combined\n')] |
"""
matmul autotvm
[batch,in_dim] x [in_dim,out_dim]
search_matmul_config(batch,in_dim,out_dim,num_trials):
input: batch,in_dim,out_dim,num_trials
[batch,in_dim] x [in_dim,out_dim]
num_trials: num of trials, default: 1000
output: log (json format)
use autotvm to search configs for the matmul
lookup_matmul_config(batch,in_dim,out_dim,output_log):
    find a proper matmul config and append it to output_log
    note: trades off kernel performance against grid & block size
launch_matmul_from_config(config_json_path):
    input: config_json_path, path of the config file (json format)
usage:
1. use search_matmul_config(batch,in_dim,out_dim,num_trials) to search configs
    2. use lookup_matmul_config(batch,in_dim,out_dim,output_log) to get a proper config
3. write the config (in json format) to "matmul_config.json"
4. use launch_matmul_from_config("matmul_config.json") to print the matmul kernel code
"""
import numpy as np
import tvm
import logging
import sys
from tvm import autotvm
import topi
import json
import os
from topi.util import get_const_tuple
import tensorflow as tf
flags = tf.flags
flags.DEFINE_string("input_path", "", "path of input file")
flags.DEFINE_string("autotvm_log", "../autotvm_logs/all_tuned_tilling_dense_nn.1000.log", "path of autotvm tuning log")
flags.DEFINE_string("tvm_profile_log",
"/tmp/tvm_profile.log", "path of tvm profile")
flags.DEFINE_string("output_path", "", "path of output file")
FLAGS = flags.FLAGS
@autotvm.template
def tvm_matmul_tune_op(batch, in_dim, out_dim):
"""
autotvm tuning template
D=A*B
[batch, in_dim] x [in_dim, out_dim]
"""
A = tvm.placeholder((batch, in_dim), name='A', dtype="float32")
B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32")
k = tvm.reduce_axis((0, in_dim), name='k')
C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum(
A[i, k] * B[k, j], axis=k), name='C')
cfg = autotvm.get_config()
s = tvm.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
y, x = C.op.axis
k = CC.op.reduce_axis[0]
cfg.define_split('tile_k', cfg.axis(k), num_outputs=3)
ko, kt, ki = cfg['tile_k'].apply(s, CC, k)
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
thread_x = tvm.thread_axis('threadIdx.x')
thread_y = tvm.thread_axis('threadIdx.y')
cfg.define_split('tile_y', cfg.axis(y), num_outputs=4)
cfg.define_split('tile_x', cfg.axis(x), num_outputs=4)
by, tyz, ty, yi = cfg['tile_y'].apply(s, C, y)
bx, txz, tx, xi = cfg['tile_x'].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, tvm.thread_axis('vthread'))
s[C].bind(txz, tvm.thread_axis('vthread'))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
s[stage].double_buffer()
for stage in [AA, BB]:
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg['tile_y'].size[2])
tx, xi = s[stage].split(tx, nparts=cfg['tile_x'].size[2])
_, xi = s[stage].split(xi, factor=4)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob('auto_unroll_max_step', [512, 1500])
s[C].pragma(by, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[C].pragma(by, 'unroll_explicit', False)
cfg.add_flop(batch * in_dim * out_dim * 2)
return s, [A, B, C]
def search_matmul_config(batch, in_dim, out_dim, num_trials):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(
batch, in_dim, out_dim), target='cuda')
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
)
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = "tuned_kernels/" + op_name + ".log"
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(n_trial=num_trials, measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)])
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print('\nBest config:')
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
def lookup_matmul_config(batch, in_dim, out_dim, output_log):
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = FLAGS.autotvm_log
with open(log_name, "r") as fin:
log_lines = fin.readlines()
# log_records=tvm.autotvm.record.load_from_file(log_name)
log_records_all = []
log_records = []
for line in log_lines:
line = line.rstrip('\n')
# print(line)
record_json = json.loads(line)
tm = record_json['r'][0][0]
if tm > 10000000: # filter bad configs
continue
if record_json['i'][2][0] != batch or record_json['i'][2][1] != in_dim or record_json['i'][2][2] != out_dim: # filter other configs
continue
griddim_x = record_json['i'][5]["e"][2][2][0]
if griddim_x == -1:
griddim_x = int(out_dim / record_json['i'][5]["e"][2][2][1] / record_json['i'][5]["e"][2][2][2] / record_json['i'][5]["e"][2][2][3])
griddim_y = record_json['i'][5]["e"][1][2][0]
if griddim_y == -1:
griddim_y = int(batch / record_json['i'][5]["e"][1][2][1] / record_json['i'][5]["e"][1][2][2] / record_json['i'][5]["e"][1][2][3])
record = {"time": tm,
"grid": [griddim_x, griddim_y, 1],
"block": [record_json['i'][5]["e"][2][2][2], record_json['i'][5]["e"][1][2][2], 1],
"config": line}
log_records_all.append((tm, record))
# if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
# continue
# if record["grid"][0] * record["grid"][1] * record["grid"][2] < 16:
# continue
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * record["block"][0] * record["block"][1] * record["block"][2]
if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * (record["block"][0] * record["block"][1] * record["block"][2] / 32 + 1) * 32
record.update({"opt": opt})
log_records.append((tm, record))
# print(log_records[-1])
log_records_all.sort(key=lambda item: item[0])
log_records.sort(key=lambda item: item[0])
print(op_name)
log_records_fast = log_records[0:100]
# log_records_fast = log_records
log_records = []
for i in range(len(log_records_fast)):
log_records.append((log_records_fast[i][1]["opt"], log_records_fast[i][1]))
log_records.sort(key=lambda item: item[0])
print("fastest kernel:", log_records_all[0][1]["time"], "grid:", log_records_all[0][1]["grid"], "block:", log_records_all[0][1]["block"])
# print(log_records_fast[0][1]["config"])
print("efficient kernel:",log_records[0][1]["time"], "grid:", log_records[0][1]["grid"], "block:", log_records[0][1]["block"])
with open(output_log, 'a') as fout:
fout.write(log_records[0][1]["config"] + "\n")
def launch_matmul_from_config(config_json_path):
with open(config_json_path, "r") as fin:
config = json.load(fin)
batch = config["i"][2][0]
in_dim = config["i"][2][1]
out_dim = config["i"][2][2]
# print(batch, in_dim, out_dim)
task = autotvm.task.create(
tvm_matmul_tune_op, args=(batch, in_dim, out_dim), target='cuda')
# dispatch_context = autotvm.task.ApplyConfig(config)
dispatch_context = autotvm.apply_history_best(config_json_path)
best_config = dispatch_context.query(task.target, task.workload)
print("Using pretuned config:")
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
output_log_file = "matmul_nn_autotvm_select_result.log"
if os.path.exists(output_log_file):
os.remove(output_log_file)
lookup_matmul_config(4, 256, 256, output_log_file)
lookup_matmul_config(16, 256, 256, output_log_file)
def tune_dot_codegen(m, k, n, log_path):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(m, k, n), target='cuda')
op_name = "tuned_dot_nn_op_float_m%d_k%d_n%d" % (m, k, n)
# log_name = "tuned_dot_op_float_%d_%d_%d" % (m, k, n)
# log_name = "tuned_kernels/" + log_name + ".log"
log_name = log_path
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(m,k,n)
func = tvm.build(s, arg_bufs, 'cuda', name=op_name)
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=[m,k]).astype("float32")
w_np = np.random.uniform(size=[k,n]).astype("float32")
c_np = np.zeros([m,n]).astype("float32")
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(c_np, ctx)
kernel_code = func.imported_modules[0].get_source()
func(a, w, c)
return kernel_code
def extract_ops_from_log():
dot_ops = []
dot_ops.append({'arg0_shape': [4, 256], 'arg1_shape': [256, 256], 'out_shape': [4, 256], 'transpose_A': False, 'transpose_B': False})
dot_ops.append({'arg0_shape': [16, 256], 'arg1_shape': [256, 256], 'out_shape': [16, 256], 'transpose_A': False, 'transpose_B': False})
return dot_ops
def get_tvm_topi_func_name(m, k, n):
func_name = "tuned_dot_nn_op_float_m%d_k%d_n%d_kernel0" % (m, k, n)
return func_name
def extract_tvm_profiling_from_log(log_path):
lines = open(log_path).readlines()
deduped_lines = list(set(lines))
# print(deduped_lines)
# print("#convs:", len(lines), "#deduped_convs:", len(deduped_lines))
profiling_result = {}
for line in deduped_lines:
items = line.rstrip('\n').split('|')
profiling_data = {
'gridDim': [int(items[1]), int(items[2]), int(items[3])],
'blockDim': [int(items[4]), int(items[5]), int(items[6])]
}
profiling_result.update({items[0]: profiling_data})
return profiling_result
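# The profile-log parser above expects '|'-separated records, one kernel per line,
# e.g. (hypothetical values):
#   tuned_dot_nn_op_float_m4_k256_n256_kernel0|16|1|1|16|16|1
# i.e. kernel_name|gridDim.x|gridDim.y|gridDim.z|blockDim.x|blockDim.y|blockDim.z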
def generate_db_topi_ops(dot_ops, log_path):
topi_ops = []
tvm_profiling_log_path = FLAGS.tvm_profile_log
if os.path.exists(tvm_profiling_log_path):
os.remove(tvm_profiling_log_path)
for dot_op in dot_ops:
m = dot_op['arg0_shape'][0]
k = dot_op['arg0_shape'][1]
n = dot_op['arg1_shape'][1]
topi_code = tune_dot_codegen(m, k, n, log_path)
topi_op = {
'tvm_func_name': get_tvm_topi_func_name(m, k, n),
'op_type': 'Dot',
'parameters': dot_op,
'code': topi_code
}
topi_ops.append(topi_op)
profiling_result = extract_tvm_profiling_from_log(tvm_profiling_log_path)
for topi_op in topi_ops:
tvm_func_name = topi_op['tvm_func_name']
topi_op.update(profiling_result[tvm_func_name])
return topi_ops
dot_ops = extract_ops_from_log()
topi_ops = generate_db_topi_ops(dot_ops, output_log_file)
with open(FLAGS.output_path, 'w') as fout:
json.dump(topi_ops, fout)
os.remove(output_log_file)
 | [
"logging.getLogger",
"logging.StreamHandler",
"tvm.autotvm.apply_history_best",
"tvm.context",
"os.remove",
"tvm.autotvm.tuner.XGBTuner",
"os.path.exists",
"tvm.create_schedule",
"tvm.autotvm.LocalRunner",
"tvm.target.create",
"tvm.autotvm.get_config",
"tvm.nd.array",
"json.loads",
"tvm.sum",
"tvm.reduce_axis",
"tvm.autotvm.task.create",
"tvm.placeholder",
"tvm.autotvm.LocalBuilder",
"tvm.thread_axis",
"numpy.zeros",
"tvm.build",
"tvm.autotvm.callback.log_to_file",
"numpy.random.uniform",
"json.load",
"json.dump"
]
| [((10268, 10299), 'os.path.exists', 'os.path.exists', (['output_log_file'], {}), '(output_log_file)\n', (10282, 10299), False, 'import os\n'), ((13700, 13726), 'os.remove', 'os.remove', (['output_log_file'], {}), '(output_log_file)\n', (13709, 13726), False, 'import os\n'), ((1554, 1613), 'tvm.placeholder', 'tvm.placeholder', (['(batch, in_dim)'], {'name': '"""A"""', 'dtype': '"""float32"""'}), "((batch, in_dim), name='A', dtype='float32')\n", (1569, 1613), False, 'import tvm\n'), ((1622, 1683), 'tvm.placeholder', 'tvm.placeholder', (['(in_dim, out_dim)'], {'name': '"""B"""', 'dtype': '"""float32"""'}), "((in_dim, out_dim), name='B', dtype='float32')\n", (1637, 1683), False, 'import tvm\n'), ((1692, 1730), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, in_dim)'], {'name': '"""k"""'}), "((0, in_dim), name='k')\n", (1707, 1730), False, 'import tvm\n'), ((1848, 1868), 'tvm.autotvm.get_config', 'autotvm.get_config', ([], {}), '()\n', (1866, 1868), False, 'from tvm import autotvm\n'), ((1877, 1902), 'tvm.create_schedule', 'tvm.create_schedule', (['C.op'], {}), '(C.op)\n', (1896, 1902), False, 'import tvm\n'), ((2271, 2300), 'tvm.thread_axis', 'tvm.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (2286, 2300), False, 'import tvm\n'), ((2315, 2344), 'tvm.thread_axis', 'tvm.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (2330, 2344), False, 'import tvm\n'), ((2360, 2390), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (2375, 2390), False, 'import tvm\n'), ((2406, 2436), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (2421, 2436), False, 'import tvm\n'), ((4031, 4120), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(batch, in_dim, out_dim)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(batch, in_dim, out_dim),\n target='cuda')\n", (4050, 4120), False, 'from tvm import autotvm\n'), ((4456, 4484), 'tvm.autotvm.tuner.XGBTuner', 'autotvm.tuner.XGBTuner', (['task'], {}), '(task)\n', (4478, 4484), False, 'from tvm import autotvm\n'), ((4642, 4678), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['log_name'], {}), '(log_name)\n', (4668, 4678), False, 'from tvm import autotvm\n'), ((5012, 5034), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (5023, 5034), False, 'import tvm\n'), ((5185, 5208), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (5197, 5208), False, 'import tvm\n'), ((5217, 5240), 'tvm.nd.array', 'tvm.nd.array', (['b_np', 'ctx'], {}), '(b_np, ctx)\n', (5229, 5240), False, 'import tvm\n'), ((8942, 9031), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(batch, in_dim, out_dim)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(batch, in_dim, out_dim),\n target='cuda')\n", (8961, 9031), False, 'from tvm import autotvm\n'), ((9118, 9162), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['config_json_path'], {}), '(config_json_path)\n', (9144, 9162), False, 'from tvm import autotvm\n'), ((9504, 9526), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (9515, 9526), False, 'import tvm\n'), ((9677, 9700), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (9689, 9700), False, 'import tvm\n'), ((9709, 9732), 'tvm.nd.array', 'tvm.nd.array', (['b_np', 'ctx'], {}), '(b_np, ctx)\n', (9721, 9732), False, 'import tvm\n'), ((10305, 10331), 'os.remove', 'os.remove', 
(['output_log_file'], {}), '(output_log_file)\n', (10314, 10331), False, 'import os\n'), ((10628, 10698), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(m, k, n)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(m, k, n), target='cuda')\n", (10647, 10698), False, 'from tvm import autotvm\n'), ((10928, 10964), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['log_name'], {}), '(log_name)\n', (10954, 10964), False, 'from tvm import autotvm\n'), ((11229, 11251), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (11240, 11251), False, 'import tvm\n'), ((11425, 11448), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (11437, 11448), False, 'import tvm\n'), ((11457, 11480), 'tvm.nd.array', 'tvm.nd.array', (['w_np', 'ctx'], {}), '(w_np, ctx)\n', (11469, 11480), False, 'import tvm\n'), ((11489, 11512), 'tvm.nd.array', 'tvm.nd.array', (['c_np', 'ctx'], {}), '(c_np, ctx)\n', (11501, 11512), False, 'import tvm\n'), ((12805, 12843), 'os.path.exists', 'os.path.exists', (['tvm_profiling_log_path'], {}), '(tvm_profiling_log_path)\n', (12819, 12843), False, 'import os\n'), ((13673, 13698), 'json.dump', 'json.dump', (['topi_ops', 'fout'], {}), '(topi_ops, fout)\n', (13682, 13698), False, 'import json\n'), ((2733, 2759), 'tvm.thread_axis', 'tvm.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (2748, 2759), False, 'import tvm\n'), ((2780, 2806), 'tvm.thread_axis', 'tvm.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (2795, 2806), False, 'import tvm\n'), ((3985, 4018), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (4006, 4018), False, 'import logging\n'), ((5262, 5305), 'numpy.zeros', 'np.zeros', (['(batch, out_dim)'], {'dtype': '"""float32"""'}), "((batch, out_dim), dtype='float32')\n", (5270, 5305), True, 'import numpy as np\n'), ((6165, 6181), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6175, 6181), False, 'import json\n'), ((8784, 8798), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (8793, 8798), False, 'import json\n'), ((9754, 9797), 'numpy.zeros', 'np.zeros', (['(batch, out_dim)'], {'dtype': '"""float32"""'}), "((batch, out_dim), dtype='float32')\n", (9762, 9797), True, 'import numpy as np\n'), ((10582, 10615), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (10603, 10615), False, 'import logging\n'), ((12853, 12886), 'os.remove', 'os.remove', (['tvm_profiling_log_path'], {}), '(tvm_profiling_log_path)\n', (12862, 12886), False, 'import os\n'), ((1782, 1816), 'tvm.sum', 'tvm.sum', (['(A[i, k] * B[k, j])'], {'axis': 'k'}), '(A[i, k] * B[k, j], axis=k)\n', (1789, 1816), False, 'import tvm\n'), ((3888, 3916), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (3905, 3916), False, 'import logging\n'), ((3945, 3973), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (3962, 3973), False, 'import logging\n'), ((4216, 4238), 'tvm.autotvm.LocalBuilder', 'autotvm.LocalBuilder', ([], {}), '()\n', (4236, 4238), False, 'from tvm import autotvm\n'), ((4255, 4314), 'tvm.autotvm.LocalRunner', 'autotvm.LocalRunner', ([], {'repeat': '(3)', 'min_repeat_ms': '(100)', 'timeout': '(4)'}), '(repeat=3, min_repeat_ms=100, timeout=4)\n', (4274, 4314), False, 'from tvm import autotvm\n'), ((4840, 4865), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (4857, 4865), False, 'import tvm\n'), ((4955, 5000), 
'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': '"""matmul"""'}), "(s, arg_bufs, 'cuda', name='matmul')\n", (4964, 5000), False, 'import tvm\n'), ((5047, 5086), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch, in_dim)'}), '(size=(batch, in_dim))\n', (5064, 5086), True, 'import numpy as np\n'), ((5116, 5157), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(in_dim, out_dim)'}), '(size=(in_dim, out_dim))\n', (5133, 5157), True, 'import numpy as np\n'), ((9332, 9357), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (9349, 9357), False, 'import tvm\n'), ((9447, 9492), 'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': '"""matmul"""'}), "(s, arg_bufs, 'cuda', name='matmul')\n", (9456, 9492), False, 'import tvm\n'), ((9539, 9578), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch, in_dim)'}), '(size=(batch, in_dim))\n', (9556, 9578), True, 'import numpy as np\n'), ((9608, 9649), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(in_dim, out_dim)'}), '(size=(in_dim, out_dim))\n', (9625, 9649), True, 'import numpy as np\n'), ((10485, 10513), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (10502, 10513), False, 'import logging\n'), ((10542, 10570), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (10559, 10570), False, 'import logging\n'), ((11075, 11100), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (11092, 11100), False, 'import tvm\n'), ((11173, 11217), 'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': 'op_name'}), "(s, arg_bufs, 'cuda', name=op_name)\n", (11182, 11217), False, 'import tvm\n'), ((11264, 11294), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[m, k]'}), '(size=[m, k])\n', (11281, 11294), True, 'import numpy as np\n'), ((11323, 11353), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[k, n]'}), '(size=[k, n])\n', (11340, 11353), True, 'import numpy as np\n'), ((11382, 11398), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (11390, 11398), True, 'import numpy as np\n'), ((4577, 4615), 'tvm.autotvm.callback.log_to_file', 'autotvm.callback.log_to_file', (['log_name'], {}), '(log_name)\n', (4605, 4615), False, 'from tvm import autotvm\n')] |
import subprocess
import sys
import time
import os
#############################
# COLORING YOUR SHELL #
#############################
R = "\033[1;31m" #
B = "\033[1;34m" #
Y = "\033[1;33m" #
G = "\033[1;32m" #
RS = "\033[0m" #
W = "\033[1;37m" #
#############################
os.system("clear")
print(" ")
print(R + "[" + G + "User Summary " + R + "]" + RS)
print("""
Shows extra information about IPv6 addresses, such as embedded MAC or IPv4 addresses when available.
Some IP address formats encode extra information; for example some IPv6 addresses encode an IPv4 address or MAC address
The script can decode these address formats:
• IPv4-compatible IPv6 addresses,
• IPv4-mapped IPv6 addresses,
• Teredo IPv6 addresses,
• 6to4 IPv6 addresses,
• IPv6 addresses using an EUI-64 interface ID,
• IPv4-embedded IPv6 addresses,
• ISATAP Modified EUI-64 IPv6 addresses,
• IPv4-translated IPv6 addresses.
See RFC 4291 for general IPv6 addressing architecture and the definitions of some terms.
""")
print(" ")
webb = input("" + RS + "[" + B + "ENTER TARGET " + R + "WEBSITE " + Y + "IP" + RS + "]" + G + ": " + RS)
subprocess.check_call(['nmap', '-sV', '-sC', webb])
| [
"os.system",
"subprocess.check_call"
]
| [((352, 370), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (361, 370), False, 'import os\n'), ((1209, 1260), 'subprocess.check_call', 'subprocess.check_call', (["['nmap', '-sV', '-sC', webb]"], {}), "(['nmap', '-sV', '-sC', webb])\n", (1230, 1260), False, 'import subprocess\n')] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities.upgrade_checkpoint import upgrade_checkpoint
@pytest.mark.skip
@pytest.mark.parametrize(
"old_checkpoint, new_checkpoint",
[
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best": 0.34},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.34}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_score": 0.99},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.99}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_path": 'path'},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_path": 'path'}}},
),
(
{"epoch": 1, "global_step": 23, "early_stop_callback_wait": 2, "early_stop_callback_patience": 4},
{"epoch": 1, "global_step": 23, "callbacks": {EarlyStopping: {"wait_count": 2, "patience": 4}}},
),
],
)
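# Expected behaviour under test: upgrade_checkpoint rewrites the legacy flat
# keys ("checkpoint_callback_*" / "early_stop_callback_*") into the
# per-callback "callbacks" mapping shown in new_checkpoint above.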
def test_upgrade_checkpoint(tmpdir, old_checkpoint, new_checkpoint):
filepath = os.path.join(tmpdir, "model.ckpt")
torch.save(old_checkpoint, filepath)
upgrade_checkpoint(filepath)
updated_checkpoint = torch.load(filepath)
assert updated_checkpoint == new_checkpoint
| [
"pytorch_lightning.utilities.upgrade_checkpoint.upgrade_checkpoint",
"torch.load",
"os.path.join",
"pytest.mark.parametrize",
"torch.save"
]
| [((794, 1609), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""old_checkpoint, new_checkpoint"""', "[({'epoch': 1, 'global_step': 23, 'checkpoint_callback_best': 0.34}, {\n 'epoch': 1, 'global_step': 23, 'callbacks': {ModelCheckpoint: {\n 'best_model_score': 0.34}}}), ({'epoch': 1, 'global_step': 23,\n 'checkpoint_callback_best_model_score': 0.99}, {'epoch': 1,\n 'global_step': 23, 'callbacks': {ModelCheckpoint: {'best_model_score': \n 0.99}}}), ({'epoch': 1, 'global_step': 23,\n 'checkpoint_callback_best_model_path': 'path'}, {'epoch': 1,\n 'global_step': 23, 'callbacks': {ModelCheckpoint: {'best_model_path':\n 'path'}}}), ({'epoch': 1, 'global_step': 23, 'early_stop_callback_wait':\n 2, 'early_stop_callback_patience': 4}, {'epoch': 1, 'global_step': 23,\n 'callbacks': {EarlyStopping: {'wait_count': 2, 'patience': 4}}})]"], {}), "('old_checkpoint, new_checkpoint', [({'epoch': 1,\n 'global_step': 23, 'checkpoint_callback_best': 0.34}, {'epoch': 1,\n 'global_step': 23, 'callbacks': {ModelCheckpoint: {'best_model_score': \n 0.34}}}), ({'epoch': 1, 'global_step': 23,\n 'checkpoint_callback_best_model_score': 0.99}, {'epoch': 1,\n 'global_step': 23, 'callbacks': {ModelCheckpoint: {'best_model_score': \n 0.99}}}), ({'epoch': 1, 'global_step': 23,\n 'checkpoint_callback_best_model_path': 'path'}, {'epoch': 1,\n 'global_step': 23, 'callbacks': {ModelCheckpoint: {'best_model_path':\n 'path'}}}), ({'epoch': 1, 'global_step': 23, 'early_stop_callback_wait':\n 2, 'early_stop_callback_patience': 4}, {'epoch': 1, 'global_step': 23,\n 'callbacks': {EarlyStopping: {'wait_count': 2, 'patience': 4}}})])\n", (817, 1609), False, 'import pytest\n'), ((1838, 1872), 'os.path.join', 'os.path.join', (['tmpdir', '"""model.ckpt"""'], {}), "(tmpdir, 'model.ckpt')\n", (1850, 1872), False, 'import os\n'), ((1877, 1913), 'torch.save', 'torch.save', (['old_checkpoint', 'filepath'], {}), '(old_checkpoint, filepath)\n', (1887, 1913), False, 'import torch\n'), ((1918, 1946), 'pytorch_lightning.utilities.upgrade_checkpoint.upgrade_checkpoint', 'upgrade_checkpoint', (['filepath'], {}), '(filepath)\n', (1936, 1946), False, 'from pytorch_lightning.utilities.upgrade_checkpoint import upgrade_checkpoint\n'), ((1972, 1992), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (1982, 1992), False, 'import torch\n')] |
import json
import os
from ..metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore_storage import CloseAfterUse, DataStoreStorage
from .exceptions import DataException
class LocalStorage(DataStoreStorage):
TYPE = "local"
METADATA_DIR = "_meta"
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
result = DATASTORE_SYSROOT_LOCAL
if result is None:
try:
# Python2
current_path = os.getcwdu()
except: # noqa E722
current_path = os.getcwd()
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
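            # Walk upwards from the current directory until an existing
            # DATASTORE_LOCAL_DIR directory is found or the filesystem root is
            # reached (in which case top_level_reached is set below).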
top_level_reached = False
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
top_level_reached = True
break # We are no longer making upward progress
current_path = new_path
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
if top_level_reached:
if create_on_absent:
# Could not find any directory to use so create a new one
echo(
"Creating local datastore in current directory (%s)" % orig_path
)
os.mkdir(orig_path)
result = orig_path
else:
return None
else:
result = check_dir
else:
result = os.path.join(result, DATASTORE_LOCAL_DIR)
return result
@staticmethod
def _makedirs(path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def is_file(self, paths):
results = []
for path in paths:
full_path = self.full_uri(path)
results.append(os.path.isfile(full_path))
return results
def info_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
full_meta_path = "%s_meta" % self.full_uri(path)
try:
with open(full_meta_path, "r") as f:
return True, json.load(f)
except OSError:
return True, None
return False, None
def size_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
path = self.full_uri(path)
try:
return os.path.getsize(path)
except OSError:
return None
return None
def list_content(self, paths):
results = []
for path in paths:
if path == self.METADATA_DIR:
continue
full_path = self.full_uri(path)
try:
for f in os.listdir(full_path):
if f == self.METADATA_DIR:
continue
results.append(
self.list_content_result(
path=self.path_join(path, f),
is_file=self.is_file([self.path_join(path, f)])[0],
)
)
except FileNotFoundError as e:
pass
return results
def save_bytes(self, path_and_bytes_iter, overwrite=False, len_hint=0):
for path, obj in path_and_bytes_iter:
if isinstance(obj, tuple):
byte_obj, metadata = obj
else:
byte_obj, metadata = obj, None
full_path = self.full_uri(path)
if not overwrite and os.path.exists(full_path):
continue
LocalStorage._makedirs(os.path.dirname(full_path))
with open(full_path, mode="wb") as f:
f.write(byte_obj.read())
if metadata:
with open("%s_meta" % full_path, mode="w") as f:
json.dump(metadata, f)
def load_bytes(self, paths):
def iter_results():
for path in paths:
full_path = self.full_uri(path)
metadata = None
if os.path.exists(full_path):
if os.path.exists("%s_meta" % full_path):
with open("%s_meta" % full_path, mode="r") as f:
metadata = json.load(f)
yield path, full_path, metadata
else:
yield path, None, None
return CloseAfterUse(iter_results())
| [
"os.path.exists",
"os.path.getsize",
"os.listdir",
"os.makedirs",
"os.getcwdu",
"os.path.join",
"os.getcwd",
"os.path.realpath",
"os.path.dirname",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"json.load",
"json.dump"
]
| [((632, 679), 'os.path.join', 'os.path.join', (['current_path', 'DATASTORE_LOCAL_DIR'], {}), '(current_path, DATASTORE_LOCAL_DIR)\n', (644, 679), False, 'import os\n'), ((704, 731), 'os.path.realpath', 'os.path.realpath', (['check_dir'], {}), '(check_dir)\n', (720, 731), False, 'import os\n'), ((1691, 1732), 'os.path.join', 'os.path.join', (['result', 'DATASTORE_LOCAL_DIR'], {}), '(result, DATASTORE_LOCAL_DIR)\n', (1703, 1732), False, 'import os\n'), ((1824, 1841), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1835, 1841), False, 'import os\n'), ((519, 531), 'os.getcwdu', 'os.getcwdu', ([], {}), '()\n', (529, 531), False, 'import os\n'), ((826, 850), 'os.path.isdir', 'os.path.isdir', (['check_dir'], {}), '(check_dir)\n', (839, 850), False, 'import os\n'), ((879, 908), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (894, 908), False, 'import os\n'), ((1136, 1183), 'os.path.join', 'os.path.join', (['current_path', 'DATASTORE_LOCAL_DIR'], {}), '(current_path, DATASTORE_LOCAL_DIR)\n', (1148, 1183), False, 'import os\n'), ((2114, 2139), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (2128, 2139), False, 'import os\n'), ((2713, 2734), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (2728, 2734), False, 'import os\n'), ((3048, 3069), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (3058, 3069), False, 'import os\n'), ((3855, 3880), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (3869, 3880), False, 'import os\n'), ((3942, 3968), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (3957, 3968), False, 'import os\n'), ((4386, 4411), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (4400, 4411), False, 'import os\n'), ((596, 607), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (605, 607), False, 'import os\n'), ((1490, 1509), 'os.mkdir', 'os.mkdir', (['orig_path'], {}), '(orig_path)\n', (1498, 1509), False, 'import os\n'), ((4171, 4193), 'json.dump', 'json.dump', (['metadata', 'f'], {}), '(metadata, f)\n', (4180, 4193), False, 'import json\n'), ((4436, 4473), 'os.path.exists', 'os.path.exists', (["('%s_meta' % full_path)"], {}), "('%s_meta' % full_path)\n", (4450, 4473), False, 'import os\n'), ((2430, 2442), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2439, 2442), False, 'import json\n'), ((4587, 4599), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4596, 4599), False, 'import json\n')] |
# coding: utf-8
"""
markdown2dita
~~~~~~~~~~~~~
A markdown to dita-ot conversion tool written in pure python.
Uses mistune to parse the markdown.
"""
from __future__ import print_function
import argparse
import sys
import mistune
__version__ = '0.3'
__author__ = '<NAME> <<EMAIL>>'
__all__ = ['Renderer', 'Markdown', 'markdown', 'escape']
class Renderer(mistune.Renderer):
def codespan(self, text):
return '<codeph>{0}</codeph>'.format(escape(text.rstrip()))
def link(self, link, title, content):
return '<xref href="{0}">{1}</xref>'.format(link, escape(content or title))
def block_code(self, code, language=None):
code = escape(code.rstrip('\n'))
if language:
return ('<codeblock outputclass="language-{0}">{1}</codeblock>'
.format(language, code))
else:
return '<codeblock>{0}</codeblock>'.format(code)
def block_quote(self, text):
return '<codeblock>{0}</codeblock>'.format(text)
def header(self, text, level, raw=None):
# Dita only supports one title per section
title_level = self.options.get('title_level', 2)
if level <= title_level:
return '</section><section><title>{0}</title>'.format(text)
else:
return '<p><b>{0}</b></p>'.format(text)
def double_emphasis(self, text):
return '<b>{0}</b>'.format(text)
def emphasis(self, text):
return '<i>{0}</i>'.format(text)
def hrule(self):
# Dita has no horizontal rule, ignore it
# could maybe divide sections?
return ''
def inline_html(self, text):
# Dita does not support inline html, just pass it through
return text
def list_item(self, text):
return '<li>{0}</li>'.format(text)
def list(self, body, ordered=True):
if ordered:
return '<ol>{0}</ol>'.format(body)
else:
return '<ul>{0}</ul>'.format(body)
def image(self, src, title, text):
# Derived from the mistune library source code
src = mistune.escape_link(src)
text = escape(text, quote=True)
if title:
title = escape(title, quote=True)
output = ('<fig><title>{0}</title>\n'
'<image href="{1}" alt="{2}"/></fig>'
.format(title, src, text))
else:
output = '<image href="{0}" alt="{1}"/>'.format(src, text)
return output
def table(self, header, body, cols):
col_string = ['<colspec colname="col{0}"/>'.format(x+1)
for x in range(cols)]
output_str = ('<table>\n<tgroup cols="{0}">\n{1}\n'
.format(cols, '\n'.join(col_string)))
return (output_str + '<thead>\n' + header + '</thead>\n<tbody>\n' +
body + '</tbody>\n</tgroup>\n</table>')
def table_row(self, content):
return '<row>\n{0}</row>\n'.format(content)
def table_cell(self, content, **flags):
align = flags['align']
if align:
return '<entry align="{0}">{1}</entry>\n'.format(align, content)
else:
return '<entry>{0}</entry>\n'.format(content)
def autolink(self, link, is_email=False):
text = link = escape(link)
if is_email:
link = 'mailto:{0}'.format(link)
return '<xref href="{0}">{1}</xref>'.format(link, text)
def footnote_ref(self, key, index):
return ''
def footnote_item(self, key, text):
return ''
def footnotes(self, text):
return ''
def strikethrough(self, text):
return text
class Markdown(mistune.Markdown):
def __init__(self, renderer=None, inline=None, block=None, **kwargs):
if not renderer:
renderer = Renderer(**kwargs)
else:
kwargs.update(renderer.options)
super(Markdown, self).__init__(
renderer=renderer, inline=inline, block=block)
def parse(self, text, page_id='enter-id-here',
title='Enter the page title here'):
output = super(Markdown, self).parse(text)
        if output.startswith('</section>'):
            # drop the full leading closing tag emitted by header()
            output = output[len('</section>'):]
else:
output = '<section>\n' + output
output = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
<concept xml:lang="en-us" id="{0}">
<title>{1}</title>
<shortdesc>Enter the short description for this page here</shortdesc>
<conbody>
{2}</section>
</conbody>
</concept>""".format(page_id, title, output)
return output
def output_table(self):
# Derived from the mistune library source code
aligns = self.token['align']
aligns_length = len(aligns)
cell = self.renderer.placeholder()
# header part
header = self.renderer.placeholder()
cols = len(self.token['header'])
for i, value in enumerate(self.token['header']):
align = aligns[i] if i < aligns_length else None
flags = {'header': True, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
header += self.renderer.table_row(cell)
# body part
body = self.renderer.placeholder()
for i, row in enumerate(self.token['cells']):
cell = self.renderer.placeholder()
for j, value in enumerate(row):
align = aligns[j] if j < aligns_length else None
flags = {'header': False, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
body += self.renderer.table_row(cell)
return self.renderer.table(header, body, cols)
def escape(text, quote=False, smart_amp=True):
return mistune.escape(text, quote=quote, smart_amp=smart_amp)
def _parse_args(args):
parser = argparse.ArgumentParser(description='markdown2dita - a markdown '
'to dita-ot CLI conversion tool.')
parser.add_argument('-i', '--input-file',
help='input markdown file to be converted.'
'If omitted, input is taken from stdin.')
parser.add_argument('-o', '--output-file',
help='output file for the converted dita content.'
'If omitted, output is sent to stdout.')
return parser.parse_args(args)
def markdown(text, escape=True, **kwargs):
return Markdown(escape=escape, **kwargs)(text)
def main():
parsed_args = _parse_args(sys.argv[1:])
if parsed_args.input_file:
input_str = open(parsed_args.input_file, 'r').read()
elif not sys.stdin.isatty():
input_str = ''.join(line for line in sys.stdin)
else:
print('No input file specified and unable to read input on stdin.\n'
"Use the '-h' or '--help' flag to see usage information",
file=sys.stderr)
exit(1)
markdown = Markdown()
dita_output = markdown(input_str)
if parsed_args.output_file:
with open(parsed_args.output_file, 'w') as output_file:
output_file.write(dita_output)
else:
print(dita_output)
if __name__ == '__main__':
main()
| [
"sys.stdin.isatty",
"mistune.escape_link",
"argparse.ArgumentParser",
"mistune.escape"
]
| [((5827, 5881), 'mistune.escape', 'mistune.escape', (['text'], {'quote': 'quote', 'smart_amp': 'smart_amp'}), '(text, quote=quote, smart_amp=smart_amp)\n', (5841, 5881), False, 'import mistune\n'), ((5920, 6022), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""markdown2dita - a markdown to dita-ot CLI conversion tool."""'}), "(description=\n 'markdown2dita - a markdown to dita-ot CLI conversion tool.')\n", (5943, 6022), False, 'import argparse\n'), ((2097, 2121), 'mistune.escape_link', 'mistune.escape_link', (['src'], {}), '(src)\n', (2116, 2121), False, 'import mistune\n'), ((6730, 6748), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (6746, 6748), False, 'import sys\n')] |
import logging
import re
from celery import shared_task
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from architecture_tool_django.utils.confluence_wrapper import (
MyConfluence,
tiny_to_page_id,
)
from .models import Node
logger = logging.getLogger(__name__)
def get_node_attrs(instance):
attributes = {}
schema_properties = instance.nodetype.attribute_schema.schema["properties"]
for key, value in instance.attributeSet.items():
if key in schema_properties:
if "title" in schema_properties[key]:
attributes[schema_properties[key]["title"]] = value
else:
attributes[key] = value
attributes["Domain/Subsystem or Subdomain"] = ""
attributes["Service/Component Responsible"] = instance.attributeSet["name"]
attributes["Contact"] = ""
attributes["Service/Component Status"] = instance.attributeSet["status"]
return attributes
def get_outbound_edges(instance, base_url):
outbound_edges = {}
for edge in instance.outbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in outbound_edges:
outbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.target.key])
name = edge.target.attributeSet.get("name")
item = f'(<a href="{url}">{edge.target.key}</a>) {name}'
outbound_edges[edgetype].append(item)
return outbound_edges
def get_inbound_edges(instance, base_url):
inbound_edges = {}
for edge in instance.inbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in inbound_edges:
inbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.source.key])
name = edge.source.attributeSet.get("name")
item = f'(<a href="{url}">{edge.source.key}</a>) {name}'
inbound_edges[edgetype].append(item)
return inbound_edges
def update_confluence(title, context, doc_url):
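    # Render the Confluence storage-format body from the Django template,
    # resolve the numeric page id from the tiny-link part of doc_url and
    # overwrite the existing page in place.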
new_spec = get_template("misc/confluence_page.html").render(context)
tiny = re.sub(r".*\/", "", doc_url)
page_id = tiny_to_page_id(tiny)
confluence = MyConfluence()
# page = confluence.get_page_by_id(page_id, expand="version,body.storage")
# version = int(re.sub(r".*\/", "", r.json()["version"]["_links"]["self"]))
confluence.update_page(
page_id,
title,
new_spec,
parent_id=None,
type="page",
representation="storage",
minor_edit=False,
)
def update_confluence_for_component(nodekey):
instance = get_object_or_404(Node, pk=nodekey)
doc_system = instance.attributeSet.get("primaryDocumentationSystem")
doc_url = instance.attributeSet.get("docupediaPage")
if doc_system != "ARC001" or doc_url == "":
return
base_url = settings.ARCHITECTURE_TOOL_URL
attributes = get_node_attrs(instance)
outbound_edges = get_outbound_edges(instance, base_url)
inbound_edges = get_inbound_edges(instance, base_url)
if "isDomainOf" in outbound_edges:
attributes["Domain/Subsystem or Subdomain"] = outbound_edges["isDomainOf"][0]
if "isResponsibleOf" in outbound_edges:
attributes["Service/Component Responsible"] = outbound_edges["isResponsibleOf"][
0
]
if "isContactOf" in outbound_edges:
attributes["Contact"] = ", ".join(outbound_edges["isContactOf"])
image_url = "https://www.xxx.com"
title = f'({instance.key}) {instance.attributeSet["name"]} ({instance.attributeSet["status"]})'
context = {
"base_url": base_url,
"node": instance,
"attributes": attributes,
"inbound_edges": inbound_edges,
"outbound_edges": outbound_edges,
"image_url": image_url,
}
update_confluence(title, context, doc_url)
@shared_task
def update_component_page_task(nodekey):
update_confluence_for_component(nodekey)
logger.info(f"Task: Page for {nodekey} updated!")
@shared_task
def update_components_page_task():
one_h_ago = timezone.now() - timezone.timedelta(hours=1)
nodes = Node.objects.filter(Q(nodetype="component") & Q(updated__gte=one_h_ago))
for node in nodes:
update_confluence_for_component(node.key)
logger.info("Task: All components updated!")
| [
"logging.getLogger",
"django.shortcuts.get_object_or_404",
"architecture_tool_django.utils.confluence_wrapper.MyConfluence",
"django.utils.timezone.now",
"django.utils.timezone.timedelta",
"django.urls.reverse",
"re.sub",
"django.db.models.Q",
"django.template.loader.get_template",
"architecture_tool_django.utils.confluence_wrapper.tiny_to_page_id"
]
| [((424, 451), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (441, 451), False, 'import logging\n'), ((2268, 2296), 're.sub', 're.sub', (['""".*\\\\/"""', '""""""', 'doc_url'], {}), "('.*\\\\/', '', doc_url)\n", (2274, 2296), False, 'import re\n'), ((2311, 2332), 'architecture_tool_django.utils.confluence_wrapper.tiny_to_page_id', 'tiny_to_page_id', (['tiny'], {}), '(tiny)\n', (2326, 2332), False, 'from architecture_tool_django.utils.confluence_wrapper import MyConfluence, tiny_to_page_id\n'), ((2350, 2364), 'architecture_tool_django.utils.confluence_wrapper.MyConfluence', 'MyConfluence', ([], {}), '()\n', (2362, 2364), False, 'from architecture_tool_django.utils.confluence_wrapper import MyConfluence, tiny_to_page_id\n'), ((2776, 2811), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Node'], {'pk': 'nodekey'}), '(Node, pk=nodekey)\n', (2793, 2811), False, 'from django.shortcuts import get_object_or_404\n'), ((4245, 4259), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4257, 4259), False, 'from django.utils import timezone\n'), ((4262, 4289), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4280, 4289), False, 'from django.utils import timezone\n'), ((1384, 1436), 'django.urls.reverse', 'reverse', (['"""nodes:node.detail"""'], {'args': '[edge.target.key]'}), "('nodes:node.detail', args=[edge.target.key])\n", (1391, 1436), False, 'from django.urls import reverse\n'), ((1893, 1945), 'django.urls.reverse', 'reverse', (['"""nodes:node.detail"""'], {'args': '[edge.source.key]'}), "('nodes:node.detail', args=[edge.source.key])\n", (1900, 1945), False, 'from django.urls import reverse\n'), ((2199, 2240), 'django.template.loader.get_template', 'get_template', (['"""misc/confluence_page.html"""'], {}), "('misc/confluence_page.html')\n", (2211, 2240), False, 'from django.template.loader import get_template\n'), ((4322, 4345), 'django.db.models.Q', 'Q', ([], {'nodetype': '"""component"""'}), "(nodetype='component')\n", (4323, 4345), False, 'from django.db.models import Q\n'), ((4348, 4373), 'django.db.models.Q', 'Q', ([], {'updated__gte': 'one_h_ago'}), '(updated__gte=one_h_ago)\n', (4349, 4373), False, 'from django.db.models import Q\n')] |
from fastapi import FastAPI, Response, WebSocket, WebSocketDisconnect
from threading import Thread
from .server import Server
from .errors import HoistExistsError
from .error import Error
from .version import __version__
from .flask_wrapper import HTML
import uvicorn
from typing import List, Callable
from fastapi.responses import HTMLResponse, JSONResponse
class FastAPIWrapper:
"""Wrapper for FastAPI."""
@staticmethod
def make_server(*args, **kwargs) -> FastAPI:
"""Generate a FastAPI server."""
return FastAPI(*args, **kwargs)
def get_response(self, auth: str, tokens: List[str], callback: Callable, arg: str, response: Response) -> dict:
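        # Translate the callback outcome into an HTTP response: a bad token
        # gives 401, a hoist Error uses its own status code and message, any
        # other failure gives 500, and success returns {'RESPONSE': ...}.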
        if auth not in tokens:
response.status_code = 401
return {'ERROR': 'unauthorized'}
resp, success = callback(arg)
if isinstance(resp, Error):
response.status_code = resp.code
return {'ERROR': resp.message}
if not success:
response.status_code = 500
return {'ERROR': resp}
else:
return {'RESPONSE': resp}
def add_hoist(self, app: FastAPI, handle_errors: bool = True, auth: list = [""], premade_pages: bool = True) -> FastAPI:
"""Function for setting up hoist on an app."""
if hasattr(app, 'HOIST_INTERNALSERVER'):
raise HoistExistsError('hoist is already set up on app')
app.HOIST_INTERNALSERVER = Server(app, handle_errors)
tokens: List[str] = auth.copy() # to stop collisions
app.HOIST_AUTH = tokens
app.HOIST_WRAPPER = self
@app.exception_handler(422)
def invalid_args(req, exc) -> JSONResponse:
print('a')
return JSONResponse({"ERROR": "Invalid arguments."}, status_code = 400)
@app.post('/hoist/send')
def http_send(msg: str, auth: str, response: Response) -> dict:
return self.get_response(auth, tokens, app.HOIST_INTERNALSERVER._received, msg, response)
if premade_pages:
@app.get('/hoist')
def home_get() -> str:
return HTMLResponse(
HTML.replace('{{ version }}', __version__)
)
@app.post('/hoist')
def hoist_post() -> str:
return {'RESPONSE': f'Version {__version__}'}
return app
@staticmethod
def run_server(app: FastAPI, ip: str, port: int) -> None:
"""Function for running a FastAPI server."""
uvicorn.run(app, host = ip, port = port)
def thread_server(self, app: FastAPI, ip: str, port: int) -> FastAPI:
"""Function for running a flask app with a thread."""
server: Thread = Thread(target = self.run_server, args = (app, ip, port))
server.start()
return app
def add_socket(self, app: FastAPI, route: str) -> None:
"""Function for adding a socket to a FastAPI server."""
@app.websocket(route)
async def ws(websocket: WebSocket, response: Response):
sock = app.HOIST_SOCKETS[route]
for i in sock.connect:
i()
await websocket.accept()
while True:
try:
data = await websocket.receive_text()
resp = self.get_response("", app.HOIST_AUTH, sock._received, data, response)
await websocket.send_json(resp)
except WebSocketDisconnect:
for i in sock.disconnect:
i()
break
| [
"threading.Thread",
"fastapi.responses.JSONResponse",
"fastapi.FastAPI",
"uvicorn.run"
]
| [((536, 560), 'fastapi.FastAPI', 'FastAPI', (['*args'], {}), '(*args, **kwargs)\n', (543, 560), False, 'from fastapi import FastAPI, Response, WebSocket, WebSocketDisconnect\n'), ((2568, 2604), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': 'ip', 'port': 'port'}), '(app, host=ip, port=port)\n', (2579, 2604), False, 'import uvicorn\n'), ((2771, 2823), 'threading.Thread', 'Thread', ([], {'target': 'self.run_server', 'args': '(app, ip, port)'}), '(target=self.run_server, args=(app, ip, port))\n', (2777, 2823), False, 'from threading import Thread\n'), ((1752, 1814), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'ERROR': 'Invalid arguments.'}"], {'status_code': '(400)'}), "({'ERROR': 'Invalid arguments.'}, status_code=400)\n", (1764, 1814), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n')] |
# This file is executed on every boot (including wake-boot from deepsleep)
# 2017-1210 PePo send timestamp and temperature (Celsius) to MQTT-server on BBB
# 2017-1105 PePo add _isLocal: sensor data to serial port (False) of stored in file (True)
# 2017-0819 PePo add sensor, led and print to serial port
# 2017-0811 PePo updated: no debug, disable webrepl,
# source: https://youtu.be/yGKZOwzGePY - Tony D! MP ESP8266 HTTP examples
print('main.py executing...')
# connect to a personal Wifi network ---------
import wifinetwork as wifi
# TODO: JSON config-file with ssid:ww entry/entries
#wifi.connectTo("PePoDevNet", wifi.readPasswordFrom('pepodevnet.txt'))
print('Wifi: connect to PePoDevNet...')
wifi.connectTo("PePoDevNet")
# set the time from nptime ---------
#print('TODO: get current time from the web...')
print('getting time from the web...')
import nptime
print('... UTC time:', nptime.settime())
#print('\tTODO -local time')
# --- SUMMERTIME or not (=WINTERTIME) ---------------
_isSummerTime = False
print('... Summertime:', _isSummerTime)
# temperature ---------
import class_ds18b20
#get sensor at GPIO14
ds = class_ds18b20.DS18B20(14)
# --- location ---------------
_LOCATION = 'studyroom'
#7-segment display
import tm1637
from machine import Pin
import math
# create tm
tm = tm1637.TM1637(clk=Pin(5), dio=Pin(4))
#print('tm: ', tm)
def display_tm1637(t):
#debug: print('display: temp=', t)
tm.temperature( math.floor(t) )
# helper function: returns temperature-record as string
def temp_record(timestamp, temp):
# timestamp[3] correction for Summertime or not
def _tc(t):
correction = 1
if _isSummerTime:
correction = 2
return t + correction
data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
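    # e.g. 'studyroom,2017,12,10,15,30,0,21.50\n' ->
    # location, year, month, day, hour (incl. summertime correction), minute, second, temperature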
return data
#''' store data in file temperature.txt
# default: 1 measuremtn per 30 seconds
def saveT2File(dt=30.0):
import time
import utime
print('saveT2File({0}) entered...'.format(dt))
# helper function to add sensor data record to file
def write_record(timestamp, temp):
f = open('temperature.txt', 'a') #append mode
#data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
f.write( temp_record(timestamp, temp) )
f.close()
while True:
#FUTURE: led.on()
timestamp = utime.localtime()
temp = ds.celsius
display_tm1637(temp) #display
write_record(timestamp, temp) #write in file
#FUTURE: led.off()
time.sleep(dt)
# send data to MQTT-server
def send2Server(dt=30.0):
import time
import utime
from umqtt.simple import MQTTClient
#print('send2server({0}) entered...'.format(dt))
#MQTT configuration -----------------
mqtt_server = '192.168.178.40' #ip-address of MQTT-server
TOPIC_TEST = b'topic/test' # topic: debug message
TOPIC_VALUE = b'topic/value' # topic: temperature value
TOPIC = b'topic/temperature' # topic: temp-record
#helper: sends data to MTQQ-server: connect-send payload-disconnet
def sendMQTT(payload, topic=TOPIC, server= mqtt_server):
#print('sendMQTT():', payload)
c = MQTTClient("umqtt_client", server)
c.connect() #success: returns 0
#debug: conn = c.connect()
#print('MQTT connection:', conn)
c.publish(topic, payload)
c.disconnect()
#broadcasting via topic:test
payload = b'MQTT-server: {0},\nTOPIC: {1},\nCollecting temperatures...'.format(mqtt_server, TOPIC) #debug
sendMQTT(payload, TOPIC_TEST)
print(payload)
while True:
timestamp = utime.localtime()
temp = ds.celsius
#print('temperature on display')
display_tm1637(temp)
#print('broadcast temp-record')
payload = temp_record(timestamp, temp)
sendMQTT(payload)
#print('broadcast temp-value')
payload = b'{0}'.format(temp)
sendMQTT(payload, TOPIC_VALUE)
time.sleep(dt)
#main run() - by-default 1 measurement per 30 seconds
def run(dt=30.0):
#store data local (True) or send to server (False)
_isLocal = False;
try:
if _isLocal:
# watch out: file can be very large overtime
saveT2File(dt)
else:
send2Server(dt)
except:
print('collecting temperature data intercepted')
pass
# go ahead and start getting, sending/storing the sensor data
if __name__ == "__main__":
run(60.0) # 1 measurement per minute
| [
"wifinetwork.connectTo",
"utime.localtime",
"math.floor",
"class_ds18b20.DS18B20",
"umqtt.simple.MQTTClient",
"machine.Pin",
"time.sleep",
"nptime.settime"
]
| [((714, 742), 'wifinetwork.connectTo', 'wifi.connectTo', (['"""PePoDevNet"""'], {}), "('PePoDevNet')\n", (728, 742), True, 'import wifinetwork as wifi\n'), ((1157, 1182), 'class_ds18b20.DS18B20', 'class_ds18b20.DS18B20', (['(14)'], {}), '(14)\n', (1178, 1182), False, 'import class_ds18b20\n'), ((911, 927), 'nptime.settime', 'nptime.settime', ([], {}), '()\n', (925, 927), False, 'import nptime\n'), ((1354, 1360), 'machine.Pin', 'Pin', (['(5)'], {}), '(5)\n', (1357, 1360), False, 'from machine import Pin\n'), ((1366, 1372), 'machine.Pin', 'Pin', (['(4)'], {}), '(4)\n', (1369, 1372), False, 'from machine import Pin\n'), ((1481, 1494), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (1491, 1494), False, 'import math\n'), ((2610, 2627), 'utime.localtime', 'utime.localtime', ([], {}), '()\n', (2625, 2627), False, 'import utime\n'), ((2785, 2799), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (2795, 2799), False, 'import time\n'), ((3463, 3497), 'umqtt.simple.MQTTClient', 'MQTTClient', (['"""umqtt_client"""', 'server'], {}), "('umqtt_client', server)\n", (3473, 3497), False, 'from umqtt.simple import MQTTClient\n'), ((3923, 3940), 'utime.localtime', 'utime.localtime', ([], {}), '()\n', (3938, 3940), False, 'import utime\n'), ((4317, 4331), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (4327, 4331), False, 'import time\n')] |
#!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, <NAME> (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
from faces import FaceDetector
from data import FaceData
from gabor import GaborBank
from emotions import EmotionsDetector
#---------------------------------------------
class VideoData:
"""
Helper class to present the detected face region, landmarks and emotions.
"""
#-----------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._faceDet = FaceDetector()
'''
The instance of the face detector.
'''
self._bank = GaborBank()
'''
The instance of the bank of Gabor filters.
'''
self._emotionsDet = EmotionsDetector()
'''
The instance of the emotions detector.
'''
self._face = FaceData()
'''
Data of the last face detected.
'''
self._emotions = OrderedDict()
'''
Data of the last emotions detected.
'''
#-----------------------------------------
def detect(self, frame):
"""
Detects a face and the prototypic emotions on the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to perform the detections from.
Returns
-------
ret: bool
Indication of success or failure.
"""
ret, face = self._faceDet.detect(frame)
if ret:
self._face = face
# Crop just the face region
frame, face = face.crop(frame)
# Filter it with the Gabor bank
responses = self._bank.filter(frame)
# Detect the prototypic emotions based on the filter responses
self._emotions = self._emotionsDet.detect(face, responses)
return True
else:
self._face = None
return False
#-----------------------------------------
def draw(self, frame):
"""
Draws the detected data of the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to draw the information to.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
white = (255, 255, 255)
yellow = (0, 255, 255)
red = (0, 0, 255)
empty = True
# Plot the face landmarks and face distance
x = 5
y = 0
w = int(frame.shape[1]* 0.2)
try:
face = self._face
empty = face.isEmpty()
face.draw(frame)
except:
pass
# Plot the emotion probabilities
try:
emotions = self._emotions
if empty:
labels = []
values = []
else:
labels = list(emotions.keys())
values = list(emotions.values())
bigger = labels[values.index(max(values))]
# Draw the header
text = 'emotions'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), black, 1)
size, _ = cv2.getTextSize('happiness', font, scale, thick)
t = size[0] + 20
w = 150
h = size[1]
for l, v in zip(labels, values):
lab = '{}:'.format(l)
val = '{:.2f}'.format(v)
size, _ = cv2.getTextSize(l, font, scale, thick)
# Set a red color for the emotion with bigger probability
color = red if l == bigger else yellow
y += size[1] + 15
p1 = (x+t, y-size[1]-5)
p2 = (x+t+w, y-size[1]+h+5)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the filled rectangle proportional to the probability
p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
cv2.rectangle(frame, p1, p2, color, -1)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the emotion label
cv2.putText(frame, lab, (x, y), font, scale, black, glow)
cv2.putText(frame, lab, (x, y), font, scale, color, thick)
# Draw the value of the emotion probability
cv2.putText(frame, val, (x+t+5, y), font, scale, black, glow)
cv2.putText(frame, val, (x+t+5, y), font, scale, white, thick)
except Exception as e:
print(e)
pass
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Parse the command line
args = parseCommandLine(argv)
# Loads the video or starts the webcam
if args.source == 'cam':
video = cv2.VideoCapture(args.id)
if not video.isOpened():
print('Error opening webcam of id {}'.format(args.id))
sys.exit(-1)
fps = 0
frameCount = 0
sourceName = 'Webcam #{}'.format(args.id)
else:
video = cv2.VideoCapture(args.file)
if not video.isOpened():
print('Error opening video file {}'.format(args.file))
sys.exit(-1)
fps = int(video.get(cv2.CAP_PROP_FPS))
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
sourceName = args.file
# Force HD resolution (if the video was not recorded in this resolution or
# if the camera does not support it, the frames will be stretched to fit it)
# The intention is just to standardize the input (and make the help window
# work as intended)
video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280);
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720);
# Create the helper class
data = VideoData()
# Text settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thick = 1
glow = 3 * thick
# Color settings
color = (255, 255, 255)
paused = False
frameNum = 0
# Process the video input
while True:
if not paused:
start = datetime.now()
ret, img = video.read()
if ret:
frame = img.copy()
else:
paused = True
drawInfo(frame, frameNum, frameCount, paused, fps, args.source)
data.detect(frame)
data.draw(frame)
cv2.imshow(sourceName, frame)
if paused:
key = cv2.waitKey(0)
else:
end = datetime.now()
delta = (end - start)
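            # Aim for (near) real-time playback: wait whatever is left of one
            # frame period (1/fps) after detection and drawing, never < 1 ms.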
if fps != 0:
delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
else:
delay = 1
key = cv2.waitKey(delay)
if key == ord('q') or key == ord('Q') or key == 27:
break
elif key == ord('p') or key == ord('P'):
paused = not paused
elif args.source == 'video' and (key == ord('r') or key == ord('R')):
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2424832: # Left key
frameNum -= 1
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2555904: # Right key
frameNum += 1
if frameNum >= frameCount:
frameNum = frameCount - 1
elif args.source == 'video' and key == 2162688: # Pageup key
frameNum -= (fps * 10)
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2228224: # Pagedown key
frameNum += (fps * 10)
if frameNum >= frameCount:
frameNum = frameCount - 1
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 7340032: # F1
showHelp(sourceName, frame.shape)
if not paused:
frameNum += 1
video.release()
cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps, source):
"""
Draws text info related to the given frame number into the frame image.
Parameters
----------
image: numpy.ndarray
Image data where to draw the text info.
frameNum: int
Number of the frame of which to drawn the text info.
frameCount: int
Number total of frames in the video.
paused: bool
Indication if the video is paused or not.
fps: int
Frame rate (in frames per second) of the video for time calculation.
source: str
Source of the input images (either "video" or "cam").
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
yellow = (0, 255, 255)
# Print the current frame number and timestamp
if source == 'video':
text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
'(paused)' if paused else '')
else:
text = 'Frame: {:d} {}'.format(frameNum, '(paused)' if paused else '')
size, _ = cv2.getTextSize(text, font, scale, thick)
x = 5
y = frame.shape[0] - 2 * size[1]
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
if source == 'video':
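        # Offset from datetime.min so strftime can render the elapsed and
        # total times as HH:MM:SS.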
timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
totalTime = datetime.strftime(timestamp, '%H:%M:%S')
text = 'Time: {}/{}'.format(elapsedTime, totalTime)
size, _ = cv2.getTextSize(text, font, scale, thick)
y = frame.shape[0] - 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
# Print the help message
text = 'Press F1 for help'
size, _ = cv2.getTextSize(text, font, scale, thick)
x = frame.shape[1] - size[0] - 5
y = frame.shape[0] - size[1] + 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
#---------------------------------------------
def showHelp(windowTitle, shape):
"""
Displays an image with helping text.
Parameters
----------
windowTitle: str
Title of the window where to display the help
shape: tuple
Height and width of the window to create the help image.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1.0
thick = 1
# Color settings
black = (0, 0, 0)
red = (0, 0, 255)
# Create the background image
image = np.ones((shape[0], shape[1], 3)) * 255
# The help text is printed in one line per item in this list
helpText = [
'Controls:',
'-----------------------------------------------',
'[q] or [ESC]: quits from the application.',
'[p]: toggles paused/playing the video/webcam input.',
'[r]: restarts the video playback (video input only).',
'[left/right arrow]: displays the previous/next frame (video input only).',
'[page-up/down]: rewinds/fast forwards by 10 seconds (video input only).',
' ',
' ',
'Press any key to close this window...'
]
# Print the controls help text
xCenter = image.shape[1] // 2
yCenter = image.shape[0] // 2
margin = 20 # between-lines margin in pixels
textWidth = 0
textHeight = margin * (len(helpText) - 1)
lineHeight = 0
for line in helpText:
size, _ = cv2.getTextSize(line, font, scale, thick)
textHeight += size[1]
textWidth = size[0] if size[0] > textWidth else textWidth
lineHeight = size[1] if size[1] > lineHeight else lineHeight
x = xCenter - textWidth // 2
y = yCenter - textHeight // 2
for line in helpText:
cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
cv2.putText(image, line, (x, y), font, scale, red, thick)
y += margin + lineHeight
# Show the image and wait for a key press
cv2.imshow(windowTitle, image)
cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
"""
Parse the command line of this utility application.
This function uses the argparse package to handle the command line
arguments. In case of command line errors, the application will be
automatically terminated.
Parameters
------
argv: list of str
Arguments received from the command line.
Returns
------
object
Object with the parsed arguments as attributes (refer to the
documentation of the argparse package for details)
"""
parser = argparse.ArgumentParser(description='Tests the face and emotion '
'detector on a video file input.')
parser.add_argument('source', nargs='?', const='Yes',
choices=['video', 'cam'], default='cam',
help='Indicate the source of the input images for '
'the detectors: "video" for a video file or '
'"cam" for a webcam. The default is "cam".')
parser.add_argument('-f', '--file', metavar='<name>',
help='Name of the video file to use, if the source is '
'"video". The supported formats depend on the codecs '
'installed in the operating system.')
parser.add_argument('-i', '--id', metavar='<number>', default=0, type=int,
help='Numerical id of the webcam to use, if the source '
'is "cam". The default is 0.')
args = parser.parse_args()
if args.source == 'video' and args.file is None:
parser.error('-f is required when source is "video"')
return args
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | [
"gabor.GaborBank",
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"sys.exit",
"datetime.timedelta",
"faces.FaceDetector",
"argparse.ArgumentParser",
"cv2.line",
"data.FaceData",
"cv2.waitKey",
"collections.OrderedDict",
"numpy.ones",
"cv2.putText",
"cv2.getTextSize",
"datetime.datetime.now",
"cv2.VideoCapture",
"emotions.EmotionsDetector",
"datetime.datetime.strftime"
]
| [((9815, 9838), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9836, 9838), False, 'import cv2\n'), ((11030, 11071), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11045, 11071), False, 'import cv2\n'), ((11123, 11181), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (11134, 11181), False, 'import cv2\n'), ((11186, 11246), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (11197, 11246), False, 'import cv2\n'), ((11905, 11946), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11920, 11946), False, 'import cv2\n'), ((12025, 12083), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (12036, 12083), False, 'import cv2\n'), ((12088, 12148), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (12099, 12148), False, 'import cv2\n'), ((14070, 14100), 'cv2.imshow', 'cv2.imshow', (['windowTitle', 'image'], {}), '(windowTitle, image)\n', (14080, 14100), False, 'import cv2\n'), ((14105, 14119), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (14116, 14119), False, 'import cv2\n'), ((14717, 14819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tests the face and emotion detector on a video file input."""'}), "(description=\n 'Tests the face and emotion detector on a video file input.')\n", (14740, 14819), False, 'import argparse\n'), ((1860, 1874), 'faces.FaceDetector', 'FaceDetector', ([], {}), '()\n', (1872, 1874), False, 'from faces import FaceDetector\n'), ((1964, 1975), 'gabor.GaborBank', 'GaborBank', ([], {}), '()\n', (1973, 1975), False, 'from gabor import GaborBank\n'), ((2080, 2098), 'emotions.EmotionsDetector', 'EmotionsDetector', ([], {}), '()\n', (2096, 2098), False, 'from emotions import EmotionsDetector\n'), ((2192, 2202), 'data.FaceData', 'FaceData', ([], {}), '()\n', (2200, 2202), False, 'from data import FaceData\n'), ((2293, 2306), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2304, 2306), False, 'from collections import OrderedDict\n'), ((6594, 6619), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.id'], {}), '(args.id)\n', (6610, 6619), False, 'import cv2\n'), ((6861, 6888), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.file'], {}), '(args.file)\n', (6877, 6888), False, 'import cv2\n'), ((8121, 8150), 'cv2.imshow', 'cv2.imshow', (['sourceName', 'frame'], {}), '(sourceName, frame)\n', (8131, 8150), False, 'import cv2\n'), ((11367, 11407), 'datetime.datetime.strftime', 'datetime.strftime', (['timestamp', '"""%H:%M:%S"""'], {}), "(timestamp, '%H:%M:%S')\n", (11384, 11407), False, 'from datetime import datetime, timedelta\n'), ((11501, 11541), 'datetime.datetime.strftime', 'datetime.strftime', (['timestamp', '"""%H:%M:%S"""'], {}), "(timestamp, '%H:%M:%S')\n", (11518, 11541), False, 'from datetime import datetime, timedelta\n'), ((11621, 11662), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11636, 11662), False, 'import cv2\n'), ((11702, 11760), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 
'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (11713, 11760), False, 'import cv2\n'), ((11769, 11829), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (11780, 11829), False, 'import cv2\n'), ((12676, 12708), 'numpy.ones', 'np.ones', (['(shape[0], shape[1], 3)'], {}), '((shape[0], shape[1], 3))\n', (12683, 12708), True, 'import numpy as np\n'), ((13546, 13587), 'cv2.getTextSize', 'cv2.getTextSize', (['line', 'font', 'scale', 'thick'], {}), '(line, font, scale, thick)\n', (13561, 13587), False, 'import cv2\n'), ((13856, 13919), 'cv2.putText', 'cv2.putText', (['image', 'line', '(x, y)', 'font', 'scale', 'black', '(thick * 3)'], {}), '(image, line, (x, y), font, scale, black, thick * 3)\n', (13867, 13919), False, 'import cv2\n'), ((13928, 13985), 'cv2.putText', 'cv2.putText', (['image', 'line', '(x, y)', 'font', 'scale', 'red', 'thick'], {}), '(image, line, (x, y), font, scale, red, thick)\n', (13939, 13985), False, 'import cv2\n'), ((4886, 4934), 'cv2.getTextSize', 'cv2.getTextSize', (['"""happiness"""', 'font', 'scale', 'thick'], {}), "('happiness', font, scale, thick)\n", (4901, 4934), False, 'import cv2\n'), ((6732, 6744), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (6740, 6744), False, 'import sys\n'), ((7001, 7013), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (7009, 7013), False, 'import sys\n'), ((7851, 7865), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7863, 7865), False, 'from datetime import datetime, timedelta\n'), ((8189, 8203), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8200, 8203), False, 'import cv2\n'), ((8236, 8250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8248, 8250), False, 'from datetime import datetime, timedelta\n'), ((8453, 8471), 'cv2.waitKey', 'cv2.waitKey', (['delay'], {}), '(delay)\n', (8464, 8471), False, 'import cv2\n'), ((11309, 11342), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frameNum / fps)'}), '(seconds=frameNum / fps)\n', (11318, 11342), False, 'from datetime import datetime, timedelta\n'), ((11443, 11478), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frameCount / fps)'}), '(seconds=frameCount / fps)\n', (11452, 11478), False, 'from datetime import datetime, timedelta\n'), ((4552, 4593), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (4567, 4593), False, 'import cv2\n'), ((4645, 4703), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (4656, 4703), False, 'import cv2\n'), ((4720, 4780), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (4731, 4780), False, 'import cv2\n'), ((4821, 4866), 'cv2.line', 'cv2.line', (['frame', '(x, y)', '(x + w, y)', 'black', '(1)'], {}), '(frame, (x, y), (x + w, y), black, 1)\n', (4829, 4866), False, 'import cv2\n'), ((5158, 5196), 'cv2.getTextSize', 'cv2.getTextSize', (['l', 'font', 'scale', 'thick'], {}), '(l, font, scale, thick)\n', (5173, 5196), False, 'import cv2\n'), ((5463, 5501), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'black', '(1)'], {}), '(frame, p1, p2, black, 1)\n', (5476, 5501), False, 'import cv2\n'), ((5658, 5697), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'color', '(-1)'], {}), 
'(frame, p1, p2, color, -1)\n', (5671, 5697), False, 'import cv2\n'), ((5714, 5752), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'black', '(1)'], {}), '(frame, p1, p2, black, 1)\n', (5727, 5752), False, 'import cv2\n'), ((5811, 5868), 'cv2.putText', 'cv2.putText', (['frame', 'lab', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, lab, (x, y), font, scale, black, glow)\n', (5822, 5868), False, 'import cv2\n'), ((5885, 5943), 'cv2.putText', 'cv2.putText', (['frame', 'lab', '(x, y)', 'font', 'scale', 'color', 'thick'], {}), '(frame, lab, (x, y), font, scale, color, thick)\n', (5896, 5943), False, 'import cv2\n'), ((6021, 6086), 'cv2.putText', 'cv2.putText', (['frame', 'val', '(x + t + 5, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, val, (x + t + 5, y), font, scale, black, glow)\n', (6032, 6086), False, 'import cv2\n'), ((6099, 6165), 'cv2.putText', 'cv2.putText', (['frame', 'val', '(x + t + 5, y)', 'font', 'scale', 'white', 'thick'], {}), '(frame, val, (x + t + 5, y), font, scale, white, thick)\n', (6110, 6165), False, 'import cv2\n')] |
"""
Holds global celery application state and startup / shutdown handlers.
"""
from celery import Celery
from celery.app import app_or_default
from celery.signals import (
beat_init,
worker_process_init,
worker_process_shutdown,
setup_logging,
)
from ichnaea.log import configure_logging
from ichnaea.taskapp.config import (
configure_celery,
init_beat,
init_worker,
shutdown_worker,
)
@setup_logging.connect
def setup_logging_process(loglevel, logfile, format, colorize, **kwargs):
"""Called at scheduler and worker setup.
Configures logging using the same configuration as the webapp.
"""
configure_logging()
@beat_init.connect
def init_beat_process(signal, sender, **kw):
"""
Called automatically when `celery beat` is started.
Calls :func:`ichnaea.taskapp.config.init_beat`.
"""
celery_app = app_or_default()
init_beat(sender, celery_app)
@worker_process_init.connect
def init_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is started. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.init_worker`.
"""
# get the app in the current worker process
celery_app = app_or_default()
init_worker(celery_app)
@worker_process_shutdown.connect
def shutdown_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is stopped. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.shutdown_worker`.
"""
celery_app = app_or_default()
shutdown_worker(celery_app)
celery_app = Celery("ichnaea.taskapp.app")
configure_celery(celery_app)
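# The module-level celery_app configured here is what worker and beat
# processes import; the signal handlers above attach the per-process
# setup and teardown to it.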
| [
"ichnaea.taskapp.config.init_beat",
"celery.app.app_or_default",
"ichnaea.taskapp.config.configure_celery",
"ichnaea.log.configure_logging",
"celery.Celery",
"ichnaea.taskapp.config.init_worker",
"ichnaea.taskapp.config.shutdown_worker"
]
| [((1651, 1680), 'celery.Celery', 'Celery', (['"""ichnaea.taskapp.app"""'], {}), "('ichnaea.taskapp.app')\n", (1657, 1680), False, 'from celery import Celery\n'), ((1682, 1710), 'ichnaea.taskapp.config.configure_celery', 'configure_celery', (['celery_app'], {}), '(celery_app)\n', (1698, 1710), False, 'from ichnaea.taskapp.config import configure_celery, init_beat, init_worker, shutdown_worker\n'), ((643, 662), 'ichnaea.log.configure_logging', 'configure_logging', ([], {}), '()\n', (660, 662), False, 'from ichnaea.log import configure_logging\n'), ((871, 887), 'celery.app.app_or_default', 'app_or_default', ([], {}), '()\n', (885, 887), False, 'from celery.app import app_or_default\n'), ((892, 921), 'ichnaea.taskapp.config.init_beat', 'init_beat', (['sender', 'celery_app'], {}), '(sender, celery_app)\n', (901, 921), False, 'from ichnaea.taskapp.config import configure_celery, init_beat, init_worker, shutdown_worker\n'), ((1250, 1266), 'celery.app.app_or_default', 'app_or_default', ([], {}), '()\n', (1264, 1266), False, 'from celery.app import app_or_default\n'), ((1271, 1294), 'ichnaea.taskapp.config.init_worker', 'init_worker', (['celery_app'], {}), '(celery_app)\n', (1282, 1294), False, 'from ichnaea.taskapp.config import configure_celery, init_beat, init_worker, shutdown_worker\n'), ((1587, 1603), 'celery.app.app_or_default', 'app_or_default', ([], {}), '()\n', (1601, 1603), False, 'from celery.app import app_or_default\n'), ((1608, 1635), 'ichnaea.taskapp.config.shutdown_worker', 'shutdown_worker', (['celery_app'], {}), '(celery_app)\n', (1623, 1635), False, 'from ichnaea.taskapp.config import configure_celery, init_beat, init_worker, shutdown_worker\n')] |
import asyncio
from typing import Tuple
import heapq
class BasePersistentStorage(object):
async def push_message(self, mid, raw_package):
raise NotImplementedError
def push_message_nowait(self, mid, raw_package) -> asyncio.Future:
try:
asyncio.get_event_loop()
except RuntimeError as err:
if "There is no current event loop in thread" in str(err):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.ensure_future(self.push_message(mid, raw_package))
async def pop_message(self) -> Tuple[int, bytes]:
raise NotImplementedError
async def remove_message_by_mid(self, mid):
raise NotImplementedError
@property
async def is_empty(self) -> bool:
raise NotImplementedError
class HeapPersistentStorage(BasePersistentStorage):
def __init__(self, timeout):
self._queue = []
self._timeout = timeout
async def push_message(self, mid, raw_package):
tm = asyncio.get_event_loop().time()
heapq.heappush(self._queue, (tm, mid, raw_package))
async def pop_message(self):
current_time = asyncio.get_event_loop().time()
(tm, mid, raw_package) = heapq.heappop(self._queue)
if current_time - tm > self._timeout:
return mid, raw_package
else:
heapq.heappush(self._queue, (tm, mid, raw_package))
return None
async def remove_message_by_mid(self, mid):
message = next(filter(lambda x: x[1] == mid, self._queue), None)
if message:
self._queue.remove(message)
heapq.heapify(self._queue)
@property
async def is_empty(self):
return not bool(self._queue)
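# Minimal usage sketch (illustrative values only, not part of the original
# module): push a package, let it age past `timeout`, then pop it back.
async def _example_usage():
    storage = HeapPersistentStorage(timeout=0.1)
    await storage.push_message(1, b"raw-mqtt-package")
    await asyncio.sleep(0.2)  # wait longer than `timeout`
    message = await storage.pop_message()  # -> (1, b"raw-mqtt-package")
    queue_empty = await storage.is_empty  # -> True after the pop
    return message, queue_empty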
| [
"asyncio.new_event_loop",
"heapq.heappop",
"asyncio.set_event_loop",
"heapq.heapify",
"heapq.heappush",
"asyncio.get_event_loop"
]
| [((1088, 1139), 'heapq.heappush', 'heapq.heappush', (['self._queue', '(tm, mid, raw_package)'], {}), '(self._queue, (tm, mid, raw_package))\n', (1102, 1139), False, 'import heapq\n'), ((1263, 1289), 'heapq.heappop', 'heapq.heappop', (['self._queue'], {}), '(self._queue)\n', (1276, 1289), False, 'import heapq\n'), ((1662, 1688), 'heapq.heapify', 'heapq.heapify', (['self._queue'], {}), '(self._queue)\n', (1675, 1688), False, 'import heapq\n'), ((276, 300), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (298, 300), False, 'import asyncio\n'), ((1399, 1450), 'heapq.heappush', 'heapq.heappush', (['self._queue', '(tm, mid, raw_package)'], {}), '(self._queue, (tm, mid, raw_package))\n', (1413, 1450), False, 'import heapq\n'), ((1048, 1072), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1070, 1072), False, 'import asyncio\n'), ((1197, 1221), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1219, 1221), False, 'import asyncio\n'), ((432, 456), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (454, 456), False, 'import asyncio\n'), ((473, 501), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (495, 501), False, 'import asyncio\n')] |
# Copyright 2017 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import os
import sys
import threading
from collections import Iterable
import pexpect
import yaml
from yaml.error import YAMLError
log = logging.getLogger()
class Tee(object):
"""
Inspired by the bash command: tee
tee - read from standard input and write to standard output and files
"""
def __init__ (self, filename):
super(Tee, self).__init__()
self.file = open(filename, mode="w", buffering=0)
self.stdout = sys.stdout
sys.stdout = self
def __del__ (self):
sys.stdout = self.stdout
self.file.close()
def write (self, data):
self.file.write(data)
self.stdout.write(data)
def __enter__ (self):
return self
def __exit__ (self, exc_type, exc_val, exc_tb):
self.__del__()
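# Usage sketch (illustrative file name): duplicate stdout into a log file for
# the duration of a block.
#
#   with Tee("run_output.log"):
#     print("goes to stdout and to run_output.log")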
class EscapeRunResult():
"""
Container class for storing the result of the test run.
"""
def __init__ (self, output=None, exception=None):
self.log_output = output
self.exception = exception
def was_error (self):
return self.exception is not None
def __iter__ (self):
return iter(self.log_output)
class CommandRunner(object):
"""
Main runner class which capable of running the test script and kill the
process explicitly or based on the timeout value.
"""
KILL_TIMEOUT = 60
def __init__ (self, cmd, cwd=None, kill_timeout=None, output_stream=None):
self._command = self.__evaluate_cmd(cmd)
self._cwd = cwd if cwd else os.path.dirname(__file__)
self.kill_timeout = kill_timeout if kill_timeout else self.KILL_TIMEOUT
self.output_stream = output_stream
self._process = None
self.__killed = False
def __str__ (self):
return "%s(cmd: %s, timeout: %s)" % (
self.__class__.__name__, self._command, self.kill_timeout)
@property
def is_killed (self):
return self.__killed
@property
def is_alive (self):
return self._process and self._process.isalive()
@staticmethod
def __evaluate_cmd (cmd):
"""
Split command to list for pexpect.
:param cmd: str or list
:rtype: list[str]
"""
if isinstance(cmd, basestring):
return cmd.split(' ')
elif isinstance(cmd, Iterable):
return list(cmd)
else:
return None
def execute (self):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e)
self.kill_process()
def kill_process (self):
"""
Kill the process and call the optional hook function.
"""
log.debug("Kill process...")
self.stop()
self.__killed = True
if self.is_alive:
self._process.terminate(force=True)
def stop (self):
"""
Stop the process.
:return: None
"""
log.debug("Terminate program under test: %s" % self)
if self._process:
self._process.sendcontrol('c')
if self.is_alive:
self._process.terminate()
def get_process_output_stream (self):
"""
:return: Return with the process buffer.
"""
return self._process.before if self._process.before else ""
def clone (self):
return copy.deepcopy(self)
def cleanup (self):
log.debug("Cleanup %s..." % self.__class__.__name__)
self._process = None
self.__killed = False
class ESCAPECommandRunner(CommandRunner):
"""
Extended CommandRunner class for ESCAPE.
Use threading.Event for signalling ESCAPE is up.
"""
ESC_PARAM_QUIT = "--quit"
ESC_PARAM_SERVICE = "--service"
def __init__ (self, *args, **kwargs):
super(ESCAPECommandRunner, self).__init__(*args, **kwargs)
self.__ready = threading.Event()
self.timeouted = False
@property
def timeout_exceeded (self):
return self.timeouted
def setup_verbose_logging (self):
log.debug("Detect VERBOSE mode --> Add more 'debug' flag")
self._command.extend(('--debug',) * 2)
def setup_standalone_mode (self):
log.debug("Detected standalone mode --> Disable timeout")
self.kill_timeout = None
log.debug("Remove quit mode, add ROS-API")
self._command.extend(("++quit", "--rosapi"))
def execute (self, wait_for_up=True):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
log.debug("\nStart program under test...")
log.debug(self._command)
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
if wait_for_up:
self._process.expect(pattern="ESCAPEv2 is up")
self.__ready.set()
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
self.timeouted = True
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e.message)
log.debug("\n\nError details:\n%s" % self._process.before)
self.kill_process()
def test (self, timeout=CommandRunner.KILL_TIMEOUT):
"""
Start a presumably simple process and test if the process is executed
successfully within the timeout interval or been killed.
:param timeout: use the given timeout instead of the default kill timeout
:type timeout: int
:return: the process is stopped successfully
:rtype: bool
"""
try:
proc = pexpect.spawn(self._command[0],
args=self._command[1:],
cwd=self._cwd,
timeout=timeout)
proc.expect(pexpect.EOF)
return True
except pexpect.ExceptionPexpect:
return False
def wait_for_ready (self):
log.debug("Waiting for ESCAPE...")
self.__ready.wait(timeout=self.kill_timeout)
log.debug("ESCAPE is up! ")
def kill_process (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).kill_process()
def stop (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).stop()
def reset(self):
log.debug("Reset %s status..." % self.__class__.__name__)
self.timeouted = False
self.__ready.clear()
class RunnableTestCaseInfo(object):
"""
Container class for storing the relevant information and config values of a
test case.
"""
CONFIG_FILE_NAME = "test-config.yaml"
CONFIG_CONTAINER_NAME = "test"
RUNNER_SCRIPT_NAME = "run.sh"
README_FILE_NAME = "README.txt"
def __init__ (self, case_path):
# Removing trailing slash
self.__case_path = os.path.normpath(case_path)
self.sub_name = None
log.debug("Reading testcase cfg from: %s" % self.full_testcase_path)
@property
def testcase_dir_name (self):
"""
:return: directory name of the test case
:rtype: str
"""
return os.path.basename(self.__case_path)
@property
def name (self):
if self.sub_name is not None:
return "%s-%s" % (self.testcase_dir_name, self.sub_name)
else:
return self.testcase_dir_name
@property
def full_testcase_path (self):
"""
:return: absolute path of the test case directory.
:rtype: str
"""
return self.__case_path
@property
def test_command (self):
"""
:return: absolute command path of the test case runner script.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.RUNNER_SCRIPT_NAME)
@property
def config_file_name (self):
"""
:return: absolute path of the test case config file.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.CONFIG_FILE_NAME)
def readme (self):
"""
:return: load the README file
:rtype: str
"""
with open(os.path.join(self.full_testcase_path,
self.README_FILE_NAME)) as f:
readme = f.read()
return readme if readme else ""
def load_test_case_class (self):
"""
:return: Return the TestCase class and it's parameters defined in the
test case config file
:rtype: tuple(object, dict)
"""
test_args = {}
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
if self.CONFIG_CONTAINER_NAME in config:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
try:
m = test_args.pop('module')
c = test_args.pop('class')
return getattr(importlib.import_module(m), c), test_args
except (KeyError, ImportError):
pass
return None, test_args
def load_config (self):
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
try:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
return test_args
except KeyError:
pass
return None
def __repr__ (self):
return "RunnableTestCase [%s]" % self.testcase_dir_name
def clone (self):
return copy.deepcopy(self)
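# Minimal usage sketch (hypothetical command and file name): run a short
# command with CommandRunner while mirroring stdout into a log file via Tee.
def _example_run ():
  with Tee("example_run.log"):
    runner = CommandRunner(cmd="echo hello", kill_timeout=10)
    runner.execute()
    print(runner.get_process_output_stream())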
| [
"logging.getLogger",
"importlib.import_module",
"pexpect.spawn",
"os.path.join",
"threading.Event",
"os.path.normpath",
"os.path.dirname",
"yaml.safe_load",
"os.path.basename",
"copy.deepcopy",
"copy.copy"
]
| [((758, 777), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (775, 777), False, 'import logging\n'), ((4232, 4251), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (4245, 4251), False, 'import copy\n'), ((4753, 4770), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4768, 4770), False, 'import threading\n'), ((8004, 8031), 'os.path.normpath', 'os.path.normpath', (['case_path'], {}), '(case_path)\n', (8020, 8031), False, 'import os\n'), ((8263, 8297), 'os.path.basename', 'os.path.basename', (['self.__case_path'], {}), '(self.__case_path)\n', (8279, 8297), False, 'import os\n'), ((8784, 8846), 'os.path.join', 'os.path.join', (['self.full_testcase_path', 'self.RUNNER_SCRIPT_NAME'], {}), '(self.full_testcase_path, self.RUNNER_SCRIPT_NAME)\n', (8796, 8846), False, 'import os\n'), ((9015, 9075), 'os.path.join', 'os.path.join', (['self.full_testcase_path', 'self.CONFIG_FILE_NAME'], {}), '(self.full_testcase_path, self.CONFIG_FILE_NAME)\n', (9027, 9075), False, 'import os\n'), ((10605, 10624), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (10618, 10624), False, 'import copy\n'), ((2031, 2056), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2046, 2056), False, 'import os\n'), ((2967, 3097), 'pexpect.spawn', 'pexpect.spawn', (['self._command[0]'], {'args': 'self._command[1:]', 'timeout': 'self.kill_timeout', 'cwd': 'self._cwd', 'logfile': 'self.output_stream'}), '(self._command[0], args=self._command[1:], timeout=self.\n kill_timeout, cwd=self._cwd, logfile=self.output_stream)\n', (2980, 3097), False, 'import pexpect\n'), ((5490, 5620), 'pexpect.spawn', 'pexpect.spawn', (['self._command[0]'], {'args': 'self._command[1:]', 'timeout': 'self.kill_timeout', 'cwd': 'self._cwd', 'logfile': 'self.output_stream'}), '(self._command[0], args=self._command[1:], timeout=self.\n kill_timeout, cwd=self._cwd, logfile=self.output_stream)\n', (5503, 5620), False, 'import pexpect\n'), ((6672, 6763), 'pexpect.spawn', 'pexpect.spawn', (['self._command[0]'], {'args': 'self._command[1:]', 'cwd': 'self._cwd', 'timeout': 'timeout'}), '(self._command[0], args=self._command[1:], cwd=self._cwd,\n timeout=timeout)\n', (6685, 6763), False, 'import pexpect\n'), ((9836, 9881), 'copy.copy', 'copy.copy', (['config[self.CONFIG_CONTAINER_NAME]'], {}), '(config[self.CONFIG_CONTAINER_NAME])\n', (9845, 9881), False, 'import copy\n'), ((10372, 10417), 'copy.copy', 'copy.copy', (['config[self.CONFIG_CONTAINER_NAME]'], {}), '(config[self.CONFIG_CONTAINER_NAME])\n', (10381, 10417), False, 'import copy\n'), ((9202, 9262), 'os.path.join', 'os.path.join', (['self.full_testcase_path', 'self.README_FILE_NAME'], {}), '(self.full_testcase_path, self.README_FILE_NAME)\n', (9214, 9262), False, 'import os\n'), ((9638, 9655), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (9652, 9655), False, 'import yaml\n'), ((10210, 10227), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (10224, 10227), False, 'import yaml\n'), ((9987, 10013), 'importlib.import_module', 'importlib.import_module', (['m'], {}), '(m)\n', (10010, 10013), False, 'import importlib\n')] |
import click
from covid_data_tracker.registry import PluginRegistry
def plugin_selector(selected_country: str):
"""plugin selector uses COUNTRY_MAP to find the appropriate plugin
for a given country.
Parameters
----------
selected_country : str
specify the country of interest.
Returns
-------
covid_data_tracker.plugins.BasePlugin
More appropriately, returns an instance of a country-specific
subclass of BasePlugin.
"""
if selected_country in PluginRegistry.keys():
klass = PluginRegistry[selected_country]
instance = klass()
else:
        click.echo('No country plugin available')
        raise AttributeError
return instance
def country_downloader(country: str):
"""Finds country plugin, fetches data, and downloads
to csv with click alerts.
Parameters
----------
country : str
Name of country
Returns
-------
NoneType
"""
click.echo(f"selecting plugin for {country}")
country_plugin = plugin_selector(country)
click.echo(f"attempting to find available data for {country}")
country_plugin.fetch()
click.echo(f"downloading available data for {country}")
country_plugin.check_instance_attributes()
country_plugin.download()
| [
"click.echo",
"covid_data_tracker.registry.PluginRegistry.keys"
]
| [((979, 1024), 'click.echo', 'click.echo', (['f"""selecting plugin for {country}"""'], {}), "(f'selecting plugin for {country}')\n", (989, 1024), False, 'import click\n'), ((1075, 1137), 'click.echo', 'click.echo', (['f"""attempting to find available data for {country}"""'], {}), "(f'attempting to find available data for {country}')\n", (1085, 1137), False, 'import click\n'), ((1169, 1224), 'click.echo', 'click.echo', (['f"""downloading available data for {country}"""'], {}), "(f'downloading available data for {country}')\n", (1179, 1224), False, 'import click\n'), ((517, 538), 'covid_data_tracker.registry.PluginRegistry.keys', 'PluginRegistry.keys', ([], {}), '()\n', (536, 538), False, 'from covid_data_tracker.registry import PluginRegistry\n'), ((663, 704), 'click.echo', 'click.echo', (['"""No country plugin available"""'], {}), "('No country plugin available')\n", (673, 704), False, 'import click\n')] |
from setuptools import setup
setup(name='osmuf',
version='0.1',
install_requires=[
"seaborn",
],
description='Urban Form analysis from OpenStreetMap',
url='http://github.com/atelierlibre/osmuf',
author='AtelierLibre',
author_email='<EMAIL>',
license='MIT',
packages=['osmuf'],
zip_safe=False)
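# Typical editable install for development (standard pip usage, not specific
# to this package):
#   pip install -e .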
| [
"setuptools.setup"
]
| [((30, 303), 'setuptools.setup', 'setup', ([], {'name': '"""osmuf"""', 'version': '"""0.1"""', 'install_requires': "['seaborn']", 'description': '"""Urban Form analysis from OpenStreetMap"""', 'url': '"""http://github.com/atelierlibre/osmuf"""', 'author': '"""AtelierLibre"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['osmuf']", 'zip_safe': '(False)'}), "(name='osmuf', version='0.1', install_requires=['seaborn'],\n description='Urban Form analysis from OpenStreetMap', url=\n 'http://github.com/atelierlibre/osmuf', author='AtelierLibre',\n author_email='<EMAIL>', license='MIT', packages=['osmuf'], zip_safe=False)\n", (35, 303), False, 'from setuptools import setup\n')] |
"""Semi continuous unit operations.
Unit operations that accept a constant or box-shaped flow rate profile
and provide a periodic flow rate profile.
"""
__all__ = ['AlternatingChromatography', 'ACC', 'PCC', 'PCCWithWashDesorption']
__version__ = '0.7.1'
__author__ = '<NAME>'
import typing as _typing
import numpy as _np
import scipy.interpolate as _interp
from bio_rtd.chromatography import bt_load as _bt_load
import bio_rtd.utils as _utils
import bio_rtd.core as _core
import bio_rtd.pdf as _pdf
class AlternatingChromatography(_core.UnitOperation):
"""Simulation of alternating chromatography.
This class implements logic common to various types of alternating
chromatography. It has a role of a base class for
specific types of alternating chromatography to extend.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "AC".
Notes
-----
**Quick description of which attributes are available:**
Non-binding species (optional):
* :attr:`non_binding_species`
Column volume (exactly one required):
* :attr:`cv`
* :attr:`ft_mean_retentate` and :attr:`column_porosity_retentate`
Column porosity for binding species (required in case of
:attr:`ft_mean_retentate` or wash or load recycling):
* :attr:`column_porosity_retentate`
Equilibration step duration (optional, if both, the values are
added together):
* :attr:`equilibration_cv`
* :attr:`equilibration_t`
Equilibration step flow rate (exactly one needed):
* :attr:`equilibration_f` - absolute, has priority if defined
* :attr:`equilibration_f_rel` - relative, default = 1
Load step duration:
* :attr:`load_cv` - preferred
* :attr:`load_c_end_ss` - concentration limit for breakthrough; also
requires :attr:`load_recycle_pdf`
* :attr:`load_c_end_relative_ss` - concentration limit for
breakthrough relative to steady-state load concentration; also
requires :attr:`load_recycle_pdf`
Iterative optimization of estimation of load step duration
(ignored if :attr:`load_cv` is defined):
* :attr:`load_c_end_estimate_with_iterative_solver` - default = True
* :attr:`load_c_end_estimate_with_iter_solver_max_iter` - default =
1000
Extension of first load step (optional; ignored if no recycling):
* :attr:`load_extend_first_cycle` - default = `False`
* :attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t` - added together if both defined
Load linear velocity - only for column height determination
(optional):
* :attr:`load_target_lin_velocity`
Wash step duration (optional, if both, the values are
added together):
* :attr:`wash_cv`
* :attr:`wash_t`
Wash step flow rate (exactly one needed):
* :attr:`wash_f` - absolute, has priority if defined
* :attr:`wash_f_rel` - relative, default = 1
Unaccounted losses - applied before peak cut (optional):
* :attr:`unaccounted_losses_rel` - relative, default = 1
Elution step duration (optional, if both, the values are
added together):
* :attr:`elution_cv`
* :attr:`elution_t`
Elution step flow rate (exactly one needed):
* :attr:`elution_f` - absolute, has priority if defined
* :attr:`elution_f_rel` - relative, default = 1
Elution buffer composition (optional):
* :attr:`elution_buffer_c`
    Elution peak position duration - first moment
(optional, if both, the values are added together):
* :attr:`elution_peak_position_cv`
* :attr:`elution_peak_position_t`
Elution peak cut start (one is required):
* :attr:`elution_peak_cut_start_t`
* :attr:`elution_peak_cut_start_cv`
* :attr:`elution_peak_cut_start_c_rel_to_peak_max`
* :attr:`elution_peak_cut_start_peak_area_share`
Elution peak cut end (one is required):
* :attr:`elution_peak_cut_end_t`
* :attr:`elution_peak_cut_end_cv`
* :attr:`elution_peak_cut_end_c_rel_to_peak_max`
* :attr:`elution_peak_cut_end_peak_area_share`
Regeneration step duration (optional, if both, the values are
added together):
* :attr:`regeneration_cv`
* :attr:`regeneration_t`
Regeneration step flow rate (exactly one needed):
* :attr:`regeneration_f` - absolute, has priority if defined
* :attr:`regeneration_f_rel` - relative, default = 1
Wash desorption (optional, also check if class supports it):
* :attr:`wash_desorption` - default = `False`
Load breakthrough recycle (optional):
* :attr:`load_recycle` - default = `False`
Load breakthrough propagation dynamics
(required if :attr:`load_recycle` is `True`
or :attr:`load_c_end_ss` is defined or
or :attr:`load_c_end_relative_ss` is defined):
* :attr:`load_recycle_pdf`
Wash recycle (optional):
* :attr:`wash_recycle` - default = `False`
Duration of wash recycling
(optional; ignored if :attr:`wash_recycle` is `False`):
* :attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` - summed together if both defined.
* Entire wash step if
:attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` are not defined.
Please note that subclasses might introduce new attributes or change
the default values of existing attributes.
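    Examples
    --------
    A minimal configuration sketch (hypothetical values; ``my_load_bt`` and
    ``my_peak_pdf`` stand for already prepared
    :class:`~bio_rtd.core.ChromatographyLoadBreakthrough` and
    :class:`~bio_rtd.core.PDF` instances)::
        import numpy as np
        t = np.linspace(0, 1000, 10001)  # time vector with constant step
        ac = AlternatingChromatography(t, "ac_example",
                                       my_load_bt, my_peak_pdf)
        ac.cv = 10.0                      # column volume
        ac.load_cv = 20.0                 # load step duration in CV
        ac.wash_cv = 5.0
        ac.elution_cv = 3.0
        ac.elution_peak_position_cv = 1.2
        ac.elution_peak_cut_start_c_rel_to_peak_max = 0.05
        ac.elution_peak_cut_end_c_rel_to_peak_max = 0.05
        ac.regeneration_cv = 2.0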
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "AC"):
super().__init__(t, uo_id, gui_title)
# Bind parameters.
self.load_bt: _core.ChromatographyLoadBreakthrough = load_bt
"""Determines what part of load material binds to the column."""
self.elution_peak_shape: _core.PDF = peak_shape_pdf
"""Elution peak shape."""
self.non_binding_species: _typing.Sequence[int] = []
"""Process buffer species that are NOT binding to the column.
Indexing starts with 0.
"""
self.cv: float = -1
"""Column volume.
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv` (this one)
* :attr:`ft_mean_retentate`
and :attr:`column_porosity_retentate`
"""
self.ft_mean_retentate: float = -1
"""Flow-through time of retentate under non-binding conditions.
Used to define column volume (independently of scale).
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv`
* :attr:`ft_mean_retentate` (this one) and
:attr:`column_porosity_retentate`
"""
self.column_porosity_retentate: float = -1
"""Column porosity for retentate under non-binding conditions.
Required in case :attr:`ft_mean_retentate` is used to define
column volume.
Required in case :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss` are used to estimate
load step duration.
Required in case of load or wash recycling.
"""
self.equilibration_cv: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_t: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_f: float = -1
"""Equilibration step flow rate.
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f` (this one)
* :attr:`equilibration_f_rel`
"""
self.equilibration_f_rel: float = 1
"""Equilibration step flow rate relative to load flow rate.
Default = 1.
Equilibration step flow rate = :attr:`equilibration_f_rel`
* `load flow rate`
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f`
* :attr:`equilibration_f_rel` (this one)
"""
# Duration of the load phase.
self.load_cv: float = -1 # load duration in CV
"""Load phase duration in CV.
        This is the preferred way to define the duration of the load step
as it does not require any estimations about steady state.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (this one)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_ss: _typing.Optional[_np.ndarray] = None
"""Load phase switch based on target product breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the breakthrough reaches
specified concentration.
Steady state simulation requires
:attr:`column_porosity_retentate`
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss` (this one)
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_relative_ss: float = -1
"""Load phase switch based on relative breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the product (binding species)
in the breakthrough reaches specified relative concentration
(relative to load concentration in steady-state operation).
Steady state simulation requires
:attr:`column_porosity_retentate`
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss` (this one)
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_estimate_with_iterative_solver: bool = True
"""Finer optimization of cycle length estimation.
Default = `True`.
        In case load step duration is estimated based on breakthrough
criteria (i.e. by :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss`), the model needs to simulate
steady-state operation in order to determine fixed load time.
        This parameter enables an iterative solver that allows more
precise estimation but might slow down the simulation.
Notes
-----
Max number of iteration steps is defined by
:attr:`load_c_end_estimate_with_iter_solver_max_iter`.
"""
self.load_c_end_estimate_with_iter_solver_max_iter: int = 1000
"""Max steps for optimization of cycle length estimation.
Default = 1000.
See Also
--------
:attr:`load_c_end_estimate_with_iterative_solver`
"""
self.load_extend_first_cycle: bool = False
"""Extend first load phase to achieve a faster steady-state.
Only relevant in case wash or load is recycled.
The duration of extension is defined by:
* :attr:`load_extend_first_cycle_cv` or
* :attr:`load_extend_first_cycle_t` or
* is determined automatically.
"""
self.load_extend_first_cycle_cv: float = -1
"""Duration of first load phase extension in column volumes.
Only relevant if :attr:`load_extend_first_cycle` is `True`.
        If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_extend_first_cycle_t: float = -1
"""Duration of first load phase extension (time).
Only relevant if :attr:`load_extend_first_cycle` is `True`.
        If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_target_lin_velocity: float = -1
"""Target load linear velocity.
It is used to provide information about required column height.
It does not have any impact on the rest of the model.
Units need to match other units in the model.
"""
self.wash_cv: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_t: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_f: float = -1
"""Wash step flow rate.
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f` (this one)
* :attr:`wash_f_rel`
"""
self.wash_f_rel: float = 1
"""Wash step flow rate relative to load flow rate. Default = 1.
Wash step flow rate = :attr:`wash_f_rel`
* `load flow rate`
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f`
* :attr:`wash_f_rel` (this one)
"""
self.unaccounted_losses_rel: float = 0
"""Unaccounted losses as a share of bound material.
Elution peak is scaled down by 1 - `unaccounted_losses_rel`
before applying peak cut criteria.
"""
self.elution_cv: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_t: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_f: float = -1
"""Elution step flow rate.
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f` (this one)
* :attr:`elution_f_rel`
"""
self.elution_f_rel: float = 1
"""Elution step flow rate relative to load flow rate.
Default = 1.
Elution step flow rate = :attr:`elution_f_rel`
* `load flow rate`
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f`
* :attr:`elution_f_rel` (this one)
"""
self.elution_buffer_c: _np.ndarray = _np.array([])
"""Elution buffer composition.
Default = empty array (= all components are 0).
If defined it must have a value for each specie.
"""
self.elution_peak_position_cv: float = -1
"""Position (cv) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_position_t: float = -1
"""Position (time) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_cut_start_t: float = -1
"""Elution peak cut start (time).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_cv: float = -1
"""Elution peak cut start (cv).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_c_rel_to_peak_max: float = -1
"""Elution peak cut start (signal relative to peak max).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_peak_area_share: float = -1
"""Elution peak cut start (share of total peak area).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_end_t: float = -1
"""Elution peak cut end (time).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_cv: float = -1
"""Elution peak cut end (cv).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_c_rel_to_peak_max: float = -1
"""Elution peak cut end (signal relative to peak max).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_peak_area_share: float = -1
"""Elution peak cut end (share of total peak area).
Exactly one peak cut end criteria should be defined.
"""
self.regeneration_cv: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_t: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_f: float = -1
"""Regeneration step flow rate.
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f` (this one)
* :attr:`regeneration_f_rel`
"""
self.regeneration_f_rel: float = 1
"""Regeneration step flow rate relative to load flow rate.
Default = 1.
Regeneration step flow rate = :attr:`regeneration_f_rel`
* `load flow rate`
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f`
* :attr:`regeneration_f_rel` (this one)
"""
self.wash_desorption: bool = False
"""Enable wash desorption.
Make sure the class implements the desorption dynamics.
"""
self.load_recycle: bool = False
"""Recycle load breakthrough. Default = False."""
self.load_recycle_pdf: _typing.Optional[_core.PDF] = None
"""PDF of wash and/or unbound load traveling through the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
"""
self.wash_recycle: bool = False
"""Recycle wash. Default = False.
Wash is recycled onto 3rd column while the 2nd is on load step.
After the wash recycle, the 3rd column is connected to 2nd
column to recycle load breakthrough material.
"""
self.wash_recycle_duration_cv: float = -1
"""Duration of wash recycle (cv).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
self.wash_recycle_duration_t: float = -1
"""Duration of wash recycle (time).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_t` and
:attr:`wash_recycle_duration_cv`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
@_core.UnitOperation.log.setter
def log(self, logger: _core._logger.RtdLogger):
"""Propagates logger across other elements that support it."""
# Default logic.
self._logger = logger
self._logger.set_data_tree(self._log_entity_id, self._log_tree)
# Propagate logger across other elements with logging.
if self.load_recycle_pdf is not None:
self.load_recycle_pdf.set_logger_from_parent(self.uo_id, logger)
if self.load_recycle_pdf is not None:
self.elution_peak_shape.set_logger_from_parent(self.uo_id, logger)
if self.load_recycle_pdf is not None:
self.load_bt.set_logger_from_parent(self.uo_id, logger)
def _get_flow_value(self,
step_name: str, var_name: str,
flow: float, rel_flow: float) -> float:
"""Calc flow rate of chromatographic step.
If `flow` is specified, `flow` is used.
Otherwise `rel_flow` == flow rate relative to load flow rate is
used.
If none are positive, then the load flow rate is used
and a warning is logged.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
flow
Flow rate.
rel_flow
Flow rate relative to load flow rate.
Returns
-------
float
Flow rate.
"""
if flow > 0:
self.log.i_data(self._log_tree, var_name, flow)
elif rel_flow > 0:
flow = rel_flow * self._load_f
self.log.i_data(self._log_tree, var_name, flow)
else:
self.log.w(f"{step_name} step flow rate is not defined,"
f" using load flow rate instead.")
flow = self._load_f
return flow
def _get_time_value(self,
step_name: str, var_name: str,
t: float, cv: float, flow: float) -> float:
"""Calc duration of chromatographic step.
If the step duration is specified in cv and in t, then the
        values are added together.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
t
Duration (time).
cv
Duration (cv).
flow
Flow rate (required if `cv` > 0).
Returns
-------
float
Total step duration (time).
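        Notes
        -----
        Worked example (illustrative numbers): for ``t = 60``, ``cv = 2``,
        column volume ``self._cv = 1`` and ``flow = 0.01``, the result is
        ``60 + 2 * 1 / 0.01 = 260`` (in the same time units as ``t``).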
"""
# Calc.
t_sum = max(t, 0)
if cv > 0:
assert flow > 0, f"{step_name}: Flow rate must be defined (> 0)" \
f" if the duration is specified in CVs."
assert self._cv > 0, f"CV must be determined (by `calc_cv`)" \
f" before calculating duration based on CVs."
t_sum += cv * self._cv / flow # sum
# Log.
if t <= 0 and cv <= 0:
self.log.w(step_name + " time is not defined")
else:
self.log.i_data(self._log_tree, var_name, t_sum)
return t_sum
def _assert_non_binding_species(self):
"""Make sure binding species list is valid."""
if len(self.non_binding_species) > 0:
assert max(self.non_binding_species) < self._n_species, \
"Index of non_binding_species too large (indexes start with 0)"
assert list(set(self.non_binding_species)) \
== list(self.non_binding_species), \
"List of non_binding_species should have ascending order"
assert len(self.non_binding_species) < self._n_species, \
"All species cannot be non-binding."
# Log.
self.log.i_data(self._log_tree,
'non_binding_species',
self.non_binding_species)
def _calc_load_f(self):
"""Determine load flow rate (when on)."""
assert self._is_flow_box_shaped(), "Inlet flow must be box shaped."
self._load_f = self._f.max()
self.log.d_data(self._log_tree, 'load_f', self._load_f)
def _calc_cv(self):
"""Determine column volume."""
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.ERROR,
cv=self.cv,
ft_mean_retentate=self.ft_mean_retentate,
)
if self.cv > 0:
self._cv = self.cv
else: # `self.ft_mean_retentate` > 0.
assert self.column_porosity_retentate > 0, \
f"porosity_retentate must be defined to calc CV from " \
f" `self.ft_mean_retentate`."
assert self._load_f > 0, f"Load flow rate must be defined to" \
f" calc CV from `self.ft_mean_retentate`."
self._cv = self.ft_mean_retentate * self._load_f \
/ self.column_porosity_retentate
# Log.
self.log.i_data(self._log_tree, 'cv', self._cv)
def _report_column_dimensions(self):
"""Report column dimensions based on load linear velocity."""
if self.load_target_lin_velocity > 0:
self._col_h = self._cv * self.load_target_lin_velocity \
/ self._load_f
self.log.i_data(self._log_tree, "column_h", self._col_h)
self.log.i_data(self._log_tree,
"column_d",
(self._cv / self._col_h / _np.pi) ** 0.5 * 2)
def _calc_equilibration_t(self):
"""Determine equilibration step duration."""
if self.equilibration_cv > 0:
# Flow rate.
eq_f = self._get_flow_value("Equilibration",
"equilibration_f",
self.equilibration_f,
self.equilibration_f_rel)
# Duration.
self._equilibration_t = self._get_time_value("Equilibration",
"equilibration_t",
self.equilibration_t,
self.equilibration_cv,
eq_f)
else:
# Duration.
self._equilibration_t = max(self.equilibration_t, 0)
# Log.
self.log.i_data(self._log_tree,
'equilibration_t',
self._equilibration_t)
def _calc_wash_t_and_f(self):
"""Determine wash step flow rate and duration."""
# Flow rate.
self._wash_f = self._get_flow_value("Wash",
"wash_f",
self.wash_f,
self.wash_f_rel)
# Duration.
self._wash_t = self._get_time_value("Wash",
"wash_t",
self.wash_t,
self.wash_cv,
self._wash_f)
def _calc_elution_t_and_f(self):
"""Determine elution step flow rate and duration."""
# Flow rate.
self._elution_f = self._get_flow_value("Elution",
"elution_f",
self.elution_f,
self.elution_f_rel)
# Duration.
self._elution_t = self._get_time_value("Elution",
"elution_t",
self.elution_t,
self.elution_cv,
self._elution_f)
def _calc_elution_peak_t(self):
"""Determine elution peak mean position (1st momentum)."""
self._elution_peak_t = self._get_time_value(
"elution peak position",
"elution_peak_position_t",
self.elution_peak_position_t,
self.elution_peak_position_cv,
self._elution_f
)
def _update_elution_peak_pdf(self):
"""Update elution peak PDF."""
assert self._elution_peak_t > 0
assert self._elution_f > 0
# Calc elution peak shape.
self.elution_peak_shape.update_pdf(
rt_mean=self._elution_peak_t,
v_void=self._elution_peak_t * self._elution_f,
f=self._elution_f
)
self._p_elution_peak = \
self.elution_peak_shape.get_p() * (1 - self.unaccounted_losses_rel)
self.log.d_data(self._log_tree,
"p_elution_peak",
self._p_elution_peak)
def _calc_elution_peak_cut_i_start_and_i_end(self):
"""Calc elution peak cut start and end in form of time steps.
Values are relative to the beginning of the elution step.
"""
elution_peak_pdf: _np.ndarray = self._p_elution_peak.copy()
# Peak cut start.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_start_peak_area_share=self
.elution_peak_cut_start_peak_area_share,
elution_peak_cut_start_c_rel_to_peak_max=self
.elution_peak_cut_start_c_rel_to_peak_max,
elution_peak_cut_start_cv=self.elution_peak_cut_start_cv,
elution_peak_cut_start_t=self.elution_peak_cut_start_t
)
# Calc `elution_peak_cut_start_i`.
if self.elution_peak_cut_start_peak_area_share >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= self.elution_peak_cut_start_peak_area_share
)
elif self.elution_peak_cut_start_c_rel_to_peak_max >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
elution_peak_pdf
>= self.elution_peak_cut_start_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_start_cv >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_start_t >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_t / self._dt)
else:
self.log.w(f"Elution peak cut start is not defined."
f" Now collecting from the beginning"
f" of the elution phase.")
elution_peak_cut_start_i = 0
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_start_i",
elution_peak_cut_start_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_start_t",
elution_peak_cut_start_i * self._dt)
# Peak cut end.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_end_peak_area_share=self
.elution_peak_cut_end_peak_area_share,
elution_peak_cut_end_c_rel_to_peak_max=self
.elution_peak_cut_end_c_rel_to_peak_max,
elution_peak_cut_end_cv=self.elution_peak_cut_end_cv,
elution_peak_cut_end_t=self.elution_peak_cut_end_t,
)
# Calc `elution_peak_cut_end_i`.
if self.elution_peak_cut_end_peak_area_share >= 0:
elution_peak_cut_end_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= (1 - self.elution_peak_cut_end_peak_area_share)
)
elif self.elution_peak_cut_end_c_rel_to_peak_max >= 0:
elution_peak_cut_end_i = _utils.vectors.true_end(
elution_peak_pdf
>= self.elution_peak_cut_end_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_end_cv >= 0:
elution_peak_cut_end_i = \
int(self.elution_peak_cut_end_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_end_t >= 0:
elution_peak_cut_end_i = \
_utils.vectors.true_end(self._t < self.elution_peak_cut_end_t)
else:
self.log.w(f"Elution peak cut end is not defined."
f" Now collecting to the end of the elution phase.")
elution_peak_cut_end_i = elution_peak_pdf.size
self._elution_peak_cut_start_i = elution_peak_cut_start_i
self._elution_peak_cut_end_i = elution_peak_cut_end_i
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_end_i",
elution_peak_cut_end_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_end_t",
elution_peak_cut_end_i * self._dt)
if self._elution_peak_cut_end_i * self._dt < self._elution_peak_t:
self.log.w(f"Peak end is cut before its maximum.")
if self._elution_peak_cut_end_i * self._dt > self._elution_t:
self.log.w(f"Peak cut end exceeds elution step duration.")
def _calc_elution_peak_mask(self):
"""Calc where the elution peak gets collected."""
self._elution_peak_mask = \
_np.ones(int(round(self._elution_t / self._dt)), dtype=bool)
self._elution_peak_mask[self._elution_peak_cut_end_i:] = False
self._elution_peak_mask[:self._elution_peak_cut_start_i] = False
self.log.d_data(self._log_tree,
"elution_peak_interval",
self._elution_peak_mask)
def _update_load_btc(self):
"""Update load breakthrough profile."""
assert self._cv > 0, "CV must be defined by now."
self.load_bt.update_btc_parameters(cv=self._cv)
def _calc_regeneration_t(self):
"""Calc regeneration step duration."""
if self.regeneration_cv > 0:
eq_f = self._get_flow_value("Regeneration",
"regeneration_f",
self.regeneration_f,
self.regeneration_f_rel)
self._regeneration_t = self._get_time_value("Regeneration",
"regeneration_t",
self.regeneration_t,
self.regeneration_cv,
eq_f)
else:
self._regeneration_t = max(self.regeneration_t, 0)
# Log.
self.log.i_data(self._log_tree, 'regeneration_t', self._regeneration_t)
def _update_load_recycle_pdf(self, flow):
"""Update pdf that describes propagation of recycled material.
        Recycled material is composed of unbound (load) and desorbed
(wash) material throughout the column.
`self.load_recycle_pdf` gets updated.
"""
assert self.load_recycle_pdf is not None, \
f"`load_recycle_pdf` must be defined by now."
assert self.column_porosity_retentate > 0, \
f"Retentate porosity must be defined by now."
assert self._cv > 0, "CV must be defined by now."
v_void = self._cv * self.column_porosity_retentate
self.load_recycle_pdf.update_pdf(v_void=v_void,
f=flow,
rt_mean=v_void / flow)
self._p_load_recycle_pdf = self.load_recycle_pdf.get_p()
def _calc_load_recycle_wash_i(self):
"""Calculate wash recycle duration in form of time steps."""
if self.wash_recycle_duration_t > 0 \
or self.wash_recycle_duration_cv > 0:
self._wash_recycle_i_duration = int(self._get_time_value(
"Wash recycle", "load_wash_recycle_t",
self.wash_recycle_duration_t,
self.wash_recycle_duration_cv,
self._wash_f
) / self._dt)
else:
# Same as wash duration.
assert self._wash_t > 0
self._wash_recycle_i_duration = int(round(self._wash_t / self._dt))
def _get_load_bt_cycle_switch_criteria(self,
load_c_ss: _np.ndarray
) -> _np.ndarray:
"""Get steady-state cycle switch (== end of load) criteria.
Parameters
----------
load_c_ss
Load concentration during steady state operation.
Returns
-------
ndarray
Threshold concentration for load breakthrough.
"""
assert self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0, \
f"Load step duration should be defined!"
if self.load_c_end_ss is not None:
load_c_end_ss = self.load_c_end_ss
if self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined by `load_c_end_ss`"
f" and `load_c_end_relative_ss`."
f" Simulation is using `load_c_end_ss`.")
else: # self.load_c_end_relative_ss > 0
load_c_end_ss = self.load_c_end_relative_ss * load_c_ss
# Log.
self.log.i_data(self._log_tree,
'load_c_end_ss',
load_c_end_ss)
return load_c_end_ss
# noinspection DuplicatedCode
def _calc_cycle_t(self):
"""Calculates cycle time (== load time for a single column).
Optional delay of first cycle is not part of this calculation.
"""
assert self._cv > 0
assert self._load_f > 0
if self.load_cv > 0:
t_cycle = self.load_cv * self._cv / self._load_f
if self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined in more than one way."
f" Simulation is using `load_cv`.")
else:
# Get bt profile for constant inlet.
# Inlet conc.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# Simulate first cycle at constant load concentration.
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# Propagate breakthrough.
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
# Calc cycle duration.
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
t_cycle = i_t_first_cycle * self._dt
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_wash_desorbed = self._sim_c_wash_desorption(
f_first_load[:i_t_first_cycle],
c_first_load[:, :i_t_first_cycle]
- bt_first_load[:, :i_t_first_cycle])
else:
c_wash_desorbed = None
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(
f_first_load[:i_t_first_cycle],
bt_first_load[:, :i_t_first_cycle],
c_wash_desorbed)
if self.load_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
i_load_recycle_start = self._wash_recycle_i_duration \
if self.wash_recycle else 0
m_load_recycle = \
bt_first_load_out[
:,
i_load_recycle_start:i_t_first_cycle
].sum() * self._load_f * self._dt
_t_diff = m_load_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._load_recycle_m_ss = m_load_recycle
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_bt_recycle',
_t_diff)
if self.wash_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
m_wash_recycle = bt_first_wash_out[
:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
_t_diff = m_wash_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._wash_recycle_m_ss = m_wash_recycle
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_wash_recycle',
_t_diff)
if self.load_c_end_estimate_with_iterative_solver \
and (self.wash_recycle or self.load_recycle):
c_load_fist_cycle = load_c_ss * _np.ones([len(binding_species),
i_t_first_cycle * 2])
def sim_cycle(f_load: _np.ndarray,
c_load: _np.ndarray,
i_prev_cycle: int) -> _typing.Tuple[_np.ndarray,
_np.ndarray,
int]:
"""Simulates load-wash cycle. Calc load duration.
Load duration is determined based on breakthrough
criteria.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load conc profile.
i_prev_cycle
Previous cycle duration in time steps.
Returns
-------
f_load_next_cycle
Load and wash breakthrough flow rate profile.
c_load_next_cycle
Load and wash breakthrough conc profile.
i_cycle
Current cycle duration in time steps.
"""
# Load.
bt_load: _np.ndarray = \
c_load - self.load_bt.calc_c_bound(f_load, c_load)
# Propagate breakthrough.
bt_load_out, _ = self._sim_c_recycle_propagation(
f_load,
bt_load,
None)
# 'Stop' load at specified breakthrough criteria.
# noinspection PyTypeChecker
i_cycle_duration = _utils.vectors.true_start(
bt_load_out.sum(0) >= load_c_end_ss.sum())
# Cut load at specified time.
bt_load = bt_load[:, :i_cycle_duration]
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_first_wash_desorbed = self._sim_c_wash_desorption(
f_load[:i_cycle_duration],
c_load[:, :i_cycle_duration]
- bt_load[:, :i_cycle_duration])
else:
c_first_wash_desorbed = None
# Propagate load and wash leftovers.
bt_load_out, bt_wash_out = self._sim_c_recycle_propagation(
f_load[:i_cycle_duration],
bt_load,
c_first_wash_desorbed)
# Construct load for next cycle.
# Recycle load.
if self.load_recycle:
rec_load = bt_load_out[:,
i_prev_cycle:i_cycle_duration]
else:
rec_load = _np.zeros_like(
bt_load_out[:, i_prev_cycle:i_cycle_duration])
# Next load profiles.
c_next_load = _np.concatenate((rec_load,
c_load_fist_cycle),
axis=1)
f_next_load = self._load_f * _np.ones(c_next_load.shape[1])
wash_recycle_i_duration = self._wash_recycle_i_duration \
if self.wash_recycle else 0
# Log.
m_load_recycle_ss = \
bt_first_load_out[
:,
wash_recycle_i_duration:i_t_first_cycle
].sum() * self._load_f * self._dt
self._load_recycle_m_ss = m_load_recycle_ss
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle_ss)
# Recycle wash.
if self.wash_recycle:
c_next_load[:, :self._wash_recycle_i_duration] = \
bt_wash_out[:, :self._wash_recycle_i_duration]
f_next_load[:self._wash_recycle_i_duration] = \
self._wash_f
m_wash_recycle_ss = \
bt_wash_out[:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
self._wash_recycle_m_ss = m_wash_recycle_ss
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle_ss)
# Return next load and cycle duration.
return f_next_load, c_next_load, \
i_cycle_duration - i_prev_cycle
f_load_cycle = \
self._load_f * _np.ones(c_load_fist_cycle.shape[1])
c_load_cycle = c_load_fist_cycle
i_t_cycle_prev = i_t_first_cycle
i_t_cycle_estimate = 0
# Loop until cycle duration converges.
for i in range(
self.load_c_end_estimate_with_iter_solver_max_iter):
if abs(i_t_cycle_prev - i_t_cycle_estimate) <= 1:
self.log.i_data(self._log_tree,
"t_cycle_optimization_loop_iter",
i)
break
i_t_cycle_prev = i_t_cycle_estimate
f_load_cycle, c_load_cycle, i_t_cycle_estimate = \
sim_cycle(f_load_cycle, c_load_cycle, i_t_cycle_prev)
# print([i, i_t_cycle_prev, i_t_cycle_estimate])
if abs(i_t_cycle_prev - i_t_cycle_estimate) > 1:
self.log.w("Cycle duration estimator did not converge.")
t_cycle = i_t_cycle_estimate * self._dt
elif self.load_c_end_estimate_with_iterative_solver:
self.log.i(f"No need to use iterative solver in case of"
f" no recycling of load and/or wash.")
self._cycle_t = t_cycle
self.log.i_data(self._log_tree, 'cycle_t', t_cycle)
# noinspection DuplicatedCode
def _calc_first_cycle_extension_t(self):
"""Calc extension of first load.
First load step might be extended for processes with load and/or
        wash recycle in order to reach the steady-state regime faster.
"""
if not self.load_recycle and not self.wash_recycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without load recycle.")
self._first_cycle_extension_t = 0
return
elif not self.load_extend_first_cycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without extended first cycle.")
self._first_cycle_extension_t = 0
return
elif self.load_extend_first_cycle_t > 0:
self._first_cycle_extension_t = self.load_extend_first_cycle_t
return
elif self.load_extend_first_cycle_cv >= 0:
assert self._cv > 0, "CV should be defined by now."
assert self._load_f > 0, "Load flow rate should be defined by now."
self._first_cycle_extension_t = \
self.load_extend_first_cycle_cv * self._cv / self._load_f
elif self.load_cv > 0:
raise NotImplementedError(
f"Estimation of first cycle extension is only supported"
f" if the cycle length is defined by breakthrough cutoff"
f" criteria. This is due to the fact that if all the"
f" breakthrough material gets recycles,"
f" there is no single steady-state.")
else:
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# simulate first cycle at constant load concentration
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# propagate breakthrough
bt_first_load_out, _ = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
dm = 0
if self.load_recycle:
assert hasattr(self, "_load_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._load_recycle_m_ss
if self.wash_recycle:
assert hasattr(self, "_wash_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._wash_recycle_m_ss
di = 0
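            # Extend the first load step by the time needed for the extra
            # breakthrough (after the nominal cycle end) to match the mass `dm`
            # that recycling contributes at steady state.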
if dm > 0:
m_ext_bt = _np.cumsum(
bt_first_load_out.sum(0)[i_t_first_cycle:]
) * self._load_f * self._dt
di += _utils.vectors.true_start(m_ext_bt >= dm)
self._first_cycle_extension_t = di * self._dt
def _calc_cycle_start_i_list(self):
"""Calculate load switch positions in form of time steps."""
assert self._cycle_t > 0, \
f"Cycle length must have been determined" \
f" (by `_calc_cycle_t()`) by now"
flow_i_start, flow_i_end = \
_utils.vectors.true_start_and_end(self._f > 0)
if self.load_extend_first_cycle:
assert self._first_cycle_extension_t >= 0, \
f"Prolong of first load cycle is set to `True`," \
f" but the length is undefined."
if self._first_cycle_extension_t == 0:
self.log.w(f"Prolong of first load cycle is set to `True`,"
f" but the length of the extension is 0.")
load_extend_first_cycle_t = self._first_cycle_extension_t
self.log.i_data(self._log_tree,
"load_extend_first_cycle_t",
load_extend_first_cycle_t)
else:
load_extend_first_cycle_t = 0
cycle_start_t_list = _np.arange(
self._t[flow_i_start] + load_extend_first_cycle_t,
self._t[flow_i_end - 1],
self._cycle_t
)
cycle_start_t_list[0] = self._t[flow_i_start]
self._cycle_start_i_list = _np.rint(
cycle_start_t_list / self._dt).astype(_np.int32)
self.log.i_data(self._log_tree,
"cycle_start_t_list",
cycle_start_t_list)
def _prepare_simulation(self):
"""Prepare everything before cycle-by-cycle simulation."""
self._assert_non_binding_species()
self._calc_load_f()
self._calc_cv() # might depend on load_f
self._report_column_dimensions() # optional
# Equilibration.
self._calc_equilibration_t()
# Wash.
self._calc_wash_t_and_f()
# Elution.
self._calc_elution_t_and_f()
self._calc_elution_peak_t()
self._update_elution_peak_pdf()
self._calc_elution_peak_cut_i_start_and_i_end()
self._calc_elution_peak_mask()
# Regeneration.
self._calc_regeneration_t()
# Prepare for estimation of cycle length.
self._update_load_btc()
if self.load_recycle:
self._update_load_recycle_pdf(self._wash_f)
if self.wash_recycle:
self._calc_load_recycle_wash_i()
# Cycle time.
self._calc_cycle_t()
if self.load_extend_first_cycle:
self._calc_first_cycle_extension_t()
# Cycle start positions == column load switch time points.
self._calc_cycle_start_i_list()
# Make sure cycle duration is long enough.
_t_cycle_except_load = self._equilibration_t + self._wash_t \
+ self._elution_t + self._regeneration_t
if self._cycle_t < _t_cycle_except_load:
self.log.e(f"Load step ({self._cycle_t}) should not be shorter"
f" than eq_t + wash_t + elution_t + regeneration_t"
f" ({_t_cycle_except_load: .6})!")
def _sim_c_load_binding(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Determine what part of load binds.
Load in this context might also contain wash and load recycle
from previous steps.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load concentration profile.
Returns
-------
c_bound
Conc profile of bound material.
c_unbound
Conc profile of unbound material = `c_load` - `c_bound`.
"""
assert f_load.size == c_load.shape[1], \
"f_load and c_load must have the same length"
assert c_load.shape[0] == \
self._n_species - len(self.non_binding_species), \
"c_load must contain all binding species"
c_bound = self.load_bt.calc_c_bound(f_load, c_load)
# Returns bound and unbound part.
return c_bound, c_load - c_bound
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
The step has no default logic.
Thus it raises `NotImplementedError` if called.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
Raises
------
NotImplementedError
            This method has no default implementation. Thus calling it
            will raise the error.
"""
        # Not implemented in this base class, as there is
# no consensus on typical dynamics and the way to describe it.
raise NotImplementedError("Function not implemented in this class")
def _sim_c_recycle_propagation(
self,
f_unbound: _np.ndarray,
c_unbound: _np.ndarray,
c_wash_desorbed: _typing.Optional[_np.ndarray]
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Propagate unbound and desorbed material through the column.
Unbound (breakthrough during load) and desorbed (during wash)
sections might have a different flow rates as they come from
different steps - load and wash.
Parameters
----------
f_unbound
Flow rate profile during 'total load' step for a cycle.
The step includes wash recycle, load recycle and load step.
c_unbound
Conc profile of overloaded material during load step
(plus previous wash and load recycle).
c_wash_desorbed
Conc profile of desorbed material during wash step.
Returns
-------
c_unbound_propagated
Propagated conc profile of overloaded material.
c_wash_desorbed_propagated
Propagated conc profile of desorbed material.
"""
assert hasattr(self, "_wash_f") and self._wash_f > 0
assert hasattr(self, "_wash_t") and self._wash_t > 0
assert self.load_recycle_pdf is not None
assert c_unbound.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_unbound.shape[1] == f_unbound.size
if c_wash_desorbed is None or c_wash_desorbed.size == 0:
c_wash_desorbed = _np.zeros([
self._n_species - len(self.non_binding_species),
int(round(self._wash_t / self._dt))])
else:
assert c_wash_desorbed.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_wash_desorbed.shape[1] == \
int(round(self._wash_t / self._dt))
# Combine on volumetric scale.
v_load = self._dt * f_unbound.cumsum()
v_wash = v_load[-1] + \
self._dt * _np.arange(1, c_wash_desorbed.shape[1] + 1) \
* self._wash_f
min_flow = min(f_unbound.min(), self._wash_f)
dv = min_flow * self._dt
v = _np.arange(dv,
(v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv,
dv)
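        # Re-sample both sections onto a common, uniformly spaced volume grid
        # so the leftover material can be propagated as a single stream.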
c_v_combined = _interp.interp1d(
_np.concatenate((v_load, v_wash), axis=0),
_np.concatenate((c_unbound, c_wash_desorbed), axis=1),
fill_value="extrapolate"
)(v)
c_v_combined[c_v_combined < 0] = 0
# Simulate traveling of leftover material through the column.
self._update_load_recycle_pdf(min_flow)
c_v_combined_propagated = _utils.convolution.time_conv(
self._dt, c_v_combined, self._p_load_recycle_pdf)
# Split back on time scale.
c_combined_propagated = _interp.interp1d(
v,
c_v_combined_propagated,
fill_value="extrapolate"
)(_np.concatenate((v_load, v_wash), axis=0))
c_combined_propagated[c_combined_propagated < 0] = 0
c_unbound_propagated = c_combined_propagated[:, :v_load.size]
c_wash_desorbed_propagated = c_combined_propagated[:, v_load.size:]
return c_unbound_propagated, c_wash_desorbed_propagated
def _sim_c_elution_desorption(self,
m_bound: _np.ndarray
) -> _typing.Tuple[_np.ndarray,
_np.ndarray]:
"""Simulate elution step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column.
`m_bound.size == n_species`
Returns
-------
c_elution
Outlet concentration profile during the elution.
b_elution_peak
Boolean vector. Peak is collected where the value is `True`.
"""
assert self._elution_f > 0
assert self._elution_t > 0
i_elution_duration = int(round(self._elution_t / self._dt))
# Multiply elution peak with the amount of captured product.
c_elution = \
self._p_elution_peak[_np.newaxis, :i_elution_duration] * \
m_bound[:, _np.newaxis] / self._elution_f
# Pad with zeros to cover the entire elution step duration.
if c_elution.shape[1] < i_elution_duration:
c_elution = _np.pad(c_elution,
((0, 0),
(0, i_elution_duration - c_elution.shape[1])),
mode="constant")
# Boolean mask - `True` where peak is being collected.
b_elution_peak = self._elution_peak_mask
return c_elution, b_elution_peak
def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
"""Get elution buffer composition at the outlet of the column.
By default the buffer composition is constant throughout the
elution step.
Feel free to override this function if you want to simulate
linear gradient or if the transient phenomena at the beginning
of peak cut needs to be considered.
Parameters
----------
n_time_steps
Duration of elution step in number of time steps.
Returns
-------
ndarray
Buffer concentration profile at the outlet of the column
during the elution step.
"""
# Elution buffer composition.
elution_buffer_composition = \
self.elution_buffer_c.reshape(self.elution_buffer_c.size, 1)
assert elution_buffer_composition.size == 0 \
or elution_buffer_composition.size == self._n_species, \
f"Elution buffer composition must be either empty or have" \
f" a concentration value for each specie."
assert _np.all(elution_buffer_composition >= 0), \
"Concentration values in elution buffer must be >= 0"
if elution_buffer_composition.size == 0:
elution_buffer_composition = _np.zeros([self._n_species, 1])
self.log.i_data(self._log_tree,
"elution_buffer_composition",
elution_buffer_composition)
# Constant profile.
c_elution_buffer = elution_buffer_composition \
* _np.ones_like(self._t[:n_time_steps])
return c_elution_buffer
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _sim_c_regeneration(self,
m_bound: _np.ndarray
) -> _typing.Optional[_np.ndarray]:
"""Simulate regeneration step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column at
the beginning of the regeneration step.
`m_bound.size == n_species`.
Returns
-------
Optional[ndarray]
Outlet concentration profile during regeneration step.
E.g. regeneration peak.
"""
# No default implementation.
c_regeneration = None
return c_regeneration
def _sim_c_out_cycle(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_typing.Optional[_np.ndarray],
_typing.Optional[_np.ndarray],
_np.ndarray,
_np.ndarray,
_typing.Optional[_np.ndarray]]:
"""Simulates load-wash-elution-regeneration steps.
Regeneration is optional.
This function can be replaced in case user wants to use some
other variation of bind-elution dynamics.
Elution peak cut is applied in this function.
Elution peak shape must be defined by now.
Return profiles that are `None` are considered being zero.
Parameters
----------
f_load
Inlet (recycle + load) flow rate profile for a cycle.
The flow rate might be different during wash recycle.
c_load
Inlet (recycle + load) concentration profile.
Returns
-------
c_load
Conc profile at the outlet of the column during load.
c_wash
Conc profile at the outlet of the column during wash.
c_elution
Conc profile at the outlet of the column during elution.
b_elution
Boolean mask for elution step. `True` where peak is being
collected.
c_regeneration
Conc profile at the outlet of the column during
regeneration.
"""
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._elution_f > 0
assert self._elution_t > 0
assert self._load_f > 0
assert self._cv > 0
# Evaluate binding.
c_bound, c_unbound = self._sim_c_load_binding(f_load, c_load)
# Log.
m_load = (c_load * f_load[_np.newaxis, :]).sum(1) * self._dt
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1) * self._dt
self.log.i_data(self._cycle_tree,
"column_utilization",
m_bound / self._cv / self.load_bt.get_total_bc())
self.log.i_data(self._cycle_tree, "m_load", m_load)
self.log.i_data(self._cycle_tree, "m_bound", m_bound)
self.log.i_data(self._cycle_tree, "m_unbound", m_load - m_bound)
self.log.d_data(self._cycle_tree, "f_load", f_load)
self.log.d_data(self._cycle_tree, "c_load", c_load)
self.log.d_data(self._cycle_tree, "c_bound", c_bound)
self.log.d_data(self._cycle_tree, "c_unbound", c_unbound)
# Evaluate desorption during wash.
c_wash_desorbed = None
if self.wash_desorption:
c_wash_desorbed = self._sim_c_wash_desorption(f_load, c_bound)
if c_wash_desorbed.size > 0:
# Subtract desorbed material from bound material.
m_bound -= c_wash_desorbed.sum(1)
# Log.
self.log.i_data(self._cycle_tree,
"m_wash_desorbed",
c_wash_desorbed.sum(1) * self._wash_f * self._dt)
self.log.d_data(self._cycle_tree,
"c_wash_desorbed",
c_wash_desorbed)
# Propagate unbound and desorbed material throughout the column.
c_out_load = c_unbound
c_out_wash = c_wash_desorbed
if self.load_recycle or self.wash_recycle:
c_out_load, c_out_wash = \
self._sim_c_recycle_propagation(f_load,
c_unbound,
c_wash_desorbed)
# Get elution peak.
c_out_elution, elution_peak_mask = \
self._sim_c_elution_desorption(m_bound)
# Log.
m_elution_peak = (c_out_elution * elution_peak_mask[_np.newaxis, :]
).sum(1) * self._elution_f * self._dt
m_elution = c_out_elution.sum(1) * self._elution_f * self._dt
self.log.i_data(self._cycle_tree,
"m_elution_peak", m_elution_peak)
self.log.i_data(self._cycle_tree,
"m_elution", m_elution)
self.log.i_data(self._cycle_tree,
"m_elution_peak_cut_loss", m_elution - m_elution_peak)
# Get regeneration peak.
c_out_regeneration = self._sim_c_regeneration(
m_bound - c_out_elution.sum(1) * self._elution_f * self._dt)
return c_out_load, c_out_wash, c_out_elution, \
elution_peak_mask, c_out_regeneration
def _calculate(self):
# Pre calculate parameters and repetitive profiles.
self._prepare_simulation()
# Assert proper list of binding species.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
assert len(binding_species) > 0
# Copy inlet vectors.
c_in_load = self._c[binding_species].copy()
f_in_load = self._f.copy()
f_in_i_end = min(_utils.vectors.true_end(f_in_load > 0), self._t.size)
c_in_load[:, f_in_i_end:] = 0
# Clear for results.
self._c[:] = 0
self._f[:] = 0
# Prepare logger.
log_data_cycles = list()
self.log.set_branch(self._log_tree, "cycles", log_data_cycles)
# Variable to store wash recycle to.
previous_c_bt_wash: _typing.Optional[_np.ndarray] = None
# Loop across cycles.
for i in range(self._cycle_start_i_list.size):
# Load-wash-elution-regeneration-equilibration steps for a column.
# Load step starts at `self._cycle_start_i_list[i]`.
# Prepare logger for this cycle.
self._cycle_tree = dict()
log_data_cycles.append(self._cycle_tree)
# Load start and end time as the column sees it.
if i > 0 and self.load_recycle:
# Column sees leftovers from previous load during recycling.
cycle_load_i_start = self._cycle_start_i_list[i - 1]
else:
cycle_load_i_start = self._cycle_start_i_list[i]
            # Calc cycle end (either next cycle or end of simulation time).
if i + 1 < self._cycle_start_i_list.size:
cycle_load_i_end = self._cycle_start_i_list[i + 1]
else:
cycle_load_i_end = f_in_i_end - 1
# Log results.
self.log.i_data(self._cycle_tree,
"i_cycle_load_start",
cycle_load_i_start)
self.log.i_data(self._cycle_tree,
"i_cycle_load_step_start",
self._cycle_start_i_list[i])
self.log.i_data(self._cycle_tree,
"i_cycle_load_end",
cycle_load_i_end)
# Calc profiles at column outlet.
c_out_load, c_out_wash, c_out_elution, \
b_out_elution, c_out_regeneration = self._sim_c_out_cycle(
f_in_load[cycle_load_i_start:cycle_load_i_end],
c_in_load[:, cycle_load_i_start:cycle_load_i_end]
)
self.log.d_data(self._cycle_tree,
"c_out_load", c_out_load)
self.log.d_data(self._cycle_tree,
"c_out_wash", c_out_wash)
self.log.d_data(self._cycle_tree,
"c_out_elution", c_out_elution)
self.log.d_data(self._cycle_tree,
"b_out_elution", b_out_elution)
self.log.d_data(self._cycle_tree,
"c_out_regeneration", c_out_regeneration)
# Load recycle.
if self.load_recycle:
# Recycle load during the load step.
i_load_start_rel = self._cycle_start_i_list[i] \
- cycle_load_i_start
c_load_recycle = c_out_load[:, i_load_start_rel:]
c_in_load[:, self._cycle_start_i_list[i]:cycle_load_i_end] = \
c_load_recycle
self.log.i_data(self._cycle_tree, "m_load_recycle",
c_load_recycle.sum(1)
* self._load_f * self._dt)
self.log.d_data(self._cycle_tree, "c_load_recycle",
c_load_recycle)
# Losses during load == bt through 2nd column.
c_loss_bt_2nd_column = c_out_load[:, i_load_start_rel]
self.log.i_data(self._cycle_tree, "m_loss_bt_2nd_column",
c_loss_bt_2nd_column.sum()
* self._dt * self._load_f)
self.log.d_data(self._cycle_tree, "c_loss_bt_2nd_column",
c_loss_bt_2nd_column)
else:
# report losses during load
m_loss_load = c_out_load.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_load", m_loss_load)
# Wash recycle.
if self.wash_recycle:
if previous_c_bt_wash is not None \
and previous_c_bt_wash.size > 0:
# Clip wash recycle duration if needed.
i_wash_duration = min(
self._wash_recycle_i_duration,
self._t.size - self._cycle_start_i_list[i])
# Log losses due to discarding load bt during wash recycle.
s = c_in_load[
:,
self._cycle_start_i_list[i]:self._cycle_start_i_list[i]
+ i_wash_duration]
self.log.i_data(self._cycle_tree,
"m_loss_load_bt_during_wash_recycle",
s.sum() * self._dt * self._load_f)
self.log.d_data(self._cycle_tree,
"c_lost_load_during_wash_recycle", s)
self.log.d_data(self._cycle_tree, "c_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration])
self.log.i_data(
self._cycle_tree, "m_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration].sum(1)
* self._dt * self._wash_f)
# Apply previous wash recycle onto the inlet profile.
s[:] = previous_c_bt_wash[:, :i_wash_duration]
f_in_load[self._cycle_start_i_list[i]:
self._cycle_start_i_list[i]
+ i_wash_duration] = self._wash_f
# Save wash from this cycle to be used during the next cycle.
previous_c_bt_wash = c_out_wash
else:
# Report losses during wash.
if c_out_wash is None:
c_out_wash = _np.zeros(
[len(binding_species),
int(round(self._wash_t / self._dt))])
m_loss_wash = c_out_wash.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_wash", m_loss_wash)
# Elution.
[i_el_rel_start, i_el_rel_end] = \
_utils.vectors.true_start_and_end(b_out_elution)
i_el_start = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_start)
i_el_end = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_end)
i_el_rel_end = i_el_rel_start + i_el_end - i_el_start
# Log.
self.log.i_data(self._cycle_tree, "i_elution_start", i_el_start)
self.log.i_data(self._cycle_tree, "i_elution_end", i_el_end)
# Write to global outlet.
self._f[i_el_start:i_el_end] = self._elution_f
self._c[binding_species, i_el_start:i_el_end] = \
c_out_elution[:, i_el_rel_start:i_el_rel_end]
class ACC(AlternatingChromatography):
"""Alternating column chromatography without recycling.
Alternating load-bind-elution twin-column chromatography without
recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "ACC".
Notes
-----
For list of attributes refer to :class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> acc_pro_a = ACC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... uo_id="pro_a_acc",
... gui_title="ProteinA ACC",
... )
>>> acc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> acc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> acc_pro_a.equilibration_f_rel = 1
>>> # Load 10 CVs.
>>> acc_pro_a.load_cv = 20
>>> # Define wash step.
>>> acc_pro_a.wash_cv = 5
>>> # Elution step.
>>> acc_pro_a.elution_cv = 3
>>> # 1st momentum of elution peak from data from above.
>>> acc_pro_a.elution_peak_position_cv = 1.2
>>> acc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> acc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> acc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate ACC.
>>> f_out, c_out = acc_pro_a.evaluate(f_in, c_in)
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "ACC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by ACC.
Raises
------
NotImplementedError
            Raises exception when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCC(AlternatingChromatography):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column. The unbound (not captured)
part is propagated through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCC".
Notes
-----
For list of additional attributes refer to
:class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> load_recycle_pdf = _pdf.GaussianFixedDispersion(t, 2 * 2 / 30)
>>> pcc_pro_a = PCC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... load_recycle_pdf=load_recycle_pdf,
... # Porosity of the column for protein.
... column_porosity_retentate=0.64,
... uo_id="pro_a_pcc",
... gui_title="ProteinA PCC",
... )
>>> pcc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> pcc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> pcc_pro_a.equilibration_f_rel = 1
>>> # Load until 70 % breakthrough.
>>> pcc_pro_a.load_c_end_relative_ss = 0.7
>>> # Automatically prolong first cycle to faster achieve a steady-state.
>>> pcc_pro_a.load_extend_first_cycle = True
>>> # Define wash step.
>>> # There is no desorption during wash step in this example.
>>> pcc_pro_a.wash_cv = 5
>>> pcc_pro_a.wash_recycle = True
>>> pcc_pro_a.wash_recycle_duration_cv = 2
>>> # Elution step.
>>> pcc_pro_a.elution_cv = 3
>>> # 1st momentum of elution peak from data from above.
>>> pcc_pro_a.elution_peak_position_cv = 1.2
>>> pcc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> pcc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> pcc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate ACC.
>>> f_out, c_out = pcc_pro_a.evaluate(f_in, c_in) # doctest: +ELLIPSIS
pro_a_pcc: Steady-state concentration is being estimated ...
pro_a_pcc: Steady-state concentration is being estimated ...
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = False
"""Recycle wash. Default = False."""
self.column_porosity_retentate = column_porosity_retentate
"""Column porosity for binding species.
See Also
--------
:class:`PCC`
Examples
--------
`column_porosity_retentate` is a mean residence time of the
product (protein) traveling through the column during
non-binding conditions (in CVs).
"""
self.load_recycle_pdf = load_recycle_pdf
"""PDF of wash and/or unbound load traveling through the column.
See Also
--------
:class:`PCC`
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by PCC.
Raises
------
NotImplementedError
            Raises exception when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCCWithWashDesorption(PCC):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
The material desorption during wash step is defined by exponential
half life time
* :attr:`wash_desorption_tail_half_time_cv`
and the amount of desorbable material which is defined by
* :attr:`wash_desorption_desorbable_material_share` or
* :attr:`wash_desorption_desorbable_above_dbc`.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCCWithWashDesorption".
Notes
-----
    During the wash step, weaker binding isoforms might be desorbed and
    recycled. In turn they are desorbed and recycled again during the
    next cycle, and so on, resulting in an increasing amount of desorbed
    material during the wash step (even at steady state). This is not considered by
this class. Furthermore, it is not a favorable case in terms of RTD
as the weakly bound material propagates from column to column for
many cycles.
For list of additional attributes refer to
:class:`PCC` and :class:`AlternatingChromatography`.
See Also
--------
:class:`PCC`
:class:`AlternatingChromatography`
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCCWithWashDesorption"):
super().__init__(t, uo_id, load_bt, load_recycle_pdf,
column_porosity_retentate, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = True
"""Recycle wash. Default = `True`."""
self.wash_desorption = True
"""Simulate desorption during wash step. Default = `True`."""
self.wash_desorption_tail_half_time_cv = -1
"""Wash desorption rate.
Required if :attr:`wash_desorption` is `True`.
Wash desorption is simulated as exponential decay with half-life
:attr:`wash_desorption_tail_half_time_cv`.
"""
self.wash_desorption_desorbable_material_share = -1
"""Share of material that can be desorbed during wash step.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
self.wash_desorption_desorbable_above_dbc = -1
"""Share of material that can be desorbed during wash step.
Share is defined as a share of material loaded onto the column
that exceeds specified `wash_desorption_desorbable_above_dbc`
binding capacity.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
`self.wash_desorption_tail_half_time_cv` needs to be defined.
One of `self.wash_desorption_desorbable_material_share` and
`self.wash_desorption_desorbable_above_dbc` needs to be defined.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
"""
assert self.wash_desorption_tail_half_time_cv > 0
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._cv > 0
assert self.wash_desorption_desorbable_material_share > 0 \
or self.wash_desorption_desorbable_above_dbc > 0
assert f_load.size == c_bound.shape[1]
assert c_bound.shape[0] \
== self._n_species - len(self.non_binding_species)
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1)[:, _np.newaxis] \
* self._dt
# Calc share of desorbable material.
k = -1
if self.wash_desorption_desorbable_material_share > 0:
k = self.wash_desorption_desorbable_material_share
if self.wash_desorption_desorbable_above_dbc > 0:
if k > 0:
self.log.w(
f"Share of desorbable material defined twice!!"
f" Using `load_recycle_wash_desorbable_material_share`")
else:
k = max(0,
1 - self.wash_desorption_desorbable_above_dbc
* self._cv / m_bound.sum())
assert 1 >= k >= 0, f"Share of desorbable material {k}" \
f" must be >= 0 and <= 1."
i_wash_duration = int(round(self._wash_t / self._dt))
# Generate exponential tail.
exp_pdf = _pdf.TanksInSeries(self._t[:i_wash_duration],
n_tanks=1,
pdf_id=f"wash_desorption_exp_drop")
exp_pdf.allow_open_end = True
exp_pdf.trim_and_normalize = False
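        # Convert the half-life (given in CVs) into the mean residence time of
        # the exponential tail: t_half * CV / wash flow rate / ln(2).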
tau = self.wash_desorption_tail_half_time_cv \
* self._cv / self._wash_f / _np.log(2)
exp_pdf.update_pdf(rt_mean=tau)
p = exp_pdf.get_p()[_np.newaxis, :i_wash_duration]
# Scale desorbed material conc due to differences in flow rate.
c_desorbed = m_bound * k * p / self._wash_f
# Pad with zeros if needed.
c_desorbed = _np.pad(c_desorbed,
((0, 0),
(0, i_wash_duration - c_desorbed.shape[1])),
mode="constant")
# Log.
self.log.d_data(self._cycle_tree if hasattr(self, "_cycle_tree")
else self._log_tree,
"p_desorbed",
p)
return c_desorbed
| [
"numpy.ones_like",
"bio_rtd.utils.vectors.true_start",
"bio_rtd.utils.convolution.time_conv",
"numpy.ones",
"bio_rtd.utils.vectors.true_end",
"numpy.log",
"numpy.zeros_like",
"bio_rtd.utils.vectors.true_start_and_end",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.pad",
"numpy.zeros",
"numpy.rint",
"numpy.concatenate",
"numpy.cumsum",
"numpy.all",
"bio_rtd.pdf.TanksInSeries",
"numpy.arange"
]
| [((16197, 16210), 'numpy.array', '_np.array', (['[]'], {}), '([])\n', (16206, 16210), True, 'import numpy as _np\n'), ((54794, 54840), 'bio_rtd.utils.vectors.true_start_and_end', '_utils.vectors.true_start_and_end', (['(self._f > 0)'], {}), '(self._f > 0)\n', (54827, 54840), True, 'import bio_rtd.utils as _utils\n'), ((55563, 55669), 'numpy.arange', '_np.arange', (['(self._t[flow_i_start] + load_extend_first_cycle_t)', 'self._t[flow_i_end - 1]', 'self._cycle_t'], {}), '(self._t[flow_i_start] + load_extend_first_cycle_t, self._t[\n flow_i_end - 1], self._cycle_t)\n', (55573, 55669), True, 'import numpy as _np\n'), ((62054, 62126), 'numpy.arange', '_np.arange', (['dv', '((v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv)', 'dv'], {}), '(dv, (v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv, dv)\n', (62064, 62126), True, 'import numpy as _np\n'), ((62581, 62659), 'bio_rtd.utils.convolution.time_conv', '_utils.convolution.time_conv', (['self._dt', 'c_v_combined', 'self._p_load_recycle_pdf'], {}), '(self._dt, c_v_combined, self._p_load_recycle_pdf)\n', (62609, 62659), True, 'import bio_rtd.utils as _utils\n'), ((65786, 65826), 'numpy.all', '_np.all', (['(elution_buffer_composition >= 0)'], {}), '(elution_buffer_composition >= 0)\n', (65793, 65826), True, 'import numpy as _np\n'), ((93789, 93886), 'bio_rtd.pdf.TanksInSeries', '_pdf.TanksInSeries', (['self._t[:i_wash_duration]'], {'n_tanks': '(1)', 'pdf_id': 'f"""wash_desorption_exp_drop"""'}), "(self._t[:i_wash_duration], n_tanks=1, pdf_id=\n f'wash_desorption_exp_drop')\n", (93807, 93886), True, 'import bio_rtd.pdf as _pdf\n'), ((94423, 94517), 'numpy.pad', '_np.pad', (['c_desorbed', '((0, 0), (0, i_wash_duration - c_desorbed.shape[1]))'], {'mode': '"""constant"""'}), "(c_desorbed, ((0, 0), (0, i_wash_duration - c_desorbed.shape[1])),\n mode='constant')\n", (94430, 94517), True, 'import numpy as _np\n'), ((62741, 62811), 'scipy.interpolate.interp1d', '_interp.interp1d', (['v', 'c_v_combined_propagated'], {'fill_value': '"""extrapolate"""'}), "(v, c_v_combined_propagated, fill_value='extrapolate')\n", (62757, 62811), True, 'import scipy.interpolate as _interp\n'), ((62858, 62899), 'numpy.concatenate', '_np.concatenate', (['(v_load, v_wash)'], {'axis': '(0)'}), '((v_load, v_wash), axis=0)\n', (62873, 62899), True, 'import numpy as _np\n'), ((64319, 64414), 'numpy.pad', '_np.pad', (['c_elution', '((0, 0), (0, i_elution_duration - c_elution.shape[1]))'], {'mode': '"""constant"""'}), "(c_elution, ((0, 0), (0, i_elution_duration - c_elution.shape[1])),\n mode='constant')\n", (64326, 64414), True, 'import numpy as _np\n'), ((65986, 66017), 'numpy.zeros', '_np.zeros', (['[self._n_species, 1]'], {}), '([self._n_species, 1])\n', (65995, 66017), True, 'import numpy as _np\n'), ((66262, 66299), 'numpy.ones_like', '_np.ones_like', (['self._t[:n_time_steps]'], {}), '(self._t[:n_time_steps])\n', (66275, 66299), True, 'import numpy as _np\n'), ((72260, 72298), 'bio_rtd.utils.vectors.true_end', '_utils.vectors.true_end', (['(f_in_load > 0)'], {}), '(f_in_load > 0)\n', (72283, 72298), True, 'import bio_rtd.utils as _utils\n'), ((78637, 78685), 'bio_rtd.utils.vectors.true_start_and_end', '_utils.vectors.true_start_and_end', (['b_out_elution'], {}), '(b_out_elution)\n', (78670, 78685), True, 'import bio_rtd.utils as _utils\n'), ((94132, 94142), 'numpy.log', '_np.log', (['(2)'], {}), '(2)\n', (94139, 94142), True, 'import numpy as _np\n'), ((40682, 40704), 'numpy.ones', '_np.ones', (['self._t.size'], {}), '(self._t.size)\n', (40690, 40704), True, 'import 
numpy as _np\n'), ((55800, 55839), 'numpy.rint', '_np.rint', (['(cycle_start_t_list / self._dt)'], {}), '(cycle_start_t_list / self._dt)\n', (55808, 55839), True, 'import numpy as _np\n'), ((62226, 62267), 'numpy.concatenate', '_np.concatenate', (['(v_load, v_wash)'], {'axis': '(0)'}), '((v_load, v_wash), axis=0)\n', (62241, 62267), True, 'import numpy as _np\n'), ((62281, 62334), 'numpy.concatenate', '_np.concatenate', (['(c_unbound, c_wash_desorbed)'], {'axis': '(1)'}), '((c_unbound, c_wash_desorbed), axis=1)\n', (62296, 62334), True, 'import numpy as _np\n'), ((31650, 31689), 'numpy.cumsum', '_np.cumsum', (['(elution_peak_pdf * self._dt)'], {}), '(elution_peak_pdf * self._dt)\n', (31660, 31689), True, 'import numpy as _np\n'), ((33637, 33676), 'numpy.cumsum', '_np.cumsum', (['(elution_peak_pdf * self._dt)'], {}), '(elution_peak_pdf * self._dt)\n', (33647, 33676), True, 'import numpy as _np\n'), ((47617, 47671), 'numpy.concatenate', '_np.concatenate', (['(rec_load, c_load_fist_cycle)'], {'axis': '(1)'}), '((rec_load, c_load_fist_cycle), axis=1)\n', (47632, 47671), True, 'import numpy as _np\n'), ((49538, 49574), 'numpy.ones', '_np.ones', (['c_load_fist_cycle.shape[1]'], {}), '(c_load_fist_cycle.shape[1])\n', (49546, 49574), True, 'import numpy as _np\n'), ((61882, 61925), 'numpy.arange', '_np.arange', (['(1)', '(c_wash_desorbed.shape[1] + 1)'], {}), '(1, c_wash_desorbed.shape[1] + 1)\n', (61892, 61925), True, 'import numpy as _np\n'), ((34333, 34395), 'bio_rtd.utils.vectors.true_end', '_utils.vectors.true_end', (['(self._t < self.elution_peak_cut_end_t)'], {}), '(self._t < self.elution_peak_cut_end_t)\n', (34356, 34395), True, 'import bio_rtd.utils as _utils\n'), ((47450, 47511), 'numpy.zeros_like', '_np.zeros_like', (['bt_load_out[:, i_prev_cycle:i_cycle_duration]'], {}), '(bt_load_out[:, i_prev_cycle:i_cycle_duration])\n', (47464, 47511), True, 'import numpy as _np\n'), ((47822, 47852), 'numpy.ones', '_np.ones', (['c_next_load.shape[1]'], {}), '(c_next_load.shape[1])\n', (47830, 47852), True, 'import numpy as _np\n'), ((52899, 52921), 'numpy.ones', '_np.ones', (['self._t.size'], {}), '(self._t.size)\n', (52907, 52921), True, 'import numpy as _np\n'), ((54397, 54438), 'bio_rtd.utils.vectors.true_start', '_utils.vectors.true_start', (['(m_ext_bt >= dm)'], {}), '(m_ext_bt >= dm)\n', (54422, 54438), True, 'import bio_rtd.utils as _utils\n')] |
import matplotlib.pyplot as plt
def main():
with open('log.txt') as f:
lines = f.readlines()
glob_loss = []
hm_l = []
off_l = []
poly_l = []
depth_l = []
glob_loss_val = []
hm_l_val = []
off_l_val = []
poly_l_val = []
depth_l_val = []
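    # Assumes '|'-separated log lines: a first field of the form '...: AP'
    # marks a validation-metrics line; any other line carries per-epoch
    # training losses, optionally followed by validation losses.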
for epoch in lines:
m = epoch.split("|")
if m[0].split(':')[1] == ' AP':
glob_loss_val.append(float(m[1][5:-1]))
hm_l_val.append(float(m[2][5:-1]))
off_l_val.append(float(m[3][6:-1]))
poly_l_val.append(float(m[4][7:-1]))
depth_l_val.append(float(m[5][8:-1]))
else:
nb_epoch = int(m[0].split(":")[-1])
glob_loss.append(float(m[1][5:-1]))
hm_l.append(float(m[2][5:-1]))
off_l.append(float(m[3][6:-1]))
poly_l.append(float(m[4][7:-1]))
depth_l.append(float(m[5][8:-1]))
if len(m) > 8 :
glob_loss_val.append(float(m[7][5:-1]))
hm_l_val.append(float(m[8][5:-1]))
off_l_val.append(float(m[9][6:-1]))
poly_l_val.append(float(m[10][7:-1]))
depth_l_val.append(float(m[11][8:-1]))
plt.plot(glob_loss, label = "glob_loss")
plt.plot(hm_l, label = "hm_l")
plt.plot(off_l, label = "off_l")
plt.plot(poly_l, label = "poly_l")
plt.plot(depth_l, label = "depth_l")
plt.legend()
plt.savefig("loss_train.png")
plt.show()
plt.plot(glob_loss_val, label = "glob_loss_val")
plt.plot(hm_l_val, label = "hm_l_val")
plt.plot(off_l_val, label = "off_l_val")
plt.plot(poly_l_val, label = "poly_l_val")
plt.plot(depth_l_val, label = "depth_l_val")
plt.legend()
plt.savefig("loss_valid.png")
plt.show()
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((1068, 1106), 'matplotlib.pyplot.plot', 'plt.plot', (['glob_loss'], {'label': '"""glob_loss"""'}), "(glob_loss, label='glob_loss')\n", (1076, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1139), 'matplotlib.pyplot.plot', 'plt.plot', (['hm_l'], {'label': '"""hm_l"""'}), "(hm_l, label='hm_l')\n", (1119, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1174), 'matplotlib.pyplot.plot', 'plt.plot', (['off_l'], {'label': '"""off_l"""'}), "(off_l, label='off_l')\n", (1152, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1211), 'matplotlib.pyplot.plot', 'plt.plot', (['poly_l'], {'label': '"""poly_l"""'}), "(poly_l, label='poly_l')\n", (1187, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1250), 'matplotlib.pyplot.plot', 'plt.plot', (['depth_l'], {'label': '"""depth_l"""'}), "(depth_l, label='depth_l')\n", (1224, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1267), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1265, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1299), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss_train.png"""'], {}), "('loss_train.png')\n", (1281, 1299), True, 'import matplotlib.pyplot as plt\n'), ((1302, 1312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1310, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1362), 'matplotlib.pyplot.plot', 'plt.plot', (['glob_loss_val'], {'label': '"""glob_loss_val"""'}), "(glob_loss_val, label='glob_loss_val')\n", (1324, 1362), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1403), 'matplotlib.pyplot.plot', 'plt.plot', (['hm_l_val'], {'label': '"""hm_l_val"""'}), "(hm_l_val, label='hm_l_val')\n", (1375, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1446), 'matplotlib.pyplot.plot', 'plt.plot', (['off_l_val'], {'label': '"""off_l_val"""'}), "(off_l_val, label='off_l_val')\n", (1416, 1446), True, 'import matplotlib.pyplot as plt\n'), ((1451, 1491), 'matplotlib.pyplot.plot', 'plt.plot', (['poly_l_val'], {'label': '"""poly_l_val"""'}), "(poly_l_val, label='poly_l_val')\n", (1459, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1538), 'matplotlib.pyplot.plot', 'plt.plot', (['depth_l_val'], {'label': '"""depth_l_val"""'}), "(depth_l_val, label='depth_l_val')\n", (1504, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1555), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1553, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1587), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss_valid.png"""'], {}), "('loss_valid.png')\n", (1569, 1587), True, 'import matplotlib.pyplot as plt\n'), ((1590, 1600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1598, 1600), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
###
# Based on signature.R
###
import sys,os,logging
import numpy as np
import pandas as pd
if __name__=="__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
  if (len(sys.argv) < 4):
logging.error("3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file.")
sys.exit(1)
fn1 = sys.argv[1] #GSE70138_Broad_LINCS_sig_info_2017-03-06.txt.gz
fn2 = sys.argv[2] #GSE92742_Broad_LINCS_sig_info.txt.gz
ofile = sys.argv[3] #signature.tsv
#
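  # LINCS sig_info files use -666 / -666.0 as missing-value placeholders;
  # map them to NaN while reading.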
part1 = pd.read_table(fn1, "\t", na_values=["-666", "-666.0"])
logging.info(f"columns: {part1.columns}")
part1 = part1[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
part2 = pd.read_table(fn2, "\t", na_values=["-666", "-666.0"], dtype="str")
part2.pert_time = part2.pert_time.astype(np.int32)
logging.info(f"columns: {part2.columns}")
part2 = part2[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
sign = pd.concat([part1, part2])
sign.drop_duplicates(subset=["sig_id"], keep="first", inplace=True)
sign.to_csv(ofile, "\t", index=False)
| [
"logging.basicConfig",
"pandas.concat",
"pandas.read_table",
"sys.exit",
"logging.info",
"logging.error"
]
| [((144, 220), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n", (163, 220), False, 'import sys, os, logging\n'), ((545, 599), 'pandas.read_table', 'pd.read_table', (['fn1', '"""\t"""'], {'na_values': "['-666', '-666.0']"}), "(fn1, '\\t', na_values=['-666', '-666.0'])\n", (558, 599), True, 'import pandas as pd\n'), ((602, 643), 'logging.info', 'logging.info', (['f"""columns: {part1.columns}"""'], {}), "(f'columns: {part1.columns}')\n", (614, 643), False, 'import sys, os, logging\n'), ((763, 830), 'pandas.read_table', 'pd.read_table', (['fn2', '"""\t"""'], {'na_values': "['-666', '-666.0']", 'dtype': '"""str"""'}), "(fn2, '\\t', na_values=['-666', '-666.0'], dtype='str')\n", (776, 830), True, 'import pandas as pd\n'), ((886, 927), 'logging.info', 'logging.info', (['f"""columns: {part2.columns}"""'], {}), "(f'columns: {part2.columns}')\n", (898, 927), False, 'import sys, os, logging\n'), ((1046, 1071), 'pandas.concat', 'pd.concat', (['[part1, part2]'], {}), '([part1, part2])\n', (1055, 1071), True, 'import pandas as pd\n'), ((252, 359), 'logging.error', 'logging.error', (['"""3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file."""'], {}), "(\n '3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file.'\n )\n", (265, 359), False, 'import sys, os, logging\n'), ((354, 365), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (362, 365), False, 'import sys, os, logging\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by <NAME> (c) 2016-2017
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
from geomdl.visualization import VisPlotly
# Fix file path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Create a BSpline surface instance
surf = BSpline.Surface()
# Set degrees
surf.degree_u = 3
surf.degree_v = 3
# Set control points
surf.set_ctrlpts(*exchange.import_txt("ex_surface02.cpt", two_dimensional=True))
# Set knot vectors
surf.knotvector_u = utilities.generate_knot_vector(surf.degree_u, 6)
surf.knotvector_v = utilities.generate_knot_vector(surf.degree_v, 6)
# Set evaluation delta
surf.delta = 0.025
# Evaluate surface
surf.evaluate()
# Plot the control point grid and the evaluated surface
vis_comp = VisPlotly.VisSurface()
surf.vis = vis_comp
surf.render()
# Evaluate surface tangent and normal at the given u and v
uv = [0.2, 0.9]
surf_tangent = operations.tangent(surf, uv)
surf_normal = operations.normal(surf, uv)
# Good to have something here to put a breakpoint
pass
| [
"geomdl.operations.normal",
"geomdl.BSpline.Surface",
"geomdl.visualization.VisPlotly.VisSurface",
"geomdl.utilities.generate_knot_vector",
"os.path.realpath",
"geomdl.exchange.import_txt",
"geomdl.operations.tangent"
]
| [((450, 467), 'geomdl.BSpline.Surface', 'BSpline.Surface', ([], {}), '()\n', (465, 467), False, 'from geomdl import BSpline\n'), ((662, 710), 'geomdl.utilities.generate_knot_vector', 'utilities.generate_knot_vector', (['surf.degree_u', '(6)'], {}), '(surf.degree_u, 6)\n', (692, 710), False, 'from geomdl import utilities\n'), ((731, 779), 'geomdl.utilities.generate_knot_vector', 'utilities.generate_knot_vector', (['surf.degree_v', '(6)'], {}), '(surf.degree_v, 6)\n', (761, 779), False, 'from geomdl import utilities\n'), ((927, 949), 'geomdl.visualization.VisPlotly.VisSurface', 'VisPlotly.VisSurface', ([], {}), '()\n', (947, 949), False, 'from geomdl.visualization import VisPlotly\n'), ((1075, 1103), 'geomdl.operations.tangent', 'operations.tangent', (['surf', 'uv'], {}), '(surf, uv)\n', (1093, 1103), False, 'from geomdl import operations\n'), ((1118, 1145), 'geomdl.operations.normal', 'operations.normal', (['surf', 'uv'], {}), '(surf, uv)\n', (1135, 1145), False, 'from geomdl import operations\n'), ((377, 403), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (393, 403), False, 'import os\n'), ((559, 620), 'geomdl.exchange.import_txt', 'exchange.import_txt', (['"""ex_surface02.cpt"""'], {'two_dimensional': '(True)'}), "('ex_surface02.cpt', two_dimensional=True)\n", (578, 620), False, 'from geomdl import exchange\n')] |
## Generated by pyxsdgen
from xml.etree import ElementTree as ET
# types
class OrderedStpType(object):
def __init__(self, order, stp):
self.order = order # int
self.stp = stp # StpIdType -> string
@classmethod
def build(self, element):
return OrderedStpType(
element.get('order'),
element.findtext('stp')
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'order' : str(self.order)})
ET.SubElement(r, 'stp').text = self.stp
return r
class TypeValueType(object):
def __init__(self, type_, value):
self.type_ = type_
self.value = value
@classmethod
def build(self, element):
return TypeValueType(
element.get('type'),
element.text
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'type' : self.type_})
r.text = self.value
return r
class P2PServiceBaseType(object):
def __init__(self, capacity, directionality, symmetricPath, sourceSTP, destSTP, ero, parameter):
self.capacity = capacity # long
self.directionality = directionality # DirectionalityType -> string
self.symmetricPath = symmetricPath # boolean
self.sourceSTP = sourceSTP # StpIdType -> string
self.destSTP = destSTP # StpIdType -> string
self.ero = ero # [ OrderedStpType ]
self.parameter = parameter # [ TypeValueType ]
@classmethod
def build(self, element):
return P2PServiceBaseType(
int(element.findtext('capacity')),
element.findtext('directionality'),
True if element.findtext('symmetricPath') == 'true' else False if element.find('symmetricPath') is not None else None,
element.findtext('sourceSTP'),
element.findtext('destSTP'),
[ OrderedStpType.build(e) for e in element.find('ero') ] if element.find('ero') is not None else None,
[ TypeValueType.build(e) for e in element.findall('parameter') ] if element.find('parameter') is not None else None
)
def xml(self, elementName):
r = ET.Element(elementName)
ET.SubElement(r, 'capacity').text = str(self.capacity)
ET.SubElement(r, 'directionality').text = self.directionality
if self.symmetricPath is not None:
ET.SubElement(r, 'symmetricPath').text = 'true' if self.symmetricPath else 'false'
ET.SubElement(r, 'sourceSTP').text = self.sourceSTP
ET.SubElement(r, 'destSTP').text = self.destSTP
if self.ero is not None:
ET.SubElement(r, 'ero').extend( [ e.xml('orderedSTP') for e in self.ero ] )
if self.parameter is not None:
for p in self.parameter:
ET.SubElement(r, 'parameter', attrib={'type': p.type_}).text = p.value
return r
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
def parse(input_):
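    # Parse an XML document string and build the matching typed object
    # from its root element.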
root = ET.fromstring(input_)
return parseElement(root)
def parseElement(element):
type_map = {
str(p2ps) : P2PServiceBaseType,
str(parameter) : TypeValueType
}
if not element.tag in type_map:
raise ValueError('No type mapping for tag %s' % element.tag)
type_ = type_map[element.tag]
return type_.build(element)
| [
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.fromstring",
"xml.etree.ElementTree.QName",
"xml.etree.ElementTree.SubElement"
]
| [((3047, 3079), 'xml.etree.ElementTree.QName', 'ET.QName', (['POINT2POINT_NS', '"""p2ps"""'], {}), "(POINT2POINT_NS, 'p2ps')\n", (3055, 3079), True, 'from xml.etree import ElementTree as ET\n'), ((3094, 3130), 'xml.etree.ElementTree.QName', 'ET.QName', (['POINT2POINT_NS', '"""capacity"""'], {}), "(POINT2POINT_NS, 'capacity')\n", (3102, 3130), True, 'from xml.etree import ElementTree as ET\n'), ((3145, 3182), 'xml.etree.ElementTree.QName', 'ET.QName', (['POINT2POINT_NS', '"""parameter"""'], {}), "(POINT2POINT_NS, 'parameter')\n", (3153, 3182), True, 'from xml.etree import ElementTree as ET\n'), ((3215, 3236), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['input_'], {}), '(input_)\n', (3228, 3236), True, 'from xml.etree import ElementTree as ET\n'), ((895, 947), 'xml.etree.ElementTree.Element', 'ET.Element', (['elementName'], {'attrib': "{'type': self.type_}"}), "(elementName, attrib={'type': self.type_})\n", (905, 947), True, 'from xml.etree import ElementTree as ET\n'), ((2242, 2265), 'xml.etree.ElementTree.Element', 'ET.Element', (['elementName'], {}), '(elementName)\n', (2252, 2265), True, 'from xml.etree import ElementTree as ET\n'), ((509, 532), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""stp"""'], {}), "(r, 'stp')\n", (522, 532), True, 'from xml.etree import ElementTree as ET\n'), ((2274, 2302), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""capacity"""'], {}), "(r, 'capacity')\n", (2287, 2302), True, 'from xml.etree import ElementTree as ET\n'), ((2337, 2371), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""directionality"""'], {}), "(r, 'directionality')\n", (2350, 2371), True, 'from xml.etree import ElementTree as ET\n'), ((2545, 2574), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""sourceSTP"""'], {}), "(r, 'sourceSTP')\n", (2558, 2574), True, 'from xml.etree import ElementTree as ET\n'), ((2605, 2632), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""destSTP"""'], {}), "(r, 'destSTP')\n", (2618, 2632), True, 'from xml.etree import ElementTree as ET\n'), ((2454, 2487), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""symmetricPath"""'], {}), "(r, 'symmetricPath')\n", (2467, 2487), True, 'from xml.etree import ElementTree as ET\n'), ((2698, 2721), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""ero"""'], {}), "(r, 'ero')\n", (2711, 2721), True, 'from xml.etree import ElementTree as ET\n'), ((2866, 2921), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['r', '"""parameter"""'], {'attrib': "{'type': p.type_}"}), "(r, 'parameter', attrib={'type': p.type_})\n", (2879, 2921), True, 'from xml.etree import ElementTree as ET\n')] |
import matplotlib.pyplot as plt
import numpy as np
x=20
y=1
plt.plot(x,y)
plt.show()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
| [((62, 76), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (70, 76), True, 'import matplotlib.pyplot as plt\n'), ((76, 86), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (84, 86), True, 'import matplotlib.pyplot as plt\n')] |
import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
sys.path.append(os.getcwd())
def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
if ndim is None:
try:
ndim = len(theta)
except TypeError:
assert isinstance(theta, (float, int)), theta
ndim = 1
logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
return logl
class Gaussian(object):
def __init__(self, sigma=1.0, nderived=0):
self.sigma = sigma
self.nderived = nderived
def __call__(self, theta):
logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
return logl, [0.0] * self.nderived
class GaussianMix(object):
def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,
nderived=0):
assert len(weights) in [2, 3, 4], (
'Weights must have 2, 3 or 4 components. Weights=' + str(weights))
assert np.isclose(sum(weights), 1), (
'Weights must sum to 1! Weights=' + str(weights))
self.nderived = nderived
self.weights = weights
self.sigmas = [sigma] * len(weights)
positions = []
positions.append(np.asarray([0, sep]))
positions.append(np.asarray([0, -sep]))
positions.append(np.asarray([sep, 0]))
positions.append(np.asarray([-sep, 0]))
self.positions = positions[:len(weights)]
def __call__(self, theta):
thetas = []
for pos in self.positions:
thetas.append(copy.deepcopy(theta))
thetas[-1][:2] -= pos
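        # Mixture log-likelihood: log-sum-exp over the weighted, shifted
        # Gaussian components.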
logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
+ np.log(self.weights[i])) for i in range(len(self.weights))]
logl = scipy.special.logsumexp(logls)
return logl, [0.0] * self.nderived
def main(args):
from nnest import NestedSampler
g = GaussianMix()
def loglike(z):
return np.array([g(x)[0] for x in z])
def transform(x):
return 10. * x
volume_switch = 1.0 / (5 * args.num_slow)
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=volume_switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='logs/mog4_fast')
args = parser.parse_args()
main(args)
| [
"argparse.ArgumentParser",
"numpy.log",
"numpy.asarray",
"nnest.NestedSampler",
"os.getcwd",
"numpy.sum",
"copy.deepcopy"
]
| [((107, 118), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (116, 118), False, 'import os\n'), ((2136, 2395), 'nnest.NestedSampler', 'NestedSampler', (['args.x_dim', 'loglike'], {'transform': 'transform', 'log_dir': 'args.log_dir', 'num_live_points': 'args.num_live_points', 'hidden_dim': 'args.hidden_dim', 'num_layers': 'args.num_layers', 'num_blocks': 'args.num_blocks', 'num_slow': 'args.num_slow', 'use_gpu': 'args.use_gpu'}), '(args.x_dim, loglike, transform=transform, log_dir=args.\n log_dir, num_live_points=args.num_live_points, hidden_dim=args.\n hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks,\n num_slow=args.num_slow, use_gpu=args.use_gpu)\n', (2149, 2395), False, 'from nnest import NestedSampler\n'), ((2602, 2627), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2625, 2627), False, 'import argparse\n'), ((359, 384), 'numpy.sum', 'np.sum', (['((theta - mu) ** 2)'], {}), '((theta - mu) ** 2)\n', (365, 384), True, 'import numpy as np\n'), ((417, 447), 'numpy.log', 'np.log', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (423, 447), True, 'import numpy as np\n'), ((1268, 1288), 'numpy.asarray', 'np.asarray', (['[0, sep]'], {}), '([0, sep])\n', (1278, 1288), True, 'import numpy as np\n'), ((1315, 1336), 'numpy.asarray', 'np.asarray', (['[0, -sep]'], {}), '([0, -sep])\n', (1325, 1336), True, 'import numpy as np\n'), ((1363, 1383), 'numpy.asarray', 'np.asarray', (['[sep, 0]'], {}), '([sep, 0])\n', (1373, 1383), True, 'import numpy as np\n'), ((1410, 1431), 'numpy.asarray', 'np.asarray', (['[-sep, 0]'], {}), '([-sep, 0])\n', (1420, 1431), True, 'import numpy as np\n'), ((1596, 1616), 'copy.deepcopy', 'copy.deepcopy', (['theta'], {}), '(theta)\n', (1609, 1616), False, 'import copy\n'), ((1735, 1758), 'numpy.log', 'np.log', (['self.weights[i]'], {}), '(self.weights[i])\n', (1741, 1758), True, 'import numpy as np\n')] |
from typing import Dict, Tuple
import numpy as np
def einsum(expr: str, *args: Tuple[np.ndarray, ...], **kwargs) -> np.ndarray:
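    # Operands in the expression are ";"-separated and their index names are
    # comma-separated (and may be multi-character); remap every distinct name
    # to a single letter so the rebuilt expression is valid for np.einsum.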
(a, b) = map(str.strip, expr.split("->"))
a_ = list(
map(lambda s: list(map(str.strip, s.split(","))), map(str.strip, a.split(";")))
)
b_ = list(map(str.strip, b.split(",")))
chars = "abcdefghijklmnopqrstuvwxyz"
char_map: Dict[str, str] = {}
i = 0
for cs in a_:
for c in cs:
if c not in char_map:
char_map[c] = chars[i]
i += 1
for c in b_:
if c not in char_map:
char_map[c] = chars[i]
i += 1
expr_ = "->".join(
[
",".join(map(lambda ss: "".join(map(lambda s: char_map[s], ss)), a_)),
"".join(map(lambda s: char_map[s], b_)),
]
)
return np.einsum(expr_, *args, **kwargs)
| [
"numpy.einsum"
]
| [((846, 879), 'numpy.einsum', 'np.einsum', (['expr_', '*args'], {}), '(expr_, *args, **kwargs)\n', (855, 879), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
class _LinearModel(object):
def __init__(self):
self.w = None
def fit(self, x, y):
pass
def predict(self, x):
return np.dot(x, self.w)
def cost(self, x, y):
pass
def precision(self, x, y):
p = self.predict(x)
return (1.0 / len(p)) * np.sum(p == y)
class LeastSquareRegression(_LinearModel):
def __init__(self):
super(LeastSquareRegression, self).__init__()
def fit(self, x, y):
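        # Ordinary least squares via the pseudo-inverse: w = pinv(X^T X) X^T y.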
xt = x.transpose()
self.w = np.linalg.pinv(np.dot(xt, x)).dot(xt).dot(y)
def cost(self, x, y):
""" Residual Sum of Squares """
r = y - np.dot(x, self.w)
rt= np.transpose(r)
return (1.0 / len(x)) * np.trace(np.dot(rt, r))
class RidgeRegression(LeastSquareRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeRegression, self).__init__()
self.incr = incr
self.min_change = min_change
def fit(self, x, y):
xtrain, xval = np.split(x, [int(0.7*len(x))])
ytrain, yval = np.split(y, [int(0.7*len(y))])
alpha = 0.0
best_alpha = 0.0
best_cost = float("inf")
old_cost = float("inf")
new_cost = float("inf")
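        # Sweep the ridge penalty upward in steps of `incr`, keeping the alpha
        # with the lowest validation cost, and stop once the cost change
        # between steps falls below `min_change`.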
while True:
self._fit(xtrain, ytrain, alpha)
new_cost = self.cost(xval, yval)
if new_cost < best_cost:
best_cost = new_cost
best_alpha = alpha
#print("cost: %f, alpha: %f" % (best_cost, best_alpha))
if abs(new_cost - old_cost) < self.min_change:
break
old_cost = new_cost
alpha += self.incr
self._fit(xtrain, ytrain, best_alpha)
def _fit(self, x, y, alpha):
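        # Drop the bias column, solve the regularized normal equations
        # (X^T X + alpha*I) w = X^T y via the pseudo-inverse, then recompute
        # the intercept from the column means and prepend it to w.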
x = x[:,1:]
xt = np.transpose(x)
self.w = np.linalg.pinv(np.dot(xt, x) + alpha * np.eye(x.shape[1])).dot(xt).dot(y)
bias = np.mean(y, axis=0, keepdims=True) - np.dot(np.mean(x, axis=0, keepdims=True), self.w)
self.w = np.vstack([bias, self.w])
class LeastSquareClassification(LeastSquareRegression):
def __init__(self):
super(LeastSquareClassification, self).__init__()
def predict(self, x):
return super(LeastSquareClassification, self).predict(x).argmax(axis=1)
class RidgeClassification(RidgeRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeClassification, self).__init__(incr, min_change)
def predict(self, x):
return super(RidgeClassification, self).predict(x).argmax(axis=1)
class LDAClassification(_LinearModel):
def __init__(self):
self.w = None
self.priors = None
self.means = []
self.covs = []
def fit(self, x, y):
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count = np.sum (y, axis=0, keepdims=True)
self.priors = (1.0 / len(y)) * np.sum (y, axis=0, keepdims=True)
self.w = self._lda(x, y)
x_proj = np.dot(x, self.w)
means = (1.0 / class_count.T) * np.dot(y.T, x_proj)
        for i in range(k):
xk_proj = x_proj[y_arg==i]
self.means.append(np.mean(xk_proj, axis = 0))
self.covs .append(np.cov (xk_proj, rowvar=False))
def predict(self, x):
k = self.w.shape[1]
x_proj = np.dot(x, self.w)
        likelihood = np.column_stack([multivariate_normal.pdf(x_proj, self.means[i], self.covs[i]) for i in range(k)])
posterior = (likelihood * self.priors)
posterior = posterior / np.sum(posterior, axis=1, keepdims=True)
return np.argmax(posterior, axis=1)
def _lda(self, x, y):
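        # Fisher's LDA: build the between-class scatter (b_cov) and
        # within-class scatter (w_cov), then keep the top-k eigenvectors of
        # pinv(w_cov) @ b_cov (sorted by |eigenvalue|) as the projection.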
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count= np.sum (y, axis=0, keepdims=True)
total_mean = np.mean(x, axis=0, keepdims=True)
class_mean = (1.0 / class_count.T) * np.dot(y.T, x)
mk_m = class_mean - total_mean
b_cov = np.dot(class_count * mk_m.T, mk_m)
w_cov = np.zeros(b_cov.shape)
        for i in range(k):
xk = x[y_arg == i]
xk_mk = xk - class_mean[i]
w_cov += np.dot(xk_mk.T, xk_mk)
eig_vals, eig_vecs = np.linalg.eig(np.dot(np.linalg.pinv(w_cov), b_cov))
eig_vals = np.abs(eig_vals)
eig_args = np.argsort(eig_vals)[::-1][:k]
return eig_vecs[:, eig_args]
| [
"numpy.mean",
"numpy.abs",
"numpy.eye",
"numpy.linalg.pinv",
"scipy.stats.multivariate_normal.pdf",
"numpy.argmax",
"numpy.argsort",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.vstack",
"numpy.cov",
"numpy.transpose"
]
| [((277, 294), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (283, 294), True, 'import numpy as np\n'), ((798, 813), 'numpy.transpose', 'np.transpose', (['r'], {}), '(r)\n', (810, 813), True, 'import numpy as np\n'), ((1976, 1991), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (1988, 1991), True, 'import numpy as np\n'), ((2209, 2234), 'numpy.vstack', 'np.vstack', (['[bias, self.w]'], {}), '([bias, self.w])\n', (2218, 2234), True, 'import numpy as np\n'), ((3000, 3020), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3009, 3020), True, 'import numpy as np\n'), ((3043, 3075), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3049, 3075), True, 'import numpy as np\n'), ((3207, 3224), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (3213, 3224), True, 'import numpy as np\n'), ((3554, 3571), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (3560, 3571), True, 'import numpy as np\n'), ((3831, 3859), 'numpy.argmax', 'np.argmax', (['posterior'], {'axis': '(1)'}), '(posterior, axis=1)\n', (3840, 3859), True, 'import numpy as np\n'), ((3931, 3951), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3940, 3951), True, 'import numpy as np\n'), ((3974, 4006), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3980, 4006), True, 'import numpy as np\n'), ((4029, 4062), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4036, 4062), True, 'import numpy as np\n'), ((4188, 4222), 'numpy.dot', 'np.dot', (['(class_count * mk_m.T)', 'mk_m'], {}), '(class_count * mk_m.T, mk_m)\n', (4194, 4222), True, 'import numpy as np\n'), ((4248, 4269), 'numpy.zeros', 'np.zeros', (['b_cov.shape'], {}), '(b_cov.shape)\n', (4256, 4269), True, 'import numpy as np\n'), ((4516, 4532), 'numpy.abs', 'np.abs', (['eig_vals'], {}), '(eig_vals)\n', (4522, 4532), True, 'import numpy as np\n'), ((431, 445), 'numpy.sum', 'np.sum', (['(p == y)'], {}), '(p == y)\n', (437, 445), True, 'import numpy as np\n'), ((768, 785), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (774, 785), True, 'import numpy as np\n'), ((2101, 2134), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (2108, 2134), True, 'import numpy as np\n'), ((3117, 3149), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3123, 3149), True, 'import numpy as np\n'), ((3266, 3285), 'numpy.dot', 'np.dot', (['y.T', 'x_proj'], {}), '(y.T, x_proj)\n', (3272, 3285), True, 'import numpy as np\n'), ((3774, 3814), 'numpy.sum', 'np.sum', (['posterior'], {'axis': '(1)', 'keepdims': '(True)'}), '(posterior, axis=1, keepdims=True)\n', (3780, 3814), True, 'import numpy as np\n'), ((4108, 4122), 'numpy.dot', 'np.dot', (['y.T', 'x'], {}), '(y.T, x)\n', (4114, 4122), True, 'import numpy as np\n'), ((4392, 4414), 'numpy.dot', 'np.dot', (['xk_mk.T', 'xk_mk'], {}), '(xk_mk.T, xk_mk)\n', (4398, 4414), True, 'import numpy as np\n'), ((856, 869), 'numpy.dot', 'np.dot', (['rt', 'r'], {}), '(rt, r)\n', (862, 869), True, 'import numpy as np\n'), ((2144, 2177), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (2151, 2177), True, 'import numpy as np\n'), ((3383, 3407), 'numpy.mean', 'np.mean', (['xk_proj'], {'axis': '(0)'}), '(xk_proj, axis=0)\n', (3390, 3407), True, 
'import numpy as np\n'), ((3445, 3474), 'numpy.cov', 'np.cov', (['xk_proj'], {'rowvar': '(False)'}), '(xk_proj, rowvar=False)\n', (3451, 3474), True, 'import numpy as np\n'), ((3611, 3671), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['x_proj', 'self.means[i]', 'self.covs[i]'], {}), '(x_proj, self.means[i], self.covs[i])\n', (3634, 3671), False, 'from scipy.stats import multivariate_normal\n'), ((4466, 4487), 'numpy.linalg.pinv', 'np.linalg.pinv', (['w_cov'], {}), '(w_cov)\n', (4480, 4487), True, 'import numpy as np\n'), ((4553, 4573), 'numpy.argsort', 'np.argsort', (['eig_vals'], {}), '(eig_vals)\n', (4563, 4573), True, 'import numpy as np\n'), ((655, 668), 'numpy.dot', 'np.dot', (['xt', 'x'], {}), '(xt, x)\n', (661, 668), True, 'import numpy as np\n'), ((2025, 2038), 'numpy.dot', 'np.dot', (['xt', 'x'], {}), '(xt, x)\n', (2031, 2038), True, 'import numpy as np\n'), ((2049, 2067), 'numpy.eye', 'np.eye', (['x.shape[1]'], {}), '(x.shape[1])\n', (2055, 2067), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Order(Base):
__tablename__ = 'orders'
id = Column(Integer, primary_key=True)
currency = Column(String)
price = Column(Float)
size = Column(Float)
gdax_order_id = Column(String)
created_at = Column(DateTime)
class Withdrawal(Base):
__tablename__ = 'withdrawals'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
crypto_address = Column(String)
gdax_withdrawal_id = Column(String)
class Deposit(Base):
__tablename__ = 'deposits'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
payment_method_id = Column(String)
payout_at = Column(DateTime)
gdax_deposit_id = Column(String)
def get_session(engine):
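    # `engine` is a connection URL string; build the engine, create any
    # missing tables, and return a session bound to it.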
engine = create_engine(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"sqlalchemy.Column",
"sqlalchemy.ext.declarative.declarative_base"
]
| [((228, 246), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (244, 246), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((307, 340), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (313, 340), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((356, 370), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (362, 370), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((383, 396), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (389, 396), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((408, 421), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (414, 421), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((442, 456), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (448, 456), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((474, 490), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (480, 490), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((561, 594), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (567, 594), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((610, 624), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (616, 624), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((638, 651), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (644, 651), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((673, 687), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (679, 687), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((713, 727), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (719, 727), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((792, 825), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (798, 825), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((841, 855), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (847, 855), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((869, 882), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (875, 882), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((907, 921), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (913, 921), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((938, 954), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (944, 954), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((977, 991), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (983, 991), False, 'from sqlalchemy import Column, String, Float, DateTime, Integer\n'), ((1032, 1053), 'sqlalchemy.create_engine', 'create_engine', (['engine'], {}), '(engine)\n', (1045, 1053), False, 'from sqlalchemy import create_engine\n'), ((1105, 1130), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (1117, 1130), False, 'from sqlalchemy.orm import sessionmaker\n')] |
import csv
import joblib
from sklearn.metrics import accuracy_score
data = []
features = []
targets = []
feature_names = []
users = []
with open('satisfaction_feature_names.csv') as name_file:
column_name_file = csv.reader(name_file)
feature_names = next(column_name_file)[2:394]
with open('cza_satisfaction_train_0922.csv') as data_file:
csv_file = csv.reader(data_file)
idx = 0
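    # Only rows 10,001-50,000 of the training file are read below.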
for content in csv_file:
idx = idx + 1
if idx <= 10000:
continue
if idx > 50000:
break
content = content[:2] + list(map(float, content[2:]))
if len(content) != 0:
data.append(content)
features.append(content[2:394])
targets.append(content[-1])
users.append(content[1])
clf, sorted_feature_scores = joblib.load("cza_rf.pkl")
predict_result = clf.predict(features)
print(sorted_feature_scores)
print(accuracy_score(predict_result, targets))
result = list(zip(users, predict_result))
print(result[:10])
print(sum(predict_result))
print(sum([flag[1] for flag in result]))
with open("rf_predict_result.csv", "w", encoding="UTF-8") as w_file:
result_file = csv.writer(w_file)
for idx, row in enumerate(result):
if idx > 10:
break
row = list(row)
row.insert(0, 20200928)
result_file.writerow(row)
| [
"csv.writer",
"csv.reader",
"sklearn.metrics.accuracy_score",
"joblib.load"
]
| [((814, 839), 'joblib.load', 'joblib.load', (['"""cza_rf.pkl"""'], {}), "('cza_rf.pkl')\n", (825, 839), False, 'import joblib\n'), ((217, 238), 'csv.reader', 'csv.reader', (['name_file'], {}), '(name_file)\n', (227, 238), False, 'import csv\n'), ((364, 385), 'csv.reader', 'csv.reader', (['data_file'], {}), '(data_file)\n', (374, 385), False, 'import csv\n'), ((914, 953), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict_result', 'targets'], {}), '(predict_result, targets)\n', (928, 953), False, 'from sklearn.metrics import accuracy_score\n'), ((1171, 1189), 'csv.writer', 'csv.writer', (['w_file'], {}), '(w_file)\n', (1181, 1189), False, 'import csv\n')] |
import unittest
import pandas as pd
import git
import os
from dfstools import get_dataset_dtypes
from dfstools import find_related_cols_by_name
from dfstools import find_related_cols_by_content
from dfstools import find_parent_child_relationships
from dfstools import pecan_cookies_load_data
class RelationshipTools(unittest.TestCase):
def test_get_dataset_dtypes(self):
expected = {'airlines': {'carrier': {'dtype': 'O'}},
'airports': {'dest': {'dtype': 'O'}},
'flights': {'dest': {'dtype': 'O'}, 'carrier': {'dtype': 'O'},'flight_id': {'dtype': 'O'}},
'trip_logs': {'flight_id': {'dtype': 'O'}}}
result = get_dataset_dtypes(None)
self.assertEqual(expected, result)
expected = {
'airlines': {'carrier': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.carrier': {}}]}},
'airports': {'dest': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.dest': {}}]}},
'flights': {'dest': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airports.dest': {}}]},
'carrier': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airlines.carrier': {}}]},
'flight_id': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'trip_logs.flight_id': {}}]}},
'trip_logs': {'flight_id': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'flights.flight_id': {}}]}}}
data = os.path.join(git.Repo('.', search_parent_directories=True).working_tree_dir, 'data')
dataframe_dict = {'airlines': pd.read_csv(os.path.join(data, 'airlines', 'airlines.csv')),
'flights': pd.read_csv(os.path.join(data, 'flights', 'flights.csv')),
'airports': pd.read_csv(os.path.join(data, 'airports', 'airports.csv'))}
result = find_related_cols_by_name(dataframe_dict, result)
self.assertEqual(expected, result)
def test_find_related_cols_by_content(self):
# ---pecan cookies sprint one test case---
expected = {
'airports': {'dest': {'relationships': ['flights.origin', 'flights.dest']},
'dest_city': {'relationships': ['flights.origin_city']},
'dest_state': {'relationships': ['flights.origin_state']}},
'airlines': {'carrier': {'relationships': ['flights.carrier']}},
"flights": {
"flight_id": {"relationships": []},
"origin": {"relationships": ["airports.dest"]},
"origin_city": {"relationships": ["airports.dest_city"]},
"origin_state": {"relationships": ["airports.dest_state"]},
"dest": {"relationships": ["airports.dest"]},
"distance_group": {"relationships": []},
"carrier": {"relationships": ["airlines.carrier"]},
"flight_num": {"relationships": []},
"first_trip_logs_time": {"relationships": []}}
}
data_list = pecan_cookies_load_data()
result = find_related_cols_by_content(data_list)
self.assertEqual(expected, result)
#result = find_parent_child_relationships(None, result)
#self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
| [
"os.path.join",
"git.Repo",
"dfstools.get_dataset_dtypes",
"unittest.main",
"dfstools.find_related_cols_by_content",
"dfstools.find_related_cols_by_name",
"dfstools.pecan_cookies_load_data"
]
| [((4071, 4086), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4084, 4086), False, 'import unittest\n'), ((689, 713), 'dfstools.get_dataset_dtypes', 'get_dataset_dtypes', (['None'], {}), '(None)\n', (707, 713), False, 'from dfstools import get_dataset_dtypes\n'), ((2504, 2553), 'dfstools.find_related_cols_by_name', 'find_related_cols_by_name', (['dataframe_dict', 'result'], {}), '(dataframe_dict, result)\n', (2529, 2553), False, 'from dfstools import find_related_cols_by_name\n'), ((3802, 3827), 'dfstools.pecan_cookies_load_data', 'pecan_cookies_load_data', ([], {}), '()\n', (3825, 3827), False, 'from dfstools import pecan_cookies_load_data\n'), ((3845, 3884), 'dfstools.find_related_cols_by_content', 'find_related_cols_by_content', (['data_list'], {}), '(data_list)\n', (3873, 3884), False, 'from dfstools import find_related_cols_by_content\n'), ((2119, 2164), 'git.Repo', 'git.Repo', (['"""."""'], {'search_parent_directories': '(True)'}), "('.', search_parent_directories=True)\n", (2127, 2164), False, 'import git\n'), ((2242, 2288), 'os.path.join', 'os.path.join', (['data', '"""airlines"""', '"""airlines.csv"""'], {}), "(data, 'airlines', 'airlines.csv')\n", (2254, 2288), False, 'import os\n'), ((2340, 2384), 'os.path.join', 'os.path.join', (['data', '"""flights"""', '"""flights.csv"""'], {}), "(data, 'flights', 'flights.csv')\n", (2352, 2384), False, 'import os\n'), ((2437, 2483), 'os.path.join', 'os.path.join', (['data', '"""airports"""', '"""airports.csv"""'], {}), "(data, 'airports', 'airports.csv')\n", (2449, 2483), False, 'import os\n')] |
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout,QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QDialog):
def __init__(self):
super().__init__()
self.title = 'Calculator'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createGridLayout()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
self.textbox = QLineEdit(self)
self.textbox.move(20, 40)
self.textbox.resize(600,35)
# Original Approach
# buttonp = QPushButton('+', self)
# buttonp.setToolTip('Addition Operator')
# buttonp.move(100,70)
# buttonp.clicked.connect(self.on_click)
# buttonm = QPushButton('-', self)
# buttonm.setToolTip('Subtraction Operator')
# buttonm.move(100,100)
# buttonm.clicked.connect(self.on_click)
self.show()
def createGridLayout(self):
self.horizontalGroupBox = QGroupBox("Grid")
layout = QGridLayout()
# layout.setColumnStretch(1, 2)
# layout.setColumnStretch(2, 4)
layout.addWidget(QPushButton('1'),0,0)
layout.addWidget(QPushButton('2'),0,1)
layout.addWidget(QPushButton('3'),0,2)
layout.addWidget(QPushButton('4'),1,0)
layout.addWidget(QPushButton('5'),1,1)
layout.addWidget(QPushButton('6'),1,2)
layout.addWidget(QPushButton('7'),2,0)
layout.addWidget(QPushButton('8'),2,1)
layout.addWidget(QPushButton('9'),2,2)
layout.addWidget(QPushButton('0'),3,1)
layout.addWidget(QPushButton('.'),3,0)
layout.addWidget(QPushButton('='),3,2)
layout.addWidget(QPushButton('+'),0,4)
layout.addWidget(QPushButton('-'),1,4)
layout.addWidget(QPushButton('*'),2,4)
layout.addWidget(QPushButton('/'),3,4)
self.horizontalGroupBox.setLayout(layout)
# @pyqtSlot()
# def on_click(self):
# print('Button click')
@pyqtSlot()
def on_click(self):
textboxValue = "Good"
QMessageBox.question(self, 'Message - pythonspot.com', "You typed: " + textboxValue, QMessageBox.Ok, QMessageBox.Ok)
self.textbox.setText("Good")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_()) | [
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QLineEdit"
]
| [((2495, 2505), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (2503, 2505), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((2771, 2793), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2783, 2793), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((732, 745), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (743, 745), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((867, 882), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (876, 882), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1443, 1460), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""Grid"""'], {}), "('Grid')\n", (1452, 1460), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1479, 1492), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1490, 1492), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2571, 2691), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Message - pythonspot.com"""', "('You typed: ' + textboxValue)", 'QMessageBox.Ok', 'QMessageBox.Ok'], {}), "(self, 'Message - pythonspot.com', 'You typed: ' +\n textboxValue, QMessageBox.Ok, QMessageBox.Ok)\n", (2591, 2691), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1611, 1627), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""1"""'], {}), "('1')\n", (1622, 1627), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1659, 1675), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""2"""'], {}), "('2')\n", (1670, 1675), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1707, 1723), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""3"""'], {}), "('3')\n", (1718, 1723), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1755, 1771), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""4"""'], {}), "('4')\n", (1766, 1771), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1803, 1819), 
'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""5"""'], {}), "('5')\n", (1814, 1819), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1851, 1867), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""6"""'], {}), "('6')\n", (1862, 1867), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1899, 1915), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""7"""'], {}), "('7')\n", (1910, 1915), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1947, 1963), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""8"""'], {}), "('8')\n", (1958, 1963), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((1995, 2011), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""9"""'], {}), "('9')\n", (2006, 2011), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2043, 2059), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""0"""'], {}), "('0')\n", (2054, 2059), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2091, 2107), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""."""'], {}), "('.')\n", (2102, 2107), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2139, 2155), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""="""'], {}), "('=')\n", (2150, 2155), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2187, 2203), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""+"""'], {}), "('+')\n", (2198, 2203), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2235, 2251), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""-"""'], {}), "('-')\n", (2246, 2251), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2283, 2299), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""*"""'], {}), "('*')\n", (2294, 2299), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n'), ((2331, 2347), 
'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""/"""'], {}), "('/')\n", (2342, 2347), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox\n')] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/diff/UniversalDiff.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from v3.diff import Transaction_pb2 as v3_dot_diff_dot_Transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/diff/UniversalDiff.proto',
package='v3.diff',
syntax='proto3',
serialized_pb=_b('\n\x1bv3/diff/UniversalDiff.proto\x12\x07v3.diff\x1a\x19v3/diff/Transaction.proto\";\n\rUniversalDiff\x12*\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x14.v3.diff.Transactionb\x06proto3')
,
dependencies=[v3_dot_diff_dot_Transaction__pb2.DESCRIPTOR,])
_UNIVERSALDIFF = _descriptor.Descriptor(
name='UniversalDiff',
full_name='v3.diff.UniversalDiff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transactions', full_name='v3.diff.UniversalDiff.transactions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=126,
)
_UNIVERSALDIFF.fields_by_name['transactions'].message_type = v3_dot_diff_dot_Transaction__pb2._TRANSACTION
DESCRIPTOR.message_types_by_name['UniversalDiff'] = _UNIVERSALDIFF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UniversalDiff = _reflection.GeneratedProtocolMessageType('UniversalDiff', (_message.Message,), dict(
DESCRIPTOR = _UNIVERSALDIFF,
__module__ = 'v3.diff.UniversalDiff_pb2'
# @@protoc_insertion_point(class_scope:v3.diff.UniversalDiff)
))
_sym_db.RegisterMessage(UniversalDiff)
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
]
| [((494, 520), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (518, 520), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1168, 1505), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""transactions"""', 'full_name': '"""v3.diff.UniversalDiff.transactions"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None', 'file': 'DESCRIPTOR'}), "(name='transactions', full_name=\n 'v3.diff.UniversalDiff.transactions', index=0, number=1, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None, file=DESCRIPTOR)\n", (1195, 1505), True, 'from google.protobuf import descriptor as _descriptor\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import numpy as np
import torch
from torch import Tensor
from onevision.data.augment.base import BaseAugment
from onevision.data.augment.utils import apply_transform_op
from onevision.data.data_class import ObjectAnnotation
from onevision.factory import AUGMENTS
__all__ = [
"ImageBoxAugment",
]
# MARK: - Modules
@AUGMENTS.register(name="image_box_augment")
class ImageBoxAugment(BaseAugment):
r"""
Args:
policy (str):
Augmentation policy. One of: [`scratch`, `finetune`].
Default: `scratch`.
"""
cfgs = {
"scratch": [
# (op_name, p, magnitude)
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.5, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
"finetune": [
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.8, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
}
# MARK: Magic Functions
def __init__(self, policy: str = "scratch", *args, **kwargs):
super().__init__(*args, **kwargs)
if policy not in self.cfgs:
raise ValueError(f"`policy` must be one of: {self.cfgs.keys()}."
f" But got: {policy}")
self.transforms = self.cfgs[policy]
def __repr__(self) -> str:
return self.__class__.__name__ + \
f"(policy={self.policy}, fill={self.fill})"
# MARK: Configure
def _augmentation_space(self, *args, **kwargs) -> dict[str, tuple[Tensor, bool]]:
pass
# MARK: Forward Pass
def forward(self, input: np.ndarray, target: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Args:
input (np.ndarray):
Image to be transformed.
            target (np.ndarray[*, 4]):
Target to be transformed. Boxes in (x, y, x, y) format.
"""
# NOTE: Transform
transform_id = int(torch.randint(len(self.transforms), (1,)).item())
num_ops = len(self.transforms[transform_id])
probs = torch.rand((num_ops,))
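        # One sub-policy was drawn at random above; each of its ops is now
        # applied independently with its own probability p and magnitude.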
for i, (op_name, p, magnitude) in enumerate(self.transforms[transform_id]):
if probs[i] > p:
continue
magnitude = magnitude if magnitude is not None else 0.0
if op_name == "image_box_random_perspective":
"""
target[:, 2:6] = box_cxcywh_norm_to_xyxy(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
nl = len(target) # Number of labels
if nl:
target = target
else:
target = np.zeros((nl, ObjectAnnotation.box_label_len()))
"""
target[:, 2:6] = box_xyxy_to_cxcywh_norm(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
else:
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
'''
elif op_name == "adjust_hsv":
input = adjust_hsv(
input,
h_factor = magnitude[0],
s_factor = magnitude[1],
v_factor = magnitude[2],
)
elif op_name == "hflip":
input = np.fliplr(input)
target[:, 2] = 1 - target[:, 2]
elif op_name == "vflip":
input = np.flipud(input)
target[:, 3] = 1 - target[:, 3]
'''
return input, target
| [
"onevision.data.data_class.ObjectAnnotation.box_label_len",
"onevision.data.augment.utils.apply_transform_op",
"torch.rand",
"onevision.factory.AUGMENTS.register"
]
| [((416, 459), 'onevision.factory.AUGMENTS.register', 'AUGMENTS.register', ([], {'name': '"""image_box_augment"""'}), "(name='image_box_augment')\n", (433, 459), False, 'from onevision.factory import AUGMENTS\n'), ((2353, 2375), 'torch.rand', 'torch.rand', (['(num_ops,)'], {}), '((num_ops,))\n', (2363, 2375), False, 'import torch\n'), ((2868, 3007), 'onevision.data.augment.utils.apply_transform_op', 'apply_transform_op', ([], {'input': 'input', 'target': 'target', 'op_name': 'op_name', 'magnitude': 'magnitude', 'interpolation': 'self.interpolation', 'fill': 'self.fill'}), '(input=input, target=target, op_name=op_name, magnitude=\n magnitude, interpolation=self.interpolation, fill=self.fill)\n', (2886, 3007), False, 'from onevision.data.augment.utils import apply_transform_op\n'), ((3632, 3771), 'onevision.data.augment.utils.apply_transform_op', 'apply_transform_op', ([], {'input': 'input', 'target': 'target', 'op_name': 'op_name', 'magnitude': 'magnitude', 'interpolation': 'self.interpolation', 'fill': 'self.fill'}), '(input=input, target=target, op_name=op_name, magnitude=\n magnitude, interpolation=self.interpolation, fill=self.fill)\n', (3650, 3771), False, 'from onevision.data.augment.utils import apply_transform_op\n'), ((3364, 3396), 'onevision.data.data_class.ObjectAnnotation.box_label_len', 'ObjectAnnotation.box_label_len', ([], {}), '()\n', (3394, 3396), False, 'from onevision.data.data_class import ObjectAnnotation\n')] |
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
def votecolfn(n):
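    # Bucket the vote count into `ranges` and return the matching palette color.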
return palette[8 - next(i for i,x in enumerate(ranges) if n >= x)]
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else "grey"
return votecolfn(int(df.loc[c].votes))
def labelfn(c):
if c not in df.index: return None
dfc = df.loc[c]
label = "{name} '{year}\n({votes:.2g}M)".format(name=dfc.leader.split(" ")[-1], year=dfc.year[2:], votes=int(dfc.votes) / 1000000)
return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
def box(c):
return Image.new("RGBA", (30, 30), c).place(Image.from_text("", arial(16, bold=True), "black", bg=c))
vote_arr = Image.from_array([
[box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
| [
"seaborn.cubehelix_palette"
]
| [((184, 231), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', (['(11)'], {'start': '(0.2)', 'rot': '(-0.75)'}), '(11, start=0.2, rot=-0.75)\n', (205, 231), True, 'import seaborn as sns\n')] |
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
class DemandForecastModel:
def __init__(self,rank_model='',forecast='',rmodel_beta=1.0,final_beta=1.0):
if rank_model != '':
self.ingest(rank_model,forecast,rmodel_beta,final_beta)
def ingest(self,rank_model,forecast,rmodel_beta=1.0,final_beta=1.0):
self.rank_model = rank_model
self.rmodel_beta = rmodel_beta
self.forecast = forecast
self.final_beta = final_beta
self.alldates = sorted(forecast.index)
def predict(self,rank=10000,date='2018-07-04',buybox=100):
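        # Rank-model estimate, scaled by rmodel_beta, then by the dated
        # (lower, mid, upper) forecast values and final_beta, and finally
        # passed through global2local with the buy-box percentage.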
if 'str' not in str(type(date)): date = str(date)[:10]
pred1 = self.rank_model.predict([rank])[0]
pred2 = pred1*self.rmodel_beta
d = self.forecast.loc[date]
mid,lo,hi = d['yhat'],d['yhat_lower'],d['yhat_upper']
rdr_preds = np.array([lo,mid,hi])
pred3 = pred2*rdr_preds
pred4 = pred3*self.final_beta
pred5 = global2local(pred4,buybox)
return pred5
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
def ImportModel(filename):
model_object = pickle.load(open(filename, 'rb'))
print('Model Imported FROM: '+filename)
return model_object
def GetToday():
today = datetime.datetime.today()
return str(today)[:10]
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
def ConvertToShort(thing):
if thing in long2short: return long2short[thing]
return thing
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
def GetCategories2():
return sorted(long2short)
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
def SpreetailPredict(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
if (not date1) or (str(date1).lower()=='today'): date1 = GetToday()
index1 = bisect_left(AllDates,date1)
if len(str(date2)) >10: date2 = str(date2)[:10]
if len(str(date2))==10: index2 = bisect_left(AllDates,date2)
else: index2 = index1+int(date2)
index_dif = abs(index2-index1)
index1 = min([index1,index2])
index2 = index1+index_dif
DateRange = AllDates[index1:index2+1]
LEN = len(DateRange)
#--------------------------------------
tdf = pd.DataFrame()
tdf['DATE'] = DateRange
#--------------------------------------
if 'list' in str(type(cat)):
cat = [ConvertToShort(a) for a in cat]
if len(cat)==LEN: tdf['CAT'] = cat
else: tdf['CAT'] = cat[0]
else: tdf['CAT'] = ConvertToShort(cat)
#--------------------------------------
if 'list' in str(type(rank)):
if len(rank)==LEN: tdf['RANK'] = rank
elif len(rank)==2:
r1,r2 = tuple(rank)
tdf['RANK'] = np.linspace(r1,r2,LEN)
else: tdf['RANK'] = rank[0]
else: tdf['RANK'] = rank
#--------------------------------------
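    # Clamp the marketplace-distribution ratio to [0.3, 0.99]; other_ratio is
    # the multiplier applied to the 100%-buy-box prediction to estimate demand
    # from the remaining (1 - md_ratio) share of the market.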
md_ratio2 = max(0.3,min(md_ratio,0.99))
other_ratio = (1.0-md_ratio2)/md_ratio2
tdf['BBR'] = bb_ratio
tdf['MDR'] = md_ratio2
#--------------------------------------
M = tdf.values
results = []
for row in M:
d,c,r = tuple(row[:3])
pred_100 = Models2[c].predict(r,d,100.0)
pred_bbr = Models2[c].predict(r,d,100.0*bb_ratio)
results.append([pred_100,pred_bbr])
tdf['P_100'] = [r[0][1] for r in results]
tdf['P_100_HI'] = [r[0][2] for r in results]
tdf['P_100_LO'] = [r[0][0] for r in results]
tdf['P_BBR'] = [r[1][1] for r in results]
tdf['P_BBR_HI'] = [r[1][2] for r in results]
tdf['P_BBR_LO'] = [r[1][0] for r in results]
tdf['P_OTH'] = other_ratio * tdf['P_100']
tdf['P_OTH_HI'] = other_ratio * tdf['P_100_HI']
tdf['P_OTH_LO'] = other_ratio * tdf['P_100_LO']
tdf['P_TOT'] = tdf['P_BBR'] +tdf['P_OTH']
tdf['P_TOT_HI'] = tdf['P_BBR_HI']+tdf['P_OTH_HI']
tdf['P_TOT_LO'] = tdf['P_BBR_LO']+tdf['P_OTH_LO']
cols = list(tdf.columns)[5:]
for col in cols:
col2 = col+'_C'
tdf[col2] = np.cumsum(tdf[col])
Matrix = [list(tdf.columns)]
for row in tdf.values:
Matrix.append(list(row))
MainPred = list(tdf['P_TOT_C'])[-1]
return [MainPred,Matrix]
def SpreePred(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
result = SpreetailPredict(cat,rank,date1,date2,bb_ratio,md_ratio)
M = result[1]
cols,m = M[0],M[1:]
return pd.DataFrame(m,columns=cols)
#################################################################
#################################################################
# [END]
| [
"pandas.DataFrame",
"numpy.array",
"numpy.linspace",
"datetime.datetime.today",
"numpy.cumsum",
"warnings.filterwarnings"
]
| [((265, 298), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (288, 298), False, 'import warnings\n'), ((2092, 2117), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2115, 2117), False, 'import datetime, pickle\n'), ((4079, 4093), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4091, 4093), True, 'import pandas as pd\n'), ((6042, 6071), 'pandas.DataFrame', 'pd.DataFrame', (['m'], {'columns': 'cols'}), '(m, columns=cols)\n', (6054, 6071), True, 'import pandas as pd\n'), ((1329, 1352), 'numpy.array', 'np.array', (['[lo, mid, hi]'], {}), '([lo, mid, hi])\n', (1337, 1352), True, 'import numpy as np\n'), ((5686, 5705), 'numpy.cumsum', 'np.cumsum', (['tdf[col]'], {}), '(tdf[col])\n', (5695, 5705), True, 'import numpy as np\n'), ((4520, 4544), 'numpy.linspace', 'np.linspace', (['r1', 'r2', 'LEN'], {}), '(r1, r2, LEN)\n', (4531, 4544), True, 'import numpy as np\n')] |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.objects.platform import PlatformVersion
from ..resources.strings import prompts
from ..resources.statics import namespaces, option_names
from ..core import io
from ..lib import elasticbeanstalk
from . import commonops
def _get_warning_message(confirm, single, rolling_enabled, webserver, noroll):
if confirm:
return None
elif single:
return prompts['upgrade.singleinstance']
elif not rolling_enabled and noroll:
return prompts['upgrade.norollingforce']
elif not rolling_enabled:
if webserver:
type = 'Health'
else:
type = 'Time'
return prompts['upgrade.norollingapply'].format(type)
elif rolling_enabled:
return prompts['upgrade.rollingupdate']
def _should_add_rolling(single, rolling_enabled, noroll):
if noroll:
return False
if single:
return False
if rolling_enabled:
return False
return True
def upgrade_env(app_name, env_name, timeout, confirm, noroll):
env = elasticbeanstalk.get_environment_settings(app_name, env_name)
latest = commonops.get_latest_solution_stack(env.platform.version)
if latest == env.platform:
io.echo(prompts['upgrade.alreadylatest'])
return
else:
single = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ENVIRONMENT,
'EnvironmentType') == 'SingleInstance'
rolling_enabled = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED) == 'true'
webserver = env.tier.name.lower() == 'webserver'
io.echo()
io.echo(prompts['upgrade.infodialog'].format(env_name))
io.echo('Current platform:', env.platform)
io.echo('Latest platform: ', latest)
io.echo()
warning = _get_warning_message(confirm, single,
rolling_enabled, webserver, noroll)
if warning:
io.log_warning(warning)
io.echo(prompts['upgrade.altmessage'])
io.echo()
if not confirm:
# Get confirmation
io.validate_action(prompts['upgrade.validate'], env.name)
add_rolling = _should_add_rolling(single, rolling_enabled, noroll)
do_upgrade(env_name, add_rolling, timeout, latest.name,
health_based=webserver, platform_arn = latest.version)
def do_upgrade(env_name, add_rolling, timeout, solution_stack_name,
health_based=False, platform_arn=None):
if add_rolling:
if health_based:
roll_type = 'Health'
else:
roll_type = 'Time'
changes = [
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED,
'true'),
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_TYPE,
roll_type)
]
io.log_warning(prompts['upgrade.applyrolling'].format(roll_type))
else:
changes = None
if PlatformVersion.is_valid_arn(platform_arn):
commonops.update_environment(
env_name, changes, None, timeout=timeout,
platform_arn=platform_arn)
else:
commonops.update_environment(
env_name, changes, None, timeout=timeout,
solution_stack_name=solution_stack_name) | [
"ebcli.objects.platform.PlatformVersion.is_valid_arn"
]
| [((3768, 3810), 'ebcli.objects.platform.PlatformVersion.is_valid_arn', 'PlatformVersion.is_valid_arn', (['platform_arn'], {}), '(platform_arn)\n', (3796, 3810), False, 'from ebcli.objects.platform import PlatformVersion\n')] |
''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
import logging
import boto3
from perimeterator.helper import aws_elb_arn
from perimeterator.helper import dns_lookup
class Enumerator(object):
''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
# Required for Boto and reporting.
SERVICE = 'elb'
def __init__(self, region):
self.logger = logging.getLogger(__name__)
self.region = region
self.client = boto3.client(self.SERVICE, region_name=region)
def get(self):
''' Attempt to get all Public IPs from ELBs. '''
resources = []
# Iterate over results until AWS no longer returns a 'NextMarker' in
# order to ensure all results are retrieved.
marker = ''
while marker is not None:
# Unfortunately, Marker=None or Marker='' is invalid for this API
# call, so it looks like we can't just set this to a None value,
# or use a ternary here.
if marker:
candidates = self.client.describe_load_balancers(
Marker=marker
)
else:
candidates = self.client.describe_load_balancers()
# Check if we need to continue paging.
if "NextMarker" in candidates:
self.logger.debug(
"'NextMarker' found, additional page of results to fetch"
)
marker = candidates["NextMarker"]
else:
marker = None
# For some odd reason the AWS API doesn't appear to allow a
# filter on describe operations for ELBs, so we'll have to filter
# manually.
for elb in candidates["LoadBalancerDescriptions"]:
self.logger.debug(
"Inspecting ELB %s", elb["LoadBalancerName"],
)
if elb["Scheme"] != "internet-facing":
self.logger.debug("ELB is not internet facing")
continue
# Lookup the DNS name for this ELB to get the current IPs. We
# also need to construct the ARN, as it's not provided in the
# output from a describe operation (?!)
resources.append({
"service": self.SERVICE,
"identifier": aws_elb_arn(
self.region,
elb["LoadBalancerName"]
),
"cname": elb["DNSName"],
"addresses": dns_lookup(elb["DNSName"]),
})
self.logger.info("Got IPs for %s resources", len(resources))
return resources
| [
"logging.getLogger",
"perimeterator.helper.aws_elb_arn",
"boto3.client",
"perimeterator.helper.dns_lookup"
]
| [((389, 416), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (406, 416), False, 'import logging\n'), ((468, 514), 'boto3.client', 'boto3.client', (['self.SERVICE'], {'region_name': 'region'}), '(self.SERVICE, region_name=region)\n', (480, 514), False, 'import boto3\n'), ((2378, 2427), 'perimeterator.helper.aws_elb_arn', 'aws_elb_arn', (['self.region', "elb['LoadBalancerName']"], {}), "(self.region, elb['LoadBalancerName'])\n", (2389, 2427), False, 'from perimeterator.helper import aws_elb_arn\n'), ((2577, 2603), 'perimeterator.helper.dns_lookup', 'dns_lookup', (["elb['DNSName']"], {}), "(elb['DNSName'])\n", (2587, 2603), False, 'from perimeterator.helper import dns_lookup\n')] |
import numpy as np
import spikemetrics.metrics as metrics
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
from spikemetrics.utils import Epoch, printProgressBar
from collections import OrderedDict
from .parameter_dictionaries import get_recording_gui_params, get_feature_gui_params
def make_curator_gui_params(params):
keys = list(params.keys())
types = [type(params[key]) for key in keys]
values = [params[key] for key in keys]
gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "Mode to compute noise SNR ('mad' | 'std' - default 'mad')"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Number of seconds to compute noise level from (default 10.0)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Maximum number of spikes to compute templates from (default 1000)"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Use 'mean' or 'median' to compute templates"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[5], 'type': 'int', 'value': values[5], 'default': values[5], 'title': "Random seed for reproducibility"},
{'name': keys[6], 'type': str(types[6].__name__), 'value': values[6], 'default': values[6], 'title': "If True, will be verbose in metric computation."},]
curator_gui_params = [{'name': 'threshold', 'type': 'float', 'title': "The threshold for the given metric."},
{'name': 'threshold_sign', 'type': 'str',
'title': "If 'less', will threshold any metric less than the given threshold. "
"If 'less_or_equal', will threshold any metric less than or equal to the given threshold. "
"If 'greater', will threshold any metric greater than the given threshold. "
"If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold."}]
gui_params = curator_gui_params + gui_params + get_recording_gui_params() + get_feature_gui_params()
return gui_params
class SNR(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('snr_mode',"mad"), ('snr_noise_duration',10.0), ('max_spikes_per_unit_for_snr',1000),
('template_mode', "median"), ('max_channel_peak', "both"), ('seed',None), ('verbose',False)])
curator_name = "ThresholdSNR"
curator_gui_params = make_curator_gui_params(params)
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="snr")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property):
snrs_epochs = []
for epoch in self._metric_data._epochs:
epoch_recording = self._metric_data._recording.get_epoch(epoch[0])
epoch_sorting = self._metric_data._sorting.get_epoch(epoch[0])
channel_noise_levels = _compute_channel_noise_levels(
recording=epoch_recording,
mode=snr_mode,
noise_duration=snr_noise_duration,
seed=seed,
)
templates = st.postprocessing.get_unit_templates(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
mode=template_mode,
save_wf_as_features=save_features_props,
recompute_waveforms=recompute_info,
save_as_property=save_features_props,
seed=seed,
)
max_channels = st.postprocessing.get_unit_max_channels(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
peak=max_channel_peak,
recompute_templates=recompute_info,
save_as_property=save_features_props,
mode=template_mode,
seed=seed,
)
snr_list = []
for i, unit_id in enumerate(self._metric_data._unit_ids):
if self._metric_data.verbose:
printProgressBar(i + 1, len(self._metric_data._unit_ids))
max_channel_idx = epoch_recording.get_channel_ids().index(
max_channels[i]
)
snr = _compute_template_SNR(
templates[i], channel_noise_levels, max_channel_idx
)
snr_list.append(snr)
snrs = np.asarray(snr_list)
snrs_epochs.append(snrs)
if save_as_property:
self.save_as_property(self._metric_data._sorting, snrs_epochs, self._metric_name)
return snrs_epochs
def threshold_metric(self, threshold, threshold_sign, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props, recompute_info,
seed, save_as_property):
snrs_epochs = self.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property)[0]
threshold_curator = ThresholdCurator(
sorting=self._metric_data._sorting, metrics_epoch=snrs_epochs
)
threshold_curator.threshold_sorting(
threshold=threshold, threshold_sign=threshold_sign
)
return threshold_curator
def _compute_template_SNR(template, channel_noise_levels, max_channel_idx):
"""
Computes SNR on the channel with largest amplitude
Parameters
----------
template: np.array
Template (n_elec, n_timepoints)
channel_noise_levels: list
Noise levels for the different channels
max_channel_idx: int
        Index of channel with the largest template amplitude
Returns
-------
snr: float
Signal-to-noise ratio for the template
"""
snr = (
np.max(np.abs(template[max_channel_idx]))
/ channel_noise_levels[max_channel_idx]
)
return snr
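# Illustrative sketch (not part of the original module): a toy check of the
# helper above, assuming a 4-channel template with a single -50 peak on
# channel 2 and a flat noise level of 5 on every channel.
#
#   >>> toy_template = np.zeros((4, 30)); toy_template[2, 10] = -50.0
#   >>> _compute_template_SNR(toy_template, [5.0, 5.0, 5.0, 5.0], max_channel_idx=2)
#   10.0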
def _compute_channel_noise_levels(recording, mode, noise_duration, seed):
"""
Computes noise level channel-wise
Parameters
----------
recording: RecordingExtractor
        The recording extractor object
    mode: str
        'std' or 'mad'; 'mad' estimates the noise level as
        median(|X|) / 0.6745, a robust stand-in for the standard deviation
    noise_duration: float
        Number of seconds of the recording used to estimate the noise level
    seed: int
        Random seed used to pick the snippet of traces when the recording is
        longer than noise_duration
Returns
-------
    noise_levels: list
Noise levels for each channel
"""
M = recording.get_num_channels()
n_frames = int(noise_duration * recording.get_sampling_frequency())
if n_frames >= recording.get_num_frames():
start_frame = 0
end_frame = recording.get_num_frames()
else:
start_frame = np.random.RandomState(seed=seed).randint(
0, recording.get_num_frames() - n_frames
)
end_frame = start_frame + n_frames
X = recording.get_traces(start_frame=start_frame, end_frame=end_frame)
noise_levels = []
for ch in range(M):
if mode == "std":
noise_level = np.std(X[ch, :])
elif mode == "mad":
noise_level = np.median(np.abs(X[ch, :]) / 0.6745)
else:
raise Exception("'mode' can be 'std' or 'mad'")
noise_levels.append(noise_level)
return noise_levels | [
"numpy.abs",
"collections.OrderedDict",
"spiketoolkit.postprocessing.get_unit_templates",
"numpy.asarray",
"numpy.std",
"spiketoolkit.postprocessing.get_unit_max_channels",
"numpy.random.RandomState"
]
| [((2688, 2896), 'collections.OrderedDict', 'OrderedDict', (["[('snr_mode', 'mad'), ('snr_noise_duration', 10.0), (\n 'max_spikes_per_unit_for_snr', 1000), ('template_mode', 'median'), (\n 'max_channel_peak', 'both'), ('seed', None), ('verbose', False)]"], {}), "([('snr_mode', 'mad'), ('snr_noise_duration', 10.0), (\n 'max_spikes_per_unit_for_snr', 1000), ('template_mode', 'median'), (\n 'max_channel_peak', 'both'), ('seed', None), ('verbose', False)])\n", (2699, 2896), False, 'from collections import OrderedDict\n'), ((3939, 4258), 'spiketoolkit.postprocessing.get_unit_templates', 'st.postprocessing.get_unit_templates', (['epoch_recording', 'epoch_sorting'], {'unit_ids': 'self._metric_data._unit_ids', 'max_spikes_per_unit': 'max_spikes_per_unit_for_snr', 'mode': 'template_mode', 'save_wf_as_features': 'save_features_props', 'recompute_waveforms': 'recompute_info', 'save_as_property': 'save_features_props', 'seed': 'seed'}), '(epoch_recording, epoch_sorting,\n unit_ids=self._metric_data._unit_ids, max_spikes_per_unit=\n max_spikes_per_unit_for_snr, mode=template_mode, save_wf_as_features=\n save_features_props, recompute_waveforms=recompute_info,\n save_as_property=save_features_props, seed=seed)\n', (3975, 4258), True, 'import spiketoolkit as st\n'), ((4428, 4733), 'spiketoolkit.postprocessing.get_unit_max_channels', 'st.postprocessing.get_unit_max_channels', (['epoch_recording', 'epoch_sorting'], {'unit_ids': 'self._metric_data._unit_ids', 'max_spikes_per_unit': 'max_spikes_per_unit_for_snr', 'peak': 'max_channel_peak', 'recompute_templates': 'recompute_info', 'save_as_property': 'save_features_props', 'mode': 'template_mode', 'seed': 'seed'}), '(epoch_recording, epoch_sorting,\n unit_ids=self._metric_data._unit_ids, max_spikes_per_unit=\n max_spikes_per_unit_for_snr, peak=max_channel_peak, recompute_templates\n =recompute_info, save_as_property=save_features_props, mode=\n template_mode, seed=seed)\n', (4467, 4733), True, 'import spiketoolkit as st\n'), ((5414, 5434), 'numpy.asarray', 'np.asarray', (['snr_list'], {}), '(snr_list)\n', (5424, 5434), True, 'import numpy as np\n'), ((6959, 6992), 'numpy.abs', 'np.abs', (['template[max_channel_idx]'], {}), '(template[max_channel_idx])\n', (6965, 6992), True, 'import numpy as np\n'), ((8083, 8099), 'numpy.std', 'np.std', (['X[ch, :]'], {}), '(X[ch, :])\n', (8089, 8099), True, 'import numpy as np\n'), ((7761, 7793), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (7782, 7793), True, 'import numpy as np\n'), ((8164, 8180), 'numpy.abs', 'np.abs', (['X[ch, :]'], {}), '(X[ch, :])\n', (8170, 8180), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
"""
Error correct reads based on a counting hash from a diginorm step.
Output sequences will be put in inputfile.corr.
% python scripts/error-correct-pass2 [ options ] <counting.ct> <readfile>
Use '-h' for parameter help.
"""
import sys
import os
import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType
DEFAULT_CUTOFF = 2
def output_single(read, new_sequence):
name = read.name
sequence = new_sequence
quality = None
if hasattr(read, 'quality'):
quality = read.quality[:len(sequence)]
        sequence = sequence[:len(quality)]  # trim in case correction _lengthened_ the sequence
if quality:
assert len(sequence) == len(quality), (sequence, quality)
return "@%s\n%s\n+\n%s\n" % (name, sequence, quality)
else:
return ">%s\n%s\n" % (name, sequence)
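%s">
# Illustrative behaviour (added note): a read carrying a quality string is
# re-emitted as a four-line FASTQ record ("@name\nSEQ\n+\nQUAL\n") with the
# sequence and quality trimmed to a common length; reads without quality fall
# back to a two-line FASTA record (">name\nSEQ\n").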
def main():
parser = khmer_args.build_counting_args(
"Correct reads against an already-computed table",
citations=['counting', 'SeqAn'])
parser.add_argument("--trusted-cov", dest="trusted_cov", type=int,
default=DEFAULT_CUTOFF)
parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
parser.add_argument('-o', '--output', dest='output_file',
help="output file for histogram; defaults to "
"<first filename>.corr in cwd.",
type=khFileType('w'), default=None)
parser.add_argument('counts_table')
parser.add_argument('readfile')
args = parser.parse_args()
print('loading counts')
ht = Countgraph.load(args.counts_table)
aligner = khmer.ReadAligner(ht,
args.trusted_cov,
args.bits_theta)
print("trusted:", args.trusted_cov)
corrfp = args.output_file
if not corrfp:
outfile = os.path.basename(args.readfile) + '.corr'
corrfp = open(outfile, 'w')
n_corrected = 0
for n, read in enumerate(screed.open(args.readfile)):
if n % 10000 == 0:
print('...', n, n_corrected, file=sys.stderr)
seq = read.sequence.replace('N', 'A')
# build the alignment...
score, graph_alignment, read_alignment, truncated = \
aligner.align(seq)
if not truncated:
graph_seq = graph_alignment.replace("-", "")
if graph_seq != seq:
n_corrected += 1
seq = graph_seq
corrfp.write(output_single(read, seq))
if __name__ == '__main__':
main()
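# Example invocation (file names are illustrative):
#
#   $ python scripts/error-correct-pass2 --trusted-cov 2 counts.ct reads.fq
#
# Corrected reads are written to ./reads.fq.corr unless -o/--output is given.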
| [
"screed.open",
"khmer.Countgraph.load",
"khmer.khmer_args.FileType",
"khmer.ReadAligner",
"khmer.khmer_args.build_counting_args",
"os.path.basename"
]
| [((2677, 2801), 'khmer.khmer_args.build_counting_args', 'khmer_args.build_counting_args', (['"""Correct reads against an already-computed table"""'], {'citations': "['counting', 'SeqAn']"}), "(\n 'Correct reads against an already-computed table', citations=[\n 'counting', 'SeqAn'])\n", (2707, 2801), False, 'from khmer import khmer_args\n'), ((3410, 3444), 'khmer.Countgraph.load', 'Countgraph.load', (['args.counts_table'], {}), '(args.counts_table)\n', (3425, 3444), False, 'from khmer import Countgraph\n'), ((3460, 3516), 'khmer.ReadAligner', 'khmer.ReadAligner', (['ht', 'args.trusted_cov', 'args.bits_theta'], {}), '(ht, args.trusted_cov, args.bits_theta)\n', (3477, 3516), False, 'import khmer\n'), ((3818, 3844), 'screed.open', 'screed.open', (['args.readfile'], {}), '(args.readfile)\n', (3829, 3844), False, 'import screed\n'), ((3232, 3247), 'khmer.khmer_args.FileType', 'khFileType', (['"""w"""'], {}), "('w')\n", (3242, 3247), True, 'from khmer.khmer_args import FileType as khFileType\n'), ((3690, 3721), 'os.path.basename', 'os.path.basename', (['args.readfile'], {}), '(args.readfile)\n', (3706, 3721), False, 'import os\n')] |
## Requires Python v3 and pandas (pip install pandas)
## This script takes the newcastle membership csv and attempts
## to reduce the file size as much as possible through aggregation and lookups
## Two lookup files to provide library names and dates are also created.
import csv
import os
import re
from datetime import datetime
import pandas
MEMBERDATA = '..\\data\\dashboard_newcastle_members.csv'
def read_member_data():
    csv_path = os.path.join(os.path.dirname(__file__), MEMBERDATA)
    member_data_frame = pandas.read_csv(csv_path)
    return member_data_frame
def run():
members = read_member_data()
postcodes = members['Postcode'].unique()
libraries = members['Library Registered At'].unique()
dates_added = members['Date Added'].unique()
times_added = members['Date Added'].unique()
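    # Hedged sketch (not in the original script): one way the lookup files
    # promised in the header comments could be written from the unique values
    # above; the "lookups/" directory and file names are assumptions.
    #
    #   pandas.Series(libraries, name='Library').to_csv('lookups/libraries.csv', index_label='Id')
    #   pandas.Series(dates_added, name='Date Added').to_csv('lookups/dates.csv', index_label='Id')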
run() | [
"os.path.dirname"
]
| [((512, 537), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (527, 537), False, 'import os\n')] |
#!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from getpass import getpass
from hashlib import pbkdf2_hmac
from signal import signal, SIGINT
def die(*_, **__):
sys.exit()
signal = signal(SIGINT, die)
iwd = """[Security]
PreSharedKey={psk}"""
supplicant = """network={{
ssid={ssid}
#psk={passphrase}
psk={psk}
}}"""
parser = ArgumentParser(
description="%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."
)
parser.add_argument("ssid", help="The SSID whose passphrase should be derived.")
parser.add_argument(
"passphrase",
help="The passphrase to use. If not included on the command line, passphrase will be read from standard input.",
nargs="?",
)
parser.add_argument(
"--iwd",
"-i",
dest="template",
action="store_const",
const=iwd,
default=supplicant,
help="Generate for iwd (default: generate for wpa_supplicant).",
)
args = parser.parse_args()
if not args.passphrase:
print("# reading passphrase from stdin", file=sys.stderr)
args.passphrase = getpass(prompt="")
if not 8 <= len(args.passphrase) <= 63:
print("Passphrase must be 8..63 characters", file=sys.stderr)
sys.exit(1)
passphrase = args.passphrase.encode()
if any(b < 32 or b == 127 for b in passphrase):
print("Invalid passphrase character", file=sys.stderr)
sys.exit(1)
ssid = args.ssid.encode()
psk = pbkdf2_hmac("sha1", passphrase, ssid, iterations=4096, dklen=32)
print(args.template.format(ssid=args.ssid, passphrase=args.passphrase, psk=psk.hex()))
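# Example session (script name and network details are illustrative):
#
#   $ ./wifi-psk.py --iwd HomeNetwork correcthorsebatterystaple
#   [Security]
#   PreSharedKey=<64 hex chars: PBKDF2-HMAC-SHA1, 4096 iterations, 32-byte key>
#
# Without --iwd the wpa_supplicant network block is printed instead; its psk=
# line should match what the stock `wpa_passphrase` tool prints for the same
# SSID/passphrase pair.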
| [
"signal.signal",
"argparse.ArgumentParser",
"getpass.getpass",
"hashlib.pbkdf2_hmac",
"sys.exit"
]
| [((210, 229), 'signal.signal', 'signal', (['SIGINT', 'die'], {}), '(SIGINT, die)\n', (216, 229), False, 'from signal import signal, SIGINT\n'), ((369, 573), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."""'}), "(description=\n '%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK.'\n )\n", (383, 573), False, 'from argparse import ArgumentParser\n'), ((1496, 1560), 'hashlib.pbkdf2_hmac', 'pbkdf2_hmac', (['"""sha1"""', 'passphrase', 'ssid'], {'iterations': '(4096)', 'dklen': '(32)'}), "('sha1', passphrase, ssid, iterations=4096, dklen=32)\n", (1507, 1560), False, 'from hashlib import pbkdf2_hmac\n'), ((189, 199), 'sys.exit', 'sys.exit', ([], {}), '()\n', (197, 199), False, 'import sys\n'), ((1161, 1179), 'getpass.getpass', 'getpass', ([], {'prompt': '""""""'}), "(prompt='')\n", (1168, 1179), False, 'from getpass import getpass\n'), ((1290, 1301), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1298, 1301), False, 'import sys\n'), ((1451, 1462), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1459, 1462), False, 'import sys\n')] |
from cached_property import cached_property
from purl import URL
from onegov.translator_directory import _
from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler
from onegov.core.utils import linkify
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.translator_directory.collections.documents import \
TranslatorDocumentCollection
from onegov.translator_directory.collections.language import LanguageCollection
from onegov.translator_directory.collections.translator import \
TranslatorCollection
from onegov.translator_directory.constants import member_can_see, \
editor_can_see, GENDERS, ADMISSIONS, PROFESSIONAL_GUILDS, \
INTERPRETING_TYPES
class DefaultLayout(BaseLayout):
@staticmethod
def linkify(text):
return linkify(text)
@staticmethod
def format_languages(languages):
return ', '.join(sorted((lang.name for lang in languages or [])))
def format_gender(self, gender):
return self.request.translate(GENDERS[gender])
@staticmethod
def format_drive_distance(number):
if not number:
return ''
return f'{number} km'
def format_boolean(self, val):
assert isinstance(val, bool)
return self.request.translate((_('Yes') if val else _('No')))
def format_admission(self, val):
return self.request.translate(ADMISSIONS[val])
def show(self, attribute_name):
"""Some attributes on the translator are hidden for less privileged
users"""
if self.request.is_member:
return attribute_name in member_can_see
if self.request.is_editor:
return attribute_name in editor_can_see
return True
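    # Illustrative use (added; the attribute name is hypothetical): a template
    # can call ``layout.show('email')`` before rendering a field, so members
    # and editors only see attributes whitelisted in member_can_see /
    # editor_can_see, while all other roles see everything.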
def color_class(self, count):
""" Depending how rare a language is offered by translators,
apply a color code using the returned css class
"""
if count <= 5:
return 'text-orange'
def format_prof_guild(self, key):
return self.request.translate(PROFESSIONAL_GUILDS[key])
def format_interpreting_type(self, key):
return self.request.translate(INTERPRETING_TYPES[key])
class TranslatorLayout(DefaultLayout):
@cached_property
def file_collection(self):
return TranslatorDocumentCollection(
self.request.session,
translator_id=self.model.id,
category=None
)
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
title=_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
)
),
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit'
),
attrs={'class': 'edit-link'}
),
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this translator?"),
_("This cannot be undone."),
_("Delete translator"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
Link(
_('Documents'),
self.request.link(self.file_collection),
attrs={'class': 'documents'}
),
]
elif self.request.is_editor:
return [
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit-restricted'
),
attrs={'class': 'edit-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
]
elif self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(text=self.model.title)
]
return links
class EditTranslatorLayout(TranslatorLayout):
@cached_property
def title(self):
return _('Edit translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Edit')))
return links
class TranslatorCollectionLayout(DefaultLayout):
@cached_property
def title(self):
return _('Search for translators')
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
)
]
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
)
)
),
Link(
_('Export Excel'),
url=self.request.class_link(
TranslatorCollection, name='export'
),
attrs={'class': 'export-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
elif self.request.is_editor or self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
class AddTranslatorLayout(TranslatorCollectionLayout):
@cached_property
def title(self):
return _('Add translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
class TranslatorDocumentsLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(
text=self.model.translator.title,
url=self.request.link(self.model.translator)
),
Link(text=_('Documents'))
]
@cached_property
def upload_url(self):
url = URL(self.request.link(self.model, name='upload'))
url = url.query_param('category', self.model.category)
return self.csrf_protected_url(url.as_string())
def link_for(self, category):
return self.request.class_link(
self.model.__class__,
{'translator_id': self.model.translator_id, 'category': category}
)
class LanguageCollectionLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Languages')))
return links
@property
def editbar_links(self):
return [LinkGroup(
_('Add'),
links=(
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
),
)
)] if self.request.is_admin else []
class LanguageLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(
Link(_('Languages'),
url=self.request.class_link(LanguageCollection))
)
return links
class EditLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(self.model.name))
links.append(Link(_('Edit')))
return links
@cached_property
def editbar_links(self):
if self.request.is_admin:
if not self.model.deletable:
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Block(
_("This language is used and can't be "
"deleted."),
no=_("Cancel")
),
)
),
]
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this language?"),
_("This cannot be undone."),
_("Delete language"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
]
return []
class AddLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
| [
"onegov.translator_directory.collections.documents.TranslatorDocumentCollection",
"onegov.core.elements.Link",
"onegov.translator_directory._",
"onegov.core.utils.linkify"
]
| [((796, 809), 'onegov.core.utils.linkify', 'linkify', (['text'], {}), '(text)\n', (803, 809), False, 'from onegov.core.utils import linkify\n'), ((2275, 2374), 'onegov.translator_directory.collections.documents.TranslatorDocumentCollection', 'TranslatorDocumentCollection', (['self.request.session'], {'translator_id': 'self.model.id', 'category': 'None'}), '(self.request.session, translator_id=self.model\n .id, category=None)\n', (2303, 2374), False, 'from onegov.translator_directory.collections.documents import TranslatorDocumentCollection\n'), ((5742, 5762), 'onegov.translator_directory._', '_', (['"""Edit translator"""'], {}), "('Edit translator')\n", (5743, 5762), False, 'from onegov.translator_directory import _\n'), ((6016, 6043), 'onegov.translator_directory._', '_', (['"""Search for translators"""'], {}), "('Search for translators')\n", (6017, 6043), False, 'from onegov.translator_directory import _\n'), ((8066, 8085), 'onegov.translator_directory._', '_', (['"""Add translator"""'], {}), "('Add translator')\n", (8067, 8085), False, 'from onegov.translator_directory import _\n'), ((10225, 10246), 'onegov.core.elements.Link', 'Link', (['self.model.name'], {}), '(self.model.name)\n', (10229, 10246), False, 'from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler\n'), ((1278, 1286), 'onegov.translator_directory._', '_', (['"""Yes"""'], {}), "('Yes')\n", (1279, 1286), False, 'from onegov.translator_directory import _\n'), ((1299, 1306), 'onegov.translator_directory._', '_', (['"""No"""'], {}), "('No')\n", (1300, 1306), False, 'from onegov.translator_directory import _\n'), ((5577, 5604), 'onegov.core.elements.Link', 'Link', ([], {'text': 'self.model.title'}), '(text=self.model.title)\n', (5581, 5604), False, 'from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler\n'), ((5874, 5883), 'onegov.translator_directory._', '_', (['"""Edit"""'], {}), "('Edit')\n", (5875, 5883), False, 'from onegov.translator_directory import _\n'), ((8197, 8205), 'onegov.translator_directory._', '_', (['"""Add"""'], {}), "('Add')\n", (8198, 8205), False, 'from onegov.translator_directory import _\n'), ((9340, 9354), 'onegov.translator_directory._', '_', (['"""Languages"""'], {}), "('Languages')\n", (9341, 9354), False, 'from onegov.translator_directory import _\n'), ((9969, 9983), 'onegov.translator_directory._', '_', (['"""Languages"""'], {}), "('Languages')\n", (9970, 9983), False, 'from onegov.translator_directory import _\n'), ((10274, 10283), 'onegov.translator_directory._', '_', (['"""Edit"""'], {}), "('Edit')\n", (10275, 10283), False, 'from onegov.translator_directory import _\n'), ((12127, 12135), 'onegov.translator_directory._', '_', (['"""Add"""'], {}), "('Add')\n", (12128, 12135), False, 'from onegov.translator_directory import _\n'), ((3258, 3269), 'onegov.translator_directory._', '_', (['"""Delete"""'], {}), "('Delete')\n", (3259, 3269), False, 'from onegov.translator_directory import _\n'), ((4146, 4167), 'onegov.translator_directory._', '_', (['"""Voucher template"""'], {}), "('Voucher template')\n", (4147, 4167), False, 'from onegov.translator_directory import _\n'), ((4359, 4373), 'onegov.translator_directory._', '_', (['"""Documents"""'], {}), "('Documents')\n", (4360, 4373), False, 'from onegov.translator_directory import _\n'), ((6432, 6440), 'onegov.translator_directory._', '_', (['"""Add"""'], {}), "('Add')\n", (6433, 6440), False, 'from onegov.translator_directory import _\n'), ((7194, 7211), 'onegov.translator_directory._', '_', (['"""Export 
Excel"""'], {}), "('Export Excel')\n", (7195, 7211), False, 'from onegov.translator_directory import _\n'), ((7457, 7478), 'onegov.translator_directory._', '_', (['"""Voucher template"""'], {}), "('Voucher template')\n", (7458, 7478), False, 'from onegov.translator_directory import _\n'), ((9461, 9469), 'onegov.translator_directory._', '_', (['"""Add"""'], {}), "('Add')\n", (9462, 9469), False, 'from onegov.translator_directory import _\n'), ((11106, 11117), 'onegov.translator_directory._', '_', (['"""Delete"""'], {}), "('Delete')\n", (11107, 11117), False, 'from onegov.translator_directory import _\n'), ((2575, 2583), 'onegov.translator_directory._', '_', (['"""Add"""'], {}), "('Add')\n", (2576, 2583), False, 'from onegov.translator_directory import _\n'), ((3023, 3032), 'onegov.translator_directory._', '_', (['"""Edit"""'], {}), "('Edit')\n", (3024, 3032), False, 'from onegov.translator_directory import _\n'), ((4869, 4890), 'onegov.translator_directory._', '_', (['"""Voucher template"""'], {}), "('Voucher template')\n", (4870, 4890), False, 'from onegov.translator_directory import _\n'), ((5466, 5482), 'onegov.translator_directory._', '_', (['"""Translators"""'], {}), "('Translators')\n", (5467, 5482), False, 'from onegov.translator_directory import _\n'), ((6171, 6187), 'onegov.translator_directory._', '_', (['"""Translators"""'], {}), "('Translators')\n", (6172, 6187), False, 'from onegov.translator_directory import _\n'), ((7767, 7788), 'onegov.translator_directory._', '_', (['"""Voucher template"""'], {}), "('Voucher template')\n", (7768, 7788), False, 'from onegov.translator_directory import _\n'), ((8468, 8484), 'onegov.translator_directory._', '_', (['"""Translators"""'], {}), "('Translators')\n", (8469, 8484), False, 'from onegov.translator_directory import _\n'), ((8733, 8747), 'onegov.translator_directory._', '_', (['"""Documents"""'], {}), "('Documents')\n", (8734, 8747), False, 'from onegov.translator_directory import _\n'), ((10508, 10519), 'onegov.translator_directory._', '_', (['"""Delete"""'], {}), "('Delete')\n", (10509, 10519), False, 'from onegov.translator_directory import _\n'), ((4623, 4632), 'onegov.translator_directory._', '_', (['"""Edit"""'], {}), "('Edit')\n", (4624, 4632), False, 'from onegov.translator_directory import _\n'), ((5154, 5175), 'onegov.translator_directory._', '_', (['"""Voucher template"""'], {}), "('Voucher template')\n", (5155, 5175), False, 'from onegov.translator_directory import _\n'), ((3535, 3585), 'onegov.translator_directory._', '_', (['"""Do you really want to delete this translator?"""'], {}), "('Do you really want to delete this translator?')\n", (3536, 3585), False, 'from onegov.translator_directory import _\n'), ((3648, 3675), 'onegov.translator_directory._', '_', (['"""This cannot be undone."""'], {}), "('This cannot be undone.')\n", (3649, 3675), False, 'from onegov.translator_directory import _\n'), ((3705, 3727), 'onegov.translator_directory._', '_', (['"""Delete translator"""'], {}), "('Delete translator')\n", (3706, 3727), False, 'from onegov.translator_directory import _\n'), ((3757, 3768), 'onegov.translator_directory._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (3758, 3768), False, 'from onegov.translator_directory import _\n'), ((11383, 11431), 'onegov.translator_directory._', '_', (['"""Do you really want to delete this language?"""'], {}), "('Do you really want to delete this language?')\n", (11384, 11431), False, 'from onegov.translator_directory import _\n'), ((11494, 11521), 'onegov.translator_directory._', '_', 
(['"""This cannot be undone."""'], {}), "('This cannot be undone.')\n", (11495, 11521), False, 'from onegov.translator_directory import _\n'), ((11551, 11571), 'onegov.translator_directory._', '_', (['"""Delete language"""'], {}), "('Delete language')\n", (11552, 11571), False, 'from onegov.translator_directory import _\n'), ((11601, 11612), 'onegov.translator_directory._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (11602, 11612), False, 'from onegov.translator_directory import _\n'), ((2676, 2695), 'onegov.translator_directory._', '_', (['"""Add translator"""'], {}), "('Add translator')\n", (2677, 2695), False, 'from onegov.translator_directory import _\n'), ((6533, 6552), 'onegov.translator_directory._', '_', (['"""Add translator"""'], {}), "('Add translator')\n", (6534, 6552), False, 'from onegov.translator_directory import _\n'), ((6855, 6872), 'onegov.translator_directory._', '_', (['"""Add language"""'], {}), "('Add language')\n", (6856, 6872), False, 'from onegov.translator_directory import _\n'), ((9538, 9555), 'onegov.translator_directory._', '_', (['"""Add language"""'], {}), "('Add language')\n", (9539, 9555), False, 'from onegov.translator_directory import _\n'), ((10811, 10859), 'onegov.translator_directory._', '_', (['"""This language is used and can\'t be deleted."""'], {}), '("This language is used and can\'t be deleted.")\n', (10812, 10859), False, 'from onegov.translator_directory import _\n'), ((10933, 10944), 'onegov.translator_directory._', '_', (['"""Cancel"""'], {}), "('Cancel')\n", (10934, 10944), False, 'from onegov.translator_directory import _\n')] |
import pandas as pd
import numpy as np
import io
def info(df):
print("------------DIMENSIONS------------")
print("Rows:", df.shape[0])
print("Columns:", df.shape[1])
print("--------------DTYPES--------------")
columns = df.columns.tolist()
integers = df.select_dtypes("integer").columns.tolist()
floats = df.select_dtypes("float").columns.tolist()
bools = df.select_dtypes("bool").columns.tolist()
objects = df.select_dtypes("object").columns.tolist()
dataType = []
for el in columns:
if el in integers:
dataType.append('int')
if el in floats:
dataType.append('float')
if el in bools:
dataType.append('bool')
if el in objects:
dataType.append('object')
d = {'Column' : columns, 'Type': dataType}
print(pd.DataFrame(d))
print("----------MISSING VALUES----------")
print("Is any value missing? ", np.where(df.isnull().values.any() == False, "No", "Yes"), "\n")
buf = io.StringIO()
df.info(buf=buf)
info = buf.getvalue().split('\n')[-2].split(":")[1].strip()
print("----------MEMORY USAGE------------ \n", info) | [
"pandas.DataFrame",
"io.StringIO"
]
| [((1014, 1027), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1025, 1027), False, 'import io\n'), ((836, 851), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (848, 851), True, 'import pandas as pd\n')] |
from django.conf.urls import url, include
from .views import (GroupListAPIView,
GroupCreateAPIView,
AgendaListAPIView,
AgendaDetailAPIView,
AgendaCreateAPIView,
AgendaPostAPIView,
agenda_create,
AgendaRefreshAPIView,
NumberInGroupAPIView,
GroupProfileDetailAPIView,
GroupProfileUpdateAPIView,
number_in_group)
urlpatterns = [
url(r'^group/$', GroupListAPIView.as_view(), name="group_list"),
url(r'^group/create/$', GroupCreateAPIView.as_view(), name="group_create"),
url(r'agenda-list/$', AgendaListAPIView.as_view(), name="agenda_list"),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/detail/$', AgendaDetailAPIView.as_view(), name='agenda_detail'),
# url(r'^create/$', AgendaCreateAPIView.as_view(), name='agenda_create'),
url(r'^(?P<group_id>\d+)/post2/$', AgendaPostAPIView.as_view(), name='agenda_create2'), # recommended api
url(r'^(?P<group_id>\d+)/post/$', agenda_create, name='agenda_create'),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/refresh/$', AgendaRefreshAPIView.as_view(), name='agenda_refresh'),
url(r'^(?P<id>\d+)/number/$', NumberInGroupAPIView.as_view(), name="number"),
url(r'^(?P<group_id>\d+)/(?P<date>\d{4}-\d{2}-\d{2})/number/$', number_in_group, name="number2"),
url(r'^(?P<group_id>\d+)/group-profile/$', GroupProfileDetailAPIView.as_view(), name="group_profile"),
url(r'^(?P<group_id>\d+)/group-profile/update/$', GroupProfileUpdateAPIView.as_view(), name="group_profile_update"),
]
| [
"django.conf.urls.url"
]
| [((1071, 1141), 'django.conf.urls.url', 'url', (['"""^(?P<group_id>\\\\d+)/post/$"""', 'agenda_create'], {'name': '"""agenda_create"""'}), "('^(?P<group_id>\\\\d+)/post/$', agenda_create, name='agenda_create')\n", (1074, 1141), False, 'from django.conf.urls import url, include\n'), ((1338, 1441), 'django.conf.urls.url', 'url', (['"""^(?P<group_id>\\\\d+)/(?P<date>\\\\d{4}-\\\\d{2}-\\\\d{2})/number/$"""', 'number_in_group'], {'name': '"""number2"""'}), "('^(?P<group_id>\\\\d+)/(?P<date>\\\\d{4}-\\\\d{2}-\\\\d{2})/number/$',\n number_in_group, name='number2')\n", (1341, 1441), False, 'from django.conf.urls import url, include\n')] |
from django.contrib import admin
# Register your models here.
# import Shop from models
from .models import Shop
from .models import Parsed_data
from .models import Img_data
from .models import Other
admin.site.register(Shop)
admin.site.register(Parsed_data)
admin.site.register(Img_data)
admin.site.register(Other)
| [
"django.contrib.admin.site.register"
]
| [((196, 221), 'django.contrib.admin.site.register', 'admin.site.register', (['Shop'], {}), '(Shop)\n', (215, 221), False, 'from django.contrib import admin\n'), ((222, 254), 'django.contrib.admin.site.register', 'admin.site.register', (['Parsed_data'], {}), '(Parsed_data)\n', (241, 254), False, 'from django.contrib import admin\n'), ((255, 284), 'django.contrib.admin.site.register', 'admin.site.register', (['Img_data'], {}), '(Img_data)\n', (274, 284), False, 'from django.contrib import admin\n'), ((285, 311), 'django.contrib.admin.site.register', 'admin.site.register', (['Other'], {}), '(Other)\n', (304, 311), False, 'from django.contrib import admin\n')] |
# -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False, indice_key=indice_key)
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=stride,
padding=(0, 1, 1), bias=False, indice_key=indice_key)
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3), stride=stride,
padding=(0, 0, 1), bias=False, indice_key=indice_key)
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1), stride=stride,
padding=(0, 1, 0), bias=False, indice_key=indice_key)
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1), stride=stride,
padding=(1, 0, 0), bias=False, indice_key=indice_key)
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3), stride=stride,
padding=(1, 0, 1), bias=False, indice_key=indice_key)
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=1, bias=False, indice_key=indice_key)
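# Note (added): the asymmetric helpers above let the blocks below factorise a
# dense 3x3x3 convolution, e.g. pairing conv1x3 (kernel (1, 3, 3)) with
# conv3x1 (kernel (3, 1, 3)) uses 9 + 9 kernel positions per channel pair
# instead of 27 for a full 3x3x3 kernel, while covering a similar receptive
# field.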
class ResContextBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ResContextBlock, self).__init__()
self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.LeakyReLU()
self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.LeakyReLU()
self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
return resA
class ResBlock(nn.Module):
def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
pooling=True, drop_out=True, height_pooling=False, indice_key=None):
super(ResBlock, self).__init__()
self.pooling = pooling
self.drop_out = drop_out
self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act1 = nn.LeakyReLU()
self.bn0 = nn.BatchNorm1d(out_filters)
self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act1_2 = nn.LeakyReLU()
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
# self.conv4 = conv3x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act4 = nn.LeakyReLU()
# self.bn4 = nn.BatchNorm1d(out_filters)
if pooling:
# self.dropout = nn.Dropout3d(p=dropout_rate)
if height_pooling:
# self.pool = spconv.SparseMaxPool3d(kernel_size=2, stride=2)
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
padding=1, indice_key=indice_key, bias=False)
else:
# self.pool = spconv.SparseMaxPool3d(kernel_size=(2,2,1), stride=(2, 2, 1))
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
padding=1, indice_key=indice_key, bias=False)
# else:
# self.dropout = nn.Dropout3d(p=dropout_rate)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
# resA = self.conv4(resA)
# resA.features = self.act4(resA.features)
# resA.features = self.bn4(resA.features)
if self.pooling:
# if self.drop_out:
# resB = self.dropout(resA.features)
# else:
# resB = resA
resB = self.pool(resA)
return resB, resA
else:
# if self.drop_out:
# resB = self.dropout(resA)
# else:
# resB = resA
return resA
class UpBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
super(UpBlock, self).__init__()
# self.drop_out = drop_out
#self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
self.trans_act = nn.LeakyReLU()
self.trans_bn = nn.BatchNorm1d(out_filters)
# self.dropout1 = nn.Dropout3d(p=dropout_rate)
# self.dropout2 = nn.Dropout3d(p=dropout_rate)
self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
self.act1 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
self.act2 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
self.act3 = nn.LeakyReLU()
self.bn3 = nn.BatchNorm1d(out_filters)
# self.dropout3 = nn.Dropout3d(p=dropout_rate)
self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
def forward(self, x, skip):
upA = self.trans_dilao(x)
#if upA.shape != skip.shape:
# upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
upA.features = self.trans_act(upA.features)
upA.features = self.trans_bn(upA.features)
## upsample
upA = self.up_subm(upA)
# upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
# if self.drop_out:
# upA = self.dropout1(upA)
upA.features = upA.features + skip.features
# if self.drop_out:
# upB = self.dropout2(upB)
upE = self.conv1(upA)
upE.features = self.act1(upE.features)
upE.features = self.bn1(upE.features)
upE = self.conv2(upE)
upE.features = self.act2(upE.features)
upE.features = self.bn2(upE.features)
upE = self.conv3(upE)
upE.features = self.act3(upE.features)
upE.features = self.bn3(upE.features)
# if self.drop_out:
# upE = self.dropout3(upE)
return upE
class ReconBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ReconBlock, self).__init__()
self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.Sigmoid()
self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.Sigmoid()
self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_3 = nn.BatchNorm1d(out_filters)
self.act1_3 = nn.Sigmoid()
# self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
# self.act2 = nn.LeakyReLU()
# self.bn1 = nn.BatchNorm1d(out_filters)
#
# self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act3 = nn.LeakyReLU()
# self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.bn0(shortcut.features)
shortcut.features = self.act1(shortcut.features)
shortcut2 = self.conv1_2(x)
shortcut2.features = self.bn0_2(shortcut2.features)
shortcut2.features = self.act1_2(shortcut2.features)
shortcut3 = self.conv1_3(x)
shortcut3.features = self.bn0_3(shortcut3.features)
shortcut3.features = self.act1_3(shortcut3.features)
# resA = self.conv2(x)
# resA.features = self.act2(resA.features)
# resA.features = self.bn1(resA.features)
#
# resA = self.conv3(resA)
# resA.features = self.act3(resA.features)
# resA.features = self.bn2(resA.features)
shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
shortcut.features = shortcut.features * x.features
return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_salsaNet_res_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads
return up0e, up0e
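# Added summary of the encoder/decoder above: resBlock2/3 downsample all three
# axes by 2 (height_pooling=True), resBlock4/5 downsample only the first two
# axes (stride (2, 2, 1)), so the bottleneck is 16x coarser in x/y and 4x in z.
# The four UpBlocks undo this with SparseInverseConv3d, and the ReconBlock
# output is concatenated with its input so the returned tensor carries
# 4 * init_size features per voxel, matching the semantic and instance heads.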
class Spconv_sem_logits_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_sem_logits_head_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, fea):
logits = self.logits(fea)
return logits.dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.pt_fea_dim = 4 * init_size
self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL
self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
self.act1 = nn.LeakyReLU()
self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
self.bn2 = nn.BatchNorm1d(2 * init_size)
self.act2 = nn.LeakyReLU()
self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
self.bn3 = nn.BatchNorm1d(init_size)
self.act3 = nn.LeakyReLU()
self.offset = nn.Sequential(
nn.Linear(init_size+3, init_size, bias=True),
nn.BatchNorm1d(init_size),
nn.ReLU()
)
self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)
def forward(self, fea, batch):
fea = self.conv1(fea)
fea.features = self.act1(self.bn1(fea.features))
fea = self.conv2(fea)
fea.features = self.act2(self.bn2(fea.features))
fea = self.conv3(fea)
fea.features = self.act3(self.bn3(fea.features))
grid_ind = batch['grid']
xyz = batch['pt_cart_xyz']
fea = fea.dense()
fea = fea.permute(0, 2, 3, 4, 1)
pt_ins_fea_list = []
for batch_i, grid_ind_i in enumerate(grid_ind):
pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
pt_pred_offsets_list = []
for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))
return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
nclasses = 20, n_height = 32, strict=False, init_size=16):
super(Spconv_alsaNet_res, self).__init__()
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
        # import pdb
        # pdb.set_trace()  # leftover debugging breakpoint, disabled
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1)
# up2e = self.upBlock3(up3e, down2b)
# up1e = self.upBlock4(up2e, down1b)
# up0e = self.upBlock5(up1e, down0b)
# up0e_gap = nn.AdaptiveAvgPool3d((1))(up0e)
# up0e_gap = F.interpolate(up0e_gap, size=(up0e.size()[2:]), mode='trilinear', align_corners=True)
# up0e = torch.cat((up0e, up0e_gap), dim=1)
logits = self.logits(up0e)
y = logits.dense()
# y = logits.permute(0, 1, 3, 4, 2)
return y
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"spconv.SparseInverseConv3d",
"torch.from_numpy",
"spconv.SubMConv3d",
"torch.nn.BatchNorm1d",
"numpy.array",
"spconv.SparseConvTensor",
"spconv.SparseConv3d",
"torch.nn.Linear",
"pdb.set_trace",
"torch.cat"
]
| [((276, 396), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n', (293, 396), False, 'import spconv\n'), ((490, 627), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 3, 3)', 'stride': 'stride', 'padding': '(0, 1, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 3, 3), stride=\n stride, padding=(0, 1, 1), bias=False, indice_key=indice_key)\n', (507, 627), False, 'import spconv\n'), ((721, 858), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 1, 3)', 'stride': 'stride', 'padding': '(0, 0, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 1, 3), stride=\n stride, padding=(0, 0, 1), bias=False, indice_key=indice_key)\n', (738, 858), False, 'import spconv\n'), ((952, 1089), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 3, 1)', 'stride': 'stride', 'padding': '(0, 1, 0)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 3, 1), stride=\n stride, padding=(0, 1, 0), bias=False, indice_key=indice_key)\n', (969, 1089), False, 'import spconv\n'), ((1183, 1320), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 1, 1)', 'stride': 'stride', 'padding': '(1, 0, 0)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(3, 1, 1), stride=\n stride, padding=(1, 0, 0), bias=False, indice_key=indice_key)\n', (1200, 1320), False, 'import spconv\n'), ((1413, 1550), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 1, 3)', 'stride': 'stride', 'padding': '(1, 0, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(3, 1, 3), stride=\n stride, padding=(1, 0, 1), bias=False, indice_key=indice_key)\n', (1430, 1550), False, 'import spconv\n'), ((1642, 1762), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n', (1659, 1762), False, 'import spconv\n'), ((2066, 2093), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2080, 2093), False, 'from torch import nn\n'), ((2114, 2128), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2126, 2128), False, 'from torch import nn\n'), ((2237, 2264), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2251, 2264), False, 'from torch import nn\n'), ((2287, 2301), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2299, 2301), False, 'from torch import nn\n'), ((2406, 2420), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2418, 2420), False, 'from torch import nn\n'), ((2440, 2467), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2454, 2467), False, 'from torch import nn\n'), ((2573, 2587), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2585, 2587), False, 'from torch import nn\n'), ((2607, 2634), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), 
'(out_filters)\n', (2621, 2634), False, 'from torch import nn\n'), ((3722, 3736), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3734, 3736), False, 'from torch import nn\n'), ((3756, 3783), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (3770, 3783), False, 'from torch import nn\n'), ((3893, 3907), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3905, 3907), False, 'from torch import nn\n'), ((3929, 3956), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (3943, 3956), False, 'from torch import nn\n'), ((4061, 4075), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4073, 4075), False, 'from torch import nn\n'), ((4095, 4122), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (4109, 4122), False, 'from torch import nn\n'), ((4228, 4242), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4240, 4242), False, 'from torch import nn\n'), ((4262, 4289), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (4276, 4289), False, 'from torch import nn\n'), ((6777, 6791), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (6789, 6791), False, 'from torch import nn\n'), ((6816, 6843), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (6830, 6843), False, 'from torch import nn\n'), ((7055, 7069), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7067, 7069), False, 'from torch import nn\n'), ((7089, 7116), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7103, 7116), False, 'from torch import nn\n'), ((7217, 7231), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7229, 7231), False, 'from torch import nn\n'), ((7251, 7278), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7265, 7278), False, 'from torch import nn\n'), ((7378, 7392), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7390, 7392), False, 'from torch import nn\n'), ((7412, 7439), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7426, 7439), False, 'from torch import nn\n'), ((7519, 7621), 'spconv.SparseInverseConv3d', 'spconv.SparseInverseConv3d', (['out_filters', 'out_filters'], {'kernel_size': '(3)', 'indice_key': 'up_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3,\n indice_key=up_key, bias=False)\n', (7545, 7621), False, 'import spconv\n'), ((8966, 8993), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (8980, 8993), False, 'from torch import nn\n'), ((9014, 9026), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9024, 9026), False, 'from torch import nn\n'), ((9136, 9163), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (9150, 9163), False, 'from torch import nn\n'), ((9186, 9198), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9196, 9198), False, 'from torch import nn\n'), ((9308, 9335), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (9322, 9335), False, 'from torch import nn\n'), ((9358, 9370), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9368, 9370), False, 'from torch import nn\n'), ((11340, 11362), 'numpy.array', 'np.array', (['output_shape'], {}), '(output_shape)\n', (11348, 11362), True, 'import numpy as np\n'), ((13123, 13200), 'spconv.SparseConvTensor', 'spconv.SparseConvTensor', (['voxel_features', 'coors', 'self.sparse_shape', 'batch_size'], {}), 
'(voxel_features, coors, self.sparse_shape, batch_size)\n', (13146, 13200), False, 'import spconv\n'), ((13828, 13872), 'torch.cat', 'torch.cat', (['(up0e.features, up1e.features)', '(1)'], {}), '((up0e.features, up1e.features), 1)\n', (13837, 13872), False, 'import torch\n'), ((14559, 14673), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['(4 * init_size)', 'nclasses'], {'indice_key': '"""logit"""', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), "(4 * init_size, nclasses, indice_key='logit', kernel_size=\n 3, stride=1, padding=1, bias=True)\n", (14576, 14673), False, 'import spconv\n'), ((15211, 15242), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.pt_fea_dim'], {}), '(self.pt_fea_dim)\n', (15225, 15242), False, 'from torch import nn\n'), ((15263, 15277), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15275, 15277), False, 'from torch import nn\n'), ((15390, 15419), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(2 * init_size)'], {}), '(2 * init_size)\n', (15404, 15419), False, 'from torch import nn\n'), ((15440, 15454), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15452, 15454), False, 'from torch import nn\n'), ((15561, 15586), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['init_size'], {}), '(init_size)\n', (15575, 15586), False, 'from torch import nn\n'), ((15607, 15621), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15619, 15621), False, 'from torch import nn\n'), ((15818, 15869), 'torch.nn.Linear', 'nn.Linear', (['init_size', 'self.embedding_dim'], {'bias': '(True)'}), '(init_size, self.embedding_dim, bias=True)\n', (15827, 15869), False, 'from torch import nn\n'), ((17210, 17232), 'numpy.array', 'np.array', (['output_shape'], {}), '(output_shape)\n', (17218, 17232), True, 'import numpy as np\n'), ((18914, 19028), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['(4 * init_size)', 'nclasses'], {'indice_key': '"""logit"""', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), "(4 * init_size, nclasses, indice_key='logit', kernel_size=\n 3, stride=1, padding=1, bias=True)\n", (18931, 19028), False, 'import spconv\n'), ((19167, 19182), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (19180, 19182), False, 'import pdb\n'), ((19197, 19274), 'spconv.SparseConvTensor', 'spconv.SparseConvTensor', (['voxel_features', 'coors', 'self.sparse_shape', 'batch_size'], {}), '(voxel_features, coors, self.sparse_shape, batch_size)\n', (19220, 19274), False, 'import spconv\n'), ((19902, 19946), 'torch.cat', 'torch.cat', (['(up0e.features, up1e.features)', '(1)'], {}), '((up0e.features, up1e.features), 1)\n', (19911, 19946), False, 'import torch\n'), ((15672, 15718), 'torch.nn.Linear', 'nn.Linear', (['(init_size + 3)', 'init_size'], {'bias': '(True)'}), '(init_size + 3, init_size, bias=True)\n', (15681, 15718), False, 'from torch import nn\n'), ((15730, 15755), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['init_size'], {}), '(init_size)\n', (15744, 15755), False, 'from torch import nn\n'), ((15769, 15778), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15776, 15778), False, 'from torch import nn\n'), ((4679, 4799), 'spconv.SparseConv3d', 'spconv.SparseConv3d', (['out_filters', 'out_filters'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'indice_key': 'indice_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3, stride=2,\n padding=1, indice_key=indice_key, bias=False)\n', (4698, 4799), False, 'import spconv\n'), ((4955, 5084), 'spconv.SparseConv3d', 'spconv.SparseConv3d', (['out_filters', 
'out_filters'], {'kernel_size': '(3)', 'stride': '(2, 2, 1)', 'padding': '(1)', 'indice_key': 'indice_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3, stride=(2, 2, \n 1), padding=1, indice_key=indice_key, bias=False)\n', (4974, 5084), False, 'import spconv\n'), ((16699, 16729), 'torch.from_numpy', 'torch.from_numpy', (['xyz[batch_i]'], {}), '(xyz[batch_i])\n', (16715, 16729), False, 'import torch\n')] |
import numpy as np
from prml.dimreduction.pca import PCA
class BayesianPCA(PCA):
def fit(self, X, iter_max=100, initial="random"):
"""
        Empirical Bayes estimation of PCA parameters
Parameters
----------
X : (sample_size, n_features) ndarray
input data
iter_max : int
            maximum number of EM steps
Returns
-------
mean : (n_features,) ndarray
            sample mean of the input data
W : (n_features, n_components) ndarray
projection matrix
var : float
variance of observation noise
"""
initial_list = ["random", "eigen"]
self.mean = np.mean(X, axis=0)
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("availabel initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(X, 1), self.n_components)
#self.W = np.random.randn(np.size(X, 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(X)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
for i in range(iter_max):
W = np.copy(self.W)
stats = self._expectation(X - self.mean)
self._maximization(X - self.mean, *stats)
#self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
#if np.allclose(W, self.W):
# break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(X, 1))
self.Cinv = np.linalg.inv(self.C)
def _maximization(self, X, Ez, Ezz):
self.W = X.T @ Ez @ np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha))
self.var = np.mean(
np.mean(X ** 2, axis=-1)
- 2 * np.mean(Ez @ self.W.T * X, axis=-1)
+ np.trace((Ezz @ self.W.T @ self.W).T) / len(self.mean))
def maximize(self, D, Ez, Ezz):
self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
self.var = np.mean(
np.mean(D ** 2, axis=-1)
- 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
+ np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
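# A brief usage sketch, assuming the PCA base class from prml accepts
# `n_components` in its constructor and provides the `_expectation` step used
# by `fit` above; the data below is toy data, not part of the original module.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_toy = rng.randn(200, 5) @ rng.randn(5, 5)  # toy correlated observations
    bpca = BayesianPCA(n_components=2)
    bpca.fit(X_toy, iter_max=50, initial="random")
    print(bpca.mean.shape, bpca.W.shape, bpca.var)  # (5,), (5, 2), scalar noise variance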
| [
"numpy.mean",
"numpy.eye",
"numpy.copy",
"numpy.trace",
"numpy.size",
"numpy.diag",
"numpy.sum",
"numpy.linalg.inv"
]
| [((699, 717), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (706, 717), True, 'import numpy as np\n'), ((735, 760), 'numpy.eye', 'np.eye', (['self.n_components'], {}), '(self.n_components)\n', (741, 760), True, 'import numpy as np\n'), ((1650, 1671), 'numpy.linalg.inv', 'np.linalg.inv', (['self.C'], {}), '(self.C)\n', (1663, 1671), True, 'import numpy as np\n'), ((1259, 1274), 'numpy.copy', 'np.copy', (['self.W'], {}), '(self.W)\n', (1266, 1274), True, 'import numpy as np\n'), ((936, 949), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (943, 949), True, 'import numpy as np\n'), ((1165, 1192), 'numpy.sum', 'np.sum', (['(self.W ** 2)'], {'axis': '(0)'}), '(self.W ** 2, axis=0)\n', (1171, 1192), True, 'import numpy as np\n'), ((1615, 1628), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (1622, 1628), True, 'import numpy as np\n'), ((1757, 1776), 'numpy.sum', 'np.sum', (['Ezz'], {'axis': '(0)'}), '(Ezz, axis=0)\n', (1763, 1776), True, 'import numpy as np\n'), ((1851, 1875), 'numpy.mean', 'np.mean', (['(X ** 2)'], {'axis': '(-1)'}), '(X ** 2, axis=-1)\n', (1858, 1875), True, 'import numpy as np\n'), ((1944, 1981), 'numpy.trace', 'np.trace', (['(Ezz @ self.W.T @ self.W).T'], {}), '((Ezz @ self.W.T @ self.W).T)\n', (1952, 1981), True, 'import numpy as np\n'), ((2084, 2103), 'numpy.sum', 'np.sum', (['Ezz'], {'axis': '(0)'}), '(Ezz, axis=0)\n', (2090, 2103), True, 'import numpy as np\n'), ((2179, 2203), 'numpy.mean', 'np.mean', (['(D ** 2)'], {'axis': '(-1)'}), '(D ** 2, axis=-1)\n', (2186, 2203), True, 'import numpy as np\n'), ((1790, 1809), 'numpy.diag', 'np.diag', (['self.alpha'], {}), '(self.alpha)\n', (1797, 1809), True, 'import numpy as np\n'), ((1894, 1929), 'numpy.mean', 'np.mean', (['(Ez @ self.W.T * X)'], {'axis': '(-1)'}), '(Ez @ self.W.T * X, axis=-1)\n', (1901, 1929), True, 'import numpy as np\n'), ((2117, 2136), 'numpy.diag', 'np.diag', (['self.alpha'], {}), '(self.alpha)\n', (2124, 2136), True, 'import numpy as np\n')] |
""" Twisted adapter for Kanone """
from twisted.python.failure import Failure
from twisted.internet import defer
from ..lib import Invalid
from ..util import varargs2kwargs
import logging, sys
log = logging.getLogger( __name__ )
# hacky and redundant, but it'll do for now ..
# TODO: move to proper twisted specific classes under .tx.*
# and get rid of the monkey
_python3 = sys.version_info[0]>=3
def monkeyPatch():
"""
Patches Kanone so that any validation returns a Deferred, thus
one can write asynchronous validators using Twisted's non-blocking API.
Schema and ForEach fields are validated concurrently.
"""
if getattr( monkeyPatch,'_isMonkeyPatched',False):
return
from ..lib import Context, PASS, MISSING
from ..validator.core import Tag, Compose, Tmp, Item, Not, And, Or, Call, If
from ..validator.check import Match
from ..validator.schema import Schema, ForEach, Field
from ..validator.web import MXLookup
@defer.inlineCallbacks
def context_validate( self ):
if self.isValidated:
if self.__error__ is not MISSING:
raise self.__error__
defer.returnValue( self.__result__ )
self.isValidating = True
if self.parent is not None:
if not self.parent.isValidated and not self.parent.isValidating:
yield defer.maybeDeferred\
( self.parent.validate
)
if not self.validator:
raise AttributeError("No validator set for context '%s'" % self.path )
result = defer.maybeDeferred\
( self.validator.validate
, self
, self.__value__
)
result.addErrback( context_gotError, self )
result = yield result
self.isValidated = True
self.isValidating = False
if self.__error__ is not MISSING:
raise self.__error__
else:
if result is not PASS:
self.__result__ = result
else:
self.__result__ = self.__value__
            defer.returnValue( self.__result__ )
def context_gotError( error, self ):
e = error.value
if not isinstance( e, Invalid ):
self.__error__ = error
return
self.__error__ = e
e.context = self
message = e.validator.__messages__[e.key]
if message is not None:
extra = e.data['extra']
value = e.value
data = e.data
data['message'] = message
if hasattr(e,'realkey'):
data['key'] = e.realkey
extra['value.type'] = getattr(value, '__class__', None) is not None \
and getattr(value.__class__,'__name__', False) or 'unknown'
if isinstance(value,str) or not _python3 and isinstance(value,basestring):
extra['value'] = value
else:
extra['value'] = str(value)
cache = getattr( self, 'cache', None)
if cache is not None:
extra.update( cache )
self['error'] = self.__error__.data
self.root.errorlist.append( self.__error__.context.path )
def tag_gotResult( result, d, validator, tagName ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
e = result.value
if e.validator is validator or getattr(e,'composer',None) is validator:
e.tagName = tagName
d.errback( e )
else:
d.callback( result )
def tag_validate( self, context, value ):
validator = context.root.taggedValidators.get(self.tagID, None)
if validator is None:
validator = self.enabled and self.validator
if not validator:
return value
d = defer.Deferred()
result = defer.maybeDeferred\
( validator.validate
, context
, value
)
result.addBoth( tag_gotResult, d, validator, self.tagName )
return d
def compose_gotResult( result, d, context, tmpTags, composer ):
context.root.taggedValidators = tmpTags
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
e = result.value
if hasattr(e,'tagName'):
e.realkey = "%s_%s" % (e.tagName, getattr(e,'realkey',e.key))
e.composer = composer
del e.tagName
d.errback( e )
else:
d.callback( result )
def compose_validate( self, context, value ):
tmpTags = context.root.taggedValidators
context.root.taggedValidators = self.currentTaggedValidators
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( compose_gotResult, d, context, tmpTags, self )
return d
def tmp_gotReslt( result, d, raiseError, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
if raiseError:
d.errback( result.value )
return
d.callback( value )
def tmp_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( tmp_gotReslt, d, self.raiseError, value )
return d
def item_gotResult( result, d, value, key, alter ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
d.errback( result.value )
else:
if alter:
value[key] = result
d.callback( value )
def item_validate( self, context, value ):
try:
val = value[ self.key ]
except TypeError:
raise Invalid( value, self, 'type' )
except (KeyError, IndexError):
raise Invalid( value, self, 'notFound', key=self.key )
else:
if self.validator is not None:
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, val
)
result.addBoth( item_gotResult, d , value, self.key, self.alter )
return d
else:
return val
def not_gotResult( result, d, value, validator ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
d.callback( value )
else:
d.errback( Invalid( value, validator ) )
def not_validate(self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( not_gotResult, d, value, self )
return d
def and_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
e = result.value
d.errback( e )
else:
if validators:
and_tryNext( validators, context, result, d )
else:
d.callback( result )
def and_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( and_doTryNext, validators, context, value, d )
def and_validate( self, context, value ):
d = defer.Deferred()
and_tryNext( list( self.validators ), context, value, d )
return d
def or_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
err = result
if not isinstance(err.value, Invalid):
d.errback( err )
return
e = err.value
if not validators:
d.errback( e )
else:
or_tryNext( validators, context, value, d )
else:
d.callback( result )
def or_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( or_doTryNext, validators, context, value, d )
def or_validate( self, context, value ):
d = defer.Deferred()
or_tryNext( list(self.validators), context, value, d )
return d
@defer.inlineCallbacks
def call_validate( self, context, value ):
try:
result = yield defer.maybeDeferred\
( self.__func__
, context
, value
)
except Failure as e:
if not isinstance(e.value, Invalid):
raise
e = e.value
e.validator = self
raise e
else:
defer.returnValue( result )
def match_gotResult( result, self, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
raise
d.errback( Invalid( value, self, matchType=self.type, criterion=result.value ) )
else:
val = value
if self.ignoreCase:
result = str(result).lower()
val = str(value).lower()
if val != result:
d.errback( Invalid( value, self, matchType=self.type, criterion=result ) )
else:
d.callback( value )
def match_on_value(self, context, value ):
if self.type is Match.REGEX:
if not self.criterion.match(value):
raise Invalid( value, self, matchType=self.type, criterion=self.criterion.pattern)
return value
elif self.type is Match.VALIDATOR:
compare = defer.maybeDeferred\
( self.criterion.validate
, context
, value
)
d = defer.Deferred()
compare.addBoth( match_gotResult, self, value, d )
return d
else:
compare = self.criterion
val = value
if self.ignoreCase:
compare = str(compare).lower()
val = str(value).lower()
if val != compare:
raise Invalid( value, self, matchType=self.type, criterion=compare )
return value
def if_gotResult( result, d, context, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
d.errback( result.value )
else:
d.callback( result )
def if_gotResultExpression( result, validator, d, context, value ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid):
raise
value = defer.maybeDeferred\
( validator._else.validate, context, value
)
else:
value = defer.maybeDeferred\
( validator._then.validate, context, result
)
value.addBoth( if_gotResult, d, context, value )
def if_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred( self.criterion.validate, context, value )
result.addBoth( if_gotResultExpression, self, d, context, value )
return d
def schema_gotResult( result, resultset, key, isList, returnList ):
if returnList:
resultset.append( result )
else:
resultset[ key ] = result
return result
def schema_gotError( error, errorset, key ):
if isinstance( error, Failure ):
if not isinstance(error.value, Invalid):
raise error
error = error.value
errorset.append( error )
def schema__on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( errors.pop(0) )
def schema__createContextChildren_on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( Invalid( value, schema ) )
def schema__on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
if self.returnList:
result = []
else:
result = {}
numValues = len(value)
jobs = []
errorset = []
for pos in range(len(self.index)):
key = self.index[pos]
if isList:
if numValues>pos:
val = value[ pos ]
if not self.allowExtraFields:
extraFields-=1
else:
val = MISSING
else:
val = value.get( key, MISSING)
if not self.allowExtraFields and val is not MISSING:
try: extraFields.remove(key)
except: pass
job = defer.maybeDeferred\
( self.validators[ key ].validate
, context
, val
)
jobs.append\
( job.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errorset, key )
)
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
d = defer.Deferred()
jobs =defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def schema__createContextChildren_on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
errors = []
if self.returnList:
result = []
else:
result = {}
len_value = len(value)
len_index = len(self.index)
# populate
for pos in range(len_index):
key = self.index[pos]
childContext = context( key )
try:
childContext.validator = self.validators[ key ]
except KeyError:
raise SyntaxError("No validator set for %s" % childContext.path)
if isList:
if len_value<=pos:
childContext.__value__ = MISSING
else:
childContext.__value__ = value[ pos ]
else:
childContext.__value__ = value.get( key, MISSING )
if not self.allowExtraFields:
if isList:
extraFields-=1
else:
try: extraFields.remove(key)
except: pass
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
context.setIndexFunc( lambda index: self.index[index] )
jobs = []
# validate
for key in self.index:
jobs.append\
( context( key ).result\
.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errors, key )
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
def forEach__on_value( self, context, value ):
if self.returnList:
result = []
else:
result = {}
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
errorset = []
jobs = []
if isList or self.numericKeys:
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if val is MISSING:
raise Invalid( value, self, 'numericKeys', keys=list(value.keys()) )
else:
val = value[pos]
key = str(pos)
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
else:
for (key, val) in value.items():
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def forEach__createContextChildren_on_value( self, context, value ):
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
if not isList:
if not isinstance(value, dict ):
raise Invalid( value, self,'type' )
if self.returnList:
result = []
else:
result = {}
errors = []
# populate
children = []
if isList or self.numericKeys:
context.setIndexFunc( lambda index: str(index) )
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if value.get(str(pos),MISSING) is MISSING:
context.setIndexFunc( None )
raise Invalid( value, self, 'numericKeys',keys=list(value.keys()))
else:
val = value[ pos ]
contextChild = context( str( pos ) )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
else:
context.setIndexFunc( None )
if self.returnList:
raise Invalid( value, self, 'listType' )
for (key,val) in value.items():
contextChild = context( key )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
jobs = []
#validate
for childContext in children:
jobs.append\
( childContext.validate()\
.addCallback\
( schema_gotResult
, result
, childContext.key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errors
, childContext.key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
@defer.inlineCallbacks
def field_validate(self, context, value):
fieldcontext = self.getField( context, self.path )
if not self.useResult:
result = fieldcontext.value
else:
try:
result = yield fieldcontext.result
except Invalid:
result = PASS
if self.validator is not None:
if result is not PASS:
result = yield defer.maybeDeferred\
( self.validator.validate
, fieldcontext, result
)
if self.writeToContext:
fieldcontext.__result__ = result
if self.copy:
if result is PASS:
defer.returnValue( value )
defer.returnValue( result )
defer.returnValue( value )
from twisted.names import client
from twisted.names.dns import Record_MX
from twisted.names.error import DNSNameError
from twisted.internet.defer import TimeoutError
def mxLookup_gotResult(result, d, value, validator, context ):
if isinstance( result, Failure ):
if isinstance(result.value, TimeoutError):
d.errback( Invalid( value, validator ) )
elif not isinstance(result.value, DNSNameError):
d.errback( result )
else:
d.errback( Invalid( value, validator ) )
return
(answers, auth, add) = result
if not len(answers):
d.errback( Invalid( value, validator ) )
else:
for record in answers:
if isinstance(record.payload,Record_MX):
d.callback( value )
return
d.errback( Invalid( value, validator ) )
mxLookup_resolver = client.Resolver('/etc/resolv.conf')
def mxLookup_on_value( self, context, value ):
d = defer.Deferred()
mxLookup_resolver.lookupMailExchange( value, [2,4,6,8,10] )\
.addBoth( mxLookup_gotResult, d, value, self, context )
return d
Context.validate = context_validate
Tag.validate = tag_validate
    Compose.validate = compose_validate
Tmp.validate = tmp_validate
Item.validate = item_validate
Not.validate = not_validate
And.validate = and_validate
Or.validate = or_validate
Call.validate = call_validate
Match.on_value = match_on_value
If.validate = if_validate
Schema._on_value = schema__on_value
Schema._createContextChildren_on_value = schema__createContextChildren_on_value
ForEach._on_value = forEach__on_value
ForEach._createContextChildren_on_value = forEach__createContextChildren_on_value
Field.validate = field_validate
MXLookup.on_value = mxLookup_on_value
monkeyPatch._isMonkeyPatched = True
from ..util import getArgSpec, getParameterNames
def validateDecorator_gotValidationResult\
( result
, d
, origArgs
, origKwargs
, method
, varargs
, keywords
, shifted
, onInvalid
):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
elif onInvalid is not None:
try:
result = onInvalid( result.value )
except Exception as e:
d.errback( e )
else:
d.callback( result )
else:
d.errback( result )
else:
origKwargs.update( result )
resultArgs = origKwargs.pop( varargs, origArgs )
resultArgs = [ origKwargs.pop(key) for key in shifted ] + resultArgs
if keywords is not False:
origKwargs.update( origKwargs.pop( keywords ) )
defer.maybeDeferred( method, *resultArgs, **origKwargs )\
.chainDeferred( d )
def validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks ):
if include and exclude:
raise SyntaxError("'include' and 'exclude' cannot be used at the same time")
spec = getArgSpec( method )
hasVarargs = spec.varargs is not None
varargs = spec.varargs or '*varargs'
keywords = spec.keywords or False
methodParameterNames = getParameterNames( method, skipSelf=False )
skip = ()
if exclude:
skip = exclude
if include:
skip = set(methodParameterNames) - set(include)
varargs = varargs
hasVarargs = spec.varargs not in skip and hasVarargs
keywords = keywords not in skip and keywords
if inlineCallbacks:
method = defer.inlineCallbacks( method )
def __wrap( *fargs, **fkwargs):
d = defer.Deferred()
(fargs, fkwargs, shifted ) = varargs2kwargs( method, fargs, fkwargs, skipSelf=False )
origKwargs = dict(fkwargs)
if keywords is not False:
restKwargs = dict(\
( key, fkwargs.pop(key))\
for key in list(fkwargs.keys()) if key not in methodParameterNames
)
fkwargs[ keywords ] = restKwargs
if fargs or hasVarargs:
fkwargs[ varargs ] = list(fargs)
result = validator.context\
( dict( ( key, fkwargs[ key] ) for key in fkwargs if key not in skip )
).result
result.addBoth( validateDecorator_gotValidationResult, d, fargs, origKwargs, method, varargs, keywords, shifted, onInvalid )
return d
return __wrap
def validate( validator, include=None, exclude=None, onInvalid=None, inlineCallbacks=False ):
def __createDecorator( method ):
return validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks)
return __createDecorator
| [
"logging.getLogger",
"twisted.names.client.Resolver",
"twisted.internet.defer.maybeDeferred",
"twisted.internet.defer.returnValue",
"twisted.internet.defer.DeferredList",
"twisted.internet.defer.Deferred",
"twisted.internet.defer.inlineCallbacks"
]
| [((201, 228), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'import logging, sys\n'), ((23707, 23742), 'twisted.names.client.Resolver', 'client.Resolver', (['"""/etc/resolv.conf"""'], {}), "('/etc/resolv.conf')\n", (23722, 23742), False, 'from twisted.names import client\n'), ((1596, 1662), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'self', 'self.__value__'], {}), '(self.validator.validate, self, self.__value__)\n', (1615, 1662), False, 'from twisted.internet import defer\n'), ((2147, 2172), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (2164, 2172), False, 'from twisted.internet import defer\n'), ((3981, 3997), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (3995, 3997), False, 'from twisted.internet import defer\n'), ((4015, 4070), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['validator.validate', 'context', 'value'], {}), '(validator.validate, context, value)\n', (4034, 4070), False, 'from twisted.internet import defer\n'), ((4972, 4988), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (4986, 4988), False, 'from twisted.internet import defer\n'), ((5006, 5066), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'value'], {}), '(self.validator.validate, context, value)\n', (5025, 5066), False, 'from twisted.internet import defer\n'), ((5600, 5616), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (5614, 5616), False, 'from twisted.internet import defer\n'), ((5634, 5694), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'value'], {}), '(self.validator.validate, context, value)\n', (5653, 5694), False, 'from twisted.internet import defer\n'), ((7267, 7283), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (7281, 7283), False, 'from twisted.internet import defer\n'), ((7301, 7361), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'value'], {}), '(self.validator.validate, context, value)\n', (7320, 7361), False, 'from twisted.internet import defer\n'), ((8247, 8263), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (8261, 8263), False, 'from twisted.internet import defer\n'), ((9126, 9142), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (9140, 9142), False, 'from twisted.internet import defer\n'), ((12031, 12047), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (12045, 12047), False, 'from twisted.internet import defer\n'), ((12065, 12125), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.criterion.validate', 'context', 'value'], {}), '(self.criterion.validate, context, value)\n', (12084, 12125), False, 'from twisted.internet import defer\n'), ((14713, 14729), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (14727, 14729), False, 'from twisted.internet import defer\n'), ((14744, 14768), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['jobs'], {}), '(jobs)\n', (14762, 14768), False, 'from twisted.internet import defer\n'), ((16940, 16956), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (16954, 16956), False, 'from twisted.internet import defer\n'), ((16972, 16996), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', 
(['jobs'], {}), '(jobs)\n', (16990, 16996), False, 'from twisted.internet import defer\n'), ((19236, 19252), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (19250, 19252), False, 'from twisted.internet import defer\n'), ((19268, 19292), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['jobs'], {}), '(jobs)\n', (19286, 19292), False, 'from twisted.internet import defer\n'), ((21628, 21644), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (21642, 21644), False, 'from twisted.internet import defer\n'), ((21660, 21684), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['jobs'], {}), '(jobs)\n', (21678, 21684), False, 'from twisted.internet import defer\n'), ((22711, 22735), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['value'], {}), '(value)\n', (22728, 22735), False, 'from twisted.internet import defer\n'), ((23806, 23822), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (23820, 23822), False, 'from twisted.internet import defer\n'), ((26523, 26552), 'twisted.internet.defer.inlineCallbacks', 'defer.inlineCallbacks', (['method'], {}), '(method)\n', (26544, 26552), False, 'from twisted.internet import defer\n'), ((26605, 26621), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (26619, 26621), False, 'from twisted.internet import defer\n'), ((1169, 1203), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['self.__result__'], {}), '(self.__result__)\n', (1186, 1203), False, 'from twisted.internet import defer\n'), ((9662, 9687), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (9679, 9687), False, 'from twisted.internet import defer\n'), ((11684, 11745), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['validator._else.validate', 'context', 'value'], {}), '(validator._else.validate, context, value)\n', (11703, 11745), False, 'from twisted.internet import defer\n'), ((11816, 11878), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['validator._then.validate', 'context', 'result'], {}), '(validator._then.validate, context, result)\n', (11835, 11878), False, 'from twisted.internet import defer\n'), ((14256, 14320), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validators[key].validate', 'context', 'val'], {}), '(self.validators[key].validate, context, val)\n', (14275, 14320), False, 'from twisted.internet import defer\n'), ((22674, 22699), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (22691, 22699), False, 'from twisted.internet import defer\n'), ((6559, 6575), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (6573, 6575), False, 'from twisted.internet import defer\n'), ((6601, 6659), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'val'], {}), '(self.validator.validate, context, val)\n', (6620, 6659), False, 'from twisted.internet import defer\n'), ((9340, 9390), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.__func__', 'context', 'value'], {}), '(self.__func__, context, value)\n', (9359, 9390), False, 'from twisted.internet import defer\n'), ((10620, 10680), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.criterion.validate', 'context', 'value'], {}), '(self.criterion.validate, context, value)\n', (10639, 10680), False, 'from twisted.internet import defer\n'), ((10768, 10784), 
'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (10782, 10784), False, 'from twisted.internet import defer\n'), ((22634, 22658), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['value'], {}), '(value)\n', (22651, 22658), False, 'from twisted.internet import defer\n'), ((25693, 25747), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['method', '*resultArgs'], {}), '(method, *resultArgs, **origKwargs)\n', (25712, 25747), False, 'from twisted.internet import defer\n'), ((1377, 1418), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.parent.validate'], {}), '(self.parent.validate)\n', (1396, 1418), False, 'from twisted.internet import defer\n'), ((22354, 22420), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'fieldcontext', 'result'], {}), '(self.validator.validate, fieldcontext, result)\n', (22373, 22420), False, 'from twisted.internet import defer\n'), ((17934, 17992), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'val'], {}), '(self.validator.validate, context, val)\n', (17953, 17992), False, 'from twisted.internet import defer\n'), ((18634, 18692), 'twisted.internet.defer.maybeDeferred', 'defer.maybeDeferred', (['self.validator.validate', 'context', 'val'], {}), '(self.validator.validate, context, val)\n', (18653, 18692), False, 'from twisted.internet import defer\n')] |
"""
Fetches demand statistics.
Modified from <NAME>
Original article:
https://yaledailynews.com/blog/2020/01/10/yales-most-popular-courses/
Github:
https://github.com/iamdanzhao/yale-popular-classes
README:
https://github.com/iamdanzhao/yale-popular-classes/blob/master/data-guide/course_data_guide.md
"""
import argparse
from multiprocessing import Pool
from typing import List, Tuple
import ujson
from ferry import config
from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg
from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates
from ferry.includes.tqdm import tqdm
def handle_season_subject_demand(demand_args: Tuple[str, str, List[str], List[str]]):
"""
    Handler for fetching demand for a single season and subject, to be passed into Pool()
"""
demand_season, demand_subject_code, demand_subject_codes, demand_dates = demand_args
courses = fetch_season_subject_demand(
demand_season, demand_subject_code, demand_subject_codes, demand_dates
)
return courses
if __name__ == "__main__":
class FetchDemandError(Exception):
"""
Error object for demand fetching exceptions.
"""
# pylint: disable=unnecessary-pass
pass
# Set season
# Pass using command line arguments
# Examples: 202001 = 2020 Spring, 201903 = 2019 Fall
# If no season is provided, the program will scrape all available seasons
parser = argparse.ArgumentParser(description="Import demand stats")
add_seasons_args(parser)
args = parser.parse_args()
    # list of seasons previously fetched by fetch_seasons.py
with open(f"{config.DATA_DIR}/demand_seasons.json", "r") as f:
all_viable_seasons = ujson.load(f)
seasons = parse_seasons_arg(args.seasons, all_viable_seasons)
print("Retrieving subjects list... ", end="")
with open(f"{config.DATA_DIR}/demand_subjects.json", "r") as f:
subjects = ujson.load(f)
subject_codes = sorted(list(subjects.keys()))
print("ok")
# set up parallel processing pool
with Pool(processes=64) as pool:
for season in seasons:
print(f"Retrieving demand by subject for season {season}")
dates = get_dates(season)
pool_args = [
(season, subject_code, subject_codes, dates)
for subject_code in subject_codes
]
season_courses = []
# use imap_unordered to report to tqdm
with tqdm(total=len(pool_args), desc="Subjects retrieved") as pbar:
for i, result in enumerate(
pool.imap_unordered(handle_season_subject_demand, pool_args)
):
pbar.update()
season_courses.append(result)
# flatten season courses
season_courses = [x for y in season_courses for x in y]
# sort courses by title (for consistency with ferry-data)
season_courses = sorted(season_courses, key=lambda x: x["title"])
with open(f"{config.DATA_DIR}/demand_stats/{season}_demand.json", "w") as f:
ujson.dump(season_courses, f, indent=4)
| [
"ferry.crawler.common_args.add_seasons_args",
"argparse.ArgumentParser",
"ferry.crawler.common_args.parse_seasons_arg",
"ujson.dump",
"ferry.includes.demand_processing.fetch_season_subject_demand",
"multiprocessing.Pool",
"ferry.includes.demand_processing.get_dates",
"ujson.load"
]
| [((900, 1003), 'ferry.includes.demand_processing.fetch_season_subject_demand', 'fetch_season_subject_demand', (['demand_season', 'demand_subject_code', 'demand_subject_codes', 'demand_dates'], {}), '(demand_season, demand_subject_code,\n demand_subject_codes, demand_dates)\n', (927, 1003), False, 'from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates\n'), ((1443, 1501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Import demand stats"""'}), "(description='Import demand stats')\n", (1466, 1501), False, 'import argparse\n'), ((1506, 1530), 'ferry.crawler.common_args.add_seasons_args', 'add_seasons_args', (['parser'], {}), '(parser)\n', (1522, 1530), False, 'from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg\n'), ((1744, 1795), 'ferry.crawler.common_args.parse_seasons_arg', 'parse_seasons_arg', (['args.seasons', 'all_viable_seasons'], {}), '(args.seasons, all_viable_seasons)\n', (1761, 1795), False, 'from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg\n'), ((1715, 1728), 'ujson.load', 'ujson.load', (['f'], {}), '(f)\n', (1725, 1728), False, 'import ujson\n'), ((1934, 1947), 'ujson.load', 'ujson.load', (['f'], {}), '(f)\n', (1944, 1947), False, 'import ujson\n'), ((2067, 2085), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(64)'}), '(processes=64)\n', (2071, 2085), False, 'from multiprocessing import Pool\n'), ((2220, 2237), 'ferry.includes.demand_processing.get_dates', 'get_dates', (['season'], {}), '(season)\n', (2229, 2237), False, 'from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates\n'), ((3145, 3184), 'ujson.dump', 'ujson.dump', (['season_courses', 'f'], {'indent': '(4)'}), '(season_courses, f, indent=4)\n', (3155, 3184), False, 'import ujson\n')] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from qxf2_scheduler import models
from qxf2_scheduler import db
from qxf2_scheduler.__init__ import app
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
migrate=Migrate(app, db,render_as_batch=True)
manager=Manager(app)
manager.add_command('db',MigrateCommand)
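# Typical CLI usage once the 'db' command is registered above (assuming this
# module is the project's manage.py entry point):
#   python manage.py db init      # create the migrations directory (first run only)
#   python manage.py db migrate   # autogenerate a migration from model changes
#   python manage.py db upgrade   # apply pending migrations to the database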
if __name__ == "__main__":
    manager.run()
 | [
"flask_script.Manager",
"flask_migrate.Migrate"
]
| [((259, 297), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {'render_as_batch': '(True)'}), '(app, db, render_as_batch=True)\n', (266, 297), False, 'from flask_migrate import Migrate, MigrateCommand\n'), ((305, 317), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (312, 317), False, 'from flask_script import Manager\n')] |
# import packages
import requests
import pandas as pd
import time
from functions import *
# limit of results per city
max_results_per_city = 100
# list of cities
city_set = ['New+York','Toronto','Las+Vegas']
# job roles
job_set = ['business+analyst','data+scientist']
# file num
file = 1
# skip the first SKIPPER (city, job) result files
SKIPPER = 0
# loop on all cities
for city in city_set:
# for each job role
for job_qry in job_set:
# count
cnt = 0
startTime = time.time()
# skipper
if(file > SKIPPER):
# dataframe
df = pd.DataFrame(columns = ['unique_id', 'city', 'job_qry','job_title', 'company_name', 'location', 'summary', 'salary', 'link', 'date', 'full_text'])
# for results
for start in range(0, max_results_per_city, 10):
# get dom
page = requests.get('http://www.indeed.com/jobs?q=' + job_qry +'&l=' + str(city) + '&start=' + str(start))
#ensuring at least 1 second between page grabs
time.sleep(1)
#fetch data
soup = get_soup(page.text)
divs = soup.find_all(name="div", attrs={"class":"row"})
# if results exist
if(len(divs) == 0):
break
# for all jobs on a page
for div in divs:
#specifying row num for index of job posting in dataframe
num = (len(df) + 1)
cnt = cnt + 1
#job data after parsing
job_post = []
#append unique id
job_post.append(div['id'])
#append city name
job_post.append(city)
#append job qry
job_post.append(job_qry)
#grabbing job title
job_post.append(extract_job_title(div))
#grabbing company
job_post.append(extract_company(div))
#grabbing location name
job_post.append(extract_location(div))
#grabbing summary text
job_post.append(extract_summary(div))
#grabbing salary
job_post.append(extract_salary(div))
#grabbing link
link = extract_link(div)
job_post.append(link)
#grabbing date
job_post.append(extract_date(div))
#grabbing full_text
job_post.append(extract_fulltext(link))
#appending list of job post info to dataframe at index num
df.loc[num] = job_post
#debug add
write_logs(('Completed =>') + '\t' + city + '\t' + job_qry + '\t' + str(cnt) + '\t' + str(start) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
#saving df as a local csv file
df.to_csv('jobs_' + str(file) + '.csv', encoding='utf-8')
else:
#debug add
write_logs(('Skipped =>') + '\t' + city + '\t' + job_qry + '\t' + str(-1) + '\t' + str(-1) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
# increment file
file = file + 1
| [
"pandas.DataFrame",
"time.time",
"time.sleep"
]
| [((477, 488), 'time.time', 'time.time', ([], {}), '()\n', (486, 488), False, 'import time\n'), ((586, 739), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['unique_id', 'city', 'job_qry', 'job_title', 'company_name', 'location',\n 'summary', 'salary', 'link', 'date', 'full_text']"}), "(columns=['unique_id', 'city', 'job_qry', 'job_title',\n 'company_name', 'location', 'summary', 'salary', 'link', 'date',\n 'full_text'])\n", (598, 739), True, 'import pandas as pd\n'), ((1080, 1093), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1090, 1093), False, 'import time\n'), ((3361, 3372), 'time.time', 'time.time', ([], {}), '()\n', (3370, 3372), False, 'import time\n'), ((3025, 3036), 'time.time', 'time.time', ([], {}), '()\n', (3034, 3036), False, 'import time\n')] |
#
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pprint
from abc import abstractmethod
class _ModelRepoObject(object):
def __iter__(self):
# Iterate through list of properties and yield as key -> value
for prop in self._properties():
yield prop, self.__getattribute__(prop)
@classmethod
def _get_properties_helper(cls):
return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
@classmethod
def _properties(cls):
return cls._get_properties_helper()
@classmethod
@abstractmethod
def from_proto(cls, proto):
pass
def __repr__(self):
return to_string(self)
def to_string(obj):
return _ModelRepoObjectPrinter().to_string(obj)
def get_classname(obj):
return type(obj).__name__
class _ModelRepoObjectPrinter(object):
def __init__(self):
super(_ModelRepoObjectPrinter, self).__init__()
self.printer = pprint.PrettyPrinter()
def to_string(self, obj):
if isinstance(obj, _ModelRepoObject):
return "<%s: %s>" % (get_classname(obj), self._entity_to_string(obj))
return self.printer.pformat(obj)
def _entity_to_string(self, entity):
return ", ".join(["%s=%s" % (key, self.to_string(value)) for key, value in entity])
| [
"pprint.PrettyPrinter"
]
| [((1501, 1523), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (1521, 1523), False, 'import pprint\n')] |
from torchvision.models import resnet18
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch
import pdb
##############################
# Encoder
##############################
class Encoder(nn.Module):
def __init__(self, latent_dim):
super(Encoder, self).__init__()
""" The encoder used in both cVAE-GAN and cLR-GAN, which encode image B or B_hat to latent vector
This encoder uses resnet-18 to extract features, and further encode them into a distribution
similar to VAE encoder.
Note: You may either add "reparametrization trick" and "KL divergence" or in the train.py file
Args in constructor:
latent_dim: latent dimension for z
Args in forward function:
img: image input (from domain B)
Returns:
mu: mean of the latent code
logvar: sigma of the latent code
"""
# Extracts features at the last fully-connected
resnet18_model = resnet18(pretrained=True)
self.feature_extractor = nn.Sequential(*list(resnet18_model.children())[:-3])
self.pooling = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
# Output is mu and log(var) for reparameterization trick used in VAEs
self.fc_mu = nn.Linear(256, latent_dim)
self.fc_logvar = nn.Linear(256, latent_dim)
def forward(self, img):
out = self.feature_extractor(img)
out = self.pooling(out)
out = out.view(out.size(0), -1)
mu = self.fc_mu(out)
logvar = self.fc_logvar(out)
return mu, logvar
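# A minimal sketch of the "reparametrization trick" mentioned in the Encoder
# docstring (one common formulation; placing it here rather than in train.py is
# an assumption): z = mu + sigma * eps with eps ~ N(0, I), so gradients flow
# back through mu and logvar.
def reparameterize(mu, logvar):
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std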
##############################
# Generator
##############################
class Generator(nn.Module):
""" The generator used in both cVAE-GAN and cLR-GAN, which transform A to B
Args in constructor:
latent_dim: latent dimension for z
image_shape: (channel, h, w), you may need this to specify the output dimension (optional)
Args in forward function:
x: image input (from domain A)
z: latent vector (encoded B)
Returns:
fake_B: generated image in domain B
"""
def __init__(self, latent_dim, img_shape):
super(Generator, self).__init__()
channels, self.h, self.w = img_shape
# (TODO: add layers...)
def forward(self, x, z):
# (TODO: add layers...)
return
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
def __init__(self, in_channels=3):
super(Discriminator, self).__init__()
""" The discriminator used in both cVAE-GAN and cLR-GAN
Args in constructor:
in_channels: number of channel in image (default: 3 for RGB)
Args in forward function:
x: image input (real_B, fake_B)
Returns:
discriminator output: could be a single value or a matrix depending on the type of GAN
"""
def forward(self, x):
return
| [
"torch.nn.AvgPool2d",
"torchvision.models.resnet18",
"torch.nn.Linear"
]
| [((1078, 1103), 'torchvision.models.resnet18', 'resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1086, 1103), False, 'from torchvision.models import resnet18\n'), ((1219, 1267), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(8)', 'stride': '(8)', 'padding': '(0)'}), '(kernel_size=8, stride=8, padding=0)\n', (1231, 1267), True, 'import torch.nn as nn\n'), ((1368, 1394), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'latent_dim'], {}), '(256, latent_dim)\n', (1377, 1394), True, 'import torch.nn as nn\n'), ((1420, 1446), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'latent_dim'], {}), '(256, latent_dim)\n', (1429, 1446), True, 'import torch.nn as nn\n')] |
from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
class Parametro(Instruction):
def __init__(self, id, data_type, line, column):
self.id = id
self.data_type = data_type
self.line = line
self.column = column
self._tac = ''
def compile(self):
pass
def process(self, environment):
pass
def __repr__(self):
return str(vars(self))
class Funcion(Instruction):
def __init__(self, id, params, body, val_return, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.val_return = val_return
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
temporal = None
if self.isNew:
            self.environment = environment # TODO verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
temporal = self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
temporal = self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
temporal = self.setVariables(fun['variables'], environment)
return temporal
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
        lbl_exit = ThreeAddressCode().newLabel()
        newAmbito.lbl_return = lbl_exit
        # Add the function body
        self.body.compile(newAmbito)
        # Add the exit label
        ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
# ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
temp = ThreeAddressCode().newTemp()
            # Call the function
            ThreeAddressCode().addCode(f"{self.id}()")
            # Get the function's return value
            ThreeAddressCode().addCode("# Getting return value --------")
ThreeAddressCode().addCode(f"{temp} = Stack[P]")
return temp
return None
class DropFuncion(Instruction):
def __init__(self, id, params, line, column):
self.id = id
self.params = params
self.line = line
self.column = column
class ProcedimientoAlmacenado(Instruction):
def __init__(self, id, params, body, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
if self.isNew:
            self.environment = environment # TODO verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
self.setVariables(fun['variables'], environment)
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # A stored procedure does NOT return anything | [
"controllers.procedures.Procedures",
"models.Other.ambito.Ambito",
"controllers.three_address_code.ThreeAddressCode"
]
| [((2120, 2139), 'models.Other.ambito.Ambito', 'Ambito', (['environment'], {}), '(environment)\n', (2126, 2139), False, 'from models.Other.ambito import Ambito\n'), ((5479, 5498), 'models.Other.ambito.Ambito', 'Ambito', (['environment'], {}), '(environment)\n', (5485, 5498), False, 'from models.Other.ambito import Ambito\n'), ((2474, 2492), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2490, 2492), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5833, 5851), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5849, 5851), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((1983, 2001), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (1999, 2001), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2060, 2078), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2076, 2078), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2229, 2247), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2245, 2247), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2579, 2597), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2595, 2597), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2774, 2792), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2790, 2792), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2954, 2972), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2970, 2972), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5342, 5360), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5358, 5360), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5419, 5437), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5435, 5437), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5588, 5606), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5604, 5606), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5938, 5956), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5954, 5956), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((6133, 6151), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (6149, 6151), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((6251, 6269), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (6267, 6269), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((6311, 6329), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (6327, 6329), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((1304, 1316), 'controllers.procedures.Procedures', 'Procedures', ([], {}), '()\n', (1314, 1316), False, 'from controllers.procedures import Procedures\n'), ((1530, 1542), 'controllers.procedures.Procedures', 'Procedures', ([], {}), '()\n', (1540, 1542), False, 'from controllers.procedures import Procedures\n'), ((1711, 1729), 
'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (1727, 1729), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((2422, 2440), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (2438, 2440), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((3540, 3558), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (3556, 3558), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((3629, 3647), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (3645, 3647), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((3742, 3760), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (3758, 3760), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((3821, 3839), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (3837, 3839), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((4712, 4724), 'controllers.procedures.Procedures', 'Procedures', ([], {}), '()\n', (4722, 4724), False, 'from controllers.procedures import Procedures\n'), ((4927, 4939), 'controllers.procedures.Procedures', 'Procedures', ([], {}), '()\n', (4937, 4939), False, 'from controllers.procedures import Procedures\n'), ((5097, 5115), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5113, 5115), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((5781, 5799), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (5797, 5799), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((6937, 6955), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (6953, 6955), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((3451, 3469), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (3467, 3469), False, 'from controllers.three_address_code import ThreeAddressCode\n'), ((6808, 6826), 'controllers.three_address_code.ThreeAddressCode', 'ThreeAddressCode', ([], {}), '()\n', (6824, 6826), False, 'from controllers.three_address_code import ThreeAddressCode\n')] |
import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
def _download_from_url(url: str, location: str) -> bool:
try:
urllib.request.urlretrieve(url, location)
return True
except (IOError, urllib.error.ContentTooShortError):
# If a connection can't be made, IOError is raised
# If the download gets interrupted (ContentTooShortError), we
# should try again later
# TODO: can we tell if it was a bad filename (and should stop
# requesting it), or internet connectivity (and should tell
# us), or just a fluke (and should retry)?
return False
def download_podcast(
directory: RadioDirectory,
channel: Channel,
podcast: Podcast) -> Podcast:
location = download_location(directory, channel, podcast)
url = get_podcast_audio_link(podcast)
# TODO: This takes some time, especially when there are a lot to
# download. I could have this spawn threads, or add priorities,
# and so on. For now, since it runs every few hours, and is more
# of a push than a pull situation for the user, I'm leaving it
# simple
success = _download_from_url(url, location)
if success:
return podcast._replace(status=NewStatus())
else:
return podcast
def download_channel(directory: RadioDirectory, channel: Channel) -> Channel:
updated_podcasts = []
for known_podcast in channel.known_podcasts:
if type(known_podcast.status).__name__ == 'RequestedStatus':
known_podcast = download_podcast(directory, channel, known_podcast)
updated_podcasts.append(known_podcast)
return channel._replace(known_podcasts=updated_podcasts)
def download_radio(radio: Radio) -> typing.Tuple[Radio, InfoContent]:
downloaded_channels = [
download_channel(radio.directory, channel)
for channel in radio.channels
]
radio = radio._replace(channels=downloaded_channels)
info_content = build_info_content()
return (radio, info_content)
| [
"podcast.models.NewStatus",
"podcast.models.get_podcast_audio_link",
"podcast.info.build_info_content",
"podcast.files.download_location"
]
| [((1120, 1166), 'podcast.files.download_location', 'download_location', (['directory', 'channel', 'podcast'], {}), '(directory, channel, podcast)\n', (1137, 1166), False, 'from podcast.files import download_location\n'), ((1177, 1208), 'podcast.models.get_podcast_audio_link', 'get_podcast_audio_link', (['podcast'], {}), '(podcast)\n', (1199, 1208), False, 'from podcast.models import get_podcast_audio_link\n'), ((2331, 2351), 'podcast.info.build_info_content', 'build_info_content', ([], {}), '()\n', (2349, 2351), False, 'from podcast.info import build_info_content\n'), ((1600, 1611), 'podcast.models.NewStatus', 'NewStatus', ([], {}), '()\n', (1609, 1611), False, 'from podcast.models import NewStatus\n')] |
# Import the Twython class
from twython import Twython, TwythonStreamer
import json
# import pandas as pd
import csv
import datetime
def process_tweet(tweet):
# Filter out unwanted data
d = {}
d['hashtags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]
try:
for key in {
'created_at', 'id', 'text', 'source', 'truncated',
'in_reply_to_status_id', 'in_reply_to_user_id',
'in_reply_to_screen_name', 'user', 'coordinates',
'place', 'quoted_status_id', 'is_quote_status', 'quoted_status',
'retweeted_status', 'quote_count', 'reply_count', 'retweet_count',
'favorite_count', 'favorited', 'retweeted', 'entities', 'extended_entities',
'possibly_sensitive', 'filter_level', 'lang', 'matching_rules'}:
if key == 'user':
pass
elif key == 'place':
pass
elif key == 'quoted_status' or key == 'retweeted_status':
pass
elif key == 'entities':
pass
elif key == 'extended_entities':
pass
else:
d[key] = tweet[key]
except KeyError as e:
pass
# d['text'] = tweet['text']
# d['user'] = tweet['user']['screen_name']
# d['user_loc'] = tweet['user']['location']
# d['date'] = tweet['created_at']
return d
# Create a class that inherits TwythonStreamer
class MyStreamer(TwythonStreamer):
# Received data
def on_success(self, data):
# # Only collect tweets in English
# if data['lang'] == 'en':
# tweet_data = process_tweet(data)
print(datetime.datetime.now())
# self.save_to_csv(tweet_data)
self.save_to_json(data)
# Problem with the API
def on_error(self, status_code, data):
print(status_code, data)
self.disconnect()
# Save each tweet to csv file
def save_to_csv(self, tweet):
# with open(r'saved_tweets.csv', 'a') as out_file:
with open(r'saved_tweets_big.csv', 'a') as out_file:
writer = csv.writer(out_file)
writer.writerow(list(tweet.values()))
def save_to_json(self, tweet):
with open('saved_tweets_big.json', 'a') as out_file:
json.dump(tweet, out_file)
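# twitter_credentials.json is expected to be a flat JSON object holding the four
# keys read in main() below, e.g.:
#   {"CONSUMER_KEY": "...", "CONSUMER_SECRET": "...",
#    "ACCESS_TOKEN": "...", "ACCESS_SECRET": "..."}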
def main():
# Load credentials from json file
with open("twitter_credentials.json", "r") as tw_creds:
creds = json.load(tw_creds)
# Instantiate an object
# python_tweets = Twython(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])
# Instantiate from our streaming class
stream = MyStreamer(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'],
creds['ACCESS_TOKEN'], creds['ACCESS_SECRET'])
# Start the stream
# stream.statuses.filter(track='madrid')
stream.statuses.filter(locations='-7.876154,37.460012,3.699873,43.374723')
# # Create our query
# query = {
# 'q': 'futbol',
# 'result_type': 'mixed',
# 'lang': 'es',
# 'count': '100',
# }
#
# dict_ = {'user': [], 'date': [], 'text': [], 'favorite_count': []}
# for status in python_tweets.search(**query)['statuses']:
# print(format(status))
# dict_['user'].append(status['user']['screen_name'])
# dict_['date'].append(status['created_at'])
# dict_['text'].append(status['text'])
# dict_['favorite_count'].append(status['favorite_count'])
#
# df = pd.DataFrame(dict_)
# df.sort_values(by='favorite_count', inplace=True, ascending=False)
# print(df.values)
if __name__ == "__main__":
main()
| [
"json.load",
"datetime.datetime.now",
"csv.writer",
"json.dump"
]
| [((2476, 2495), 'json.load', 'json.load', (['tw_creds'], {}), '(tw_creds)\n', (2485, 2495), False, 'import json\n'), ((1706, 1729), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1727, 1729), False, 'import datetime\n'), ((2142, 2162), 'csv.writer', 'csv.writer', (['out_file'], {}), '(out_file)\n', (2152, 2162), False, 'import csv\n'), ((2321, 2347), 'json.dump', 'json.dump', (['tweet', 'out_file'], {}), '(tweet, out_file)\n', (2330, 2347), False, 'import json\n')] |
import pytest
import itertools
# Cartesian product of file names and extensions
# e.g. README.txt, README.md, CHANGELOG.txt, CHANGELOG.md ...
file_extensions = ['txt', 'md']
names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE', 'CODE_OF_CONDUCT']
exempt_files = [('.'.join(x)) for x in itertools.product(names, file_extensions)]
def test_filename(md_filepath):
if any(e in md_filepath for e in exempt_files):
assert True
else:
        assert md_filepath.islower(), 'Filename should be lowercase'
| [
"itertools.product"
]
| [((292, 333), 'itertools.product', 'itertools.product', (['names', 'file_extensions'], {}), '(names, file_extensions)\n', (309, 333), False, 'import itertools\n')] |
import sys
import unittest
sys.path.append("../main")
from sshtransport import *
class FakeSocket(object):
def __init__(self):
self.recv_buffer = b""
self.send_buffer = b""
def recv(self, n):
resp = self.recv_buffer[:n]
self.recv_buffer = self.recv_buffer[n:]
return resp
def send(self, x):
self.send_buffer += x
class TestIdentificationString(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"SSH-2.00-SecureMcShellface_1.0\r\n"
idstr = IdentificationString(recvfrom=conn)
self.assertEqual(idstr.protoversion, "2.00")
self.assertEqual(idstr.softwareversion, "SecureMcShellface_1.0")
def test_send(self):
conn = FakeSocket()
idstr = IdentificationString(protoversion="2.00", softwareversion="SecureMcShellface_1.0")
idstr.send(conn)
self.assertEqual(conn.send_buffer, b"SSH-2.00-SecureMcShellface_1.0\r\n")
class TestBinaryPacket(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00"
binpkt = BinaryPacket(recvfrom=conn)
self.assertEqual(binpkt.payload, b"Hello World!")
self.assertEqual(binpkt.mac, b"")
def test_send(self):
conn = FakeSocket()
binpkt = BinaryPacket(payload=b"Hello World!")
binpkt.send(conn)
self.assertEqual(conn.send_buffer, b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00")
| [
"sys.path.append"
]
| [((28, 54), 'sys.path.append', 'sys.path.append', (['"""../main"""'], {}), "('../main')\n", (43, 54), False, 'import sys\n')] |
import datetime
import logging
from typing import Optional
from betfairlightweight.resources.bettingresources import MarketBook, MarketCatalogue
from .blotter import Blotter
from ..events import events
logger = logging.getLogger(__name__)
class Market:
def __init__(
self,
flumine,
market_id: str,
market_book: MarketBook,
market_catalogue: MarketCatalogue = None,
):
self.flumine = flumine
self.market_id = market_id
self.closed = False
self.date_time_closed = None
self.market_book = market_book
self.market_catalogue = market_catalogue
self.context = {"simulated": {}} # data store (raceCard / scores etc)
self.blotter = Blotter(self)
def __call__(self, market_book: MarketBook):
self.market_book = market_book
def open_market(self) -> None:
self.closed = False
def close_market(self) -> None:
self.closed = True
self.date_time_closed = datetime.datetime.utcnow()
# order
def place_order(self, order, execute: bool = True) -> None:
order.place(self.market_book.publish_time)
if order.id not in self.blotter:
self.blotter[order.id] = order
if order.trade.market_notes is None:
order.trade.update_market_notes(self.market_book)
self.flumine.log_control(events.TradeEvent(order.trade)) # todo dupes?
else:
return # retry attempt so ignore?
if execute: # handles replaceOrder
self.blotter.pending_place.append(order)
def cancel_order(self, order, size_reduction: float = None) -> None:
order.cancel(size_reduction)
self.blotter.pending_cancel.append(order)
def update_order(self, order, new_persistence_type: str) -> None:
order.update(new_persistence_type)
self.blotter.pending_update.append(order)
def replace_order(self, order, new_price: float) -> None:
order.replace(new_price)
self.blotter.pending_replace.append(order)
@property
def event_type_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_type_id
@property
def event_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_id
@property
def seconds_to_start(self):
return (self.market_start_datetime - datetime.datetime.utcnow()).total_seconds()
@property
def elapsed_seconds_closed(self) -> Optional[float]:
if self.closed and self.date_time_closed:
return (datetime.datetime.utcnow() - self.date_time_closed).total_seconds()
@property
def market_start_datetime(self):
if self.market_catalogue:
return self.market_catalogue.market_start_time
elif self.market_book:
return self.market_book.market_definition.market_time
else:
return datetime.datetime.utcfromtimestamp(0)
| [
"logging.getLogger",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.utcnow"
]
| [((213, 240), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'import logging\n'), ((1004, 1030), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1028, 1030), False, 'import datetime\n'), ((2979, 3016), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (3013, 3016), False, 'import datetime\n'), ((2450, 2476), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2474, 2476), False, 'import datetime\n'), ((2636, 2662), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2660, 2662), False, 'import datetime\n')] |
import unittest
from iterable_collections import collect
class TestList_(unittest.TestCase):
def test_list(self):
c = collect(list(range(10))).list_()
self.assertEqual(c.iterable, list(list(range(10))))
def test_set(self):
c = collect(set(range(10))).list_()
self.assertEqual(c.iterable, list(set(range(10))))
def test_tuple(self):
c = collect(tuple(range(10))).list_()
self.assertEqual(c.iterable, list(tuple(range(10))))
def test_iterator(self):
c = collect(iter(range(10))).list_()
self.assertEqual(c.iterable, list(iter(range(10))))
def test_dict(self):
c = collect({'a': 1, 'b': 2}).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}))
def test_dict_items(self):
c = collect({'a': 1, 'b': 2}.items()).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}.items()))
def test_enumerate(self):
c = collect(list(range(10))).enumerate().list_()
self.assertEqual(c.iterable, list(enumerate(range(10))))
| [
"iterable_collections.collect"
]
| [((662, 687), 'iterable_collections.collect', 'collect', (["{'a': 1, 'b': 2}"], {}), "({'a': 1, 'b': 2})\n", (669, 687), False, 'from iterable_collections import collect\n')] |
""" Testing array utilities
"""
import sys
import numpy as np
from ..arrfuncs import as_native_array, pinv, eigh
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>'
SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<'
def test_as_native():
arr = np.arange(5) # native
assert_equal(arr.dtype.byteorder, '=')
narr = as_native_array(arr)
assert_true(arr is narr)
sdt = arr.dtype.newbyteorder('s')
barr = arr.astype(sdt)
assert_equal(barr.dtype.byteorder, SWAPPED_ORDER)
narr = as_native_array(barr)
assert_false(barr is narr)
assert_array_equal(barr, narr)
assert_equal(narr.dtype.byteorder, NATIVE_ORDER)
def test_pinv():
arr = np.random.randn(4, 4, 4, 3, 7)
_pinv = pinv(arr)
for i in range(4):
for j in range(4):
for k in range(4):
assert_array_almost_equal(_pinv[i, j, k],
np.linalg.pinv(arr[i, j, k]))
def test_eigh():
for i in range(10):
arr = np.random.randn(7, 7)
evals1, evecs1 = eigh(arr)
evals2, evecs2 = np.linalg.eigh(arr)
assert_array_almost_equal(evals1, evals2)
assert_array_almost_equal(evecs1, evecs2)
arr = np.random.randn(4, 4, 4, 7, 7)
evals, evecs = eigh(arr)
for i in range(4):
for j in range(4):
for k in range(4):
evals_vox, evecs_vox = np.linalg.eigh(arr[i, j, k])
assert_array_almost_equal(evals[i, j, k], evals_vox)
assert_array_almost_equal(evecs[i, j, k], evecs_vox)
| [
"numpy.testing.assert_array_almost_equal",
"numpy.linalg.pinv",
"numpy.arange",
"nose.tools.assert_true",
"numpy.linalg.eigh",
"nose.tools.assert_equal",
"nose.tools.assert_false",
"numpy.random.randn",
"numpy.testing.assert_array_equal"
]
| [((447, 459), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (456, 459), True, 'import numpy as np\n'), ((474, 512), 'nose.tools.assert_equal', 'assert_equal', (['arr.dtype.byteorder', '"""="""'], {}), "(arr.dtype.byteorder, '=')\n", (486, 512), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((549, 573), 'nose.tools.assert_true', 'assert_true', (['(arr is narr)'], {}), '(arr is narr)\n', (560, 573), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((643, 692), 'nose.tools.assert_equal', 'assert_equal', (['barr.dtype.byteorder', 'SWAPPED_ORDER'], {}), '(barr.dtype.byteorder, SWAPPED_ORDER)\n', (655, 692), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((730, 756), 'nose.tools.assert_false', 'assert_false', (['(barr is narr)'], {}), '(barr is narr)\n', (742, 756), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((761, 791), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['barr', 'narr'], {}), '(barr, narr)\n', (779, 791), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((796, 844), 'nose.tools.assert_equal', 'assert_equal', (['narr.dtype.byteorder', 'NATIVE_ORDER'], {}), '(narr.dtype.byteorder, NATIVE_ORDER)\n', (808, 844), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((874, 904), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)', '(4)', '(3)', '(7)'], {}), '(4, 4, 4, 3, 7)\n', (889, 904), True, 'import numpy as np\n'), ((1408, 1438), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)', '(4)', '(7)', '(7)'], {}), '(4, 4, 4, 7, 7)\n', (1423, 1438), True, 'import numpy as np\n'), ((1195, 1216), 'numpy.random.randn', 'np.random.randn', (['(7)', '(7)'], {}), '(7, 7)\n', (1210, 1216), True, 'import numpy as np\n'), ((1277, 1296), 'numpy.linalg.eigh', 'np.linalg.eigh', (['arr'], {}), '(arr)\n', (1291, 1296), True, 'import numpy as np\n'), ((1305, 1346), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evals1', 'evals2'], {}), '(evals1, evals2)\n', (1330, 1346), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1355, 1396), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evecs1', 'evecs2'], {}), '(evecs1, evecs2)\n', (1380, 1396), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1588, 1616), 'numpy.linalg.eigh', 'np.linalg.eigh', (['arr[i, j, k]'], {}), '(arr[i, j, k])\n', (1602, 1616), True, 'import numpy as np\n'), ((1633, 1685), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evals[i, j, k]', 'evals_vox'], {}), '(evals[i, j, k], evals_vox)\n', (1658, 1685), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1702, 1754), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evecs[i, j, k]', 'evecs_vox'], {}), '(evecs[i, j, k], evecs_vox)\n', (1727, 1754), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1108, 1136), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr[i, j, k]'], {}), '(arr[i, j, k])\n', (1122, 1136), True, 'import numpy as np\n')] |
#!/usr/bin/python
import os
os.system("sudo ./scan.py")
os.system("sudo ./enable-wifi.py")
| [
"os.system"
]
| [((29, 56), 'os.system', 'os.system', (['"""sudo ./scan.py"""'], {}), "('sudo ./scan.py')\n", (38, 56), False, 'import os\n'), ((57, 91), 'os.system', 'os.system', (['"""sudo ./enable-wifi.py"""'], {}), "('sudo ./enable-wifi.py')\n", (66, 91), False, 'import os\n')] |
from keras.optimizers import RMSprop
from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU
from keras.layers import concatenate, Reshape, SpatialDropout1D
from keras.models import Model
from keras import backend as K
from .AttentionWeightedAverage import AttentionWeightedAverage
def textgenrnn_model(num_classes, cfg, context_size=None,
weights_path=None,
dropout=0.0,
optimizer=RMSprop(lr=4e-3, rho=0.99)):
'''
Builds the model architecture for textgenrnn and
loads the specified weights for the model.
'''
input = Input(shape=(cfg['max_length'],), name='input')
embedded = Embedding(num_classes, cfg['dim_embeddings'],
input_length=cfg['max_length'],
name='embedding')(input)
if dropout > 0.0:
embedded = SpatialDropout1D(dropout, name='dropout')(embedded)
rnn_layer_list = []
for i in range(cfg['rnn_layers']):
prev_layer = embedded if i == 0 else rnn_layer_list[-1]
if cfg.get('rnn_type') == 'gru':
rnn_layer_list.append(new_rnn_gru(cfg, i + 1)(prev_layer))
else:
rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))
seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
attention = AttentionWeightedAverage(name='attention')(seq_concat)
output = Dense(num_classes, name='output', activation='softmax')(attention)
if context_size is None:
model = Model(inputs=[input], outputs=[output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
else:
context_input = Input(
shape=(context_size,), name='context_input')
context_reshape = Reshape((context_size,),
name='context_reshape')(context_input)
merged = concatenate([attention, context_reshape], name='concat')
main_output = Dense(num_classes, name='context_output',
activation='softmax')(merged)
model = Model(inputs=[input, context_input],
outputs=[main_output, output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
loss_weights=[0.8, 0.2])
return model
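# Illustrative sketch only: the cfg dict consumed by textgenrnn_model() must carry
# at least the keys used above; the concrete values here are assumptions chosen for
# demonstration, not defaults from the original project.
def _example_build():
    example_cfg = {
        'max_length': 40, 'dim_embeddings': 100, 'rnn_layers': 2,
        'rnn_size': 128, 'rnn_bidirectional': False, 'rnn_type': 'lstm',
    }
    return textgenrnn_model(num_classes=200, cfg=example_cfg)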
'''
Create a new LSTM layer for the given parameters. Unfortunately,
each combination of parameters must be hardcoded.
The normal LSTMs use sigmoid recurrent activations
for parity with CuDNNLSTM:
https://github.com/keras-team/keras/issues/8860
'''
def new_rnn(cfg, layer_num):
use_cudnnlstm = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnnlstm:
from keras.layers import CuDNNLSTM
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNLSTM(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid'),
name='rnn_{}'.format(layer_num))
return LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
name='rnn_{}'.format(layer_num))
def new_rnn_gru(cfg, layer_num):
use_cudnngru = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnngru:
from keras.layers import CuDNNGRU
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNGRU(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNGRU(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True),
name='rnn_{}'.format(layer_num))
return GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True,
name='rnn_{}'.format(layer_num))
| [
"keras.layers.CuDNNLSTM",
"keras.backend.backend",
"keras.layers.CuDNNGRU",
"keras.layers.LSTM",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.models.Model",
"keras.layers.SpatialDropout1D",
"keras.layers.Embedding",
"keras.layers.Dense",
"keras.layers.Reshape",
"keras.backend.tensorflow_backend._get_available_gpus",
"keras.optimizers.RMSprop",
"keras.layers.GRU"
]
| [((466, 493), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.004)', 'rho': '(0.99)'}), '(lr=0.004, rho=0.99)\n', (473, 493), False, 'from keras.optimizers import RMSprop\n'), ((624, 671), 'keras.layers.Input', 'Input', ([], {'shape': "(cfg['max_length'],)", 'name': '"""input"""'}), "(shape=(cfg['max_length'],), name='input')\n", (629, 671), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((1273, 1332), 'keras.layers.concatenate', 'concatenate', (['([embedded] + rnn_layer_list)'], {'name': '"""rnn_concat"""'}), "([embedded] + rnn_layer_list, name='rnn_concat')\n", (1284, 1332), False, 'from keras.layers import concatenate, Reshape, SpatialDropout1D\n'), ((687, 787), 'keras.layers.Embedding', 'Embedding', (['num_classes', "cfg['dim_embeddings']"], {'input_length': "cfg['max_length']", 'name': '"""embedding"""'}), "(num_classes, cfg['dim_embeddings'], input_length=cfg['max_length'\n ], name='embedding')\n", (696, 787), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((1417, 1472), 'keras.layers.Dense', 'Dense', (['num_classes'], {'name': '"""output"""', 'activation': '"""softmax"""'}), "(num_classes, name='output', activation='softmax')\n", (1422, 1472), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((1530, 1569), 'keras.models.Model', 'Model', ([], {'inputs': '[input]', 'outputs': '[output]'}), '(inputs=[input], outputs=[output])\n', (1535, 1569), False, 'from keras.models import Model\n'), ((1777, 1827), 'keras.layers.Input', 'Input', ([], {'shape': '(context_size,)', 'name': '"""context_input"""'}), "(shape=(context_size,), name='context_input')\n", (1782, 1827), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((1982, 2038), 'keras.layers.concatenate', 'concatenate', (['[attention, context_reshape]'], {'name': '"""concat"""'}), "([attention, context_reshape], name='concat')\n", (1993, 2038), False, 'from keras.layers import concatenate, Reshape, SpatialDropout1D\n'), ((2178, 2245), 'keras.models.Model', 'Model', ([], {'inputs': '[input, context_input]', 'outputs': '[main_output, output]'}), '(inputs=[input, context_input], outputs=[main_output, output])\n', (2183, 2245), False, 'from keras.models import Model\n'), ((882, 923), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['dropout'], {'name': '"""dropout"""'}), "(dropout, name='dropout')\n", (898, 923), False, 'from keras.layers import concatenate, Reshape, SpatialDropout1D\n'), ((1867, 1915), 'keras.layers.Reshape', 'Reshape', (['(context_size,)'], {'name': '"""context_reshape"""'}), "((context_size,), name='context_reshape')\n", (1874, 1915), False, 'from keras.layers import concatenate, Reshape, SpatialDropout1D\n'), ((2061, 2124), 'keras.layers.Dense', 'Dense', (['num_classes'], {'name': '"""context_output"""', 'activation': '"""softmax"""'}), "(num_classes, name='context_output', activation='softmax')\n", (2066, 2124), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((2798, 2809), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (2807, 2809), True, 'from keras import backend as K\n'), ((3866, 3877), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (3875, 3877), True, 'from keras import backend as K\n'), ((2834, 2876), 'keras.backend.tensorflow_backend._get_available_gpus', 'K.tensorflow_backend._get_available_gpus', ([], {}), '()\n', (2874, 2876), True, 'from keras import backend as K\n'), ((3017, 3066), 
'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (["cfg['rnn_size']"], {'return_sequences': '(True)'}), "(cfg['rnn_size'], return_sequences=True)\n", (3026, 3066), False, 'from keras.layers import CuDNNLSTM\n'), ((3406, 3482), 'keras.layers.LSTM', 'LSTM', (["cfg['rnn_size']"], {'return_sequences': '(True)', 'recurrent_activation': '"""sigmoid"""'}), "(cfg['rnn_size'], return_sequences=True, recurrent_activation='sigmoid')\n", (3410, 3482), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n'), ((3902, 3944), 'keras.backend.tensorflow_backend._get_available_gpus', 'K.tensorflow_backend._get_available_gpus', ([], {}), '()\n', (3942, 3944), True, 'from keras import backend as K\n'), ((4083, 4131), 'keras.layers.CuDNNGRU', 'CuDNNGRU', (["cfg['rnn_size']"], {'return_sequences': '(True)'}), "(cfg['rnn_size'], return_sequences=True)\n", (4091, 4131), False, 'from keras.layers import CuDNNGRU\n'), ((4467, 4564), 'keras.layers.GRU', 'GRU', (["cfg['rnn_size']"], {'return_sequences': '(True)', 'recurrent_activation': '"""sigmoid"""', 'reset_after': '(True)'}), "(cfg['rnn_size'], return_sequences=True, recurrent_activation='sigmoid',\n reset_after=True)\n", (4470, 4564), False, 'from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU\n')] |
from django.contrib import admin
from .models import Image
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('image', 'predict_covid', 'predict_no_findings', 'predict_pneumonia', 'created_at', 'updated_at', 'activated_at')
| [
"django.contrib.admin.register"
]
| [((61, 82), 'django.contrib.admin.register', 'admin.register', (['Image'], {}), '(Image)\n', (75, 82), False, 'from django.contrib import admin\n')] |
# Solution of;
# Project Euler Problem 668: Square root smooth Numbers
# https://projecteuler.net/problem=668
#
# A positive integer is called square root smooth if all of its prime factors
# are strictly less than its square root. Including the number $1$, there are
# $29$ square root smooth numbers not exceeding $100$. How many square root
# smooth numbers are there not exceeding $10\,000\,000\,000$?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
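# Illustrative helper (not part of the original scaffold): a direct check of the
# definition above -- n is square root smooth iff every prime factor p satisfies
# p * p < n; 1 qualifies as stated.
def is_sqrt_smooth(n):
    m, p, largest = n, 2, 1
    while p * p <= m:
        while m % p == 0:
            largest = p
            m //= p
        p += 1
    if m > 1:
        largest = m
    return n == 1 or largest * largest < n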
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 668
timed.caller(dummy, n, i, prob_id)
| [
"timed.caller"
]
| [((580, 614), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (592, 614), False, 'import timed\n')] |
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms import ValidationError, EmailField
from user import models
class MyAuthenticationForm(AuthenticationForm):
""""
Overide method clean from AuthenticationForm to show that a user hasn't activate their account
"""
error_messages = {
'invalid_login': (
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
'inactive': ("This Account hasn't been activated yet, Please check your email :)"),
}
def confirm_login_allowed(self, user):
if not user.is_active:
raise ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
print(username)
try:
user_temp = User.objects.get(username=username)
except:
user_temp = None
print(user_temp)
if user_temp is not None:
self.confirm_login_allowed(user_temp)
else:
raise ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
return self.cleaned_data
class CreateUserForm(UserCreationForm):
""""
Override UserCreationForm to include email field
"""
email = EmailField(required=True, label='Email')
class Meta:
model = User
fields = ("username", "email", "password1", "<PASSWORD>")
error_messages = {
'password_mismatch': ('The two password fields didn’t match.'),
'email_taken': 'Your email has been taken'
}
def clean_email(self):
"""
Check if the email had already been taken
"""
email = self.cleaned_data.get('email')
num = User.objects.filter(email=email)
if num.count() > 0:
raise ValidationError(
self.error_messages['email_taken'],
code='email_taken',
)
return email
def save(self, commit= True):
user = super(CreateUserForm, self).save(commit=False)
email = self.cleaned_data.get('email')
user.email = email
user.is_active=False
if commit:
user.save()
return user
| [
"django.contrib.auth.authenticate",
"django.contrib.auth.models.User.objects.get",
"django.forms.ValidationError",
"django.contrib.auth.models.User.objects.filter",
"django.forms.EmailField"
]
| [((1983, 2023), 'django.forms.EmailField', 'EmailField', ([], {'required': '(True)', 'label': '"""Email"""'}), "(required=True, label='Email')\n", (1993, 2023), False, 'from django.forms import ValidationError, EmailField\n'), ((2444, 2476), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'email': 'email'}), '(email=email)\n', (2463, 2476), False, 'from django.contrib.auth.models import User\n'), ((809, 874), 'django.forms.ValidationError', 'ValidationError', (["self.error_messages['inactive']"], {'code': '"""inactive"""'}), "(self.error_messages['inactive'], code='inactive')\n", (824, 874), False, 'from django.forms import ValidationError, EmailField\n'), ((1127, 1191), 'django.contrib.auth.authenticate', 'authenticate', (['self.request'], {'username': 'username', 'password': 'password'}), '(self.request, username=username, password=password)\n', (1139, 1191), False, 'from django.contrib.auth import authenticate\n'), ((2524, 2595), 'django.forms.ValidationError', 'ValidationError', (["self.error_messages['email_taken']"], {'code': '"""email_taken"""'}), "(self.error_messages['email_taken'], code='email_taken')\n", (2539, 2595), False, 'from django.forms import ValidationError, EmailField\n'), ((1317, 1352), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'username'}), '(username=username)\n', (1333, 1352), False, 'from django.contrib.auth.models import User\n'), ((1599, 1733), 'django.forms.ValidationError', 'ValidationError', (["self.error_messages['invalid_login']"], {'code': '"""invalid_login"""', 'params': "{'username': self.username_field.verbose_name}"}), "(self.error_messages['invalid_login'], code='invalid_login',\n params={'username': self.username_field.verbose_name})\n", (1614, 1733), False, 'from django.forms import ValidationError, EmailField\n')] |
import re
textinput = widget_inputs["text1"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
is_correct = False
result = re.match(".*window.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("You're right, but there's a little more to it than that. Make sure you watch the solution video.")
result = re.match(".*global.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("Right! It's the global object.")
result = re.match(".*promise.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = False
commentizer("It's not the Promise. Take another look!")
if not is_correct and len(comments) == 0:
commentizer("Not quite. Just log `this` somewhere in the Promise to see what happens.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct | [
"re.match"
]
| [((170, 224), 're.match', 're.match', (['""".*window.*"""', 'textinput'], {'flags': 're.IGNORECASE'}), "('.*window.*', textinput, flags=re.IGNORECASE)\n", (178, 224), False, 'import re\n'), ((384, 438), 're.match', 're.match', (['""".*global.*"""', 'textinput'], {'flags': 're.IGNORECASE'}), "('.*global.*', textinput, flags=re.IGNORECASE)\n", (392, 438), False, 'import re\n'), ((532, 587), 're.match', 're.match', (['""".*promise.*"""', 'textinput'], {'flags': 're.IGNORECASE'}), "('.*promise.*', textinput, flags=re.IGNORECASE)\n", (540, 587), False, 'import re\n')] |
import unittest
from html import escape
from src.markdown_parser import convert_github_markdown_to_asana_xml
class TestConvertGithubMarkdownToAsanaXml(unittest.TestCase):
def test_basic_markdown(self):
md = """~~strike~~ **bold** _italic_ `code` [link](asana.com)"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<s>strike</s> <strong>bold</strong> <em>italic</em> <code>code</code> <a href="asana.com">link</a>\n',
)
def test_ul_tag(self):
md = """* bullet one\n* bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ul>\n<li>bullet one</li>\n<li>bullet two</li>\n</ul>\n""",
)
def test_ol_tag(self):
md = """1. bullet one\n2. bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ol>\n<li>bullet one</li>\n<li>bullet two</li>\n</ol>\n""",
)
def test_paragraph(self):
md = "we don't wrap random text in p tags"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(md + "\n", xml)
def test_block_quote(self):
md = "> block quote"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> block quote\n</em>")
def test_horizontal_rule(self):
# Asana doesn't support <hr /> tags, so we just ignore them
md = "hello\n\n---\nworld\n"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, md) # unchanged
def test_auto_linking(self):
md = "https://asana.com/ [still works](www.test.com)"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<a href="https://asana.com/">https://asana.com/</a> <a href="www.test.com">still works</a>\n',
)
def test_converts_headings_to_bold(self):
md = "## heading"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "\n<b>heading</b>\n")
def test_nested_code_within_block_quote(self):
md = "> abc `123`"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> abc <code>123</code>\n</em>")
def test_removes_pre_tags_inline(self):
md = """```test```"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<code>test</code>\n")
def test_removes_pre_tags_block(self):
md = """see:
```
function foo = () => null;
```
"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "see:\n<code>function foo = () => null;\n</code>\n")
def test_escapes_raw_html_mixed_with_markdown(self):
md = """## <img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>"
+ escape('<img href="link" />')
+ "still here "
+ escape("<h3>header</h3>")
+ "</b>\n",
)
def test_escapes_raw_html_on_own_lines(self):
md = """## blah blah blah
<img href="link">
still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>blah blah blah</b>\n"
+ escape('<img href="link">\n')
+ "still here "
+ escape("<h3>header</h3>"),
)
def test_escapes_raw_html(self):
md = """<img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
escape('<img href="link" />') + "still here " + escape("<h3>header</h3>\n"),
)
def test_removes_images(self):
md = """"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, '<a href="https://image.com">image</a>\n')
if __name__ == "__main__":
from unittest import main as run_tests
run_tests()
| [
"unittest.main",
"html.escape",
"src.markdown_parser.convert_github_markdown_to_asana_xml"
]
| [((4094, 4105), 'unittest.main', 'run_tests', ([], {}), '()\n', (4103, 4105), True, 'from unittest import main as run_tests\n'), ((296, 336), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (332, 336), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((594, 634), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (630, 634), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((841, 881), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (877, 881), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((1094, 1134), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (1130, 1134), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((1252, 1292), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (1288, 1292), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((1510, 1550), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (1546, 1550), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((1708, 1748), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (1744, 1748), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((1997, 2037), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (2033, 2037), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((2183, 2223), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (2219, 2223), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((2383, 2423), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (2419, 2423), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((2595, 2635), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (2631, 2635), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((2862, 2902), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (2898, 2902), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((3259, 3299), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (3295, 3299), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((3623, 3663), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (3659, 3663), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((3903, 3943), 'src.markdown_parser.convert_github_markdown_to_asana_xml', 'convert_github_markdown_to_asana_xml', (['md'], {}), '(md)\n', (3939, 
3943), False, 'from src.markdown_parser import convert_github_markdown_to_asana_xml\n'), ((3469, 3494), 'html.escape', 'escape', (['"""<h3>header</h3>"""'], {}), "('<h3>header</h3>')\n", (3475, 3494), False, 'from html import escape\n'), ((3767, 3794), 'html.escape', 'escape', (['"""<h3>header</h3>\n"""'], {}), "('<h3>header</h3>\\n')\n", (3773, 3794), False, 'from html import escape\n'), ((3052, 3077), 'html.escape', 'escape', (['"""<h3>header</h3>"""'], {}), "('<h3>header</h3>')\n", (3058, 3077), False, 'from html import escape\n'), ((3719, 3748), 'html.escape', 'escape', (['"""<img href="link" />"""'], {}), '(\'<img href="link" />\')\n', (3725, 3748), False, 'from html import escape\n'), ((3397, 3426), 'html.escape', 'escape', (['"""<img href="link">\n"""'], {}), '(\'<img href="link">\\n\')\n', (3403, 3426), False, 'from html import escape\n'), ((2980, 3009), 'html.escape', 'escape', (['"""<img href="link" />"""'], {}), '(\'<img href="link" />\')\n', (2986, 3009), False, 'from html import escape\n')] |
#!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_upload
short_description: Uploads files to Cisco FTD devices over HTTP(S)
description:
- Uploads files to Cisco FTD devices including disk files, backups, and upgrades.
version_added: "2.7"
author: "Cisco Systems, Inc."
options:
operation:
description:
- The name of the operation to execute.
- Only operations that upload file can be used in this module.
required: true
type: str
file_to_upload:
description:
- Absolute path to the file that should be uploaded.
required: true
type: path
version_added: "2.8"
register_as:
description:
- Specifies Ansible fact name that is used to register received response from the FTD device.
type: str
"""
EXAMPLES = """
- name: Upload disk file
ftd_file_upload:
operation: 'postuploaddiskfile'
file_to_upload: /tmp/test1.txt
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import construct_ansible_facts, FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField
def is_upload_operation(op_spec):
return op_spec[OperationField.METHOD] == HTTPMethod.POST or 'UploadStatus' in op_spec[OperationField.MODEL_NAME]
def main():
fields = dict(
operation=dict(type='str', required=True),
file_to_upload=dict(type='path', required=True),
register_as=dict(type='str'),
)
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
params = module.params
connection = Connection(module._socket_path)
op_spec = connection.get_operation_spec(params['operation'])
if op_spec is None:
module.fail_json(msg='Operation with specified name is not found: %s' % params['operation'])
if not is_upload_operation(op_spec):
module.fail_json(
msg='Invalid upload operation: %s. The operation must make POST request and return UploadStatus model.' %
params['operation'])
try:
if module.check_mode:
module.exit_json()
resp = connection.upload_file(params['file_to_upload'], op_spec[OperationField.URL])
module.exit_json(changed=True, response=resp, ansible_facts=construct_ansible_facts(resp, module.params))
except FtdServerError as e:
module.fail_json(msg='Upload request for %s operation failed. Status code: %s. '
'Server response: %s' % (params['operation'], e.code, e.response))
if __name__ == '__main__':
main()
| [
"ansible.module_utils.basic.AnsibleModule",
"ansible.module_utils.network.ftd.common.construct_ansible_facts",
"ansible.module_utils.connection.Connection"
]
| [((2611, 2672), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'fields', 'supports_check_mode': '(True)'}), '(argument_spec=fields, supports_check_mode=True)\n', (2624, 2672), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((2744, 2775), 'ansible.module_utils.connection.Connection', 'Connection', (['module._socket_path'], {}), '(module._socket_path)\n', (2754, 2775), False, 'from ansible.module_utils.connection import Connection\n'), ((3421, 3465), 'ansible.module_utils.network.ftd.common.construct_ansible_facts', 'construct_ansible_facts', (['resp', 'module.params'], {}), '(resp, module.params)\n', (3444, 3465), False, 'from ansible.module_utils.network.ftd.common import construct_ansible_facts, FtdServerError, HTTPMethod\n')] |
"""Trust ping admin routes."""
from aiohttp import web
from aiohttp_apispec import docs
from ..connections.models.connection_record import ConnectionRecord
from .messages.ping import Ping
from ...storage.error import StorageNotFoundError
@docs(tags=["trustping"], summary="Send a trust ping to a connection")
async def connections_send_ping(request: web.BaseRequest):
"""
Request handler for sending a trust ping to a connection.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
connection_id = request.match_info["id"]
outbound_handler = request.app["outbound_message_router"]
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
return web.HTTPNotFound()
if connection.is_active or connection.state == connection.STATE_RESPONSE:
msg = Ping()
await outbound_handler(msg, connection_id=connection_id)
await connection.log_activity(context, "ping", connection.DIRECTION_SENT)
return web.HTTPOk()
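# The route registered below is called as: POST /connections/{id}/send-ping
# (the handler reads no request body; the connection id comes from the URL path).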
async def register(app: web.Application):
"""Register routes."""
app.add_routes([web.post("/connections/{id}/send-ping", connections_send_ping)])
| [
"aiohttp.web.HTTPOk",
"aiohttp_apispec.docs",
"aiohttp.web.post",
"aiohttp.web.HTTPNotFound"
]
| [((243, 312), 'aiohttp_apispec.docs', 'docs', ([], {'tags': "['trustping']", 'summary': '"""Send a trust ping to a connection"""'}), "(tags=['trustping'], summary='Send a trust ping to a connection')\n", (247, 312), False, 'from aiohttp_apispec import docs\n'), ((1074, 1086), 'aiohttp.web.HTTPOk', 'web.HTTPOk', ([], {}), '()\n', (1084, 1086), False, 'from aiohttp import web\n'), ((795, 813), 'aiohttp.web.HTTPNotFound', 'web.HTTPNotFound', ([], {}), '()\n', (811, 813), False, 'from aiohttp import web\n'), ((1179, 1241), 'aiohttp.web.post', 'web.post', (['"""/connections/{id}/send-ping"""', 'connections_send_ping'], {}), "('/connections/{id}/send-ping', connections_send_ping)\n", (1187, 1241), False, 'from aiohttp import web\n')] |
"""Exception which are not actually thrown, only their docstrings are used."""
import colorama
import sys
__all__ = [
"Error",
"ParentIsNotAFolderError",
"InvalidURLError",
"ImageFormatNotSupportedError",
"ImageNotSpecifiedError",
"FolderIconAlreadyExistsError",
"DesktopIniError",
"exception_exit",
]
class Error(Exception):
"""Base class for all FolderIkon errors."""
color = False
def __repr__(self):
return self.red(self.__doc__)
@staticmethod
def red(string):
if Error.color:
return colorama.Fore.RED + string
return string
class ParentIsNotAFolderError(Error):
"""Argument passed to --parent is not a folder."""
class InvalidURLError(Error):
"""Invalid image URL"""
def __init__(self, url):
self.__url = url
super().__init__()
def __repr__(self):
return super().__repr__() + " " + self.__url
class ImageFormatNotSupportedError(Error):
def __init__(self, fmt):
self.__fmt = fmt
super().__init__()
def __repr__(self):
return f"Image format {self.red(self.__fmt)} is not supported. Only ICO, JPG and PNG are supported."
class ImageNotSpecifiedError(Error):
"""An image with a supported format could not be found in this directory."""
class FolderIconAlreadyExistsError(Error):
"""Folder icon already exists."""
class DesktopIniError(Error):
"""The 'desktop.ini' file could not be parsed. Delete it and try again."""
def __init__(self, exc):
self.__exc = exc
super().__init__()
def __repr__(self):
exc_name = self.__exc.__class__.__name__
exc_info = f"An exception of {exc_name} occured when parsing it."
return super().__repr__() + " " + exc_info
def exception_exit(exc):
print(repr(exc()))
sys.exit(-1)
| [
"sys.exit"
]
| [((1848, 1860), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1856, 1860), False, 'import sys\n')] |
# Generated by Django 3.2 on 2021-04-10 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("books", "0003_auto_20210410_1231")]
operations = [
migrations.AlterField(
model_name="book",
name="category",
field=models.CharField(
choices=[
("fiction", "Fiction"),
("regular", "Regular"),
("novel", "Novel"),
],
default="regular",
max_length=7,
),
)
]
| [
"django.db.models.CharField"
]
| [((317, 449), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('fiction', 'Fiction'), ('regular', 'Regular'), ('novel', 'Novel')]", 'default': '"""regular"""', 'max_length': '(7)'}), "(choices=[('fiction', 'Fiction'), ('regular', 'Regular'), (\n 'novel', 'Novel')], default='regular', max_length=7)\n", (333, 449), False, 'from django.db import migrations, models\n')] |
"""
MAIN STYLING AND RENDERING FILE
Requirements:
------------------------------------------------------------------------------
IMPORTANT! This has only been tested with Blender 2.79 API. We have run this
on Linux and MacOS.
Execution:
------------------------------------------------------------------------------
This script is intended to run inside blender launched in background mode.
Sample invocation is:
blender --background --factory-startup --python-exit-code 1 PATH_TO_MY_BLEND.blend \
--python blender/render_main.py -- \
--width=500 <ANY OTHER PYTHON FLAGS FROM render_main.py>
'--factory-startup' is used to prevent custom settings from interfering.
'--python-exit-code 1' makes blender exit with code 1 if this script throws an error
'--' causes blender to ignore all following arguments so python can use them.
See blender --help for details. See pipeline.sh for sample usage.
Capabilities:
------------------------------------------------------------------------------
It is assumed that blender is invoked with a single blend. This script is a
jack-of-all-trades for setting up camera, lighting, styling, and rendering for
a custom stylized animation benchmark. We found it easier to run the script
separately for each phase of data processing (see pipeline.sh),
as this way the output can be easily examined for problems after every stage.
However, one-shot execution should also be possible.
See flags below for full capabilities. The trickiest bit is: different metadata
only works with particular render engine option. The script will raise errors
if incorrect engine is specified:
- Vertex paint for correspondences - blender render (no gamma correction!)
- Normals in camera space - blender render (no gamma correction!)
- Flow vector pass - cycles (blender render is buggy)
- Red stylit reference material - cycles
- Env lighting for mixamo models - blender render only
"""
import bpy
import argparse
import logging
import os
import random
import sys
import time
import traceback
# Add to path to make sure we can import modules while running inside Blender.
__sdir = os.path.dirname(os.path.realpath(__file__))
if __sdir not in sys.path:
sys.path.append(__sdir)
import color_util
import geo_util
import io_util
import render_util
import stylit_util
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
try:
# FLAGS
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Configurable utility to modify blend and/or render images/flow/metadata.')
parser.add_argument(
'--random_seed', action='store', type=int, default=-1,
help='Integer seed for random number generator; used if > 0.')
# Rendering ----------------------------------------------------------------
parser.add_argument(
'--width', action='store', type=int, default=1500,
help='Width to render at.')
parser.add_argument(
'--height', action='store', type=int, default=1500,
help='Height to render at.')
parser.add_argument(
'--quality_samples', action='store', type=int, default=-1,
help='If positive and using cycles, will use this many samples per pixel; ' +
'e.g. 128 is slow, 10 is comparatively fast.')
parser.add_argument(
'--start_frame', action='store', type=int, default=0,
help='Frame to start rendering at (relative to first frame).')
parser.add_argument(
'--rendered_frames', action='store', type=int, default=0,
help='Maximum frames to render; 0 for none; -1 for all.')
parser.add_argument(
'--skip_existing_frames', action='store_true', default=False,
help='If true, skips existing frames matching --frame_output_prefix.')
parser.add_argument(
'--use_cycles', action='store_true', default=False,
help='If true, sets Cycles as the rendering engine, else leaves unchanged.')
parser.add_argument(
'--use_blender_render', action='store_true', default=False,
help='If true, sets Blender Render as the rendering engine, else leaves unchanged.')
# Outputs ------------------------------------------------------------------
parser.add_argument(
'--frame_output_prefix', action='store', type=str, default='',
help='If set, will set image output to <frame_output_prefix><frame#>.PNG; ' +
'should include full path.')
parser.add_argument(
'--render_metadata_exr', action='store_true', default=False,
help='If true, renders all metadata passes as a multilayer EXR file.')
parser.add_argument(
'--objectids_key_file', action='store', type=str, default='',
help='Directory to write objectids to, as images.')
parser.add_argument(
'--world_normals_output_dir', action='store', type=str, default='',
help='Directory to write world space normals to, as images ' +
'(only compatible with --use_cycles.')
parser.add_argument(
'--camera_normals_output_dir', action='store', type=str, default='',
help='Directory to write camera space normals to, as images ' +
'(only compatible with --use_blender_render.')
parser.add_argument(
'--enable_gamma_correction', action='store_true', default=False,
help='We disable gamma correction by default, as it corrupts the ' +
'metadata rendering; set this on to enable.')
parser.add_argument(
'--bg_name', action='store', type=str, default="STYMO_BG",
help='If any object name matches this substring, it will be treated as ' +
'background for the purpose of id labeling and stylit rendering.')
parser.add_argument(
'--output_blend', action='store', type=str, default='',
help='If set, will output modified blend here (must be absolute path); ' +
'if setting linestyle and/or material, will replace special substrings ' +
'<M> and <L> with material and linestyle.')
parser.add_argument(
'--info_file', action='store', type=str, default='',
help='If set, may output auxiliary information into this file.')
# Camera -------------------------------------------------------------------
parser.add_argument(
'--set_camera', action='store', type=int, default=0,
help='If >= 0, selects ith camera and deletes all other cameras; ' +
'if i > num cameras, generates a random one instead.')
parser.add_argument(
'--keep_extra_cameras', action='store_true',
help='If --set_camera, will not delete extra cameras.')
parser.add_argument(
'--add_random_camera_motion', action='store_true',
help='If generating a random camera and this is true, creates zoom/flyaround/pan; '
'WARNING: parameters are tuned for mixamo character blends.')
# Animation range ----------------------------------------------------------
parser.add_argument(
'--offset_scene_start_frame_by', action='store', type=int, default=0,
help='Unlike --start_frame, which just controls the rendering range, this ' +
'flag offsets the current scene start frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
parser.add_argument(
'--offset_scene_end_frame_by', action='store', type=int, default=0,
help='Unlike --rendered_frames, which just controls the rendering range, this ' +
'flag offsets the current scene end frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
# Lighting -----------------------------------------------------------------
parser.add_argument(
'--set_env_lighting_image', action='store', type=str, default='',
help='Set to image path or directory of environment map images to set ' +
'environment lighting; only works with --use_blender_render.')
parser.add_argument(
'--set_stylit_lighting', action='store_true',
help='If true, sets consistent lighting to render input for stylit.')
# Styles -------------------------------------------------------------------
parser.add_argument(
'--set_stylit_style', action='store_true',
help='If true, sets red material style used for stylit style transfer.')
parser.add_argument(
'--set_corresp_style', action='store_true',
help='If true, will set per-vertex materials to render correspondences.')
parser.add_argument(
'--set_objectids_style', action='store_true',
help='If true, will set objectids to render using flat materials.')
parser.add_argument(
'--deterministic_objectid_colors', action='store_true',
help='If true, objectid colors will not be shuffled; use for testing.')
parser.add_argument(
'--linestyles_blend', action='store', type=str, default='',
help='Path to blend containing all the line styles.')
parser.add_argument(
'--set_linestyle_matching', action='store', type=str, default='',
help='Regex matching linestyle(s) in --line_styles_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_line_color', action='store_true',
help='If true, randomizes line color if line is set.')
parser.add_argument(
'--materials_blend', action='store', type=str, default='',
help='Path to blend containing all the material styles (e.g. textured blender styles).')
parser.add_argument(
'--set_materials_matching', action='store', type=str, default='',
help='Regex matching materials(s) in --materials_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_material_color', action='store_true',
help='If true, randomizes material color if material is set.')
# Custom color control
parser.add_argument(
'--material_color_choices', action='store', type=str, default='',
help='String of format R,G,B R2,G2,B2 ... of colors to choose from if ' +
'randomizing material colors.')
parser.add_argument(
'--line_hue_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Hue in this range (two numbers,csv).')
parser.add_argument(
'--line_sat_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Saturation in this range (two numbers,csv).')
parser.add_argument(
'--line_value_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Value in this range (two numbers,csv).')
# Parse only arguments after --
# --------------------------------------------------------------------------
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:]
args = parser.parse_args(argv)
if args.random_seed > 0:
print('Using --random_seed=%d as random seed.' % args.random_seed)
random.seed(args.random_seed)
else:
print('Using time as random seed.')
random.seed(time.time())
render_util.print_blend_diagnostics()
# Handle camera ------------------------------------------------------------
if args.set_camera >= 0:
cam = None
if args.keep_extra_cameras:
cam = geo_util.get_camera_by_number(args.set_camera)
else:
cam = geo_util.delete_all_but_one_camera(args.set_camera)
if cam is None:
print('Generating a random camera.')
bbox = geo_util.get_scene_bbox()
cam = geo_util.create_random_camera(bbox, 2.5, 2.5, 2.5)
if args.add_random_camera_motion:
print('Adding motion to camera.')
geo_util.mixamo_add_random_camera_motion(cam)
geo_util.disable_camera_depth_of_field(cam)
else:
cam = geo_util.get_single_camera_or_die()
# Set active camera
bpy.context.scene.camera = cam
# Handle frame bounds ------------------------------------------------------
orig_start = bpy.context.scene.frame_start
bpy.context.scene.frame_start = orig_start + args.offset_scene_start_frame_by
if args.offset_scene_end_frame_by > 0:
bpy.context.scene.frame_end = orig_start + args.offset_scene_end_frame_by
# Handle lighting ----------------------------------------------------------
info_file = None
if args.info_file:
info_file = open(args.info_file, 'w')
if len(args.set_env_lighting_image) > 0:
if not args.use_blender_render:
raise RuntimeError(
'Error: --set_env_lighting_image="img" only works with --use_blender_render')
render_util.setup_realistic_lighting(args.set_env_lighting_image, 10.0, False)
if args.set_stylit_lighting:
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_lighting only works with --use_cycles')
stylit_util.setup_stylit_lighting()
# Handle styles ------------------------------------------------------------
nstyles = len([x for x in [args.set_stylit_lighting,
args.set_corresp_style, args.set_objectids_style,
(args.set_linestyle_matching or args.set_materials_matching)]
if x])
if nstyles > 1:
raise RuntimeError(
'Error: incompatible rendering styles specified; only one of these can be true: ' +
'--set_stylit_lighting OR ' +
'--set_corresp_style OR --set_objectids_style OR ' +
'(--set_linestyle_matching and/or --set_materials_matching)')
linestyle_name = 'default'
material_name = 'default'
if args.set_stylit_style: # Red material used for stylit rendering
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_style only works with --use_cycles')
render_util.clear_unnecessary_settings()
stylit_util.setup_stylit_materials(bg_name=args.bg_name)
elif args.set_corresp_style: # Per-vertex correspondence rendering
if not args.use_blender_render:
raise RuntimeError(
'Correspondence rendering (--set_corresp_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
render_util.set_correspondence_style()
elif args.set_objectids_style: # Object Ids rendered in flat color
if not args.use_blender_render:
raise RuntimeError(
'Correspondence rendering (--set_objectids_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
idsinfo = render_util.set_objectids_style(
bg_name=args.bg_name, deterministic=args.deterministic_objectid_colors)
if idsinfo and args.objectids_key_file:
with open(os.path.join(args.objectids_key_file), 'w') as f:
for i in range(len(idsinfo)):
f.write('%s %d %d %d\n' %
(idsinfo[i][0], idsinfo[i][1][0],
idsinfo[i][1][1], idsinfo[i][1][2]))
elif args.set_linestyle_matching or args.set_materials_matching: # Freestyle / toon shading
if not args.use_blender_render:
raise RuntimeError(
'Linestyles and materials only implemented for --use_blender_render')
render_util.clear_unnecessary_settings()
if len(args.set_linestyle_matching) > 0:
if len(args.linestyles_blend) == 0:
raise RuntimeError(
'Error: Must set --linestyles_blend with line exemplars ' +
'if requesting --set_linestyle_matching.')
line_color = None
if args.randomize_line_color:
line_color = color_util.get_random_color(
prob_dark=0.8,
bounds=color_util.parse_hsv_bounds(args.line_hue_range,
args.line_sat_range,
args.line_value_range))
linestyle_name = render_util.set_linestyle(
args.linestyles_blend, args.set_linestyle_matching,
color=line_color)
if info_file:
info_file.write('LINESTYLE %s\n' % io_util.strip_blender_name(linestyle_name))
if len(args.set_materials_matching) > 0:
if len(args.materials_blend) == 0:
raise RuntimeError(
'Error: Must set --materials_blend with material ' +
'exemplars if requesting --set_materials_matching.')
mat_color_randomizer = None
if args.randomize_material_color:
if args.material_color_choices:
mat_color_randomizer = color_util.make_color_getter(
args.material_color_choices)
else:
mat_color_randomizer = color_util.make_random_color_getter()
material_name = render_util.set_materials(
args.materials_blend, args.set_materials_matching,
color_randomizer=mat_color_randomizer)
if info_file:
info_file.write('MATSTYLE %s\n' % io_util.strip_blender_name(material_name))
# Handle rendering settings ------------------------------------------------
if args.use_cycles and args.use_blender_render:
raise RuntimeError('Can specify only one of --use_cycles and --use_blender_render')
if args.use_cycles or args.use_blender_render:
nsamples = (args.quality_samples if args.quality_samples > 0 else None)
render_util.set_render_settings(args.use_cycles, nsamples=nsamples,
enable_gamma=args.enable_gamma_correction)
if args.width > 0 and args.height > 0:
render_util.set_width_height(args.width, args.height)
if args.world_normals_output_dir or args.camera_normals_output_dir:
if args.world_normals_output_dir and args.camera_normals_output_dir:
raise RuntimeError('Only one type of normals can be output at once.')
if args.world_normals_output_dir and not args.use_cycles:
raise RuntimeError('World normals can only be output with --use_cycles.')
elif args.camera_normals_output_dir and not args.use_blender_render:
raise RuntimeError('Camera space normals can only be output with --use_blender_render.')
render_util.init_normals_render_nodes(
(args.world_normals_output_dir or args.camera_normals_output_dir),
use_cycles=args.use_cycles)
# Handle saving -------------------------------------------------------
if len(args.output_blend) > 0:
bpy.ops.file.pack_all()
args.output_blend = args.output_blend.replace('<M>', io_util.strip_blender_name(material_name))
args.output_blend = args.output_blend.replace('<L>', io_util.strip_blender_name(linestyle_name))
print('Saving blend to %s' % args.output_blend)
geo_util.save_blend(args.output_blend)
if args.rendered_frames != 0:
if args.render_metadata_exr and not args.use_cycles:
raise RuntimeError('Must set --use_cycles=True to render out flow with ' +
'--render_metadata_exr')
print('Rendering frames')
render_util.render_animation(
args.frame_output_prefix, args.rendered_frames,
start_frame_offset=args.start_frame,
render_exr=args.render_metadata_exr,
skip_existing=args.skip_existing_frames)
except Exception as e:
tb = traceback.format_exc()
LOG.critical(tb)
LOG.critical('Script failed')
raise e
LOG.critical('Script completed')
| [
"logging.getLogger",
"render_util.set_width_height",
"render_util.init_normals_render_nodes",
"render_util.set_materials",
"geo_util.delete_all_but_one_camera",
"geo_util.get_camera_by_number",
"sys.path.append",
"render_util.set_objectids_style",
"stylit_util.setup_stylit_lighting",
"argparse.ArgumentParser",
"geo_util.save_blend",
"color_util.make_color_getter",
"render_util.render_animation",
"render_util.set_render_settings",
"render_util.print_blend_diagnostics",
"geo_util.get_scene_bbox",
"geo_util.disable_camera_depth_of_field",
"color_util.parse_hsv_bounds",
"color_util.make_random_color_getter",
"render_util.setup_realistic_lighting",
"geo_util.mixamo_add_random_camera_motion",
"geo_util.get_single_camera_or_die",
"io_util.strip_blender_name",
"time.time",
"render_util.clear_unnecessary_settings",
"traceback.format_exc",
"os.path.join",
"random.seed",
"os.path.realpath",
"geo_util.create_random_camera",
"stylit_util.setup_stylit_materials",
"bpy.ops.file.pack_all",
"render_util.set_correspondence_style",
"render_util.set_linestyle"
]
| [((2320, 2347), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2337, 2347), False, 'import logging\n'), ((2142, 2168), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2158, 2168), False, 'import os\n'), ((2201, 2224), 'sys.path.append', 'sys.path.append', (['__sdir'], {}), '(__sdir)\n', (2216, 2224), False, 'import sys\n'), ((2504, 2620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Configurable utility to modify blend and/or render images/flow/metadata."""'}), "(description=\n 'Configurable utility to modify blend and/or render images/flow/metadata.')\n", (2527, 2620), False, 'import argparse\n'), ((12116, 12153), 'render_util.print_blend_diagnostics', 'render_util.print_blend_diagnostics', ([], {}), '()\n', (12151, 12153), False, 'import render_util\n'), ((11978, 12007), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (11989, 12007), False, 'import random\n'), ((12873, 12916), 'geo_util.disable_camera_depth_of_field', 'geo_util.disable_camera_depth_of_field', (['cam'], {}), '(cam)\n', (12911, 12916), False, 'import geo_util\n'), ((12949, 12984), 'geo_util.get_single_camera_or_die', 'geo_util.get_single_camera_or_die', ([], {}), '()\n', (12982, 12984), False, 'import geo_util\n'), ((13838, 13916), 'render_util.setup_realistic_lighting', 'render_util.setup_realistic_lighting', (['args.set_env_lighting_image', '(10.0)', '(False)'], {}), '(args.set_env_lighting_image, 10.0, False)\n', (13874, 13916), False, 'import render_util\n'), ((14120, 14155), 'stylit_util.setup_stylit_lighting', 'stylit_util.setup_stylit_lighting', ([], {}), '()\n', (14153, 14155), False, 'import stylit_util\n'), ((15172, 15212), 'render_util.clear_unnecessary_settings', 'render_util.clear_unnecessary_settings', ([], {}), '()\n', (15210, 15212), False, 'import render_util\n'), ((15225, 15281), 'stylit_util.setup_stylit_materials', 'stylit_util.setup_stylit_materials', ([], {'bg_name': 'args.bg_name'}), '(bg_name=args.bg_name)\n', (15259, 15281), False, 'import stylit_util\n'), ((19281, 19395), 'render_util.set_render_settings', 'render_util.set_render_settings', (['args.use_cycles'], {'nsamples': 'nsamples', 'enable_gamma': 'args.enable_gamma_correction'}), '(args.use_cycles, nsamples=nsamples,\n enable_gamma=args.enable_gamma_correction)\n', (19312, 19395), False, 'import render_util\n'), ((19496, 19549), 'render_util.set_width_height', 'render_util.set_width_height', (['args.width', 'args.height'], {}), '(args.width, args.height)\n', (19524, 19549), False, 'import render_util\n'), ((20153, 20288), 'render_util.init_normals_render_nodes', 'render_util.init_normals_render_nodes', (['(args.world_normals_output_dir or args.camera_normals_output_dir)'], {'use_cycles': 'args.use_cycles'}), '(args.world_normals_output_dir or args\n .camera_normals_output_dir, use_cycles=args.use_cycles)\n', (20190, 20288), False, 'import render_util\n'), ((20451, 20474), 'bpy.ops.file.pack_all', 'bpy.ops.file.pack_all', ([], {}), '()\n', (20472, 20474), False, 'import bpy\n'), ((20764, 20802), 'geo_util.save_blend', 'geo_util.save_blend', (['args.output_blend'], {}), '(args.output_blend)\n', (20783, 20802), False, 'import geo_util\n'), ((21109, 21309), 'render_util.render_animation', 'render_util.render_animation', (['args.frame_output_prefix', 'args.rendered_frames'], {'start_frame_offset': 'args.start_frame', 'render_exr': 'args.render_metadata_exr', 'skip_existing': 'args.skip_existing_frames'}), 
'(args.frame_output_prefix, args.rendered_frames,\n start_frame_offset=args.start_frame, render_exr=args.\n render_metadata_exr, skip_existing=args.skip_existing_frames)\n', (21137, 21309), False, 'import render_util\n'), ((21407, 21429), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (21427, 21429), False, 'import traceback\n'), ((12094, 12105), 'time.time', 'time.time', ([], {}), '()\n', (12103, 12105), False, 'import time\n'), ((12358, 12404), 'geo_util.get_camera_by_number', 'geo_util.get_camera_by_number', (['args.set_camera'], {}), '(args.set_camera)\n', (12387, 12404), False, 'import geo_util\n'), ((12445, 12496), 'geo_util.delete_all_but_one_camera', 'geo_util.delete_all_but_one_camera', (['args.set_camera'], {}), '(args.set_camera)\n', (12479, 12496), False, 'import geo_util\n'), ((12602, 12627), 'geo_util.get_scene_bbox', 'geo_util.get_scene_bbox', ([], {}), '()\n', (12625, 12627), False, 'import geo_util\n'), ((12650, 12700), 'geo_util.create_random_camera', 'geo_util.create_random_camera', (['bbox', '(2.5)', '(2.5)', '(2.5)'], {}), '(bbox, 2.5, 2.5, 2.5)\n', (12679, 12700), False, 'import geo_util\n'), ((12814, 12859), 'geo_util.mixamo_add_random_camera_motion', 'geo_util.mixamo_add_random_camera_motion', (['cam'], {}), '(cam)\n', (12854, 12859), False, 'import geo_util\n'), ((15587, 15627), 'render_util.clear_unnecessary_settings', 'render_util.clear_unnecessary_settings', ([], {}), '()\n', (15625, 15627), False, 'import render_util\n'), ((15640, 15678), 'render_util.set_correspondence_style', 'render_util.set_correspondence_style', ([], {}), '()\n', (15676, 15678), False, 'import render_util\n'), ((20540, 20581), 'io_util.strip_blender_name', 'io_util.strip_blender_name', (['material_name'], {}), '(material_name)\n', (20566, 20581), False, 'import io_util\n'), ((20648, 20690), 'io_util.strip_blender_name', 'io_util.strip_blender_name', (['linestyle_name'], {}), '(linestyle_name)\n', (20674, 20690), False, 'import io_util\n'), ((15986, 16026), 'render_util.clear_unnecessary_settings', 'render_util.clear_unnecessary_settings', ([], {}), '()\n', (16024, 16026), False, 'import render_util\n'), ((16049, 16157), 'render_util.set_objectids_style', 'render_util.set_objectids_style', ([], {'bg_name': 'args.bg_name', 'deterministic': 'args.deterministic_objectid_colors'}), '(bg_name=args.bg_name, deterministic=args.\n deterministic_objectid_colors)\n', (16080, 16157), False, 'import render_util\n'), ((16818, 16858), 'render_util.clear_unnecessary_settings', 'render_util.clear_unnecessary_settings', ([], {}), '()\n', (16856, 16858), False, 'import render_util\n'), ((17615, 17715), 'render_util.set_linestyle', 'render_util.set_linestyle', (['args.linestyles_blend', 'args.set_linestyle_matching'], {'color': 'line_color'}), '(args.linestyles_blend, args.\n set_linestyle_matching, color=line_color)\n', (17640, 17715), False, 'import render_util\n'), ((18606, 18725), 'render_util.set_materials', 'render_util.set_materials', (['args.materials_blend', 'args.set_materials_matching'], {'color_randomizer': 'mat_color_randomizer'}), '(args.materials_blend, args.set_materials_matching,\n color_randomizer=mat_color_randomizer)\n', (18631, 18725), False, 'import render_util\n'), ((16249, 16286), 'os.path.join', 'os.path.join', (['args.objectids_key_file'], {}), '(args.objectids_key_file)\n', (16261, 16286), False, 'import os\n'), ((18375, 18432), 'color_util.make_color_getter', 'color_util.make_color_getter', (['args.material_color_choices'], {}), '(args.material_color_choices)\n', 
(18403, 18432), False, 'import color_util\n'), ((18535, 18572), 'color_util.make_random_color_getter', 'color_util.make_random_color_getter', ([], {}), '()\n', (18570, 18572), False, 'import color_util\n'), ((17369, 17466), 'color_util.parse_hsv_bounds', 'color_util.parse_hsv_bounds', (['args.line_hue_range', 'args.line_sat_range', 'args.line_value_range'], {}), '(args.line_hue_range, args.line_sat_range, args.\n line_value_range)\n', (17396, 17466), False, 'import color_util\n'), ((17838, 17880), 'io_util.strip_blender_name', 'io_util.strip_blender_name', (['linestyle_name'], {}), '(linestyle_name)\n', (17864, 17880), False, 'import io_util\n'), ((18848, 18889), 'io_util.strip_blender_name', 'io_util.strip_blender_name', (['material_name'], {}), '(material_name)\n', (18874, 18889), False, 'import io_util\n')] |
from foundations_spec import *
from unittest.mock import call
class TestArtifactDownloader(Spec):
mock_archiver = let_mock()
make_directory_mock = let_patch_mock('os.makedirs')
@let
def source_directory(self):
return self.faker.uri_path()
@let
def download_directory(self):
return self.faker.uri_path()
@let
def artifact_downloader(self):
from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader
return ArtifactDownloader(self.mock_archiver)
@let
def mock_foundations_files(self):
return [
'foundations/a',
'foundations/b',
'foundations_contrib/c',
'foundations_contrib/d',
'foundations_events/e',
'foundations_events/f',
'foundations_internal/g',
'foundations_internal/h',
'jobs/i',
'jobs/j',
'model_serving/k',
'model_serving/l',
'venv/m',
'venv/n',
'docker_image_version.sh',
'download_gui_images.sh',
'foundations_gui.sh',
'foundations_package_manifest.yaml',
'foundations_requirements.txt',
'job.tgz',
'run.env',
'run.sh',
'p.bin',
'q.bin',
'template/t',
'template/u',
]
def test_downloads_single_file_to_specified_directory(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/my/file', self.download_directory + '/path/to/my/file')
def test_downloads_multiple_files_to_specified_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('', self.download_directory)
first_file_download = call('different/file', self.download_directory + '/different/file')
second_file_download = call('other/different/file', self.download_directory + '/other/different/file')
self.mock_archiver.fetch_persisted_file.assert_has_calls([first_file_download, second_file_download])
def test_ensures_target_directory_exists(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.make_directory_mock.assert_called_with(self.download_directory + '/path/to/my', exist_ok=True)
def test_downloads_multiple_files_to_specified_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('', self.download_directory)
first_dirctory_creation = call(self.download_directory + '/different', exist_ok=True)
second_dirctory_creation = call(self.download_directory + '/other/different', exist_ok=True)
self.make_directory_mock.assert_has_calls([first_dirctory_creation, second_dirctory_creation])
def test_downloads_only_files_with_specified_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('other/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('other/different/file', self.download_directory + '/other/different/file')
def test_downloads_only_files_with_specified_source_directory_with_different_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('different/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('different/file', self.download_directory + '/different/file')
def test_download_does_not_include_foundations_files(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['path/to/some/file', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/some/file', self.download_directory + '/path/to/some/file')
def test_download_includes_config_yamls(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['a.config.yaml', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('a.config.yaml', self.download_directory + '/a.config.yaml')
def _mock_file_list(self, file_list):
self.mock_archiver.fetch_miscellaneous = ConditionalReturn()
        self.mock_archiver.fetch_miscellaneous.return_when(file_list, 'job_artifact_listing.pkl')
| [
"unittest.mock.call",
"foundations_contrib.archiving.artifact_downloader.ArtifactDownloader"
]
| [((502, 540), 'foundations_contrib.archiving.artifact_downloader.ArtifactDownloader', 'ArtifactDownloader', (['self.mock_archiver'], {}), '(self.mock_archiver)\n', (520, 540), False, 'from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader\n'), ((2025, 2092), 'unittest.mock.call', 'call', (['"""different/file"""', "(self.download_directory + '/different/file')"], {}), "('different/file', self.download_directory + '/different/file')\n", (2029, 2092), False, 'from unittest.mock import call\n'), ((2124, 2203), 'unittest.mock.call', 'call', (['"""other/different/file"""', "(self.download_directory + '/other/different/file')"], {}), "('other/different/file', self.download_directory + '/other/different/file')\n", (2128, 2203), False, 'from unittest.mock import call\n'), ((2882, 2941), 'unittest.mock.call', 'call', (["(self.download_directory + '/different')"], {'exist_ok': '(True)'}), "(self.download_directory + '/different', exist_ok=True)\n", (2886, 2941), False, 'from unittest.mock import call\n'), ((2977, 3042), 'unittest.mock.call', 'call', (["(self.download_directory + '/other/different')"], {'exist_ok': '(True)'}), "(self.download_directory + '/other/different', exist_ok=True)\n", (2981, 3042), False, 'from unittest.mock import call\n')] |
"""Register controllers.
"""
from safe_control_gym.utils.registration import register
register(id="mpc",
entry_point="safe_control_gym.controllers.mpc.mpc:MPC",
config_entry_point="safe_control_gym.controllers.mpc:mpc.yaml")
register(id="linear_mpc",
entry_point="safe_control_gym.controllers.mpc.linear_mpc:LinearMPC",
config_entry_point="safe_control_gym.controllers.mpc:linear_mpc.yaml")
register(id="gp_mpc",
entry_point="safe_control_gym.controllers.mpc.gp_mpc:GPMPC",
config_entry_point="safe_control_gym.controllers.mpc:gp_mpc.yaml")
register(id="mpsc",
entry_point="safe_control_gym.controllers.mpsc.mpsc:MPSC",
config_entry_point="safe_control_gym.controllers.mpsc:mpsc.yaml")
register(id="ppo",
entry_point="safe_control_gym.controllers.ppo.ppo:PPO",
config_entry_point="safe_control_gym.controllers.ppo:ppo.yaml")
register(id="safe_explorer_ppo",
entry_point="safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO",
config_entry_point="safe_control_gym.controllers.safe_explorer:safe_ppo.yaml")
| [
"safe_control_gym.utils.registration.register"
]
| [((88, 230), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""mpc"""', 'entry_point': '"""safe_control_gym.controllers.mpc.mpc:MPC"""', 'config_entry_point': '"""safe_control_gym.controllers.mpc:mpc.yaml"""'}), "(id='mpc', entry_point='safe_control_gym.controllers.mpc.mpc:MPC',\n config_entry_point='safe_control_gym.controllers.mpc:mpc.yaml')\n", (96, 230), False, 'from safe_control_gym.utils.registration import register\n'), ((246, 420), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""linear_mpc"""', 'entry_point': '"""safe_control_gym.controllers.mpc.linear_mpc:LinearMPC"""', 'config_entry_point': '"""safe_control_gym.controllers.mpc:linear_mpc.yaml"""'}), "(id='linear_mpc', entry_point=\n 'safe_control_gym.controllers.mpc.linear_mpc:LinearMPC',\n config_entry_point='safe_control_gym.controllers.mpc:linear_mpc.yaml')\n", (254, 420), False, 'from safe_control_gym.utils.registration import register\n'), ((431, 590), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""gp_mpc"""', 'entry_point': '"""safe_control_gym.controllers.mpc.gp_mpc:GPMPC"""', 'config_entry_point': '"""safe_control_gym.controllers.mpc:gp_mpc.yaml"""'}), "(id='gp_mpc', entry_point=\n 'safe_control_gym.controllers.mpc.gp_mpc:GPMPC', config_entry_point=\n 'safe_control_gym.controllers.mpc:gp_mpc.yaml')\n", (439, 590), False, 'from safe_control_gym.utils.registration import register\n'), ((600, 754), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""mpsc"""', 'entry_point': '"""safe_control_gym.controllers.mpsc.mpsc:MPSC"""', 'config_entry_point': '"""safe_control_gym.controllers.mpsc:mpsc.yaml"""'}), "(id='mpsc', entry_point=\n 'safe_control_gym.controllers.mpsc.mpsc:MPSC', config_entry_point=\n 'safe_control_gym.controllers.mpsc:mpsc.yaml')\n", (608, 754), False, 'from safe_control_gym.utils.registration import register\n'), ((764, 906), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""ppo"""', 'entry_point': '"""safe_control_gym.controllers.ppo.ppo:PPO"""', 'config_entry_point': '"""safe_control_gym.controllers.ppo:ppo.yaml"""'}), "(id='ppo', entry_point='safe_control_gym.controllers.ppo.ppo:PPO',\n config_entry_point='safe_control_gym.controllers.ppo:ppo.yaml')\n", (772, 906), False, 'from safe_control_gym.utils.registration import register\n'), ((922, 1130), 'safe_control_gym.utils.registration.register', 'register', ([], {'id': '"""safe_explorer_ppo"""', 'entry_point': '"""safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO"""', 'config_entry_point': '"""safe_control_gym.controllers.safe_explorer:safe_ppo.yaml"""'}), "(id='safe_explorer_ppo', entry_point=\n 'safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO',\n config_entry_point=\n 'safe_control_gym.controllers.safe_explorer:safe_ppo.yaml')\n", (930, 1130), False, 'from safe_control_gym.utils.registration import register\n')] |
"""Test module ``plot_profile/utils.py``."""
# Standard library
import logging
# First-party
from plot_profile.utils import count_to_log_level
def test_count_to_log_level():
assert count_to_log_level(0) == logging.ERROR
assert count_to_log_level(1) == logging.WARNING
assert count_to_log_level(2) == logging.INFO
assert count_to_log_level(3) == logging.DEBUG
| [
"plot_profile.utils.count_to_log_level"
]
| [((188, 209), 'plot_profile.utils.count_to_log_level', 'count_to_log_level', (['(0)'], {}), '(0)\n', (206, 209), False, 'from plot_profile.utils import count_to_log_level\n'), ((238, 259), 'plot_profile.utils.count_to_log_level', 'count_to_log_level', (['(1)'], {}), '(1)\n', (256, 259), False, 'from plot_profile.utils import count_to_log_level\n'), ((290, 311), 'plot_profile.utils.count_to_log_level', 'count_to_log_level', (['(2)'], {}), '(2)\n', (308, 311), False, 'from plot_profile.utils import count_to_log_level\n'), ((339, 360), 'plot_profile.utils.count_to_log_level', 'count_to_log_level', (['(3)'], {}), '(3)\n', (357, 360), False, 'from plot_profile.utils import count_to_log_level\n')] |
"""Allows the user to call the library as a cli-module."""
from argparse import ArgumentParser
from .modularcmaes import evaluate_bbob
parser = ArgumentParser(description="Run single function CMAES")
parser.add_argument(
"-f", "--fid", type=int, help="bbob function id", required=False, default=5
)
parser.add_argument(
"-d", "--dim", type=int, help="dimension", required=False, default=5
)
parser.add_argument(
"-i",
"--iterations",
type=int,
help="number of iterations per agent",
required=False,
default=50,
)
parser.add_argument(
"-l", "--logging", required=False, action="store_true", default=False
)
parser.add_argument("-L", "--label", type=str, required=False, default="")
parser.add_argument("-s", "--seed", type=int, required=False, default=42)
parser.add_argument("-p", "--data_folder", type=str, required=False)
parser.add_argument("-a", "--arguments", nargs="+", required=False)
args = vars(parser.parse_args())
for arg in args.pop("arguments") or []:
# pylint: disable=exec-used
exec(arg, None, args)
evaluate_bbob(**args)
| [
"argparse.ArgumentParser"
]
| [((148, 203), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Run single function CMAES"""'}), "(description='Run single function CMAES')\n", (162, 203), False, 'from argparse import ArgumentParser\n')] |
"""Define the CSRmatrix class."""
import numpy as np
from scipy.sparse import coo_matrix
from six import iteritems
from openmdao.matrices.coo_matrix import COOMatrix
class CSRMatrix(COOMatrix):
"""
Sparse matrix in Compressed Row Storage format.
"""
def _build(self, num_rows, num_cols):
"""
Allocate the matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
"""
data, rows, cols = self._build_sparse(num_rows, num_cols)
# get a set of indices that sorts into row major order
srtidxs = np.lexsort((cols, rows))
data = data[srtidxs]
rows = rows[srtidxs]
cols = cols[srtidxs]
# now sort these back into ascending order (our original stacked order)
# so in _update_submat() we can just extract the individual index
# arrays that will map each block into the combined data array.
revidxs = np.argsort(srtidxs)
metadata = self._metadata
for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
if idxs is None:
metadata[key] = (revidxs[ind1:ind2], jac_type, factor)
else:
# apply the reverse index to each part of revidxs so that
# we can avoid copying the index array during updates.
metadata[key] = (revidxs[ind1:ind2][np.argsort(idxs)],
jac_type, factor)
# data array for the CSR will be the same as for the COO since
# it was already in sorted order.
coo = coo_matrix((data, (rows, cols)), shape=(num_rows, num_cols))
coo_data_size = coo.data.size
self._matrix = coo.tocsr()
# make sure data size is the same between coo and csr, else indexing is
# messed up
if coo_data_size != self._matrix.data.size:
raise ValueError("CSR matrix data contains duplicate row/col entries. "
"This would break internal indexing.")
| [
"numpy.argsort",
"numpy.lexsort",
"six.iteritems",
"scipy.sparse.coo_matrix"
]
| [((679, 703), 'numpy.lexsort', 'np.lexsort', (['(cols, rows)'], {}), '((cols, rows))\n', (689, 703), True, 'import numpy as np\n'), ((1037, 1056), 'numpy.argsort', 'np.argsort', (['srtidxs'], {}), '(srtidxs)\n', (1047, 1056), True, 'import numpy as np\n'), ((1149, 1168), 'six.iteritems', 'iteritems', (['metadata'], {}), '(metadata)\n', (1158, 1168), False, 'from six import iteritems\n'), ((1683, 1743), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (rows, cols))'], {'shape': '(num_rows, num_cols)'}), '((data, (rows, cols)), shape=(num_rows, num_cols))\n', (1693, 1743), False, 'from scipy.sparse import coo_matrix\n'), ((1485, 1501), 'numpy.argsort', 'np.argsort', (['idxs'], {}), '(idxs)\n', (1495, 1501), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open("src/sdfconf/_version.py", "rt") as vf:
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in vf:
mo = re.search(VSRE, line, re.M)
if mo:
verstr = mo.group(1)
break
if not mo:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(name = 'sdfconf',
version = verstr,
description = ("Diverse manipulation and analysis tool for .sdf files."),
long_description = read('README.rst'),
install_requires = ['numpy>=1.7.1','matplotlib>=1.4.2'],
author = '<NAME>',
author_email = '<EMAIL>',
maintainer = '<NAME>',
maintainer_email = '<EMAIL>',
packages = ['sdfconf'],
package_dir = {'sdfconf':'src/sdfconf'},
keywords = 'sdf mol2 conformation analyze histogram',
url = 'http://users.jyu.fi/~pentikai/',
license = 'MIT/expat',
entry_points =
{'console_scripts': ['sdfconf = sdfconf.runner:main'],
'setuptools.installation': ['eggsecutable = sdfconf.runner:main',],
},
classifiers= ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry' ,
'Topic :: Software Development :: Libraries',
],
##FIXME
#'''
#package_data = {
# 'sample':['sample_data.sdf']
# },
#'''
      )
| [
"os.path.dirname",
"re.search"
]
| [((311, 338), 're.search', 're.search', (['VSRE', 'line', 're.M'], {}), '(VSRE, line, re.M)\n', (320, 338), False, 'import re\n'), ((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n')] |
from aws_cdk import (
aws_cognito as cognito,
aws_iam as iam,
aws_ssm as ssm,
core
)
class CognitoStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
prj_name = self.node.try_get_context("project_name")
env_name = self.node.try_get_context("env")
user_pool = cognito.CfnUserPool(self, 'cognitouserpool',
auto_verified_attributes = [
'email'
],
username_attributes = [
'email', 'phone_number'
],
user_pool_name = prj_name + '-user-pool',
schema = [
{
'attributeDataType': 'String',
'name': 'param1',
'mutable': True
}
],
policies = cognito.CfnUserPool.PoliciesProperty(
password_policy = cognito.CfnUserPool.PasswordPolicyProperty(
minimum_length = 10,
require_lowercase = True,
require_numbers = True,
require_symbols = False,
require_uppercase = True
)
)
)
user_pool_client = cognito.CfnUserPoolClient(self, 'pool-client',
user_pool_id = user_pool.ref,
client_name = env_name + '-app-client'
)
identity_pool = cognito.CfnIdentityPool(self, 'identity-pool',
allow_unauthenticated_identities = False,
cognito_identity_providers = [
cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
client_id = user_pool_client.ref,
provider_name = user_pool.attr_provider_name
)
],
identity_pool_name = prj_name + '-identity-pool'
)
ssm.StringParameter(self, 'app-id',
parameter_name = '/' + env_name + '/cognito-app-client-id',
string_value = user_pool_client.ref
)
ssm.StringParameter(self, 'user-pool-id',
parameter_name = '/' + env_name + '/cognito-user-pool-id',
string_value = user_pool_client.user_pool_id
)
ssm.StringParameter(self, 'identity-pool-id',
parameter_name = '/' + env_name + '/cognito-identity-pool-id',
string_value = identity_pool.ref
)
| [
"aws_cdk.aws_cognito.CfnIdentityPool.CognitoIdentityProviderProperty",
"aws_cdk.aws_cognito.CfnUserPoolClient",
"aws_cdk.aws_cognito.CfnUserPool.PasswordPolicyProperty",
"aws_cdk.aws_ssm.StringParameter"
]
| [((1289, 1405), 'aws_cdk.aws_cognito.CfnUserPoolClient', 'cognito.CfnUserPoolClient', (['self', '"""pool-client"""'], {'user_pool_id': 'user_pool.ref', 'client_name': "(env_name + '-app-client')"}), "(self, 'pool-client', user_pool_id=user_pool.ref,\n client_name=env_name + '-app-client')\n", (1314, 1405), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n'), ((1913, 2045), 'aws_cdk.aws_ssm.StringParameter', 'ssm.StringParameter', (['self', '"""app-id"""'], {'parameter_name': "('/' + env_name + '/cognito-app-client-id')", 'string_value': 'user_pool_client.ref'}), "(self, 'app-id', parameter_name='/' + env_name +\n '/cognito-app-client-id', string_value=user_pool_client.ref)\n", (1932, 2045), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n'), ((2088, 2234), 'aws_cdk.aws_ssm.StringParameter', 'ssm.StringParameter', (['self', '"""user-pool-id"""'], {'parameter_name': "('/' + env_name + '/cognito-user-pool-id')", 'string_value': 'user_pool_client.user_pool_id'}), "(self, 'user-pool-id', parameter_name='/' + env_name +\n '/cognito-user-pool-id', string_value=user_pool_client.user_pool_id)\n", (2107, 2234), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n'), ((2277, 2419), 'aws_cdk.aws_ssm.StringParameter', 'ssm.StringParameter', (['self', '"""identity-pool-id"""'], {'parameter_name': "('/' + env_name + '/cognito-identity-pool-id')", 'string_value': 'identity_pool.ref'}), "(self, 'identity-pool-id', parameter_name='/' + env_name +\n '/cognito-identity-pool-id', string_value=identity_pool.ref)\n", (2296, 2419), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n'), ((1624, 1760), 'aws_cdk.aws_cognito.CfnIdentityPool.CognitoIdentityProviderProperty', 'cognito.CfnIdentityPool.CognitoIdentityProviderProperty', ([], {'client_id': 'user_pool_client.ref', 'provider_name': 'user_pool.attr_provider_name'}), '(client_id=\n user_pool_client.ref, provider_name=user_pool.attr_provider_name)\n', (1679, 1760), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n'), ((954, 1116), 'aws_cdk.aws_cognito.CfnUserPool.PasswordPolicyProperty', 'cognito.CfnUserPool.PasswordPolicyProperty', ([], {'minimum_length': '(10)', 'require_lowercase': '(True)', 'require_numbers': '(True)', 'require_symbols': '(False)', 'require_uppercase': '(True)'}), '(minimum_length=10,\n require_lowercase=True, require_numbers=True, require_symbols=False,\n require_uppercase=True)\n', (996, 1116), True, 'from aws_cdk import aws_cognito as cognito, aws_iam as iam, aws_ssm as ssm, core\n')] |